Magellan Linux

Contents of /trunk/kernel-alx/patches-3.14/0119-3.14.20-all-fixes.patch



Revision 2506
Fri Oct 17 07:55:45 2014 UTC by niro
File size: 251447 bytes
-patches for 3.14
1 diff --git a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
2 index 1486497a24c1..ce6a1a072028 100644
3 --- a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
4 +++ b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
5 @@ -4,11 +4,13 @@ Specifying interrupt information for devices
6 1) Interrupt client nodes
7 -------------------------
8
9 -Nodes that describe devices which generate interrupts must contain an either an
10 -"interrupts" property or an "interrupts-extended" property. These properties
11 -contain a list of interrupt specifiers, one per output interrupt. The format of
12 -the interrupt specifier is determined by the interrupt controller to which the
13 -interrupts are routed; see section 2 below for details.
14 +Nodes that describe devices which generate interrupts must contain an
15 +"interrupts" property, an "interrupts-extended" property, or both. If both are
16 +present, the latter should take precedence; the former may be provided simply
17 +for compatibility with software that does not recognize the latter. These
18 +properties contain a list of interrupt specifiers, one per output interrupt. The
19 +format of the interrupt specifier is determined by the interrupt controller to
20 +which the interrupts are routed; see section 2 below for details.
21
22 Example:
23 interrupt-parent = <&intc1>;
24 diff --git a/Makefile b/Makefile
25 index b1746b486646..beb7e6f0803b 100644
26 --- a/Makefile
27 +++ b/Makefile
28 @@ -1,6 +1,6 @@
29 VERSION = 3
30 PATCHLEVEL = 14
31 -SUBLEVEL = 19
32 +SUBLEVEL = 20
33 EXTRAVERSION =
34 NAME = Remembering Coco
35
36 diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
37 index 904dcf5973f3..9381754b35cc 100644
38 --- a/arch/arm/boot/dts/dra7-evm.dts
39 +++ b/arch/arm/boot/dts/dra7-evm.dts
40 @@ -50,13 +50,13 @@
41
42 mcspi1_pins: pinmux_mcspi1_pins {
43 pinctrl-single,pins = <
44 - 0x3a4 (PIN_INPUT | MUX_MODE0) /* spi2_clk */
45 - 0x3a8 (PIN_INPUT | MUX_MODE0) /* spi2_d1 */
46 - 0x3ac (PIN_INPUT | MUX_MODE0) /* spi2_d0 */
47 - 0x3b0 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs0 */
48 - 0x3b4 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs1 */
49 - 0x3b8 (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs2 */
50 - 0x3bc (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs3 */
51 + 0x3a4 (PIN_INPUT | MUX_MODE0) /* spi1_sclk */
52 + 0x3a8 (PIN_INPUT | MUX_MODE0) /* spi1_d1 */
53 + 0x3ac (PIN_INPUT | MUX_MODE0) /* spi1_d0 */
54 + 0x3b0 (PIN_INPUT_SLEW | MUX_MODE0) /* spi1_cs0 */
55 + 0x3b4 (PIN_INPUT_SLEW | MUX_MODE0) /* spi1_cs1 */
56 + 0x3b8 (PIN_INPUT_SLEW | MUX_MODE6) /* spi1_cs2.hdmi1_hpd */
57 + 0x3bc (PIN_INPUT_SLEW | MUX_MODE6) /* spi1_cs3.hdmi1_cec */
58 >;
59 };
60
61 diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
62 index 1fd75aa4639d..767f0e376f4d 100644
63 --- a/arch/arm/boot/dts/dra7.dtsi
64 +++ b/arch/arm/boot/dts/dra7.dtsi
65 @@ -178,7 +178,7 @@
66 gpio-controller;
67 #gpio-cells = <2>;
68 interrupt-controller;
69 - #interrupt-cells = <1>;
70 + #interrupt-cells = <2>;
71 };
72
73 gpio2: gpio@48055000 {
74 @@ -189,7 +189,7 @@
75 gpio-controller;
76 #gpio-cells = <2>;
77 interrupt-controller;
78 - #interrupt-cells = <1>;
79 + #interrupt-cells = <2>;
80 };
81
82 gpio3: gpio@48057000 {
83 @@ -200,7 +200,7 @@
84 gpio-controller;
85 #gpio-cells = <2>;
86 interrupt-controller;
87 - #interrupt-cells = <1>;
88 + #interrupt-cells = <2>;
89 };
90
91 gpio4: gpio@48059000 {
92 @@ -211,7 +211,7 @@
93 gpio-controller;
94 #gpio-cells = <2>;
95 interrupt-controller;
96 - #interrupt-cells = <1>;
97 + #interrupt-cells = <2>;
98 };
99
100 gpio5: gpio@4805b000 {
101 @@ -222,7 +222,7 @@
102 gpio-controller;
103 #gpio-cells = <2>;
104 interrupt-controller;
105 - #interrupt-cells = <1>;
106 + #interrupt-cells = <2>;
107 };
108
109 gpio6: gpio@4805d000 {
110 @@ -233,7 +233,7 @@
111 gpio-controller;
112 #gpio-cells = <2>;
113 interrupt-controller;
114 - #interrupt-cells = <1>;
115 + #interrupt-cells = <2>;
116 };
117
118 gpio7: gpio@48051000 {
119 @@ -244,7 +244,7 @@
120 gpio-controller;
121 #gpio-cells = <2>;
122 interrupt-controller;
123 - #interrupt-cells = <1>;
124 + #interrupt-cells = <2>;
125 };
126
127 gpio8: gpio@48053000 {
128 @@ -255,7 +255,7 @@
129 gpio-controller;
130 #gpio-cells = <2>;
131 interrupt-controller;
132 - #interrupt-cells = <1>;
133 + #interrupt-cells = <2>;
134 };
135
136 uart1: serial@4806a000 {
137 diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
138 index 83259b873333..5f833f7adba1 100644
139 --- a/arch/arm/include/asm/tls.h
140 +++ b/arch/arm/include/asm/tls.h
141 @@ -1,6 +1,9 @@
142 #ifndef __ASMARM_TLS_H
143 #define __ASMARM_TLS_H
144
145 +#include <linux/compiler.h>
146 +#include <asm/thread_info.h>
147 +
148 #ifdef __ASSEMBLY__
149 #include <asm/asm-offsets.h>
150 .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
151 @@ -50,6 +53,49 @@
152 #endif
153
154 #ifndef __ASSEMBLY__
155 +
156 +static inline void set_tls(unsigned long val)
157 +{
158 + struct thread_info *thread;
159 +
160 + thread = current_thread_info();
161 +
162 + thread->tp_value[0] = val;
163 +
164 + /*
165 + * This code runs with preemption enabled and therefore must
166 + * be reentrant with respect to switch_tls.
167 + *
168 + * We need to ensure ordering between the shadow state and the
169 + * hardware state, so that we don't corrupt the hardware state
170 + * with a stale shadow state during context switch.
171 + *
172 + * If we're preempted here, switch_tls will load TPIDRURO from
173 + * thread_info upon resuming execution and the following mcr
174 + * is merely redundant.
175 + */
176 + barrier();
177 +
178 + if (!tls_emu) {
179 + if (has_tls_reg) {
180 + asm("mcr p15, 0, %0, c13, c0, 3"
181 + : : "r" (val));
182 + } else {
183 +#ifdef CONFIG_KUSER_HELPERS
184 + /*
185 + * User space must never try to access this
186 + * directly. Expect your app to break
187 + * eventually if you do so. The user helper
188 + * at 0xffff0fe0 must be used instead. (see
189 + * entry-armv.S for details)
190 + */
191 + *((unsigned int *)0xffff0ff0) = val;
192 +#endif
193 + }
194 +
195 + }
196 +}
197 +
198 static inline unsigned long get_tpuser(void)
199 {
200 unsigned long reg = 0;
201 @@ -59,5 +105,23 @@ static inline unsigned long get_tpuser(void)
202
203 return reg;
204 }
205 +
206 +static inline void set_tpuser(unsigned long val)
207 +{
208 + /* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
209 + * we need not update thread_info.
210 + */
211 + if (has_tls_reg && !tls_emu) {
212 + asm("mcr p15, 0, %0, c13, c0, 2"
213 + : : "r" (val));
214 + }
215 +}
216 +
217 +static inline void flush_tls(void)
218 +{
219 + set_tls(0);
220 + set_tpuser(0);
221 +}
222 +
223 #endif
224 #endif /* __ASMARM_TLS_H */
225 diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
226 index 9723d17b8f38..1e782bdeee49 100644
227 --- a/arch/arm/kernel/irq.c
228 +++ b/arch/arm/kernel/irq.c
229 @@ -163,7 +163,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
230 c = irq_data_get_irq_chip(d);
231 if (!c->irq_set_affinity)
232 pr_debug("IRQ%u: unable to set affinity\n", d->irq);
233 - else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
234 + else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
235 cpumask_copy(d->affinity, affinity);
236
237 return ret;
238 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
239 index 92f7b15dd221..5f6e650ec9ab 100644
240 --- a/arch/arm/kernel/process.c
241 +++ b/arch/arm/kernel/process.c
242 @@ -334,6 +334,8 @@ void flush_thread(void)
243 memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
244 memset(&thread->fpstate, 0, sizeof(union fp_state));
245
246 + flush_tls();
247 +
248 thread_notify(THREAD_NOTIFY_FLUSH, thread);
249 }
250
251 diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
252 index 7b8403b76666..80f0d69205e7 100644
253 --- a/arch/arm/kernel/thumbee.c
254 +++ b/arch/arm/kernel/thumbee.c
255 @@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void
256
257 switch (cmd) {
258 case THREAD_NOTIFY_FLUSH:
259 - thread->thumbee_state = 0;
260 + teehbr_write(0);
261 break;
262 case THREAD_NOTIFY_SWITCH:
263 current_thread_info()->thumbee_state = teehbr_read();
264 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
265 index 172ee18ff124..9265b8bb529a 100644
266 --- a/arch/arm/kernel/traps.c
267 +++ b/arch/arm/kernel/traps.c
268 @@ -578,7 +578,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
269 #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
270 asmlinkage int arm_syscall(int no, struct pt_regs *regs)
271 {
272 - struct thread_info *thread = current_thread_info();
273 siginfo_t info;
274
275 if ((no >> 16) != (__ARM_NR_BASE>> 16))
276 @@ -629,21 +628,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
277 return regs->ARM_r0;
278
279 case NR(set_tls):
280 - thread->tp_value[0] = regs->ARM_r0;
281 - if (tls_emu)
282 - return 0;
283 - if (has_tls_reg) {
284 - asm ("mcr p15, 0, %0, c13, c0, 3"
285 - : : "r" (regs->ARM_r0));
286 - } else {
287 - /*
288 - * User space must never try to access this directly.
289 - * Expect your app to break eventually if you do so.
290 - * The user helper at 0xffff0fe0 must be used instead.
291 - * (see entry-armv.S for details)
292 - */
293 - *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
294 - }
295 + set_tls(regs->ARM_r0);
296 return 0;
297
298 #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
299 diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
300 index 0de91fc6de0f..ec4fa868a7ba 100644
301 --- a/arch/arm/kvm/handle_exit.c
302 +++ b/arch/arm/kvm/handle_exit.c
303 @@ -89,6 +89,8 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
304 else
305 kvm_vcpu_block(vcpu);
306
307 + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
308 +
309 return 1;
310 }
311
312 diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
313 index 1b9844d369cc..ee4f7447a1d3 100644
314 --- a/arch/arm/kvm/init.S
315 +++ b/arch/arm/kvm/init.S
316 @@ -98,6 +98,10 @@ __do_hyp_init:
317 mrc p15, 0, r0, c10, c2, 1
318 mcr p15, 4, r0, c10, c2, 1
319
320 + @ Invalidate the stale TLBs from Bootloader
321 + mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
322 + dsb ish
323 +
324 @ Set the HSCTLR to:
325 @ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
326 @ - Endianness: Kernel config
327 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
328 index c914b0052fb9..4551efd28f8d 100644
329 --- a/arch/arm/mach-omap2/omap_hwmod.c
330 +++ b/arch/arm/mach-omap2/omap_hwmod.c
331 @@ -3349,6 +3349,9 @@ int __init omap_hwmod_register_links(struct omap_hwmod_ocp_if **ois)
332 if (!ois)
333 return 0;
334
335 + if (ois[0] == NULL) /* Empty list */
336 + return 0;
337 +
338 if (!linkspace) {
339 if (_alloc_linkspace(ois)) {
340 pr_err("omap_hwmod: could not allocate link space\n");
341 diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
342 index 810c205d668b..2e35ff99f60e 100644
343 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
344 +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
345 @@ -35,6 +35,7 @@
346 #include "i2c.h"
347 #include "mmc.h"
348 #include "wd_timer.h"
349 +#include "soc.h"
350
351 /* Base offset for all DRA7XX interrupts external to MPUSS */
352 #define DRA7XX_IRQ_GIC_START 32
353 @@ -2707,7 +2708,6 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
354 &dra7xx_l4_per3__usb_otg_ss1,
355 &dra7xx_l4_per3__usb_otg_ss2,
356 &dra7xx_l4_per3__usb_otg_ss3,
357 - &dra7xx_l4_per3__usb_otg_ss4,
358 &dra7xx_l3_main_1__vcp1,
359 &dra7xx_l4_per2__vcp1,
360 &dra7xx_l3_main_1__vcp2,
361 @@ -2716,8 +2716,26 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
362 NULL,
363 };
364
365 +static struct omap_hwmod_ocp_if *dra74x_hwmod_ocp_ifs[] __initdata = {
366 + &dra7xx_l4_per3__usb_otg_ss4,
367 + NULL,
368 +};
369 +
370 +static struct omap_hwmod_ocp_if *dra72x_hwmod_ocp_ifs[] __initdata = {
371 + NULL,
372 +};
373 +
374 int __init dra7xx_hwmod_init(void)
375 {
376 + int ret;
377 +
378 omap_hwmod_init();
379 - return omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs);
380 + ret = omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs);
381 +
382 + if (!ret && soc_is_dra74x())
383 + return omap_hwmod_register_links(dra74x_hwmod_ocp_ifs);
384 + else if (!ret && soc_is_dra72x())
385 + return omap_hwmod_register_links(dra72x_hwmod_ocp_ifs);
386 +
387 + return ret;
388 }
389 diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
390 index 076bd90a6ce0..8a9be09d9f38 100644
391 --- a/arch/arm/mach-omap2/soc.h
392 +++ b/arch/arm/mach-omap2/soc.h
393 @@ -245,6 +245,8 @@ IS_AM_SUBCLASS(437x, 0x437)
394 #define soc_is_omap54xx() 0
395 #define soc_is_omap543x() 0
396 #define soc_is_dra7xx() 0
397 +#define soc_is_dra74x() 0
398 +#define soc_is_dra72x() 0
399
400 #if defined(MULTI_OMAP2)
401 # if defined(CONFIG_ARCH_OMAP2)
402 @@ -393,7 +395,11 @@ IS_OMAP_TYPE(3430, 0x3430)
403
404 #if defined(CONFIG_SOC_DRA7XX)
405 #undef soc_is_dra7xx
406 +#undef soc_is_dra74x
407 +#undef soc_is_dra72x
408 #define soc_is_dra7xx() (of_machine_is_compatible("ti,dra7"))
409 +#define soc_is_dra74x() (of_machine_is_compatible("ti,dra74"))
410 +#define soc_is_dra72x() (of_machine_is_compatible("ti,dra72"))
411 #endif
412
413 /* Various silicon revisions for omap2 */
414 diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
415 index 3815a8262af0..8c48c5c22a33 100644
416 --- a/arch/arm/mm/abort-ev6.S
417 +++ b/arch/arm/mm/abort-ev6.S
418 @@ -17,12 +17,6 @@
419 */
420 .align 5
421 ENTRY(v6_early_abort)
422 -#ifdef CONFIG_CPU_V6
423 - sub r1, sp, #4 @ Get unused stack location
424 - strex r0, r1, [r1] @ Clear the exclusive monitor
425 -#elif defined(CONFIG_CPU_32v6K)
426 - clrex
427 -#endif
428 mrc p15, 0, r1, c5, c0, 0 @ get FSR
429 mrc p15, 0, r0, c6, c0, 0 @ get FAR
430 /*
431 diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
432 index 703375277ba6..4812ad054214 100644
433 --- a/arch/arm/mm/abort-ev7.S
434 +++ b/arch/arm/mm/abort-ev7.S
435 @@ -13,12 +13,6 @@
436 */
437 .align 5
438 ENTRY(v7_early_abort)
439 - /*
440 - * The effect of data aborts on on the exclusive access monitor are
441 - * UNPREDICTABLE. Do a CLREX to clear the state
442 - */
443 - clrex
444 -
445 mrc p15, 0, r1, c5, c0, 0 @ get FSR
446 mrc p15, 0, r0, c6, c0, 0 @ get FAR
447
448 diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
449 index 924036473b16..d301662b7b32 100644
450 --- a/arch/arm/mm/alignment.c
451 +++ b/arch/arm/mm/alignment.c
452 @@ -40,6 +40,7 @@
453 * This code is not portable to processors with late data abort handling.
454 */
455 #define CODING_BITS(i) (i & 0x0e000000)
456 +#define COND_BITS(i) (i & 0xf0000000)
457
458 #define LDST_I_BIT(i) (i & (1 << 26)) /* Immediate constant */
459 #define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */
460 @@ -817,6 +818,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
461 break;
462
463 case 0x04000000: /* ldr or str immediate */
464 + if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */
465 + goto bad;
466 offset.un = OFFSET_BITS(instr);
467 handler = do_alignment_ldrstr;
468 break;
469 diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
470 index d064047612b1..52b484b6aa1a 100644
471 --- a/arch/arm64/include/asm/hw_breakpoint.h
472 +++ b/arch/arm64/include/asm/hw_breakpoint.h
473 @@ -79,7 +79,6 @@ static inline void decode_ctrl_reg(u32 reg,
474 */
475 #define ARM_MAX_BRP 16
476 #define ARM_MAX_WRP 16
477 -#define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP)
478
479 /* Virtual debug register bases. */
480 #define AARCH64_DBG_REG_BVR 0
481 diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
482 index 0f08dfd69ebc..dfa6e3e74fdd 100644
483 --- a/arch/arm64/kernel/irq.c
484 +++ b/arch/arm64/kernel/irq.c
485 @@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
486 if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
487 return false;
488
489 - if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
490 + if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
491 + affinity = cpu_online_mask;
492 ret = true;
493 + }
494
495 - /*
496 - * when using forced irq_set_affinity we must ensure that the cpu
497 - * being offlined is not present in the affinity mask, it may be
498 - * selected as the target CPU otherwise
499 - */
500 - affinity = cpu_online_mask;
501 c = irq_data_get_irq_chip(d);
502 if (!c->irq_set_affinity)
503 pr_debug("IRQ%u: unable to set affinity\n", d->irq);
504 - else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
505 + else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
506 cpumask_copy(d->affinity, affinity);
507
508 return ret;
509 diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
510 index 1c0a9be2ffa8..6e7e579c629d 100644
511 --- a/arch/arm64/kernel/process.c
512 +++ b/arch/arm64/kernel/process.c
513 @@ -187,9 +187,27 @@ void exit_thread(void)
514 {
515 }
516
517 +static void tls_thread_flush(void)
518 +{
519 + asm ("msr tpidr_el0, xzr");
520 +
521 + if (is_compat_task()) {
522 + current->thread.tp_value = 0;
523 +
524 + /*
525 + * We need to ensure ordering between the shadow state and the
526 + * hardware state, so that we don't corrupt the hardware state
527 + * with a stale shadow state during context switch.
528 + */
529 + barrier();
530 + asm ("msr tpidrro_el0, xzr");
531 + }
532 +}
533 +
534 void flush_thread(void)
535 {
536 fpsimd_flush_thread();
537 + tls_thread_flush();
538 flush_ptrace_hw_breakpoint(current);
539 }
540
541 diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
542 index 7a50b86464cc..b1269dac1289 100644
543 --- a/arch/arm64/kernel/ptrace.c
544 +++ b/arch/arm64/kernel/ptrace.c
545 @@ -81,7 +81,8 @@ static void ptrace_hbptriggered(struct perf_event *bp,
546 break;
547 }
548 }
549 - for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
550 +
551 + for (i = 0; i < ARM_MAX_WRP; ++i) {
552 if (current->thread.debug.hbp_watch[i] == bp) {
553 info.si_errno = -((i << 1) + 1);
554 break;
555 diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
556 index 26e9c4eeaba8..78039927c807 100644
557 --- a/arch/arm64/kernel/sys_compat.c
558 +++ b/arch/arm64/kernel/sys_compat.c
559 @@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
560
561 case __ARM_NR_compat_set_tls:
562 current->thread.tp_value = regs->regs[0];
563 +
564 + /*
565 + * Protect against register corruption from context switch.
566 + * See comment in tls_thread_flush.
567 + */
568 + barrier();
569 asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
570 return 0;
571
572 diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
573 index 7bc41eab4c64..fd9aeba99683 100644
574 --- a/arch/arm64/kvm/handle_exit.c
575 +++ b/arch/arm64/kvm/handle_exit.c
576 @@ -62,6 +62,8 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
577 else
578 kvm_vcpu_block(vcpu);
579
580 + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
581 +
582 return 1;
583 }
584
585 diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
586 index 2b0244d65c16..12e26f358c31 100644
587 --- a/arch/arm64/kvm/hyp-init.S
588 +++ b/arch/arm64/kvm/hyp-init.S
589 @@ -74,6 +74,10 @@ __do_hyp_init:
590 msr mair_el2, x4
591 isb
592
593 + /* Invalidate the stale TLBs from Bootloader */
594 + tlbi alle2
595 + dsb sy
596 +
597 mrs x4, sctlr_el2
598 and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2
599 ldr x5, =SCTLR_EL2_FLAGS
600 diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
601 index c00c4ddf4514..5244cecf1e45 100644
602 --- a/arch/mips/boot/compressed/decompress.c
603 +++ b/arch/mips/boot/compressed/decompress.c
604 @@ -13,6 +13,7 @@
605
606 #include <linux/types.h>
607 #include <linux/kernel.h>
608 +#include <linux/string.h>
609
610 #include <asm/addrspace.h>
611
612 diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
613 index 539b6294b613..8f89ff4ed524 100644
614 --- a/arch/mips/kernel/mcount.S
615 +++ b/arch/mips/kernel/mcount.S
616 @@ -123,7 +123,11 @@ NESTED(_mcount, PT_SIZE, ra)
617 nop
618 #endif
619 b ftrace_stub
620 +#ifdef CONFIG_32BIT
621 + addiu sp, sp, 8
622 +#else
623 nop
624 +#endif
625
626 static_trace:
627 MCOUNT_SAVE_REGS
628 @@ -133,6 +137,9 @@ static_trace:
629 move a1, AT /* arg2: parent's return address */
630
631 MCOUNT_RESTORE_REGS
632 +#ifdef CONFIG_32BIT
633 + addiu sp, sp, 8
634 +#endif
635 .globl ftrace_stub
636 ftrace_stub:
637 RETURN_BACK
638 @@ -177,6 +184,11 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra)
639 jal prepare_ftrace_return
640 nop
641 MCOUNT_RESTORE_REGS
642 +#ifndef CONFIG_DYNAMIC_FTRACE
643 +#ifdef CONFIG_32BIT
644 + addiu sp, sp, 8
645 +#endif
646 +#endif
647 RETURN_BACK
648 END(ftrace_graph_caller)
649
650 diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
651 index 7187664034c3..5db8882f732c 100644
652 --- a/arch/parisc/Makefile
653 +++ b/arch/parisc/Makefile
654 @@ -48,7 +48,12 @@ cflags-y := -pipe
655
656 # These flags should be implied by an hppa-linux configuration, but they
657 # are not in gcc 3.2.
658 -cflags-y += -mno-space-regs -mfast-indirect-calls
659 +cflags-y += -mno-space-regs
660 +
661 +# -mfast-indirect-calls is only relevant for 32-bit kernels.
662 +ifndef CONFIG_64BIT
663 +cflags-y += -mfast-indirect-calls
664 +endif
665
666 # Currently we save and restore fpregs on all kernel entry/interruption paths.
667 # If that gets optimized, we might need to disable the use of fpregs in the
668 diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
669 index 838786011037..7ef22e3387e0 100644
670 --- a/arch/parisc/kernel/syscall.S
671 +++ b/arch/parisc/kernel/syscall.S
672 @@ -74,7 +74,7 @@ ENTRY(linux_gateway_page)
673 /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
674 /* Light-weight-syscall entry must always be located at 0xb0 */
675 /* WARNING: Keep this number updated with table size changes */
676 -#define __NR_lws_entries (2)
677 +#define __NR_lws_entries (3)
678
679 lws_entry:
680 gate lws_start, %r0 /* increase privilege */
681 @@ -502,7 +502,7 @@ lws_exit:
682
683
684 /***************************************************
685 - Implementing CAS as an atomic operation:
686 + Implementing 32bit CAS as an atomic operation:
687
688 %r26 - Address to examine
689 %r25 - Old value to check (old)
690 @@ -659,6 +659,230 @@ cas_action:
691 ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
692
693
694 + /***************************************************
695 + New CAS implementation which uses pointers and variable size
696 + information. The value pointed by old and new MUST NOT change
697 + while performing CAS. The lock only protect the value at %r26.
698 +
699 + %r26 - Address to examine
700 + %r25 - Pointer to the value to check (old)
701 + %r24 - Pointer to the value to set (new)
702 + %r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
703 + %r28 - Return non-zero on failure
704 + %r21 - Kernel error code
705 +
706 + %r21 has the following meanings:
707 +
708 + EAGAIN - CAS is busy, ldcw failed, try again.
709 + EFAULT - Read or write failed.
710 +
711 + Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)
712 +
713 + ****************************************************/
714 +
715 + /* ELF32 Process entry path */
716 +lws_compare_and_swap_2:
717 +#ifdef CONFIG_64BIT
718 + /* Clip the input registers */
719 + depdi 0, 31, 32, %r26
720 + depdi 0, 31, 32, %r25
721 + depdi 0, 31, 32, %r24
722 + depdi 0, 31, 32, %r23
723 +#endif
724 +
725 + /* Check the validity of the size pointer */
726 + subi,>>= 4, %r23, %r0
727 + b,n lws_exit_nosys
728 +
729 + /* Jump to the functions which will load the old and new values into
730 + registers depending on the their size */
731 + shlw %r23, 2, %r29
732 + blr %r29, %r0
733 + nop
734 +
735 + /* 8bit load */
736 +4: ldb 0(%sr3,%r25), %r25
737 + b cas2_lock_start
738 +5: ldb 0(%sr3,%r24), %r24
739 + nop
740 + nop
741 + nop
742 + nop
743 + nop
744 +
745 + /* 16bit load */
746 +6: ldh 0(%sr3,%r25), %r25
747 + b cas2_lock_start
748 +7: ldh 0(%sr3,%r24), %r24
749 + nop
750 + nop
751 + nop
752 + nop
753 + nop
754 +
755 + /* 32bit load */
756 +8: ldw 0(%sr3,%r25), %r25
757 + b cas2_lock_start
758 +9: ldw 0(%sr3,%r24), %r24
759 + nop
760 + nop
761 + nop
762 + nop
763 + nop
764 +
765 + /* 64bit load */
766 +#ifdef CONFIG_64BIT
767 +10: ldd 0(%sr3,%r25), %r25
768 +11: ldd 0(%sr3,%r24), %r24
769 +#else
770 + /* Load new value into r22/r23 - high/low */
771 +10: ldw 0(%sr3,%r25), %r22
772 +11: ldw 4(%sr3,%r25), %r23
773 + /* Load new value into fr4 for atomic store later */
774 +12: flddx 0(%sr3,%r24), %fr4
775 +#endif
776 +
777 +cas2_lock_start:
778 + /* Load start of lock table */
779 + ldil L%lws_lock_start, %r20
780 + ldo R%lws_lock_start(%r20), %r28
781 +
782 + /* Extract four bits from r26 and hash lock (Bits 4-7) */
783 + extru %r26, 27, 4, %r20
784 +
785 + /* Find lock to use, the hash is either one of 0 to
786 + 15, multiplied by 16 (keep it 16-byte aligned)
787 + and add to the lock table offset. */
788 + shlw %r20, 4, %r20
789 + add %r20, %r28, %r20
790 +
791 + rsm PSW_SM_I, %r0 /* Disable interrupts */
792 + /* COW breaks can cause contention on UP systems */
793 + LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
794 + cmpb,<>,n %r0, %r28, cas2_action /* Did we get it? */
795 +cas2_wouldblock:
796 + ldo 2(%r0), %r28 /* 2nd case */
797 + ssm PSW_SM_I, %r0
798 + b lws_exit /* Contended... */
799 + ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
800 +
801 + /*
802 + prev = *addr;
803 + if ( prev == old )
804 + *addr = new;
805 + return prev;
806 + */
807 +
808 + /* NOTES:
809 + This all works becuse intr_do_signal
810 + and schedule both check the return iasq
811 + and see that we are on the kernel page
812 + so this process is never scheduled off
813 + or is ever sent any signal of any sort,
814 + thus it is wholly atomic from usrspaces
815 + perspective
816 + */
817 +cas2_action:
818 + /* Jump to the correct function */
819 + blr %r29, %r0
820 + /* Set %r28 as non-zero for now */
821 + ldo 1(%r0),%r28
822 +
823 + /* 8bit CAS */
824 +13: ldb,ma 0(%sr3,%r26), %r29
825 + sub,= %r29, %r25, %r0
826 + b,n cas2_end
827 +14: stb,ma %r24, 0(%sr3,%r26)
828 + b cas2_end
829 + copy %r0, %r28
830 + nop
831 + nop
832 +
833 + /* 16bit CAS */
834 +15: ldh,ma 0(%sr3,%r26), %r29
835 + sub,= %r29, %r25, %r0
836 + b,n cas2_end
837 +16: sth,ma %r24, 0(%sr3,%r26)
838 + b cas2_end
839 + copy %r0, %r28
840 + nop
841 + nop
842 +
843 + /* 32bit CAS */
844 +17: ldw,ma 0(%sr3,%r26), %r29
845 + sub,= %r29, %r25, %r0
846 + b,n cas2_end
847 +18: stw,ma %r24, 0(%sr3,%r26)
848 + b cas2_end
849 + copy %r0, %r28
850 + nop
851 + nop
852 +
853 + /* 64bit CAS */
854 +#ifdef CONFIG_64BIT
855 +19: ldd,ma 0(%sr3,%r26), %r29
856 + sub,= %r29, %r25, %r0
857 + b,n cas2_end
858 +20: std,ma %r24, 0(%sr3,%r26)
859 + copy %r0, %r28
860 +#else
861 + /* Compare first word */
862 +19: ldw,ma 0(%sr3,%r26), %r29
863 + sub,= %r29, %r22, %r0
864 + b,n cas2_end
865 + /* Compare second word */
866 +20: ldw,ma 4(%sr3,%r26), %r29
867 + sub,= %r29, %r23, %r0
868 + b,n cas2_end
869 + /* Perform the store */
870 +21: fstdx %fr4, 0(%sr3,%r26)
871 + copy %r0, %r28
872 +#endif
873 +
874 +cas2_end:
875 + /* Free lock */
876 + stw,ma %r20, 0(%sr2,%r20)
877 + /* Enable interrupts */
878 + ssm PSW_SM_I, %r0
879 + /* Return to userspace, set no error */
880 + b lws_exit
881 + copy %r0, %r21
882 +
883 +22:
884 + /* Error occurred on load or store */
885 + /* Free lock */
886 + stw %r20, 0(%sr2,%r20)
887 + ssm PSW_SM_I, %r0
888 + ldo 1(%r0),%r28
889 + b lws_exit
890 + ldo -EFAULT(%r0),%r21 /* set errno */
891 + nop
892 + nop
893 + nop
894 +
895 + /* Exception table entries, for the load and store, return EFAULT.
896 + Each of the entries must be relocated. */
897 + ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page)
898 + ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page)
899 + ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page)
900 + ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page)
901 + ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page)
902 + ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page)
903 + ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page)
904 + ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page)
905 + ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page)
906 + ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page)
907 + ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page)
908 + ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page)
909 + ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page)
910 + ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page)
911 + ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page)
912 + ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page)
913 +#ifndef CONFIG_64BIT
914 + ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page)
915 + ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page)
916 +#endif
917 +
918 /* Make sure nothing else is placed on this page */
919 .align PAGE_SIZE
920 END(linux_gateway_page)
921 @@ -675,8 +899,9 @@ ENTRY(end_linux_gateway_page)
922 /* Light-weight-syscall table */
923 /* Start of lws table. */
924 ENTRY(lws_table)
925 - LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */
926 - LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */
927 + LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic 32bit CAS */
928 + LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic 32bit CAS */
929 + LWS_ENTRY(compare_and_swap_2) /* 2 - ELF32 Atomic 64bit CAS */
930 END(lws_table)
931 /* End of lws table */
932
933 diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
934 index 279b80f3bb29..c0c61fa9cd9e 100644
935 --- a/arch/powerpc/include/asm/ptrace.h
936 +++ b/arch/powerpc/include/asm/ptrace.h
937 @@ -47,6 +47,12 @@
938 STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
939 #define STACK_FRAME_MARKER 12
940
941 +#if defined(_CALL_ELF) && _CALL_ELF == 2
942 +#define STACK_FRAME_MIN_SIZE 32
943 +#else
944 +#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
945 +#endif
946 +
947 /* Size of dummy stack frame allocated when calling signal handler. */
948 #define __SIGNAL_FRAMESIZE 128
949 #define __SIGNAL_FRAMESIZE32 64
950 @@ -60,6 +66,7 @@
951 #define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773)
952 #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
953 #define STACK_FRAME_MARKER 2
954 +#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
955
956 /* Size of stack frame allocated when calling signal handler. */
957 #define __SIGNAL_FRAMESIZE 64
958 diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
959 index 35aa339410bd..4dbe072eecbe 100644
960 --- a/arch/powerpc/include/asm/spinlock.h
961 +++ b/arch/powerpc/include/asm/spinlock.h
962 @@ -61,6 +61,7 @@ static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
963
964 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
965 {
966 + smp_mb();
967 return !arch_spin_value_unlocked(*lock);
968 }
969
970 diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
971 index 0c9c8d7d0734..170a0346f756 100644
972 --- a/arch/powerpc/lib/locks.c
973 +++ b/arch/powerpc/lib/locks.c
974 @@ -70,12 +70,16 @@ void __rw_yield(arch_rwlock_t *rw)
975
976 void arch_spin_unlock_wait(arch_spinlock_t *lock)
977 {
978 + smp_mb();
979 +
980 while (lock->slock) {
981 HMT_low();
982 if (SHARED_PROCESSOR)
983 __spin_yield(lock);
984 }
985 HMT_medium();
986 +
987 + smp_mb();
988 }
989
990 EXPORT_SYMBOL(arch_spin_unlock_wait);
991 diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
992 index 74d1e780748b..2396dda282cd 100644
993 --- a/arch/powerpc/perf/callchain.c
994 +++ b/arch/powerpc/perf/callchain.c
995 @@ -35,7 +35,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
996 return 0; /* must be 16-byte aligned */
997 if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
998 return 0;
999 - if (sp >= prev_sp + STACK_FRAME_OVERHEAD)
1000 + if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
1001 return 1;
1002 /*
1003 * sp could decrease when we jump off an interrupt stack
1004 diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
1005 index 3584ed9b20a1..e309c5c41158 100644
1006 --- a/arch/s390/mm/pgtable.c
1007 +++ b/arch/s390/mm/pgtable.c
1008 @@ -810,11 +810,21 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1009 pte_t *ptep;
1010
1011 down_read(&mm->mmap_sem);
1012 +retry:
1013 ptep = get_locked_pte(current->mm, addr, &ptl);
1014 if (unlikely(!ptep)) {
1015 up_read(&mm->mmap_sem);
1016 return -EFAULT;
1017 }
1018 + if (!(pte_val(*ptep) & _PAGE_INVALID) &&
1019 + (pte_val(*ptep) & _PAGE_PROTECT)) {
1020 + pte_unmap_unlock(*ptep, ptl);
1021 + if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
1022 + up_read(&mm->mmap_sem);
1023 + return -EFAULT;
1024 + }
1025 + goto retry;
1026 + }
1027
1028 new = old = pgste_get_lock(ptep);
1029 pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
1030 diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
1031 index 4dbf967da50d..6cfcf2a2eb93 100644
1032 --- a/arch/x86/boot/compressed/aslr.c
1033 +++ b/arch/x86/boot/compressed/aslr.c
1034 @@ -183,12 +183,27 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
1035 static bool mem_avoid_overlap(struct mem_vector *img)
1036 {
1037 int i;
1038 + struct setup_data *ptr;
1039
1040 for (i = 0; i < MEM_AVOID_MAX; i++) {
1041 if (mem_overlaps(img, &mem_avoid[i]))
1042 return true;
1043 }
1044
1045 + /* Avoid all entries in the setup_data linked list. */
1046 + ptr = (struct setup_data *)(unsigned long)real_mode->hdr.setup_data;
1047 + while (ptr) {
1048 + struct mem_vector avoid;
1049 +
1050 + avoid.start = (u64)ptr;
1051 + avoid.size = sizeof(*ptr) + ptr->len;
1052 +
1053 + if (mem_overlaps(img, &avoid))
1054 + return true;
1055 +
1056 + ptr = (struct setup_data *)(unsigned long)ptr->next;
1057 + }
1058 +
1059 return false;
1060 }
1061
1062 diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
1063 index 7252cd339175..6762a55b798a 100644
1064 --- a/arch/x86/include/asm/fixmap.h
1065 +++ b/arch/x86/include/asm/fixmap.h
1066 @@ -123,14 +123,14 @@ enum fixed_addresses {
1067 __end_of_permanent_fixed_addresses,
1068
1069 /*
1070 - * 256 temporary boot-time mappings, used by early_ioremap(),
1071 + * 512 temporary boot-time mappings, used by early_ioremap(),
1072 * before ioremap() is functional.
1073 *
1074 - * If necessary we round it up to the next 256 pages boundary so
1075 + * If necessary we round it up to the next 512 pages boundary so
1076 * that we can have a single pgd entry and a single pte table:
1077 */
1078 #define NR_FIX_BTMAPS 64
1079 -#define FIX_BTMAPS_SLOTS 4
1080 +#define FIX_BTMAPS_SLOTS 8
1081 #define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
1082 FIX_BTMAP_END =
1083 (__end_of_permanent_fixed_addresses ^
1084 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
1085 index e22c1dbf7feb..d869931bde62 100644
1086 --- a/arch/x86/include/asm/pgtable_64.h
1087 +++ b/arch/x86/include/asm/pgtable_64.h
1088 @@ -19,6 +19,7 @@ extern pud_t level3_ident_pgt[512];
1089 extern pmd_t level2_kernel_pgt[512];
1090 extern pmd_t level2_fixmap_pgt[512];
1091 extern pmd_t level2_ident_pgt[512];
1092 +extern pte_t level1_fixmap_pgt[512];
1093 extern pgd_t init_level4_pgt[];
1094
1095 #define swapper_pg_dir init_level4_pgt
1096 diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
1097 index 5ad35ad94d0f..95700e52061d 100644
1098 --- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
1099 +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
1100 @@ -511,6 +511,7 @@ static int rapl_cpu_prepare(int cpu)
1101 struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
1102 int phys_id = topology_physical_package_id(cpu);
1103 u64 ms;
1104 + u64 msr_rapl_power_unit_bits;
1105
1106 if (pmu)
1107 return 0;
1108 @@ -518,6 +519,9 @@ static int rapl_cpu_prepare(int cpu)
1109 if (phys_id < 0)
1110 return -1;
1111
1112 + if (!rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
1113 + return -1;
1114 +
1115 pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
1116 if (!pmu)
1117 return -1;
1118 @@ -531,8 +535,7 @@ static int rapl_cpu_prepare(int cpu)
1119 *
1120 * we cache in local PMU instance
1121 */
1122 - rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit);
1123 - pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL;
1124 + pmu->hw_unit = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
1125 pmu->pmu = &rapl_pmu_class;
1126
1127 /*
1128 @@ -649,7 +652,9 @@ static int __init rapl_pmu_init(void)
1129 get_online_cpus();
1130
1131 for_each_online_cpu(cpu) {
1132 - rapl_cpu_prepare(cpu);
1133 + ret = rapl_cpu_prepare(cpu);
1134 + if (ret)
1135 + goto out;
1136 rapl_cpu_init(cpu);
1137 }
1138
1139 @@ -672,6 +677,7 @@ static int __init rapl_pmu_init(void)
1140 hweight32(rapl_cntr_mask),
1141 ktime_to_ms(pmu->timer_interval));
1142
1143 +out:
1144 put_online_cpus();
1145
1146 return 0;
1147 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
1148 index 395be6d8bbde..682876533ed9 100644
1149 --- a/arch/x86/kernel/smpboot.c
1150 +++ b/arch/x86/kernel/smpboot.c
1151 @@ -1287,6 +1287,9 @@ static void remove_siblinginfo(int cpu)
1152
1153 for_each_cpu(sibling, cpu_sibling_mask(cpu))
1154 cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
1155 + for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
1156 + cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
1157 + cpumask_clear(cpu_llc_shared_mask(cpu));
1158 cpumask_clear(cpu_sibling_mask(cpu));
1159 cpumask_clear(cpu_core_mask(cpu));
1160 c->phys_proc_id = 0;
1161 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
1162 index 2423ef04ffea..c83da6fb2dee 100644
1163 --- a/arch/x86/xen/mmu.c
1164 +++ b/arch/x86/xen/mmu.c
1165 @@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1166 *
1167 * We can construct this by grafting the Xen provided pagetable into
1168 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1169 - * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1170 - * means that only the kernel has a physical mapping to start with -
1171 - * but that's enough to get __va working. We need to fill in the rest
1172 - * of the physical mapping once some sort of allocator has been set
1173 - * up.
1174 - * NOTE: for PVH, the page tables are native.
1175 + * level2_ident_pgt, and level2_kernel_pgt. This means that only the
1176 + * kernel has a physical mapping to start with - but that's enough to
1177 + * get __va working. We need to fill in the rest of the physical
1178 + * mapping once some sort of allocator has been set up. NOTE: for
1179 + * PVH, the page tables are native.
1180 */
1181 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1182 {
1183 @@ -1902,8 +1901,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1184 /* L3_i[0] -> level2_ident_pgt */
1185 convert_pfn_mfn(level3_ident_pgt);
1186 /* L3_k[510] -> level2_kernel_pgt
1187 - * L3_i[511] -> level2_fixmap_pgt */
1188 + * L3_k[511] -> level2_fixmap_pgt */
1189 convert_pfn_mfn(level3_kernel_pgt);
1190 +
1191 + /* L3_k[511][506] -> level1_fixmap_pgt */
1192 + convert_pfn_mfn(level2_fixmap_pgt);
1193 }
1194 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
1195 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1196 @@ -1913,21 +1915,15 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1197 addr[1] = (unsigned long)l3;
1198 addr[2] = (unsigned long)l2;
1199 /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
1200 - * Both L4[272][0] and L4[511][511] have entries that point to the same
1201 + * Both L4[272][0] and L4[511][510] have entries that point to the same
1202 * L2 (PMD) tables. Meaning that if you modify it in __va space
1203 * it will be also modified in the __ka space! (But if you just
1204 * modify the PMD table to point to other PTE's or none, then you
1205 * are OK - which is what cleanup_highmap does) */
1206 copy_page(level2_ident_pgt, l2);
1207 - /* Graft it onto L4[511][511] */
1208 + /* Graft it onto L4[511][510] */
1209 copy_page(level2_kernel_pgt, l2);
1210
1211 - /* Get [511][510] and graft that in level2_fixmap_pgt */
1212 - l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1213 - l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1214 - copy_page(level2_fixmap_pgt, l2);
1215 - /* Note that we don't do anything with level1_fixmap_pgt which
1216 - * we don't need. */
1217 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1218 /* Make pagetable pieces RO */
1219 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1220 @@ -1937,6 +1933,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1221 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1222 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1223 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1224 + set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
1225
1226 /* Pin down new L4 */
1227 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1228 diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
1229 index 216446295ada..51230ba97bef 100644
1230 --- a/arch/xtensa/include/asm/pgtable.h
1231 +++ b/arch/xtensa/include/asm/pgtable.h
1232 @@ -67,7 +67,12 @@
1233 #define VMALLOC_START 0xC0000000
1234 #define VMALLOC_END 0xC7FEFFFF
1235 #define TLBTEMP_BASE_1 0xC7FF0000
1236 -#define TLBTEMP_BASE_2 0xC7FF8000
1237 +#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
1238 +#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
1239 +#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
1240 +#else
1241 +#define TLBTEMP_SIZE ICACHE_WAY_SIZE
1242 +#endif
1243
1244 /*
1245 * For the Xtensa architecture, the PTE layout is as follows:
1246 diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
1247 index fd686dc45d1a..c7211e7e182d 100644
1248 --- a/arch/xtensa/include/asm/uaccess.h
1249 +++ b/arch/xtensa/include/asm/uaccess.h
1250 @@ -52,7 +52,12 @@
1251 */
1252 .macro get_fs ad, sp
1253 GET_CURRENT(\ad,\sp)
1254 +#if THREAD_CURRENT_DS > 1020
1255 + addi \ad, \ad, TASK_THREAD
1256 + l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD
1257 +#else
1258 l32i \ad, \ad, THREAD_CURRENT_DS
1259 +#endif
1260 .endm
1261
1262 /*
1263 diff --git a/arch/xtensa/include/uapi/asm/ioctls.h b/arch/xtensa/include/uapi/asm/ioctls.h
1264 index b4cb1100c0fb..a47909f0c34b 100644
1265 --- a/arch/xtensa/include/uapi/asm/ioctls.h
1266 +++ b/arch/xtensa/include/uapi/asm/ioctls.h
1267 @@ -28,17 +28,17 @@
1268 #define TCSETSW 0x5403
1269 #define TCSETSF 0x5404
1270
1271 -#define TCGETA _IOR('t', 23, struct termio)
1272 -#define TCSETA _IOW('t', 24, struct termio)
1273 -#define TCSETAW _IOW('t', 25, struct termio)
1274 -#define TCSETAF _IOW('t', 28, struct termio)
1275 +#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */
1276 +#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */
1277 +#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */
1278 +#define TCSETAF 0x4012741C /* _IOW('t', 28, struct termio) */
1279
1280 #define TCSBRK _IO('t', 29)
1281 #define TCXONC _IO('t', 30)
1282 #define TCFLSH _IO('t', 31)
1283
1284 -#define TIOCSWINSZ _IOW('t', 103, struct winsize)
1285 -#define TIOCGWINSZ _IOR('t', 104, struct winsize)
1286 +#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */
1287 +#define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */
1288 #define TIOCSTART _IO('t', 110) /* start output, like ^Q */
1289 #define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
1290 #define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
1291 @@ -88,7 +88,6 @@
1292 #define TIOCSETD _IOW('T', 35, int)
1293 #define TIOCGETD _IOR('T', 36, int)
1294 #define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/
1295 -#define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* For debugging only*/
1296 #define TIOCSBRK _IO('T', 39) /* BSD compatibility */
1297 #define TIOCCBRK _IO('T', 40) /* BSD compatibility */
1298 #define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/
1299 @@ -114,8 +113,10 @@
1300 #define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. */
1301 /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
1302 # define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
1303 -#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config */
1304 -#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */
1305 +#define TIOCSERGETMULTI 0x80a8545a /* Get multiport config */
1306 + /* _IOR('T', 90, struct serial_multiport_struct) */
1307 +#define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */
1308 + /* _IOW('T', 91, struct serial_multiport_struct) */
1309
1310 #define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */
1311 #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
1312 diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
1313 index ef7f4990722b..a06b7efaae82 100644
1314 --- a/arch/xtensa/kernel/entry.S
1315 +++ b/arch/xtensa/kernel/entry.S
1316 @@ -1001,9 +1001,8 @@ ENTRY(fast_syscall_xtensa)
1317 movi a7, 4 # sizeof(unsigned int)
1318 access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
1319
1320 - addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1
1321 - _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill
1322 - _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
1323 + _bgeui a6, SYS_XTENSA_COUNT, .Lill
1324 + _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp
1325
1326 /* Fall through for ATOMIC_CMP_SWP. */
1327
1328 @@ -1015,27 +1014,26 @@ TRY s32i a5, a3, 0 # different, modify value
1329 l32i a7, a2, PT_AREG7 # restore a7
1330 l32i a0, a2, PT_AREG0 # restore a0
1331 movi a2, 1 # and return 1
1332 - addi a6, a6, 1 # restore a6 (really necessary?)
1333 rfe
1334
1335 1: l32i a7, a2, PT_AREG7 # restore a7
1336 l32i a0, a2, PT_AREG0 # restore a0
1337 movi a2, 0 # return 0 (note that we cannot set
1338 - addi a6, a6, 1 # restore a6 (really necessary?)
1339 rfe
1340
1341 .Lnswp: /* Atomic set, add, and exg_add. */
1342
1343 TRY l32i a7, a3, 0 # orig
1344 + addi a6, a6, -SYS_XTENSA_ATOMIC_SET
1345 add a0, a4, a7 # + arg
1346 moveqz a0, a4, a6 # set
1347 + addi a6, a6, SYS_XTENSA_ATOMIC_SET
1348 TRY s32i a0, a3, 0 # write new value
1349
1350 mov a0, a2
1351 mov a2, a7
1352 l32i a7, a0, PT_AREG7 # restore a7
1353 l32i a0, a0, PT_AREG0 # restore a0
1354 - addi a6, a6, 1 # restore a6 (really necessary?)
1355 rfe
1356
1357 CATCH
1358 @@ -1044,7 +1042,7 @@ CATCH
1359 movi a2, -EFAULT
1360 rfe
1361
1362 -.Lill: l32i a7, a2, PT_AREG0 # restore a7
1363 +.Lill: l32i a7, a2, PT_AREG7 # restore a7
1364 l32i a0, a2, PT_AREG0 # restore a0
1365 movi a2, -EINVAL
1366 rfe
1367 @@ -1565,7 +1563,7 @@ ENTRY(fast_second_level_miss)
1368 rsr a0, excvaddr
1369 bltu a0, a3, 2f
1370
1371 - addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
1372 + addi a1, a0, -TLBTEMP_SIZE
1373 bgeu a1, a3, 2f
1374
1375 /* Check if we have to restore an ITLB mapping. */
1376 @@ -1820,7 +1818,6 @@ ENTRY(_switch_to)
1377
1378 entry a1, 16
1379
1380 - mov a10, a2 # preserve 'prev' (a2)
1381 mov a11, a3 # and 'next' (a3)
1382
1383 l32i a4, a2, TASK_THREAD_INFO
1384 @@ -1828,8 +1825,14 @@ ENTRY(_switch_to)
1385
1386 save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
1387
1388 - s32i a0, a10, THREAD_RA # save return address
1389 - s32i a1, a10, THREAD_SP # save stack pointer
1390 +#if THREAD_RA > 1020 || THREAD_SP > 1020
1391 + addi a10, a2, TASK_THREAD
1392 + s32i a0, a10, THREAD_RA - TASK_THREAD # save return address
1393 + s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer
1394 +#else
1395 + s32i a0, a2, THREAD_RA # save return address
1396 + s32i a1, a2, THREAD_SP # save stack pointer
1397 +#endif
1398
1399 /* Disable ints while we manipulate the stack pointer. */
1400
1401 @@ -1870,7 +1873,6 @@ ENTRY(_switch_to)
1402 load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
1403
1404 wsr a14, ps
1405 - mov a2, a10 # return 'prev'
1406 rsync
1407
1408 retw
1409 diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
1410 index 2d9cc6dbfd78..e8b76b8e4b29 100644
1411 --- a/arch/xtensa/kernel/pci-dma.c
1412 +++ b/arch/xtensa/kernel/pci-dma.c
1413 @@ -49,9 +49,8 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
1414
1415 /* We currently don't support coherent memory outside KSEG */
1416
1417 - if (ret < XCHAL_KSEG_CACHED_VADDR
1418 - || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE)
1419 - BUG();
1420 + BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
1421 + ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
1422
1423
1424 if (ret != 0) {
1425 @@ -68,10 +67,11 @@ EXPORT_SYMBOL(dma_alloc_coherent);
1426 void dma_free_coherent(struct device *hwdev, size_t size,
1427 void *vaddr, dma_addr_t dma_handle)
1428 {
1429 - long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR;
1430 + unsigned long addr = (unsigned long)vaddr +
1431 + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
1432
1433 - if (addr < 0 || addr >= XCHAL_KSEG_SIZE)
1434 - BUG();
1435 + BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
1436 + addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
1437
1438 free_pages(addr, get_order(size));
1439 }
1440 diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
1441 index 744833b630c6..91c25f261c91 100644
1442 --- a/block/cfq-iosched.c
1443 +++ b/block/cfq-iosched.c
1444 @@ -1275,12 +1275,16 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1445 static void
1446 cfq_update_group_weight(struct cfq_group *cfqg)
1447 {
1448 - BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1449 -
1450 if (cfqg->new_weight) {
1451 cfqg->weight = cfqg->new_weight;
1452 cfqg->new_weight = 0;
1453 }
1454 +}
1455 +
1456 +static void
1457 +cfq_update_group_leaf_weight(struct cfq_group *cfqg)
1458 +{
1459 + BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1460
1461 if (cfqg->new_leaf_weight) {
1462 cfqg->leaf_weight = cfqg->new_leaf_weight;
1463 @@ -1299,7 +1303,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1464 /* add to the service tree */
1465 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1466
1467 - cfq_update_group_weight(cfqg);
1468 + cfq_update_group_leaf_weight(cfqg);
1469 __cfq_group_service_tree_add(st, cfqg);
1470
1471 /*
1472 @@ -1323,6 +1327,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1473 */
1474 while ((parent = cfqg_parent(pos))) {
1475 if (propagate) {
1476 + cfq_update_group_weight(pos);
1477 propagate = !parent->nr_active++;
1478 parent->children_weight += pos->weight;
1479 }
1480 diff --git a/block/genhd.c b/block/genhd.c
1481 index 791f41943132..e6723bd4d7a1 100644
1482 --- a/block/genhd.c
1483 +++ b/block/genhd.c
1484 @@ -28,10 +28,10 @@ struct kobject *block_depr;
1485 /* for extended dynamic devt allocation, currently only one major is used */
1486 #define NR_EXT_DEVT (1 << MINORBITS)
1487
1488 -/* For extended devt allocation. ext_devt_mutex prevents look up
1489 +/* For extended devt allocation. ext_devt_lock prevents look up
1490 * results from going away underneath its user.
1491 */
1492 -static DEFINE_MUTEX(ext_devt_mutex);
1493 +static DEFINE_SPINLOCK(ext_devt_lock);
1494 static DEFINE_IDR(ext_devt_idr);
1495
1496 static struct device_type disk_type;
1497 @@ -420,9 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
1498 }
1499
1500 /* allocate ext devt */
1501 - mutex_lock(&ext_devt_mutex);
1502 - idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
1503 - mutex_unlock(&ext_devt_mutex);
1504 + idr_preload(GFP_KERNEL);
1505 +
1506 + spin_lock(&ext_devt_lock);
1507 + idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
1508 + spin_unlock(&ext_devt_lock);
1509 +
1510 + idr_preload_end();
1511 if (idx < 0)
1512 return idx == -ENOSPC ? -EBUSY : idx;
1513
1514 @@ -441,15 +445,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
1515 */
1516 void blk_free_devt(dev_t devt)
1517 {
1518 - might_sleep();
1519 -
1520 if (devt == MKDEV(0, 0))
1521 return;
1522
1523 if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
1524 - mutex_lock(&ext_devt_mutex);
1525 + spin_lock(&ext_devt_lock);
1526 idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
1527 - mutex_unlock(&ext_devt_mutex);
1528 + spin_unlock(&ext_devt_lock);
1529 }
1530 }
1531
1532 @@ -665,7 +667,6 @@ void del_gendisk(struct gendisk *disk)
1533 sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
1534 pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
1535 device_del(disk_to_dev(disk));
1536 - blk_free_devt(disk_to_dev(disk)->devt);
1537 }
1538 EXPORT_SYMBOL(del_gendisk);
1539
1540 @@ -690,13 +691,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
1541 } else {
1542 struct hd_struct *part;
1543
1544 - mutex_lock(&ext_devt_mutex);
1545 + spin_lock(&ext_devt_lock);
1546 part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
1547 if (part && get_disk(part_to_disk(part))) {
1548 *partno = part->partno;
1549 disk = part_to_disk(part);
1550 }
1551 - mutex_unlock(&ext_devt_mutex);
1552 + spin_unlock(&ext_devt_lock);
1553 }
1554
1555 return disk;
1556 @@ -1098,6 +1099,7 @@ static void disk_release(struct device *dev)
1557 {
1558 struct gendisk *disk = dev_to_disk(dev);
1559
1560 + blk_free_devt(dev->devt);
1561 disk_release_events(disk);
1562 kfree(disk->random);
1563 disk_replace_part_tbl(disk, NULL);
1564 diff --git a/block/partition-generic.c b/block/partition-generic.c
1565 index 789cdea05893..0d9e5f97f0a8 100644
1566 --- a/block/partition-generic.c
1567 +++ b/block/partition-generic.c
1568 @@ -211,6 +211,7 @@ static const struct attribute_group *part_attr_groups[] = {
1569 static void part_release(struct device *dev)
1570 {
1571 struct hd_struct *p = dev_to_part(dev);
1572 + blk_free_devt(dev->devt);
1573 free_part_stats(p);
1574 free_part_info(p);
1575 kfree(p);
1576 @@ -253,7 +254,6 @@ void delete_partition(struct gendisk *disk, int partno)
1577 rcu_assign_pointer(ptbl->last_lookup, NULL);
1578 kobject_put(part->holder_dir);
1579 device_del(part_to_dev(part));
1580 - blk_free_devt(part_devt(part));
1581
1582 hd_struct_put(part);
1583 }
1584 diff --git a/block/partitions/aix.c b/block/partitions/aix.c
1585 index 43be471d9b1d..0931f5136ab2 100644
1586 --- a/block/partitions/aix.c
1587 +++ b/block/partitions/aix.c
1588 @@ -253,7 +253,7 @@ int aix_partition(struct parsed_partitions *state)
1589 continue;
1590 }
1591 lv_ix = be16_to_cpu(p->lv_ix) - 1;
1592 - if (lv_ix > state->limit) {
1593 + if (lv_ix >= state->limit) {
1594 cur_lv_ix = -1;
1595 continue;
1596 }
1597 diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c
1598 index 84190ed89c04..aff69d9bfcbf 100644
1599 --- a/drivers/acpi/acpi_cmos_rtc.c
1600 +++ b/drivers/acpi/acpi_cmos_rtc.c
1601 @@ -35,7 +35,7 @@ acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address,
1602 void *handler_context, void *region_context)
1603 {
1604 int i;
1605 - u8 *value = (u8 *)&value64;
1606 + u8 *value = (u8 *)value64;
1607
1608 if (address > 0xff || !value64)
1609 return AE_BAD_PARAMETER;
1610 diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
1611 index d95ca5449ace..e6ab104afe42 100644
1612 --- a/drivers/acpi/acpica/aclocal.h
1613 +++ b/drivers/acpi/acpica/aclocal.h
1614 @@ -254,6 +254,7 @@ struct acpi_create_field_info {
1615 u32 field_bit_position;
1616 u32 field_bit_length;
1617 u16 resource_length;
1618 + u16 pin_number_index;
1619 u8 field_flags;
1620 u8 attribute;
1621 u8 field_type;
1622 diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
1623 index cc7ab6dd724e..a47cc78ffd4f 100644
1624 --- a/drivers/acpi/acpica/acobject.h
1625 +++ b/drivers/acpi/acpica/acobject.h
1626 @@ -263,6 +263,7 @@ struct acpi_object_region_field {
1627 ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
1628 union acpi_operand_object *region_obj; /* Containing op_region object */
1629 u8 *resource_buffer; /* resource_template for serial regions/fields */
1630 + u16 pin_number_index; /* Index relative to previous Connection/Template */
1631 };
1632
1633 struct acpi_object_bank_field {
1634 diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
1635 index e7a57c554e84..9af55bd6c4d7 100644
1636 --- a/drivers/acpi/acpica/dsfield.c
1637 +++ b/drivers/acpi/acpica/dsfield.c
1638 @@ -360,6 +360,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
1639 */
1640 info->resource_buffer = NULL;
1641 info->connection_node = NULL;
1642 + info->pin_number_index = 0;
1643
1644 /*
1645 * A Connection() is either an actual resource descriptor (buffer)
1646 @@ -437,6 +438,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
1647 }
1648
1649 info->field_bit_position += info->field_bit_length;
1650 + info->pin_number_index++; /* Index relative to previous Connection() */
1651 break;
1652
1653 default:
1654 diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
1655 index 144cbb9b73bc..cd4b231ae760 100644
1656 --- a/drivers/acpi/acpica/evregion.c
1657 +++ b/drivers/acpi/acpica/evregion.c
1658 @@ -142,6 +142,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
1659 union acpi_operand_object *region_obj2;
1660 void *region_context = NULL;
1661 struct acpi_connection_info *context;
1662 + acpi_physical_address address;
1663
1664 ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
1665
1666 @@ -231,25 +232,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
1667 /* We have everything we need, we can invoke the address space handler */
1668
1669 handler = handler_desc->address_space.handler;
1670 -
1671 - ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
1672 - "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
1673 - &region_obj->region.handler->address_space, handler,
1674 - ACPI_FORMAT_NATIVE_UINT(region_obj->region.address +
1675 - region_offset),
1676 - acpi_ut_get_region_name(region_obj->region.
1677 - space_id)));
1678 + address = (region_obj->region.address + region_offset);
1679
1680 /*
1681 * Special handling for generic_serial_bus and general_purpose_io:
1682 * There are three extra parameters that must be passed to the
1683 * handler via the context:
1684 - * 1) Connection buffer, a resource template from Connection() op.
1685 - * 2) Length of the above buffer.
1686 - * 3) Actual access length from the access_as() op.
1687 + * 1) Connection buffer, a resource template from Connection() op
1688 + * 2) Length of the above buffer
1689 + * 3) Actual access length from the access_as() op
1690 + *
1691 + * In addition, for general_purpose_io, the Address and bit_width fields
1692 + * are defined as follows:
1693 + * 1) Address is the pin number index of the field (bit offset from
1694 + * the previous Connection)
1695 + * 2) bit_width is the actual bit length of the field (number of pins)
1696 */
1697 - if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
1698 - (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
1699 + if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
1700 context && field_obj) {
1701
1702 /* Get the Connection (resource_template) buffer */
1703 @@ -258,6 +257,24 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
1704 context->length = field_obj->field.resource_length;
1705 context->access_length = field_obj->field.access_length;
1706 }
1707 + if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
1708 + context && field_obj) {
1709 +
1710 + /* Get the Connection (resource_template) buffer */
1711 +
1712 + context->connection = field_obj->field.resource_buffer;
1713 + context->length = field_obj->field.resource_length;
1714 + context->access_length = field_obj->field.access_length;
1715 + address = field_obj->field.pin_number_index;
1716 + bit_width = field_obj->field.bit_length;
1717 + }
1718 +
1719 + ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
1720 + "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
1721 + &region_obj->region.handler->address_space, handler,
1722 + ACPI_FORMAT_NATIVE_UINT(address),
1723 + acpi_ut_get_region_name(region_obj->region.
1724 + space_id)));
1725
1726 if (!(handler_desc->address_space.handler_flags &
1727 ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
1728 @@ -271,9 +288,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
1729
1730 /* Call the handler */
1731
1732 - status = handler(function,
1733 - (region_obj->region.address + region_offset),
1734 - bit_width, value, context,
1735 + status = handler(function, address, bit_width, value, context,
1736 region_obj2->extra.region_context);
1737
1738 if (ACPI_FAILURE(status)) {
1739 diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
1740 index cfd875243421..d36894a228b1 100644
1741 --- a/drivers/acpi/acpica/exfield.c
1742 +++ b/drivers/acpi/acpica/exfield.c
1743 @@ -178,6 +178,37 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
1744 buffer = &buffer_desc->integer.value;
1745 }
1746
1747 + if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
1748 + (obj_desc->field.region_obj->region.space_id ==
1749 + ACPI_ADR_SPACE_GPIO)) {
1750 + /*
1751 + * For GPIO (general_purpose_io), the Address will be the bit offset
1752 + * from the previous Connection() operator, making it effectively a
1753 + * pin number index. The bit_length is the length of the field, which
1754 + * is thus the number of pins.
1755 + */
1756 + ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
1757 + "GPIO FieldRead [FROM]: Pin %u Bits %u\n",
1758 + obj_desc->field.pin_number_index,
1759 + obj_desc->field.bit_length));
1760 +
1761 + /* Lock entire transaction if requested */
1762 +
1763 + acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
1764 +
1765 +	/* Perform the read */
1766 +
1767 + status = acpi_ex_access_region(obj_desc, 0,
1768 + (u64 *)buffer, ACPI_READ);
1769 + acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
1770 + if (ACPI_FAILURE(status)) {
1771 + acpi_ut_remove_reference(buffer_desc);
1772 + } else {
1773 + *ret_buffer_desc = buffer_desc;
1774 + }
1775 + return_ACPI_STATUS(status);
1776 + }
1777 +
1778 ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
1779 "FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n",
1780 obj_desc, obj_desc->common.type, buffer,
1781 @@ -325,6 +356,42 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
1782
1783 *result_desc = buffer_desc;
1784 return_ACPI_STATUS(status);
1785 + } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
1786 + (obj_desc->field.region_obj->region.space_id ==
1787 + ACPI_ADR_SPACE_GPIO)) {
1788 + /*
1789 + * For GPIO (general_purpose_io), we will bypass the entire field
1790 + * mechanism and handoff the bit address and bit width directly to
1791 + * the handler. The Address will be the bit offset
1792 + * from the previous Connection() operator, making it effectively a
1793 + * pin number index. The bit_length is the length of the field, which
1794 + * is thus the number of pins.
1795 + */
1796 + if (source_desc->common.type != ACPI_TYPE_INTEGER) {
1797 + return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
1798 + }
1799 +
1800 + ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
1801 + "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X [TO]: Pin %u Bits %u\n",
1802 + acpi_ut_get_type_name(source_desc->common.
1803 + type),
1804 + source_desc->common.type,
1805 + (u32)source_desc->integer.value,
1806 + obj_desc->field.pin_number_index,
1807 + obj_desc->field.bit_length));
1808 +
1809 + buffer = &source_desc->integer.value;
1810 +
1811 + /* Lock entire transaction if requested */
1812 +
1813 + acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
1814 +
1815 + /* Perform the write */
1816 +
1817 + status = acpi_ex_access_region(obj_desc, 0,
1818 + (u64 *)buffer, ACPI_WRITE);
1819 + acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
1820 + return_ACPI_STATUS(status);
1821 }
1822
1823 /* Get a pointer to the data to be written */
1824 diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
1825 index 5a588611ab48..8c88cfdec441 100644
1826 --- a/drivers/acpi/acpica/exprep.c
1827 +++ b/drivers/acpi/acpica/exprep.c
1828 @@ -484,6 +484,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
1829 obj_desc->field.resource_length = info->resource_length;
1830 }
1831
1832 + obj_desc->field.pin_number_index = info->pin_number_index;
1833 +
1834 /* Allow full data read from EC address space */
1835
1836 if ((obj_desc->field.region_obj->region.space_id ==
1837 diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
1838 index 368f9ddb8480..e4a6f78f3bbc 100644
1839 --- a/drivers/acpi/container.c
1840 +++ b/drivers/acpi/container.c
1841 @@ -96,6 +96,13 @@ static void container_device_detach(struct acpi_device *adev)
1842 device_unregister(dev);
1843 }
1844
1845 +static void container_device_online(struct acpi_device *adev)
1846 +{
1847 + struct device *dev = acpi_driver_data(adev);
1848 +
1849 + kobject_uevent(&dev->kobj, KOBJ_ONLINE);
1850 +}
1851 +
1852 static struct acpi_scan_handler container_handler = {
1853 .ids = container_device_ids,
1854 .attach = container_device_attach,
1855 @@ -103,6 +110,7 @@ static struct acpi_scan_handler container_handler = {
1856 .hotplug = {
1857 .enabled = true,
1858 .demand_offline = true,
1859 + .notify_online = container_device_online,
1860 },
1861 };
1862
1863 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
1864 index 92d5184e3654..493a342efa44 100644
1865 --- a/drivers/acpi/scan.c
1866 +++ b/drivers/acpi/scan.c
1867 @@ -106,7 +106,7 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
1868 list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
1869 count = snprintf(&modalias[len], size, "%s:", id->id);
1870 if (count < 0)
1871 - return EINVAL;
1872 + return -EINVAL;
1873 if (count >= size)
1874 return -ENOMEM;
1875 len += count;
1876 @@ -2068,6 +2068,9 @@ static void acpi_bus_attach(struct acpi_device *device)
1877 ok:
1878 list_for_each_entry(child, &device->children, node)
1879 acpi_bus_attach(child);
1880 +
1881 + if (device->handler && device->handler->hotplug.notify_online)
1882 + device->handler->hotplug.notify_online(device);
1883 }
1884
1885 /**
1886 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
1887 index b54f8b3c7924..00663d60f6d4 100644
1888 --- a/drivers/ata/ahci.c
1889 +++ b/drivers/ata/ahci.c
1890 @@ -306,6 +306,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
1891 { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
1892 { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
1893 { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
1894 + { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
1895 + { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
1896 + { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
1897 + { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
1898 + { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
1899 + { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
1900 + { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
1901 + { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
1902
1903 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
1904 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
1905 @@ -443,6 +451,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
1906 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
1907 .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
1908 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
1909 + .driver_data = board_ahci_yes_fbs }, /* 88se9182 */
1910 + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182),
1911 .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
1912 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
1913 .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
1914 diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
1915 index 6334c8d7c3f1..39f76b987c75 100644
1916 --- a/drivers/ata/ata_piix.c
1917 +++ b/drivers/ata/ata_piix.c
1918 @@ -340,6 +340,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
1919 { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
1920 /* SATA Controller IDE (Coleto Creek) */
1921 { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
1922 + /* SATA Controller IDE (9 Series) */
1923 + { 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
1924 + /* SATA Controller IDE (9 Series) */
1925 + { 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
1926 + /* SATA Controller IDE (9 Series) */
1927 + { 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
1928 + /* SATA Controller IDE (9 Series) */
1929 + { 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
1930
1931 { } /* terminate list */
1932 };
1933 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1934 index f7616036663b..538574f98e22 100644
1935 --- a/drivers/ata/libata-core.c
1936 +++ b/drivers/ata/libata-core.c
1937 @@ -4227,7 +4227,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
1938 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
1939 { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
1940 { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
1941 - { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
1942 + { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
1943
1944 /*
1945 * Some WD SATA-I drives spin up and down erratically when the link
1946 diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
1947 index f35f15f4d83e..f7badaa39eb6 100644
1948 --- a/drivers/ata/pata_scc.c
1949 +++ b/drivers/ata/pata_scc.c
1950 @@ -586,7 +586,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
1951 * Note: Original code is ata_bus_softreset().
1952 */
1953
1954 -static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
1955 +static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
1956 unsigned long deadline)
1957 {
1958 struct ata_ioports *ioaddr = &ap->ioaddr;
1959 @@ -600,9 +600,7 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
1960 udelay(20);
1961 out_be32(ioaddr->ctl_addr, ap->ctl);
1962
1963 - scc_wait_after_reset(&ap->link, devmask, deadline);
1964 -
1965 - return 0;
1966 + return scc_wait_after_reset(&ap->link, devmask, deadline);
1967 }
1968
1969 /**
1970 @@ -619,7 +617,8 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
1971 {
1972 struct ata_port *ap = link->ap;
1973 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1974 - unsigned int devmask = 0, err_mask;
1975 + unsigned int devmask = 0;
1976 + int rc;
1977 u8 err;
1978
1979 DPRINTK("ENTER\n");
1980 @@ -635,9 +634,9 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
1981
1982 /* issue bus reset */
1983 DPRINTK("about to softreset, devmask=%x\n", devmask);
1984 - err_mask = scc_bus_softreset(ap, devmask, deadline);
1985 - if (err_mask) {
1986 - ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask);
1987 + rc = scc_bus_softreset(ap, devmask, deadline);
1988 + if (rc) {
1989 + ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc);
1990 return -EIO;
1991 }
1992
1993 diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
1994 index d4dd77134814..154e7a8c0a04 100644
1995 --- a/drivers/base/regmap/regcache.c
1996 +++ b/drivers/base/regmap/regcache.c
1997 @@ -701,7 +701,7 @@ int regcache_sync_block(struct regmap *map, void *block,
1998 unsigned int block_base, unsigned int start,
1999 unsigned int end)
2000 {
2001 - if (regmap_can_raw_write(map))
2002 + if (regmap_can_raw_write(map) && !map->use_single_rw)
2003 return regcache_sync_block_raw(map, block, cache_present,
2004 block_base, start, end);
2005 else
2006 diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
2007 index 6a19515f8a45..2ea056c09aeb 100644
2008 --- a/drivers/base/regmap/regmap.c
2009 +++ b/drivers/base/regmap/regmap.c
2010 @@ -105,7 +105,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
2011
2012 bool regmap_volatile(struct regmap *map, unsigned int reg)
2013 {
2014 - if (!regmap_readable(map, reg))
2015 + if (!map->format.format_write && !regmap_readable(map, reg))
2016 return false;
2017
2018 if (map->volatile_reg)
2019 diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
2020 index b94a311e5ab6..f9c4632d4dd3 100644
2021 --- a/drivers/clk/clk.c
2022 +++ b/drivers/clk/clk.c
2023 @@ -1487,6 +1487,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
2024 static void clk_change_rate(struct clk *clk)
2025 {
2026 struct clk *child;
2027 + struct hlist_node *tmp;
2028 unsigned long old_rate;
2029 unsigned long best_parent_rate = 0;
2030 bool skip_set_rate = false;
2031 @@ -1525,7 +1526,11 @@ static void clk_change_rate(struct clk *clk)
2032 if (clk->notifier_count && old_rate != clk->rate)
2033 __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
2034
2035 - hlist_for_each_entry(child, &clk->children, child_node) {
2036 + /*
2037 + * Use safe iteration, as change_rate can actually swap parents
2038 + * for certain clock types.
2039 + */
2040 + hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) {
2041 /* Skip children who will be reparented to another clock */
2042 if (child->new_parent && child->new_parent != clk)
2043 continue;
2044 diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
2045 index 9be47a829144..f3c95d648a53 100644
2046 --- a/drivers/clk/qcom/mmcc-msm8960.c
2047 +++ b/drivers/clk/qcom/mmcc-msm8960.c
2048 @@ -37,6 +37,8 @@
2049 #define P_PLL2 2
2050 #define P_PLL3 3
2051
2052 +#define F_MN(f, s, _m, _n) { .freq = f, .src = s, .m = _m, .n = _n }
2053 +
2054 static u8 mmcc_pxo_pll8_pll2_map[] = {
2055 [P_PXO] = 0,
2056 [P_PLL8] = 2,
2057 @@ -58,8 +60,8 @@ static u8 mmcc_pxo_pll8_pll2_pll3_map[] = {
2058
2059 static const char *mmcc_pxo_pll8_pll2_pll3[] = {
2060 "pxo",
2061 - "pll2",
2062 "pll8_vote",
2063 + "pll2",
2064 "pll3",
2065 };
2066
2067 @@ -709,18 +711,18 @@ static struct clk_branch csiphy2_timer_clk = {
2068 };
2069
2070 static struct freq_tbl clk_tbl_gfx2d[] = {
2071 - { 27000000, P_PXO, 1, 0 },
2072 - { 48000000, P_PLL8, 1, 8 },
2073 - { 54857000, P_PLL8, 1, 7 },
2074 - { 64000000, P_PLL8, 1, 6 },
2075 - { 76800000, P_PLL8, 1, 5 },
2076 - { 96000000, P_PLL8, 1, 4 },
2077 - { 128000000, P_PLL8, 1, 3 },
2078 - { 145455000, P_PLL2, 2, 11 },
2079 - { 160000000, P_PLL2, 1, 5 },
2080 - { 177778000, P_PLL2, 2, 9 },
2081 - { 200000000, P_PLL2, 1, 4 },
2082 - { 228571000, P_PLL2, 2, 7 },
2083 + F_MN( 27000000, P_PXO, 1, 0),
2084 + F_MN( 48000000, P_PLL8, 1, 8),
2085 + F_MN( 54857000, P_PLL8, 1, 7),
2086 + F_MN( 64000000, P_PLL8, 1, 6),
2087 + F_MN( 76800000, P_PLL8, 1, 5),
2088 + F_MN( 96000000, P_PLL8, 1, 4),
2089 + F_MN(128000000, P_PLL8, 1, 3),
2090 + F_MN(145455000, P_PLL2, 2, 11),
2091 + F_MN(160000000, P_PLL2, 1, 5),
2092 + F_MN(177778000, P_PLL2, 2, 9),
2093 + F_MN(200000000, P_PLL2, 1, 4),
2094 + F_MN(228571000, P_PLL2, 2, 7),
2095 { }
2096 };
2097
2098 @@ -841,22 +843,22 @@ static struct clk_branch gfx2d1_clk = {
2099 };
2100
2101 static struct freq_tbl clk_tbl_gfx3d[] = {
2102 - { 27000000, P_PXO, 1, 0 },
2103 - { 48000000, P_PLL8, 1, 8 },
2104 - { 54857000, P_PLL8, 1, 7 },
2105 - { 64000000, P_PLL8, 1, 6 },
2106 - { 76800000, P_PLL8, 1, 5 },
2107 - { 96000000, P_PLL8, 1, 4 },
2108 - { 128000000, P_PLL8, 1, 3 },
2109 - { 145455000, P_PLL2, 2, 11 },
2110 - { 160000000, P_PLL2, 1, 5 },
2111 - { 177778000, P_PLL2, 2, 9 },
2112 - { 200000000, P_PLL2, 1, 4 },
2113 - { 228571000, P_PLL2, 2, 7 },
2114 - { 266667000, P_PLL2, 1, 3 },
2115 - { 300000000, P_PLL3, 1, 4 },
2116 - { 320000000, P_PLL2, 2, 5 },
2117 - { 400000000, P_PLL2, 1, 2 },
2118 + F_MN( 27000000, P_PXO, 1, 0),
2119 + F_MN( 48000000, P_PLL8, 1, 8),
2120 + F_MN( 54857000, P_PLL8, 1, 7),
2121 + F_MN( 64000000, P_PLL8, 1, 6),
2122 + F_MN( 76800000, P_PLL8, 1, 5),
2123 + F_MN( 96000000, P_PLL8, 1, 4),
2124 + F_MN(128000000, P_PLL8, 1, 3),
2125 + F_MN(145455000, P_PLL2, 2, 11),
2126 + F_MN(160000000, P_PLL2, 1, 5),
2127 + F_MN(177778000, P_PLL2, 2, 9),
2128 + F_MN(200000000, P_PLL2, 1, 4),
2129 + F_MN(228571000, P_PLL2, 2, 7),
2130 + F_MN(266667000, P_PLL2, 1, 3),
2131 + F_MN(300000000, P_PLL3, 1, 4),
2132 + F_MN(320000000, P_PLL2, 2, 5),
2133 + F_MN(400000000, P_PLL2, 1, 2),
2134 { }
2135 };
2136
2137 @@ -896,7 +898,7 @@ static struct clk_dyn_rcg gfx3d_src = {
2138 .hw.init = &(struct clk_init_data){
2139 .name = "gfx3d_src",
2140 .parent_names = mmcc_pxo_pll8_pll2_pll3,
2141 - .num_parents = 3,
2142 + .num_parents = 4,
2143 .ops = &clk_dyn_rcg_ops,
2144 },
2145 },
2146 @@ -994,7 +996,7 @@ static struct clk_rcg jpegd_src = {
2147 .ns_reg = 0x00ac,
2148 .p = {
2149 .pre_div_shift = 12,
2150 - .pre_div_width = 2,
2151 + .pre_div_width = 4,
2152 },
2153 .s = {
2154 .src_sel_shift = 0,
2155 @@ -1114,7 +1116,7 @@ static struct clk_branch mdp_lut_clk = {
2156 .enable_reg = 0x016c,
2157 .enable_mask = BIT(0),
2158 .hw.init = &(struct clk_init_data){
2159 - .parent_names = (const char *[]){ "mdp_clk" },
2160 + .parent_names = (const char *[]){ "mdp_src" },
2161 .num_parents = 1,
2162 .name = "mdp_lut_clk",
2163 .ops = &clk_branch_ops,
2164 @@ -1341,15 +1343,15 @@ static struct clk_branch hdmi_app_clk = {
2165 };
2166
2167 static struct freq_tbl clk_tbl_vcodec[] = {
2168 - { 27000000, P_PXO, 1, 0 },
2169 - { 32000000, P_PLL8, 1, 12 },
2170 - { 48000000, P_PLL8, 1, 8 },
2171 - { 54860000, P_PLL8, 1, 7 },
2172 - { 96000000, P_PLL8, 1, 4 },
2173 - { 133330000, P_PLL2, 1, 6 },
2174 - { 200000000, P_PLL2, 1, 4 },
2175 - { 228570000, P_PLL2, 2, 7 },
2176 - { 266670000, P_PLL2, 1, 3 },
2177 + F_MN( 27000000, P_PXO, 1, 0),
2178 + F_MN( 32000000, P_PLL8, 1, 12),
2179 + F_MN( 48000000, P_PLL8, 1, 8),
2180 + F_MN( 54860000, P_PLL8, 1, 7),
2181 + F_MN( 96000000, P_PLL8, 1, 4),
2182 + F_MN(133330000, P_PLL2, 1, 6),
2183 + F_MN(200000000, P_PLL2, 1, 4),
2184 + F_MN(228570000, P_PLL2, 2, 7),
2185 + F_MN(266670000, P_PLL2, 1, 3),
2186 { }
2187 };
2188
2189 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
2190 index 153f4b92cc05..415923606164 100644
2191 --- a/drivers/cpufreq/cpufreq.c
2192 +++ b/drivers/cpufreq/cpufreq.c
2193 @@ -1225,6 +1225,8 @@ err_get_freq:
2194 per_cpu(cpufreq_cpu_data, j) = NULL;
2195 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2196
2197 + up_write(&policy->rwsem);
2198 +
2199 if (cpufreq_driver->exit)
2200 cpufreq_driver->exit(policy);
2201 err_set_policy_cpu:
2202 diff --git a/drivers/dma/TODO b/drivers/dma/TODO
2203 index 734ed0206cd5..b8045cd42ee1 100644
2204 --- a/drivers/dma/TODO
2205 +++ b/drivers/dma/TODO
2206 @@ -7,7 +7,6 @@ TODO for slave dma
2207 - imx-dma
2208 - imx-sdma
2209 - mxs-dma.c
2210 - - dw_dmac
2211 - intel_mid_dma
2212 4. Check other subsystems for dma drivers and merge/move to dmaengine
2213 5. Remove dma_slave_config's dma direction.
2214 diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
2215 index 01a200cd0189..b0972b3869c7 100644
2216 --- a/drivers/dma/dw/core.c
2217 +++ b/drivers/dma/dw/core.c
2218 @@ -279,6 +279,15 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
2219 channel_set_bit(dw, CH_EN, dwc->mask);
2220 }
2221
2222 +static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
2223 +{
2224 + if (list_empty(&dwc->queue))
2225 + return;
2226 +
2227 + list_move(dwc->queue.next, &dwc->active_list);
2228 + dwc_dostart(dwc, dwc_first_active(dwc));
2229 +}
2230 +
2231 /*----------------------------------------------------------------------*/
2232
2233 static void
2234 @@ -335,10 +344,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
2235 * the completed ones.
2236 */
2237 list_splice_init(&dwc->active_list, &list);
2238 - if (!list_empty(&dwc->queue)) {
2239 - list_move(dwc->queue.next, &dwc->active_list);
2240 - dwc_dostart(dwc, dwc_first_active(dwc));
2241 - }
2242 + dwc_dostart_first_queued(dwc);
2243
2244 spin_unlock_irqrestore(&dwc->lock, flags);
2245
2246 @@ -467,10 +473,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
2247 /* Try to continue after resetting the channel... */
2248 dwc_chan_disable(dw, dwc);
2249
2250 - if (!list_empty(&dwc->queue)) {
2251 - list_move(dwc->queue.next, &dwc->active_list);
2252 - dwc_dostart(dwc, dwc_first_active(dwc));
2253 - }
2254 + dwc_dostart_first_queued(dwc);
2255 spin_unlock_irqrestore(&dwc->lock, flags);
2256 }
2257
2258 @@ -677,17 +680,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
2259 * possible, perhaps even appending to those already submitted
2260 * for DMA. But this is hard to do in a race-free manner.
2261 */
2262 - if (list_empty(&dwc->active_list)) {
2263 - dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
2264 - desc->txd.cookie);
2265 - list_add_tail(&desc->desc_node, &dwc->active_list);
2266 - dwc_dostart(dwc, dwc_first_active(dwc));
2267 - } else {
2268 - dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
2269 - desc->txd.cookie);
2270
2271 - list_add_tail(&desc->desc_node, &dwc->queue);
2272 - }
2273 + dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
2274 + list_add_tail(&desc->desc_node, &dwc->queue);
2275
2276 spin_unlock_irqrestore(&dwc->lock, flags);
2277
2278 @@ -1092,9 +1087,12 @@ dwc_tx_status(struct dma_chan *chan,
2279 static void dwc_issue_pending(struct dma_chan *chan)
2280 {
2281 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
2282 + unsigned long flags;
2283
2284 - if (!list_empty(&dwc->queue))
2285 - dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
2286 + spin_lock_irqsave(&dwc->lock, flags);
2287 + if (list_empty(&dwc->active_list))
2288 + dwc_dostart_first_queued(dwc);
2289 + spin_unlock_irqrestore(&dwc->lock, flags);
2290 }
2291
2292 static int dwc_alloc_chan_resources(struct dma_chan *chan)
2293 diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
2294 index 50535fd5a88d..d830b38e54f6 100644
2295 --- a/drivers/gpu/drm/ast/ast_main.c
2296 +++ b/drivers/gpu/drm/ast/ast_main.c
2297 @@ -100,7 +100,7 @@ static int ast_detect_chip(struct drm_device *dev)
2298 }
2299 ast->vga2_clone = false;
2300 } else {
2301 - ast->chip = 2000;
2302 + ast->chip = AST2000;
2303 DRM_INFO("AST 2000 detected\n");
2304 }
2305 }
2306 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
2307 index 3ecb332e7cfa..7410a507eacc 100644
2308 --- a/drivers/gpu/drm/i915/i915_gem.c
2309 +++ b/drivers/gpu/drm/i915/i915_gem.c
2310 @@ -1426,10 +1426,13 @@ unlock:
2311 out:
2312 switch (ret) {
2313 case -EIO:
2314 - /* If this -EIO is due to a gpu hang, give the reset code a
2315 - * chance to clean up the mess. Otherwise return the proper
2316 - * SIGBUS. */
2317 - if (i915_terminally_wedged(&dev_priv->gpu_error)) {
2318 + /*
2319 + * We eat errors when the gpu is terminally wedged to avoid
2320 + * userspace unduly crashing (gl has no provisions for mmaps to
2321 + * fail). But any other -EIO isn't ours (e.g. swap in failure)
2322 + * and so needs to be reported.
2323 + */
2324 + if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
2325 ret = VM_FAULT_SIGBUS;
2326 break;
2327 }
2328 diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
2329 index f22041973f3a..08105fddfd2a 100644
2330 --- a/drivers/gpu/drm/i915/intel_bios.c
2331 +++ b/drivers/gpu/drm/i915/intel_bios.c
2332 @@ -839,7 +839,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
2333 }
2334 }
2335
2336 -static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
2337 +static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
2338 {
2339 DRM_DEBUG_KMS("Falling back to manually reading VBT from "
2340 "VBIOS ROM for %s\n",
2341 diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
2342 index b19ddacbe19d..834847527982 100644
2343 --- a/drivers/gpu/drm/i915/intel_crt.c
2344 +++ b/drivers/gpu/drm/i915/intel_crt.c
2345 @@ -750,7 +750,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
2346 .destroy = intel_encoder_destroy,
2347 };
2348
2349 -static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
2350 +static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
2351 {
2352 DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
2353 return 1;
2354 diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
2355 index eb8f64b5fb85..67c9ff389989 100644
2356 --- a/drivers/gpu/drm/i915/intel_lvds.c
2357 +++ b/drivers/gpu/drm/i915/intel_lvds.c
2358 @@ -544,7 +544,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
2359 .destroy = intel_encoder_destroy,
2360 };
2361
2362 -static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
2363 +static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
2364 {
2365 DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
2366 return 1;
2367 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
2368 index 31b36c5ac894..d488fc71ef49 100644
2369 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
2370 +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
2371 @@ -475,6 +475,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
2372 }
2373 }
2374
2375 + /* Enforce ordering by reading HEAD register back */
2376 + I915_READ_HEAD(ring);
2377 +
2378 /* Initialize the ring. This must happen _after_ we've cleared the ring
2379 * registers with the above sequence (the readback of the HEAD registers
2380 * also enforces ordering), otherwise the hw might lose the new ring
2381 diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
2382 index 365c7c47c46c..9c9606c8bb1a 100644
2383 --- a/drivers/gpu/drm/i915/intel_tv.c
2384 +++ b/drivers/gpu/drm/i915/intel_tv.c
2385 @@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
2386 struct drm_device *dev = encoder->base.dev;
2387 struct drm_i915_private *dev_priv = dev->dev_private;
2388
2389 + /* Prevents vblank waits from timing out in intel_tv_detect_type() */
2390 + intel_wait_for_vblank(encoder->base.dev,
2391 + to_intel_crtc(encoder->base.crtc)->pipe);
2392 +
2393 I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
2394 }
2395
2396 diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
2397 index 471347edc27e..a92fb01459c9 100644
2398 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c
2399 +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
2400 @@ -100,7 +100,16 @@ void
2401 nouveau_vga_fini(struct nouveau_drm *drm)
2402 {
2403 struct drm_device *dev = drm->dev;
2404 + bool runtime = false;
2405 +
2406 + if (nouveau_runtime_pm == 1)
2407 + runtime = true;
2408 + if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
2409 + runtime = true;
2410 +
2411 vga_switcheroo_unregister_client(dev->pdev);
2412 + if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
2413 + vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
2414 vga_client_register(dev->pdev, NULL, NULL, NULL);
2415 }
2416
2417 diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
2418 index 5fa854c84d62..543ba2d4a659 100644
2419 --- a/drivers/gpu/drm/radeon/ci_dpm.c
2420 +++ b/drivers/gpu/drm/radeon/ci_dpm.c
2421 @@ -851,6 +851,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
2422 WREG32_SMC(CG_THERMAL_CTRL, tmp);
2423 #endif
2424
2425 + rdev->pm.dpm.thermal.min_temp = low_temp;
2426 + rdev->pm.dpm.thermal.max_temp = high_temp;
2427 +
2428 return 0;
2429 }
2430
2431 @@ -922,7 +925,18 @@ static void ci_get_leakage_voltages(struct radeon_device *rdev)
2432 pi->vddc_leakage.count = 0;
2433 pi->vddci_leakage.count = 0;
2434
2435 - if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
2436 + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
2437 + for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
2438 + virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2439 + if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
2440 + continue;
2441 + if (vddc != 0 && vddc != virtual_voltage_id) {
2442 + pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
2443 + pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
2444 + pi->vddc_leakage.count++;
2445 + }
2446 + }
2447 + } else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
2448 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
2449 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2450 if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
2451 diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
2452 index 7b3537c55c77..ab5c26575622 100644
2453 --- a/drivers/gpu/drm/radeon/cik.c
2454 +++ b/drivers/gpu/drm/radeon/cik.c
2455 @@ -4392,7 +4392,7 @@ struct bonaire_mqd
2456 */
2457 static int cik_cp_compute_resume(struct radeon_device *rdev)
2458 {
2459 - int r, i, idx;
2460 + int r, i, j, idx;
2461 u32 tmp;
2462 bool use_doorbell = true;
2463 u64 hqd_gpu_addr;
2464 @@ -4511,7 +4511,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
2465 mqd->queue_state.cp_hqd_pq_wptr= 0;
2466 if (RREG32(CP_HQD_ACTIVE) & 1) {
2467 WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
2468 - for (i = 0; i < rdev->usec_timeout; i++) {
2469 + for (j = 0; j < rdev->usec_timeout; j++) {
2470 if (!(RREG32(CP_HQD_ACTIVE) & 1))
2471 break;
2472 udelay(1);
2473 @@ -5545,12 +5545,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
2474 void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2475 {
2476 struct radeon_ring *ring = &rdev->ring[ridx];
2477 + int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
2478
2479 if (vm == NULL)
2480 return;
2481
2482 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2483 - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2484 + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
2485 WRITE_DATA_DST_SEL(0)));
2486 if (vm->id < 8) {
2487 radeon_ring_write(ring,
2488 @@ -5600,7 +5601,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2489 radeon_ring_write(ring, 1 << vm->id);
2490
2491 /* compute doesn't have PFP */
2492 - if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
2493 + if (usepfp) {
2494 /* sync PFP to ME, otherwise we might get invalid PFP reads */
2495 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2496 radeon_ring_write(ring, 0x0);
2497 diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
2498 index aac8f487e6df..66ba713ba7d7 100644
2499 --- a/drivers/gpu/drm/radeon/cik_sdma.c
2500 +++ b/drivers/gpu/drm/radeon/cik_sdma.c
2501 @@ -461,13 +461,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
2502 {
2503 int r;
2504
2505 - /* Reset dma */
2506 - WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
2507 - RREG32(SRBM_SOFT_RESET);
2508 - udelay(50);
2509 - WREG32(SRBM_SOFT_RESET, 0);
2510 - RREG32(SRBM_SOFT_RESET);
2511 -
2512 r = cik_sdma_load_microcode(rdev);
2513 if (r)
2514 return r;
2515 diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
2516 index 7cf96b15377f..94fa49e974eb 100644
2517 --- a/drivers/gpu/drm/radeon/ni_dma.c
2518 +++ b/drivers/gpu/drm/radeon/ni_dma.c
2519 @@ -191,12 +191,6 @@ int cayman_dma_resume(struct radeon_device *rdev)
2520 u32 reg_offset, wb_offset;
2521 int i, r;
2522
2523 - /* Reset dma */
2524 - WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
2525 - RREG32(SRBM_SOFT_RESET);
2526 - udelay(50);
2527 - WREG32(SRBM_SOFT_RESET, 0);
2528 -
2529 for (i = 0; i < 2; i++) {
2530 if (i == 0) {
2531 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2532 diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
2533 index b2d4c91e6272..99495513f6b1 100644
2534 --- a/drivers/gpu/drm/radeon/r600_dma.c
2535 +++ b/drivers/gpu/drm/radeon/r600_dma.c
2536 @@ -124,15 +124,6 @@ int r600_dma_resume(struct radeon_device *rdev)
2537 u32 rb_bufsz;
2538 int r;
2539
2540 - /* Reset dma */
2541 - if (rdev->family >= CHIP_RV770)
2542 - WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
2543 - else
2544 - WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
2545 - RREG32(SRBM_SOFT_RESET);
2546 - udelay(50);
2547 - WREG32(SRBM_SOFT_RESET, 0);
2548 -
2549 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
2550 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
2551
2552 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
2553 index 08e86f90c9a4..b837e9f9f8ce 100644
2554 --- a/drivers/gpu/drm/radeon/radeon.h
2555 +++ b/drivers/gpu/drm/radeon/radeon.h
2556 @@ -294,6 +294,9 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
2557 u16 *vddc, u16 *vddci,
2558 u16 virtual_voltage_id,
2559 u16 vbios_voltage_id);
2560 +int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
2561 + u16 virtual_voltage_id,
2562 + u16 *voltage);
2563 int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
2564 u8 voltage_type,
2565 u16 nominal_voltage,
2566 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
2567 index 30844814c25a..e2de749327ad 100644
2568 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
2569 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
2570 @@ -447,6 +447,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
2571 }
2572 }
2573
2574 + /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
2575 + if ((dev->pdev->device == 0x9805) &&
2576 + (dev->pdev->subsystem_vendor == 0x1734) &&
2577 + (dev->pdev->subsystem_device == 0x11bd)) {
2578 + if (*connector_type == DRM_MODE_CONNECTOR_VGA)
2579 + return false;
2580 + }
2581
2582 return true;
2583 }
2584 @@ -1955,7 +1962,7 @@ static const char *thermal_controller_names[] = {
2585 "adm1032",
2586 "adm1030",
2587 "max6649",
2588 - "lm64",
2589 + "lm63", /* lm64 */
2590 "f75375",
2591 "asc7xxx",
2592 };
2593 @@ -1966,7 +1973,7 @@ static const char *pp_lib_thermal_controller_names[] = {
2594 "adm1032",
2595 "adm1030",
2596 "max6649",
2597 - "lm64",
2598 + "lm63", /* lm64 */
2599 "f75375",
2600 "RV6xx",
2601 "RV770",
2602 @@ -2273,19 +2280,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
2603 (controller->ucFanParameters &
2604 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2605 rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
2606 - } else if ((controller->ucType ==
2607 - ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
2608 - (controller->ucType ==
2609 - ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
2610 - (controller->ucType ==
2611 - ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
2612 - DRM_INFO("Special thermal controller config\n");
2613 + } else if (controller->ucType ==
2614 + ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
2615 + DRM_INFO("External GPIO thermal controller %s fan control\n",
2616 + (controller->ucFanParameters &
2617 + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2618 + rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
2619 + } else if (controller->ucType ==
2620 + ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
2621 + DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
2622 + (controller->ucFanParameters &
2623 + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2624 + rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
2625 + } else if (controller->ucType ==
2626 + ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
2627 + DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
2628 + (controller->ucFanParameters &
2629 + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2630 + rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
2631 } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
2632 DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
2633 pp_lib_thermal_controller_names[controller->ucType],
2634 controller->ucI2cAddress >> 1,
2635 (controller->ucFanParameters &
2636 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2637 + rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
2638 i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
2639 rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
2640 if (rdev->pm.i2c_bus) {
2641 @@ -3228,6 +3247,41 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
2642 return 0;
2643 }
2644
2645 +union get_voltage_info {
2646 + struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
2647 + struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
2648 +};
2649 +
2650 +int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
2651 + u16 virtual_voltage_id,
2652 + u16 *voltage)
2653 +{
2654 + int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
2655 + u32 entry_id;
2656 + u32 count = rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
2657 + union get_voltage_info args;
2658 +
2659 + for (entry_id = 0; entry_id < count; entry_id++) {
2660 + if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
2661 + virtual_voltage_id)
2662 + break;
2663 + }
2664 +
2665 + if (entry_id >= count)
2666 + return -EINVAL;
2667 +
2668 + args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
2669 + args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
2670 + args.in.ulSCLKFreq =
2671 + cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
2672 +
2673 + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2674 +
2675 + *voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
2676 +
2677 + return 0;
2678 +}
2679 +
2680 int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
2681 u16 voltage_level, u8 voltage_type,
2682 u32 *gpio_value, u32 *gpio_mask)
2683 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2684 index 0bf6f4a2bb97..e39026cc7d07 100644
2685 --- a/drivers/gpu/drm/radeon/radeon_device.c
2686 +++ b/drivers/gpu/drm/radeon/radeon_device.c
2687 @@ -1314,7 +1314,7 @@ int radeon_device_init(struct radeon_device *rdev,
2688
2689 r = radeon_init(rdev);
2690 if (r)
2691 - return r;
2692 + goto failed;
2693
2694 r = radeon_ib_ring_tests(rdev);
2695 if (r)
2696 @@ -1334,7 +1334,7 @@ int radeon_device_init(struct radeon_device *rdev,
2697 radeon_agp_disable(rdev);
2698 r = radeon_init(rdev);
2699 if (r)
2700 - return r;
2701 + goto failed;
2702 }
2703
2704 if ((radeon_testing & 1)) {
2705 @@ -1356,6 +1356,11 @@ int radeon_device_init(struct radeon_device *rdev,
2706 DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
2707 }
2708 return 0;
2709 +
2710 +failed:
2711 + if (runtime)
2712 + vga_switcheroo_fini_domain_pm_ops(rdev->dev);
2713 + return r;
2714 }
2715
2716 static void radeon_debugfs_remove_files(struct radeon_device *rdev);
2717 @@ -1376,6 +1381,8 @@ void radeon_device_fini(struct radeon_device *rdev)
2718 radeon_bo_evict_vram(rdev);
2719 radeon_fini(rdev);
2720 vga_switcheroo_unregister_client(rdev->pdev);
2721 + if (rdev->flags & RADEON_IS_PX)
2722 + vga_switcheroo_fini_domain_pm_ops(rdev->dev);
2723 vga_client_register(rdev->pdev, NULL, NULL, NULL);
2724 if (rdev->rio_mem)
2725 pci_iounmap(rdev->pdev, rdev->rio_mem);
2726 @@ -1600,7 +1607,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
2727 radeon_save_bios_scratch_regs(rdev);
2728 /* block TTM */
2729 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
2730 - radeon_pm_suspend(rdev);
2731 radeon_suspend(rdev);
2732
2733 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
2734 @@ -1646,9 +1652,24 @@ retry:
2735 }
2736 }
2737
2738 - radeon_pm_resume(rdev);
2739 + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2740 + /* do dpm late init */
2741 + r = radeon_pm_late_init(rdev);
2742 + if (r) {
2743 + rdev->pm.dpm_enabled = false;
2744 + DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
2745 + }
2746 + } else {
2747 + /* resume old pm late */
2748 + radeon_pm_resume(rdev);
2749 + }
2750 +
2751 drm_helper_resume_force_mode(rdev->ddev);
2752
2753 + /* set the power state here in case we are a PX system or headless */
2754 + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
2755 + radeon_pm_compute_clocks(rdev);
2756 +
2757 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
2758 if (r) {
2759 /* bad news, how to tell it to userspace ? */
2760 diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
2761 index ea34a31d3bc8..0bc9106ef435 100644
2762 --- a/drivers/gpu/drm/radeon/radeon_kms.c
2763 +++ b/drivers/gpu/drm/radeon/radeon_kms.c
2764 @@ -254,7 +254,14 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
2765 }
2766 break;
2767 case RADEON_INFO_ACCEL_WORKING2:
2768 - *value = rdev->accel_working;
2769 + if (rdev->family == CHIP_HAWAII) {
2770 + if (rdev->accel_working)
2771 + *value = 2;
2772 + else
2773 + *value = 0;
2774 + } else {
2775 + *value = rdev->accel_working;
2776 + }
2777 break;
2778 case RADEON_INFO_TILING_CONFIG:
2779 if (rdev->family >= CHIP_BONAIRE)
2780 diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
2781 index f77d9d0d54b5..cfb513f933d5 100644
2782 --- a/drivers/gpu/drm/radeon/radeon_pm.c
2783 +++ b/drivers/gpu/drm/radeon/radeon_pm.c
2784 @@ -458,10 +458,6 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
2785 struct radeon_device *rdev = ddev->dev_private;
2786 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
2787
2788 - if ((rdev->flags & RADEON_IS_PX) &&
2789 - (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
2790 - return snprintf(buf, PAGE_SIZE, "off\n");
2791 -
2792 return snprintf(buf, PAGE_SIZE, "%s\n",
2793 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
2794 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
2795 @@ -475,11 +471,6 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
2796 struct drm_device *ddev = dev_get_drvdata(dev);
2797 struct radeon_device *rdev = ddev->dev_private;
2798
2799 - /* Can't set dpm state when the card is off */
2800 - if ((rdev->flags & RADEON_IS_PX) &&
2801 - (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
2802 - return -EINVAL;
2803 -
2804 mutex_lock(&rdev->pm.mutex);
2805 if (strncmp("battery", buf, strlen("battery")) == 0)
2806 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
2807 @@ -493,7 +484,12 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
2808 goto fail;
2809 }
2810 mutex_unlock(&rdev->pm.mutex);
2811 - radeon_pm_compute_clocks(rdev);
2812 +
2813 + /* Can't set dpm state when the card is off */
2814 + if (!(rdev->flags & RADEON_IS_PX) ||
2815 + (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
2816 + radeon_pm_compute_clocks(rdev);
2817 +
2818 fail:
2819 return count;
2820 }
2821 @@ -1276,10 +1272,6 @@ int radeon_pm_init(struct radeon_device *rdev)
2822 case CHIP_RS780:
2823 case CHIP_RS880:
2824 case CHIP_RV770:
2825 - case CHIP_BARTS:
2826 - case CHIP_TURKS:
2827 - case CHIP_CAICOS:
2828 - case CHIP_CAYMAN:
2829 /* DPM requires the RLC, RV770+ dGPU requires SMC */
2830 if (!rdev->rlc_fw)
2831 rdev->pm.pm_method = PM_METHOD_PROFILE;
2832 @@ -1303,6 +1295,10 @@ int radeon_pm_init(struct radeon_device *rdev)
2833 case CHIP_PALM:
2834 case CHIP_SUMO:
2835 case CHIP_SUMO2:
2836 + case CHIP_BARTS:
2837 + case CHIP_TURKS:
2838 + case CHIP_CAICOS:
2839 + case CHIP_CAYMAN:
2840 case CHIP_ARUBA:
2841 case CHIP_TAHITI:
2842 case CHIP_PITCAIRN:
2843 diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
2844 index 9006b32d5eed..eb7b60047e86 100644
2845 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c
2846 +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
2847 @@ -34,7 +34,7 @@
2848 int radeon_semaphore_create(struct radeon_device *rdev,
2849 struct radeon_semaphore **semaphore)
2850 {
2851 - uint32_t *cpu_addr;
2852 + uint64_t *cpu_addr;
2853 int i, r;
2854
2855 *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
2856 diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
2857 index ea93393374df..559564c1dc97 100644
2858 --- a/drivers/gpu/drm/radeon/si.c
2859 +++ b/drivers/gpu/drm/radeon/si.c
2860 @@ -4810,7 +4810,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2861
2862 /* write new base address */
2863 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2864 - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2865 + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
2866 WRITE_DATA_DST_SEL(0)));
2867
2868 if (vm->id < 8) {
2869 diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
2870 index 2da0e17eb960..d9cfa09b2e3f 100644
2871 --- a/drivers/gpu/drm/radeon/trinity_dpm.c
2872 +++ b/drivers/gpu/drm/radeon/trinity_dpm.c
2873 @@ -1877,7 +1877,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
2874 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
2875 pi->at[i] = TRINITY_AT_DFLT;
2876
2877 - pi->enable_bapm = false;
2878 +	/* There are stability issues reported with
2879 + * bapm enabled when switching between AC and battery
2880 + * power. At the same time, some MSI boards hang
2881 + * if it's not enabled and dpm is enabled. Just enable
2882 + * it for MSI boards right now.
2883 + */
2884 + if (rdev->pdev->subsystem_vendor == 0x1462)
2885 + pi->enable_bapm = true;
2886 + else
2887 + pi->enable_bapm = false;
2888 pi->enable_nbps_policy = true;
2889 pi->enable_sclk_ds = true;
2890 pi->enable_gfx_power_gating = true;
2891 diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
2892 index 171a8203892c..0644429f8559 100644
2893 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
2894 +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
2895 @@ -122,6 +122,7 @@ static int tilcdc_unload(struct drm_device *dev)
2896 struct tilcdc_drm_private *priv = dev->dev_private;
2897 struct tilcdc_module *mod, *cur;
2898
2899 + drm_fbdev_cma_fini(priv->fbdev);
2900 drm_kms_helper_poll_fini(dev);
2901 drm_mode_config_cleanup(dev);
2902 drm_vblank_cleanup(dev);
2903 @@ -628,10 +629,10 @@ static int __init tilcdc_drm_init(void)
2904 static void __exit tilcdc_drm_fini(void)
2905 {
2906 DBG("fini");
2907 - tilcdc_tfp410_fini();
2908 - tilcdc_slave_fini();
2909 - tilcdc_panel_fini();
2910 platform_driver_unregister(&tilcdc_platform_driver);
2911 + tilcdc_panel_fini();
2912 + tilcdc_slave_fini();
2913 + tilcdc_tfp410_fini();
2914 }
2915
2916 late_initcall(tilcdc_drm_init);
2917 diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
2918 index 86c67329b605..b085dcc54fb5 100644
2919 --- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
2920 +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
2921 @@ -151,6 +151,7 @@ struct panel_connector {
2922 static void panel_connector_destroy(struct drm_connector *connector)
2923 {
2924 struct panel_connector *panel_connector = to_panel_connector(connector);
2925 + drm_sysfs_connector_remove(connector);
2926 drm_connector_cleanup(connector);
2927 kfree(panel_connector);
2928 }
2929 @@ -285,10 +286,8 @@ static void panel_destroy(struct tilcdc_module *mod)
2930 {
2931 struct panel_module *panel_mod = to_panel_module(mod);
2932
2933 - if (panel_mod->timings) {
2934 + if (panel_mod->timings)
2935 display_timings_release(panel_mod->timings);
2936 - kfree(panel_mod->timings);
2937 - }
2938
2939 tilcdc_module_cleanup(mod);
2940 kfree(panel_mod->info);
2941 diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
2942 index 595068ba2d5e..2f83ffb7f37e 100644
2943 --- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
2944 +++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
2945 @@ -166,6 +166,7 @@ struct slave_connector {
2946 static void slave_connector_destroy(struct drm_connector *connector)
2947 {
2948 struct slave_connector *slave_connector = to_slave_connector(connector);
2949 + drm_sysfs_connector_remove(connector);
2950 drm_connector_cleanup(connector);
2951 kfree(slave_connector);
2952 }
2953 diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
2954 index c38b56b268ac..ce75ac8de4f8 100644
2955 --- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
2956 +++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
2957 @@ -167,6 +167,7 @@ struct tfp410_connector {
2958 static void tfp410_connector_destroy(struct drm_connector *connector)
2959 {
2960 struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
2961 + drm_sysfs_connector_remove(connector);
2962 drm_connector_cleanup(connector);
2963 kfree(tfp410_connector);
2964 }
2965 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
2966 index 863bef9f9234..cf4bad2c1d59 100644
2967 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
2968 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
2969 @@ -297,8 +297,10 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
2970 *
2971 * @pool: to free the pages from
2972 * @free_all: If set to true will free all pages in pool
2973 + * @gfp: GFP flags.
2974 **/
2975 -static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
2976 +static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
2977 + gfp_t gfp)
2978 {
2979 unsigned long irq_flags;
2980 struct page *p;
2981 @@ -309,8 +311,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
2982 if (NUM_PAGES_TO_ALLOC < nr_free)
2983 npages_to_free = NUM_PAGES_TO_ALLOC;
2984
2985 - pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
2986 - GFP_KERNEL);
2987 + pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
2988 if (!pages_to_free) {
2989 pr_err("Failed to allocate memory for pool free operation\n");
2990 return 0;
2991 @@ -382,32 +383,35 @@ out:
2992 *
2993 * XXX: (dchinner) Deadlock warning!
2994 *
2995 - * ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means
2996 - * this can deadlock when called a sc->gfp_mask that is not equal to
2997 - * GFP_KERNEL.
2998 + * We need to pass sc->gfp_mask to ttm_page_pool_free().
2999 *
3000 * This code is crying out for a shrinker per pool....
3001 */
3002 static unsigned long
3003 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3004 {
3005 - static atomic_t start_pool = ATOMIC_INIT(0);
3006 + static DEFINE_MUTEX(lock);
3007 + static unsigned start_pool;
3008 unsigned i;
3009 - unsigned pool_offset = atomic_add_return(1, &start_pool);
3010 + unsigned pool_offset;
3011 struct ttm_page_pool *pool;
3012 int shrink_pages = sc->nr_to_scan;
3013 unsigned long freed = 0;
3014
3015 - pool_offset = pool_offset % NUM_POOLS;
3016 + if (!mutex_trylock(&lock))
3017 + return SHRINK_STOP;
3018 + pool_offset = ++start_pool % NUM_POOLS;
3019 /* select start pool in round robin fashion */
3020 for (i = 0; i < NUM_POOLS; ++i) {
3021 unsigned nr_free = shrink_pages;
3022 if (shrink_pages == 0)
3023 break;
3024 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
3025 - shrink_pages = ttm_page_pool_free(pool, nr_free);
3026 + shrink_pages = ttm_page_pool_free(pool, nr_free,
3027 + sc->gfp_mask);
3028 freed += nr_free - shrink_pages;
3029 }
3030 + mutex_unlock(&lock);
3031 return freed;
3032 }
3033
3034 @@ -706,7 +710,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
3035 }
3036 spin_unlock_irqrestore(&pool->lock, irq_flags);
3037 if (npages)
3038 - ttm_page_pool_free(pool, npages);
3039 + ttm_page_pool_free(pool, npages, GFP_KERNEL);
3040 }
3041
3042 /*
3043 @@ -846,7 +850,8 @@ void ttm_page_alloc_fini(void)
3044 ttm_pool_mm_shrink_fini(_manager);
3045
3046 for (i = 0; i < NUM_POOLS; ++i)
3047 - ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
3048 + ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
3049 + GFP_KERNEL);
3050
3051 kobject_put(&_manager->kobj);
3052 _manager = NULL;
3053 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
3054 index fb8259f69839..ca65df144765 100644
3055 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
3056 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
3057 @@ -411,8 +411,10 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
3058 *
3059 * @pool: to free the pages from
3060 * @nr_free: If set to true will free all pages in pool
3061 + * @gfp: GFP flags.
3062 **/
3063 -static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
3064 +static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
3065 + gfp_t gfp)
3066 {
3067 unsigned long irq_flags;
3068 struct dma_page *dma_p, *tmp;
3069 @@ -430,8 +432,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
3070 npages_to_free, nr_free);
3071 }
3072 #endif
3073 - pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
3074 - GFP_KERNEL);
3075 + pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
3076
3077 if (!pages_to_free) {
3078 pr_err("%s: Failed to allocate memory for pool free operation\n",
3079 @@ -530,7 +531,7 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
3080 if (pool->type != type)
3081 continue;
3082 /* Takes a spinlock.. */
3083 - ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
3084 + ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
3085 WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
3086 /* This code path is called after _all_ references to the
3087 * struct device has been dropped - so nobody should be
3088 @@ -983,7 +984,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
3089
3090 /* shrink pool if necessary (only on !is_cached pools)*/
3091 if (npages)
3092 - ttm_dma_page_pool_free(pool, npages);
3093 + ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
3094 ttm->state = tt_unpopulated;
3095 }
3096 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
3097 @@ -993,10 +994,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
3098 *
3099 * XXX: (dchinner) Deadlock warning!
3100 *
3101 - * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
3102 - * needs to be paid to sc->gfp_mask to determine if this can be done or not.
3103 - * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
3104 - * bad.
3105 + * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
3106 *
3107 * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
3108 * shrinkers
3109 @@ -1004,9 +1002,9 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
3110 static unsigned long
3111 ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3112 {
3113 - static atomic_t start_pool = ATOMIC_INIT(0);
3114 + static unsigned start_pool;
3115 unsigned idx = 0;
3116 - unsigned pool_offset = atomic_add_return(1, &start_pool);
3117 + unsigned pool_offset;
3118 unsigned shrink_pages = sc->nr_to_scan;
3119 struct device_pools *p;
3120 unsigned long freed = 0;
3121 @@ -1014,8 +1012,11 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3122 if (list_empty(&_manager->pools))
3123 return SHRINK_STOP;
3124
3125 - mutex_lock(&_manager->lock);
3126 - pool_offset = pool_offset % _manager->npools;
3127 + if (!mutex_trylock(&_manager->lock))
3128 + return SHRINK_STOP;
3129 + if (!_manager->npools)
3130 + goto out;
3131 + pool_offset = ++start_pool % _manager->npools;
3132 list_for_each_entry(p, &_manager->pools, pools) {
3133 unsigned nr_free;
3134
3135 @@ -1027,13 +1028,15 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3136 if (++idx < pool_offset)
3137 continue;
3138 nr_free = shrink_pages;
3139 - shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
3140 + shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
3141 + sc->gfp_mask);
3142 freed += nr_free - shrink_pages;
3143
3144 pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
3145 p->pool->dev_name, p->pool->name, current->pid,
3146 nr_free, shrink_pages);
3147 }
3148 +out:
3149 mutex_unlock(&_manager->lock);
3150 return freed;
3151 }
3152 @@ -1044,7 +1047,8 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3153 struct device_pools *p;
3154 unsigned long count = 0;
3155
3156 - mutex_lock(&_manager->lock);
3157 + if (!mutex_trylock(&_manager->lock))
3158 + return 0;
3159 list_for_each_entry(p, &_manager->pools, pools)
3160 count += p->pool->npages_free;
3161 mutex_unlock(&_manager->lock);
3162 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
3163 index 6ccd993e26bf..6eae14d2a3f7 100644
3164 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
3165 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
3166 @@ -180,8 +180,9 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
3167
3168 mutex_lock(&dev_priv->hw_mutex);
3169
3170 + vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
3171 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
3172 - vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
3173 + ;
3174
3175 dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
3176
3177 diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
3178 index 6866448083b2..37ac7b5dbd06 100644
3179 --- a/drivers/gpu/vga/vga_switcheroo.c
3180 +++ b/drivers/gpu/vga/vga_switcheroo.c
3181 @@ -660,6 +660,12 @@ int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *
3182 }
3183 EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
3184
3185 +void vga_switcheroo_fini_domain_pm_ops(struct device *dev)
3186 +{
3187 + dev->pm_domain = NULL;
3188 +}
3189 +EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
3190 +
3191 static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
3192 {
3193 struct pci_dev *pdev = to_pci_dev(dev);
3194 diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
3195 index 0b14d3261531..5da115a6fd22 100644
3196 --- a/drivers/hid/hid-logitech-dj.c
3197 +++ b/drivers/hid/hid-logitech-dj.c
3198 @@ -687,7 +687,6 @@ static int logi_dj_raw_event(struct hid_device *hdev,
3199 struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
3200 struct dj_report *dj_report = (struct dj_report *) data;
3201 unsigned long flags;
3202 - bool report_processed = false;
3203
3204 dbg_hid("%s, size:%d\n", __func__, size);
3205
3206 @@ -714,34 +713,42 @@ static int logi_dj_raw_event(struct hid_device *hdev,
3207 * device (via hid_input_report() ) and return 1 so hid-core does not do
3208 * anything else with it.
3209 */
3210 +
3211 + /* case 1) */
3212 + if (data[0] != REPORT_ID_DJ_SHORT)
3213 + return false;
3214 +
3215 if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
3216 (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
3217 - dev_err(&hdev->dev, "%s: invalid device index:%d\n",
3218 + /*
3219 + * Device index is wrong, bail out.
3220 + * This driver can safely ignore the receiver notifications,
3221 + * so ignore those reports too.
3222 + */
3223 + if (dj_report->device_index != DJ_RECEIVER_INDEX)
3224 + dev_err(&hdev->dev, "%s: invalid device index:%d\n",
3225 __func__, dj_report->device_index);
3226 return false;
3227 }
3228
3229 spin_lock_irqsave(&djrcv_dev->lock, flags);
3230 - if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
3231 - switch (dj_report->report_type) {
3232 - case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
3233 - case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
3234 - logi_dj_recv_queue_notification(djrcv_dev, dj_report);
3235 - break;
3236 - case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
3237 - if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
3238 - STATUS_LINKLOSS) {
3239 - logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
3240 - }
3241 - break;
3242 - default:
3243 - logi_dj_recv_forward_report(djrcv_dev, dj_report);
3244 + switch (dj_report->report_type) {
3245 + case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
3246 + case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
3247 + logi_dj_recv_queue_notification(djrcv_dev, dj_report);
3248 + break;
3249 + case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
3250 + if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
3251 + STATUS_LINKLOSS) {
3252 + logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
3253 }
3254 - report_processed = true;
3255 + break;
3256 + default:
3257 + logi_dj_recv_forward_report(djrcv_dev, dj_report);
3258 }
3259 spin_unlock_irqrestore(&djrcv_dev->lock, flags);
3260
3261 - return report_processed;
3262 + return true;
3263 }
3264
3265 static int logi_dj_probe(struct hid_device *hdev,
3266 diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
3267 index 4a4000340ce1..daeb0aa4bee9 100644
3268 --- a/drivers/hid/hid-logitech-dj.h
3269 +++ b/drivers/hid/hid-logitech-dj.h
3270 @@ -27,6 +27,7 @@
3271
3272 #define DJ_MAX_PAIRED_DEVICES 6
3273 #define DJ_MAX_NUMBER_NOTIFICATIONS 8
3274 +#define DJ_RECEIVER_INDEX 0
3275 #define DJ_DEVICE_INDEX_MIN 1
3276 #define DJ_DEVICE_INDEX_MAX 6
3277
3278 diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
3279 index 3b43d1cfa936..991ba79cfc72 100644
3280 --- a/drivers/hid/hid-magicmouse.c
3281 +++ b/drivers/hid/hid-magicmouse.c
3282 @@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
3283 if (size < 4 || ((size - 4) % 9) != 0)
3284 return 0;
3285 npoints = (size - 4) / 9;
3286 + if (npoints > 15) {
3287 + hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
3288 + size);
3289 + return 0;
3290 + }
3291 msc->ntouches = 0;
3292 for (ii = 0; ii < npoints; ii++)
3293 magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
3294 @@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
3295 if (size < 6 || ((size - 6) % 8) != 0)
3296 return 0;
3297 npoints = (size - 6) / 8;
3298 + if (npoints > 15) {
3299 + hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
3300 + size);
3301 + return 0;
3302 + }
3303 msc->ntouches = 0;
3304 for (ii = 0; ii < npoints; ii++)
3305 magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
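Both hid-magicmouse hunks above bound npoints before it is used to index the driver's touch array, since the report size comes from the device and would otherwise let (size - 4) / 9 (trackpad) or (size - 6) / 8 (mouse) grow past the array; the patch rejects anything above 15. A small standalone sketch of the trackpad-side validation, assuming only that the array is sized with this cap in mind:

#include <stdio.h>

#define MAX_POINTS 15	/* cap used by the patch */

/* Trackpad report layout per the driver: 4 header bytes, then 9 bytes per touch. */
static int trackpad_npoints(int size)
{
	if (size < 4 || (size - 4) % 9 != 0)
		return -1;			/* malformed report */
	int npoints = (size - 4) / 9;
	if (npoints > MAX_POINTS)
		return -1;			/* would overrun the touch array */
	return npoints;
}

int main(void)
{
	printf("%d\n", trackpad_npoints(4 + 9 * 2));	/* 2  */
	printf("%d\n", trackpad_npoints(4 + 9 * 20));	/* -1 */
	printf("%d\n", trackpad_npoints(10));		/* -1 */
	return 0;
}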
3306 diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
3307 index acbb021065ec..020df3c2e8b4 100644
3308 --- a/drivers/hid/hid-picolcd_core.c
3309 +++ b/drivers/hid/hid-picolcd_core.c
3310 @@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev,
3311 if (!data)
3312 return 1;
3313
3314 + if (size > 64) {
3315 + hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
3316 + size);
3317 + return 0;
3318 + }
3319 +
3320 if (report->id == REPORT_KEY_STATE) {
3321 if (data->input_keys)
3322 ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
3323 diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
3324 index fc6f5d54e7f7..8890870309e4 100644
3325 --- a/drivers/hwmon/ds1621.c
3326 +++ b/drivers/hwmon/ds1621.c
3327 @@ -309,6 +309,7 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
3328 data->conf |= (resol << DS1621_REG_CONFIG_RESOL_SHIFT);
3329 i2c_smbus_write_byte_data(client, DS1621_REG_CONF, data->conf);
3330 data->update_interval = ds1721_convrates[resol];
3331 + data->zbits = 7 - resol;
3332 mutex_unlock(&data->update_lock);
3333
3334 return count;
3335 diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
3336 index c56be739006b..11e9c7f9bf9b 100644
3337 --- a/drivers/i2c/busses/i2c-at91.c
3338 +++ b/drivers/i2c/busses/i2c-at91.c
3339 @@ -101,6 +101,7 @@ struct at91_twi_dev {
3340 unsigned twi_cwgr_reg;
3341 struct at91_twi_pdata *pdata;
3342 bool use_dma;
3343 + bool recv_len_abort;
3344 struct at91_twi_dma dma;
3345 };
3346
3347 @@ -267,12 +268,24 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
3348 *dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff;
3349 --dev->buf_len;
3350
3351 + /* return if aborting, we only needed to read RHR to clear RXRDY */
3352 + if (dev->recv_len_abort)
3353 + return;
3354 +
3355 /* handle I2C_SMBUS_BLOCK_DATA */
3356 if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
3357 - dev->msg->flags &= ~I2C_M_RECV_LEN;
3358 - dev->buf_len += *dev->buf;
3359 - dev->msg->len = dev->buf_len + 1;
3360 - dev_dbg(dev->dev, "received block length %d\n", dev->buf_len);
3361 + /* ensure length byte is a valid value */
3362 + if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
3363 + dev->msg->flags &= ~I2C_M_RECV_LEN;
3364 + dev->buf_len += *dev->buf;
3365 + dev->msg->len = dev->buf_len + 1;
3366 + dev_dbg(dev->dev, "received block length %d\n",
3367 + dev->buf_len);
3368 + } else {
3369 + /* abort and send the stop by reading one more byte */
3370 + dev->recv_len_abort = true;
3371 + dev->buf_len = 1;
3372 + }
3373 }
3374
3375 /* send stop if second but last byte has been read */
3376 @@ -421,8 +434,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
3377 }
3378 }
3379
3380 - ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
3381 - dev->adapter.timeout);
3382 + ret = wait_for_completion_io_timeout(&dev->cmd_complete,
3383 + dev->adapter.timeout);
3384 if (ret == 0) {
3385 dev_err(dev->dev, "controller timed out\n");
3386 at91_init_twi_bus(dev);
3387 @@ -444,6 +457,12 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
3388 ret = -EIO;
3389 goto error;
3390 }
3391 + if (dev->recv_len_abort) {
3392 + dev_err(dev->dev, "invalid smbus block length recvd\n");
3393 + ret = -EPROTO;
3394 + goto error;
3395 + }
3396 +
3397 dev_dbg(dev->dev, "transfer complete\n");
3398
3399 return 0;
3400 @@ -500,6 +519,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
3401 dev->buf_len = m_start->len;
3402 dev->buf = m_start->buf;
3403 dev->msg = m_start;
3404 + dev->recv_len_abort = false;
3405
3406 ret = at91_do_twi_transfer(dev);
3407
3408 diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
3409 index 8ce4f517fc56..6e932d140573 100644
3410 --- a/drivers/i2c/busses/i2c-ismt.c
3411 +++ b/drivers/i2c/busses/i2c-ismt.c
3412 @@ -497,7 +497,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
3413 desc->wr_len_cmd = dma_size;
3414 desc->control |= ISMT_DESC_BLK;
3415 priv->dma_buffer[0] = command;
3416 - memcpy(&priv->dma_buffer[1], &data->block[1], dma_size);
3417 + memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1);
3418 } else {
3419 /* Block Read */
3420 dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: READ\n");
3421 @@ -525,7 +525,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
3422 desc->wr_len_cmd = dma_size;
3423 desc->control |= ISMT_DESC_I2C;
3424 priv->dma_buffer[0] = command;
3425 - memcpy(&priv->dma_buffer[1], &data->block[1], dma_size);
3426 + memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1);
3427 } else {
3428 /* i2c Block Read */
3429 dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA: READ\n");
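In both i2c-ismt block-write paths above, dma_buffer[0] already holds the command byte, so only dma_size - 1 bytes of user data belong after it; the old memcpy copied one byte too many from data->block. A tiny layout sketch, assuming (as the surrounding code suggests) that dma_size counts the command byte plus the payload:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char block[33] = { 3, 0xaa, 0xbb, 0xcc };	/* block[0] = count, then data */
	unsigned char dma_buffer[40];
	unsigned char command = 0x10;

	/* dma_size covers the command byte plus the payload */
	size_t dma_size = (size_t)block[0] + 1;

	dma_buffer[0] = command;
	/* only dma_size - 1 bytes of payload exist after the command byte */
	memcpy(&dma_buffer[1], &block[1], dma_size - 1);

	for (size_t i = 0; i < dma_size; i++)
		printf("%02x ", dma_buffer[i]);
	printf("\n");					/* 10 aa bb cc */
	return 0;
}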
3430 diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
3431 index d52d84937ad3..cf891752cc73 100644
3432 --- a/drivers/i2c/busses/i2c-mv64xxx.c
3433 +++ b/drivers/i2c/busses/i2c-mv64xxx.c
3434 @@ -748,8 +748,7 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
3435 }
3436 tclk = clk_get_rate(drv_data->clk);
3437
3438 - rc = of_property_read_u32(np, "clock-frequency", &bus_freq);
3439 - if (rc)
3440 + if (of_property_read_u32(np, "clock-frequency", &bus_freq))
3441 bus_freq = 100000; /* 100kHz by default */
3442
3443 if (!mv64xxx_find_baud_factors(bus_freq, tclk,
3444 diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
3445 index fe83d04784c8..6f039c300141 100644
3446 --- a/drivers/iio/accel/bma180.c
3447 +++ b/drivers/iio/accel/bma180.c
3448 @@ -571,7 +571,7 @@ static int bma180_probe(struct i2c_client *client,
3449 trig->ops = &bma180_trigger_ops;
3450 iio_trigger_set_drvdata(trig, indio_dev);
3451 data->trig = trig;
3452 - indio_dev->trig = trig;
3453 + indio_dev->trig = iio_trigger_get(trig);
3454
3455 ret = iio_trigger_register(trig);
3456 if (ret)
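This bma180 hunk, like the ad_sigma_delta, hid-sensor, st_sensors, itg3200 and inv_mpu6050 hunks that follow, assigns indio_dev->trig only after taking an extra reference with iio_trigger_get(), so that the reference the IIO core later drops through that pointer is one the driver actually took. A generic userspace sketch of the "every published pointer owns a reference" rule; the refcounting here is deliberately simplified (no atomics, no real free):

#include <stdio.h>

struct trig {
	int refcount;
};

static struct trig *trig_get(struct trig *t)
{
	t->refcount++;
	return t;
}

static void trig_put(struct trig *t)
{
	if (--t->refcount == 0)
		printf("trigger freed\n");
}

int main(void)
{
	struct trig t = { .refcount = 1 };	/* reference held by the code that allocated it */
	struct trig *published = trig_get(&t);	/* extra reference for the published pointer */

	trig_put(published);	/* the consumer drops the published reference ...           */
	trig_put(&t);		/* ... and the owner's own reference is still valid; freed now */
	return 0;
}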
3457 diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
3458 index 9a4e0e32a771..eb799a43aef0 100644
3459 --- a/drivers/iio/adc/ad_sigma_delta.c
3460 +++ b/drivers/iio/adc/ad_sigma_delta.c
3461 @@ -472,7 +472,7 @@ static int ad_sd_probe_trigger(struct iio_dev *indio_dev)
3462 goto error_free_irq;
3463
3464 /* select default trigger */
3465 - indio_dev->trig = sigma_delta->trig;
3466 + indio_dev->trig = iio_trigger_get(sigma_delta->trig);
3467
3468 return 0;
3469
3470 diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
3471 index 7dcf83998e6f..1be235b01934 100644
3472 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
3473 +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
3474 @@ -99,7 +99,8 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
3475 dev_err(&indio_dev->dev, "Trigger Register Failed\n");
3476 goto error_free_trig;
3477 }
3478 - indio_dev->trig = attrb->trigger = trig;
3479 + attrb->trigger = trig;
3480 + indio_dev->trig = iio_trigger_get(trig);
3481
3482 return ret;
3483
3484 diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
3485 index 8fc3a97eb266..8d8ca6f1e16a 100644
3486 --- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
3487 +++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
3488 @@ -49,7 +49,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
3489 dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
3490 goto iio_trigger_register_error;
3491 }
3492 - indio_dev->trig = sdata->trig;
3493 + indio_dev->trig = iio_trigger_get(sdata->trig);
3494
3495 return 0;
3496
3497 diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
3498 index e3b3c5084070..eef50e91f17c 100644
3499 --- a/drivers/iio/gyro/itg3200_buffer.c
3500 +++ b/drivers/iio/gyro/itg3200_buffer.c
3501 @@ -132,7 +132,7 @@ int itg3200_probe_trigger(struct iio_dev *indio_dev)
3502 goto error_free_irq;
3503
3504 /* select default trigger */
3505 - indio_dev->trig = st->trig;
3506 + indio_dev->trig = iio_trigger_get(st->trig);
3507
3508 return 0;
3509
3510 diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
3511 index 03b9372c1212..926fccea8de0 100644
3512 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
3513 +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
3514 @@ -135,7 +135,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
3515 ret = iio_trigger_register(st->trig);
3516 if (ret)
3517 goto error_free_irq;
3518 - indio_dev->trig = st->trig;
3519 + indio_dev->trig = iio_trigger_get(st->trig);
3520
3521 return 0;
3522
3523 diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
3524 index 1e8e94d4db7d..4fc88e617acf 100644
3525 --- a/drivers/iio/inkern.c
3526 +++ b/drivers/iio/inkern.c
3527 @@ -178,7 +178,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
3528 index = of_property_match_string(np, "io-channel-names",
3529 name);
3530 chan = of_iio_channel_get(np, index);
3531 - if (!IS_ERR(chan))
3532 + if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
3533 break;
3534 else if (name && index >= 0) {
3535 pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
3536 diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
3537 index 52bbcfa1e077..476aa132a192 100644
3538 --- a/drivers/iio/magnetometer/st_magn_core.c
3539 +++ b/drivers/iio/magnetometer/st_magn_core.c
3540 @@ -42,7 +42,8 @@
3541 #define ST_MAGN_FS_AVL_5600MG 5600
3542 #define ST_MAGN_FS_AVL_8000MG 8000
3543 #define ST_MAGN_FS_AVL_8100MG 8100
3544 -#define ST_MAGN_FS_AVL_10000MG 10000
3545 +#define ST_MAGN_FS_AVL_12000MG 12000
3546 +#define ST_MAGN_FS_AVL_16000MG 16000
3547
3548 /* CUSTOM VALUES FOR SENSOR 1 */
3549 #define ST_MAGN_1_WAI_EXP 0x3c
3550 @@ -69,20 +70,20 @@
3551 #define ST_MAGN_1_FS_AVL_4700_VAL 0x05
3552 #define ST_MAGN_1_FS_AVL_5600_VAL 0x06
3553 #define ST_MAGN_1_FS_AVL_8100_VAL 0x07
3554 -#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 1100
3555 -#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 855
3556 -#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 670
3557 -#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 450
3558 -#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 400
3559 -#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 330
3560 -#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 230
3561 -#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 980
3562 -#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 760
3563 -#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 600
3564 -#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 400
3565 -#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 355
3566 -#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 295
3567 -#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 205
3568 +#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 909
3569 +#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 1169
3570 +#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 1492
3571 +#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 2222
3572 +#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 2500
3573 +#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 3030
3574 +#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 4347
3575 +#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 1020
3576 +#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 1315
3577 +#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 1666
3578 +#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 2500
3579 +#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 2816
3580 +#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 3389
3581 +#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 4878
3582 #define ST_MAGN_1_MULTIREAD_BIT false
3583
3584 /* CUSTOM VALUES FOR SENSOR 2 */
3585 @@ -105,10 +106,12 @@
3586 #define ST_MAGN_2_FS_MASK 0x60
3587 #define ST_MAGN_2_FS_AVL_4000_VAL 0x00
3588 #define ST_MAGN_2_FS_AVL_8000_VAL 0x01
3589 -#define ST_MAGN_2_FS_AVL_10000_VAL 0x02
3590 -#define ST_MAGN_2_FS_AVL_4000_GAIN 430
3591 -#define ST_MAGN_2_FS_AVL_8000_GAIN 230
3592 -#define ST_MAGN_2_FS_AVL_10000_GAIN 230
3593 +#define ST_MAGN_2_FS_AVL_12000_VAL 0x02
3594 +#define ST_MAGN_2_FS_AVL_16000_VAL 0x03
3595 +#define ST_MAGN_2_FS_AVL_4000_GAIN 146
3596 +#define ST_MAGN_2_FS_AVL_8000_GAIN 292
3597 +#define ST_MAGN_2_FS_AVL_12000_GAIN 438
3598 +#define ST_MAGN_2_FS_AVL_16000_GAIN 584
3599 #define ST_MAGN_2_MULTIREAD_BIT false
3600 #define ST_MAGN_2_OUT_X_L_ADDR 0x28
3601 #define ST_MAGN_2_OUT_Y_L_ADDR 0x2a
3602 @@ -266,9 +269,14 @@ static const struct st_sensors st_magn_sensors[] = {
3603 .gain = ST_MAGN_2_FS_AVL_8000_GAIN,
3604 },
3605 [2] = {
3606 - .num = ST_MAGN_FS_AVL_10000MG,
3607 - .value = ST_MAGN_2_FS_AVL_10000_VAL,
3608 - .gain = ST_MAGN_2_FS_AVL_10000_GAIN,
3609 + .num = ST_MAGN_FS_AVL_12000MG,
3610 + .value = ST_MAGN_2_FS_AVL_12000_VAL,
3611 + .gain = ST_MAGN_2_FS_AVL_12000_GAIN,
3612 + },
3613 + [3] = {
3614 + .num = ST_MAGN_FS_AVL_16000MG,
3615 + .value = ST_MAGN_2_FS_AVL_16000_VAL,
3616 + .gain = ST_MAGN_2_FS_AVL_16000_GAIN,
3617 },
3618 },
3619 },
3620 diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
3621 index e7bee46868d1..abd97247443e 100644
3622 --- a/drivers/infiniband/core/uverbs_marshall.c
3623 +++ b/drivers/infiniband/core/uverbs_marshall.c
3624 @@ -140,5 +140,9 @@ void ib_copy_path_rec_from_user(struct ib_sa_path_rec *dst,
3625 dst->packet_life_time = src->packet_life_time;
3626 dst->preference = src->preference;
3627 dst->packet_life_time_selector = src->packet_life_time_selector;
3628 +
3629 + memset(dst->smac, 0, sizeof(dst->smac));
3630 + memset(dst->dmac, 0, sizeof(dst->dmac));
3631 + dst->vlan_id = 0xffff;
3632 }
3633 EXPORT_SYMBOL(ib_copy_path_rec_from_user);
3634 diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
3635 index f9c12e92fdd6..11f0606792bb 100644
3636 --- a/drivers/infiniband/hw/mlx4/main.c
3637 +++ b/drivers/infiniband/hw/mlx4/main.c
3638 @@ -1622,6 +1622,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
3639 struct inet6_dev *in6_dev;
3640 union ib_gid *pgid;
3641 struct inet6_ifaddr *ifp;
3642 + union ib_gid default_gid;
3643 #endif
3644 union ib_gid gid;
3645
3646 @@ -1642,12 +1643,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
3647 in_dev_put(in_dev);
3648 }
3649 #if IS_ENABLED(CONFIG_IPV6)
3650 + mlx4_make_default_gid(dev, &default_gid);
3651 /* IPv6 gids */
3652 in6_dev = in6_dev_get(dev);
3653 if (in6_dev) {
3654 read_lock_bh(&in6_dev->lock);
3655 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
3656 pgid = (union ib_gid *)&ifp->addr;
3657 + if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
3658 + continue;
3659 update_gid_table(ibdev, port, pgid, 0, 0);
3660 }
3661 read_unlock_bh(&in6_dev->lock);
3662 @@ -1723,31 +1727,34 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
3663 port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
3664 IB_PORT_ACTIVE : IB_PORT_DOWN;
3665 mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
3666 - } else {
3667 - reset_gid_table(ibdev, port);
3668 - }
3669 - /* if using bonding/team and a slave port is down, we don't the bond IP
3670 - * based gids in the table since flows that select port by gid may get
3671 - * the down port.
3672 - */
3673 - if (curr_master && (port_state == IB_PORT_DOWN)) {
3674 - reset_gid_table(ibdev, port);
3675 - mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
3676 - }
3677 - /* if bonding is used it is possible that we add it to masters
3678 - * only after IP address is assigned to the net bonding
3679 - * interface.
3680 - */
3681 - if (curr_master && (old_master != curr_master)) {
3682 - reset_gid_table(ibdev, port);
3683 - mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
3684 - mlx4_ib_get_dev_addr(curr_master, ibdev, port);
3685 - }
3686 + /* if using bonding/team and a slave port is down, we
3687 + * don't want the bond IP based gids in the table since
3688 + * flows that select port by gid may get the down port.
3689 + */
3690 + if (curr_master && (port_state == IB_PORT_DOWN)) {
3691 + reset_gid_table(ibdev, port);
3692 + mlx4_ib_set_default_gid(ibdev,
3693 + curr_netdev, port);
3694 + }
3695 + /* if bonding is used it is possible that we add it to
3696 + * masters only after IP address is assigned to the
3697 + * net bonding interface.
3698 + */
3699 + if (curr_master && (old_master != curr_master)) {
3700 + reset_gid_table(ibdev, port);
3701 + mlx4_ib_set_default_gid(ibdev,
3702 + curr_netdev, port);
3703 + mlx4_ib_get_dev_addr(curr_master, ibdev, port);
3704 + }
3705
3706 - if (!curr_master && (old_master != curr_master)) {
3707 + if (!curr_master && (old_master != curr_master)) {
3708 + reset_gid_table(ibdev, port);
3709 + mlx4_ib_set_default_gid(ibdev,
3710 + curr_netdev, port);
3711 + mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
3712 + }
3713 + } else {
3714 reset_gid_table(ibdev, port);
3715 - mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
3716 - mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
3717 }
3718 }
3719
3720 diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
3721 index 799a0c3bffc4..6abd3ed3cd51 100644
3722 --- a/drivers/infiniband/hw/qib/qib_debugfs.c
3723 +++ b/drivers/infiniband/hw/qib/qib_debugfs.c
3724 @@ -193,6 +193,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
3725 struct qib_qp_iter *iter;
3726 loff_t n = *pos;
3727
3728 + rcu_read_lock();
3729 iter = qib_qp_iter_init(s->private);
3730 if (!iter)
3731 return NULL;
3732 @@ -224,7 +225,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
3733
3734 static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
3735 {
3736 - /* nothing for now */
3737 + rcu_read_unlock();
3738 }
3739
3740 static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
3741 diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
3742 index 0cad0c40d742..6a71b2b41b27 100644
3743 --- a/drivers/infiniband/hw/qib/qib_qp.c
3744 +++ b/drivers/infiniband/hw/qib/qib_qp.c
3745 @@ -1324,7 +1324,6 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
3746 struct qib_qp *pqp = iter->qp;
3747 struct qib_qp *qp;
3748
3749 - rcu_read_lock();
3750 for (; n < dev->qp_table_size; n++) {
3751 if (pqp)
3752 qp = rcu_dereference(pqp->next);
3753 @@ -1332,18 +1331,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
3754 qp = rcu_dereference(dev->qp_table[n]);
3755 pqp = qp;
3756 if (qp) {
3757 - if (iter->qp)
3758 - atomic_dec(&iter->qp->refcount);
3759 - atomic_inc(&qp->refcount);
3760 - rcu_read_unlock();
3761 iter->qp = qp;
3762 iter->n = n;
3763 return 0;
3764 }
3765 }
3766 - rcu_read_unlock();
3767 - if (iter->qp)
3768 - atomic_dec(&iter->qp->refcount);
3769 return ret;
3770 }
3771
3772 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
3773 index 156205a81523..c5c194c2e0b6 100644
3774 --- a/drivers/infiniband/ulp/isert/ib_isert.c
3775 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
3776 @@ -511,7 +511,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
3777 init_completion(&isert_conn->conn_wait);
3778 init_completion(&isert_conn->conn_wait_comp_err);
3779 kref_init(&isert_conn->conn_kref);
3780 - kref_get(&isert_conn->conn_kref);
3781 mutex_init(&isert_conn->conn_mutex);
3782 spin_lock_init(&isert_conn->conn_lock);
3783 INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
3784 @@ -663,7 +662,9 @@ isert_connect_release(struct isert_conn *isert_conn)
3785 static void
3786 isert_connected_handler(struct rdma_cm_id *cma_id)
3787 {
3788 - return;
3789 + struct isert_conn *isert_conn = cma_id->context;
3790 +
3791 + kref_get(&isert_conn->conn_kref);
3792 }
3793
3794 static void
3795 @@ -715,7 +716,6 @@ isert_disconnect_work(struct work_struct *work)
3796
3797 wake_up:
3798 complete(&isert_conn->conn_wait);
3799 - isert_put_conn(isert_conn);
3800 }
3801
3802 static void
3803 @@ -2800,6 +2800,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
3804 wait_for_completion(&isert_conn->conn_wait_comp_err);
3805
3806 wait_for_completion(&isert_conn->conn_wait);
3807 + isert_put_conn(isert_conn);
3808 }
3809
3810 static void isert_free_conn(struct iscsi_conn *conn)
3811 diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
3812 index 2dd1d0dd4f7d..6f5d79569136 100644
3813 --- a/drivers/input/keyboard/atkbd.c
3814 +++ b/drivers/input/keyboard/atkbd.c
3815 @@ -1791,14 +1791,6 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
3816 {
3817 .matches = {
3818 DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
3819 - DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"),
3820 - },
3821 - .callback = atkbd_deactivate_fixup,
3822 - },
3823 - {
3824 - .matches = {
3825 - DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
3826 - DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"),
3827 },
3828 .callback = atkbd_deactivate_fixup,
3829 },
3830 diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
3831 index 233516aff595..0b75b5764f31 100644
3832 --- a/drivers/input/mouse/elantech.c
3833 +++ b/drivers/input/mouse/elantech.c
3834 @@ -1253,6 +1253,13 @@ static bool elantech_is_signature_valid(const unsigned char *param)
3835 if (param[1] == 0)
3836 return true;
3837
3838 + /*
3839 + * Some models have a revision higher than 20, meaning param[2] may
3840 + * be 10 or 20; skip the rates check for these.
3841 + */
3842 + if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
3843 + return true;
3844 +
3845 for (i = 0; i < ARRAY_SIZE(rates); i++)
3846 if (param[2] == rates[i])
3847 return false;
3848 diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
3849 index ef9e0b8a9aa7..a50a2a7a43f7 100644
3850 --- a/drivers/input/mouse/synaptics.c
3851 +++ b/drivers/input/mouse/synaptics.c
3852 @@ -626,10 +626,61 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
3853 ((buf[0] & 0x04) >> 1) |
3854 ((buf[3] & 0x04) >> 2));
3855
3856 + if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
3857 + SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
3858 + hw->w == 2) {
3859 + synaptics_parse_agm(buf, priv, hw);
3860 + return 1;
3861 + }
3862 +
3863 + hw->x = (((buf[3] & 0x10) << 8) |
3864 + ((buf[1] & 0x0f) << 8) |
3865 + buf[4]);
3866 + hw->y = (((buf[3] & 0x20) << 7) |
3867 + ((buf[1] & 0xf0) << 4) |
3868 + buf[5]);
3869 + hw->z = buf[2];
3870 +
3871 hw->left = (buf[0] & 0x01) ? 1 : 0;
3872 hw->right = (buf[0] & 0x02) ? 1 : 0;
3873
3874 - if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
3875 + if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) {
3876 + /*
3877 + * ForcePads, like Clickpads, use middle button
3878 + * bits to report primary button clicks.
3879 + * Unfortunately they report the primary button not
3880 + * only when the user presses on the pad above a
3881 + * certain threshold, but also when there is more than
3882 + * one finger on the touchpad, which interferes with
3883 + * our multi-finger gestures.
3884 + */
3885 + if (hw->z == 0) {
3886 + /* No contacts */
3887 + priv->press = priv->report_press = false;
3888 + } else if (hw->w >= 4 && ((buf[0] ^ buf[3]) & 0x01)) {
3889 + /*
3890 + * Single-finger touch with pressure above
3891 + * the threshold. If pressure stays long
3892 + * enough, we'll start reporting the primary
3893 + * button. We rely on the device continuing
3894 + * to send data even if the finger does not
3895 + * move.
3896 + */
3897 + if (!priv->press) {
3898 + priv->press_start = jiffies;
3899 + priv->press = true;
3900 + } else if (time_after(jiffies,
3901 + priv->press_start +
3902 + msecs_to_jiffies(50))) {
3903 + priv->report_press = true;
3904 + }
3905 + } else {
3906 + priv->press = false;
3907 + }
3908 +
3909 + hw->left = priv->report_press;
3910 +
3911 + } else if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
3912 /*
3913 * Clickpad's button is transmitted as middle button,
3914 * however, since it is primary button, we will report
3915 @@ -648,21 +699,6 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
3916 hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
3917 }
3918
3919 - if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
3920 - SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
3921 - hw->w == 2) {
3922 - synaptics_parse_agm(buf, priv, hw);
3923 - return 1;
3924 - }
3925 -
3926 - hw->x = (((buf[3] & 0x10) << 8) |
3927 - ((buf[1] & 0x0f) << 8) |
3928 - buf[4]);
3929 - hw->y = (((buf[3] & 0x20) << 7) |
3930 - ((buf[1] & 0xf0) << 4) |
3931 - buf[5]);
3932 - hw->z = buf[2];
3933 -
3934 if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
3935 ((buf[0] ^ buf[3]) & 0x02)) {
3936 switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
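The synaptics.c hunk above reports the primary button on a ForcePad only after a hard single-finger press has been held for roughly 50 ms, tracked with press_start = jiffies and time_after(). Below is a standalone sketch of that debounce with the timestamp passed in by the caller instead of read from jiffies; the function and field names are illustrative, and >= 50 is used where the driver uses time_after() with msecs_to_jiffies(50):

#include <stdbool.h>
#include <stdio.h>

struct forcepad_state {
	bool press;
	bool report_press;
	unsigned long press_start_ms;
};

/* z > 0 means some contact; 'hard_single_press' mirrors the driver's w/buf checks */
static void forcepad_update(struct forcepad_state *s, int z,
			    bool hard_single_press, unsigned long now_ms)
{
	if (z == 0) {
		s->press = s->report_press = false;
	} else if (hard_single_press) {
		if (!s->press) {
			s->press_start_ms = now_ms;
			s->press = true;
		} else if (now_ms - s->press_start_ms >= 50) {
			s->report_press = true;
		}
	} else {
		s->press = false;
	}
}

int main(void)
{
	struct forcepad_state s = { 0 };

	forcepad_update(&s, 30, true, 1000);	/* press starts  */
	forcepad_update(&s, 30, true, 1030);	/* too early     */
	printf("button=%d\n", s.report_press);	/* 0 */
	forcepad_update(&s, 30, true, 1060);	/* held >= 50 ms */
	printf("button=%d\n", s.report_press);	/* 1 */
	forcepad_update(&s, 0, false, 1100);	/* finger lifted */
	printf("button=%d\n", s.report_press);	/* 0 */
	return 0;
}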
3937 diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
3938 index e594af0b264b..fb2e076738ae 100644
3939 --- a/drivers/input/mouse/synaptics.h
3940 +++ b/drivers/input/mouse/synaptics.h
3941 @@ -78,6 +78,11 @@
3942 * 2 0x08 image sensor image sensor tracks 5 fingers, but only
3943 * reports 2.
3944 * 2 0x20 report min query 0x0f gives min coord reported
3945 + * 2 0x80 forcepad forcepad is a variant of clickpad that
3946 + * does not have physical buttons but rather
3947 + * uses pressure above certain threshold to
3948 + * report primary clicks. Forcepads also have
3949 + * clickpad bit set.
3950 */
3951 #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
3952 #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
3953 @@ -86,6 +91,7 @@
3954 #define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000)
3955 #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
3956 #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800)
3957 +#define SYN_CAP_FORCEPAD(ex0c) ((ex0c) & 0x008000)
3958
3959 /* synaptics modes query bits */
3960 #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
3961 @@ -177,6 +183,11 @@ struct synaptics_data {
3962 */
3963 struct synaptics_hw_state agm;
3964 bool agm_pending; /* new AGM packet received */
3965 +
3966 + /* ForcePad handling */
3967 + unsigned long press_start;
3968 + bool press;
3969 + bool report_press;
3970 };
3971
3972 void synaptics_module_init(void);
3973 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
3974 index 381b20d4c561..f1da362c3e65 100644
3975 --- a/drivers/input/serio/i8042-x86ia64io.h
3976 +++ b/drivers/input/serio/i8042-x86ia64io.h
3977 @@ -458,6 +458,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
3978 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
3979 },
3980 },
3981 + {
3982 + /* Avatar AVIU-145A6 */
3983 + .matches = {
3984 + DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
3985 + DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
3986 + },
3987 + },
3988 { }
3989 };
3990
3991 @@ -601,6 +608,14 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
3992 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
3993 },
3994 },
3995 + {
3996 + /* Fujitsu U574 laptop */
3997 + /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
3998 + .matches = {
3999 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
4000 + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
4001 + },
4002 + },
4003 { }
4004 };
4005
4006 diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
4007 index 0cb7ef59071b..69175b825346 100644
4008 --- a/drivers/input/serio/serport.c
4009 +++ b/drivers/input/serio/serport.c
4010 @@ -21,6 +21,7 @@
4011 #include <linux/init.h>
4012 #include <linux/serio.h>
4013 #include <linux/tty.h>
4014 +#include <linux/compat.h>
4015
4016 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
4017 MODULE_DESCRIPTION("Input device TTY line discipline");
4018 @@ -198,28 +199,55 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u
4019 return 0;
4020 }
4021
4022 +static void serport_set_type(struct tty_struct *tty, unsigned long type)
4023 +{
4024 + struct serport *serport = tty->disc_data;
4025 +
4026 + serport->id.proto = type & 0x000000ff;
4027 + serport->id.id = (type & 0x0000ff00) >> 8;
4028 + serport->id.extra = (type & 0x00ff0000) >> 16;
4029 +}
4030 +
4031 /*
4032 * serport_ldisc_ioctl() allows to set the port protocol, and device ID
4033 */
4034
4035 -static int serport_ldisc_ioctl(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg)
4036 +static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
4037 + unsigned int cmd, unsigned long arg)
4038 {
4039 - struct serport *serport = (struct serport*) tty->disc_data;
4040 - unsigned long type;
4041 -
4042 if (cmd == SPIOCSTYPE) {
4043 + unsigned long type;
4044 +
4045 if (get_user(type, (unsigned long __user *) arg))
4046 return -EFAULT;
4047
4048 - serport->id.proto = type & 0x000000ff;
4049 - serport->id.id = (type & 0x0000ff00) >> 8;
4050 - serport->id.extra = (type & 0x00ff0000) >> 16;
4051 + serport_set_type(tty, type);
4052 + return 0;
4053 + }
4054 +
4055 + return -EINVAL;
4056 +}
4057 +
4058 +#ifdef CONFIG_COMPAT
4059 +#define COMPAT_SPIOCSTYPE _IOW('q', 0x01, compat_ulong_t)
4060 +static long serport_ldisc_compat_ioctl(struct tty_struct *tty,
4061 + struct file *file,
4062 + unsigned int cmd, unsigned long arg)
4063 +{
4064 + if (cmd == COMPAT_SPIOCSTYPE) {
4065 + void __user *uarg = compat_ptr(arg);
4066 + compat_ulong_t compat_type;
4067 +
4068 + if (get_user(compat_type, (compat_ulong_t __user *)uarg))
4069 + return -EFAULT;
4070
4071 + serport_set_type(tty, compat_type);
4072 return 0;
4073 }
4074
4075 return -EINVAL;
4076 }
4077 +#endif
4078
4079 static void serport_ldisc_write_wakeup(struct tty_struct * tty)
4080 {
4081 @@ -243,6 +271,9 @@ static struct tty_ldisc_ops serport_ldisc = {
4082 .close = serport_ldisc_close,
4083 .read = serport_ldisc_read,
4084 .ioctl = serport_ldisc_ioctl,
4085 +#ifdef CONFIG_COMPAT
4086 + .compat_ioctl = serport_ldisc_compat_ioctl,
4087 +#endif
4088 .receive_buf = serport_ldisc_receive,
4089 .write_wakeup = serport_ldisc_write_wakeup
4090 };
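The serport hunk above factors the SPIOCSTYPE decoding into serport_set_type() and adds a compat_ioctl handler: because _IOW() folds the argument size into the ioctl number, a 32-bit process on a 64-bit kernel issues a different number for its compat_ulong_t argument than the native SPIOCSTYPE, so both numbers must be recognized and decoded the same way. A sketch of the field extraction shared by both paths:

#include <stdio.h>

struct serio_id {
	unsigned char proto, id, extra;
};

/* layout used by SPIOCSTYPE: proto in bits 0-7, id in 8-15, extra in 16-23 */
static struct serio_id decode_type(unsigned long type)
{
	struct serio_id out = {
		.proto = type & 0x000000ff,
		.id    = (type & 0x0000ff00) >> 8,
		.extra = (type & 0x00ff0000) >> 16,
	};
	return out;
}

int main(void)
{
	struct serio_id id = decode_type(0x00030201UL);
	printf("proto=%u id=%u extra=%u\n", id.proto, id.id, id.extra);	/* 1 2 3 */
	return 0;
}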
4091 diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
4092 index 1d9ab39af29f..2ecac467f78f 100644
4093 --- a/drivers/iommu/arm-smmu.c
4094 +++ b/drivers/iommu/arm-smmu.c
4095 @@ -794,8 +794,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
4096 reg |= TTBCR_EAE |
4097 (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
4098 (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
4099 - (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
4100 - (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
4101 + (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);
4102 +
4103 + if (!stage1)
4104 + reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
4105 +
4106 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
4107
4108 /* MAIR0 (stage-1 only) */
4109 diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
4110 index 735e939a846d..2331543005b2 100644
4111 --- a/drivers/md/dm-cache-target.c
4112 +++ b/drivers/md/dm-cache-target.c
4113 @@ -890,8 +890,8 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
4114 struct cache *cache = mg->cache;
4115
4116 if (mg->writeback) {
4117 - cell_defer(cache, mg->old_ocell, false);
4118 clear_dirty(cache, mg->old_oblock, mg->cblock);
4119 + cell_defer(cache, mg->old_ocell, false);
4120 cleanup_migration(mg);
4121 return;
4122
4123 @@ -946,13 +946,13 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
4124 }
4125
4126 } else {
4127 + clear_dirty(cache, mg->new_oblock, mg->cblock);
4128 if (mg->requeue_holder)
4129 cell_defer(cache, mg->new_ocell, true);
4130 else {
4131 bio_endio(mg->new_ocell->holder, 0);
4132 cell_defer(cache, mg->new_ocell, false);
4133 }
4134 - clear_dirty(cache, mg->new_oblock, mg->cblock);
4135 cleanup_migration(mg);
4136 }
4137 }
4138 diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
4139 index 53b213226c01..9533f835ce07 100644
4140 --- a/drivers/md/dm-crypt.c
4141 +++ b/drivers/md/dm-crypt.c
4142 @@ -1681,6 +1681,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
4143 unsigned int key_size, opt_params;
4144 unsigned long long tmpll;
4145 int ret;
4146 + size_t iv_size_padding;
4147 struct dm_arg_set as;
4148 const char *opt_string;
4149 char dummy;
4150 @@ -1717,12 +1718,23 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
4151
4152 cc->dmreq_start = sizeof(struct ablkcipher_request);
4153 cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
4154 - cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
4155 - cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
4156 - ~(crypto_tfm_ctx_alignment() - 1);
4157 + cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
4158 +
4159 + if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
4160 + /* Allocate the padding exactly */
4161 + iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
4162 + & crypto_ablkcipher_alignmask(any_tfm(cc));
4163 + } else {
4164 + /*
4165 + * If the cipher requires greater alignment than kmalloc
4166 + * alignment, we don't know the exact position of the
4167 + * initialization vector. We must assume worst case.
4168 + */
4169 + iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
4170 + }
4171
4172 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
4173 - sizeof(struct dm_crypt_request) + cc->iv_size);
4174 + sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
4175 if (!cc->req_pool) {
4176 ti->error = "Cannot allocate crypt request mempool";
4177 goto bad;
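The dm-crypt hunk above aligns dmreq_start to __alignof__(struct dm_crypt_request) and then computes iv_size_padding so the IV placed after the request structure lands on the cipher's alignment boundary; the expression -(offset) & mask is the usual "round up to the next multiple of mask + 1" idiom when mask is one less than a power of two (the else branch simply assumes the worst case, a full alignmask of padding). A small worked example of that arithmetic with illustrative numbers:

#include <stdio.h>

/* bytes to add after 'offset' so the next object starts on an (mask + 1) boundary */
static unsigned long pad_to_alignmask(unsigned long offset, unsigned long mask)
{
	return -offset & mask;
}

int main(void)
{
	/* e.g. the request data ends at byte 52 and the cipher wants 16-byte aligned IVs (mask 15) */
	unsigned long off = 52, mask = 15;
	unsigned long pad = pad_to_alignmask(off, mask);

	printf("pad=%lu, iv starts at %lu\n", pad, off + pad);	/* pad=12, iv starts at 64 */
	return 0;
}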
4178 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
4179 index d7690f86fdb9..55de4f6f7eaf 100644
4180 --- a/drivers/md/raid1.c
4181 +++ b/drivers/md/raid1.c
4182 @@ -540,11 +540,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
4183 has_nonrot_disk = 0;
4184 choose_next_idle = 0;
4185
4186 - if (conf->mddev->recovery_cp < MaxSector &&
4187 - (this_sector + sectors >= conf->next_resync))
4188 - choose_first = 1;
4189 - else
4190 - choose_first = 0;
4191 + choose_first = (conf->mddev->recovery_cp < this_sector + sectors);
4192
4193 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
4194 sector_t dist;
4195 @@ -831,7 +827,7 @@ static void flush_pending_writes(struct r1conf *conf)
4196 * there is no normal IO happeing. It must arrange to call
4197 * lower_barrier when the particular background IO completes.
4198 */
4199 -static void raise_barrier(struct r1conf *conf)
4200 +static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
4201 {
4202 spin_lock_irq(&conf->resync_lock);
4203
4204 @@ -841,6 +837,7 @@ static void raise_barrier(struct r1conf *conf)
4205
4206 /* block any new IO from starting */
4207 conf->barrier++;
4208 + conf->next_resync = sector_nr;
4209
4210 /* For these conditions we must wait:
4211 * A: while the array is in frozen state
4212 @@ -849,14 +846,17 @@ static void raise_barrier(struct r1conf *conf)
4213 * C: next_resync + RESYNC_SECTORS > start_next_window, meaning
4214 * next resync will reach to the window which normal bios are
4215 * handling.
4216 + * D: while there are any active requests in the current window.
4217 */
4218 wait_event_lock_irq(conf->wait_barrier,
4219 !conf->array_frozen &&
4220 conf->barrier < RESYNC_DEPTH &&
4221 + conf->current_window_requests == 0 &&
4222 (conf->start_next_window >=
4223 conf->next_resync + RESYNC_SECTORS),
4224 conf->resync_lock);
4225
4226 + conf->nr_pending++;
4227 spin_unlock_irq(&conf->resync_lock);
4228 }
4229
4230 @@ -866,6 +866,7 @@ static void lower_barrier(struct r1conf *conf)
4231 BUG_ON(conf->barrier <= 0);
4232 spin_lock_irqsave(&conf->resync_lock, flags);
4233 conf->barrier--;
4234 + conf->nr_pending--;
4235 spin_unlock_irqrestore(&conf->resync_lock, flags);
4236 wake_up(&conf->wait_barrier);
4237 }
4238 @@ -877,12 +878,10 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
4239 if (conf->array_frozen || !bio)
4240 wait = true;
4241 else if (conf->barrier && bio_data_dir(bio) == WRITE) {
4242 - if (conf->next_resync < RESYNC_WINDOW_SECTORS)
4243 - wait = true;
4244 - else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
4245 - >= bio_end_sector(bio)) ||
4246 - (conf->next_resync + NEXT_NORMALIO_DISTANCE
4247 - <= bio->bi_iter.bi_sector))
4248 + if ((conf->mddev->curr_resync_completed
4249 + >= bio_end_sector(bio)) ||
4250 + (conf->next_resync + NEXT_NORMALIO_DISTANCE
4251 + <= bio->bi_iter.bi_sector))
4252 wait = false;
4253 else
4254 wait = true;
4255 @@ -919,8 +918,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
4256 }
4257
4258 if (bio && bio_data_dir(bio) == WRITE) {
4259 - if (conf->next_resync + NEXT_NORMALIO_DISTANCE
4260 - <= bio->bi_iter.bi_sector) {
4261 + if (bio->bi_iter.bi_sector >=
4262 + conf->mddev->curr_resync_completed) {
4263 if (conf->start_next_window == MaxSector)
4264 conf->start_next_window =
4265 conf->next_resync +
4266 @@ -1186,6 +1185,7 @@ read_again:
4267 atomic_read(&bitmap->behind_writes) == 0);
4268 }
4269 r1_bio->read_disk = rdisk;
4270 + r1_bio->start_next_window = 0;
4271
4272 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
4273 bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
4274 @@ -1548,8 +1548,13 @@ static void close_sync(struct r1conf *conf)
4275 mempool_destroy(conf->r1buf_pool);
4276 conf->r1buf_pool = NULL;
4277
4278 + spin_lock_irq(&conf->resync_lock);
4279 conf->next_resync = 0;
4280 conf->start_next_window = MaxSector;
4281 + conf->current_window_requests +=
4282 + conf->next_window_requests;
4283 + conf->next_window_requests = 0;
4284 + spin_unlock_irq(&conf->resync_lock);
4285 }
4286
4287 static int raid1_spare_active(struct mddev *mddev)
4288 @@ -2150,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
4289 d--;
4290 rdev = conf->mirrors[d].rdev;
4291 if (rdev &&
4292 - test_bit(In_sync, &rdev->flags))
4293 + !test_bit(Faulty, &rdev->flags))
4294 r1_sync_page_io(rdev, sect, s,
4295 conf->tmppage, WRITE);
4296 }
4297 @@ -2162,7 +2167,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
4298 d--;
4299 rdev = conf->mirrors[d].rdev;
4300 if (rdev &&
4301 - test_bit(In_sync, &rdev->flags)) {
4302 + !test_bit(Faulty, &rdev->flags)) {
4303 if (r1_sync_page_io(rdev, sect, s,
4304 conf->tmppage, READ)) {
4305 atomic_add(s, &rdev->corrected_errors);
4306 @@ -2541,9 +2546,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
4307
4308 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
4309 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
4310 - raise_barrier(conf);
4311
4312 - conf->next_resync = sector_nr;
4313 + raise_barrier(conf, sector_nr);
4314
4315 rcu_read_lock();
4316 /*
4317 diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
4318 index 80643ef9183f..fabe2fce9bc5 100644
4319 --- a/drivers/media/dvb-core/dvb-usb-ids.h
4320 +++ b/drivers/media/dvb-core/dvb-usb-ids.h
4321 @@ -279,6 +279,8 @@
4322 #define USB_PID_PCTV_400E 0x020f
4323 #define USB_PID_PCTV_450E 0x0222
4324 #define USB_PID_PCTV_452E 0x021f
4325 +#define USB_PID_PCTV_78E 0x025a
4326 +#define USB_PID_PCTV_79E 0x0262
4327 #define USB_PID_REALTEK_RTL2831U 0x2831
4328 #define USB_PID_REALTEK_RTL2832U 0x2832
4329 #define USB_PID_TECHNOTREND_CONNECT_S2_3600 0x3007
4330 diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
4331 index 71c8570bd9ea..112394d138c9 100644
4332 --- a/drivers/media/i2c/adv7604.c
4333 +++ b/drivers/media/i2c/adv7604.c
4334 @@ -1984,7 +1984,7 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
4335 v4l2_info(sd, "HDCP keys read: %s%s\n",
4336 (hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no",
4337 (hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : "");
4338 - if (!is_hdmi(sd)) {
4339 + if (is_hdmi(sd)) {
4340 bool audio_pll_locked = hdmi_read(sd, 0x04) & 0x01;
4341 bool audio_sample_packet_detect = hdmi_read(sd, 0x18) & 0x01;
4342 bool audio_mute = io_read(sd, 0x65) & 0x40;
4343 diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
4344 index 716bdc57fac6..83f5074706f9 100644
4345 --- a/drivers/media/pci/cx18/cx18-driver.c
4346 +++ b/drivers/media/pci/cx18/cx18-driver.c
4347 @@ -1091,6 +1091,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
4348 setup.addr = ADDR_UNSET;
4349 setup.type = cx->options.tuner;
4350 setup.mode_mask = T_ANALOG_TV; /* matches TV tuners */
4351 + setup.config = NULL;
4352 if (cx->options.radio > 0)
4353 setup.mode_mask |= T_RADIO;
4354 setup.tuner_callback = (setup.type == TUNER_XC2028) ?
4355 diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
4356 index 8ede8ea762e6..88228f735342 100644
4357 --- a/drivers/media/usb/dvb-usb-v2/af9035.c
4358 +++ b/drivers/media/usb/dvb-usb-v2/af9035.c
4359 @@ -1541,6 +1541,10 @@ static const struct usb_device_id af9035_id_table[] = {
4360 &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
4361 { DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
4362 &af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
4363 + { DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_78E,
4364 + &af9035_props, "PCTV 78e", RC_MAP_IT913X_V1) },
4365 + { DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_79E,
4366 + &af9035_props, "PCTV 79e", RC_MAP_IT913X_V2) },
4367 { }
4368 };
4369 MODULE_DEVICE_TABLE(usb, af9035_id_table);
4370 diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
4371 index 1fc8334fc181..55e3075492da 100644
4372 --- a/drivers/net/ethernet/ibm/ibmveth.c
4373 +++ b/drivers/net/ethernet/ibm/ibmveth.c
4374 @@ -292,6 +292,18 @@ failure:
4375 atomic_add(buffers_added, &(pool->available));
4376 }
4377
4378 +/*
4379 + * The final 8 bytes of the buffer list is a counter of frames dropped
4380 + * because there was not a buffer in the buffer list capable of holding
4381 + * the frame.
4382 + */
4383 +static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
4384 +{
4385 + __be64 *p = adapter->buffer_list_addr + 4096 - 8;
4386 +
4387 + adapter->rx_no_buffer = be64_to_cpup(p);
4388 +}
4389 +
4390 /* replenish routine */
4391 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
4392 {
4393 @@ -307,8 +319,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
4394 ibmveth_replenish_buffer_pool(adapter, pool);
4395 }
4396
4397 - adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
4398 - 4096 - 8);
4399 + ibmveth_update_rx_no_buffer(adapter);
4400 }
4401
4402 /* empty and free ana buffer pool - also used to do cleanup in error paths */
4403 @@ -698,8 +709,7 @@ static int ibmveth_close(struct net_device *netdev)
4404
4405 free_irq(netdev->irq, netdev);
4406
4407 - adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
4408 - 4096 - 8);
4409 + ibmveth_update_rx_no_buffer(adapter);
4410
4411 ibmveth_cleanup(adapter);
4412
4413 diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
4414 index 8596aba34f96..237d0cda1bcb 100644
4415 --- a/drivers/net/wireless/ath/carl9170/carl9170.h
4416 +++ b/drivers/net/wireless/ath/carl9170/carl9170.h
4417 @@ -256,6 +256,7 @@ struct ar9170 {
4418 atomic_t rx_work_urbs;
4419 atomic_t rx_pool_urbs;
4420 kernel_ulong_t features;
4421 + bool usb_ep_cmd_is_bulk;
4422
4423 /* firmware settings */
4424 struct completion fw_load_wait;
4425 diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
4426 index ca115f33746f..bc931f6f1f0f 100644
4427 --- a/drivers/net/wireless/ath/carl9170/usb.c
4428 +++ b/drivers/net/wireless/ath/carl9170/usb.c
4429 @@ -621,9 +621,16 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
4430 goto err_free;
4431 }
4432
4433 - usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev,
4434 - AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4,
4435 - carl9170_usb_cmd_complete, ar, 1);
4436 + if (ar->usb_ep_cmd_is_bulk)
4437 + usb_fill_bulk_urb(urb, ar->udev,
4438 + usb_sndbulkpipe(ar->udev, AR9170_USB_EP_CMD),
4439 + cmd, cmd->hdr.len + 4,
4440 + carl9170_usb_cmd_complete, ar);
4441 + else
4442 + usb_fill_int_urb(urb, ar->udev,
4443 + usb_sndintpipe(ar->udev, AR9170_USB_EP_CMD),
4444 + cmd, cmd->hdr.len + 4,
4445 + carl9170_usb_cmd_complete, ar, 1);
4446
4447 if (free_buf)
4448 urb->transfer_flags |= URB_FREE_BUFFER;
4449 @@ -1032,9 +1039,10 @@ static void carl9170_usb_firmware_step2(const struct firmware *fw,
4450 static int carl9170_usb_probe(struct usb_interface *intf,
4451 const struct usb_device_id *id)
4452 {
4453 + struct usb_endpoint_descriptor *ep;
4454 struct ar9170 *ar;
4455 struct usb_device *udev;
4456 - int err;
4457 + int i, err;
4458
4459 err = usb_reset_device(interface_to_usbdev(intf));
4460 if (err)
4461 @@ -1050,6 +1058,21 @@ static int carl9170_usb_probe(struct usb_interface *intf,
4462 ar->intf = intf;
4463 ar->features = id->driver_info;
4464
4465 + /* We need to remember the type of endpoint 4 because it differs
4466 + * between high- and full-speed configuration. The high-speed
4467 + * configuration specifies it as interrupt and the full-speed
4468 + * configuration as bulk endpoint. This information is required
4469 + * later when sending urbs to that endpoint.
4470 + */
4471 + for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; ++i) {
4472 + ep = &intf->cur_altsetting->endpoint[i].desc;
4473 +
4474 + if (usb_endpoint_num(ep) == AR9170_USB_EP_CMD &&
4475 + usb_endpoint_dir_out(ep) &&
4476 + usb_endpoint_type(ep) == USB_ENDPOINT_XFER_BULK)
4477 + ar->usb_ep_cmd_is_bulk = true;
4478 + }
4479 +
4480 usb_set_intfdata(intf, ar);
4481 SET_IEEE80211_DEV(ar->hw, &intf->dev);
4482
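The carl9170 probe hunk above walks the interface's endpoint descriptors once and records whether endpoint 4 (AR9170_USB_EP_CMD) is a bulk OUT endpoint, since the full-speed configuration exposes it as bulk while high-speed exposes it as interrupt, and __carl9170_exec_cmd() then fills either a bulk or an interrupt URB. A simplified standalone sketch of that classification, with the descriptor reduced to the three fields that matter (the real code uses usb_endpoint_num(), usb_endpoint_dir_out() and usb_endpoint_type()):

#include <stdbool.h>
#include <stdio.h>

#define EP_CMD 4

struct ep_desc {
	unsigned num;		/* endpoint number */
	bool dir_out;		/* host-to-device? */
	bool is_bulk;		/* bulk vs. interrupt */
};

static bool cmd_ep_is_bulk(const struct ep_desc *eps, unsigned n)
{
	for (unsigned i = 0; i < n; i++)
		if (eps[i].num == EP_CMD && eps[i].dir_out && eps[i].is_bulk)
			return true;
	return false;
}

int main(void)
{
	/* a full-speed style altsetting: command endpoint 4 is a bulk OUT endpoint */
	struct ep_desc full_speed[] = {
		{ 1, false, true },
		{ 4, true,  true },
	};
	printf("cmd ep bulk: %d\n", cmd_ep_is_bulk(full_speed, 2));	/* 1 */
	return 0;
}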
4483 diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
4484 index fad77dd2a3a5..3f9cb894d001 100644
4485 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
4486 +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
4487 @@ -185,7 +185,13 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
4488 ifevent->action, ifevent->ifidx, ifevent->bssidx,
4489 ifevent->flags, ifevent->role);
4490
4491 - if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) {
4492 + /* The P2P Device interface event must not be ignored
4493 + * contrary to what firmware tells us. The only way to
4494 + * distinguish the P2P Device is by looking at the ifidx
4495 + * and bssidx received.
4496 + */
4497 + if (!(ifevent->ifidx == 0 && ifevent->bssidx == 1) &&
4498 + (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
4499 brcmf_dbg(EVENT, "event can be ignored\n");
4500 return;
4501 }
4502 @@ -210,12 +216,12 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
4503 return;
4504 }
4505
4506 - if (ifevent->action == BRCMF_E_IF_CHANGE)
4507 + if (ifp && ifevent->action == BRCMF_E_IF_CHANGE)
4508 brcmf_fws_reset_interface(ifp);
4509
4510 err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
4511
4512 - if (ifevent->action == BRCMF_E_IF_DEL) {
4513 + if (ifp && ifevent->action == BRCMF_E_IF_DEL) {
4514 brcmf_fws_del_interface(ifp);
4515 brcmf_del_if(drvr, ifevent->bssidx);
4516 }
4517 diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
4518 index 51b53a73d074..d26b47698f68 100644
4519 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
4520 +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
4521 @@ -167,6 +167,8 @@ enum brcmf_fweh_event_code {
4522 #define BRCMF_E_IF_ROLE_STA 0
4523 #define BRCMF_E_IF_ROLE_AP 1
4524 #define BRCMF_E_IF_ROLE_WDS 2
4525 +#define BRCMF_E_IF_ROLE_P2P_GO 3
4526 +#define BRCMF_E_IF_ROLE_P2P_CLIENT 4
4527
4528 /**
4529 * definitions for event packet validation.
4530 diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
4531 index c1e311341b74..503a81e58185 100644
4532 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
4533 +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
4534 @@ -1068,6 +1068,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
4535 /* recalculate basic rates */
4536 iwl_calc_basic_rates(priv, ctx);
4537
4538 + /*
4539 + * force CTS-to-self frames protection if RTS-CTS is not preferred
4540 + * one aggregation protection method
4541 + */
4542 + if (!priv->hw_params.use_rts_for_aggregation)
4543 + ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
4544 +
4545 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
4546 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
4547 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
4548 @@ -1473,6 +1480,11 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
4549 else
4550 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
4551
4552 + if (bss_conf->use_cts_prot)
4553 + ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
4554 + else
4555 + ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
4556 +
4557 memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
4558
4559 if (vif->type == NL80211_IFTYPE_AP ||
4560 diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
4561 index 1ced525157dc..b45d78f53f08 100644
4562 --- a/drivers/net/wireless/iwlwifi/iwl-config.h
4563 +++ b/drivers/net/wireless/iwlwifi/iwl-config.h
4564 @@ -119,6 +119,8 @@ enum iwl_led_mode {
4565 #define IWL_LONG_WD_TIMEOUT 10000
4566 #define IWL_MAX_WD_TIMEOUT 120000
4567
4568 +#define IWL_DEFAULT_MAX_TX_POWER 22
4569 +
4570 /* Antenna presence definitions */
4571 #define ANT_NONE 0x0
4572 #define ANT_A BIT(0)
4573 diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
4574 index 725e954d8475..3c3eb7842c62 100644
4575 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
4576 +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
4577 @@ -118,8 +118,6 @@ static const u8 iwl_nvm_channels[] = {
4578 #define LAST_2GHZ_HT_PLUS 9
4579 #define LAST_5GHZ_HT 161
4580
4581 -#define DEFAULT_MAX_TX_POWER 16
4582 -
4583 /* rate data (static) */
4584 static struct ieee80211_rate iwl_cfg80211_rates[] = {
4585 { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
4586 @@ -242,7 +240,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
4587 * Default value - highest tx power value. max_power
4588 * is not used in mvm, and is used for backwards compatibility
4589 */
4590 - channel->max_power = DEFAULT_MAX_TX_POWER;
4591 + channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
4592 is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
4593 IWL_DEBUG_EEPROM(dev,
4594 "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
4595 diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
4596 index 989d7dbdca6c..d0a04779d734 100644
4597 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
4598 +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
4599 @@ -1415,14 +1415,14 @@ enum iwl_sf_scenario {
4600
4601 /**
4602 * Smart Fifo configuration command.
4603 - * @state: smart fifo state, types listed in iwl_sf_sate.
4604 + * @state: smart fifo state, types listed in enum %iwl_sf_sate.
4605 * @watermark: Minimum allowed availabe free space in RXF for transient state.
4606 * @long_delay_timeouts: aging and idle timer values for each scenario
4607 * in long delay state.
4608 * @full_on_timeouts: timer values for each scenario in full on state.
4609 */
4610 struct iwl_sf_cfg_cmd {
4611 - enum iwl_sf_state state;
4612 + __le32 state;
4613 __le32 watermark[SF_TRANSIENT_STATES_NUMBER];
4614 __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
4615 __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
4616 diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
4617 index 88809b2d1654..dab8fd13857a 100644
4618 --- a/drivers/net/wireless/iwlwifi/mvm/sf.c
4619 +++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
4620 @@ -172,7 +172,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
4621 enum iwl_sf_state new_state)
4622 {
4623 struct iwl_sf_cfg_cmd sf_cmd = {
4624 - .state = new_state,
4625 + .state = cpu_to_le32(new_state),
4626 };
4627 struct ieee80211_sta *sta;
4628 int ret = 0;
4629 diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
4630 index 76ee486039d7..2ca62af3f81b 100644
4631 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c
4632 +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
4633 @@ -173,10 +173,14 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
4634
4635 /*
4636 * for data packets, rate info comes from the table inside the fw. This
4637 - * table is controlled by LINK_QUALITY commands
4638 + * table is controlled by LINK_QUALITY commands. Exclude ctrl port
4639 + * frames like EAPOLs which should be treated as mgmt frames. This
4640 + * avoids them being sent initially in high rates which increases the
4641 + * chances for completion of the 4-Way handshake.
4642 */
4643
4644 - if (ieee80211_is_data(fc) && sta) {
4645 + if (ieee80211_is_data(fc) && sta &&
4646 + !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) {
4647 tx_cmd->initial_rate_index = 0;
4648 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
4649 return;
4650 diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
4651 index c61311084d7e..f58316769159 100644
4652 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
4653 +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
4654 @@ -317,6 +317,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
4655 {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
4656 {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
4657 {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
4658 + {RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */
4659 {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
4660 {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
4661 {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
4662 diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
4663 index f868333271aa..963a4a5dc88e 100644
4664 --- a/drivers/nfc/microread/microread.c
4665 +++ b/drivers/nfc/microread/microread.c
4666 @@ -501,9 +501,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
4667 targets->sens_res =
4668 be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]);
4669 targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK];
4670 - memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
4671 - skb->data[MICROREAD_EMCF_A_LEN]);
4672 targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN];
4673 + if (targets->nfcid1_len > sizeof(targets->nfcid1)) {
4674 + r = -EINVAL;
4675 + goto exit_free;
4676 + }
4677 + memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
4678 + targets->nfcid1_len);
4679 break;
4680 case MICROREAD_GATE_ID_MREAD_ISO_A_3:
4681 targets->supported_protocols =
4682 @@ -511,9 +515,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
4683 targets->sens_res =
4684 be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]);
4685 targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK];
4686 - memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
4687 - skb->data[MICROREAD_EMCF_A3_LEN]);
4688 targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN];
4689 + if (targets->nfcid1_len > sizeof(targets->nfcid1)) {
4690 + r = -EINVAL;
4691 + goto exit_free;
4692 + }
4693 + memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
4694 + targets->nfcid1_len);
4695 break;
4696 case MICROREAD_GATE_ID_MREAD_ISO_B:
4697 targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
4698 diff --git a/drivers/of/irq.c b/drivers/of/irq.c
4699 index ca0189308d72..48f20ff1add9 100644
4700 --- a/drivers/of/irq.c
4701 +++ b/drivers/of/irq.c
4702 @@ -301,16 +301,17 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
4703 /* Get the reg property (if any) */
4704 addr = of_get_property(device, "reg", NULL);
4705
4706 + /* Try the new-style interrupts-extended first */
4707 + res = of_parse_phandle_with_args(device, "interrupts-extended",
4708 + "#interrupt-cells", index, out_irq);
4709 + if (!res)
4710 + return of_irq_parse_raw(addr, out_irq);
4711 +
4712 /* Get the interrupts property */
4713 intspec = of_get_property(device, "interrupts", &intlen);
4714 - if (intspec == NULL) {
4715 - /* Try the new-style interrupts-extended */
4716 - res = of_parse_phandle_with_args(device, "interrupts-extended",
4717 - "#interrupt-cells", index, out_irq);
4718 - if (res)
4719 - return -EINVAL;
4720 - return of_irq_parse_raw(addr, out_irq);
4721 - }
4722 + if (intspec == NULL)
4723 + return -EINVAL;
4724 +
4725 intlen /= sizeof(*intspec);
4726
4727 pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen);
4728 diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
4729 index c3ace1db8136..aaac3594f83b 100644
4730 --- a/drivers/phy/phy-twl4030-usb.c
4731 +++ b/drivers/phy/phy-twl4030-usb.c
4732 @@ -34,6 +34,7 @@
4733 #include <linux/delay.h>
4734 #include <linux/usb/otg.h>
4735 #include <linux/phy/phy.h>
4736 +#include <linux/pm_runtime.h>
4737 #include <linux/usb/musb-omap.h>
4738 #include <linux/usb/ulpi.h>
4739 #include <linux/i2c/twl.h>
4740 @@ -422,37 +423,55 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
4741 }
4742 }
4743
4744 -static int twl4030_phy_power_off(struct phy *phy)
4745 +static int twl4030_usb_runtime_suspend(struct device *dev)
4746 {
4747 - struct twl4030_usb *twl = phy_get_drvdata(phy);
4748 + struct twl4030_usb *twl = dev_get_drvdata(dev);
4749
4750 + dev_dbg(twl->dev, "%s\n", __func__);
4751 if (twl->asleep)
4752 return 0;
4753
4754 twl4030_phy_power(twl, 0);
4755 twl->asleep = 1;
4756 - dev_dbg(twl->dev, "%s\n", __func__);
4757 +
4758 return 0;
4759 }
4760
4761 -static void __twl4030_phy_power_on(struct twl4030_usb *twl)
4762 +static int twl4030_usb_runtime_resume(struct device *dev)
4763 {
4764 + struct twl4030_usb *twl = dev_get_drvdata(dev);
4765 +
4766 + dev_dbg(twl->dev, "%s\n", __func__);
4767 + if (!twl->asleep)
4768 + return 0;
4769 +
4770 twl4030_phy_power(twl, 1);
4771 - twl4030_i2c_access(twl, 1);
4772 - twl4030_usb_set_mode(twl, twl->usb_mode);
4773 - if (twl->usb_mode == T2_USB_MODE_ULPI)
4774 - twl4030_i2c_access(twl, 0);
4775 + twl->asleep = 0;
4776 +
4777 + return 0;
4778 +}
4779 +
4780 +static int twl4030_phy_power_off(struct phy *phy)
4781 +{
4782 + struct twl4030_usb *twl = phy_get_drvdata(phy);
4783 +
4784 + dev_dbg(twl->dev, "%s\n", __func__);
4785 + pm_runtime_mark_last_busy(twl->dev);
4786 + pm_runtime_put_autosuspend(twl->dev);
4787 +
4788 + return 0;
4789 }
4790
4791 static int twl4030_phy_power_on(struct phy *phy)
4792 {
4793 struct twl4030_usb *twl = phy_get_drvdata(phy);
4794
4795 - if (!twl->asleep)
4796 - return 0;
4797 - __twl4030_phy_power_on(twl);
4798 - twl->asleep = 0;
4799 dev_dbg(twl->dev, "%s\n", __func__);
4800 + pm_runtime_get_sync(twl->dev);
4801 + twl4030_i2c_access(twl, 1);
4802 + twl4030_usb_set_mode(twl, twl->usb_mode);
4803 + if (twl->usb_mode == T2_USB_MODE_ULPI)
4804 + twl4030_i2c_access(twl, 0);
4805
4806 /*
4807 * XXX When VBUS gets driven after musb goes to A mode,
4808 @@ -558,9 +577,27 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
4809 * USB_LINK_VBUS state. musb_hdrc won't care until it
4810 * starts to handle softconnect right.
4811 */
4812 + if ((status == OMAP_MUSB_VBUS_VALID) ||
4813 + (status == OMAP_MUSB_ID_GROUND)) {
4814 + if (twl->asleep)
4815 + pm_runtime_get_sync(twl->dev);
4816 + } else {
4817 + if (!twl->asleep) {
4818 + pm_runtime_mark_last_busy(twl->dev);
4819 + pm_runtime_put_autosuspend(twl->dev);
4820 + }
4821 + }
4822 omap_musb_mailbox(status);
4823 }
4824 - sysfs_notify(&twl->dev->kobj, NULL, "vbus");
4825 +
4826 + /* don't schedule during sleep - irq works right then */
4827 + if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) {
4828 + cancel_delayed_work(&twl->id_workaround_work);
4829 + schedule_delayed_work(&twl->id_workaround_work, HZ);
4830 + }
4831 +
4832 + if (irq)
4833 + sysfs_notify(&twl->dev->kobj, NULL, "vbus");
4834
4835 return IRQ_HANDLED;
4836 }
4837 @@ -569,29 +606,8 @@ static void twl4030_id_workaround_work(struct work_struct *work)
4838 {
4839 struct twl4030_usb *twl = container_of(work, struct twl4030_usb,
4840 id_workaround_work.work);
4841 - enum omap_musb_vbus_id_status status;
4842 - bool status_changed = false;
4843 -
4844 - status = twl4030_usb_linkstat(twl);
4845 -
4846 - spin_lock_irq(&twl->lock);
4847 - if (status >= 0 && status != twl->linkstat) {
4848 - twl->linkstat = status;
4849 - status_changed = true;
4850 - }
4851 - spin_unlock_irq(&twl->lock);
4852 -
4853 - if (status_changed) {
4854 - dev_dbg(twl->dev, "handle missing status change to %d\n",
4855 - status);
4856 - omap_musb_mailbox(status);
4857 - }
4858
4859 - /* don't schedule during sleep - irq works right then */
4860 - if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) {
4861 - cancel_delayed_work(&twl->id_workaround_work);
4862 - schedule_delayed_work(&twl->id_workaround_work, HZ);
4863 - }
4864 + twl4030_usb_irq(0, twl);
4865 }
4866
4867 static int twl4030_phy_init(struct phy *phy)
4868 @@ -599,22 +615,17 @@ static int twl4030_phy_init(struct phy *phy)
4869 struct twl4030_usb *twl = phy_get_drvdata(phy);
4870 enum omap_musb_vbus_id_status status;
4871
4872 - /*
4873 - * Start in sleep state, we'll get called through set_suspend()
4874 - * callback when musb is runtime resumed and it's time to start.
4875 - */
4876 - __twl4030_phy_power(twl, 0);
4877 - twl->asleep = 1;
4878 -
4879 + pm_runtime_get_sync(twl->dev);
4880 status = twl4030_usb_linkstat(twl);
4881 twl->linkstat = status;
4882
4883 - if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID) {
4884 + if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID)
4885 omap_musb_mailbox(twl->linkstat);
4886 - twl4030_phy_power_on(phy);
4887 - }
4888
4889 sysfs_notify(&twl->dev->kobj, NULL, "vbus");
4890 + pm_runtime_mark_last_busy(twl->dev);
4891 + pm_runtime_put_autosuspend(twl->dev);
4892 +
4893 return 0;
4894 }
4895
4896 @@ -650,6 +661,11 @@ static const struct phy_ops ops = {
4897 .owner = THIS_MODULE,
4898 };
4899
4900 +static const struct dev_pm_ops twl4030_usb_pm_ops = {
4901 + SET_RUNTIME_PM_OPS(twl4030_usb_runtime_suspend,
4902 + twl4030_usb_runtime_resume, NULL)
4903 +};
4904 +
4905 static int twl4030_usb_probe(struct platform_device *pdev)
4906 {
4907 struct twl4030_usb_data *pdata = dev_get_platdata(&pdev->dev);
4908 @@ -726,6 +742,11 @@ static int twl4030_usb_probe(struct platform_device *pdev)
4909
4910 ATOMIC_INIT_NOTIFIER_HEAD(&twl->phy.notifier);
4911
4912 + pm_runtime_use_autosuspend(&pdev->dev);
4913 + pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
4914 + pm_runtime_enable(&pdev->dev);
4915 + pm_runtime_get_sync(&pdev->dev);
4916 +
4917 /* Our job is to use irqs and status from the power module
4918 * to keep the transceiver disabled when nothing's connected.
4919 *
4920 @@ -744,6 +765,9 @@ static int twl4030_usb_probe(struct platform_device *pdev)
4921 return status;
4922 }
4923
4924 + pm_runtime_mark_last_busy(&pdev->dev);
4925 + pm_runtime_put_autosuspend(twl->dev);
4926 +
4927 dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
4928 return 0;
4929 }
4930 @@ -753,6 +777,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
4931 struct twl4030_usb *twl = platform_get_drvdata(pdev);
4932 int val;
4933
4934 + pm_runtime_get_sync(twl->dev);
4935 cancel_delayed_work(&twl->id_workaround_work);
4936 device_remove_file(twl->dev, &dev_attr_vbus);
4937
4938 @@ -772,9 +797,8 @@ static int twl4030_usb_remove(struct platform_device *pdev)
4939
4940 /* disable complete OTG block */
4941 twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
4942 -
4943 - if (!twl->asleep)
4944 - twl4030_phy_power(twl, 0);
4945 + pm_runtime_mark_last_busy(twl->dev);
4946 + pm_runtime_put(twl->dev);
4947
4948 return 0;
4949 }
4950 @@ -792,6 +816,7 @@ static struct platform_driver twl4030_usb_driver = {
4951 .remove = twl4030_usb_remove,
4952 .driver = {
4953 .name = "twl4030_usb",
4954 + .pm = &twl4030_usb_pm_ops,
4955 .owner = THIS_MODULE,
4956 .of_match_table = of_match_ptr(twl4030_usb_id_table),
4957 },
4958 diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
4959 index 40462415291e..454998669c2a 100644
4960 --- a/drivers/scsi/libiscsi.c
4961 +++ b/drivers/scsi/libiscsi.c
4962 @@ -717,11 +717,21 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
4963 return NULL;
4964 }
4965
4966 + if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
4967 + iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
4968 + return NULL;
4969 + }
4970 +
4971 task = conn->login_task;
4972 } else {
4973 if (session->state != ISCSI_STATE_LOGGED_IN)
4974 return NULL;
4975
4976 + if (data_size != 0) {
4977 + iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
4978 + return NULL;
4979 + }
4980 +
4981 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
4982 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
4983
4984 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
4985 index 3f3dc1226edf..e14960470d8d 100644
4986 --- a/drivers/spi/spi-dw-pci.c
4987 +++ b/drivers/spi/spi-dw-pci.c
4988 @@ -62,6 +62,8 @@ static int spi_pci_probe(struct pci_dev *pdev,
4989 if (ret)
4990 return ret;
4991
4992 + dws->regs = pcim_iomap_table(pdev)[pci_bar];
4993 +
4994 dws->bus_num = 0;
4995 dws->num_cs = 4;
4996 dws->irq = pdev->irq;
4997 diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
4998 index a64f1557c156..b0059e7552b0 100644
4999 --- a/drivers/spi/spi-omap2-mcspi.c
5000 +++ b/drivers/spi/spi-omap2-mcspi.c
5001 @@ -321,7 +321,8 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
5002 disable_fifo:
5003 if (t->rx_buf != NULL)
5004 chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
5005 - else
5006 +
5007 + if (t->tx_buf != NULL)
5008 chconf &= ~OMAP2_MCSPI_CHCONF_FFET;
5009
5010 mcspi_write_chconf0(spi, chconf);
5011 diff --git a/drivers/staging/iio/meter/ade7758_trigger.c b/drivers/staging/iio/meter/ade7758_trigger.c
5012 index 7a94ddd42f59..8c4f2896cd0d 100644
5013 --- a/drivers/staging/iio/meter/ade7758_trigger.c
5014 +++ b/drivers/staging/iio/meter/ade7758_trigger.c
5015 @@ -85,7 +85,7 @@ int ade7758_probe_trigger(struct iio_dev *indio_dev)
5016 ret = iio_trigger_register(st->trig);
5017
5018 /* select default trigger */
5019 - indio_dev->trig = st->trig;
5020 + indio_dev->trig = iio_trigger_get(st->trig);
5021 if (ret)
5022 goto error_free_irq;
5023
5024 diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/staging/imx-drm/ipuv3-plane.c
5025 index 34b642a12f8b..c70f1734b274 100644
5026 --- a/drivers/staging/imx-drm/ipuv3-plane.c
5027 +++ b/drivers/staging/imx-drm/ipuv3-plane.c
5028 @@ -277,7 +277,8 @@ static void ipu_plane_dpms(struct ipu_plane *ipu_plane, int mode)
5029
5030 ipu_idmac_put(ipu_plane->ipu_ch);
5031 ipu_dmfc_put(ipu_plane->dmfc);
5032 - ipu_dp_put(ipu_plane->dp);
5033 + if (ipu_plane->dp)
5034 + ipu_dp_put(ipu_plane->dp);
5035 }
5036 }
5037
5038 diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
5039 index 209e4c7e6f8a..4f65ba1158bf 100644
5040 --- a/drivers/staging/lustre/lustre/Kconfig
5041 +++ b/drivers/staging/lustre/lustre/Kconfig
5042 @@ -57,4 +57,5 @@ config LUSTRE_TRANSLATE_ERRNOS
5043 config LUSTRE_LLITE_LLOOP
5044 tristate "Lustre virtual block device"
5045 depends on LUSTRE_FS && BLOCK
5046 + depends on !PPC_64K_PAGES && !ARM64_64K_PAGES
5047 default m
5048 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
5049 index f329ad294fc0..104f29e6b290 100644
5050 --- a/drivers/target/iscsi/iscsi_target.c
5051 +++ b/drivers/target/iscsi/iscsi_target.c
5052 @@ -4513,6 +4513,7 @@ static void iscsit_logout_post_handler_diffcid(
5053 {
5054 struct iscsi_conn *l_conn;
5055 struct iscsi_session *sess = conn->sess;
5056 + bool conn_found = false;
5057
5058 if (!sess)
5059 return;
5060 @@ -4521,12 +4522,13 @@ static void iscsit_logout_post_handler_diffcid(
5061 list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
5062 if (l_conn->cid == cid) {
5063 iscsit_inc_conn_usage_count(l_conn);
5064 + conn_found = true;
5065 break;
5066 }
5067 }
5068 spin_unlock_bh(&sess->conn_lock);
5069
5070 - if (!l_conn)
5071 + if (!conn_found)
5072 return;
5073
5074 if (l_conn->sock)
5075 diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
5076 index 4d2e23fc76fd..43b7e6a616b8 100644
5077 --- a/drivers/target/iscsi/iscsi_target_parameters.c
5078 +++ b/drivers/target/iscsi/iscsi_target_parameters.c
5079 @@ -601,7 +601,7 @@ int iscsi_copy_param_list(
5080 param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
5081 if (!param_list) {
5082 pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
5083 - goto err_out;
5084 + return -1;
5085 }
5086 INIT_LIST_HEAD(&param_list->param_list);
5087 INIT_LIST_HEAD(&param_list->extra_response_list);
5088 diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
5089 index 483d324020a6..f30385385544 100644
5090 --- a/drivers/target/target_core_configfs.c
5091 +++ b/drivers/target/target_core_configfs.c
5092 @@ -2359,7 +2359,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\
5093 pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
5094 return -EINVAL; \
5095 } \
5096 - if (!tmp) \
5097 + if (tmp) \
5098 t->_var |= _bit; \
5099 else \
5100 t->_var &= ~_bit; \
5101 diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
5102 index ab9096dc3849..148ffe4c232f 100644
5103 --- a/drivers/tty/serial/8250/8250_dma.c
5104 +++ b/drivers/tty/serial/8250/8250_dma.c
5105 @@ -192,21 +192,28 @@ int serial8250_request_dma(struct uart_8250_port *p)
5106
5107 dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
5108 &dma->rx_addr, GFP_KERNEL);
5109 - if (!dma->rx_buf) {
5110 - dma_release_channel(dma->rxchan);
5111 - dma_release_channel(dma->txchan);
5112 - return -ENOMEM;
5113 - }
5114 + if (!dma->rx_buf)
5115 + goto err;
5116
5117 /* TX buffer */
5118 dma->tx_addr = dma_map_single(dma->txchan->device->dev,
5119 p->port.state->xmit.buf,
5120 UART_XMIT_SIZE,
5121 DMA_TO_DEVICE);
5122 + if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
5123 + dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
5124 + dma->rx_buf, dma->rx_addr);
5125 + goto err;
5126 + }
5127
5128 dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");
5129
5130 return 0;
5131 +err:
5132 + dma_release_channel(dma->rxchan);
5133 + dma_release_channel(dma->txchan);
5134 +
5135 + return -ENOMEM;
5136 }
5137 EXPORT_SYMBOL_GPL(serial8250_request_dma);
5138
5139 diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
5140 index 2d51d852b474..ca1123d415c5 100644
5141 --- a/drivers/usb/chipidea/ci_hdrc_msm.c
5142 +++ b/drivers/usb/chipidea/ci_hdrc_msm.c
5143 @@ -20,13 +20,13 @@
5144 static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
5145 {
5146 struct device *dev = ci->gadget.dev.parent;
5147 - int val;
5148
5149 switch (event) {
5150 case CI_HDRC_CONTROLLER_RESET_EVENT:
5151 dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n");
5152 writel(0, USB_AHBBURST);
5153 writel(0, USB_AHBMODE);
5154 + usb_phy_init(ci->transceiver);
5155 break;
5156 case CI_HDRC_CONTROLLER_STOPPED_EVENT:
5157 dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n");
5158 @@ -34,10 +34,7 @@ static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
5159 * Put the transceiver in non-driving mode. Otherwise host
5160 * may not detect soft-disconnection.
5161 */
5162 - val = usb_phy_io_read(ci->transceiver, ULPI_FUNC_CTRL);
5163 - val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
5164 - val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
5165 - usb_phy_io_write(ci->transceiver, val, ULPI_FUNC_CTRL);
5166 + usb_phy_notify_disconnect(ci->transceiver, USB_SPEED_UNKNOWN);
5167 break;
5168 default:
5169 dev_dbg(dev, "unknown ci_hdrc event\n");
5170 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
5171 index 6650df70bb35..263612ce1f62 100644
5172 --- a/drivers/usb/core/hub.c
5173 +++ b/drivers/usb/core/hub.c
5174 @@ -4764,9 +4764,10 @@ static void hub_events(void)
5175
5176 hub = list_entry(tmp, struct usb_hub, event_list);
5177 kref_get(&hub->kref);
5178 + hdev = hub->hdev;
5179 + usb_get_dev(hdev);
5180 spin_unlock_irq(&hub_event_lock);
5181
5182 - hdev = hub->hdev;
5183 hub_dev = hub->intfdev;
5184 intf = to_usb_interface(hub_dev);
5185 dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
5186 @@ -4979,6 +4980,7 @@ static void hub_events(void)
5187 usb_autopm_put_interface(intf);
5188 loop_disconnected:
5189 usb_unlock_device(hdev);
5190 + usb_put_dev(hdev);
5191 kref_put(&hub->kref, hub_release);
5192
5193 } /* end while (1) */
5194 diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
5195 index a49217ae3533..f074755372a0 100644
5196 --- a/drivers/usb/dwc3/core.c
5197 +++ b/drivers/usb/dwc3/core.c
5198 @@ -583,12 +583,6 @@ static int dwc3_remove(struct platform_device *pdev)
5199 {
5200 struct dwc3 *dwc = platform_get_drvdata(pdev);
5201
5202 - usb_phy_set_suspend(dwc->usb2_phy, 1);
5203 - usb_phy_set_suspend(dwc->usb3_phy, 1);
5204 -
5205 - pm_runtime_put_sync(&pdev->dev);
5206 - pm_runtime_disable(&pdev->dev);
5207 -
5208 dwc3_debugfs_exit(dwc);
5209
5210 switch (dwc->dr_mode) {
5211 @@ -609,8 +603,15 @@ static int dwc3_remove(struct platform_device *pdev)
5212
5213 dwc3_event_buffers_cleanup(dwc);
5214 dwc3_free_event_buffers(dwc);
5215 +
5216 + usb_phy_set_suspend(dwc->usb2_phy, 1);
5217 + usb_phy_set_suspend(dwc->usb3_phy, 1);
5218 +
5219 dwc3_core_exit(dwc);
5220
5221 + pm_runtime_put_sync(&pdev->dev);
5222 + pm_runtime_disable(&pdev->dev);
5223 +
5224 return 0;
5225 }
5226
5227 diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
5228 index b269dbd47fc4..2a6841c95b64 100644
5229 --- a/drivers/usb/dwc3/dwc3-omap.c
5230 +++ b/drivers/usb/dwc3/dwc3-omap.c
5231 @@ -582,9 +582,9 @@ static int dwc3_omap_remove(struct platform_device *pdev)
5232 if (omap->extcon_id_dev.edev)
5233 extcon_unregister_interest(&omap->extcon_id_dev);
5234 dwc3_omap_disable_irqs(omap);
5235 + device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
5236 pm_runtime_put_sync(&pdev->dev);
5237 pm_runtime_disable(&pdev->dev);
5238 - device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
5239
5240 return 0;
5241 }
5242 diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
5243 index 81cda09b47e3..488a30836c36 100644
5244 --- a/drivers/usb/host/ehci-hcd.c
5245 +++ b/drivers/usb/host/ehci-hcd.c
5246 @@ -965,8 +965,6 @@ rescan:
5247 }
5248
5249 qh->exception = 1;
5250 - if (ehci->rh_state < EHCI_RH_RUNNING)
5251 - qh->qh_state = QH_STATE_IDLE;
5252 switch (qh->qh_state) {
5253 case QH_STATE_LINKED:
5254 WARN_ON(!list_empty(&qh->qtd_list));
5255 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
5256 index 9992fbfec85f..93fe089cd51a 100644
5257 --- a/drivers/usb/host/xhci-hub.c
5258 +++ b/drivers/usb/host/xhci-hub.c
5259 @@ -470,7 +470,8 @@ static void xhci_hub_report_usb2_link_state(u32 *status, u32 status_reg)
5260 }
5261
5262 /* Updates Link Status for super Speed port */
5263 -static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
5264 +static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
5265 + u32 *status, u32 status_reg)
5266 {
5267 u32 pls = status_reg & PORT_PLS_MASK;
5268
5269 @@ -509,7 +510,8 @@ static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
5270 * in which sometimes the port enters compliance mode
5271 * caused by a delay on the host-device negotiation.
5272 */
5273 - if (pls == USB_SS_PORT_LS_COMP_MOD)
5274 + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
5275 + (pls == USB_SS_PORT_LS_COMP_MOD))
5276 pls |= USB_PORT_STAT_CONNECTION;
5277 }
5278
5279 @@ -668,7 +670,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
5280 }
5281 /* Update Port Link State */
5282 if (hcd->speed == HCD_USB3) {
5283 - xhci_hub_report_usb3_link_state(&status, raw_port_status);
5284 + xhci_hub_report_usb3_link_state(xhci, &status, raw_port_status);
5285 /*
5286 * Verify if all USB3 Ports Have entered U0 already.
5287 * Delete Compliance Mode Timer if so.
5288 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
5289 index 4133a00461b1..9bce4f0e99be 100644
5290 --- a/drivers/usb/host/xhci-mem.c
5291 +++ b/drivers/usb/host/xhci-mem.c
5292 @@ -1723,7 +1723,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
5293 }
5294
5295 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
5296 - for (i = 0; i < num_ports; i++) {
5297 + for (i = 0; i < num_ports && xhci->rh_bw; i++) {
5298 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
5299 for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
5300 struct list_head *ep = &bwt->interval_bw[j].endpoints;
5301 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
5302 index ab831048e8a4..82b563fc4fd6 100644
5303 --- a/drivers/usb/host/xhci.c
5304 +++ b/drivers/usb/host/xhci.c
5305 @@ -3928,13 +3928,21 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
5306 int ret;
5307
5308 spin_lock_irqsave(&xhci->lock, flags);
5309 - if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
5310 +
5311 + virt_dev = xhci->devs[udev->slot_id];
5312 +
5313 + /*
5314 + * virt_dev might not exists yet if xHC resumed from hibernate (S4) and
5315 + * xHC was re-initialized. Exit latency will be set later after
5316 + * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
5317 + */
5318 +
5319 + if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
5320 spin_unlock_irqrestore(&xhci->lock, flags);
5321 return 0;
5322 }
5323
5324 /* Attempt to issue an Evaluate Context command to change the MEL. */
5325 - virt_dev = xhci->devs[udev->slot_id];
5326 command = xhci->lpm_command;
5327 ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
5328 if (!ctrl_ctx) {
5329 diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
5330 index de98906f786d..0aef801edbc1 100644
5331 --- a/drivers/usb/misc/sisusbvga/sisusb.c
5332 +++ b/drivers/usb/misc/sisusbvga/sisusb.c
5333 @@ -3248,6 +3248,7 @@ static const struct usb_device_id sisusb_table[] = {
5334 { USB_DEVICE(0x0711, 0x0918) },
5335 { USB_DEVICE(0x0711, 0x0920) },
5336 { USB_DEVICE(0x0711, 0x0950) },
5337 + { USB_DEVICE(0x0711, 0x5200) },
5338 { USB_DEVICE(0x182d, 0x021c) },
5339 { USB_DEVICE(0x182d, 0x0269) },
5340 { }
5341 diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
5342 index bbe4f8e6e8d7..8834b70c868c 100644
5343 --- a/drivers/usb/phy/phy-tegra-usb.c
5344 +++ b/drivers/usb/phy/phy-tegra-usb.c
5345 @@ -881,8 +881,8 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
5346 return -ENOMEM;
5347 }
5348
5349 - tegra_phy->config = devm_kzalloc(&pdev->dev,
5350 - sizeof(*tegra_phy->config), GFP_KERNEL);
5351 + tegra_phy->config = devm_kzalloc(&pdev->dev, sizeof(*config),
5352 + GFP_KERNEL);
5353 if (!tegra_phy->config) {
5354 dev_err(&pdev->dev,
5355 "unable to allocate memory for USB UTMIP config\n");
5356 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
5357 index 8b0f517abb6b..3614620e09e1 100644
5358 --- a/drivers/usb/serial/ftdi_sio.c
5359 +++ b/drivers/usb/serial/ftdi_sio.c
5360 @@ -741,6 +741,7 @@ static const struct usb_device_id id_table_combined[] = {
5361 { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
5362 .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
5363 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
5364 + { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
5365 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
5366 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
5367 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
5368 @@ -952,6 +953,8 @@ static const struct usb_device_id id_table_combined[] = {
5369 { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
5370 /* Infineon Devices */
5371 { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
5372 + /* GE Healthcare devices */
5373 + { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
5374 { } /* Terminating entry */
5375 };
5376
5377 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
5378 index 70b0b1d88ae9..5937b2d242f2 100644
5379 --- a/drivers/usb/serial/ftdi_sio_ids.h
5380 +++ b/drivers/usb/serial/ftdi_sio_ids.h
5381 @@ -837,6 +837,12 @@
5382 #define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */
5383
5384 /*
5385 + * NOVITUS printers
5386 + */
5387 +#define NOVITUS_VID 0x1a28
5388 +#define NOVITUS_BONO_E_PID 0x6010
5389 +
5390 +/*
5391 * RT Systems programming cables for various ham radios
5392 */
5393 #define RTSYSTEMS_VID 0x2100 /* Vendor ID */
5394 @@ -1385,3 +1391,9 @@
5395 * ekey biometric systems GmbH (http://ekey.net/)
5396 */
5397 #define FTDI_EKEY_CONV_USB_PID 0xCB08 /* Converter USB */
5398 +
5399 +/*
5400 + * GE Healthcare devices
5401 + */
5402 +#define GE_HEALTHCARE_VID 0x1901
5403 +#define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015
5404 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
5405 index 9da566a3f5c8..e47aabe0c760 100644
5406 --- a/drivers/usb/serial/option.c
5407 +++ b/drivers/usb/serial/option.c
5408 @@ -275,8 +275,12 @@ static void option_instat_callback(struct urb *urb);
5409 #define ZTE_PRODUCT_MF622 0x0001
5410 #define ZTE_PRODUCT_MF628 0x0015
5411 #define ZTE_PRODUCT_MF626 0x0031
5412 -#define ZTE_PRODUCT_MC2718 0xffe8
5413 #define ZTE_PRODUCT_AC2726 0xfff1
5414 +#define ZTE_PRODUCT_CDMA_TECH 0xfffe
5415 +#define ZTE_PRODUCT_AC8710T 0xffff
5416 +#define ZTE_PRODUCT_MC2718 0xffe8
5417 +#define ZTE_PRODUCT_AD3812 0xffeb
5418 +#define ZTE_PRODUCT_MC2716 0xffed
5419
5420 #define BENQ_VENDOR_ID 0x04a5
5421 #define BENQ_PRODUCT_H10 0x4068
5422 @@ -494,6 +498,10 @@ static void option_instat_callback(struct urb *urb);
5423 #define INOVIA_VENDOR_ID 0x20a6
5424 #define INOVIA_SEW858 0x1105
5425
5426 +/* VIA Telecom */
5427 +#define VIATELECOM_VENDOR_ID 0x15eb
5428 +#define VIATELECOM_PRODUCT_CDS7 0x0001
5429 +
5430 /* some devices interfaces need special handling due to a number of reasons */
5431 enum option_blacklist_reason {
5432 OPTION_BLACKLIST_NONE = 0,
5433 @@ -527,10 +535,18 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = {
5434 .reserved = BIT(4),
5435 };
5436
5437 +static const struct option_blacklist_info zte_ad3812_z_blacklist = {
5438 + .sendsetup = BIT(0) | BIT(1) | BIT(2),
5439 +};
5440 +
5441 static const struct option_blacklist_info zte_mc2718_z_blacklist = {
5442 .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
5443 };
5444
5445 +static const struct option_blacklist_info zte_mc2716_z_blacklist = {
5446 + .sendsetup = BIT(1) | BIT(2) | BIT(3),
5447 +};
5448 +
5449 static const struct option_blacklist_info huawei_cdc12_blacklist = {
5450 .reserved = BIT(1) | BIT(2),
5451 };
5452 @@ -1070,6 +1086,7 @@ static const struct usb_device_id option_ids[] = {
5453 { USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) },
5454 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
5455 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
5456 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
5457 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
5458 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
5459 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
5460 @@ -1544,13 +1561,18 @@ static const struct usb_device_id option_ids[] = {
5461 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
5462 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
5463
5464 - /* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */
5465 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
5466 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
5467 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
5468 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
5469 .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
5470 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
5471 + .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
5472 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
5473 + .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
5474 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
5475 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
5476 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
5477 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
5478
5479 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
5480 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
5481 @@ -1724,6 +1746,7 @@ static const struct usb_device_id option_ids[] = {
5482 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
5483 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
5484 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
5485 + { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
5486 { } /* Terminating entry */
5487 };
5488 MODULE_DEVICE_TABLE(usb, option_ids);
5489 @@ -1917,6 +1940,8 @@ static void option_instat_callback(struct urb *urb)
5490 dev_dbg(dev, "%s: type %x req %x\n", __func__,
5491 req_pkt->bRequestType, req_pkt->bRequest);
5492 }
5493 + } else if (status == -ENOENT || status == -ESHUTDOWN) {
5494 + dev_dbg(dev, "%s: urb stopped: %d\n", __func__, status);
5495 } else
5496 dev_err(dev, "%s: error %d\n", __func__, status);
5497
5498 diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
5499 index b3d5a35c0d4b..e9bad928039f 100644
5500 --- a/drivers/usb/serial/pl2303.c
5501 +++ b/drivers/usb/serial/pl2303.c
5502 @@ -45,6 +45,7 @@ static const struct usb_device_id id_table[] = {
5503 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
5504 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
5505 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
5506 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
5507 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
5508 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
5509 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
5510 diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
5511 index 42bc082896ac..71fd9da1d6e7 100644
5512 --- a/drivers/usb/serial/pl2303.h
5513 +++ b/drivers/usb/serial/pl2303.h
5514 @@ -22,6 +22,7 @@
5515 #define PL2303_PRODUCT_ID_GPRS 0x0609
5516 #define PL2303_PRODUCT_ID_HCR331 0x331a
5517 #define PL2303_PRODUCT_ID_MOTOROLA 0x0307
5518 +#define PL2303_PRODUCT_ID_ZTEK 0xe1f1
5519
5520 #define ATEN_VENDOR_ID 0x0557
5521 #define ATEN_VENDOR_ID2 0x0547
5522 diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
5523 index 37480348e39b..74a9375a9bb5 100644
5524 --- a/drivers/usb/serial/sierra.c
5525 +++ b/drivers/usb/serial/sierra.c
5526 @@ -282,14 +282,19 @@ static const struct usb_device_id id_table[] = {
5527 /* Sierra Wireless HSPA Non-Composite Device */
5528 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
5529 { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */
5530 - { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
5531 + /* Sierra Wireless Direct IP modems */
5532 + { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68A3, 0xFF, 0xFF, 0xFF),
5533 + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
5534 + },
5535 + { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
5536 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
5537 },
5538 /* AT&T Direct IP LTE modems */
5539 { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
5540 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
5541 },
5542 - { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
5543 + /* Airprime/Sierra Wireless Direct IP modems */
5544 + { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68A3, 0xFF, 0xFF, 0xFF),
5545 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
5546 },
5547
5548 diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
5549 index b169b0f9b3a2..9a08e18e09b9 100644
5550 --- a/drivers/usb/serial/usb-serial.c
5551 +++ b/drivers/usb/serial/usb-serial.c
5552 @@ -764,29 +764,39 @@ static int usb_serial_probe(struct usb_interface *interface,
5553 if (usb_endpoint_is_bulk_in(endpoint)) {
5554 /* we found a bulk in endpoint */
5555 dev_dbg(ddev, "found bulk in on endpoint %d\n", i);
5556 - bulk_in_endpoint[num_bulk_in] = endpoint;
5557 - ++num_bulk_in;
5558 + if (num_bulk_in < MAX_NUM_PORTS) {
5559 + bulk_in_endpoint[num_bulk_in] = endpoint;
5560 + ++num_bulk_in;
5561 + }
5562 }
5563
5564 if (usb_endpoint_is_bulk_out(endpoint)) {
5565 /* we found a bulk out endpoint */
5566 dev_dbg(ddev, "found bulk out on endpoint %d\n", i);
5567 - bulk_out_endpoint[num_bulk_out] = endpoint;
5568 - ++num_bulk_out;
5569 + if (num_bulk_out < MAX_NUM_PORTS) {
5570 + bulk_out_endpoint[num_bulk_out] = endpoint;
5571 + ++num_bulk_out;
5572 + }
5573 }
5574
5575 if (usb_endpoint_is_int_in(endpoint)) {
5576 /* we found a interrupt in endpoint */
5577 dev_dbg(ddev, "found interrupt in on endpoint %d\n", i);
5578 - interrupt_in_endpoint[num_interrupt_in] = endpoint;
5579 - ++num_interrupt_in;
5580 + if (num_interrupt_in < MAX_NUM_PORTS) {
5581 + interrupt_in_endpoint[num_interrupt_in] =
5582 + endpoint;
5583 + ++num_interrupt_in;
5584 + }
5585 }
5586
5587 if (usb_endpoint_is_int_out(endpoint)) {
5588 /* we found an interrupt out endpoint */
5589 dev_dbg(ddev, "found interrupt out on endpoint %d\n", i);
5590 - interrupt_out_endpoint[num_interrupt_out] = endpoint;
5591 - ++num_interrupt_out;
5592 + if (num_interrupt_out < MAX_NUM_PORTS) {
5593 + interrupt_out_endpoint[num_interrupt_out] =
5594 + endpoint;
5595 + ++num_interrupt_out;
5596 + }
5597 }
5598 }
5599
5600 @@ -809,8 +819,10 @@ static int usb_serial_probe(struct usb_interface *interface,
5601 if (usb_endpoint_is_int_in(endpoint)) {
5602 /* we found a interrupt in endpoint */
5603 dev_dbg(ddev, "found interrupt in for Prolific device on separate interface\n");
5604 - interrupt_in_endpoint[num_interrupt_in] = endpoint;
5605 - ++num_interrupt_in;
5606 + if (num_interrupt_in < MAX_NUM_PORTS) {
5607 + interrupt_in_endpoint[num_interrupt_in] = endpoint;
5608 + ++num_interrupt_in;
5609 + }
5610 }
5611 }
5612 }
5613 @@ -850,6 +862,11 @@ static int usb_serial_probe(struct usb_interface *interface,
5614 num_ports = type->num_ports;
5615 }
5616
5617 + if (num_ports > MAX_NUM_PORTS) {
5618 + dev_warn(ddev, "too many ports requested: %d\n", num_ports);
5619 + num_ports = MAX_NUM_PORTS;
5620 + }
5621 +
5622 serial->num_ports = num_ports;
5623 serial->num_bulk_in = num_bulk_in;
5624 serial->num_bulk_out = num_bulk_out;
5625 diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
5626 index e40ab739c4a6..c9bb107d5e5c 100644
5627 --- a/drivers/usb/serial/zte_ev.c
5628 +++ b/drivers/usb/serial/zte_ev.c
5629 @@ -272,28 +272,16 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
5630 }
5631
5632 static const struct usb_device_id id_table[] = {
5633 - /* AC8710, AC8710T */
5634 - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) },
5635 - /* AC8700 */
5636 - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) },
5637 - /* MG880 */
5638 - { USB_DEVICE(0x19d2, 0xfffd) },
5639 - { USB_DEVICE(0x19d2, 0xfffc) },
5640 - { USB_DEVICE(0x19d2, 0xfffb) },
5641 - /* AC8710_V3 */
5642 + { USB_DEVICE(0x19d2, 0xffec) },
5643 + { USB_DEVICE(0x19d2, 0xffee) },
5644 { USB_DEVICE(0x19d2, 0xfff6) },
5645 { USB_DEVICE(0x19d2, 0xfff7) },
5646 { USB_DEVICE(0x19d2, 0xfff8) },
5647 { USB_DEVICE(0x19d2, 0xfff9) },
5648 - { USB_DEVICE(0x19d2, 0xffee) },
5649 - /* AC2716, MC2716 */
5650 - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) },
5651 - /* AD3812 */
5652 - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) },
5653 - { USB_DEVICE(0x19d2, 0xffec) },
5654 - { USB_DEVICE(0x05C6, 0x3197) },
5655 - { USB_DEVICE(0x05C6, 0x6000) },
5656 - { USB_DEVICE(0x05C6, 0x9008) },
5657 + { USB_DEVICE(0x19d2, 0xfffb) },
5658 + { USB_DEVICE(0x19d2, 0xfffc) },
5659 + /* MG880 */
5660 + { USB_DEVICE(0x19d2, 0xfffd) },
5661 { },
5662 };
5663 MODULE_DEVICE_TABLE(usb, id_table);
5664 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
5665 index 042c83b01046..7f625306ea80 100644
5666 --- a/drivers/usb/storage/unusual_devs.h
5667 +++ b/drivers/usb/storage/unusual_devs.h
5668 @@ -101,6 +101,12 @@ UNUSUAL_DEV( 0x03f0, 0x4002, 0x0001, 0x0001,
5669 "PhotoSmart R707",
5670 USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY),
5671
5672 +UNUSUAL_DEV( 0x03f3, 0x0001, 0x0000, 0x9999,
5673 + "Adaptec",
5674 + "USBConnect 2000",
5675 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
5676 + US_FL_SCM_MULT_TARG ),
5677 +
5678 /* Reported by Sebastian Kapfer <sebastian_kapfer@gmx.net>
5679 * and Olaf Hering <olh@suse.de> (different bcd's, same vendor/product)
5680 * for USB floppies that need the SINGLE_LUN enforcement.
5681 @@ -741,6 +747,12 @@ UNUSUAL_DEV( 0x059b, 0x0001, 0x0100, 0x0100,
5682 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5683 US_FL_SINGLE_LUN ),
5684
5685 +UNUSUAL_DEV( 0x059b, 0x0040, 0x0100, 0x0100,
5686 + "Iomega",
5687 + "Jaz USB Adapter",
5688 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5689 + US_FL_SINGLE_LUN ),
5690 +
5691 /* Reported by <Hendryk.Pfeiffer@gmx.de> */
5692 UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000,
5693 "LaCie",
5694 @@ -1113,6 +1125,18 @@ UNUSUAL_DEV( 0x0851, 0x1543, 0x0200, 0x0200,
5695 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5696 US_FL_NOT_LOCKABLE),
5697
5698 +UNUSUAL_DEV( 0x085a, 0x0026, 0x0100, 0x0133,
5699 + "Xircom",
5700 + "PortGear USB-SCSI (Mac USB Dock)",
5701 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
5702 + US_FL_SCM_MULT_TARG ),
5703 +
5704 +UNUSUAL_DEV( 0x085a, 0x0028, 0x0100, 0x0133,
5705 + "Xircom",
5706 + "PortGear USB to SCSI Converter",
5707 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
5708 + US_FL_SCM_MULT_TARG ),
5709 +
5710 /* Submitted by Jan De Luyck <lkml@kcore.org> */
5711 UNUSUAL_DEV( 0x08bd, 0x1100, 0x0000, 0x0000,
5712 "CITIZEN",
5713 @@ -1945,6 +1969,14 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
5714 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5715 US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
5716
5717 +/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
5718 + * and Mac USB Dock USB-SCSI */
5719 +UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133,
5720 + "Entrega Technologies",
5721 + "USB to SCSI Converter",
5722 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
5723 + US_FL_SCM_MULT_TARG ),
5724 +
5725 /* Reported by Robert Schedel <r.schedel@yahoo.de>
5726 * Note: this is a 'super top' device like the above 14cd/6600 device */
5727 UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
5728 @@ -1967,6 +1999,12 @@ UNUSUAL_DEV( 0x177f, 0x0400, 0x0000, 0x0000,
5729 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5730 US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
5731
5732 +UNUSUAL_DEV( 0x1822, 0x0001, 0x0000, 0x9999,
5733 + "Ariston Technologies",
5734 + "iConnect USB to SCSI adapter",
5735 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
5736 + US_FL_SCM_MULT_TARG ),
5737 +
5738 /* Reported by Hans de Goede <hdegoede@redhat.com>
5739 * These Appotech controllers are found in Picture Frames, they provide a
5740 * (buggy) emulation of a cdrom drive which contains the windows software
5741 diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c
5742 index 80079b8fed15..d0303f0dbe15 100644
5743 --- a/drivers/uwb/lc-dev.c
5744 +++ b/drivers/uwb/lc-dev.c
5745 @@ -431,16 +431,19 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
5746 uwb_dev->mac_addr = *bce->mac_addr;
5747 uwb_dev->dev_addr = bce->dev_addr;
5748 dev_set_name(&uwb_dev->dev, "%s", macbuf);
5749 +
5750 + /* plug the beacon cache */
5751 + bce->uwb_dev = uwb_dev;
5752 + uwb_dev->bce = bce;
5753 + uwb_bce_get(bce); /* released in uwb_dev_sys_release() */
5754 +
5755 result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc);
5756 if (result < 0) {
5757 dev_err(dev, "new device %s: cannot instantiate device\n",
5758 macbuf);
5759 goto error_dev_add;
5760 }
5761 - /* plug the beacon cache */
5762 - bce->uwb_dev = uwb_dev;
5763 - uwb_dev->bce = bce;
5764 - uwb_bce_get(bce); /* released in uwb_dev_sys_release() */
5765 +
5766 dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n",
5767 macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name,
5768 dev_name(rc->uwb_dev.dev.parent));
5769 @@ -448,6 +451,8 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
5770 return;
5771
5772 error_dev_add:
5773 + bce->uwb_dev = NULL;
5774 + uwb_bce_put(bce);
5775 kfree(uwb_dev);
5776 return;
5777 }
5778 diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
5779 index 624e8dc24532..602913d7ae03 100644
5780 --- a/drivers/xen/manage.c
5781 +++ b/drivers/xen/manage.c
5782 @@ -111,16 +111,11 @@ static void do_suspend(void)
5783
5784 shutting_down = SHUTDOWN_SUSPEND;
5785
5786 -#ifdef CONFIG_PREEMPT
5787 - /* If the kernel is preemptible, we need to freeze all the processes
5788 - to prevent them from being in the middle of a pagetable update
5789 - during suspend. */
5790 err = freeze_processes();
5791 if (err) {
5792 pr_err("%s: freeze failed %d\n", __func__, err);
5793 goto out;
5794 }
5795 -#endif
5796
5797 err = dpm_suspend_start(PMSG_FREEZE);
5798 if (err) {
5799 @@ -169,10 +164,8 @@ out_resume:
5800 dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
5801
5802 out_thaw:
5803 -#ifdef CONFIG_PREEMPT
5804 thaw_processes();
5805 out:
5806 -#endif
5807 shutting_down = SHUTDOWN_INVALID;
5808 }
5809 #endif /* CONFIG_HIBERNATE_CALLBACKS */
5810 diff --git a/fs/aio.c b/fs/aio.c
5811 index 6d68e01dc7ca..f45ddaa4fffa 100644
5812 --- a/fs/aio.c
5813 +++ b/fs/aio.c
5814 @@ -141,6 +141,7 @@ struct kioctx {
5815
5816 struct {
5817 unsigned tail;
5818 + unsigned completed_events;
5819 spinlock_t completion_lock;
5820 } ____cacheline_aligned_in_smp;
5821
5822 @@ -796,6 +797,9 @@ void exit_aio(struct mm_struct *mm)
5823 unsigned i = 0;
5824
5825 while (1) {
5826 + struct completion requests_done =
5827 + COMPLETION_INITIALIZER_ONSTACK(requests_done);
5828 +
5829 rcu_read_lock();
5830 table = rcu_dereference(mm->ioctx_table);
5831
5832 @@ -823,7 +827,10 @@ void exit_aio(struct mm_struct *mm)
5833 */
5834 ctx->mmap_size = 0;
5835
5836 - kill_ioctx(mm, ctx, NULL);
5837 + kill_ioctx(mm, ctx, &requests_done);
5838 +
5839 + /* Wait until all IO for the context are done. */
5840 + wait_for_completion(&requests_done);
5841 }
5842 }
5843
5844 @@ -880,6 +887,68 @@ out:
5845 return ret;
5846 }
5847
5848 +/* refill_reqs_available
5849 + * Updates the reqs_available reference counts used for tracking the
5850 + * number of free slots in the completion ring. This can be called
5851 + * from aio_complete() (to optimistically update reqs_available) or
5852 + * from aio_get_req() (the we're out of events case). It must be
5853 + * called holding ctx->completion_lock.
5854 + */
5855 +static void refill_reqs_available(struct kioctx *ctx, unsigned head,
5856 + unsigned tail)
5857 +{
5858 + unsigned events_in_ring, completed;
5859 +
5860 + /* Clamp head since userland can write to it. */
5861 + head %= ctx->nr_events;
5862 + if (head <= tail)
5863 + events_in_ring = tail - head;
5864 + else
5865 + events_in_ring = ctx->nr_events - (head - tail);
5866 +
5867 + completed = ctx->completed_events;
5868 + if (events_in_ring < completed)
5869 + completed -= events_in_ring;
5870 + else
5871 + completed = 0;
5872 +
5873 + if (!completed)
5874 + return;
5875 +
5876 + ctx->completed_events -= completed;
5877 + put_reqs_available(ctx, completed);
5878 +}
5879 +
5880 +/* user_refill_reqs_available
5881 + * Called to refill reqs_available when aio_get_req() encounters an
5882 + * out of space in the completion ring.
5883 + */
5884 +static void user_refill_reqs_available(struct kioctx *ctx)
5885 +{
5886 + spin_lock_irq(&ctx->completion_lock);
5887 + if (ctx->completed_events) {
5888 + struct aio_ring *ring;
5889 + unsigned head;
5890 +
5891 + /* Access of ring->head may race with aio_read_events_ring()
5892 + * here, but that's okay since whether we read the old version
5893 + * or the new version, and either will be valid. The important
5894 + * part is that head cannot pass tail since we prevent
5895 + * aio_complete() from updating tail by holding
5896 + * ctx->completion_lock. Even if head is invalid, the check
5897 + * against ctx->completed_events below will make sure we do the
5898 + * safe/right thing.
5899 + */
5900 + ring = kmap_atomic(ctx->ring_pages[0]);
5901 + head = ring->head;
5902 + kunmap_atomic(ring);
5903 +
5904 + refill_reqs_available(ctx, head, ctx->tail);
5905 + }
5906 +
5907 + spin_unlock_irq(&ctx->completion_lock);
5908 +}
5909 +
5910 /* aio_get_req
5911 * Allocate a slot for an aio request.
5912 * Returns NULL if no requests are free.
5913 @@ -888,8 +957,11 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
5914 {
5915 struct kiocb *req;
5916
5917 - if (!get_reqs_available(ctx))
5918 - return NULL;
5919 + if (!get_reqs_available(ctx)) {
5920 + user_refill_reqs_available(ctx);
5921 + if (!get_reqs_available(ctx))
5922 + return NULL;
5923 + }
5924
5925 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
5926 if (unlikely(!req))
5927 @@ -948,8 +1020,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
5928 struct kioctx *ctx = iocb->ki_ctx;
5929 struct aio_ring *ring;
5930 struct io_event *ev_page, *event;
5931 + unsigned tail, pos, head;
5932 unsigned long flags;
5933 - unsigned tail, pos;
5934
5935 /*
5936 * Special case handling for sync iocbs:
5937 @@ -1010,10 +1082,14 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
5938 ctx->tail = tail;
5939
5940 ring = kmap_atomic(ctx->ring_pages[0]);
5941 + head = ring->head;
5942 ring->tail = tail;
5943 kunmap_atomic(ring);
5944 flush_dcache_page(ctx->ring_pages[0]);
5945
5946 + ctx->completed_events++;
5947 + if (ctx->completed_events > 1)
5948 + refill_reqs_available(ctx, head, tail);
5949 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5950
5951 pr_debug("added to ring %p at [%u]\n", iocb, tail);
5952 @@ -1028,7 +1104,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
5953
5954 /* everything turned out well, dispose of the aiocb. */
5955 kiocb_free(iocb);
5956 - put_reqs_available(ctx, 1);
5957
5958 /*
5959 * We have to order our ring_info tail store above and test
5960 @@ -1065,6 +1140,12 @@ static long aio_read_events_ring(struct kioctx *ctx,
5961 tail = ring->tail;
5962 kunmap_atomic(ring);
5963
5964 + /*
5965 + * Ensure that once we've read the current tail pointer, that
5966 + * we also see the events that were stored up to the tail.
5967 + */
5968 + smp_rmb();
5969 +
5970 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
5971
5972 if (head == tail)
5973 diff --git a/fs/buffer.c b/fs/buffer.c
5974 index 27265a8b43c1..71e2d0ed8530 100644
5975 --- a/fs/buffer.c
5976 +++ b/fs/buffer.c
5977 @@ -1029,7 +1029,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
5978 bh = page_buffers(page);
5979 if (bh->b_size == size) {
5980 end_block = init_page_buffers(page, bdev,
5981 - index << sizebits, size);
5982 + (sector_t)index << sizebits,
5983 + size);
5984 goto done;
5985 }
5986 if (!try_to_free_buffers(page))
5987 @@ -1050,7 +1051,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
5988 */
5989 spin_lock(&inode->i_mapping->private_lock);
5990 link_dev_buffers(page, bh);
5991 - end_block = init_page_buffers(page, bdev, index << sizebits, size);
5992 + end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
5993 + size);
5994 spin_unlock(&inode->i_mapping->private_lock);
5995 done:
5996 ret = (block < end_block) ? 1 : -ENXIO;
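
The grow_dev_page() change above widens the page index to sector_t before shifting, so the block number passed to init_page_buffers() is computed in 64-bit arithmetic; without the cast the shift is done in the (typically 32-bit on 32-bit kernels) page-index type and can wrap on large block devices. A stand-alone demonstration of the wrap, with uint32_t/uint64_t standing in for the kernel types:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t index = 0x30000000u;   /* page index ~3 TiB into a device with 4 KiB pages */
        unsigned sizebits = 3;          /* eight 512-byte blocks per 4 KiB page */

        uint32_t narrow = index << sizebits;            /* wraps in 32-bit arithmetic */
        uint64_t wide   = (uint64_t)index << sizebits;  /* mirrors the (sector_t) cast */

        printf("32-bit shift: %u\n64-bit shift: %llu\n",
               narrow, (unsigned long long)wide);
        return 0;
    }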
5997 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
5998 index 68559fd557fb..a5c2812ead68 100644
5999 --- a/fs/cifs/link.c
6000 +++ b/fs/cifs/link.c
6001 @@ -213,8 +213,12 @@ create_mf_symlink(const unsigned int xid, struct cifs_tcon *tcon,
6002 if (rc)
6003 goto out;
6004
6005 - rc = tcon->ses->server->ops->create_mf_symlink(xid, tcon, cifs_sb,
6006 - fromName, buf, &bytes_written);
6007 + if (tcon->ses->server->ops->create_mf_symlink)
6008 + rc = tcon->ses->server->ops->create_mf_symlink(xid, tcon,
6009 + cifs_sb, fromName, buf, &bytes_written);
6010 + else
6011 + rc = -EOPNOTSUPP;
6012 +
6013 if (rc)
6014 goto out;
6015
6016 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
6017 index ead00467282d..f50d79eb52db 100644
6018 --- a/fs/eventpoll.c
6019 +++ b/fs/eventpoll.c
6020 @@ -1852,7 +1852,8 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
6021 goto error_tgt_fput;
6022
6023 /* Check if EPOLLWAKEUP is allowed */
6024 - ep_take_care_of_epollwakeup(&epds);
6025 + if (ep_op_has_event(op))
6026 + ep_take_care_of_epollwakeup(&epds);
6027
6028 /*
6029 * We have to check that the file structure underneath the file descriptor
6030 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
6031 index 5c524180c98e..bc643b97423c 100644
6032 --- a/fs/gfs2/inode.c
6033 +++ b/fs/gfs2/inode.c
6034 @@ -606,8 +606,10 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
6035 if (!IS_ERR(inode)) {
6036 d = d_splice_alias(inode, dentry);
6037 error = PTR_ERR(d);
6038 - if (IS_ERR(d))
6039 + if (IS_ERR(d)) {
6040 + inode = ERR_CAST(d);
6041 goto fail_gunlock;
6042 + }
6043 error = 0;
6044 if (file) {
6045 if (S_ISREG(inode->i_mode)) {
6046 @@ -823,7 +825,6 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
6047
6048 d = d_splice_alias(inode, dentry);
6049 if (IS_ERR(d)) {
6050 - iput(inode);
6051 gfs2_glock_dq_uninit(&gh);
6052 return d;
6053 }
6054 diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
6055 index 6bf06a07f3e0..223e1cb14345 100644
6056 --- a/fs/lockd/svc.c
6057 +++ b/fs/lockd/svc.c
6058 @@ -253,13 +253,11 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net)
6059
6060 error = make_socks(serv, net);
6061 if (error < 0)
6062 - goto err_socks;
6063 + goto err_bind;
6064 set_grace_period(net);
6065 dprintk("lockd_up_net: per-net data created; net=%p\n", net);
6066 return 0;
6067
6068 -err_socks:
6069 - svc_rpcb_cleanup(serv, net);
6070 err_bind:
6071 ln->nlmsvc_users--;
6072 return error;
6073 diff --git a/fs/namei.c b/fs/namei.c
6074 index d5a4faeb39a5..dd2f2c5bda55 100644
6075 --- a/fs/namei.c
6076 +++ b/fs/namei.c
6077 @@ -642,24 +642,22 @@ static int complete_walk(struct nameidata *nd)
6078
6079 static __always_inline void set_root(struct nameidata *nd)
6080 {
6081 - if (!nd->root.mnt)
6082 - get_fs_root(current->fs, &nd->root);
6083 + get_fs_root(current->fs, &nd->root);
6084 }
6085
6086 static int link_path_walk(const char *, struct nameidata *);
6087
6088 -static __always_inline void set_root_rcu(struct nameidata *nd)
6089 +static __always_inline unsigned set_root_rcu(struct nameidata *nd)
6090 {
6091 - if (!nd->root.mnt) {
6092 - struct fs_struct *fs = current->fs;
6093 - unsigned seq;
6094 + struct fs_struct *fs = current->fs;
6095 + unsigned seq, res;
6096
6097 - do {
6098 - seq = read_seqcount_begin(&fs->seq);
6099 - nd->root = fs->root;
6100 - nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
6101 - } while (read_seqcount_retry(&fs->seq, seq));
6102 - }
6103 + do {
6104 + seq = read_seqcount_begin(&fs->seq);
6105 + nd->root = fs->root;
6106 + res = __read_seqcount_begin(&nd->root.dentry->d_seq);
6107 + } while (read_seqcount_retry(&fs->seq, seq));
6108 + return res;
6109 }
6110
6111 static void path_put_conditional(struct path *path, struct nameidata *nd)
6112 @@ -859,7 +857,8 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
6113 return PTR_ERR(s);
6114 }
6115 if (*s == '/') {
6116 - set_root(nd);
6117 + if (!nd->root.mnt)
6118 + set_root(nd);
6119 path_put(&nd->path);
6120 nd->path = nd->root;
6121 path_get(&nd->root);
6122 @@ -1132,7 +1131,8 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
6123
6124 static int follow_dotdot_rcu(struct nameidata *nd)
6125 {
6126 - set_root_rcu(nd);
6127 + if (!nd->root.mnt)
6128 + set_root_rcu(nd);
6129
6130 while (1) {
6131 if (nd->path.dentry == nd->root.dentry &&
6132 @@ -1244,7 +1244,8 @@ static void follow_mount(struct path *path)
6133
6134 static void follow_dotdot(struct nameidata *nd)
6135 {
6136 - set_root(nd);
6137 + if (!nd->root.mnt)
6138 + set_root(nd);
6139
6140 while(1) {
6141 struct dentry *old = nd->path.dentry;
6142 @@ -1842,7 +1843,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
6143 if (*name=='/') {
6144 if (flags & LOOKUP_RCU) {
6145 rcu_read_lock();
6146 - set_root_rcu(nd);
6147 + nd->seq = set_root_rcu(nd);
6148 } else {
6149 set_root(nd);
6150 path_get(&nd->root);
6151 diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
6152 index 0e46d3d1b6cc..1abe4f55dea2 100644
6153 --- a/fs/nfs/nfs4client.c
6154 +++ b/fs/nfs/nfs4client.c
6155 @@ -482,6 +482,16 @@ int nfs40_walk_client_list(struct nfs_client *new,
6156
6157 spin_lock(&nn->nfs_client_lock);
6158 list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
6159 +
6160 + if (pos->rpc_ops != new->rpc_ops)
6161 + continue;
6162 +
6163 + if (pos->cl_proto != new->cl_proto)
6164 + continue;
6165 +
6166 + if (pos->cl_minorversion != new->cl_minorversion)
6167 + continue;
6168 +
6169 /* If "pos" isn't marked ready, we can't trust the
6170 * remaining fields in "pos" */
6171 if (pos->cl_cons_state > NFS_CS_READY) {
6172 @@ -501,15 +511,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
6173 if (pos->cl_cons_state != NFS_CS_READY)
6174 continue;
6175
6176 - if (pos->rpc_ops != new->rpc_ops)
6177 - continue;
6178 -
6179 - if (pos->cl_proto != new->cl_proto)
6180 - continue;
6181 -
6182 - if (pos->cl_minorversion != new->cl_minorversion)
6183 - continue;
6184 -
6185 if (pos->cl_clientid != new->cl_clientid)
6186 continue;
6187
6188 @@ -615,6 +616,16 @@ int nfs41_walk_client_list(struct nfs_client *new,
6189
6190 spin_lock(&nn->nfs_client_lock);
6191 list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
6192 +
6193 + if (pos->rpc_ops != new->rpc_ops)
6194 + continue;
6195 +
6196 + if (pos->cl_proto != new->cl_proto)
6197 + continue;
6198 +
6199 + if (pos->cl_minorversion != new->cl_minorversion)
6200 + continue;
6201 +
6202 /* If "pos" isn't marked ready, we can't trust the
6203 * remaining fields in "pos", especially the client
6204 * ID and serverowner fields. Wait for CREATE_SESSION
6205 @@ -640,15 +651,6 @@ int nfs41_walk_client_list(struct nfs_client *new,
6206 if (pos->cl_cons_state != NFS_CS_READY)
6207 continue;
6208
6209 - if (pos->rpc_ops != new->rpc_ops)
6210 - continue;
6211 -
6212 - if (pos->cl_proto != new->cl_proto)
6213 - continue;
6214 -
6215 - if (pos->cl_minorversion != new->cl_minorversion)
6216 - continue;
6217 -
6218 if (!nfs4_match_clientids(pos, new))
6219 continue;
6220
6221 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
6222 index 17f91a72840b..2e9662ea5451 100644
6223 --- a/fs/nfs/nfs4proc.c
6224 +++ b/fs/nfs/nfs4proc.c
6225 @@ -2558,23 +2558,23 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
6226 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
6227 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
6228 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
6229 - /* Calculate the current open share mode */
6230 - calldata->arg.fmode = 0;
6231 - if (is_rdonly || is_rdwr)
6232 - calldata->arg.fmode |= FMODE_READ;
6233 - if (is_wronly || is_rdwr)
6234 - calldata->arg.fmode |= FMODE_WRITE;
6235 /* Calculate the change in open mode */
6236 + calldata->arg.fmode = 0;
6237 if (state->n_rdwr == 0) {
6238 - if (state->n_rdonly == 0) {
6239 - call_close |= is_rdonly || is_rdwr;
6240 - calldata->arg.fmode &= ~FMODE_READ;
6241 - }
6242 - if (state->n_wronly == 0) {
6243 - call_close |= is_wronly || is_rdwr;
6244 - calldata->arg.fmode &= ~FMODE_WRITE;
6245 - }
6246 - }
6247 + if (state->n_rdonly == 0)
6248 + call_close |= is_rdonly;
6249 + else if (is_rdonly)
6250 + calldata->arg.fmode |= FMODE_READ;
6251 + if (state->n_wronly == 0)
6252 + call_close |= is_wronly;
6253 + else if (is_wronly)
6254 + calldata->arg.fmode |= FMODE_WRITE;
6255 + } else if (is_rdwr)
6256 + calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
6257 +
6258 + if (calldata->arg.fmode == 0)
6259 + call_close |= is_rdwr;
6260 +
6261 if (!nfs4_valid_open_stateid(state))
6262 call_close = 0;
6263 spin_unlock(&state->owner->so_lock);
6264 diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
6265 index 7e350c562e0e..1e0bbae06ee7 100644
6266 --- a/fs/nilfs2/inode.c
6267 +++ b/fs/nilfs2/inode.c
6268 @@ -24,6 +24,7 @@
6269 #include <linux/buffer_head.h>
6270 #include <linux/gfp.h>
6271 #include <linux/mpage.h>
6272 +#include <linux/pagemap.h>
6273 #include <linux/writeback.h>
6274 #include <linux/aio.h>
6275 #include "nilfs.h"
6276 @@ -219,10 +220,10 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
6277
6278 static int nilfs_set_page_dirty(struct page *page)
6279 {
6280 + struct inode *inode = page->mapping->host;
6281 int ret = __set_page_dirty_nobuffers(page);
6282
6283 if (page_has_buffers(page)) {
6284 - struct inode *inode = page->mapping->host;
6285 unsigned nr_dirty = 0;
6286 struct buffer_head *bh, *head;
6287
6288 @@ -245,6 +246,10 @@ static int nilfs_set_page_dirty(struct page *page)
6289
6290 if (nr_dirty)
6291 nilfs_set_file_dirty(inode, nr_dirty);
6292 + } else if (ret) {
6293 + unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
6294 +
6295 + nilfs_set_file_dirty(inode, nr_dirty);
6296 }
6297 return ret;
6298 }
6299 diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
6300 index 238a5930cb3c..9d7e2b9659cb 100644
6301 --- a/fs/notify/fdinfo.c
6302 +++ b/fs/notify/fdinfo.c
6303 @@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
6304 {
6305 struct {
6306 struct file_handle handle;
6307 - u8 pad[64];
6308 + u8 pad[MAX_HANDLE_SZ];
6309 } f;
6310 int size, ret, i;
6311
6312 @@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
6313 size = f.handle.handle_bytes >> 2;
6314
6315 ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
6316 - if ((ret == 255) || (ret == -ENOSPC)) {
6317 + if ((ret == FILEID_INVALID) || (ret < 0)) {
6318 WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
6319 return 0;
6320 }
6321 diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
6322 index af3f7aa73e13..1be3398c96f6 100644
6323 --- a/fs/ocfs2/dlm/dlmmaster.c
6324 +++ b/fs/ocfs2/dlm/dlmmaster.c
6325 @@ -650,12 +650,9 @@ void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
6326 clear_bit(bit, res->refmap);
6327 }
6328
6329 -
6330 -void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
6331 +static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
6332 struct dlm_lock_resource *res)
6333 {
6334 - assert_spin_locked(&res->spinlock);
6335 -
6336 res->inflight_locks++;
6337
6338 mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
6339 @@ -663,6 +660,13 @@ void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
6340 __builtin_return_address(0));
6341 }
6342
6343 +void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
6344 + struct dlm_lock_resource *res)
6345 +{
6346 + assert_spin_locked(&res->spinlock);
6347 + __dlm_lockres_grab_inflight_ref(dlm, res);
6348 +}
6349 +
6350 void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
6351 struct dlm_lock_resource *res)
6352 {
6353 @@ -852,10 +856,8 @@ lookup:
6354 /* finally add the lockres to its hash bucket */
6355 __dlm_insert_lockres(dlm, res);
6356
6357 - /* Grab inflight ref to pin the resource */
6358 - spin_lock(&res->spinlock);
6359 - dlm_lockres_grab_inflight_ref(dlm, res);
6360 - spin_unlock(&res->spinlock);
6361 + /* since this lockres is new it doesn't require the spinlock */
6362 + __dlm_lockres_grab_inflight_ref(dlm, res);
6363
6364 /* get an extra ref on the mle in case this is a BLOCK
6365 * if so, the creator of the BLOCK may try to put the last
6366 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
6367 index e9c4f190ffae..ac46782c10d3 100644
6368 --- a/include/acpi/acpi_bus.h
6369 +++ b/include/acpi/acpi_bus.h
6370 @@ -118,6 +118,7 @@ struct acpi_device;
6371 struct acpi_hotplug_profile {
6372 struct kobject kobj;
6373 int (*scan_dependent)(struct acpi_device *adev);
6374 + void (*notify_online)(struct acpi_device *adev);
6375 bool enabled:1;
6376 bool demand_offline:1;
6377 };
6378 diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
6379 index 369cf2cd5144..68f46cd5d514 100644
6380 --- a/include/linux/iio/trigger.h
6381 +++ b/include/linux/iio/trigger.h
6382 @@ -84,10 +84,12 @@ static inline void iio_trigger_put(struct iio_trigger *trig)
6383 put_device(&trig->dev);
6384 }
6385
6386 -static inline void iio_trigger_get(struct iio_trigger *trig)
6387 +static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
6388 {
6389 get_device(&trig->dev);
6390 __module_get(trig->ops->owner);
6391 +
6392 + return trig;
6393 }
6394
6395 /**
6396 diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
6397 index 535f158977b9..8cf350325dc6 100644
6398 --- a/include/linux/seqlock.h
6399 +++ b/include/linux/seqlock.h
6400 @@ -164,8 +164,6 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
6401 static inline unsigned raw_seqcount_begin(const seqcount_t *s)
6402 {
6403 unsigned ret = ACCESS_ONCE(s->sequence);
6404 -
6405 - seqcount_lockdep_reader_access(s);
6406 smp_rmb();
6407 return ret & ~1;
6408 }
6409 diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
6410 index 502073a53dd3..b483abd34493 100644
6411 --- a/include/linux/vga_switcheroo.h
6412 +++ b/include/linux/vga_switcheroo.h
6413 @@ -64,6 +64,7 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
6414 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
6415
6416 int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
6417 +void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
6418 int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
6419 #else
6420
6421 @@ -82,6 +83,7 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
6422 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
6423
6424 static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
6425 +static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
6426 static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
6427
6428 #endif
6429 diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
6430 index 704f4f652d0a..6c62cfa25f1a 100644
6431 --- a/include/linux/workqueue.h
6432 +++ b/include/linux/workqueue.h
6433 @@ -452,7 +452,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
6434 alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
6435 1, (name))
6436 #define create_singlethread_workqueue(name) \
6437 - alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name))
6438 + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
6439
6440 extern void destroy_workqueue(struct workqueue_struct *wq);
6441
6442 diff --git a/include/net/regulatory.h b/include/net/regulatory.h
6443 index b07cdc9fa454..f103f30122a1 100644
6444 --- a/include/net/regulatory.h
6445 +++ b/include/net/regulatory.h
6446 @@ -160,7 +160,7 @@ struct ieee80211_reg_rule {
6447 struct ieee80211_regdomain {
6448 struct rcu_head rcu_head;
6449 u32 n_reg_rules;
6450 - char alpha2[2];
6451 + char alpha2[3];
6452 enum nl80211_dfs_regions dfs_region;
6453 struct ieee80211_reg_rule reg_rules[];
6454 };
6455 diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
6456 index 29e9c7aa9c66..f279394aafb0 100644
6457 --- a/include/uapi/linux/usb/functionfs.h
6458 +++ b/include/uapi/linux/usb/functionfs.h
6459 @@ -27,24 +27,18 @@ struct usb_endpoint_descriptor_no_audio {
6460 __u8 bInterval;
6461 } __attribute__((packed));
6462
6463 -/* Legacy format, deprecated as of 3.14. */
6464 -struct usb_functionfs_descs_head {
6465 - __le32 magic;
6466 - __le32 length;
6467 - __le32 fs_count;
6468 - __le32 hs_count;
6469 -} __attribute__((packed, deprecated));
6470
6471 /*
6472 * All numbers must be in little endian order.
6473 */
6474
6475 +/* Legacy format, deprecated as of 3.14. */
6476 struct usb_functionfs_descs_head {
6477 __le32 magic;
6478 __le32 length;
6479 __le32 fs_count;
6480 __le32 hs_count;
6481 -} __attribute__((packed));
6482 +} __attribute__((packed, deprecated));
6483
6484 /*
6485 * Descriptors format:
6486 diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
6487 index c38355c1f3c9..1590c49cae57 100644
6488 --- a/include/uapi/linux/xattr.h
6489 +++ b/include/uapi/linux/xattr.h
6490 @@ -13,7 +13,7 @@
6491 #ifndef _UAPI_LINUX_XATTR_H
6492 #define _UAPI_LINUX_XATTR_H
6493
6494 -#ifdef __UAPI_DEF_XATTR
6495 +#if __UAPI_DEF_XATTR
6496 #define __USE_KERNEL_XATTR_DEFS
6497
6498 #define XATTR_CREATE 0x1 /* set value, fail if attr already exists */
6499 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
6500 index 0c753ddd223b..550e2050d778 100644
6501 --- a/kernel/cgroup.c
6502 +++ b/kernel/cgroup.c
6503 @@ -3663,7 +3663,6 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
6504
6505 l = cgroup_pidlist_find_create(cgrp, type);
6506 if (!l) {
6507 - mutex_unlock(&cgrp->pidlist_mutex);
6508 pidlist_free(array);
6509 return -ENOMEM;
6510 }
6511 diff --git a/kernel/events/core.c b/kernel/events/core.c
6512 index f774e9365a03..3a140ca37777 100644
6513 --- a/kernel/events/core.c
6514 +++ b/kernel/events/core.c
6515 @@ -1516,6 +1516,11 @@ retry:
6516 */
6517 if (ctx->is_active) {
6518 raw_spin_unlock_irq(&ctx->lock);
6519 + /*
6520 + * Reload the task pointer, it might have been changed by
6521 + * a concurrent perf_event_context_sched_out().
6522 + */
6523 + task = ctx->task;
6524 goto retry;
6525 }
6526
6527 @@ -1957,6 +1962,11 @@ retry:
6528 */
6529 if (ctx->is_active) {
6530 raw_spin_unlock_irq(&ctx->lock);
6531 + /*
6532 + * Reload the task pointer, it might have been changed by
6533 + * a concurrent perf_event_context_sched_out().
6534 + */
6535 + task = ctx->task;
6536 goto retry;
6537 }
6538
6539 diff --git a/kernel/futex.c b/kernel/futex.c
6540 index e3087afb7429..0b0dc02aabce 100644
6541 --- a/kernel/futex.c
6542 +++ b/kernel/futex.c
6543 @@ -2614,6 +2614,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
6544 * shared futexes. We need to compare the keys:
6545 */
6546 if (match_futex(&q.key, &key2)) {
6547 + queue_unlock(hb);
6548 ret = -EINVAL;
6549 goto out_put_keys;
6550 }
6551 diff --git a/kernel/kcmp.c b/kernel/kcmp.c
6552 index e30ac0fe61c3..0aa69ea1d8fd 100644
6553 --- a/kernel/kcmp.c
6554 +++ b/kernel/kcmp.c
6555 @@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int type)
6556 */
6557 static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
6558 {
6559 - long ret;
6560 + long t1, t2;
6561
6562 - ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
6563 + t1 = kptr_obfuscate((long)v1, type);
6564 + t2 = kptr_obfuscate((long)v2, type);
6565
6566 - return (ret < 0) | ((ret > 0) << 1);
6567 + return (t1 < t2) | ((t1 > t2) << 1);
6568 }
6569
6570 /* The caller must have pinned the task */
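
The kcmp_ptr() change above replaces a subtraction, whose sign was used as the comparison result, with two explicit comparisons: the difference of two longs can overflow and report the wrong ordering. A small stand-alone illustration (values chosen only to trigger the wrap; signed overflow is undefined in ISO C and merely wraps with typical compilers):

    #include <stdio.h>
    #include <limits.h>

    static int cmp_by_subtraction(long a, long b)
    {
        long d = a - b;                 /* may overflow and flip sign */
        return (d < 0) | ((d > 0) << 1);
    }

    static int cmp_directly(long a, long b)
    {
        return (a < b) | ((a > b) << 1);
    }

    int main(void)
    {
        long a = LONG_MAX, b = -1;      /* a > b, but a - b wraps negative */

        printf("subtraction says %d, direct comparison says %d\n",
               cmp_by_subtraction(a, b), cmp_directly(a, b));
        return 0;
    }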
6571 diff --git a/kernel/power/main.c b/kernel/power/main.c
6572 index 1d1bf630e6e9..3ae41cdd804a 100644
6573 --- a/kernel/power/main.c
6574 +++ b/kernel/power/main.c
6575 @@ -293,12 +293,12 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
6576 {
6577 char *s = buf;
6578 #ifdef CONFIG_SUSPEND
6579 - int i;
6580 + suspend_state_t i;
6581 +
6582 + for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
6583 + if (pm_states[i].state)
6584 + s += sprintf(s,"%s ", pm_states[i].label);
6585
6586 - for (i = 0; i < PM_SUSPEND_MAX; i++) {
6587 - if (pm_states[i] && valid_state(i))
6588 - s += sprintf(s,"%s ", pm_states[i]);
6589 - }
6590 #endif
6591 #ifdef CONFIG_HIBERNATION
6592 s += sprintf(s, "%s\n", "disk");
6593 @@ -314,7 +314,7 @@ static suspend_state_t decode_state(const char *buf, size_t n)
6594 {
6595 #ifdef CONFIG_SUSPEND
6596 suspend_state_t state = PM_SUSPEND_MIN;
6597 - const char * const *s;
6598 + struct pm_sleep_state *s;
6599 #endif
6600 char *p;
6601 int len;
6602 @@ -328,8 +328,9 @@ static suspend_state_t decode_state(const char *buf, size_t n)
6603
6604 #ifdef CONFIG_SUSPEND
6605 for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++)
6606 - if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
6607 - return state;
6608 + if (s->state && len == strlen(s->label)
6609 + && !strncmp(buf, s->label, len))
6610 + return s->state;
6611 #endif
6612
6613 return PM_SUSPEND_ON;
6614 @@ -447,8 +448,8 @@ static ssize_t autosleep_show(struct kobject *kobj,
6615
6616 #ifdef CONFIG_SUSPEND
6617 if (state < PM_SUSPEND_MAX)
6618 - return sprintf(buf, "%s\n", valid_state(state) ?
6619 - pm_states[state] : "error");
6620 + return sprintf(buf, "%s\n", pm_states[state].state ?
6621 + pm_states[state].label : "error");
6622 #endif
6623 #ifdef CONFIG_HIBERNATION
6624 return sprintf(buf, "disk\n");
6625 diff --git a/kernel/power/power.h b/kernel/power/power.h
6626 index 7d4b7ffb3c1d..f770cad3666c 100644
6627 --- a/kernel/power/power.h
6628 +++ b/kernel/power/power.h
6629 @@ -175,17 +175,20 @@ extern void swsusp_show_speed(struct timeval *, struct timeval *,
6630 unsigned int, char *);
6631
6632 #ifdef CONFIG_SUSPEND
6633 +struct pm_sleep_state {
6634 + const char *label;
6635 + suspend_state_t state;
6636 +};
6637 +
6638 /* kernel/power/suspend.c */
6639 -extern const char *const pm_states[];
6640 +extern struct pm_sleep_state pm_states[];
6641
6642 -extern bool valid_state(suspend_state_t state);
6643 extern int suspend_devices_and_enter(suspend_state_t state);
6644 #else /* !CONFIG_SUSPEND */
6645 static inline int suspend_devices_and_enter(suspend_state_t state)
6646 {
6647 return -ENOSYS;
6648 }
6649 -static inline bool valid_state(suspend_state_t state) { return false; }
6650 #endif /* !CONFIG_SUSPEND */
6651
6652 #ifdef CONFIG_PM_TEST_SUSPEND
6653 diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
6654 index 62ee437b5c7e..5455d5c3c149 100644
6655 --- a/kernel/power/suspend.c
6656 +++ b/kernel/power/suspend.c
6657 @@ -29,10 +29,10 @@
6658
6659 #include "power.h"
6660
6661 -const char *const pm_states[PM_SUSPEND_MAX] = {
6662 - [PM_SUSPEND_FREEZE] = "freeze",
6663 - [PM_SUSPEND_STANDBY] = "standby",
6664 - [PM_SUSPEND_MEM] = "mem",
6665 +struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
6666 + [PM_SUSPEND_FREEZE] = { .label = "freeze", .state = PM_SUSPEND_FREEZE },
6667 + [PM_SUSPEND_STANDBY] = { .label = "standby", },
6668 + [PM_SUSPEND_MEM] = { .label = "mem", },
6669 };
6670
6671 static const struct platform_suspend_ops *suspend_ops;
6672 @@ -62,42 +62,34 @@ void freeze_wake(void)
6673 }
6674 EXPORT_SYMBOL_GPL(freeze_wake);
6675
6676 +static bool valid_state(suspend_state_t state)
6677 +{
6678 + /*
6679 + * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low-level
6680 + * support and need to be valid to the low-level
6681 + * implementation; no valid callback implies that none are valid.
6682 + */
6683 + return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
6684 +}
6685 +
6686 /**
6687 * suspend_set_ops - Set the global suspend method table.
6688 * @ops: Suspend operations to use.
6689 */
6690 void suspend_set_ops(const struct platform_suspend_ops *ops)
6691 {
6692 + suspend_state_t i;
6693 +
6694 lock_system_sleep();
6695 +
6696 suspend_ops = ops;
6697 + for (i = PM_SUSPEND_STANDBY; i <= PM_SUSPEND_MEM; i++)
6698 + pm_states[i].state = valid_state(i) ? i : 0;
6699 +
6700 unlock_system_sleep();
6701 }
6702 EXPORT_SYMBOL_GPL(suspend_set_ops);
6703
6704 -bool valid_state(suspend_state_t state)
6705 -{
6706 - if (state == PM_SUSPEND_FREEZE) {
6707 -#ifdef CONFIG_PM_DEBUG
6708 - if (pm_test_level != TEST_NONE &&
6709 - pm_test_level != TEST_FREEZER &&
6710 - pm_test_level != TEST_DEVICES &&
6711 - pm_test_level != TEST_PLATFORM) {
6712 - printk(KERN_WARNING "Unsupported pm_test mode for "
6713 - "freeze state, please choose "
6714 - "none/freezer/devices/platform.\n");
6715 - return false;
6716 - }
6717 -#endif
6718 - return true;
6719 - }
6720 - /*
6721 - * PM_SUSPEND_STANDBY and PM_SUSPEND_MEMORY states need lowlevel
6722 - * support and need to be valid to the lowlevel
6723 - * implementation, no valid callback implies that none are valid.
6724 - */
6725 - return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
6726 -}
6727 -
6728 /**
6729 * suspend_valid_only_mem - Generic memory-only valid callback.
6730 *
6731 @@ -324,9 +316,17 @@ static int enter_state(suspend_state_t state)
6732 {
6733 int error;
6734
6735 - if (!valid_state(state))
6736 - return -ENODEV;
6737 -
6738 + if (state == PM_SUSPEND_FREEZE) {
6739 +#ifdef CONFIG_PM_DEBUG
6740 + if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
6741 + pr_warning("PM: Unsupported test mode for freeze state, "
6742 + "please choose none/freezer/devices/platform.\n");
6743 + return -EAGAIN;
6744 + }
6745 +#endif
6746 + } else if (!valid_state(state)) {
6747 + return -EINVAL;
6748 + }
6749 if (!mutex_trylock(&pm_mutex))
6750 return -EBUSY;
6751
6752 @@ -337,7 +337,7 @@ static int enter_state(suspend_state_t state)
6753 sys_sync();
6754 printk("done.\n");
6755
6756 - pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
6757 + pr_debug("PM: Preparing system for %s sleep\n", pm_states[state].label);
6758 error = suspend_prepare(state);
6759 if (error)
6760 goto Unlock;
6761 @@ -345,7 +345,7 @@ static int enter_state(suspend_state_t state)
6762 if (suspend_test(TEST_FREEZER))
6763 goto Finish;
6764
6765 - pr_debug("PM: Entering %s sleep\n", pm_states[state]);
6766 + pr_debug("PM: Entering %s sleep\n", pm_states[state].label);
6767 pm_restrict_gfp_mask();
6768 error = suspend_devices_and_enter(state);
6769 pm_restore_gfp_mask();
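
After the suspend.c changes above, pm_states[] pairs each label with a state value, and suspend_set_ops() fills in .state only for states the platform's ->valid() callback accepts, so a zero .state now means "unsupported" to state_show() and decode_state(). A user-space sketch of that lookup pattern; the enum values and table contents are illustrative, not the kernel's:

    #include <stdio.h>
    #include <string.h>

    enum sleep_state { STATE_ON, STATE_FREEZE, STATE_STANDBY, STATE_MEM, STATE_MAX };

    struct sleep_entry {
        const char *label;
        enum sleep_state state;         /* 0 (STATE_ON) means "not valid on this platform" */
    };

    static struct sleep_entry states[STATE_MAX] = {
        [STATE_FREEZE]  = { "freeze",  STATE_FREEZE }, /* always available */
        [STATE_STANDBY] = { "standby", 0 },            /* would be filled in by suspend_set_ops() */
        [STATE_MEM]     = { "mem",     STATE_MEM },
    };

    static enum sleep_state decode(const char *buf)
    {
        for (int i = STATE_FREEZE; i < STATE_MAX; i++)
            if (states[i].state && !strcmp(states[i].label, buf))
                return states[i].state;
        return STATE_ON;
    }

    int main(void)
    {
        printf("mem -> %d, standby -> %d\n", decode("mem"), decode("standby"));
        return 0;
    }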
6770 diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
6771 index 9b2a1d58558d..269b097e78ea 100644
6772 --- a/kernel/power/suspend_test.c
6773 +++ b/kernel/power/suspend_test.c
6774 @@ -92,13 +92,13 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
6775 }
6776
6777 if (state == PM_SUSPEND_MEM) {
6778 - printk(info_test, pm_states[state]);
6779 + printk(info_test, pm_states[state].label);
6780 status = pm_suspend(state);
6781 if (status == -ENODEV)
6782 state = PM_SUSPEND_STANDBY;
6783 }
6784 if (state == PM_SUSPEND_STANDBY) {
6785 - printk(info_test, pm_states[state]);
6786 + printk(info_test, pm_states[state].label);
6787 status = pm_suspend(state);
6788 }
6789 if (status < 0)
6790 @@ -136,18 +136,16 @@ static char warn_bad_state[] __initdata =
6791
6792 static int __init setup_test_suspend(char *value)
6793 {
6794 - unsigned i;
6795 + suspend_state_t i;
6796
6797 /* "=mem" ==> "mem" */
6798 value++;
6799 - for (i = 0; i < PM_SUSPEND_MAX; i++) {
6800 - if (!pm_states[i])
6801 - continue;
6802 - if (strcmp(pm_states[i], value) != 0)
6803 - continue;
6804 - test_state = (__force suspend_state_t) i;
6805 - return 0;
6806 - }
6807 + for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
6808 + if (!strcmp(pm_states[i].label, value)) {
6809 + test_state = pm_states[i].state;
6810 + return 0;
6811 + }
6812 +
6813 printk(warn_bad_state, value);
6814 return 0;
6815 }
6816 @@ -164,8 +162,8 @@ static int __init test_suspend(void)
6817 /* PM is initialized by now; is that state testable? */
6818 if (test_state == PM_SUSPEND_ON)
6819 goto done;
6820 - if (!valid_state(test_state)) {
6821 - printk(warn_bad_state, pm_states[test_state]);
6822 + if (!pm_states[test_state].state) {
6823 + printk(warn_bad_state, pm_states[test_state].label);
6824 goto done;
6825 }
6826
6827 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
6828 index fe75444ae7ec..cd45a0727a16 100644
6829 --- a/kernel/time/alarmtimer.c
6830 +++ b/kernel/time/alarmtimer.c
6831 @@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
6832 static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
6833 ktime_t now)
6834 {
6835 + unsigned long flags;
6836 struct k_itimer *ptr = container_of(alarm, struct k_itimer,
6837 it.alarm.alarmtimer);
6838 - if (posix_timer_event(ptr, 0) != 0)
6839 - ptr->it_overrun++;
6840 + enum alarmtimer_restart result = ALARMTIMER_NORESTART;
6841 +
6842 + spin_lock_irqsave(&ptr->it_lock, flags);
6843 + if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
6844 + if (posix_timer_event(ptr, 0) != 0)
6845 + ptr->it_overrun++;
6846 + }
6847
6848 /* Re-add periodic timers */
6849 if (ptr->it.alarm.interval.tv64) {
6850 ptr->it_overrun += alarm_forward(alarm, now,
6851 ptr->it.alarm.interval);
6852 - return ALARMTIMER_RESTART;
6853 + result = ALARMTIMER_RESTART;
6854 }
6855 - return ALARMTIMER_NORESTART;
6856 + spin_unlock_irqrestore(&ptr->it_lock, flags);
6857 +
6858 + return result;
6859 }
6860
6861 /**
6862 @@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer)
6863 * @new_timer: k_itimer pointer
6864 * @cur_setting: itimerspec data to fill
6865 *
6866 - * Copies the itimerspec data out from the k_itimer
6867 + * Copies out the current itimerspec data
6868 */
6869 static void alarm_timer_get(struct k_itimer *timr,
6870 struct itimerspec *cur_setting)
6871 {
6872 - memset(cur_setting, 0, sizeof(struct itimerspec));
6873 + ktime_t relative_expiry_time =
6874 + alarm_expires_remaining(&(timr->it.alarm.alarmtimer));
6875 +
6876 + if (ktime_to_ns(relative_expiry_time) > 0) {
6877 + cur_setting->it_value = ktime_to_timespec(relative_expiry_time);
6878 + } else {
6879 + cur_setting->it_value.tv_sec = 0;
6880 + cur_setting->it_value.tv_nsec = 0;
6881 + }
6882
6883 - cur_setting->it_interval =
6884 - ktime_to_timespec(timr->it.alarm.interval);
6885 - cur_setting->it_value =
6886 - ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires);
6887 - return;
6888 + cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval);
6889 }
6890
6891 /**
6892 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
6893 index a53f1bbc546b..773aba836e81 100644
6894 --- a/kernel/trace/ring_buffer.c
6895 +++ b/kernel/trace/ring_buffer.c
6896 @@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
6897 work = &cpu_buffer->irq_work;
6898 }
6899
6900 - work->waiters_pending = true;
6901 poll_wait(filp, &work->waiters, poll_table);
6902 + work->waiters_pending = true;
6903 + /*
6904 + * There's a tight race between setting the waiters_pending and
6905 + * checking if the ring buffer is empty. Once the waiters_pending bit
6906 + * is set, the next event will wake the task up, but we can get stuck
6907 + * if there's only a single event already in the buffer.
6908 + *
6909 + * FIXME: Ideally, we need a memory barrier on the writer side as well,
6910 + * but adding a memory barrier to all events will cause too much of a
6911 + * performance hit in the fast path. We only need a memory barrier when
6912 + * the buffer goes from empty to having content. But as this race is
6913 + * extremely small, and it's not a problem if another event comes in, we
6914 + * will fix it later.
6915 + */
6916 + smp_mb();
6917
6918 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
6919 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
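
The ring_buffer.c comment above describes the usual "set the flag, then re-check the state" protocol for avoiding a lost wakeup, with a full barrier between the two steps on the waiter side (the patch adds only that waiter-side barrier; the writer-side barrier is deliberately left as a FIXME). A user-space analogue of the ordering using C11 atomics; this is an illustration of the pattern, not the kernel's implementation, which uses smp_mb() and its own wait machinery:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_bool waiters_pending;  /* waiter announces interest */
    static atomic_int  entries;          /* stand-in for "buffer is non-empty" */

    /* Waiter side: announce interest first, fence, then re-check emptiness so an
     * event published just before the announcement is not missed. */
    static int waiter_should_sleep(void)
    {
        atomic_store_explicit(&waiters_pending, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);   /* analogue of smp_mb() */
        return atomic_load_explicit(&entries, memory_order_relaxed) == 0;
    }

    /* Writer side: publish the event, fence, then check whether anyone waits. */
    static int writer_should_wake(void)
    {
        atomic_fetch_add_explicit(&entries, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&waiters_pending, memory_order_relaxed);
    }

    int main(void)
    {
        /* Single-threaded walk-through of the protocol only. */
        printf("writer wakes waiter: %d\n", writer_should_wake());
        printf("waiter sleeps: %d\n", waiter_should_sleep());
        return 0;
    }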
6920 diff --git a/mm/memblock.c b/mm/memblock.c
6921 index 39a31e7f0045..0739dc1b4095 100644
6922 --- a/mm/memblock.c
6923 +++ b/mm/memblock.c
6924 @@ -183,8 +183,7 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
6925 phys_addr_t align, phys_addr_t start,
6926 phys_addr_t end, int nid)
6927 {
6928 - int ret;
6929 - phys_addr_t kernel_end;
6930 + phys_addr_t kernel_end, ret;
6931
6932 /* pump up @end */
6933 if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
6934 diff --git a/mm/memory.c b/mm/memory.c
6935 index 2121d8b8db56..492e36f27f43 100644
6936 --- a/mm/memory.c
6937 +++ b/mm/memory.c
6938 @@ -1120,7 +1120,7 @@ again:
6939 addr) != page->index) {
6940 pte_t ptfile = pgoff_to_pte(page->index);
6941 if (pte_soft_dirty(ptent))
6942 - pte_file_mksoft_dirty(ptfile);
6943 + ptfile = pte_file_mksoft_dirty(ptfile);
6944 set_pte_at(mm, addr, pte, ptfile);
6945 }
6946 if (PageAnon(page))
6947 diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
6948 index 3707c71ae4cd..51108165f829 100644
6949 --- a/mm/percpu-vm.c
6950 +++ b/mm/percpu-vm.c
6951 @@ -108,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
6952 int page_start, int page_end)
6953 {
6954 const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
6955 - unsigned int cpu;
6956 + unsigned int cpu, tcpu;
6957 int i;
6958
6959 for_each_possible_cpu(cpu) {
6960 @@ -116,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
6961 struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
6962
6963 *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
6964 - if (!*pagep) {
6965 - pcpu_free_pages(chunk, pages, populated,
6966 - page_start, page_end);
6967 - return -ENOMEM;
6968 - }
6969 + if (!*pagep)
6970 + goto err;
6971 }
6972 }
6973 return 0;
6974 +
6975 +err:
6976 + while (--i >= page_start)
6977 + __free_page(pages[pcpu_page_idx(cpu, i)]);
6978 +
6979 + for_each_possible_cpu(tcpu) {
6980 + if (tcpu == cpu)
6981 + break;
6982 + for (i = page_start; i < page_end; i++)
6983 + __free_page(pages[pcpu_page_idx(tcpu, i)]);
6984 + }
6985 + return -ENOMEM;
6986 }
6987
6988 /**
6989 @@ -263,6 +272,7 @@ err:
6990 __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
6991 page_end - page_start);
6992 }
6993 + pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
6994 return err;
6995 }
6996
6997 diff --git a/mm/percpu.c b/mm/percpu.c
6998 index a2a54a85f691..8cd4308471c3 100644
6999 --- a/mm/percpu.c
7000 +++ b/mm/percpu.c
7001 @@ -1917,6 +1917,8 @@ void __init setup_per_cpu_areas(void)
7002
7003 if (pcpu_setup_first_chunk(ai, fc) < 0)
7004 panic("Failed to initialize percpu areas.");
7005 +
7006 + pcpu_free_alloc_info(ai);
7007 }
7008
7009 #endif /* CONFIG_SMP */
7010 diff --git a/mm/shmem.c b/mm/shmem.c
7011 index ff85863587ee..f0d698ba7d0f 100644
7012 --- a/mm/shmem.c
7013 +++ b/mm/shmem.c
7014 @@ -2143,8 +2143,10 @@ static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct
7015
7016 if (new_dentry->d_inode) {
7017 (void) shmem_unlink(new_dir, new_dentry);
7018 - if (they_are_dirs)
7019 + if (they_are_dirs) {
7020 + drop_nlink(new_dentry->d_inode);
7021 drop_nlink(old_dir);
7022 + }
7023 } else if (they_are_dirs) {
7024 drop_nlink(old_dir);
7025 inc_nlink(new_dir);
7026 diff --git a/mm/slab.c b/mm/slab.c
7027 index 6dd8d5f3a3ac..ea854eb2388c 100644
7028 --- a/mm/slab.c
7029 +++ b/mm/slab.c
7030 @@ -2189,7 +2189,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
7031 int
7032 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
7033 {
7034 - size_t left_over, freelist_size, ralign;
7035 + size_t left_over, freelist_size;
7036 + size_t ralign = BYTES_PER_WORD;
7037 gfp_t gfp;
7038 int err;
7039 size_t size = cachep->size;
7040 @@ -2222,14 +2223,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
7041 size &= ~(BYTES_PER_WORD - 1);
7042 }
7043
7044 - /*
7045 - * Redzoning and user store require word alignment or possibly larger.
7046 - * Note this will be overridden by architecture or caller mandated
7047 - * alignment if either is greater than BYTES_PER_WORD.
7048 - */
7049 - if (flags & SLAB_STORE_USER)
7050 - ralign = BYTES_PER_WORD;
7051 -
7052 if (flags & SLAB_RED_ZONE) {
7053 ralign = REDZONE_ALIGN;
7054 /* If redzoning, ensure that the second redzone is suitably
7055 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
7056 index e6a84cb1a5e4..189eef014c4f 100644
7057 --- a/net/mac80211/mlme.c
7058 +++ b/net/mac80211/mlme.c
7059 @@ -4240,8 +4240,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
7060 rcu_read_unlock();
7061
7062 if (bss->wmm_used && bss->uapsd_supported &&
7063 - (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD) &&
7064 - sdata->wmm_acm != 0xff) {
7065 + (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
7066 assoc_data->uapsd = true;
7067 ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
7068 } else {
7069 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
7070 index a8eb0a89326a..610e19c0e13f 100644
7071 --- a/net/netfilter/ipvs/ip_vs_conn.c
7072 +++ b/net/netfilter/ipvs/ip_vs_conn.c
7073 @@ -797,7 +797,6 @@ static void ip_vs_conn_expire(unsigned long data)
7074 ip_vs_control_del(cp);
7075
7076 if (cp->flags & IP_VS_CONN_F_NFCT) {
7077 - ip_vs_conn_drop_conntrack(cp);
7078 /* Do not access conntracks during subsys cleanup
7079 * because nf_conntrack_find_get can not be used after
7080 * conntrack cleanup for the net.
7081 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
7082 index 3d2d2c8108ca..27d3f40de3cd 100644
7083 --- a/net/netfilter/ipvs/ip_vs_core.c
7084 +++ b/net/netfilter/ipvs/ip_vs_core.c
7085 @@ -1906,7 +1906,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
7086 {
7087 .hook = ip_vs_local_reply6,
7088 .owner = THIS_MODULE,
7089 - .pf = NFPROTO_IPV4,
7090 + .pf = NFPROTO_IPV6,
7091 .hooknum = NF_INET_LOCAL_OUT,
7092 .priority = NF_IP6_PRI_NAT_DST + 1,
7093 },
7094 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
7095 index 7f0e1cf2d7e8..1692e7534759 100644
7096 --- a/net/netfilter/ipvs/ip_vs_xmit.c
7097 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
7098 @@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
7099 iph->nexthdr = IPPROTO_IPV6;
7100 iph->payload_len = old_iph->payload_len;
7101 be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
7102 - iph->priority = old_iph->priority;
7103 memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
7104 + ipv6_change_dsfield(iph, 0, ipv6_get_dsfield(old_iph));
7105 iph->daddr = cp->daddr.in6;
7106 iph->saddr = saddr;
7107 iph->hop_limit = old_iph->hop_limit;
7108 diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
7109 index 9a8e77e7f8d4..ef5c75a5b200 100644
7110 --- a/net/netfilter/xt_cgroup.c
7111 +++ b/net/netfilter/xt_cgroup.c
7112 @@ -31,7 +31,7 @@ static int cgroup_mt_check(const struct xt_mtchk_param *par)
7113 if (info->invert & ~1)
7114 return -EINVAL;
7115
7116 - return info->id ? 0 : -EINVAL;
7117 + return 0;
7118 }
7119
7120 static bool
7121 diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
7122 index a3910fc2122b..47dc6836830a 100644
7123 --- a/net/netfilter/xt_hashlimit.c
7124 +++ b/net/netfilter/xt_hashlimit.c
7125 @@ -104,7 +104,7 @@ struct xt_hashlimit_htable {
7126 spinlock_t lock; /* lock for list_head */
7127 u_int32_t rnd; /* random seed for hash */
7128 unsigned int count; /* number entries in table */
7129 - struct timer_list timer; /* timer for gc */
7130 + struct delayed_work gc_work;
7131
7132 /* seq_file stuff */
7133 struct proc_dir_entry *pde;
7134 @@ -213,7 +213,7 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
7135 call_rcu_bh(&ent->rcu, dsthash_free_rcu);
7136 ht->count--;
7137 }
7138 -static void htable_gc(unsigned long htlong);
7139 +static void htable_gc(struct work_struct *work);
7140
7141 static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
7142 u_int8_t family)
7143 @@ -273,9 +273,9 @@ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
7144 }
7145 hinfo->net = net;
7146
7147 - setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
7148 - hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
7149 - add_timer(&hinfo->timer);
7150 + INIT_DEFERRABLE_WORK(&hinfo->gc_work, htable_gc);
7151 + queue_delayed_work(system_power_efficient_wq, &hinfo->gc_work,
7152 + msecs_to_jiffies(hinfo->cfg.gc_interval));
7153
7154 hlist_add_head(&hinfo->node, &hashlimit_net->htables);
7155
7156 @@ -300,29 +300,30 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
7157 {
7158 unsigned int i;
7159
7160 - /* lock hash table and iterate over it */
7161 - spin_lock_bh(&ht->lock);
7162 for (i = 0; i < ht->cfg.size; i++) {
7163 struct dsthash_ent *dh;
7164 struct hlist_node *n;
7165 +
7166 + spin_lock_bh(&ht->lock);
7167 hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
7168 if ((*select)(ht, dh))
7169 dsthash_free(ht, dh);
7170 }
7171 + spin_unlock_bh(&ht->lock);
7172 + cond_resched();
7173 }
7174 - spin_unlock_bh(&ht->lock);
7175 }
7176
7177 -/* hash table garbage collector, run by timer */
7178 -static void htable_gc(unsigned long htlong)
7179 +static void htable_gc(struct work_struct *work)
7180 {
7181 - struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong;
7182 + struct xt_hashlimit_htable *ht;
7183 +
7184 + ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);
7185
7186 htable_selective_cleanup(ht, select_gc);
7187
7188 - /* re-add the timer accordingly */
7189 - ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval);
7190 - add_timer(&ht->timer);
7191 + queue_delayed_work(system_power_efficient_wq,
7192 + &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
7193 }
7194
7195 static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
7196 @@ -341,7 +342,7 @@ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
7197
7198 static void htable_destroy(struct xt_hashlimit_htable *hinfo)
7199 {
7200 - del_timer_sync(&hinfo->timer);
7201 + cancel_delayed_work_sync(&hinfo->gc_work);
7202 htable_remove_proc_entry(hinfo);
7203 htable_selective_cleanup(hinfo, select_all);
7204 kfree(hinfo->name);
7205 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
7206 index e6283464a8e6..df33156ecd2d 100644
7207 --- a/net/wireless/nl80211.c
7208 +++ b/net/wireless/nl80211.c
7209 @@ -6796,6 +6796,9 @@ void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp)
7210 struct nlattr *data = ((void **)skb->cb)[2];
7211 enum nl80211_multicast_groups mcgrp = NL80211_MCGRP_TESTMODE;
7212
7213 + /* clear CB data for netlink core to own from now on */
7214 + memset(skb->cb, 0, sizeof(skb->cb));
7215 +
7216 nla_nest_end(skb, data);
7217 genlmsg_end(skb, hdr);
7218
7219 @@ -9075,6 +9078,9 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb)
7220 void *hdr = ((void **)skb->cb)[1];
7221 struct nlattr *data = ((void **)skb->cb)[2];
7222
7223 + /* clear CB data for netlink core to own from now on */
7224 + memset(skb->cb, 0, sizeof(skb->cb));
7225 +
7226 if (WARN_ON(!rdev->cur_cmd_info)) {
7227 kfree_skb(skb);
7228 return -EINVAL;
7229 diff --git a/sound/core/info.c b/sound/core/info.c
7230 index e79baa11b60e..08070e1eefeb 100644
7231 --- a/sound/core/info.c
7232 +++ b/sound/core/info.c
7233 @@ -679,7 +679,7 @@ int snd_info_card_free(struct snd_card *card)
7234 * snd_info_get_line - read one line from the procfs buffer
7235 * @buffer: the procfs buffer
7236 * @line: the buffer to store
7237 - * @len: the max. buffer size - 1
7238 + * @len: the max. buffer size
7239 *
7240 * Reads one line from the buffer and stores the string.
7241 *
7242 @@ -699,7 +699,7 @@ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len)
7243 buffer->stop = 1;
7244 if (c == '\n')
7245 break;
7246 - if (len) {
7247 + if (len > 1) {
7248 len--;
7249 *line++ = c;
7250 }
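
The snd_info_get_line() change above makes @len the full buffer size and copies only while more than one byte remains, keeping room for the terminating NUL. A stand-alone analogue of that bounded line copy; the helper and test string are illustrative:

    #include <stdio.h>
    #include <string.h>

    /* Copy at most len-1 characters of src into line, stopping at newline or
     * end of string, and always NUL-terminate: a buffer of size len holds
     * len-1 characters plus the terminator. */
    static void get_line(char *line, int len, const char *src)
    {
        char c;

        while ((c = *src++) != '\0' && c != '\n') {
            if (len > 1) {
                len--;
                *line++ = c;
            }
        }
        *line = '\0';
    }

    int main(void)
    {
        char buf[8];

        get_line(buf, sizeof(buf), "a line that is clearly longer than eight bytes\n");
        printf("\"%s\" (%zu chars)\n", buf, strlen(buf));
        return 0;
    }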
7251 diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
7252 index a2104671f51d..e1ef106c8a6f 100644
7253 --- a/sound/core/pcm_lib.c
7254 +++ b/sound/core/pcm_lib.c
7255 @@ -1783,14 +1783,16 @@ static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
7256 {
7257 struct snd_pcm_hw_params *params = arg;
7258 snd_pcm_format_t format;
7259 - int channels, width;
7260 + int channels;
7261 + ssize_t frame_size;
7262
7263 params->fifo_size = substream->runtime->hw.fifo_size;
7264 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
7265 format = params_format(params);
7266 channels = params_channels(params);
7267 - width = snd_pcm_format_physical_width(format);
7268 - params->fifo_size /= width * channels;
7269 + frame_size = snd_pcm_format_size(format, channels);
7270 + if (frame_size > 0)
7271 + params->fifo_size /= (unsigned)frame_size;
7272 }
7273 return 0;
7274 }
7275 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
7276 index bcf91bea3317..ffc19464b978 100644
7277 --- a/sound/pci/hda/patch_conexant.c
7278 +++ b/sound/pci/hda/patch_conexant.c
7279 @@ -3237,6 +3237,7 @@ enum {
7280 CXT_FIXUP_HEADPHONE_MIC_PIN,
7281 CXT_FIXUP_HEADPHONE_MIC,
7282 CXT_FIXUP_GPIO1,
7283 + CXT_FIXUP_ASPIRE_DMIC,
7284 CXT_FIXUP_THINKPAD_ACPI,
7285 };
7286
7287 @@ -3397,6 +3398,12 @@ static const struct hda_fixup cxt_fixups[] = {
7288 { }
7289 },
7290 },
7291 + [CXT_FIXUP_ASPIRE_DMIC] = {
7292 + .type = HDA_FIXUP_FUNC,
7293 + .v.func = cxt_fixup_stereo_dmic,
7294 + .chained = true,
7295 + .chain_id = CXT_FIXUP_GPIO1,
7296 + },
7297 [CXT_FIXUP_THINKPAD_ACPI] = {
7298 .type = HDA_FIXUP_FUNC,
7299 .v.func = hda_fixup_thinkpad_acpi,
7300 @@ -3410,7 +3417,7 @@ static const struct snd_pci_quirk cxt5051_fixups[] = {
7301
7302 static const struct snd_pci_quirk cxt5066_fixups[] = {
7303 SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
7304 - SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_GPIO1),
7305 + SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
7306 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
7307 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
7308 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
7309 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7310 index b35dbe25a6e3..5d0058bd6259 100644
7311 --- a/sound/pci/hda/patch_realtek.c
7312 +++ b/sound/pci/hda/patch_realtek.c
7313 @@ -327,6 +327,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
7314 case 0x10ec0885:
7315 case 0x10ec0887:
7316 /*case 0x10ec0889:*/ /* this causes an SPDIF problem */
7317 + case 0x10ec0900:
7318 alc889_coef_init(codec);
7319 break;
7320 case 0x10ec0888:
7321 @@ -2330,6 +2331,7 @@ static int patch_alc882(struct hda_codec *codec)
7322 switch (codec->vendor_id) {
7323 case 0x10ec0882:
7324 case 0x10ec0885:
7325 + case 0x10ec0900:
7326 break;
7327 default:
7328 /* ALC883 and variants */
7329 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
7330 index 978df990f27c..15270a2e71cc 100644
7331 --- a/sound/pci/hda/patch_sigmatel.c
7332 +++ b/sound/pci/hda/patch_sigmatel.c
7333 @@ -559,8 +559,8 @@ static void stac_init_power_map(struct hda_codec *codec)
7334 if (snd_hda_jack_tbl_get(codec, nid))
7335 continue;
7336 if (def_conf == AC_JACK_PORT_COMPLEX &&
7337 - !(spec->vref_mute_led_nid == nid ||
7338 - is_jack_detectable(codec, nid))) {
7339 + spec->vref_mute_led_nid != nid &&
7340 + is_jack_detectable(codec, nid)) {
7341 snd_hda_jack_detect_enable_callback(codec, nid,
7342 STAC_PWR_EVENT,
7343 jack_update_power);
7344 @@ -4212,11 +4212,18 @@ static int stac_parse_auto_config(struct hda_codec *codec)
7345 return err;
7346 }
7347
7348 - stac_init_power_map(codec);
7349 -
7350 return 0;
7351 }
7352
7353 +static int stac_build_controls(struct hda_codec *codec)
7354 +{
7355 + int err = snd_hda_gen_build_controls(codec);
7356 +
7357 + if (err < 0)
7358 + return err;
7359 + stac_init_power_map(codec);
7360 + return 0;
7361 +}
7362
7363 static int stac_init(struct hda_codec *codec)
7364 {
7365 @@ -4328,7 +4335,7 @@ static int stac_suspend(struct hda_codec *codec)
7366 #endif /* CONFIG_PM */
7367
7368 static const struct hda_codec_ops stac_patch_ops = {
7369 - .build_controls = snd_hda_gen_build_controls,
7370 + .build_controls = stac_build_controls,
7371 .build_pcms = snd_hda_gen_build_pcms,
7372 .init = stac_init,
7373 .free = stac_free,
7374 diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
7375 index 670afa29e30d..7350ebbae642 100644
7376 --- a/sound/soc/davinci/davinci-mcasp.c
7377 +++ b/sound/soc/davinci/davinci-mcasp.c
7378 @@ -418,8 +418,17 @@ static int davinci_config_channel_size(struct davinci_mcasp *mcasp,
7379 {
7380 u32 fmt;
7381 u32 tx_rotate = (word_length / 4) & 0x7;
7382 - u32 rx_rotate = (32 - word_length) / 4;
7383 u32 mask = (1ULL << word_length) - 1;
7384 + /*
7385 + * For captured data we should not rotate; inversion and masking are
7386 + * enough to get the data to the right position:
7387 + * Format data from bus after reverse (XRBUF)
7388 + * S16_LE: |LSB|MSB|xxx|xxx| |xxx|xxx|MSB|LSB|
7389 + * S24_3LE: |LSB|DAT|MSB|xxx| |xxx|MSB|DAT|LSB|
7390 + * S24_LE: |LSB|DAT|MSB|xxx| |xxx|MSB|DAT|LSB|
7391 + * S32_LE: |LSB|DAT|DAT|MSB| |MSB|DAT|DAT|LSB|
7392 + */
7393 + u32 rx_rotate = 0;
7394
7395 /*
7396 * if s BCLK-to-LRCLK ratio has been configured via the set_clkdiv()