Magellan Linux

Contents of /trunk/kernel-alx/patches-4.1/0105-4.1.6-all-fixes.patch



Revision 2748
Mon Jan 11 12:00:45 2016 UTC by niro
File size: 149127 bytes
-linux-4.1 patches up to 4.1.15
1 diff --git a/Documentation/devicetree/bindings/clock/keystone-pll.txt b/Documentation/devicetree/bindings/clock/keystone-pll.txt
2 index 225990f79b7c..47570d207215 100644
3 --- a/Documentation/devicetree/bindings/clock/keystone-pll.txt
4 +++ b/Documentation/devicetree/bindings/clock/keystone-pll.txt
5 @@ -15,8 +15,8 @@ Required properties:
6 - compatible : shall be "ti,keystone,main-pll-clock" or "ti,keystone,pll-clock"
7 - clocks : parent clock phandle
8 - reg - pll control0 and pll multiplier registers
9 -- reg-names : control and multiplier. The multiplier is applicable only for
10 - main pll clock
11 +- reg-names : control, multiplier and post-divider. The multiplier and
12 + post-divider registers are applicable only for main pll clock
13 - fixed-postdiv : fixed post divider value. If absent, use clkod register bits
14 for postdiv
15
16 @@ -25,8 +25,8 @@ Example:
17 #clock-cells = <0>;
18 compatible = "ti,keystone,main-pll-clock";
19 clocks = <&refclksys>;
20 - reg = <0x02620350 4>, <0x02310110 4>;
21 - reg-names = "control", "multiplier";
22 + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
23 + reg-names = "control", "multiplier", "post-divider";
24 fixed-postdiv = <2>;
25 };
26
27 diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
28 index c86f2f1ae4f6..1fec1135791d 100644
29 --- a/Documentation/input/alps.txt
30 +++ b/Documentation/input/alps.txt
31 @@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
32 byte 5: 0 z6 z5 z4 z3 z2 z1 z0
33
34 Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
35 -the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
36 -buttons get reported separately in the PSM, PSR and PSL bits.
37 +the DualPoint Stick. The M, R and L bits signal the combined status of both
38 +the pointingstick and touchpad buttons, except for Dell dualpoint devices
39 +where the pointingstick buttons get reported separately in the PSM, PSR
40 +and PSL bits.
41
42 Dualpoint device -- interleaved packet format
43 ---------------------------------------------
44 diff --git a/Makefile b/Makefile
45 index 068dd690933d..838dabcb7f48 100644
46 --- a/Makefile
47 +++ b/Makefile
48 @@ -1,6 +1,6 @@
49 VERSION = 4
50 PATCHLEVEL = 1
51 -SUBLEVEL = 5
52 +SUBLEVEL = 6
53 EXTRAVERSION =
54 NAME = Series 4800
55
56 diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
57 index b6478e97d6a7..e6540b5cfa4c 100644
58 --- a/arch/arm/boot/dts/imx35.dtsi
59 +++ b/arch/arm/boot/dts/imx35.dtsi
60 @@ -286,8 +286,8 @@
61 can1: can@53fe4000 {
62 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
63 reg = <0x53fe4000 0x1000>;
64 - clocks = <&clks 33>;
65 - clock-names = "ipg";
66 + clocks = <&clks 33>, <&clks 33>;
67 + clock-names = "ipg", "per";
68 interrupts = <43>;
69 status = "disabled";
70 };
71 @@ -295,8 +295,8 @@
72 can2: can@53fe8000 {
73 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
74 reg = <0x53fe8000 0x1000>;
75 - clocks = <&clks 34>;
76 - clock-names = "ipg";
77 + clocks = <&clks 34>, <&clks 34>;
78 + clock-names = "ipg", "per";
79 interrupts = <44>;
80 status = "disabled";
81 };
82 diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi
83 index 4773d6af66a0..d56d68fe7ffc 100644
84 --- a/arch/arm/boot/dts/k2e-clocks.dtsi
85 +++ b/arch/arm/boot/dts/k2e-clocks.dtsi
86 @@ -13,9 +13,8 @@ clocks {
87 #clock-cells = <0>;
88 compatible = "ti,keystone,main-pll-clock";
89 clocks = <&refclksys>;
90 - reg = <0x02620350 4>, <0x02310110 4>;
91 - reg-names = "control", "multiplier";
92 - fixed-postdiv = <2>;
93 + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
94 + reg-names = "control", "multiplier", "post-divider";
95 };
96
97 papllclk: papllclk@2620358 {
98 diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi
99 index d5adee3c0067..af9b7190533a 100644
100 --- a/arch/arm/boot/dts/k2hk-clocks.dtsi
101 +++ b/arch/arm/boot/dts/k2hk-clocks.dtsi
102 @@ -22,9 +22,8 @@ clocks {
103 #clock-cells = <0>;
104 compatible = "ti,keystone,main-pll-clock";
105 clocks = <&refclksys>;
106 - reg = <0x02620350 4>, <0x02310110 4>;
107 - reg-names = "control", "multiplier";
108 - fixed-postdiv = <2>;
109 + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
110 + reg-names = "control", "multiplier", "post-divider";
111 };
112
113 papllclk: papllclk@2620358 {
114 diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi
115 index eb1e3e29f073..ef8464bb11ff 100644
116 --- a/arch/arm/boot/dts/k2l-clocks.dtsi
117 +++ b/arch/arm/boot/dts/k2l-clocks.dtsi
118 @@ -22,9 +22,8 @@ clocks {
119 #clock-cells = <0>;
120 compatible = "ti,keystone,main-pll-clock";
121 clocks = <&refclksys>;
122 - reg = <0x02620350 4>, <0x02310110 4>;
123 - reg-names = "control", "multiplier";
124 - fixed-postdiv = <2>;
125 + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
126 + reg-names = "control", "multiplier", "post-divider";
127 };
128
129 papllclk: papllclk@2620358 {
130 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
131 index 752969ff9de0..5286e7773ed4 100644
132 --- a/arch/arm/mach-omap2/omap_hwmod.c
133 +++ b/arch/arm/mach-omap2/omap_hwmod.c
134 @@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
135 * registers. This address is needed early so the OCP registers that
136 * are part of the device's address space can be ioremapped properly.
137 *
138 + * If SYSC access is not needed, the registers will not be remapped
139 + * and non-availability of MPU access is not treated as an error.
140 + *
141 * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
142 * -ENXIO on absent or invalid register target address space.
143 */
144 @@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
145
146 _save_mpu_port_index(oh);
147
148 + /* if we don't need sysc access we don't need to ioremap */
149 + if (!oh->class->sysc)
150 + return 0;
151 +
152 + /* we can't continue without MPU PORT if we need sysc access */
153 if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
154 return -ENXIO;
155
156 @@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
157 oh->name);
158
159 /* Extract the IO space from device tree blob */
160 - if (!np)
161 + if (!np) {
162 + pr_err("omap_hwmod: %s: no dt node\n", oh->name);
163 return -ENXIO;
164 + }
165
166 va_start = of_iomap(np, index + oh->mpu_rt_idx);
167 } else {
168 @@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
169 oh->name, np->name);
170 }
171
172 - if (oh->class->sysc) {
173 - r = _init_mpu_rt_base(oh, NULL, index, np);
174 - if (r < 0) {
175 - WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
176 - oh->name);
177 - return 0;
178 - }
179 + r = _init_mpu_rt_base(oh, NULL, index, np);
180 + if (r < 0) {
181 + WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
182 + oh->name);
183 + return 0;
184 }
185
186 r = _init_clocks(oh, NULL);
187 diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
188 index d26fcd4cd6e6..c0cff3410166 100644
189 --- a/arch/arm64/kernel/signal32.c
190 +++ b/arch/arm64/kernel/signal32.c
191 @@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
192 * Other callers might not initialize the si_lsb field,
193 * so check explicitly for the right codes here.
194 */
195 - if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
196 + if (from->si_signo == SIGBUS &&
197 + (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
198 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
199 #endif
200 break;
201 @@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
202
203 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
204 {
205 - memset(to, 0, sizeof *to);
206 -
207 if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
208 copy_from_user(to->_sifields._pad,
209 from->_sifields._pad, SI_PAD_SIZE))
210 diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
211 index 7fc8397d16f2..fd2a36a79f97 100644
212 --- a/arch/mips/ath79/setup.c
213 +++ b/arch/mips/ath79/setup.c
214 @@ -186,6 +186,7 @@ int get_c0_perfcount_int(void)
215 {
216 return ATH79_MISC_IRQ(5);
217 }
218 +EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
219
220 unsigned int get_c0_compare_int(void)
221 {
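
The EXPORT_SYMBOL_GPL() line added here, and the matching ones in the lantiq, malta, sead3, pistachio and ralink files below, make get_c0_perfcount_int() resolvable from modular code at load time. As a minimal sketch of the idiom, with a hypothetical helper name that is not part of this patch:

#include <linux/module.h>

/* any GPL-compatible module can now link against this symbol */
int my_platform_perf_irq(void)
{
        return 42;      /* hypothetical IRQ number */
}
EXPORT_SYMBOL_GPL(my_platform_perf_irq);
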
222 diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
223 deleted file mode 100644
224 index 11d3b572b1b3..000000000000
225 --- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
226 +++ /dev/null
227 @@ -1,10 +0,0 @@
228 -#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
229 -#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
230 -
231 -#include <asm/bmips.h>
232 -
233 -#define plat_post_dma_flush bmips_post_dma_flush
234 -
235 -#include <asm/mach-generic/dma-coherence.h>
236 -
237 -#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
238 diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
239 index 819af9d057a8..70f6e7f073b0 100644
240 --- a/arch/mips/include/asm/pgtable.h
241 +++ b/arch/mips/include/asm/pgtable.h
242 @@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
243 * Make sure the buddy is global too (if it's !none,
244 * it better already be global)
245 */
246 +#ifdef CONFIG_SMP
247 + /*
248 + * For SMP, multiple CPUs can race, so we need to do
249 + * this atomically.
250 + */
251 +#ifdef CONFIG_64BIT
252 +#define LL_INSN "lld"
253 +#define SC_INSN "scd"
254 +#else /* CONFIG_32BIT */
255 +#define LL_INSN "ll"
256 +#define SC_INSN "sc"
257 +#endif
258 + unsigned long page_global = _PAGE_GLOBAL;
259 + unsigned long tmp;
260 +
261 + __asm__ __volatile__ (
262 + " .set push\n"
263 + " .set noreorder\n"
264 + "1: " LL_INSN " %[tmp], %[buddy]\n"
265 + " bnez %[tmp], 2f\n"
266 + " or %[tmp], %[tmp], %[global]\n"
267 + " " SC_INSN " %[tmp], %[buddy]\n"
268 + " beqz %[tmp], 1b\n"
269 + " nop\n"
270 + "2:\n"
271 + " .set pop"
272 + : [buddy] "+m" (buddy->pte),
273 + [tmp] "=&r" (tmp)
274 + : [global] "r" (page_global));
275 +#else /* !CONFIG_SMP */
276 if (pte_none(*buddy))
277 pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
278 +#endif /* CONFIG_SMP */
279 }
280 #endif
281 }
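
For readers unfamiliar with MIPS ll/sc, the loop above sets _PAGE_GLOBAL in the buddy PTE only if the buddy is still none, retrying whenever the store-conditional fails. A user-space model of the same logic with C11 atomics (a sketch only; the kernel must use raw ll/sc on the PTE itself):

#include <stdatomic.h>

#define PAGE_GLOBAL 0x1UL

/* set the global bit iff the entry is still zero; a concurrent writer
 * makes the compare-exchange fail and we re-examine the value */
static void buddy_set_global(_Atomic unsigned long *buddy)
{
        unsigned long old = atomic_load(buddy);

        while (old == 0 &&
               !atomic_compare_exchange_weak(buddy, &old, PAGE_GLOBAL))
                ;       /* retry, like "beqz %[tmp], 1b" above */
}
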
282 diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
283 index 28d6d9364bd1..a71da576883c 100644
284 --- a/arch/mips/include/asm/stackframe.h
285 +++ b/arch/mips/include/asm/stackframe.h
286 @@ -152,6 +152,31 @@
287 .set noreorder
288 bltz k0, 8f
289 move k1, sp
290 +#ifdef CONFIG_EVA
291 + /*
292 + * Flush interAptiv's Return Prediction Stack (RPS) by writing
293 + * EntryHi. Toggling Config7.RPS is slower and less portable.
294 + *
295 + * The RPS isn't automatically flushed when exceptions are
296 + * taken, which can result in kernel mode speculative accesses
297 + * to user addresses if the RPS mispredicts. That's harmless
298 + * when user and kernel share the same address space, but with
299 + * EVA the same user segments may be unmapped to kernel mode,
300 + * even containing sensitive MMIO regions or invalid memory.
301 + *
302 + * This can happen when the kernel sets the return address to
303 + * ret_from_* and jr's to the exception handler, which looks
304 + * more like a tail call than a function call. If nested calls
305 + * don't evict the last user address in the RPS, it will
306 + * mispredict the return and fetch from a user controlled
307 + * address into the icache.
308 + *
309 + * More recent EVA-capable cores with MAAR to restrict
310 + * speculative accesses aren't affected.
311 + */
312 + MFC0 k0, CP0_ENTRYHI
313 + MTC0 k0, CP0_ENTRYHI
314 +#endif
315 .set reorder
316 /* Called from user mode, new stack. */
317 get_saved_sp
318 diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
319 index 3e4491aa6d6b..789d7bf4fef3 100644
320 --- a/arch/mips/kernel/mips-mt-fpaff.c
321 +++ b/arch/mips/kernel/mips-mt-fpaff.c
322 @@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
323 unsigned long __user *user_mask_ptr)
324 {
325 unsigned int real_len;
326 - cpumask_t mask;
327 + cpumask_t allowed, mask;
328 int retval;
329 struct task_struct *p;
330
331 @@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
332 if (retval)
333 goto out_unlock;
334
335 - cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
336 + cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
337 + cpumask_and(&mask, &allowed, cpu_active_mask);
338
339 out_unlock:
340 read_unlock(&tasklist_lock);
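
The fix replaces the old computation (user_cpus_allowed ANDed with the possible mask) with the union of the FPU-affinity and scheduler masks, clipped to the active CPUs. A stand-alone model with plain bitmasks and made-up example values:

#include <stdio.h>

int main(void)
{
        unsigned long user_cpus_allowed = 0x3;  /* CPUs 0-1 */
        unsigned long cpus_allowed      = 0xc;  /* CPUs 2-3 */
        unsigned long cpu_active_mask   = 0x7;  /* CPUs 0-2 active */

        unsigned long allowed = user_cpus_allowed | cpus_allowed;
        unsigned long mask    = allowed & cpu_active_mask;

        printf("reported affinity: 0x%lx\n", mask);      /* 0x7 */
        return 0;
}
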
341 diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
342 index 74bab9ddd0e1..c6bbf2165051 100644
343 --- a/arch/mips/kernel/relocate_kernel.S
344 +++ b/arch/mips/kernel/relocate_kernel.S
345 @@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
346
347 process_entry:
348 PTR_L s2, (s0)
349 - PTR_ADD s0, s0, SZREG
350 + PTR_ADDIU s0, s0, SZREG
351
352 /*
353 * In case of a kdump/crash kernel, the indirection page is not
354 @@ -61,9 +61,9 @@ copy_word:
355 /* copy page word by word */
356 REG_L s5, (s2)
357 REG_S s5, (s4)
358 - PTR_ADD s4, s4, SZREG
359 - PTR_ADD s2, s2, SZREG
360 - LONG_SUB s6, s6, 1
361 + PTR_ADDIU s4, s4, SZREG
362 + PTR_ADDIU s2, s2, SZREG
363 + LONG_ADDIU s6, s6, -1
364 beq s6, zero, process_entry
365 b copy_word
366 b process_entry
367 diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
368 index 19a7705f2a01..5d7f2634996f 100644
369 --- a/arch/mips/kernel/signal32.c
370 +++ b/arch/mips/kernel/signal32.c
371 @@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
372
373 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
374 {
375 - memset(to, 0, sizeof *to);
376 -
377 if (copy_from_user(to, from, 3*sizeof(int)) ||
378 copy_from_user(to->_sifields._pad,
379 from->_sifields._pad, SI_PAD_SIZE32))
380 diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
381 index d2d1c1933bc9..5f5f44edc77d 100644
382 --- a/arch/mips/kernel/traps.c
383 +++ b/arch/mips/kernel/traps.c
384 @@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
385 void show_stack(struct task_struct *task, unsigned long *sp)
386 {
387 struct pt_regs regs;
388 + mm_segment_t old_fs = get_fs();
389 if (sp) {
390 regs.regs[29] = (unsigned long)sp;
391 regs.regs[31] = 0;
392 @@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
393 prepare_frametrace(&regs);
394 }
395 }
396 + /*
397 + * show_stack() deals exclusively with kernel mode, so be sure to access
398 + * the stack in the kernel (not user) address space.
399 + */
400 + set_fs(KERNEL_DS);
401 show_stacktrace(task, &regs);
402 + set_fs(old_fs);
403 }
404
405 static void show_code(unsigned int __user *pc)
406 @@ -1518,6 +1525,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
407 const int field = 2 * sizeof(unsigned long);
408 int multi_match = regs->cp0_status & ST0_TS;
409 enum ctx_state prev_state;
410 + mm_segment_t old_fs = get_fs();
411
412 prev_state = exception_enter();
413 show_regs(regs);
414 @@ -1539,8 +1547,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
415 dump_tlb_all();
416 }
417
418 + if (!user_mode(regs))
419 + set_fs(KERNEL_DS);
420 +
421 show_code((unsigned int __user *) regs->cp0_epc);
422
423 + set_fs(old_fs);
424 +
425 /*
426 * Some chips may have other causes of machine check (e.g. SB1
427 * graduation timer)
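
Both hunks in this file rely on the same 4.1-era idiom: save the address limit with get_fs(), widen it to KERNEL_DS so the user-access helpers may read kernel addresses, and restore it unconditionally. A sketch of the shape (set_fs() has since been removed from mainline; show_code() is the static helper from this file):

#include <linux/ptrace.h>
#include <linux/uaccess.h>

static void dump_code_safely(struct pt_regs *regs)
{
        mm_segment_t old_fs = get_fs();

        if (!user_mode(regs))
                set_fs(KERNEL_DS);      /* allow reads of kernel text */

        show_code((unsigned int __user *)regs->cp0_epc);

        set_fs(old_fs);                 /* restore, even on the user path */
}
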
428 diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
429 index af84bef0c90d..eb3efd137fd1 100644
430 --- a/arch/mips/kernel/unaligned.c
431 +++ b/arch/mips/kernel/unaligned.c
432 @@ -438,7 +438,7 @@ do { \
433 : "memory"); \
434 } while(0)
435
436 -#define StoreDW(addr, value, res) \
437 +#define _StoreDW(addr, value, res) \
438 do { \
439 __asm__ __volatile__ ( \
440 ".set\tpush\n\t" \
441 diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
442 index 6ab10573490d..d01ade63492f 100644
443 --- a/arch/mips/lantiq/irq.c
444 +++ b/arch/mips/lantiq/irq.c
445 @@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
446 {
447 return ltq_perfcount_irq;
448 }
449 +EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
450
451 unsigned int get_c0_compare_int(void)
452 {
453 diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
454 index 185e68261f45..a7f7d9ffb402 100644
455 --- a/arch/mips/mti-malta/malta-time.c
456 +++ b/arch/mips/mti-malta/malta-time.c
457 @@ -148,6 +148,7 @@ int get_c0_perfcount_int(void)
458
459 return mips_cpu_perf_irq;
460 }
461 +EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
462
463 unsigned int get_c0_compare_int(void)
464 {
465 @@ -165,14 +166,17 @@ unsigned int get_c0_compare_int(void)
466
467 static void __init init_rtc(void)
468 {
469 - /* stop the clock whilst setting it up */
470 - CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
471 + unsigned char freq, ctrl;
472
473 - /* 32KHz time base */
474 - CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
475 + /* Set 32KHz time base if not already set */
476 + freq = CMOS_READ(RTC_FREQ_SELECT);
477 + if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
478 + CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
479
480 - /* start the clock */
481 - CMOS_WRITE(RTC_24H, RTC_CONTROL);
482 + /* Ensure SET bit is clear so RTC can run */
483 + ctrl = CMOS_READ(RTC_CONTROL);
484 + if (ctrl & RTC_SET)
485 + CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
486 }
487
488 void __init plat_time_init(void)
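
The rewritten init_rtc() is deliberately idempotent: it reads each CMOS register first and writes only when something is wrong, so an RTC that firmware already configured keeps running (the old code always set RTC_SET, stopping the clock). A generalized sketch of the read-modify-write pattern; the patch open-codes the two registers it cares about:

#include <linux/mc146818rtc.h>

/* update only the masked field, preserving all other bits, and skip
 * the write entirely when the field already holds the desired value */
static void cmos_update_field(unsigned char reg, unsigned char mask,
                              unsigned char val)
{
        unsigned char cur = CMOS_READ(reg);

        if ((cur & mask) != val)
                CMOS_WRITE((cur & ~mask) | val, reg);
}

/* e.g. clear SET so the clock can run: cmos_update_field(RTC_CONTROL, RTC_SET, 0); */
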
489 diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
490 index e1d69895fb1d..a120b7a5a8fe 100644
491 --- a/arch/mips/mti-sead3/sead3-time.c
492 +++ b/arch/mips/mti-sead3/sead3-time.c
493 @@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
494 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
495 return -1;
496 }
497 +EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
498
499 unsigned int get_c0_compare_int(void)
500 {
501 diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
502 index 67889fcea8aa..ab73f6f405bb 100644
503 --- a/arch/mips/pistachio/time.c
504 +++ b/arch/mips/pistachio/time.c
505 @@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
506 {
507 return gic_get_c0_perfcount_int();
508 }
509 +EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
510
511 void __init plat_time_init(void)
512 {
513 diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
514 index 7cf91b92e9d1..199ace4ca1ad 100644
515 --- a/arch/mips/ralink/irq.c
516 +++ b/arch/mips/ralink/irq.c
517 @@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
518 {
519 return rt_perfcount_irq;
520 }
521 +EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
522
523 unsigned int get_c0_compare_int(void)
524 {
525 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
526 index d3a831ac0f92..da50e0c9c57e 100644
527 --- a/arch/powerpc/kernel/signal_32.c
528 +++ b/arch/powerpc/kernel/signal_32.c
529 @@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
530
531 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
532 {
533 - memset(to, 0, sizeof *to);
534 -
535 if (copy_from_user(to, from, 3*sizeof(int)) ||
536 copy_from_user(to->_sifields._pad,
537 from->_sifields._pad, SI_PAD_SIZE32))
538 diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
539 index 1f0aa2024e94..6424249d5f78 100644
540 --- a/arch/sparc/include/asm/visasm.h
541 +++ b/arch/sparc/include/asm/visasm.h
542 @@ -28,16 +28,10 @@
543 * Must preserve %o5 between VISEntryHalf and VISExitHalf */
544
545 #define VISEntryHalf \
546 - rd %fprs, %o5; \
547 - andcc %o5, FPRS_FEF, %g0; \
548 - be,pt %icc, 297f; \
549 - sethi %hi(298f), %g7; \
550 - sethi %hi(VISenterhalf), %g1; \
551 - jmpl %g1 + %lo(VISenterhalf), %g0; \
552 - or %g7, %lo(298f), %g7; \
553 - clr %o5; \
554 -297: wr %o5, FPRS_FEF, %fprs; \
555 -298:
556 + VISEntry
557 +
558 +#define VISExitHalf \
559 + VISExit
560
561 #define VISEntryHalfFast(fail_label) \
562 rd %fprs, %o5; \
563 @@ -47,7 +41,7 @@
564 ba,a,pt %xcc, fail_label; \
565 297: wr %o5, FPRS_FEF, %fprs;
566
567 -#define VISExitHalf \
568 +#define VISExitHalfFast \
569 wr %o5, 0, %fprs;
570
571 #ifndef __ASSEMBLY__
572 diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
573 index 140527a20e7d..83aeeb1dffdb 100644
574 --- a/arch/sparc/lib/NG4memcpy.S
575 +++ b/arch/sparc/lib/NG4memcpy.S
576 @@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
577 add %o0, 0x40, %o0
578 bne,pt %icc, 1b
579 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
580 +#ifdef NON_USER_COPY
581 + VISExitHalfFast
582 +#else
583 VISExitHalf
584 -
585 +#endif
586 brz,pn %o2, .Lexit
587 cmp %o2, 19
588 ble,pn %icc, .Lsmall_unaligned
589 diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
590 index b320ae9e2e2e..a063d84336d6 100644
591 --- a/arch/sparc/lib/VISsave.S
592 +++ b/arch/sparc/lib/VISsave.S
593 @@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
594
595 stx %g3, [%g6 + TI_GSR]
596 2: add %g6, %g1, %g3
597 - cmp %o5, FPRS_DU
598 - be,pn %icc, 6f
599 - sll %g1, 3, %g1
600 + mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5
601 + sll %g1, 3, %g1
602 stb %o5, [%g3 + TI_FPSAVED]
603 rd %gsr, %g2
604 add %g6, %g1, %g3
605 @@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
606 .align 32
607 80: jmpl %g7 + %g0, %g0
608 nop
609 -
610 -6: ldub [%g3 + TI_FPSAVED], %o5
611 - or %o5, FPRS_DU, %o5
612 - add %g6, TI_FPREGS+0x80, %g2
613 - stb %o5, [%g3 + TI_FPSAVED]
614 -
615 - sll %g1, 5, %g1
616 - add %g6, TI_FPREGS+0xc0, %g3
617 - wr %g0, FPRS_FEF, %fprs
618 - membar #Sync
619 - stda %f32, [%g2 + %g1] ASI_BLK_P
620 - stda %f48, [%g3 + %g1] ASI_BLK_P
621 - membar #Sync
622 - ba,pt %xcc, 80f
623 - nop
624 -
625 - .align 32
626 -80: jmpl %g7 + %g0, %g0
627 - nop
628 -
629 - .align 32
630 -VISenterhalf:
631 - ldub [%g6 + TI_FPDEPTH], %g1
632 - brnz,a,pn %g1, 1f
633 - cmp %g1, 1
634 - stb %g0, [%g6 + TI_FPSAVED]
635 - stx %fsr, [%g6 + TI_XFSR]
636 - clr %o5
637 - jmpl %g7 + %g0, %g0
638 - wr %g0, FPRS_FEF, %fprs
639 -
640 -1: bne,pn %icc, 2f
641 - srl %g1, 1, %g1
642 - ba,pt %xcc, vis1
643 - sub %g7, 8, %g7
644 -2: addcc %g6, %g1, %g3
645 - sll %g1, 3, %g1
646 - andn %o5, FPRS_DU, %g2
647 - stb %g2, [%g3 + TI_FPSAVED]
648 -
649 - rd %gsr, %g2
650 - add %g6, %g1, %g3
651 - stx %g2, [%g3 + TI_GSR]
652 - add %g6, %g1, %g2
653 - stx %fsr, [%g2 + TI_XFSR]
654 - sll %g1, 5, %g1
655 -3: andcc %o5, FPRS_DL, %g0
656 - be,pn %icc, 4f
657 - add %g6, TI_FPREGS, %g2
658 -
659 - add %g6, TI_FPREGS+0x40, %g3
660 - membar #Sync
661 - stda %f0, [%g2 + %g1] ASI_BLK_P
662 - stda %f16, [%g3 + %g1] ASI_BLK_P
663 - membar #Sync
664 - ba,pt %xcc, 4f
665 - nop
666 -
667 - .align 32
668 -4: and %o5, FPRS_DU, %o5
669 - jmpl %g7 + %g0, %g0
670 - wr %o5, FPRS_FEF, %fprs
671 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
672 index 1d649a95660c..8069ce12f20b 100644
673 --- a/arch/sparc/lib/ksyms.c
674 +++ b/arch/sparc/lib/ksyms.c
675 @@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
676 void VISenter(void);
677 EXPORT_SYMBOL(VISenter);
678
679 -/* CRYPTO code needs this */
680 -void VISenterhalf(void);
681 -EXPORT_SYMBOL(VISenterhalf);
682 -
683 extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
684 extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
685 unsigned long *);
686 diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
687 index e8c2c04143cd..c667e104a0c2 100644
688 --- a/arch/tile/kernel/compat_signal.c
689 +++ b/arch/tile/kernel/compat_signal.c
690 @@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
691 if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
692 return -EFAULT;
693
694 - memset(to, 0, sizeof(*to));
695 -
696 err = __get_user(to->si_signo, &from->si_signo);
697 err |= __get_user(to->si_errno, &from->si_errno);
698 err |= __get_user(to->si_code, &from->si_code);
699 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
700 index 02c2eff7478d..4bd6c197563d 100644
701 --- a/arch/x86/kernel/entry_64.S
702 +++ b/arch/x86/kernel/entry_64.S
703 @@ -793,8 +793,6 @@ retint_kernel:
704 restore_c_regs_and_iret:
705 RESTORE_C_REGS
706 REMOVE_PT_GPREGS_FROM_STACK 8
707 -
708 -irq_return:
709 INTERRUPT_RETURN
710
711 ENTRY(native_iret)
712 @@ -1413,11 +1411,12 @@ ENTRY(nmi)
713 * If the variable is not set and the stack is not the NMI
714 * stack then:
715 * o Set the special variable on the stack
716 - * o Copy the interrupt frame into a "saved" location on the stack
717 - * o Copy the interrupt frame into a "copy" location on the stack
718 + * o Copy the interrupt frame into an "outermost" location on the
719 + * stack
720 + * o Copy the interrupt frame into an "iret" location on the stack
721 * o Continue processing the NMI
722 * If the variable is set or the previous stack is the NMI stack:
723 - * o Modify the "copy" location to jump to the repeate_nmi
724 + * o Modify the "iret" location to jump to the repeat_nmi
725 * o return back to the first NMI
726 *
727 * Now on exit of the first NMI, we first clear the stack variable
728 @@ -1426,32 +1425,151 @@ ENTRY(nmi)
729 * a nested NMI that updated the copy interrupt stack frame, a
730 * jump will be made to the repeat_nmi code that will handle the second
731 * NMI.
732 + *
733 + * However, espfix prevents us from directly returning to userspace
734 + * with a single IRET instruction. Similarly, IRET to user mode
735 + * can fault. We therefore handle NMIs from user space like
736 + * other IST entries.
737 */
738
739 /* Use %rdx as our temp variable throughout */
740 pushq_cfi %rdx
741 CFI_REL_OFFSET rdx, 0
742
743 + testb $3, CS-RIP+8(%rsp)
744 + jz .Lnmi_from_kernel
745 +
746 /*
747 - * If %cs was not the kernel segment, then the NMI triggered in user
748 - * space, which means it is definitely not nested.
749 + * NMI from user mode. We need to run on the thread stack, but we
750 + * can't go through the normal entry paths: NMIs are masked, and
751 + * we don't want to enable interrupts, because then we'll end
752 + * up in an awkward situation in which IRQs are on but NMIs
753 + * are off.
754 */
755 - cmpl $__KERNEL_CS, 16(%rsp)
756 - jne first_nmi
757 +
758 + SWAPGS
759 + cld
760 + movq %rsp, %rdx
761 + movq PER_CPU_VAR(kernel_stack), %rsp
762 + pushq 5*8(%rdx) /* pt_regs->ss */
763 + pushq 4*8(%rdx) /* pt_regs->rsp */
764 + pushq 3*8(%rdx) /* pt_regs->flags */
765 + pushq 2*8(%rdx) /* pt_regs->cs */
766 + pushq 1*8(%rdx) /* pt_regs->rip */
767 + pushq $-1 /* pt_regs->orig_ax */
768 + pushq %rdi /* pt_regs->di */
769 + pushq %rsi /* pt_regs->si */
770 + pushq (%rdx) /* pt_regs->dx */
771 + pushq %rcx /* pt_regs->cx */
772 + pushq %rax /* pt_regs->ax */
773 + pushq %r8 /* pt_regs->r8 */
774 + pushq %r9 /* pt_regs->r9 */
775 + pushq %r10 /* pt_regs->r10 */
776 + pushq %r11 /* pt_regs->r11 */
777 + pushq %rbx /* pt_regs->rbx */
778 + pushq %rbp /* pt_regs->rbp */
779 + pushq %r12 /* pt_regs->r12 */
780 + pushq %r13 /* pt_regs->r13 */
781 + pushq %r14 /* pt_regs->r14 */
782 + pushq %r15 /* pt_regs->r15 */
783
784 /*
785 - * Check the special variable on the stack to see if NMIs are
786 - * executing.
787 + * At this point we no longer need to worry about stack damage
788 + * due to nesting -- we're on the normal thread stack and we're
789 + * done with the NMI stack.
790 + */
791 + movq %rsp, %rdi
792 + movq $-1, %rsi
793 + call do_nmi
794 +
795 + /*
796 + * Return back to user mode. We must *not* do the normal exit
797 + * work, because we don't want to enable interrupts. Fortunately,
798 + * do_nmi doesn't modify pt_regs.
799 + */
800 + SWAPGS
801 + jmp restore_c_regs_and_iret
802 +
803 +.Lnmi_from_kernel:
804 + /*
805 + * Here's what our stack frame will look like:
806 + * +---------------------------------------------------------+
807 + * | original SS |
808 + * | original Return RSP |
809 + * | original RFLAGS |
810 + * | original CS |
811 + * | original RIP |
812 + * +---------------------------------------------------------+
813 + * | temp storage for rdx |
814 + * +---------------------------------------------------------+
815 + * | "NMI executing" variable |
816 + * +---------------------------------------------------------+
817 + * | iret SS } Copied from "outermost" frame |
818 + * | iret Return RSP } on each loop iteration; overwritten |
819 + * | iret RFLAGS } by a nested NMI to force another |
820 + * | iret CS } iteration if needed. |
821 + * | iret RIP } |
822 + * +---------------------------------------------------------+
823 + * | outermost SS } initialized in first_nmi; |
824 + * | outermost Return RSP } will not be changed before |
825 + * | outermost RFLAGS } NMI processing is done. |
826 + * | outermost CS } Copied to "iret" frame on each |
827 + * | outermost RIP } iteration. |
828 + * +---------------------------------------------------------+
829 + * | pt_regs |
830 + * +---------------------------------------------------------+
831 + *
832 + * The "original" frame is used by hardware. Before re-enabling
833 + * NMIs, we need to be done with it, and we need to leave enough
834 + * space for the asm code here.
835 + *
836 + * We return by executing IRET while RSP points to the "iret" frame.
837 + * That will either return for real or it will loop back into NMI
838 + * processing.
839 + *
840 + * The "outermost" frame is copied to the "iret" frame on each
841 + * iteration of the loop, so each iteration starts with the "iret"
842 + * frame pointing to the final return target.
843 + */
844 +
845 + /*
846 + * Determine whether we're a nested NMI.
847 + *
848 + * If we interrupted kernel code between repeat_nmi and
849 + * end_repeat_nmi, then we are a nested NMI. We must not
850 + * modify the "iret" frame because it's being written by
851 + * the outer NMI. That's okay; the outer NMI handler is
852 + * about to call do_nmi anyway, so we can just
853 + * resume the outer NMI.
854 + */
855 +
856 + movq $repeat_nmi, %rdx
857 + cmpq 8(%rsp), %rdx
858 + ja 1f
859 + movq $end_repeat_nmi, %rdx
860 + cmpq 8(%rsp), %rdx
861 + ja nested_nmi_out
862 +1:
863 +
864 + /*
865 + * Now check "NMI executing". If it's set, then we're nested.
866 + * This will not detect if we interrupted an outer NMI just
867 + * before IRET.
868 */
869 cmpl $1, -8(%rsp)
870 je nested_nmi
871
872 /*
873 - * Now test if the previous stack was an NMI stack.
874 - * We need the double check. We check the NMI stack to satisfy the
875 - * race when the first NMI clears the variable before returning.
876 - * We check the variable because the first NMI could be in a
877 - * breakpoint routine using a breakpoint stack.
878 + * Now test if the previous stack was an NMI stack. This covers
879 + * the case where we interrupt an outer NMI after it clears
880 + * "NMI executing" but before IRET. We need to be careful, though:
881 + * there is one case in which RSP could point to the NMI stack
882 + * despite there being no NMI active: naughty userspace controls
883 + * RSP at the very beginning of the SYSCALL targets. We can
884 + * pull a fast one on naughty userspace, though: we program
885 + * SYSCALL to mask DF, so userspace cannot cause DF to be set
886 + * if it controls the kernel's RSP. We set DF before we clear
887 + * "NMI executing".
888 */
889 lea 6*8(%rsp), %rdx
890 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
891 @@ -1462,25 +1580,21 @@ ENTRY(nmi)
892 cmpq %rdx, 4*8(%rsp)
893 /* If it is below the NMI stack, it is a normal NMI */
894 jb first_nmi
895 - /* Ah, it is within the NMI stack, treat it as nested */
896 +
897 + /* Ah, it is within the NMI stack. */
898 +
899 + testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
900 + jz first_nmi /* RSP was user controlled. */
901 +
902 + /* This is a nested NMI. */
903
904 CFI_REMEMBER_STATE
905
906 nested_nmi:
907 /*
908 - * Do nothing if we interrupted the fixup in repeat_nmi.
909 - * It's about to repeat the NMI handler, so we are fine
910 - * with ignoring this one.
911 + * Modify the "iret" frame to point to repeat_nmi, forcing another
912 + * iteration of NMI handling.
913 */
914 - movq $repeat_nmi, %rdx
915 - cmpq 8(%rsp), %rdx
916 - ja 1f
917 - movq $end_repeat_nmi, %rdx
918 - cmpq 8(%rsp), %rdx
919 - ja nested_nmi_out
920 -
921 -1:
922 - /* Set up the interrupted NMIs stack to jump to repeat_nmi */
923 leaq -1*8(%rsp), %rdx
924 movq %rdx, %rsp
925 CFI_ADJUST_CFA_OFFSET 1*8
926 @@ -1499,60 +1613,23 @@ nested_nmi_out:
927 popq_cfi %rdx
928 CFI_RESTORE rdx
929
930 - /* No need to check faults here */
931 + /* We are returning to kernel mode, so this cannot result in a fault. */
932 INTERRUPT_RETURN
933
934 CFI_RESTORE_STATE
935 first_nmi:
936 - /*
937 - * Because nested NMIs will use the pushed location that we
938 - * stored in rdx, we must keep that space available.
939 - * Here's what our stack frame will look like:
940 - * +-------------------------+
941 - * | original SS |
942 - * | original Return RSP |
943 - * | original RFLAGS |
944 - * | original CS |
945 - * | original RIP |
946 - * +-------------------------+
947 - * | temp storage for rdx |
948 - * +-------------------------+
949 - * | NMI executing variable |
950 - * +-------------------------+
951 - * | copied SS |
952 - * | copied Return RSP |
953 - * | copied RFLAGS |
954 - * | copied CS |
955 - * | copied RIP |
956 - * +-------------------------+
957 - * | Saved SS |
958 - * | Saved Return RSP |
959 - * | Saved RFLAGS |
960 - * | Saved CS |
961 - * | Saved RIP |
962 - * +-------------------------+
963 - * | pt_regs |
964 - * +-------------------------+
965 - *
966 - * The saved stack frame is used to fix up the copied stack frame
967 - * that a nested NMI may change to make the interrupted NMI iret jump
968 - * to the repeat_nmi. The original stack frame and the temp storage
969 - * is also used by nested NMIs and can not be trusted on exit.
970 - */
971 - /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
972 + /* Restore rdx. */
973 movq (%rsp), %rdx
974 CFI_RESTORE rdx
975
976 - /* Set the NMI executing variable on the stack. */
977 + /* Set "NMI executing" on the stack. */
978 pushq_cfi $1
979
980 - /*
981 - * Leave room for the "copied" frame
982 - */
983 + /* Leave room for the "iret" frame */
984 subq $(5*8), %rsp
985 CFI_ADJUST_CFA_OFFSET 5*8
986
987 - /* Copy the stack frame to the Saved frame */
988 + /* Copy the "original" frame to the "outermost" frame */
989 .rept 5
990 pushq_cfi 11*8(%rsp)
991 .endr
992 @@ -1560,6 +1637,7 @@ first_nmi:
993
994 /* Everything up to here is safe from nested NMIs */
995
996 +repeat_nmi:
997 /*
998 * If there was a nested NMI, the first NMI's iret will return
999 * here. But NMIs are still enabled and we can take another
1000 @@ -1568,16 +1646,21 @@ first_nmi:
1001 * it will just return, as we are about to repeat an NMI anyway.
1002 * This makes it safe to copy to the stack frame that a nested
1003 * NMI will update.
1004 - */
1005 -repeat_nmi:
1006 - /*
1007 - * Update the stack variable to say we are still in NMI (the update
1008 - * is benign for the non-repeat case, where 1 was pushed just above
1009 - * to this very stack slot).
1010 + *
1011 + * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
1012 + * we're repeating an NMI, gsbase has the same value that it had on
1013 + * the first iteration. paranoid_entry will load the kernel
1014 + * gsbase if needed before we call do_nmi.
1015 + *
1016 + * Set "NMI executing" in case we came back here via IRET.
1017 */
1018 movq $1, 10*8(%rsp)
1019
1020 - /* Make another copy, this one may be modified by nested NMIs */
1021 + /*
1022 + * Copy the "outermost" frame to the "iret" frame. NMIs that nest
1023 + * here must not modify the "iret" frame while we're writing to
1024 + * it or it will end up containing garbage.
1025 + */
1026 addq $(10*8), %rsp
1027 CFI_ADJUST_CFA_OFFSET -10*8
1028 .rept 5
1029 @@ -1588,9 +1671,9 @@ repeat_nmi:
1030 end_repeat_nmi:
1031
1032 /*
1033 - * Everything below this point can be preempted by a nested
1034 - * NMI if the first NMI took an exception and reset our iret stack
1035 - * so that we repeat another NMI.
1036 + * Everything below this point can be preempted by a nested NMI.
1037 + * If this happens, then the inner NMI will change the "iret"
1038 + * frame to point back to repeat_nmi.
1039 */
1040 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
1041 ALLOC_PT_GPREGS_ON_STACK
1042 @@ -1605,29 +1688,11 @@ end_repeat_nmi:
1043 call paranoid_entry
1044 DEFAULT_FRAME 0
1045
1046 - /*
1047 - * Save off the CR2 register. If we take a page fault in the NMI then
1048 - * it could corrupt the CR2 value. If the NMI preempts a page fault
1049 - * handler before it was able to read the CR2 register, and then the
1050 - * NMI itself takes a page fault, the page fault that was preempted
1051 - * will read the information from the NMI page fault and not the
1052 - * origin fault. Save it off and restore it if it changes.
1053 - * Use the r12 callee-saved register.
1054 - */
1055 - movq %cr2, %r12
1056 -
1057 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
1058 movq %rsp,%rdi
1059 movq $-1,%rsi
1060 call do_nmi
1061
1062 - /* Did the NMI take a page fault? Restore cr2 if it did */
1063 - movq %cr2, %rcx
1064 - cmpq %rcx, %r12
1065 - je 1f
1066 - movq %r12, %cr2
1067 -1:
1068 -
1069 testl %ebx,%ebx /* swapgs needed? */
1070 jnz nmi_restore
1071 nmi_swapgs:
1072 @@ -1635,12 +1700,27 @@ nmi_swapgs:
1073 nmi_restore:
1074 RESTORE_EXTRA_REGS
1075 RESTORE_C_REGS
1076 - /* Pop the extra iret frame at once */
1077 +
1078 + /* Point RSP at the "iret" frame. */
1079 REMOVE_PT_GPREGS_FROM_STACK 6*8
1080
1081 - /* Clear the NMI executing stack variable */
1082 - movq $0, 5*8(%rsp)
1083 - jmp irq_return
1084 + /*
1085 + * Clear "NMI executing". Set DF first so that we can easily
1086 + * distinguish the remaining code between here and IRET from
1087 + * the SYSCALL entry and exit paths. On a native kernel, we
1088 + * could just inspect RIP, but, on paravirt kernels,
1089 + * INTERRUPT_RETURN can translate into a jump into a
1090 + * hypercall page.
1091 + */
1092 + std
1093 + movq $0, 5*8(%rsp) /* clear "NMI executing" */
1094 +
1095 + /*
1096 + * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
1097 + * stack in a single instruction. We are returning to kernel
1098 + * mode, so this cannot result in a fault.
1099 + */
1100 + INTERRUPT_RETURN
1101 CFI_ENDPROC
1102 END(nmi)
1103
1104 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
1105 index c3e985d1751c..d05bd2e2ee91 100644
1106 --- a/arch/x86/kernel/nmi.c
1107 +++ b/arch/x86/kernel/nmi.c
1108 @@ -408,15 +408,15 @@ static void default_do_nmi(struct pt_regs *regs)
1109 NOKPROBE_SYMBOL(default_do_nmi);
1110
1111 /*
1112 - * NMIs can hit breakpoints which will cause it to lose its
1113 - * NMI context with the CPU when the breakpoint does an iret.
1114 - */
1115 -#ifdef CONFIG_X86_32
1116 -/*
1117 - * For i386, NMIs use the same stack as the kernel, and we can
1118 - * add a workaround to the iret problem in C (preventing nested
1119 - * NMIs if an NMI takes a trap). Simply have 3 states the NMI
1120 - * can be in:
1121 + * NMIs can page fault or hit breakpoints which will cause them to lose
1122 + * their NMI context with the CPU when the breakpoint or page fault does an IRET.
1123 + *
1124 + * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
1125 + * NMI processing. On x86_64, the asm glue protects us from nested NMIs
1126 + * if the outer NMI came from kernel mode, but we can still nest if the
1127 + * outer NMI came from user mode.
1128 + *
1129 + * To handle these nested NMIs, we have three states:
1130 *
1131 * 1) not running
1132 * 2) executing
1133 @@ -430,15 +430,14 @@ NOKPROBE_SYMBOL(default_do_nmi);
1134 * (Note, the latch is binary, thus multiple NMIs triggering,
1135 * when one is running, are ignored. Only one NMI is restarted.)
1136 *
1137 - * If an NMI hits a breakpoint that executes an iret, another
1138 - * NMI can preempt it. We do not want to allow this new NMI
1139 - * to run, but we want to execute it when the first one finishes.
1140 - * We set the state to "latched", and the exit of the first NMI will
1141 - * perform a dec_return, if the result is zero (NOT_RUNNING), then
1142 - * it will simply exit the NMI handler. If not, the dec_return
1143 - * would have set the state to NMI_EXECUTING (what we want it to
1144 - * be when we are running). In this case, we simply jump back
1145 - * to rerun the NMI handler again, and restart the 'latched' NMI.
1146 + * If an NMI executes an iret, another NMI can preempt it. We do not
1147 + * want to allow this new NMI to run, but we want to execute it when the
1148 + * first one finishes. We set the state to "latched", and the exit of
1149 + * the first NMI will perform a dec_return, if the result is zero
1150 + * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
1151 + * dec_return would have set the state to NMI_EXECUTING (what we want it
1152 + * to be when we are running). In this case, we simply jump back to
1153 + * rerun the NMI handler again, and restart the 'latched' NMI.
1154 *
1155 * No trap (breakpoint or page fault) should be hit before nmi_restart,
1156 * thus there is no race between the first check of state for NOT_RUNNING
1157 @@ -461,49 +460,36 @@ enum nmi_states {
1158 static DEFINE_PER_CPU(enum nmi_states, nmi_state);
1159 static DEFINE_PER_CPU(unsigned long, nmi_cr2);
1160
1161 -#define nmi_nesting_preprocess(regs) \
1162 - do { \
1163 - if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \
1164 - this_cpu_write(nmi_state, NMI_LATCHED); \
1165 - return; \
1166 - } \
1167 - this_cpu_write(nmi_state, NMI_EXECUTING); \
1168 - this_cpu_write(nmi_cr2, read_cr2()); \
1169 - } while (0); \
1170 - nmi_restart:
1171 -
1172 -#define nmi_nesting_postprocess() \
1173 - do { \
1174 - if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \
1175 - write_cr2(this_cpu_read(nmi_cr2)); \
1176 - if (this_cpu_dec_return(nmi_state)) \
1177 - goto nmi_restart; \
1178 - } while (0)
1179 -#else /* x86_64 */
1180 +#ifdef CONFIG_X86_64
1181 /*
1182 - * In x86_64 things are a bit more difficult. This has the same problem
1183 - * where an NMI hitting a breakpoint that calls iret will remove the
1184 - * NMI context, allowing a nested NMI to enter. What makes this more
1185 - * difficult is that both NMIs and breakpoints have their own stack.
1186 - * When a new NMI or breakpoint is executed, the stack is set to a fixed
1187 - * point. If an NMI is nested, it will have its stack set at that same
1188 - * fixed address that the first NMI had, and will start corrupting the
1189 - * stack. This is handled in entry_64.S, but the same problem exists with
1190 - * the breakpoint stack.
1191 + * In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without
1192 + * some care, the inner breakpoint will clobber the outer breakpoint's
1193 + * stack.
1194 *
1195 - * If a breakpoint is being processed, and the debug stack is being used,
1196 - * if an NMI comes in and also hits a breakpoint, the stack pointer
1197 - * will be set to the same fixed address as the breakpoint that was
1198 - * interrupted, causing that stack to be corrupted. To handle this case,
1199 - * check if the stack that was interrupted is the debug stack, and if
1200 - * so, change the IDT so that new breakpoints will use the current stack
1201 - * and not switch to the fixed address. On return of the NMI, switch back
1202 - * to the original IDT.
1203 + * If a breakpoint is being processed, and the debug stack is being
1204 + * used, if an NMI comes in and also hits a breakpoint, the stack
1205 + * pointer will be set to the same fixed address as the breakpoint that
1206 + * was interrupted, causing that stack to be corrupted. To handle this
1207 + * case, check if the stack that was interrupted is the debug stack, and
1208 + * if so, change the IDT so that new breakpoints will use the current
1209 + * stack and not switch to the fixed address. On return of the NMI,
1210 + * switch back to the original IDT.
1211 */
1212 static DEFINE_PER_CPU(int, update_debug_stack);
1213 +#endif
1214
1215 -static inline void nmi_nesting_preprocess(struct pt_regs *regs)
1216 +dotraplinkage notrace void
1217 +do_nmi(struct pt_regs *regs, long error_code)
1218 {
1219 + if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
1220 + this_cpu_write(nmi_state, NMI_LATCHED);
1221 + return;
1222 + }
1223 + this_cpu_write(nmi_state, NMI_EXECUTING);
1224 + this_cpu_write(nmi_cr2, read_cr2());
1225 +nmi_restart:
1226 +
1227 +#ifdef CONFIG_X86_64
1228 /*
1229 * If we interrupted a breakpoint, it is possible that
1230 * the nmi handler will have breakpoints too. We need to
1231 @@ -514,22 +500,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
1232 debug_stack_set_zero();
1233 this_cpu_write(update_debug_stack, 1);
1234 }
1235 -}
1236 -
1237 -static inline void nmi_nesting_postprocess(void)
1238 -{
1239 - if (unlikely(this_cpu_read(update_debug_stack))) {
1240 - debug_stack_reset();
1241 - this_cpu_write(update_debug_stack, 0);
1242 - }
1243 -}
1244 #endif
1245
1246 -dotraplinkage notrace void
1247 -do_nmi(struct pt_regs *regs, long error_code)
1248 -{
1249 - nmi_nesting_preprocess(regs);
1250 -
1251 nmi_enter();
1252
1253 inc_irq_stat(__nmi_count);
1254 @@ -539,8 +511,17 @@ do_nmi(struct pt_regs *regs, long error_code)
1255
1256 nmi_exit();
1257
1258 - /* On i386, may loop back to preprocess */
1259 - nmi_nesting_postprocess();
1260 +#ifdef CONFIG_X86_64
1261 + if (unlikely(this_cpu_read(update_debug_stack))) {
1262 + debug_stack_reset();
1263 + this_cpu_write(update_debug_stack, 0);
1264 + }
1265 +#endif
1266 +
1267 + if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
1268 + write_cr2(this_cpu_read(nmi_cr2));
1269 + if (this_cpu_dec_return(nmi_state))
1270 + goto nmi_restart;
1271 }
1272 NOKPROBE_SYMBOL(do_nmi);
1273
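
The state machine that replaces the old nmi_nesting_preprocess()/postprocess() macros is small enough to model in user space. The sketch below mirrors the control flow of do_nmi() with C11 atomics (in the kernel these are per-CPU variables, since NMI nesting is same-CPU reentrancy):

#include <stdatomic.h>

enum { NMI_NOT_RUNNING = 0, NMI_EXECUTING = 1, NMI_LATCHED = 2 };

static _Atomic int nmi_state;           /* per-CPU in the kernel */

static void handle_nmi_sources(void)
{
        /* ... dispatch to registered handlers ... */
}

static void do_nmi_model(void)
{
        if (atomic_load(&nmi_state) != NMI_NOT_RUNNING) {
                /* nested: remember it and let the outer instance replay */
                atomic_store(&nmi_state, NMI_LATCHED);
                return;
        }
        atomic_store(&nmi_state, NMI_EXECUTING);
restart:
        handle_nmi_sources();
        /* dec_return: LATCHED(2) drops to EXECUTING(1) -> run again;
         * EXECUTING(1) drops to NOT_RUNNING(0) -> done */
        if (atomic_fetch_sub(&nmi_state, 1) - 1 != NMI_NOT_RUNNING)
                goto restart;
}
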
1274 diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
1275 index 9d28383fc1e7..c4ea87eedf8a 100644
1276 --- a/arch/x86/kvm/lapic.h
1277 +++ b/arch/x86/kvm/lapic.h
1278 @@ -150,7 +150,7 @@ static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
1279
1280 static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
1281 {
1282 - return vcpu->arch.apic->pending_events;
1283 + return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
1284 }
1285
1286 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
1287 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
1288 index 46957ead3060..a671e837228d 100644
1289 --- a/arch/x86/xen/enlighten.c
1290 +++ b/arch/x86/xen/enlighten.c
1291 @@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
1292 pte_t pte;
1293 unsigned long pfn;
1294 struct page *page;
1295 + unsigned char dummy;
1296
1297 ptep = lookup_address((unsigned long)v, &level);
1298 BUG_ON(ptep == NULL);
1299 @@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
1300
1301 pte = pfn_pte(pfn, prot);
1302
1303 + /*
1304 + * Careful: update_va_mapping() will fail if the virtual address
1305 + * we're poking isn't populated in the page tables. We don't
1306 + * need to worry about the direct map (that's always in the page
1307 + * tables), but we need to be careful about vmap space. In
1308 + * particular, the top level page table can lazily propagate
1309 + * entries between processes, so if we've switched mms since we
1310 + * vmapped the target in the first place, we might not have the
1311 + * top-level page table entry populated.
1312 + *
1313 + * We disable preemption because we want the same mm active when
1314 + * we probe the target and when we issue the hypercall. We'll
1315 + * have the same nominal mm, but if we're a kernel thread, lazy
1316 + * mm dropping could change our pgd.
1317 + *
1318 + * Out of an abundance of caution, this uses __get_user() to fault
1319 + * in the target address just in case there's some obscure case
1320 + * in which the target address isn't readable.
1321 + */
1322 +
1323 + preempt_disable();
1324 +
1325 + pagefault_disable(); /* Avoid warnings due to being atomic. */
1326 + __get_user(dummy, (unsigned char __user __force *)v);
1327 + pagefault_enable();
1328 +
1329 if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
1330 BUG();
1331
1332 @@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
1333 BUG();
1334 } else
1335 kmap_flush_unused();
1336 +
1337 + preempt_enable();
1338 }
1339
1340 static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
1341 @@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
1342 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
1343 int i;
1344
1345 + /*
1346 + * We need to mark all the aliases of the LDT pages RO. We
1347 + * don't need to call vm_flush_aliases(), though, since that's
1348 + * only responsible for flushing aliases out of the TLBs, not the
1349 + * page tables, and Xen will flush the TLB for us if needed.
1350 + *
1351 + * To avoid confusing future readers: none of this is necessary
1352 + * to load the LDT. The hypervisor only checks this when the
1353 + * LDT is faulted in due to subsequent descriptor access.
1354 + */
1355 +
1356 for(i = 0; i < entries; i += entries_per_page)
1357 set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
1358 }
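
The set_aliased_prot() change follows the pattern its long comment describes: pin the current mm by disabling preemption, touch the target once so any lazily-propagated top-level page-table entry is populated, then perform the operation that needs the mapping. Reduced to its skeleton (kernel context assumed; probe_then_poke() is a name invented for this sketch):

#include <linux/preempt.h>
#include <linux/uaccess.h>

static void probe_then_poke(void *v)
{
        unsigned char dummy;

        preempt_disable();      /* same mm for the probe and the use */

        pagefault_disable();    /* we may be atomic: suppress warnings */
        __get_user(dummy, (unsigned char __user __force *)v);
        pagefault_enable();

        /* ... operation that requires the mapping, e.g. the hypercall ... */

        preempt_enable();
}
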
1359 diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
1360 index 53f253574abe..010ce0b1f517 100644
1361 --- a/drivers/block/rbd.c
1362 +++ b/drivers/block/rbd.c
1363 @@ -522,6 +522,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
1364 # define rbd_assert(expr) ((void) 0)
1365 #endif /* !RBD_DEBUG */
1366
1367 +static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
1368 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
1369 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
1370 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
1371 @@ -1797,6 +1798,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1372 obj_request_done_set(obj_request);
1373 }
1374
1375 +static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
1376 +{
1377 + dout("%s: obj %p\n", __func__, obj_request);
1378 +
1379 + if (obj_request_img_data_test(obj_request))
1380 + rbd_osd_copyup_callback(obj_request);
1381 + else
1382 + obj_request_done_set(obj_request);
1383 +}
1384 +
1385 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1386 struct ceph_msg *msg)
1387 {
1388 @@ -1845,6 +1856,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1389 rbd_osd_discard_callback(obj_request);
1390 break;
1391 case CEPH_OSD_OP_CALL:
1392 + rbd_osd_call_callback(obj_request);
1393 + break;
1394 case CEPH_OSD_OP_NOTIFY_ACK:
1395 case CEPH_OSD_OP_WATCH:
1396 rbd_osd_trivial_callback(obj_request);
1397 @@ -2509,13 +2522,15 @@ out_unwind:
1398 }
1399
1400 static void
1401 -rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
1402 +rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
1403 {
1404 struct rbd_img_request *img_request;
1405 struct rbd_device *rbd_dev;
1406 struct page **pages;
1407 u32 page_count;
1408
1409 + dout("%s: obj %p\n", __func__, obj_request);
1410 +
1411 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
1412 obj_request->type == OBJ_REQUEST_NODATA);
1413 rbd_assert(obj_request_img_data_test(obj_request));
1414 @@ -2542,9 +2557,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
1415 if (!obj_request->result)
1416 obj_request->xferred = obj_request->length;
1417
1418 - /* Finish up with the normal image object callback */
1419 -
1420 - rbd_img_obj_callback(obj_request);
1421 + obj_request_done_set(obj_request);
1422 }
1423
1424 static void
1425 @@ -2629,7 +2642,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
1426
1427 /* All set, send it off. */
1428
1429 - orig_request->callback = rbd_img_obj_copyup_callback;
1430 osdc = &rbd_dev->rbd_client->client->osdc;
1431 img_result = rbd_obj_request_submit(osdc, orig_request);
1432 if (!img_result)
1433 diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
1434 index da8faf78536a..5643b65cee20 100644
1435 --- a/drivers/char/hw_random/core.c
1436 +++ b/drivers/char/hw_random/core.c
1437 @@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
1438 static void start_khwrngd(void)
1439 {
1440 hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
1441 - if (hwrng_fill == ERR_PTR(-ENOMEM)) {
1442 + if (IS_ERR(hwrng_fill)) {
1443 pr_err("hwrng_fill thread creation failed");
1444 hwrng_fill = NULL;
1445 }
1446 diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
1447 index a43048b5b05f..3c1a123f909c 100644
1448 --- a/drivers/char/i8k.c
1449 +++ b/drivers/char/i8k.c
1450 @@ -900,6 +900,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
1451
1452 MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
1453
1454 +static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
1455 + {
1456 + /*
1457 + * CPU fan speed going up and down on Dell Studio XPS 8100
1458 + * for unknown reasons.
1459 + */
1460 + .ident = "Dell Studio XPS 8100",
1461 + .matches = {
1462 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1463 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
1464 + },
1465 + },
1466 + { }
1467 +};
1468 +
1469 /*
1470 * Probe for the presence of a supported laptop.
1471 */
1472 @@ -911,7 +926,8 @@ static int __init i8k_probe(void)
1473 /*
1474 * Get DMI information
1475 */
1476 - if (!dmi_check_system(i8k_dmi_table)) {
1477 + if (!dmi_check_system(i8k_dmi_table) ||
1478 + dmi_check_system(i8k_blacklist_dmi_table)) {
1479 if (!ignore_dmi && !force)
1480 return -ENODEV;
1481
1482 diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
1483 index 0dd8a4b12747..4a375ead70e9 100644
1484 --- a/drivers/clk/keystone/pll.c
1485 +++ b/drivers/clk/keystone/pll.c
1486 @@ -37,7 +37,8 @@
1487 * Main PLL or any other PLLs in the device such as ARM PLL, DDR PLL
1488 * or PA PLL available on keystone2. These PLLs are controlled by
1489 * this register. Main PLL is controlled by a PLL controller.
1490 - * @pllm: PLL register map address
1491 + * @pllm: PLL register map address for multiplier bits
1492 + * @pllod: PLL register map address for post divider bits
1493 * @pll_ctl0: PLL controller map address
1494 * @pllm_lower_mask: multiplier lower mask
1495 * @pllm_upper_mask: multiplier upper mask
1496 @@ -53,6 +54,7 @@ struct clk_pll_data {
1497 u32 phy_pllm;
1498 u32 phy_pll_ctl0;
1499 void __iomem *pllm;
1500 + void __iomem *pllod;
1501 void __iomem *pll_ctl0;
1502 u32 pllm_lower_mask;
1503 u32 pllm_upper_mask;
1504 @@ -102,7 +104,11 @@ static unsigned long clk_pllclk_recalc(struct clk_hw *hw,
1505 /* read post divider from od bits*/
1506 postdiv = ((val & pll_data->clkod_mask) >>
1507 pll_data->clkod_shift) + 1;
1508 - else
1509 + else if (pll_data->pllod) {
1510 + postdiv = readl(pll_data->pllod);
1511 + postdiv = ((postdiv & pll_data->clkod_mask) >>
1512 + pll_data->clkod_shift) + 1;
1513 + } else
1514 postdiv = pll_data->postdiv;
1515
1516 rate /= (prediv + 1);
1517 @@ -172,12 +178,21 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
1518 /* assume the PLL has output divider register bits */
1519 pll_data->clkod_mask = CLKOD_MASK;
1520 pll_data->clkod_shift = CLKOD_SHIFT;
1521 +
1522 + /*
1523 + * Check if there is a post-divider register. If not,
1524 + * assume the od bits are part of the control register.
1525 + */
1526 + i = of_property_match_string(node, "reg-names",
1527 + "post-divider");
1528 + pll_data->pllod = of_iomap(node, i);
1529 }
1530
1531 i = of_property_match_string(node, "reg-names", "control");
1532 pll_data->pll_ctl0 = of_iomap(node, i);
1533 if (!pll_data->pll_ctl0) {
1534 pr_err("%s: ioremap failed\n", __func__);
1535 + iounmap(pll_data->pllod);
1536 goto out;
1537 }
1538
1539 @@ -193,6 +208,7 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
1540 pll_data->pllm = of_iomap(node, i);
1541 if (!pll_data->pllm) {
1542 iounmap(pll_data->pll_ctl0);
1543 + iounmap(pll_data->pllod);
1544 goto out;
1545 }
1546 }
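
The new "post-divider" window is optional, and the driver lets the lookup fail softly: of_property_match_string() returns a negative errno when the name is absent, of_iomap() then returns NULL for the bad index, and the error paths can pass the possibly-NULL pointer straight to iounmap(). The lookup, as a sketch:

#include <linux/of.h>
#include <linux/of_address.h>

/* map an optional register window named in "reg-names"; returns NULL
 * when the entry is absent (the negative index makes of_iomap() fail) */
static void __iomem *map_optional_reg(struct device_node *np,
                                      const char *name)
{
        int i = of_property_match_string(np, "reg-names", name);

        return of_iomap(np, i);
}
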
1547 diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
1548 index 48f453555f1f..ede9e9e3c419 100644
1549 --- a/drivers/crypto/ixp4xx_crypto.c
1550 +++ b/drivers/crypto/ixp4xx_crypto.c
1551 @@ -904,7 +904,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
1552 crypt->mode |= NPE_OP_NOT_IN_PLACE;
1553 /* This was never tested by Intel
1554 * for more than one dst buffer, I think. */
1555 - BUG_ON(req->dst->length < nbytes);
1556 req_ctx->dst = NULL;
1557 if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
1558 flags, DMA_FROM_DEVICE))
1559 diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
1560 index 67f80813a06f..e4311ce0cd78 100644
1561 --- a/drivers/crypto/nx/nx-aes-ccm.c
1562 +++ b/drivers/crypto/nx/nx-aes-ccm.c
1563 @@ -494,8 +494,9 @@ out:
1564 static int ccm4309_aes_nx_encrypt(struct aead_request *req)
1565 {
1566 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
1567 + struct nx_gcm_rctx *rctx = aead_request_ctx(req);
1568 struct blkcipher_desc desc;
1569 - u8 *iv = nx_ctx->priv.ccm.iv;
1570 + u8 *iv = rctx->iv;
1571
1572 iv[0] = 3;
1573 memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
1574 @@ -525,8 +526,9 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
1575 static int ccm4309_aes_nx_decrypt(struct aead_request *req)
1576 {
1577 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
1578 + struct nx_gcm_rctx *rctx = aead_request_ctx(req);
1579 struct blkcipher_desc desc;
1580 - u8 *iv = nx_ctx->priv.ccm.iv;
1581 + u8 *iv = rctx->iv;
1582
1583 iv[0] = 3;
1584 memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
1585 diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
1586 index 2617cd4d54dd..dd7e9f3f5b6b 100644
1587 --- a/drivers/crypto/nx/nx-aes-ctr.c
1588 +++ b/drivers/crypto/nx/nx-aes-ctr.c
1589 @@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
1590 if (key_len < CTR_RFC3686_NONCE_SIZE)
1591 return -EINVAL;
1592
1593 - memcpy(nx_ctx->priv.ctr.iv,
1594 + memcpy(nx_ctx->priv.ctr.nonce,
1595 in_key + key_len - CTR_RFC3686_NONCE_SIZE,
1596 CTR_RFC3686_NONCE_SIZE);
1597
1598 @@ -131,14 +131,15 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
1599 unsigned int nbytes)
1600 {
1601 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
1602 - u8 *iv = nx_ctx->priv.ctr.iv;
1603 + u8 iv[16];
1604
1605 + memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_NONCE_SIZE);
1606 memcpy(iv + CTR_RFC3686_NONCE_SIZE,
1607 desc->info, CTR_RFC3686_IV_SIZE);
1608 iv[12] = iv[13] = iv[14] = 0;
1609 iv[15] = 1;
1610
1611 - desc->info = nx_ctx->priv.ctr.iv;
1612 + desc->info = iv;
1613
1614 return ctr_aes_nx_crypt(desc, dst, src, nbytes);
1615 }
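The ctr3686 change is a reentrancy fix: the counter block is now assembled in a per-call stack buffer instead of shared tfm state, and only the 4-byte nonce persists in the context. A standalone sketch of the RFC 3686 block layout being built, with the nonce in bytes 0-3, the per-request IV in bytes 4-11, and a 32-bit big-endian counter starting at 1:

    /* RFC 3686 counter-block construction on a per-request stack
     * buffer, mirroring the nx ctr3686 fix.  Illustrative values. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NONCE_SIZE 4
    #define IV_SIZE    8

    static void build_ctrblk(uint8_t out[16],
                             const uint8_t nonce[NONCE_SIZE],
                             const uint8_t iv[IV_SIZE])
    {
        memcpy(out, nonce, NONCE_SIZE);          /* bytes 0..3  */
        memcpy(out + NONCE_SIZE, iv, IV_SIZE);   /* bytes 4..11 */
        out[12] = out[13] = out[14] = 0;         /* counter = 1 */
        out[15] = 1;                             /* big endian  */
    }

    int main(void)
    {
        uint8_t nonce[NONCE_SIZE] = { 0xde, 0xad, 0xbe, 0xef };
        uint8_t iv[IV_SIZE] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint8_t blk[16];
        int i;

        build_ctrblk(blk, nonce, iv);
        for (i = 0; i < 16; i++)
            printf("%02x", blk[i]);
        printf("\n");
        return 0;
    }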
1616 diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
1617 index 88c562434bc0..c6ebeb644db4 100644
1618 --- a/drivers/crypto/nx/nx-aes-gcm.c
1619 +++ b/drivers/crypto/nx/nx-aes-gcm.c
1620 @@ -330,6 +330,7 @@ out:
1621 static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
1622 {
1623 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
1624 + struct nx_gcm_rctx *rctx = aead_request_ctx(req);
1625 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
1626 struct blkcipher_desc desc;
1627 unsigned int nbytes = req->cryptlen;
1628 @@ -339,7 +340,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
1629
1630 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
1631
1632 - desc.info = nx_ctx->priv.gcm.iv;
1633 + desc.info = rctx->iv;
1634 /* initialize the counter */
1635 *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
1636
1637 @@ -434,8 +435,8 @@ out:
1638
1639 static int gcm_aes_nx_encrypt(struct aead_request *req)
1640 {
1641 - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
1642 - char *iv = nx_ctx->priv.gcm.iv;
1643 + struct nx_gcm_rctx *rctx = aead_request_ctx(req);
1644 + char *iv = rctx->iv;
1645
1646 memcpy(iv, req->iv, 12);
1647
1648 @@ -444,8 +445,8 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
1649
1650 static int gcm_aes_nx_decrypt(struct aead_request *req)
1651 {
1652 - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
1653 - char *iv = nx_ctx->priv.gcm.iv;
1654 + struct nx_gcm_rctx *rctx = aead_request_ctx(req);
1655 + char *iv = rctx->iv;
1656
1657 memcpy(iv, req->iv, 12);
1658
1659 @@ -455,7 +456,8 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
1660 static int gcm4106_aes_nx_encrypt(struct aead_request *req)
1661 {
1662 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
1663 - char *iv = nx_ctx->priv.gcm.iv;
1664 + struct nx_gcm_rctx *rctx = aead_request_ctx(req);
1665 + char *iv = rctx->iv;
1666 char *nonce = nx_ctx->priv.gcm.nonce;
1667
1668 memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
1669 @@ -467,7 +469,8 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
1670 static int gcm4106_aes_nx_decrypt(struct aead_request *req)
1671 {
1672 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
1673 - char *iv = nx_ctx->priv.gcm.iv;
1674 + struct nx_gcm_rctx *rctx = aead_request_ctx(req);
1675 + char *iv = rctx->iv;
1676 char *nonce = nx_ctx->priv.gcm.nonce;
1677
1678 memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
1679 diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
1680 index 8c2faffab4a3..c2f7d4befb55 100644
1681 --- a/drivers/crypto/nx/nx-aes-xcbc.c
1682 +++ b/drivers/crypto/nx/nx-aes-xcbc.c
1683 @@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
1684 unsigned int key_len)
1685 {
1686 struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
1687 + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
1688
1689 switch (key_len) {
1690 case AES_KEYSIZE_128:
1691 @@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
1692 return -EINVAL;
1693 }
1694
1695 - memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);
1696 + memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
1697
1698 return 0;
1699 }
1700 @@ -148,32 +149,29 @@ out:
1701 return rc;
1702 }
1703
1704 -static int nx_xcbc_init(struct shash_desc *desc)
1705 +static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
1706 {
1707 - struct xcbc_state *sctx = shash_desc_ctx(desc);
1708 - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
1709 + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
1710 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
1711 - struct nx_sg *out_sg;
1712 - int len;
1713 + int err;
1714
1715 - nx_ctx_init(nx_ctx, HCOP_FC_AES);
1716 + err = nx_crypto_ctx_aes_xcbc_init(tfm);
1717 + if (err)
1718 + return err;
1719
1720 - memset(sctx, 0, sizeof *sctx);
1721 + nx_ctx_init(nx_ctx, HCOP_FC_AES);
1722
1723 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
1724 csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
1725
1726 - memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
1727 - memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
1728 -
1729 - len = AES_BLOCK_SIZE;
1730 - out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
1731 - &len, nx_ctx->ap->sglen);
1732 + return 0;
1733 +}
1734
1735 - if (len != AES_BLOCK_SIZE)
1736 - return -EINVAL;
1737 +static int nx_xcbc_init(struct shash_desc *desc)
1738 +{
1739 + struct xcbc_state *sctx = shash_desc_ctx(desc);
1740
1741 - nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
1742 + memset(sctx, 0, sizeof *sctx);
1743
1744 return 0;
1745 }
1746 @@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
1747 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
1748 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
1749 struct nx_sg *in_sg;
1750 + struct nx_sg *out_sg;
1751 u32 to_process = 0, leftover, total;
1752 unsigned int max_sg_len;
1753 unsigned long irq_flags;
1754 @@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_desc *desc,
1755 max_sg_len = min_t(u64, max_sg_len,
1756 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
1757
1758 + data_len = AES_BLOCK_SIZE;
1759 + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
1760 + &data_len, nx_ctx->ap->sglen);
1761 +
1762 + if (data_len != AES_BLOCK_SIZE) {
1763 + rc = -EINVAL;
1764 + goto out;
1765 + }
1766 +
1767 + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
1768 +
1769 do {
1770 to_process = total - to_process;
1771 to_process = to_process & ~(AES_BLOCK_SIZE - 1);
1772 @@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
1773 (u8 *) sctx->buffer,
1774 &data_len,
1775 max_sg_len);
1776 - if (data_len != sctx->count)
1777 - return -EINVAL;
1778 + if (data_len != sctx->count) {
1779 + rc = -EINVAL;
1780 + goto out;
1781 + }
1782 }
1783
1784 data_len = to_process - sctx->count;
1785 @@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
1786 &data_len,
1787 max_sg_len);
1788
1789 - if (data_len != to_process - sctx->count)
1790 - return -EINVAL;
1791 + if (data_len != to_process - sctx->count) {
1792 + rc = -EINVAL;
1793 + goto out;
1794 + }
1795
1796 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
1797 sizeof(struct nx_sg);
1798 @@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
1799 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
1800 &len, nx_ctx->ap->sglen);
1801
1802 - if (len != sctx->count)
1803 - return -EINVAL;
1804 + if (len != sctx->count) {
1805 + rc = -EINVAL;
1806 + goto out;
1807 + }
1808
1809 len = AES_BLOCK_SIZE;
1810 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
1811 nx_ctx->ap->sglen);
1812
1813 - if (len != AES_BLOCK_SIZE)
1814 - return -EINVAL;
1815 + if (len != AES_BLOCK_SIZE) {
1816 + rc = -EINVAL;
1817 + goto out;
1818 + }
1819
1820 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
1821 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
1822 @@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
1823 .cra_blocksize = AES_BLOCK_SIZE,
1824 .cra_module = THIS_MODULE,
1825 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
1826 - .cra_init = nx_crypto_ctx_aes_xcbc_init,
1827 + .cra_init = nx_crypto_ctx_aes_xcbc_init2,
1828 .cra_exit = nx_crypto_ctx_exit,
1829 }
1830 };
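A recurring shape in these xcbc hunks: early return -EINVAL statements executed while nx_ctx->lock was held, so each becomes rc = -EINVAL with a goto, and the unlock at the single exit always runs. A minimal sketch of that pattern, with a pthread mutex standing in for the kernel spinlock (build with -lpthread):

    /* Single-exit error handling so the lock is always released,
     * the shape the xcbc hunks move the driver to. */
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int update(int data_len, int expected)
    {
        int rc = 0;

        pthread_mutex_lock(&lock);

        if (data_len != expected) {
            rc = -EINVAL;   /* was: return -EINVAL, which leaks the lock */
            goto out;
        }

        /* ... submit the operation here ... */

    out:
        pthread_mutex_unlock(&lock);
        return rc;
    }

    int main(void)
    {
        printf("%d %d\n", update(16, 16), update(8, 16));
        return 0;
    }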
1831 diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
1832 index 23621da624c3..08f8d5cd6334 100644
1833 --- a/drivers/crypto/nx/nx-sha256.c
1834 +++ b/drivers/crypto/nx/nx-sha256.c
1835 @@ -29,30 +29,28 @@
1836 #include "nx.h"
1837
1838
1839 -static int nx_sha256_init(struct shash_desc *desc)
1840 +static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
1841 {
1842 - struct sha256_state *sctx = shash_desc_ctx(desc);
1843 - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
1844 - int len;
1845 - int rc;
1846 + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
1847 + int err;
1848
1849 - nx_ctx_init(nx_ctx, HCOP_FC_SHA);
1850 + err = nx_crypto_ctx_sha_init(tfm);
1851 + if (err)
1852 + return err;
1853
1854 - memset(sctx, 0, sizeof *sctx);
1855 + nx_ctx_init(nx_ctx, HCOP_FC_SHA);
1856
1857 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
1858
1859 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
1860
1861 - len = SHA256_DIGEST_SIZE;
1862 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
1863 - &nx_ctx->op.outlen,
1864 - &len,
1865 - (u8 *) sctx->state,
1866 - NX_DS_SHA256);
1867 + return 0;
1868 +}
1869
1870 - if (rc)
1871 - goto out;
1872 +static int nx_sha256_init(struct shash_desc *desc) {
1873 + struct sha256_state *sctx = shash_desc_ctx(desc);
1874 +
1875 + memset(sctx, 0, sizeof *sctx);
1876
1877 sctx->state[0] = __cpu_to_be32(SHA256_H0);
1878 sctx->state[1] = __cpu_to_be32(SHA256_H1);
1879 @@ -64,7 +62,6 @@ static int nx_sha256_init(struct shash_desc *desc)
1880 sctx->state[7] = __cpu_to_be32(SHA256_H7);
1881 sctx->count = 0;
1882
1883 -out:
1884 return 0;
1885 }
1886
1887 @@ -74,10 +71,13 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
1888 struct sha256_state *sctx = shash_desc_ctx(desc);
1889 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
1890 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
1891 + struct nx_sg *in_sg;
1892 + struct nx_sg *out_sg;
1893 u64 to_process = 0, leftover, total;
1894 unsigned long irq_flags;
1895 int rc = 0;
1896 int data_len;
1897 + u32 max_sg_len;
1898 u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
1899
1900 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
1901 @@ -97,6 +97,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
1902 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
1903 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
1904
1905 + in_sg = nx_ctx->in_sg;
1906 + max_sg_len = min_t(u64, nx_ctx->ap->sglen,
1907 + nx_driver.of.max_sg_len/sizeof(struct nx_sg));
1908 + max_sg_len = min_t(u64, max_sg_len,
1909 + nx_ctx->ap->databytelen/NX_PAGE_SIZE);
1910 +
1911 + data_len = SHA256_DIGEST_SIZE;
1912 + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
1913 + &data_len, max_sg_len);
1914 + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
1915 +
1916 + if (data_len != SHA256_DIGEST_SIZE) {
1917 + rc = -EINVAL;
1918 + goto out;
1919 + }
1920 +
1921 do {
1922 /*
1923 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
1924 @@ -108,25 +124,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
1925
1926 if (buf_len) {
1927 data_len = buf_len;
1928 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
1929 - &nx_ctx->op.inlen,
1930 - &data_len,
1931 - (u8 *) sctx->buf,
1932 - NX_DS_SHA256);
1933 + in_sg = nx_build_sg_list(nx_ctx->in_sg,
1934 + (u8 *) sctx->buf,
1935 + &data_len,
1936 + max_sg_len);
1937
1938 - if (rc || data_len != buf_len)
1939 + if (data_len != buf_len) {
1940 + rc = -EINVAL;
1941 goto out;
1942 + }
1943 }
1944
1945 data_len = to_process - buf_len;
1946 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
1947 - &nx_ctx->op.inlen,
1948 - &data_len,
1949 - (u8 *) data,
1950 - NX_DS_SHA256);
1951 + in_sg = nx_build_sg_list(in_sg, (u8 *) data,
1952 + &data_len, max_sg_len);
1953
1954 - if (rc)
1955 - goto out;
1956 + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
1957
1958 to_process = (data_len + buf_len);
1959 leftover = total - to_process;
1960 @@ -173,12 +186,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
1961 struct sha256_state *sctx = shash_desc_ctx(desc);
1962 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
1963 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
1964 + struct nx_sg *in_sg, *out_sg;
1965 unsigned long irq_flags;
1966 - int rc;
1967 + u32 max_sg_len;
1968 + int rc = 0;
1969 int len;
1970
1971 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
1972
1973 + max_sg_len = min_t(u64, nx_ctx->ap->sglen,
1974 + nx_driver.of.max_sg_len/sizeof(struct nx_sg));
1975 + max_sg_len = min_t(u64, max_sg_len,
1976 + nx_ctx->ap->databytelen/NX_PAGE_SIZE);
1977 +
1978 /* final is represented by continuing the operation and indicating that
1979 * this is not an intermediate operation */
1980 if (sctx->count >= SHA256_BLOCK_SIZE) {
1981 @@ -195,25 +215,24 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
1982 csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
1983
1984 len = sctx->count & (SHA256_BLOCK_SIZE - 1);
1985 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
1986 - &nx_ctx->op.inlen,
1987 - &len,
1988 - (u8 *) sctx->buf,
1989 - NX_DS_SHA256);
1990 + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
1991 + &len, max_sg_len);
1992
1993 - if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1)))
1994 + if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
1995 + rc = -EINVAL;
1996 goto out;
1997 + }
1998
1999 len = SHA256_DIGEST_SIZE;
2000 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
2001 - &nx_ctx->op.outlen,
2002 - &len,
2003 - out,
2004 - NX_DS_SHA256);
2005 + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);
2006
2007 - if (rc || len != SHA256_DIGEST_SIZE)
2008 + if (len != SHA256_DIGEST_SIZE) {
2009 + rc = -EINVAL;
2010 goto out;
2011 + }
2012
2013 + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
2014 + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
2015 if (!nx_ctx->op.outlen) {
2016 rc = -EINVAL;
2017 goto out;
2018 @@ -268,7 +287,7 @@ struct shash_alg nx_shash_sha256_alg = {
2019 .cra_blocksize = SHA256_BLOCK_SIZE,
2020 .cra_module = THIS_MODULE,
2021 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
2022 - .cra_init = nx_crypto_ctx_sha_init,
2023 + .cra_init = nx_crypto_ctx_sha256_init,
2024 .cra_exit = nx_crypto_ctx_exit,
2025 }
2026 };
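With nx_sha_build_sg_list removed, the sha256 paths now bound their scatter lists inline: the entry count is clamped both by the device's sg limit and by how many NX_PAGE_SIZE chunks fit in databytelen. A sketch of that double clamp, with made-up limits:

    /* Two-way clamp on scatter-gather length, as the sha256/sha512
     * update paths now compute it inline.  Numbers are invented. */
    #include <stdio.h>

    #define NX_PAGE_SIZE 4096

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned long ap_sglen = 128;        /* per-algorithm sg limit   */
        unsigned long max_sg_bytes = 12288;  /* firmware sg buffer bytes */
        unsigned long sg_entry_size = 16;    /* sizeof(struct nx_sg)     */
        unsigned long databytelen = 1 << 20; /* max payload per op       */
        unsigned long max_sg_len;

        max_sg_len = min_ul(ap_sglen, max_sg_bytes / sg_entry_size);
        max_sg_len = min_ul(max_sg_len, databytelen / NX_PAGE_SIZE);

        printf("max_sg_len = %lu\n", max_sg_len);  /* 128 here */
        return 0;
    }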
2027 diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
2028 index b3adf1022673..aff0fe58eac0 100644
2029 --- a/drivers/crypto/nx/nx-sha512.c
2030 +++ b/drivers/crypto/nx/nx-sha512.c
2031 @@ -28,30 +28,29 @@
2032 #include "nx.h"
2033
2034
2035 -static int nx_sha512_init(struct shash_desc *desc)
2036 +static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
2037 {
2038 - struct sha512_state *sctx = shash_desc_ctx(desc);
2039 - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
2040 - int len;
2041 - int rc;
2042 + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
2043 + int err;
2044
2045 - nx_ctx_init(nx_ctx, HCOP_FC_SHA);
2046 + err = nx_crypto_ctx_sha_init(tfm);
2047 + if (err)
2048 + return err;
2049
2050 - memset(sctx, 0, sizeof *sctx);
2051 + nx_ctx_init(nx_ctx, HCOP_FC_SHA);
2052
2053 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
2054
2055 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
2056
2057 - len = SHA512_DIGEST_SIZE;
2058 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
2059 - &nx_ctx->op.outlen,
2060 - &len,
2061 - (u8 *)sctx->state,
2062 - NX_DS_SHA512);
2063 + return 0;
2064 +}
2065
2066 - if (rc || len != SHA512_DIGEST_SIZE)
2067 - goto out;
2068 +static int nx_sha512_init(struct shash_desc *desc)
2069 +{
2070 + struct sha512_state *sctx = shash_desc_ctx(desc);
2071 +
2072 + memset(sctx, 0, sizeof *sctx);
2073
2074 sctx->state[0] = __cpu_to_be64(SHA512_H0);
2075 sctx->state[1] = __cpu_to_be64(SHA512_H1);
2076 @@ -63,7 +62,6 @@ static int nx_sha512_init(struct shash_desc *desc)
2077 sctx->state[7] = __cpu_to_be64(SHA512_H7);
2078 sctx->count[0] = 0;
2079
2080 -out:
2081 return 0;
2082 }
2083
2084 @@ -73,10 +71,13 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
2085 struct sha512_state *sctx = shash_desc_ctx(desc);
2086 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
2087 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
2088 + struct nx_sg *in_sg;
2089 + struct nx_sg *out_sg;
2090 u64 to_process, leftover = 0, total;
2091 unsigned long irq_flags;
2092 int rc = 0;
2093 int data_len;
2094 + u32 max_sg_len;
2095 u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
2096
2097 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
2098 @@ -96,6 +97,22 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
2099 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
2100 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
2101
2102 + in_sg = nx_ctx->in_sg;
2103 + max_sg_len = min_t(u64, nx_ctx->ap->sglen,
2104 + nx_driver.of.max_sg_len/sizeof(struct nx_sg));
2105 + max_sg_len = min_t(u64, max_sg_len,
2106 + nx_ctx->ap->databytelen/NX_PAGE_SIZE);
2107 +
2108 + data_len = SHA512_DIGEST_SIZE;
2109 + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
2110 + &data_len, max_sg_len);
2111 + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
2112 +
2113 + if (data_len != SHA512_DIGEST_SIZE) {
2114 + rc = -EINVAL;
2115 + goto out;
2116 + }
2117 +
2118 do {
2119 /*
2120 * to_process: the SHA512_BLOCK_SIZE data chunk to process in
2121 @@ -108,25 +125,26 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
2122
2123 if (buf_len) {
2124 data_len = buf_len;
2125 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
2126 - &nx_ctx->op.inlen,
2127 - &data_len,
2128 - (u8 *) sctx->buf,
2129 - NX_DS_SHA512);
2130 + in_sg = nx_build_sg_list(nx_ctx->in_sg,
2131 + (u8 *) sctx->buf,
2132 + &data_len, max_sg_len);
2133
2134 - if (rc || data_len != buf_len)
2135 + if (data_len != buf_len) {
2136 + rc = -EINVAL;
2137 goto out;
2138 + }
2139 }
2140
2141 data_len = to_process - buf_len;
2142 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
2143 - &nx_ctx->op.inlen,
2144 - &data_len,
2145 - (u8 *) data,
2146 - NX_DS_SHA512);
2147 + in_sg = nx_build_sg_list(in_sg, (u8 *) data,
2148 + &data_len, max_sg_len);
2149 +
2150 + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
2151
2152 - if (rc || data_len != (to_process - buf_len))
2153 + if (data_len != (to_process - buf_len)) {
2154 + rc = -EINVAL;
2155 goto out;
2156 + }
2157
2158 to_process = (data_len + buf_len);
2159 leftover = total - to_process;
2160 @@ -172,13 +190,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
2161 struct sha512_state *sctx = shash_desc_ctx(desc);
2162 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
2163 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
2164 + struct nx_sg *in_sg, *out_sg;
2165 + u32 max_sg_len;
2166 u64 count0;
2167 unsigned long irq_flags;
2168 - int rc;
2169 + int rc = 0;
2170 int len;
2171
2172 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
2173
2174 + max_sg_len = min_t(u64, nx_ctx->ap->sglen,
2175 + nx_driver.of.max_sg_len/sizeof(struct nx_sg));
2176 + max_sg_len = min_t(u64, max_sg_len,
2177 + nx_ctx->ap->databytelen/NX_PAGE_SIZE);
2178 +
2179 /* final is represented by continuing the operation and indicating that
2180 * this is not an intermediate operation */
2181 if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
2182 @@ -200,24 +225,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
2183 csbcpb->cpb.sha512.message_bit_length_lo = count0;
2184
2185 len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
2186 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
2187 - &nx_ctx->op.inlen,
2188 - &len,
2189 - (u8 *)sctx->buf,
2190 - NX_DS_SHA512);
2191 + in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
2192 + max_sg_len);
2193
2194 - if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
2195 + if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
2196 + rc = -EINVAL;
2197 goto out;
2198 + }
2199
2200 len = SHA512_DIGEST_SIZE;
2201 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
2202 - &nx_ctx->op.outlen,
2203 - &len,
2204 - out,
2205 - NX_DS_SHA512);
2206 + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
2207 + max_sg_len);
2208
2209 - if (rc)
2210 - goto out;
2211 + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
2212 + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
2213
2214 if (!nx_ctx->op.outlen) {
2215 rc = -EINVAL;
2216 @@ -273,7 +294,7 @@ struct shash_alg nx_shash_sha512_alg = {
2217 .cra_blocksize = SHA512_BLOCK_SIZE,
2218 .cra_module = THIS_MODULE,
2219 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
2220 - .cra_init = nx_crypto_ctx_sha_init,
2221 + .cra_init = nx_crypto_ctx_sha512_init,
2222 .cra_exit = nx_crypto_ctx_exit,
2223 }
2224 };
2225 diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
2226 index 1da6dc59d0dd..737d33dc50b8 100644
2227 --- a/drivers/crypto/nx/nx.c
2228 +++ b/drivers/crypto/nx/nx.c
2229 @@ -215,8 +215,15 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
2230 * @delta: is the amount we need to crop in order to bound the list.
2231 *
2232 */
2233 -static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta)
2234 +static long int trim_sg_list(struct nx_sg *sg,
2235 + struct nx_sg *end,
2236 + unsigned int delta,
2237 + unsigned int *nbytes)
2238 {
2239 + long int oplen;
2240 + long int data_back;
2241 + unsigned int is_delta = delta;
2242 +
2243 while (delta && end > sg) {
2244 struct nx_sg *last = end - 1;
2245
2246 @@ -228,54 +235,20 @@ static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int d
2247 delta -= last->len;
2248 }
2249 }
2250 - return (sg - end) * sizeof(struct nx_sg);
2251 -}
2252 -
2253 -/**
2254 - * nx_sha_build_sg_list - walk and build sg list to sha modes
2255 - * using right bounds and limits.
2256 - * @nx_ctx: NX crypto context for the lists we're building
2257 - * @nx_sg: current sg list in or out list
2258 - * @op_len: current op_len to be used in order to build a sg list
2259 - * @nbytes: number or bytes to be processed
2260 - * @offset: buf offset
2261 - * @mode: SHA256 or SHA512
2262 - */
2263 -int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
2264 - struct nx_sg *nx_in_outsg,
2265 - s64 *op_len,
2266 - unsigned int *nbytes,
2267 - u8 *offset,
2268 - u32 mode)
2269 -{
2270 - unsigned int delta = 0;
2271 - unsigned int total = *nbytes;
2272 - struct nx_sg *nx_insg = nx_in_outsg;
2273 - unsigned int max_sg_len;
2274
2275 - max_sg_len = min_t(u64, nx_ctx->ap->sglen,
2276 - nx_driver.of.max_sg_len/sizeof(struct nx_sg));
2277 - max_sg_len = min_t(u64, max_sg_len,
2278 - nx_ctx->ap->databytelen/NX_PAGE_SIZE);
2279 -
2280 - *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
2281 - nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);
2282 -
2283 - switch (mode) {
2284 - case NX_DS_SHA256:
2285 - if (*nbytes < total)
2286 - delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
2287 - break;
2288 - case NX_DS_SHA512:
2289 - if (*nbytes < total)
2290 - delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
2291 - break;
2292 - default:
2293 - return -EINVAL;
2294 + /* There are cases where we need to crop the list to make it a
2295 + * block size multiple, but we also need to align the data. To do
2296 + * that, we need to calculate how much data must be put back to be
2297 + * processed later.
2298 + */
2299 + oplen = (sg - end) * sizeof(struct nx_sg);
2300 + if (is_delta) {
2301 + data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len;
2302 + data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
2303 + *nbytes -= data_back;
2304 }
2305 - *op_len = trim_sg_list(nx_in_outsg, nx_insg, delta);
2306
2307 - return 0;
2308 + return oplen;
2309 }
2310
2311 /**
2312 @@ -330,8 +303,8 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
2313 /* these lengths should be negative, which will indicate to phyp that
2314 * the input and output parameters are scatterlists, not linear
2315 * buffers */
2316 - nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta);
2317 - nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta);
2318 + nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
2319 + nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);
2320
2321 return 0;
2322 }
2323 @@ -662,12 +635,14 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
2324 /* entry points from the crypto tfm initializers */
2325 int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
2326 {
2327 + tfm->crt_aead.reqsize = sizeof(struct nx_ccm_rctx);
2328 return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
2329 NX_MODE_AES_CCM);
2330 }
2331
2332 int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm)
2333 {
2334 + tfm->crt_aead.reqsize = sizeof(struct nx_gcm_rctx);
2335 return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
2336 NX_MODE_AES_GCM);
2337 }
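trim_sg_list now also reports back, through *nbytes, how much of the cropped data must be carried into the next operation so that only whole cipher blocks are consumed now. The sketch below shows the underlying intent (process whole blocks, re-queue the remainder) rather than the driver's exact sg arithmetic:

    /* The idea behind the trim_sg_list change: when a list is cropped,
     * only a whole number of cipher blocks may be processed now; the
     * remainder is handed back for the next pass. */
    #include <stdio.h>

    #define AES_BLOCK_SIZE 16

    static unsigned int trim_to_blocks(unsigned int *nbytes)
    {
        unsigned int usable = *nbytes & ~(AES_BLOCK_SIZE - 1);
        unsigned int put_back = *nbytes - usable;

        *nbytes = usable;   /* what this operation will consume */
        return put_back;    /* what must be re-queued afterwards */
    }

    int main(void)
    {
        unsigned int nbytes = 1000;
        unsigned int back = trim_to_blocks(&nbytes);

        printf("process %u now, carry %u\n", nbytes, back); /* 992, 8 */
        return 0;
    }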
2338 diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
2339 index 6c9ecaaead52..c3ed83764fef 100644
2340 --- a/drivers/crypto/nx/nx.h
2341 +++ b/drivers/crypto/nx/nx.h
2342 @@ -2,6 +2,8 @@
2343 #ifndef __NX_H__
2344 #define __NX_H__
2345
2346 +#include <crypto/ctr.h>
2347 +
2348 #define NX_NAME "nx-crypto"
2349 #define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
2350 #define NX_VERSION "1.0"
2351 @@ -91,8 +93,11 @@ struct nx_crypto_driver {
2352
2353 #define NX_GCM4106_NONCE_LEN (4)
2354 #define NX_GCM_CTR_OFFSET (12)
2355 -struct nx_gcm_priv {
2356 +struct nx_gcm_rctx {
2357 u8 iv[16];
2358 +};
2359 +
2360 +struct nx_gcm_priv {
2361 u8 iauth_tag[16];
2362 u8 nonce[NX_GCM4106_NONCE_LEN];
2363 };
2364 @@ -100,8 +105,11 @@ struct nx_gcm_priv {
2365 #define NX_CCM_AES_KEY_LEN (16)
2366 #define NX_CCM4309_AES_KEY_LEN (19)
2367 #define NX_CCM4309_NONCE_LEN (3)
2368 -struct nx_ccm_priv {
2369 +struct nx_ccm_rctx {
2370 u8 iv[16];
2371 +};
2372 +
2373 +struct nx_ccm_priv {
2374 u8 b0[16];
2375 u8 iauth_tag[16];
2376 u8 oauth_tag[16];
2377 @@ -113,7 +121,7 @@ struct nx_xcbc_priv {
2378 };
2379
2380 struct nx_ctr_priv {
2381 - u8 iv[16];
2382 + u8 nonce[CTR_RFC3686_NONCE_SIZE];
2383 };
2384
2385 struct nx_crypto_ctx {
2386 @@ -153,8 +161,6 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
2387 void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
2388 int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
2389 u32 may_sleep);
2390 -int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *,
2391 - s64 *, unsigned int *, u8 *, u32);
2392 struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
2393 int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
2394 struct scatterlist *, struct scatterlist *, unsigned int *,
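The nx.h split moves the IV out of the long-lived per-tfm private structs into small per-request contexts, sized via the reqsize assignments added in nx.c; the crypto core then allocates each request with that much trailing space for the driver. A simplified stand-in for that mechanism follows; none of these types are the real crypto API.

    /* reqsize-style allocation: the core allocates one buffer of
     * sizeof(request) + reqsize and the driver treats the tail as its
     * per-request context.  Simplified stand-in types only. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct aead_request_like {
        int cryptlen;
        unsigned char __ctx[];      /* driver context follows */
    };

    struct gcm_rctx_like {
        unsigned char iv[16];       /* per request, so writes can't race */
    };

    static struct aead_request_like *request_alloc(size_t reqsize)
    {
        return calloc(1, sizeof(struct aead_request_like) + reqsize);
    }

    static void *request_ctx(struct aead_request_like *req)
    {
        return req->__ctx;          /* what aead_request_ctx() hands back */
    }

    int main(void)
    {
        /* the driver states its need once, as the nx.c init hooks now do */
        struct aead_request_like *req =
            request_alloc(sizeof(struct gcm_rctx_like));
        struct gcm_rctx_like *rctx;

        if (!req)
            return 1;
        rctx = request_ctx(req);
        memset(rctx->iv, 0xab, sizeof(rctx->iv));
        printf("iv[0]=0x%02x\n", rctx->iv[0]);
        free(req);
        return 0;
    }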
2395 diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
2396 index 1dc5b0a17cf7..34139a8894a0 100644
2397 --- a/drivers/crypto/qat/qat_common/qat_algs.c
2398 +++ b/drivers/crypto/qat/qat_common/qat_algs.c
2399 @@ -73,7 +73,8 @@
2400 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
2401 ICP_QAT_HW_CIPHER_DECRYPT)
2402
2403 -static atomic_t active_dev;
2404 +static DEFINE_MUTEX(algs_lock);
2405 +static unsigned int active_devs;
2406
2407 struct qat_alg_buf {
2408 uint32_t len;
2409 @@ -1271,7 +1272,10 @@ static struct crypto_alg qat_algs[] = { {
2410
2411 int qat_algs_register(void)
2412 {
2413 - if (atomic_add_return(1, &active_dev) == 1) {
2414 + int ret = 0;
2415 +
2416 + mutex_lock(&algs_lock);
2417 + if (++active_devs == 1) {
2418 int i;
2419
2420 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
2421 @@ -1280,21 +1284,25 @@ int qat_algs_register(void)
2422 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
2423 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
2424
2425 - return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
2426 + ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
2427 }
2428 - return 0;
2429 + mutex_unlock(&algs_lock);
2430 + return ret;
2431 }
2432
2433 int qat_algs_unregister(void)
2434 {
2435 - if (atomic_sub_return(1, &active_dev) == 0)
2436 - return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
2437 - return 0;
2438 + int ret = 0;
2439 +
2440 + mutex_lock(&algs_lock);
2441 + if (--active_devs == 0)
2442 + ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
2443 + mutex_unlock(&algs_lock);
2444 + return ret;
2445 }
2446
2447 int qat_algs_init(void)
2448 {
2449 - atomic_set(&active_dev, 0);
2450 crypto_get_default_rng();
2451 return 0;
2452 }
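The qat change replaces an atomic counter with a mutex-guarded count, so updating the count and performing the one-time (un)registration form a single critical section; with the atomic version, two devices arriving together could race past the == 1 check. A userspace sketch of the register-once/unregister-last pattern (build with -lpthread):

    /* Register-once / unregister-last guarded by a mutex, the shape
     * the qat change adopts.  pthreads stand in for mutex_lock(). */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t algs_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int active_devs;

    static int register_algs(void)   { printf("register\n");   return 0; }
    static int unregister_algs(void) { printf("unregister\n"); return 0; }

    int algs_register(void)
    {
        int ret = 0;

        pthread_mutex_lock(&algs_lock);
        if (++active_devs == 1)        /* first device does the work */
            ret = register_algs();
        pthread_mutex_unlock(&algs_lock);
        return ret;
    }

    int algs_unregister(void)
    {
        int ret = 0;

        pthread_mutex_lock(&algs_lock);
        if (--active_devs == 0)        /* last device undoes it */
            ret = unregister_algs();
        pthread_mutex_unlock(&algs_lock);
        return ret;
    }

    int main(void)
    {
        algs_register();   /* register   */
        algs_register();   /* no-op      */
        algs_unregister(); /* no-op      */
        algs_unregister(); /* unregister */
        return 0;
    }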
2453 diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
2454 index 7992164ea9ec..c89a7abb523f 100644
2455 --- a/drivers/dma/at_xdmac.c
2456 +++ b/drivers/dma/at_xdmac.c
2457 @@ -648,16 +648,17 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2458 desc->lld.mbr_sa = mem;
2459 desc->lld.mbr_da = atchan->sconfig.dst_addr;
2460 }
2461 - desc->lld.mbr_cfg = atchan->cfg;
2462 - dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
2463 + dwidth = at_xdmac_get_dwidth(atchan->cfg);
2464 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
2465 - ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
2466 + ? dwidth
2467 : AT_XDMAC_CC_DWIDTH_BYTE;
2468 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
2469 | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
2470 | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
2471 | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE) /* descriptor fetch */
2472 | (len >> fixed_dwidth); /* microblock length */
2473 + desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
2474 + AT_XDMAC_CC_DWIDTH(fixed_dwidth);
2475 dev_dbg(chan2dev(chan),
2476 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
2477 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
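The at_xdmac fix recomputes the transfer width per segment, dropping to byte width when the segment length is not aligned to the configured width, and writes the chosen width back into the descriptor's configuration so it agrees with the microblock length. A sketch of that decision; the field encoding below is illustrative, not the controller's actual register layout.

    /* Per-segment data-width selection, as the at_xdmac fix computes
     * it.  Width codes: 0 = byte, 1 = half-word, 2 = word. */
    #include <stdio.h>

    #define DWIDTH_BYTE  0u
    #define DWIDTH_SHIFT 11
    #define DWIDTH_MASK  (3u << DWIDTH_SHIFT)

    static int is_aligned(unsigned int len, unsigned int bytes)
    {
        return (len & (bytes - 1)) == 0;
    }

    int main(void)
    {
        unsigned int cfg_dwidth = 2;   /* channel set up for 4-byte words */
        unsigned int len = 1022;       /* segment not word aligned */
        unsigned int cfg = cfg_dwidth << DWIDTH_SHIFT;
        unsigned int fixed_dwidth, ublen, mbr_cfg;

        fixed_dwidth = is_aligned(len, 1u << cfg_dwidth)
                     ? cfg_dwidth : DWIDTH_BYTE;

        ublen = len >> fixed_dwidth;   /* microblock length in beats */
        mbr_cfg = (cfg & ~DWIDTH_MASK) | (fixed_dwidth << DWIDTH_SHIFT);

        printf("dwidth=%u ublen=%u cfg=0x%x\n", fixed_dwidth, ublen, mbr_cfg);
        return 0;
    }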
2478 diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
2479 index 340f9e607cd8..3dabc52b9615 100644
2480 --- a/drivers/dma/pl330.c
2481 +++ b/drivers/dma/pl330.c
2482 @@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2483 desc->txd.callback = last->txd.callback;
2484 desc->txd.callback_param = last->txd.callback_param;
2485 }
2486 - last->last = false;
2487 + desc->last = false;
2488
2489 dma_cookie_assign(&desc->txd);
2490
2491 @@ -2621,6 +2621,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
2492 desc->rqcfg.brst_len = 1;
2493
2494 desc->rqcfg.brst_len = get_burst_len(desc, len);
2495 + desc->bytes_requested = len;
2496
2497 desc->txd.flags = flags;
2498
2499 diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
2500 index 778bbb6425b8..b0487c9f018c 100644
2501 --- a/drivers/gpu/drm/drm_dp_mst_topology.c
2502 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
2503 @@ -1294,7 +1294,6 @@ retry:
2504 goto retry;
2505 }
2506 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
2507 - WARN(1, "fail\n");
2508
2509 return -EIO;
2510 }
2511 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
2512 index 8ae6f7f06b3a..683a9b004c11 100644
2513 --- a/drivers/gpu/drm/i915/i915_drv.h
2514 +++ b/drivers/gpu/drm/i915/i915_drv.h
2515 @@ -3190,15 +3190,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
2516 #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
2517
2518 #define I915_READ64_2x32(lower_reg, upper_reg) ({ \
2519 - u32 upper = I915_READ(upper_reg); \
2520 - u32 lower = I915_READ(lower_reg); \
2521 - u32 tmp = I915_READ(upper_reg); \
2522 - if (upper != tmp) { \
2523 - upper = tmp; \
2524 - lower = I915_READ(lower_reg); \
2525 - WARN_ON(I915_READ(upper_reg) != upper); \
2526 - } \
2527 - (u64)upper << 32 | lower; })
2528 + u32 upper, lower, tmp; \
2529 + tmp = I915_READ(upper_reg); \
2530 + do { \
2531 + upper = tmp; \
2532 + lower = I915_READ(lower_reg); \
2533 + tmp = I915_READ(upper_reg); \
2534 + } while (upper != tmp); \
2535 + (u64)upper << 32 | lower; })
2536
2537 #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
2538 #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
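The macro rewrite turns a single retry plus WARN into a loop: sample the upper half, read the lower half, re-read the upper half, and repeat until the two upper samples agree, which guarantees a tear-free 64-bit value even across a carry between the halves. The same loop as a plain C function over two 32-bit locations:

    /* Tear-free 64-bit read from two 32-bit registers, the loop the
     * I915_READ64_2x32 rewrite implements.  Fake MMIO for the demo. */
    #include <stdint.h>
    #include <stdio.h>

    static volatile uint32_t reg_lo, reg_hi;

    static uint64_t read64_2x32(void)
    {
        uint32_t upper, lower, tmp;

        tmp = reg_hi;
        do {
            upper = tmp;
            lower = reg_lo;
            tmp = reg_hi;         /* re-check for a carry in between */
        } while (upper != tmp);

        return (uint64_t)upper << 32 | lower;
    }

    int main(void)
    {
        reg_hi = 0x1;
        reg_lo = 0xffffffff;
        printf("0x%llx\n", (unsigned long long)read64_2x32());
        return 0;
    }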
2539 diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
2540 index 6377b22269ad..7ee23d1d1e74 100644
2541 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
2542 +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
2543 @@ -464,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
2544 }
2545
2546 /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
2547 - args->phys_swizzle_mode = args->swizzle_mode;
2548 + if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2549 + args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
2550 + else
2551 + args->phys_swizzle_mode = args->swizzle_mode;
2552 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
2553 args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
2554 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
2555 diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
2556 index 68fd9fc677e3..44480c1b9738 100644
2557 --- a/drivers/gpu/drm/radeon/dce6_afmt.c
2558 +++ b/drivers/gpu/drm/radeon/dce6_afmt.c
2559 @@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
2560 struct radeon_device *rdev = encoder->dev->dev_private;
2561 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2562 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
2563 - u32 offset;
2564
2565 - if (!dig || !dig->afmt || !dig->afmt->pin)
2566 + if (!dig || !dig->afmt || !dig->pin)
2567 return;
2568
2569 - offset = dig->afmt->offset;
2570 -
2571 - WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
2572 - AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
2573 + WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
2574 + AFMT_AUDIO_SRC_SELECT(dig->pin->id));
2575 }
2576
2577 void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
2578 - struct drm_connector *connector, struct drm_display_mode *mode)
2579 + struct drm_connector *connector,
2580 + struct drm_display_mode *mode)
2581 {
2582 struct radeon_device *rdev = encoder->dev->dev_private;
2583 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2584 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
2585 - u32 tmp = 0, offset;
2586 + u32 tmp = 0;
2587
2588 - if (!dig || !dig->afmt || !dig->afmt->pin)
2589 + if (!dig || !dig->afmt || !dig->pin)
2590 return;
2591
2592 - offset = dig->afmt->pin->offset;
2593 -
2594 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
2595 if (connector->latency_present[1])
2596 tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
2597 @@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
2598 else
2599 tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
2600 }
2601 - WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
2602 + WREG32_ENDPOINT(dig->pin->offset,
2603 + AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
2604 }
2605
2606 void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
2607 - u8 *sadb, int sad_count)
2608 + u8 *sadb, int sad_count)
2609 {
2610 struct radeon_device *rdev = encoder->dev->dev_private;
2611 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2612 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
2613 - u32 offset, tmp;
2614 + u32 tmp;
2615
2616 - if (!dig || !dig->afmt || !dig->afmt->pin)
2617 + if (!dig || !dig->afmt || !dig->pin)
2618 return;
2619
2620 - offset = dig->afmt->pin->offset;
2621 -
2622 /* program the speaker allocation */
2623 - tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
2624 + tmp = RREG32_ENDPOINT(dig->pin->offset,
2625 + AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
2626 tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
2627 /* set HDMI mode */
2628 tmp |= HDMI_CONNECTION;
2629 @@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
2630 tmp |= SPEAKER_ALLOCATION(sadb[0]);
2631 else
2632 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
2633 - WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
2634 + WREG32_ENDPOINT(dig->pin->offset,
2635 + AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
2636 }
2637
2638 void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
2639 - u8 *sadb, int sad_count)
2640 + u8 *sadb, int sad_count)
2641 {
2642 struct radeon_device *rdev = encoder->dev->dev_private;
2643 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2644 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
2645 - u32 offset, tmp;
2646 + u32 tmp;
2647
2648 - if (!dig || !dig->afmt || !dig->afmt->pin)
2649 + if (!dig || !dig->afmt || !dig->pin)
2650 return;
2651
2652 - offset = dig->afmt->pin->offset;
2653 -
2654 /* program the speaker allocation */
2655 - tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
2656 + tmp = RREG32_ENDPOINT(dig->pin->offset,
2657 + AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
2658 tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
2659 /* set DP mode */
2660 tmp |= DP_CONNECTION;
2661 @@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
2662 tmp |= SPEAKER_ALLOCATION(sadb[0]);
2663 else
2664 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
2665 - WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
2666 + WREG32_ENDPOINT(dig->pin->offset,
2667 + AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
2668 }
2669
2670 void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
2671 - struct cea_sad *sads, int sad_count)
2672 + struct cea_sad *sads, int sad_count)
2673 {
2674 - u32 offset;
2675 int i;
2676 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2677 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
2678 @@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
2679 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
2680 };
2681
2682 - if (!dig || !dig->afmt || !dig->afmt->pin)
2683 + if (!dig || !dig->afmt || !dig->pin)
2684 return;
2685
2686 - offset = dig->afmt->pin->offset;
2687 -
2688 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
2689 u32 value = 0;
2690 u8 stereo_freqs = 0;
2691 @@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
2692
2693 value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
2694
2695 - WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
2696 + WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
2697 }
2698 }
2699
2700 @@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
2701 }
2702
2703 void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
2704 - struct radeon_crtc *crtc, unsigned int clock)
2705 + struct radeon_crtc *crtc, unsigned int clock)
2706 {
2707 /* Two dtos; generally use dto0 for HDMI */
2708 u32 value = 0;
2709 @@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
2710 }
2711
2712 void dce6_dp_audio_set_dto(struct radeon_device *rdev,
2713 - struct radeon_crtc *crtc, unsigned int clock)
2714 + struct radeon_crtc *crtc, unsigned int clock)
2715 {
2716 /* Two dtos; generally use dto1 for DP */
2717 u32 value = 0;
2718 diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
2719 index fa719c53449b..59b3d3221294 100644
2720 --- a/drivers/gpu/drm/radeon/radeon_audio.c
2721 +++ b/drivers/gpu/drm/radeon/radeon_audio.c
2722 @@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
2723 static void radeon_audio_enable(struct radeon_device *rdev,
2724 struct r600_audio_pin *pin, u8 enable_mask)
2725 {
2726 + struct drm_encoder *encoder;
2727 + struct radeon_encoder *radeon_encoder;
2728 + struct radeon_encoder_atom_dig *dig;
2729 + int pin_count = 0;
2730 +
2731 + if (!pin)
2732 + return;
2733 +
2734 + if (rdev->mode_info.mode_config_initialized) {
2735 + list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
2736 + if (radeon_encoder_is_digital(encoder)) {
2737 + radeon_encoder = to_radeon_encoder(encoder);
2738 + dig = radeon_encoder->enc_priv;
2739 + if (dig->pin == pin)
2740 + pin_count++;
2741 + }
2742 + }
2743 +
2744 + if ((pin_count > 1) && (enable_mask == 0))
2745 + return;
2746 + }
2747 +
2748 if (rdev->audio.funcs->enable)
2749 rdev->audio.funcs->enable(rdev, pin, enable_mask);
2750 }
2751 @@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
2752
2753 static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
2754 {
2755 - struct radeon_encoder *radeon_encoder;
2756 - struct drm_connector *connector;
2757 - struct radeon_connector *radeon_connector = NULL;
2758 + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
2759 + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2760 struct cea_sad *sads;
2761 int sad_count;
2762
2763 - list_for_each_entry(connector,
2764 - &encoder->dev->mode_config.connector_list, head) {
2765 - if (connector->encoder == encoder) {
2766 - radeon_connector = to_radeon_connector(connector);
2767 - break;
2768 - }
2769 - }
2770 -
2771 - if (!radeon_connector) {
2772 - DRM_ERROR("Couldn't find encoder's connector\n");
2773 + if (!connector)
2774 return;
2775 - }
2776
2777 sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
2778 if (sad_count <= 0) {
2779 @@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
2780 }
2781 BUG_ON(!sads);
2782
2783 - radeon_encoder = to_radeon_encoder(encoder);
2784 -
2785 if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
2786 radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
2787
2788 @@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
2789
2790 static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
2791 {
2792 + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
2793 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2794 - struct drm_connector *connector;
2795 - struct radeon_connector *radeon_connector = NULL;
2796 u8 *sadb = NULL;
2797 int sad_count;
2798
2799 - list_for_each_entry(connector,
2800 - &encoder->dev->mode_config.connector_list, head) {
2801 - if (connector->encoder == encoder) {
2802 - radeon_connector = to_radeon_connector(connector);
2803 - break;
2804 - }
2805 - }
2806 -
2807 - if (!radeon_connector) {
2808 - DRM_ERROR("Couldn't find encoder's connector\n");
2809 + if (!connector)
2810 return;
2811 - }
2812
2813 - sad_count = drm_edid_to_speaker_allocation(
2814 - radeon_connector_edid(connector), &sadb);
2815 + sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
2816 + &sadb);
2817 if (sad_count < 0) {
2818 DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
2819 sad_count);
2820 @@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
2821 }
2822
2823 static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
2824 - struct drm_display_mode *mode)
2825 + struct drm_display_mode *mode)
2826 {
2827 - struct radeon_encoder *radeon_encoder;
2828 - struct drm_connector *connector;
2829 - struct radeon_connector *radeon_connector = 0;
2830 -
2831 - list_for_each_entry(connector,
2832 - &encoder->dev->mode_config.connector_list, head) {
2833 - if (connector->encoder == encoder) {
2834 - radeon_connector = to_radeon_connector(connector);
2835 - break;
2836 - }
2837 - }
2838 + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
2839 + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2840
2841 - if (!radeon_connector) {
2842 - DRM_ERROR("Couldn't find encoder's connector\n");
2843 + if (!connector)
2844 return;
2845 - }
2846 -
2847 - radeon_encoder = to_radeon_encoder(encoder);
2848
2849 if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
2850 radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
2851 @@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
2852 }
2853
2854 void radeon_audio_detect(struct drm_connector *connector,
2855 + struct drm_encoder *encoder,
2856 enum drm_connector_status status)
2857 {
2858 - struct radeon_device *rdev;
2859 - struct radeon_encoder *radeon_encoder;
2860 + struct drm_device *dev = connector->dev;
2861 + struct radeon_device *rdev = dev->dev_private;
2862 + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2863 struct radeon_encoder_atom_dig *dig;
2864
2865 - if (!connector || !connector->encoder)
2866 + if (!radeon_audio_chipset_supported(rdev))
2867 return;
2868
2869 - rdev = connector->encoder->dev->dev_private;
2870 -
2871 - if (!radeon_audio_chipset_supported(rdev))
2872 + if (!radeon_encoder_is_digital(encoder))
2873 return;
2874
2875 - radeon_encoder = to_radeon_encoder(connector->encoder);
2876 dig = radeon_encoder->enc_priv;
2877
2878 if (status == connector_status_connected) {
2879 - if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
2880 - radeon_encoder->audio = NULL;
2881 - return;
2882 - }
2883 -
2884 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
2885 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
2886
2887 @@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
2888 radeon_encoder->audio = rdev->audio.hdmi_funcs;
2889 }
2890
2891 - dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
2892 - radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
2893 + if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
2894 + if (!dig->pin)
2895 + dig->pin = radeon_audio_get_pin(encoder);
2896 + radeon_audio_enable(rdev, dig->pin, 0xf);
2897 + } else {
2898 + radeon_audio_enable(rdev, dig->pin, 0);
2899 + dig->pin = NULL;
2900 + }
2901 } else {
2902 - radeon_audio_enable(rdev, dig->afmt->pin, 0);
2903 - dig->afmt->pin = NULL;
2904 + radeon_audio_enable(rdev, dig->pin, 0);
2905 + dig->pin = NULL;
2906 }
2907 }
2908
2909 @@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
2910 }
2911
2912 static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
2913 - struct drm_display_mode *mode)
2914 + struct drm_display_mode *mode)
2915 {
2916 struct radeon_device *rdev = encoder->dev->dev_private;
2917 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2918 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
2919 - struct drm_connector *connector;
2920 - struct radeon_connector *radeon_connector = NULL;
2921 + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
2922 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
2923 struct hdmi_avi_infoframe frame;
2924 int err;
2925
2926 - list_for_each_entry(connector,
2927 - &encoder->dev->mode_config.connector_list, head) {
2928 - if (connector->encoder == encoder) {
2929 - radeon_connector = to_radeon_connector(connector);
2930 - break;
2931 - }
2932 - }
2933 -
2934 - if (!radeon_connector) {
2935 - DRM_ERROR("Couldn't find encoder's connector\n");
2936 - return -ENOENT;
2937 - }
2938 + if (!connector)
2939 + return -EINVAL;
2940
2941 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
2942 if (err < 0) {
2943 @@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
2944 return err;
2945 }
2946
2947 - if (dig && dig->afmt &&
2948 - radeon_encoder->audio && radeon_encoder->audio->set_avi_packet)
2949 + if (dig && dig->afmt && radeon_encoder->audio &&
2950 + radeon_encoder->audio->set_avi_packet)
2951 radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
2952 buffer, sizeof(buffer));
2953
2954 @@ -745,7 +719,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
2955 }
2956
2957 static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
2958 - struct drm_display_mode *mode)
2959 + struct drm_display_mode *mode)
2960 {
2961 struct drm_device *dev = encoder->dev;
2962 struct radeon_device *rdev = dev->dev_private;
2963 @@ -756,6 +730,9 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
2964 struct radeon_connector_atom_dig *dig_connector =
2965 radeon_connector->con_priv;
2966
2967 + if (!connector)
2968 + return;
2969 +
2970 if (!dig || !dig->afmt)
2971 return;
2972
2973 @@ -774,7 +751,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
2974 }
2975
2976 void radeon_audio_mode_set(struct drm_encoder *encoder,
2977 - struct drm_display_mode *mode)
2978 + struct drm_display_mode *mode)
2979 {
2980 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2981
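radeon_audio_enable now walks the digital encoders and counts how many still reference the pin, suppressing a disable request (enable_mask == 0) while another user remains; previously, disconnecting one display could mute audio on another display sharing the pin. A simplified model of that last-user check:

    /* Don't disable a shared resource while another user still holds
     * it, the check radeon_audio_enable gains.  Simplified model. */
    #include <stdio.h>

    #define MAX_ENC 4

    struct pin { int id; };

    struct encoder { struct pin *pin; };

    static void audio_enable(struct encoder *all, int n,
                             struct pin *pin, unsigned int enable_mask)
    {
        int i, pin_count = 0;

        if (!pin)
            return;

        for (i = 0; i < n; i++)
            if (all[i].pin == pin)
                pin_count++;

        /* someone else still uses this pin: ignore the disable */
        if (pin_count > 1 && enable_mask == 0)
            return;

        printf("pin %d -> mask 0x%x\n", pin->id, enable_mask);
    }

    int main(void)
    {
        struct pin p = { .id = 0 };
        struct encoder enc[MAX_ENC] = { { &p }, { &p }, { NULL }, { NULL } };

        audio_enable(enc, MAX_ENC, &p, 0x0); /* suppressed: two users */
        enc[1].pin = NULL;
        audio_enable(enc, MAX_ENC, &p, 0x0); /* now actually disables */
        return 0;
    }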
2982 diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
2983 index 8438304f7139..059cc3012062 100644
2984 --- a/drivers/gpu/drm/radeon/radeon_audio.h
2985 +++ b/drivers/gpu/drm/radeon/radeon_audio.h
2986 @@ -68,7 +68,8 @@ struct radeon_audio_funcs
2987
2988 int radeon_audio_init(struct radeon_device *rdev);
2989 void radeon_audio_detect(struct drm_connector *connector,
2990 - enum drm_connector_status status);
2991 + struct drm_encoder *encoder,
2992 + enum drm_connector_status status);
2993 u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
2994 u32 offset, u32 reg);
2995 void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
2996 diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
2997 index 3e5f6b71f3ad..c097d3a82bda 100644
2998 --- a/drivers/gpu/drm/radeon/radeon_combios.c
2999 +++ b/drivers/gpu/drm/radeon/radeon_combios.c
3000 @@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
3001
3002 if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
3003 (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
3004 + u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
3005 +
3006 + if (hss > lvds->native_mode.hdisplay)
3007 + hss = (10 - 1) * 8;
3008 +
3009 lvds->native_mode.htotal = lvds->native_mode.hdisplay +
3010 (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
3011 lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
3012 - (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
3013 + hss;
3014 lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
3015 (RBIOS8(tmp + 23) * 8);
3016
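The combios hunk validates a BIOS-derived horizontal sync start offset before trusting it: if the computed offset exceeds hdisplay, a conservative default of (10 - 1) * 8 = 72 pixels is used instead. A sketch of the clamp, with invented input values:

    /* Clamp an implausible firmware-provided timing offset before
     * use, as the combios LVDS fix does. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t sane_hss(uint32_t raw_units, uint32_t hdisplay)
    {
        uint32_t hss = (raw_units - 1) * 8;   /* BIOS stores units of 8px */

        if (hss > hdisplay)
            hss = (10 - 1) * 8;               /* conservative default: 72 */
        return hss;
    }

    int main(void)
    {
        uint32_t hdisplay = 1280;

        printf("%u\n", sane_hss(6, hdisplay));   /* 40: plausible, kept */
        printf("%u\n", sane_hss(400, hdisplay)); /* 3192 > 1280 -> 72   */
        return 0;
    }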
3017 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
3018 index cebb65e07e1d..94b21ae70ef7 100644
3019 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
3020 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
3021 @@ -1379,8 +1379,16 @@ out:
3022 /* updated in get modes as well since we need to know if it's analog or digital */
3023 radeon_connector_update_scratch_regs(connector, ret);
3024
3025 - if (radeon_audio != 0)
3026 - radeon_audio_detect(connector, ret);
3027 + if ((radeon_audio != 0) && radeon_connector->use_digital) {
3028 + const struct drm_connector_helper_funcs *connector_funcs =
3029 + connector->helper_private;
3030 +
3031 + encoder = connector_funcs->best_encoder(connector);
3032 + if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
3033 + radeon_connector_get_edid(connector);
3034 + radeon_audio_detect(connector, encoder, ret);
3035 + }
3036 + }
3037
3038 exit:
3039 pm_runtime_mark_last_busy(connector->dev->dev);
3040 @@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
3041
3042 radeon_connector_update_scratch_regs(connector, ret);
3043
3044 - if (radeon_audio != 0)
3045 - radeon_audio_detect(connector, ret);
3046 + if ((radeon_audio != 0) && encoder) {
3047 + radeon_connector_get_edid(connector);
3048 + radeon_audio_detect(connector, encoder, ret);
3049 + }
3050
3051 out:
3052 pm_runtime_mark_last_busy(connector->dev->dev);
3053 diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
3054 index f01c797b78cf..9af2d8398e90 100644
3055 --- a/drivers/gpu/drm/radeon/radeon_mode.h
3056 +++ b/drivers/gpu/drm/radeon/radeon_mode.h
3057 @@ -237,7 +237,6 @@ struct radeon_afmt {
3058 int offset;
3059 bool last_buffer_filled_status;
3060 int id;
3061 - struct r600_audio_pin *pin;
3062 };
3063
3064 struct radeon_mode_info {
3065 @@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig {
3066 uint8_t backlight_level;
3067 int panel_mode;
3068 struct radeon_afmt *afmt;
3069 + struct r600_audio_pin *pin;
3070 int active_mst_links;
3071 };
3072
3073 diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
3074 index 6153df735e82..08ff89d222e5 100644
3075 --- a/drivers/hwmon/nct7904.c
3076 +++ b/drivers/hwmon/nct7904.c
3077 @@ -575,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = {
3078 {"nct7904", 0},
3079 {}
3080 };
3081 +MODULE_DEVICE_TABLE(i2c, nct7904_id);
3082
3083 static struct i2c_driver nct7904_driver = {
3084 .class = I2C_CLASS_HWMON,
3085 diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
3086 index a353b7de6d22..bc7eed67998a 100644
3087 --- a/drivers/input/mouse/alps.c
3088 +++ b/drivers/input/mouse/alps.c
3089 @@ -20,6 +20,7 @@
3090 #include <linux/input/mt.h>
3091 #include <linux/serio.h>
3092 #include <linux/libps2.h>
3093 +#include <linux/dmi.h>
3094
3095 #include "psmouse.h"
3096 #include "alps.h"
3097 @@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
3098 #define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
3099 #define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
3100 6-byte ALPS packet */
3101 +#define ALPS_DELL 0x100 /* device is a Dell laptop */
3102 #define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
3103
3104 static const struct alps_model_info alps_model_data[] = {
3105 @@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
3106 return;
3107 }
3108
3109 - /* Non interleaved V2 dualpoint has separate stick button bits */
3110 + /* Dell non interleaved V2 dualpoint has separate stick button bits */
3111 if (priv->proto_version == ALPS_PROTO_V2 &&
3112 - priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) {
3113 + priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
3114 left |= packet[0] & 1;
3115 right |= packet[0] & 2;
3116 middle |= packet[0] & 4;
3117 @@ -2542,6 +2544,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
3118 priv->byte0 = protocol->byte0;
3119 priv->mask0 = protocol->mask0;
3120 priv->flags = protocol->flags;
3121 + if (dmi_name_in_vendors("Dell"))
3122 + priv->flags |= ALPS_DELL;
3123
3124 priv->x_max = 2000;
3125 priv->y_max = 1400;
3126 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
3127 index e8d84566f311..697f34fba06b 100644
3128 --- a/drivers/md/dm.c
3129 +++ b/drivers/md/dm.c
3130 @@ -1719,7 +1719,8 @@ static int dm_merge_bvec(struct request_queue *q,
3131 struct mapped_device *md = q->queuedata;
3132 struct dm_table *map = dm_get_live_table_fast(md);
3133 struct dm_target *ti;
3134 - sector_t max_sectors, max_size = 0;
3135 + sector_t max_sectors;
3136 + int max_size = 0;
3137
3138 if (unlikely(!map))
3139 goto out;
3140 @@ -1732,18 +1733,10 @@ static int dm_merge_bvec(struct request_queue *q,
3141 * Find maximum amount of I/O that won't need splitting
3142 */
3143 max_sectors = min(max_io_len(bvm->bi_sector, ti),
3144 - (sector_t) queue_max_sectors(q));
3145 + (sector_t) BIO_MAX_SECTORS);
3146 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
3147 -
3148 - /*
3149 - * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
3150 - * to the targets' merge function since it holds sectors not bytes).
3151 - * Just doing this as an interim fix for stable@ because the more
3152 - * comprehensive cleanup of switching to sector_t will impact every
3153 - * DM target that implements a ->merge hook.
3154 - */
3155 - if (max_size > INT_MAX)
3156 - max_size = INT_MAX;
3157 + if (max_size < 0)
3158 + max_size = 0;
3159
3160 /*
3161 * merge_bvec_fn() returns number of bytes
3162 @@ -1751,13 +1744,13 @@ static int dm_merge_bvec(struct request_queue *q,
3163 * max is precomputed maximal io size
3164 */
3165 if (max_size && ti->type->merge)
3166 - max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
3167 + max_size = ti->type->merge(ti, bvm, biovec, max_size);
3168 /*
3169 * If the target doesn't support merge method and some of the devices
3170 - * provided their merge_bvec method (we know this by looking for the
3171 - * max_hw_sectors that dm_set_device_limits may set), then we can't
3172 - * allow bios with multiple vector entries. So always set max_size
3173 - * to 0, and the code below allows just one page.
3174 + * provided their merge_bvec method (we know this by looking at
3175 + * queue_max_hw_sectors), then we can't allow bios with multiple vector
3176 + * entries. So always set max_size to 0, and the code below allows
3177 + * just one page.
3178 */
3179 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
3180 max_size = 0;
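
[The reworked dm_merge_bvec() can use a plain int because max_sectors is now capped at BIO_MAX_SECTORS, so the byte count always fits; the old sector_t arithmetic could exceed INT_MAX and had to be clamped before the (int) cast in the ->merge call. A standalone illustration of the truncation the clamp guarded against, with illustrative values:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_sectors = 1ULL << 32;	/* huge queue limit (sector_t) */
	uint64_t max_size = max_sectors << 9;	/* bytes: 2^41 */

	/* casting straight to int for ->merge() silently truncates */
	printf("bytes=%llu, as int=%d\n",
	       (unsigned long long)max_size, (int)max_size);
	return 0;
}
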
3181 diff --git a/drivers/md/md.c b/drivers/md/md.c
3182 index b9200282fd77..e4621511d118 100644
3183 --- a/drivers/md/md.c
3184 +++ b/drivers/md/md.c
3185 @@ -5740,7 +5740,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
3186 char *ptr;
3187 int err;
3188
3189 - file = kmalloc(sizeof(*file), GFP_NOIO);
3190 + file = kzalloc(sizeof(*file), GFP_NOIO);
3191 if (!file)
3192 return -ENOMEM;
3193
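[kzalloc() matters here because get_bitmap_file() later copies the structure out to userspace; with kmalloc() any field or padding the kernel never wrote would leak stale heap contents. The general info-leak-safe pattern, as a sketch with hypothetical names rather than the md code:]

/* Hypothetical ioctl reply: zero the buffer first, then fill what you
 * know, so copy_to_user() never exposes uninitialized kernel memory. */
struct demo_reply {
	char name[64];
	int flags;
};

static long demo_fill(void __user *arg)
{
	struct demo_reply *r = kzalloc(sizeof(*r), GFP_KERNEL);
	long err = 0;

	if (!r)
		return -ENOMEM;
	r->flags = 1;		/* name[] stays zeroed, not stale heap data */
	if (copy_to_user(arg, r, sizeof(*r)))
		err = -EFAULT;
	kfree(r);
	return err;
}
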
3194 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
3195 index cd7b0c1e882d..5ce3cd5c4e1d 100644
3196 --- a/drivers/md/raid1.c
3197 +++ b/drivers/md/raid1.c
3198 @@ -1475,6 +1475,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
3199 {
3200 char b[BDEVNAME_SIZE];
3201 struct r1conf *conf = mddev->private;
3202 + unsigned long flags;
3203
3204 /*
3205 * If it is not operational, then we have already marked it as dead
3206 @@ -1494,14 +1495,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
3207 return;
3208 }
3209 set_bit(Blocked, &rdev->flags);
3210 + spin_lock_irqsave(&conf->device_lock, flags);
3211 if (test_and_clear_bit(In_sync, &rdev->flags)) {
3212 - unsigned long flags;
3213 - spin_lock_irqsave(&conf->device_lock, flags);
3214 mddev->degraded++;
3215 set_bit(Faulty, &rdev->flags);
3216 - spin_unlock_irqrestore(&conf->device_lock, flags);
3217 } else
3218 set_bit(Faulty, &rdev->flags);
3219 + spin_unlock_irqrestore(&conf->device_lock, flags);
3220 /*
3221 * if recovery is running, make sure it aborts.
3222 */
3223 @@ -1567,7 +1567,10 @@ static int raid1_spare_active(struct mddev *mddev)
3224 * Find all failed disks within the RAID1 configuration
3225 * and mark them readable.
3226 * Called under mddev lock, so rcu protection not needed.
3227 + * device_lock used to avoid races with raid1_end_read_request
3228 + * which expects 'In_sync' flags and ->degraded to be consistent.
3229 */
3230 + spin_lock_irqsave(&conf->device_lock, flags);
3231 for (i = 0; i < conf->raid_disks; i++) {
3232 struct md_rdev *rdev = conf->mirrors[i].rdev;
3233 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
3234 @@ -1598,7 +1601,6 @@ static int raid1_spare_active(struct mddev *mddev)
3235 sysfs_notify_dirent_safe(rdev->sysfs_state);
3236 }
3237 }
3238 - spin_lock_irqsave(&conf->device_lock, flags);
3239 mddev->degraded -= count;
3240 spin_unlock_irqrestore(&conf->device_lock, flags);
3241
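[Moving spin_lock_irqsave() outside the In_sync test makes the flag change and the ->degraded update a single atomic step with respect to other device_lock holders. Condensed from the hunk above:]

/* check-then-act on shared state must sit in one critical section */
spin_lock_irqsave(&conf->device_lock, flags);
if (test_and_clear_bit(In_sync, &rdev->flags))
	mddev->degraded++;	/* readers under the lock see both or neither */
set_bit(Faulty, &rdev->flags);
spin_unlock_irqrestore(&conf->device_lock, flags);
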
3242 diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
3243 index 7681237fe298..ead543282128 100644
3244 --- a/drivers/net/wireless/ath/ath10k/pci.c
3245 +++ b/drivers/net/wireless/ath/ath10k/pci.c
3246 @@ -1524,12 +1524,11 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
3247 switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
3248 case QCA6174_HW_1_0_CHIP_ID_REV:
3249 case QCA6174_HW_1_1_CHIP_ID_REV:
3250 + case QCA6174_HW_2_1_CHIP_ID_REV:
3251 + case QCA6174_HW_2_2_CHIP_ID_REV:
3252 return 3;
3253 case QCA6174_HW_1_3_CHIP_ID_REV:
3254 return 2;
3255 - case QCA6174_HW_2_1_CHIP_ID_REV:
3256 - case QCA6174_HW_2_2_CHIP_ID_REV:
3257 - return 6;
3258 case QCA6174_HW_3_0_CHIP_ID_REV:
3259 case QCA6174_HW_3_1_CHIP_ID_REV:
3260 case QCA6174_HW_3_2_CHIP_ID_REV:
3261 diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
3262 index 8882afbef688..6285f46f3ddb 100644
3263 --- a/drivers/phy/phy-twl4030-usb.c
3264 +++ b/drivers/phy/phy-twl4030-usb.c
3265 @@ -144,6 +144,16 @@
3266 #define PMBR1 0x0D
3267 #define GPIO_USB_4PIN_ULPI_2430C (3 << 0)
3268
3269 +/*
3270 + * If VBUS is valid or ID is ground, then we know a
3271 + * cable is present and we need to be runtime-enabled
3272 + */
3273 +static inline bool cable_present(enum omap_musb_vbus_id_status stat)
3274 +{
3275 + return stat == OMAP_MUSB_VBUS_VALID ||
3276 + stat == OMAP_MUSB_ID_GROUND;
3277 +}
3278 +
3279 struct twl4030_usb {
3280 struct usb_phy phy;
3281 struct device *dev;
3282 @@ -536,8 +546,10 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
3283
3284 mutex_lock(&twl->lock);
3285 if (status >= 0 && status != twl->linkstat) {
3286 + status_changed =
3287 + cable_present(twl->linkstat) !=
3288 + cable_present(status);
3289 twl->linkstat = status;
3290 - status_changed = true;
3291 }
3292 mutex_unlock(&twl->lock);
3293
3294 @@ -553,15 +565,11 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
3295 * USB_LINK_VBUS state. musb_hdrc won't care until it
3296 * starts to handle softconnect right.
3297 */
3298 - if ((status == OMAP_MUSB_VBUS_VALID) ||
3299 - (status == OMAP_MUSB_ID_GROUND)) {
3300 - if (pm_runtime_suspended(twl->dev))
3301 - pm_runtime_get_sync(twl->dev);
3302 + if (cable_present(status)) {
3303 + pm_runtime_get_sync(twl->dev);
3304 } else {
3305 - if (pm_runtime_active(twl->dev)) {
3306 - pm_runtime_mark_last_busy(twl->dev);
3307 - pm_runtime_put_autosuspend(twl->dev);
3308 - }
3309 + pm_runtime_mark_last_busy(twl->dev);
3310 + pm_runtime_put_autosuspend(twl->dev);
3311 }
3312 omap_musb_mailbox(status);
3313 }
3314 @@ -766,6 +774,9 @@ static int twl4030_usb_remove(struct platform_device *pdev)
3315
3316 /* disable complete OTG block */
3317 twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
3318 +
3319 + if (cable_present(twl->linkstat))
3320 + pm_runtime_put_noidle(twl->dev);
3321 pm_runtime_mark_last_busy(twl->dev);
3322 pm_runtime_put(twl->dev);
3323
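[The irq handler now takes a runtime-PM reference only when the cable state actually flips (status_changed compares cable_present() of the old and new state), and remove() drops the extra reference if a cable is still attached, keeping get/put strictly balanced. The net behaviour, condensed into one sketch function:]

/* one runtime-PM reference is held while a cable is present, none otherwise */
static void handle_link_change(struct twl4030_usb *twl,
			       enum omap_musb_vbus_id_status status)
{
	bool was = cable_present(twl->linkstat);
	bool now = cable_present(status);

	if (now && !was)
		pm_runtime_get_sync(twl->dev);		/* cable plugged */
	else if (was && !now)
		pm_runtime_put_autosuspend(twl->dev);	/* cable removed */
}
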
3324 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
3325 index 882744852aac..a9aa38903efe 100644
3326 --- a/drivers/scsi/ipr.c
3327 +++ b/drivers/scsi/ipr.c
3328 @@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
3329 {
3330 struct ipr_trace_entry *trace_entry;
3331 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3332 + unsigned int trace_index;
3333
3334 - trace_entry = &ioa_cfg->trace[atomic_add_return
3335 - (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
3336 + trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
3337 + trace_entry = &ioa_cfg->trace[trace_index];
3338 trace_entry->time = jiffies;
3339 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
3340 trace_entry->type = type;
3341 @@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
3342
3343 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
3344 {
3345 + unsigned int hrrq;
3346 +
3347 if (ioa_cfg->hrrq_num == 1)
3348 - return 0;
3349 - else
3350 - return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
3351 + hrrq = 0;
3352 + else {
3353 + hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
3354 + hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
3355 + }
3356 + return hrrq;
3357 }
3358
3359 /**
3360 @@ -6263,21 +6269,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
3361 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3362 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3363 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
3364 - unsigned long hrrq_flags;
3365 + unsigned long lock_flags;
3366
3367 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
3368
3369 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
3370 scsi_dma_unmap(scsi_cmd);
3371
3372 - spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
3373 + spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
3374 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
3375 scsi_cmd->scsi_done(scsi_cmd);
3376 - spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
3377 + spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
3378 } else {
3379 - spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
3380 + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3381 + spin_lock(&ipr_cmd->hrrq->_lock);
3382 ipr_erp_start(ioa_cfg, ipr_cmd);
3383 - spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
3384 + spin_unlock(&ipr_cmd->hrrq->_lock);
3385 + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3386 }
3387 }
3388
3389 diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
3390 index 73790a1d0969..6b97ee45c7b4 100644
3391 --- a/drivers/scsi/ipr.h
3392 +++ b/drivers/scsi/ipr.h
3393 @@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg {
3394
3395 #define IPR_NUM_TRACE_INDEX_BITS 8
3396 #define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS)
3397 +#define IPR_TRACE_INDEX_MASK (IPR_NUM_TRACE_ENTRIES - 1)
3398 #define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
3399 char trace_start[8];
3400 #define IPR_TRACE_START_LABEL "trace"
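
[IPR_TRACE_INDEX_MASK works because IPR_NUM_TRACE_ENTRIES is a power of two: masking the counter stays correct even after the atomic value wraps, whereas the old '% IPR_NUM_TRACE_ENTRIES' on a wrapping signed value could yield a negative index. A userspace sketch of the same ring-index technique, with illustrative names:]

#include <stdatomic.h>

#define RING_BITS	8
#define RING_SIZE	(1u << RING_BITS)	/* 256 entries */
#define RING_MASK	(RING_SIZE - 1)		/* 0xff */

static atomic_uint ring_index;

static unsigned int next_slot(void)
{
	/* unsigned wrap-around is well defined; the mask keeps the
	 * result in [0, RING_SIZE) no matter how often we wrap */
	return (atomic_fetch_add(&ring_index, 1) + 1) & RING_MASK;
}
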
3401 diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
3402 index 9c934e6d2ea1..c61add46b426 100644
3403 --- a/drivers/staging/lustre/lustre/obdclass/debug.c
3404 +++ b/drivers/staging/lustre/lustre/obdclass/debug.c
3405 @@ -40,7 +40,7 @@
3406
3407 #define DEBUG_SUBSYSTEM D_OTHER
3408
3409 -#include <linux/unaligned/access_ok.h>
3410 +#include <asm/unaligned.h>
3411
3412 #include "../include/obd_support.h"
3413 #include "../include/lustre_debug.h"
3414 diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
3415 index 15baacb126ad..376e4a0c15c6 100644
3416 --- a/drivers/staging/vt6655/device_main.c
3417 +++ b/drivers/staging/vt6655/device_main.c
3418 @@ -1486,8 +1486,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
3419 }
3420 }
3421
3422 - if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) {
3423 - if (conf->assoc) {
3424 + if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
3425 + priv->op_mode != NL80211_IFTYPE_AP) {
3426 + if (conf->assoc && conf->beacon_rate) {
3427 CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
3428 conf->sync_tsf);
3429
3430 diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
3431 index 1d30b0975651..67098a8a7a02 100644
3432 --- a/drivers/thermal/samsung/exynos_tmu.c
3433 +++ b/drivers/thermal/samsung/exynos_tmu.c
3434 @@ -1209,6 +1209,8 @@ err_clk_sec:
3435 if (!IS_ERR(data->clk_sec))
3436 clk_unprepare(data->clk_sec);
3437 err_sensor:
3438 + if (!IS_ERR_OR_NULL(data->regulator))
3439 + regulator_disable(data->regulator);
3440 thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
3441
3442 return ret;
3443 diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
3444 index 74fea4fa41b1..3ad48e1c0c57 100644
3445 --- a/drivers/usb/chipidea/core.c
3446 +++ b/drivers/usb/chipidea/core.c
3447 @@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = {
3448 },
3449 };
3450
3451 -module_platform_driver(ci_hdrc_driver);
3452 +static int __init ci_hdrc_platform_register(void)
3453 +{
3454 + ci_hdrc_host_driver_init();
3455 + return platform_driver_register(&ci_hdrc_driver);
3456 +}
3457 +module_init(ci_hdrc_platform_register);
3458 +
3459 +static void __exit ci_hdrc_platform_unregister(void)
3460 +{
3461 + platform_driver_unregister(&ci_hdrc_driver);
3462 +}
3463 +module_exit(ci_hdrc_platform_unregister);
3464
3465 MODULE_ALIAS("platform:ci_hdrc");
3466 MODULE_LICENSE("GPL v2");
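
[module_platform_driver() is just shorthand for a module_init/module_exit pair that registers the driver; the fix open-codes it so ci_hdrc_host_driver_init() runs once at module load, before any probe, instead of inside every ci_hdrc_host_init() call. The macro expands approximately to:]

/* what module_platform_driver(ci_hdrc_driver) generates, roughly */
static int __init ci_hdrc_driver_init(void)
{
	return platform_driver_register(&ci_hdrc_driver);
}
module_init(ci_hdrc_driver_init);

static void __exit ci_hdrc_driver_exit(void)
{
	platform_driver_unregister(&ci_hdrc_driver);
}
module_exit(ci_hdrc_driver_exit);
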
3467 diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
3468 index 21fe1a314313..2f8af40e87ca 100644
3469 --- a/drivers/usb/chipidea/host.c
3470 +++ b/drivers/usb/chipidea/host.c
3471 @@ -237,9 +237,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
3472 rdrv->name = "host";
3473 ci->roles[CI_ROLE_HOST] = rdrv;
3474
3475 + return 0;
3476 +}
3477 +
3478 +void ci_hdrc_host_driver_init(void)
3479 +{
3480 ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
3481 orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
3482 ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
3483 -
3484 - return 0;
3485 }
3486 diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
3487 index 5707bf379bfb..0f12f131bdd3 100644
3488 --- a/drivers/usb/chipidea/host.h
3489 +++ b/drivers/usb/chipidea/host.h
3490 @@ -5,6 +5,7 @@
3491
3492 int ci_hdrc_host_init(struct ci_hdrc *ci);
3493 void ci_hdrc_host_destroy(struct ci_hdrc *ci);
3494 +void ci_hdrc_host_driver_init(void);
3495
3496 #else
3497
3498 @@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
3499
3500 }
3501
3502 +static void ci_hdrc_host_driver_init(void)
3503 +{
3504 +
3505 +}
3506 +
3507 #endif
3508
3509 #endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
3510 diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
3511 index 6d3eb8b00a48..531861547253 100644
3512 --- a/drivers/usb/gadget/function/f_uac2.c
3513 +++ b/drivers/usb/gadget/function/f_uac2.c
3514 @@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
3515 factor = 1000;
3516 } else {
3517 ep_desc = &hs_epin_desc;
3518 - factor = 125;
3519 + factor = 8000;
3520 }
3521
3522 /* pre-compute some values for iso_complete() */
3523 uac2->p_framesize = opts->p_ssize *
3524 num_channels(opts->p_chmask);
3525 rate = opts->p_srate * uac2->p_framesize;
3526 - uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor;
3527 + uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
3528 uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
3529 prm->max_psize);
3530
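[For high-speed isochronous endpoints the service interval is 2^(bInterval-1) microframes of 125 us each, so the number of intervals per second is 8000 / 2^(bInterval-1); the old code computed a period in microseconds and then used it as a rate. A worked check for bInterval = 1:]

/* bInterval = 1: one packet per 125 us microframe */
static unsigned int intervals_per_second(unsigned int bInterval)
{
	return 8000 / (1u << (bInterval - 1));	/* HS: 8000 microframes/s */
}
/* intervals_per_second(1) == 8000; the old '125 * (1 << (bInterval - 1))'
 * yields 125, a period in microseconds, so rate / 125 overstated the
 * per-interval payload by a factor of 64. */
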
3531 diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
3532 index d69c35558f68..7d69931cf45d 100644
3533 --- a/drivers/usb/gadget/udc/udc-core.c
3534 +++ b/drivers/usb/gadget/udc/udc-core.c
3535 @@ -321,6 +321,7 @@ err4:
3536
3537 err3:
3538 put_device(&udc->dev);
3539 + device_del(&gadget->dev);
3540
3541 err2:
3542 put_device(&gadget->dev);
3543 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
3544 index 3e442f77a2b9..9a8c936cd42c 100644
3545 --- a/drivers/usb/host/xhci-mem.c
3546 +++ b/drivers/usb/host/xhci-mem.c
3547 @@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
3548 int size;
3549 int i, j, num_ports;
3550
3551 - del_timer_sync(&xhci->cmd_timer);
3552 + if (timer_pending(&xhci->cmd_timer))
3553 + del_timer_sync(&xhci->cmd_timer);
3554
3555 /* Free the Event Ring Segment Table and the actual Event Ring */
3556 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
3557 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
3558 index d095677a0702..b3a0a2275f5a 100644
3559 --- a/drivers/usb/host/xhci-ring.c
3560 +++ b/drivers/usb/host/xhci-ring.c
3561 @@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
3562 return 0;
3563 /* offset in TRBs */
3564 segment_offset = trb - seg->trbs;
3565 - if (segment_offset > TRBS_PER_SEGMENT)
3566 + if (segment_offset >= TRBS_PER_SEGMENT)
3567 return 0;
3568 return seg->dma + (segment_offset * sizeof(*trb));
3569 }
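[The one-character fix is the classic fencepost rule: valid offsets into an array of TRBS_PER_SEGMENT entries are 0 .. TRBS_PER_SEGMENT-1, so the reject test must be '>='; '>' let segment_offset == TRBS_PER_SEGMENT through and computed a DMA address one TRB past the segment. In general:]

/* valid indexes into arr[n] are 0 .. n-1 */
static int index_ok(unsigned int i, unsigned int n)
{
	return i < n;	/* i.e. reject with (i >= n), never (i > n) */
}
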
3570 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3571 index 19b85ee98a72..876423b8892c 100644
3572 --- a/drivers/usb/serial/option.c
3573 +++ b/drivers/usb/serial/option.c
3574 @@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
3575 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
3576 { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
3577 .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
3578 + { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
3579 + .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
3580 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
3581 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
3582 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
3583 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
3584 index 9c63897b3a56..d156545728c2 100644
3585 --- a/drivers/usb/serial/qcserial.c
3586 +++ b/drivers/usb/serial/qcserial.c
3587 @@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = {
3588 {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
3589 {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
3590 {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
3591 - {DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */
3592 {DEVICE_SWI(0x1199, 0x9051)}, /* Netgear AirCard 340U */
3593 {DEVICE_SWI(0x1199, 0x9053)}, /* Sierra Wireless Modem */
3594 {DEVICE_SWI(0x1199, 0x9054)}, /* Sierra Wireless Modem */
3595 @@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = {
3596 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
3597 {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
3598 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
3599 + {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
3600
3601 /* Huawei devices */
3602 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
3603 diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
3604 index 46179a0828eb..07d1ecd564f7 100644
3605 --- a/drivers/usb/serial/sierra.c
3606 +++ b/drivers/usb/serial/sierra.c
3607 @@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
3608 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
3609 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
3610 },
3611 + { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
3612 /* AT&T Direct IP LTE modems */
3613 { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
3614 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
3615 diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
3616 index 89274850741b..4bd23bba816f 100644
3617 --- a/drivers/xen/gntdev.c
3618 +++ b/drivers/xen/gntdev.c
3619 @@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
3620
3621 pr_debug("priv %p\n", priv);
3622
3623 + mutex_lock(&priv->lock);
3624 while (!list_empty(&priv->maps)) {
3625 map = list_entry(priv->maps.next, struct grant_map, next);
3626 list_del(&map->next);
3627 gntdev_put_map(NULL /* already removed */, map);
3628 }
3629 WARN_ON(!list_empty(&priv->freeable_maps));
3630 + mutex_unlock(&priv->lock);
3631
3632 if (use_ptemod)
3633 mmu_notifier_unregister(&priv->mn, priv->mm);
3634 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
3635 index 039f9c8a95e8..6e13504f736e 100644
3636 --- a/fs/nfsd/nfs4state.c
3637 +++ b/fs/nfsd/nfs4state.c
3638 @@ -4397,9 +4397,9 @@ laundromat_main(struct work_struct *laundry)
3639 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
3640 }
3641
3642 -static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
3643 +static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
3644 {
3645 - if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
3646 + if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
3647 return nfserr_bad_stateid;
3648 return nfs_ok;
3649 }
3650 @@ -4574,20 +4574,48 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
3651 return nfs_ok;
3652 }
3653
3654 +static struct file *
3655 +nfs4_find_file(struct nfs4_stid *s, int flags)
3656 +{
3657 + switch (s->sc_type) {
3658 + case NFS4_DELEG_STID:
3659 + if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
3660 + return NULL;
3661 + return get_file(s->sc_file->fi_deleg_file);
3662 + case NFS4_OPEN_STID:
3663 + case NFS4_LOCK_STID:
3664 + if (flags & RD_STATE)
3665 + return find_readable_file(s->sc_file);
3666 + else
3667 + return find_writeable_file(s->sc_file);
3668 + break;
3669 + }
3670 +
3671 + return NULL;
3672 +}
3673 +
3674 +static __be32
3675 +nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
3676 +{
3677 + __be32 status;
3678 +
3679 + status = nfsd4_check_openowner_confirmed(ols);
3680 + if (status)
3681 + return status;
3682 + return nfs4_check_openmode(ols, flags);
3683 +}
3684 +
3685 /*
3686 -* Checks for stateid operations
3687 -*/
3688 + * Checks for stateid operations
3689 + */
3690 __be32
3691 nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
3692 stateid_t *stateid, int flags, struct file **filpp)
3693 {
3694 - struct nfs4_stid *s;
3695 - struct nfs4_ol_stateid *stp = NULL;
3696 - struct nfs4_delegation *dp = NULL;
3697 - struct svc_fh *current_fh = &cstate->current_fh;
3698 - struct inode *ino = d_inode(current_fh->fh_dentry);
3699 + struct svc_fh *fhp = &cstate->current_fh;
3700 + struct inode *ino = d_inode(fhp->fh_dentry);
3701 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3702 - struct file *file = NULL;
3703 + struct nfs4_stid *s;
3704 __be32 status;
3705
3706 if (filpp)
3707 @@ -4597,60 +4625,39 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
3708 return nfserr_grace;
3709
3710 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3711 - return check_special_stateids(net, current_fh, stateid, flags);
3712 + return check_special_stateids(net, fhp, stateid, flags);
3713
3714 status = nfsd4_lookup_stateid(cstate, stateid,
3715 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
3716 &s, nn);
3717 if (status)
3718 return status;
3719 - status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
3720 + status = check_stateid_generation(stateid, &s->sc_stateid,
3721 + nfsd4_has_session(cstate));
3722 if (status)
3723 goto out;
3724 +
3725 switch (s->sc_type) {
3726 case NFS4_DELEG_STID:
3727 - dp = delegstateid(s);
3728 - status = nfs4_check_delegmode(dp, flags);
3729 - if (status)
3730 - goto out;
3731 - if (filpp) {
3732 - file = dp->dl_stid.sc_file->fi_deleg_file;
3733 - if (!file) {
3734 - WARN_ON_ONCE(1);
3735 - status = nfserr_serverfault;
3736 - goto out;
3737 - }
3738 - get_file(file);
3739 - }
3740 + status = nfs4_check_delegmode(delegstateid(s), flags);
3741 break;
3742 case NFS4_OPEN_STID:
3743 case NFS4_LOCK_STID:
3744 - stp = openlockstateid(s);
3745 - status = nfs4_check_fh(current_fh, stp);
3746 - if (status)
3747 - goto out;
3748 - status = nfsd4_check_openowner_confirmed(stp);
3749 - if (status)
3750 - goto out;
3751 - status = nfs4_check_openmode(stp, flags);
3752 - if (status)
3753 - goto out;
3754 - if (filpp) {
3755 - struct nfs4_file *fp = stp->st_stid.sc_file;
3756 -
3757 - if (flags & RD_STATE)
3758 - file = find_readable_file(fp);
3759 - else
3760 - file = find_writeable_file(fp);
3761 - }
3762 + status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
3763 break;
3764 default:
3765 status = nfserr_bad_stateid;
3766 + break;
3767 + }
3768 + if (status)
3769 goto out;
3770 + status = nfs4_check_fh(fhp, s);
3771 +
3772 + if (!status && filpp) {
3773 + *filpp = nfs4_find_file(s, flags);
3774 + if (!*filpp)
3775 + status = nfserr_serverfault;
3776 }
3777 - status = nfs_ok;
3778 - if (file)
3779 - *filpp = file;
3780 out:
3781 nfs4_put_stid(s);
3782 return status;
3783 @@ -4754,7 +4761,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
3784 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
3785 if (status)
3786 return status;
3787 - return nfs4_check_fh(current_fh, stp);
3788 + return nfs4_check_fh(current_fh, &stp->st_stid);
3789 }
3790
3791 /*
3792 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
3793 index 158badf945df..d4d84451e0e6 100644
3794 --- a/fs/nfsd/nfs4xdr.c
3795 +++ b/fs/nfsd/nfs4xdr.c
3796 @@ -2142,6 +2142,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
3797 #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
3798 FATTR4_WORD0_RDATTR_ERROR)
3799 #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
3800 +#define WORD2_ABSENT_FS_ATTRS 0
3801
3802 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
3803 static inline __be32
3804 @@ -2170,7 +2171,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
3805 { return 0; }
3806 #endif
3807
3808 -static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
3809 +static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
3810 {
3811 /* As per referral draft: */
3812 if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
3813 @@ -2183,6 +2184,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
3814 }
3815 *bmval0 &= WORD0_ABSENT_FS_ATTRS;
3816 *bmval1 &= WORD1_ABSENT_FS_ATTRS;
3817 + *bmval2 &= WORD2_ABSENT_FS_ATTRS;
3818 return 0;
3819 }
3820
3821 @@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
3822 BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
3823
3824 if (exp->ex_fslocs.migrated) {
3825 - BUG_ON(bmval[2]);
3826 - status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err);
3827 + status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
3828 if (status)
3829 goto out;
3830 }
3831 @@ -2290,8 +2291,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
3832 }
3833
3834 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
3835 - if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) ||
3836 - bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
3837 + if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
3838 + bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
3839 err = security_inode_getsecctx(d_inode(dentry),
3840 &context, &contextlen);
3841 contextsupport = (err == 0);
3842 diff --git a/fs/notify/mark.c b/fs/notify/mark.c
3843 index 92e48c70f0f0..39ddcaf0918f 100644
3844 --- a/fs/notify/mark.c
3845 +++ b/fs/notify/mark.c
3846 @@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
3847 unsigned int flags)
3848 {
3849 struct fsnotify_mark *lmark, *mark;
3850 + LIST_HEAD(to_free);
3851
3852 + /*
3853 + * We have to be really careful here. Anytime we drop mark_mutex, e.g.
3854 + * fsnotify_clear_marks_by_inode() can come and free marks, even ones
3855 + * on our to_free list, so we have to hold mark_mutex even when
3856 + * accessing that list. And freeing a mark requires us to drop
3857 + * mark_mutex, so we can reliably free only the first mark in the
3858 + * list. That's why we first move the marks to free onto the to_free
3859 + * list in one go and then free them one by one.
3860 + */
3861 mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
3862 list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
3863 - if (mark->flags & flags) {
3864 - fsnotify_get_mark(mark);
3865 - fsnotify_destroy_mark_locked(mark, group);
3866 - fsnotify_put_mark(mark);
3867 - }
3868 + if (mark->flags & flags)
3869 + list_move(&mark->g_list, &to_free);
3870 }
3871 mutex_unlock(&group->mark_mutex);
3872 +
3873 + while (1) {
3874 + mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
3875 + if (list_empty(&to_free)) {
3876 + mutex_unlock(&group->mark_mutex);
3877 + break;
3878 + }
3879 + mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
3880 + fsnotify_get_mark(mark);
3881 + fsnotify_destroy_mark_locked(mark, group);
3882 + mutex_unlock(&group->mark_mutex);
3883 + fsnotify_put_mark(mark);
3884 + }
3885 }
3886
3887 /*
3888 diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
3889 index f906a250da6a..9ea70127074d 100644
3890 --- a/fs/ocfs2/aops.c
3891 +++ b/fs/ocfs2/aops.c
3892 @@ -686,7 +686,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
3893
3894 if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
3895 u64 s = i_size_read(inode);
3896 - sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) +
3897 + sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
3898 (do_div(s, osb->s_clustersize) >> 9);
3899
3900 ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
3901 @@ -911,7 +911,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
3902 BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));
3903
3904 ret = blkdev_issue_zeroout(osb->sb->s_bdev,
3905 - p_cpos << (osb->s_clustersize_bits - 9),
3906 + (u64)p_cpos << (osb->s_clustersize_bits - 9),
3907 zero_len_head >> 9, GFP_NOFS, false);
3908 if (ret < 0)
3909 mlog_errno(ret);
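[Both ocfs2 hunks fix the same 32-bit overflow: p_cpos is a 32-bit cluster number, and shifting it left before widening loses the high bits on large volumes, so the cast to u64 must come first. A standalone demonstration with illustrative values:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t p_cpos = 0x00400000;	/* cluster number, 2^22 */
	unsigned int shift = 11;	/* s_clustersize_bits - 9 */

	uint64_t wrong = p_cpos << shift;		/* wraps in 32 bits: 0 */
	uint64_t right = (uint64_t)p_cpos << shift;	/* 0x200000000 */

	printf("wrong=%#llx right=%#llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
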
3910 diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
3911 index 8b23aa2f52dd..23157e40dd74 100644
3912 --- a/fs/ocfs2/dlmglue.c
3913 +++ b/fs/ocfs2/dlmglue.c
3914 @@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
3915 osb->dc_work_sequence = osb->dc_wake_sequence;
3916
3917 processed = osb->blocked_lock_count;
3918 - while (processed) {
3919 - BUG_ON(list_empty(&osb->blocked_lock_list));
3920 -
3921 + /*
3922 + * blocked lock processing in this loop might call iput which can
3923 + * remove items off osb->blocked_lock_list. Downconvert up to
3924 + * 'processed' locks, but stop early if some were removed by
3925 + * ocfs2_mark_lockres_freeing while we were downconverting.
3926 + */
3927 + while (processed && !list_empty(&osb->blocked_lock_list)) {
3928 lockres = list_entry(osb->blocked_lock_list.next,
3929 struct ocfs2_lock_res, l_blocked_list);
3930 list_del_init(&lockres->l_blocked_list);
3931 diff --git a/fs/signalfd.c b/fs/signalfd.c
3932 index 7e412ad74836..270221fcef42 100644
3933 --- a/fs/signalfd.c
3934 +++ b/fs/signalfd.c
3935 @@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
3936 * Other callers might not initialize the si_lsb field,
3937 * so check explicitly for the right codes here.
3938 */
3939 - if (kinfo->si_code == BUS_MCEERR_AR ||
3940 - kinfo->si_code == BUS_MCEERR_AO)
3941 + if (kinfo->si_signo == SIGBUS &&
3942 + (kinfo->si_code == BUS_MCEERR_AR ||
3943 + kinfo->si_code == BUS_MCEERR_AO))
3944 err |= __put_user((short) kinfo->si_addr_lsb,
3945 &uinfo->ssi_addr_lsb);
3946 #endif
3947 diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
3948 index 3d4ea7eb2b68..12b75f3ba0a0 100644
3949 --- a/include/linux/mtd/nand.h
3950 +++ b/include/linux/mtd/nand.h
3951 @@ -176,17 +176,17 @@ typedef enum {
3952 /* Chip may not exist, so silence any errors in scan */
3953 #define NAND_SCAN_SILENT_NODEV 0x00040000
3954 /*
3955 - * This option could be defined by controller drivers to protect against
3956 - * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
3957 - */
3958 -#define NAND_USE_BOUNCE_BUFFER 0x00080000
3959 -/*
3960 * Autodetect nand buswidth with readid/onfi.
3961 * This assumes the driver will configure the hardware in 8-bit mode
3962 * when calling nand_scan_ident, and update its configuration
3963 * before calling nand_scan_tail.
3964 */
3965 #define NAND_BUSWIDTH_AUTO 0x00080000
3966 +/*
3967 + * This option could be defined by controller drivers to protect against
3968 + * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
3969 + */
3970 +#define NAND_USE_BOUNCE_BUFFER 0x00100000
3971
3972 /* Options set by nand scan */
3973 /* Nand scan has allocated controller struct */
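
[The renumbering is needed because NAND_USE_BOUNCE_BUFFER and NAND_BUSWIDTH_AUTO had both been assigned 0x00080000, so testing either flag also matched the other; bitmask option flags only work if each flag owns a distinct bit. A compile-time guard against this class of bug, as a sketch:]

#define NAND_BUSWIDTH_AUTO	0x00080000
#define NAND_USE_BOUNCE_BUFFER	0x00100000

/* fails to build if two option flags ever share a bit again */
_Static_assert((NAND_BUSWIDTH_AUTO & NAND_USE_BOUNCE_BUFFER) == 0,
	       "NAND option flags must use distinct bits");
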
3974 diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
3975 index efe3443572ba..413417f3707b 100644
3976 --- a/include/uapi/linux/pci_regs.h
3977 +++ b/include/uapi/linux/pci_regs.h
3978 @@ -319,6 +319,7 @@
3979 #define PCI_MSIX_PBA 8 /* Pending Bit Array offset */
3980 #define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */
3981 #define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
3982 +#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
3983 #define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
3984
3985 /* MSI-X Table entry format */
3986 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
3987 index 3aaea7ffd077..c3fc5c2b63f3 100644
3988 --- a/ipc/mqueue.c
3989 +++ b/ipc/mqueue.c
3990 @@ -143,7 +143,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
3991 if (!leaf)
3992 return -ENOMEM;
3993 INIT_LIST_HEAD(&leaf->msg_list);
3994 - info->qsize += sizeof(*leaf);
3995 }
3996 leaf->priority = msg->m_type;
3997 rb_link_node(&leaf->rb_node, parent, p);
3998 @@ -188,7 +187,6 @@ try_again:
3999 "lazy leaf delete!\n");
4000 rb_erase(&leaf->rb_node, &info->msg_tree);
4001 if (info->node_cache) {
4002 - info->qsize -= sizeof(*leaf);
4003 kfree(leaf);
4004 } else {
4005 info->node_cache = leaf;
4006 @@ -201,7 +199,6 @@ try_again:
4007 if (list_empty(&leaf->msg_list)) {
4008 rb_erase(&leaf->rb_node, &info->msg_tree);
4009 if (info->node_cache) {
4010 - info->qsize -= sizeof(*leaf);
4011 kfree(leaf);
4012 } else {
4013 info->node_cache = leaf;
4014 @@ -1026,7 +1023,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
4015 /* Save our speculative allocation into the cache */
4016 INIT_LIST_HEAD(&new_leaf->msg_list);
4017 info->node_cache = new_leaf;
4018 - info->qsize += sizeof(*new_leaf);
4019 new_leaf = NULL;
4020 } else {
4021 kfree(new_leaf);
4022 @@ -1133,7 +1129,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
4023 /* Save our speculative allocation into the cache */
4024 INIT_LIST_HEAD(&new_leaf->msg_list);
4025 info->node_cache = new_leaf;
4026 - info->qsize += sizeof(*new_leaf);
4027 } else {
4028 kfree(new_leaf);
4029 }
4030 diff --git a/kernel/signal.c b/kernel/signal.c
4031 index d51c5ddd855c..0206be728dac 100644
4032 --- a/kernel/signal.c
4033 +++ b/kernel/signal.c
4034 @@ -2753,12 +2753,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
4035 * Other callers might not initialize the si_lsb field,
4036 * so check explicitly for the right codes here.
4037 */
4038 - if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
4039 + if (from->si_signo == SIGBUS &&
4040 + (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
4041 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
4042 #endif
4043 #ifdef SEGV_BNDERR
4044 - err |= __put_user(from->si_lower, &to->si_lower);
4045 - err |= __put_user(from->si_upper, &to->si_upper);
4046 + if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
4047 + err |= __put_user(from->si_lower, &to->si_lower);
4048 + err |= __put_user(from->si_upper, &to->si_upper);
4049 + }
4050 #endif
4051 break;
4052 case __SI_CHLD:
4053 @@ -3022,7 +3025,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4054 int, sig,
4055 struct compat_siginfo __user *, uinfo)
4056 {
4057 - siginfo_t info;
4058 + siginfo_t info = {};
4059 int ret = copy_siginfo_from_user32(&info, uinfo);
4060 if (unlikely(ret))
4061 return ret;
4062 @@ -3066,7 +3069,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4063 int, sig,
4064 struct compat_siginfo __user *, uinfo)
4065 {
4066 - siginfo_t info;
4067 + siginfo_t info = {};
4068
4069 if (copy_siginfo_from_user32(&info, uinfo))
4070 return -EFAULT;
4071 diff --git a/mm/vmscan.c b/mm/vmscan.c
4072 index 5e8eadd71bac..0d024fc8aa8e 100644
4073 --- a/mm/vmscan.c
4074 +++ b/mm/vmscan.c
4075 @@ -937,21 +937,17 @@ static unsigned long shrink_page_list(struct list_head *page_list,
4076 *
4077 * 2) Global reclaim encounters a page, memcg encounters a
4078 * page that is not marked for immediate reclaim or
4079 - * the caller does not have __GFP_IO. In this case mark
4080 + * the caller does not have __GFP_FS (or __GFP_IO if it's
4081 + * simply going to swap, not to fs). In this case mark
4082 * the page for immediate reclaim and continue scanning.
4083 *
4084 - * __GFP_IO is checked because a loop driver thread might
4085 + * Require may_enter_fs because we would wait on fs, which
4086 + * may not have submitted IO yet. And the loop driver might
4087 * enter reclaim, and deadlock if it waits on a page for
4088 * which it is needed to do the write (loop masks off
4089 * __GFP_IO|__GFP_FS for this reason); but more thought
4090 * would probably show more reasons.
4091 *
4092 - * Don't require __GFP_FS, since we're not going into the
4093 - * FS, just waiting on its writeback completion. Worryingly,
4094 - * ext4 gfs2 and xfs allocate pages with
4095 - * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
4096 - * may_enter_fs here is liable to OOM on them.
4097 - *
4098 * 3) memcg encounters a page that is not already marked
4099 * PageReclaim. memcg does not have any dirty pages
4100 * throttling so we could easily OOM just because too many
4101 @@ -968,7 +964,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
4102
4103 /* Case 2 above */
4104 } else if (global_reclaim(sc) ||
4105 - !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
4106 + !PageReclaim(page) || !may_enter_fs) {
4107 /*
4108 * This is slightly racy - end_page_writeback()
4109 * might have just cleared PageReclaim, then
4110 diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
4111 index 1ab3dc9c8f99..7b815bcc8c9b 100644
4112 --- a/net/bluetooth/smp.c
4113 +++ b/net/bluetooth/smp.c
4114 @@ -2295,6 +2295,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
4115 return 1;
4116
4117 chan = conn->smp;
4118 + if (!chan) {
4119 + BT_ERR("SMP security requested but not available");
4120 + return 1;
4121 + }
4122
4123 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
4124 return 1;
4125 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
4126 index e061355f535f..bf20593d3085 100644
4127 --- a/sound/firewire/amdtp.c
4128 +++ b/sound/firewire/amdtp.c
4129 @@ -730,8 +730,9 @@ static void handle_in_packet(struct amdtp_stream *s,
4130 s->data_block_counter != UINT_MAX)
4131 data_block_counter = s->data_block_counter;
4132
4133 - if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) ||
4134 - (s->data_block_counter == UINT_MAX)) {
4135 + if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
4136 + data_block_counter == s->tx_first_dbc) ||
4137 + s->data_block_counter == UINT_MAX) {
4138 lost = false;
4139 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
4140 lost = data_block_counter != s->data_block_counter;
4141 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
4142 index 8a03a91e728b..25c905537658 100644
4143 --- a/sound/firewire/amdtp.h
4144 +++ b/sound/firewire/amdtp.h
4145 @@ -153,6 +153,8 @@ struct amdtp_stream {
4146
3147 /* quirk: fixed interval of dbc between previous/current packets. */
4148 unsigned int tx_dbc_interval;
4149 + /* quirk: the value of the dbc field in the first packet. */
4150 + unsigned int tx_first_dbc;
4151
4152 bool callbacked;
4153 wait_queue_head_t callback_wait;
4154 diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
4155 index 2682e7e3e5c9..c94a432f7cc6 100644
4156 --- a/sound/firewire/fireworks/fireworks.c
4157 +++ b/sound/firewire/fireworks/fireworks.c
4158 @@ -248,8 +248,16 @@ efw_probe(struct fw_unit *unit,
4159 err = get_hardware_info(efw);
4160 if (err < 0)
4161 goto error;
4162 + /* AudioFire8 (since 2009) and AudioFirePre8 */
4163 if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
4164 efw->is_af9 = true;
3165 + /* These models use the same firmware. */
4166 + if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
4167 + entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
4168 + entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
4169 + entry->model_id == MODEL_GIBSON_RIP ||
4170 + entry->model_id == MODEL_GIBSON_GOLDTOP)
4171 + efw->is_fireworks3 = true;
4172
4173 snd_efw_proc_init(efw);
4174
4175 diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
4176 index 4f0201a95222..084d414b228c 100644
4177 --- a/sound/firewire/fireworks/fireworks.h
4178 +++ b/sound/firewire/fireworks/fireworks.h
4179 @@ -71,6 +71,7 @@ struct snd_efw {
4180
4181 /* for quirks */
4182 bool is_af9;
4183 + bool is_fireworks3;
4184 u32 firmware_version;
4185
4186 unsigned int midi_in_ports;
4187 diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
4188 index c55db1bddc80..7e353f1f7bff 100644
4189 --- a/sound/firewire/fireworks/fireworks_stream.c
4190 +++ b/sound/firewire/fireworks/fireworks_stream.c
4191 @@ -172,6 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
4192 efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
4193 /* Fireworks reset dbc at bus reset. */
4194 efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
4195 + /*
3196 + * But recent firmware starts packets with a non-zero dbc.
4197 + * Driver version 5.7.6 installs firmware version 5.7.3.
4198 + */
4199 + if (efw->is_fireworks3 &&
4200 + (efw->firmware_version == 0x5070000 ||
4201 + efw->firmware_version == 0x5070300 ||
4202 + efw->firmware_version == 0x5080000))
4203 + efw->tx_stream.tx_first_dbc = 0x02;
4204 /* AudioFire9 always reports wrong dbs. */
4205 if (efw->is_af9)
4206 efw->tx_stream.flags |= CIP_WRONG_DBS;
4207 diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
4208 index 50e9dd675579..3a24f7739aaa 100644
4209 --- a/sound/pci/hda/patch_cirrus.c
4210 +++ b/sound/pci/hda/patch_cirrus.c
4211 @@ -1001,9 +1001,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec,
4212
4213 spec->spdif_present = spdif_present;
4214 /* SPDIF TX on/off */
4215 - if (spdif_present)
4216 - snd_hda_set_pin_ctl(codec, spdif_pin,
4217 - spdif_present ? PIN_OUT : 0);
4218 + snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
4219
4220 cs_automute(codec);
4221 }
4222 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4223 index 590bcfb0e82f..1e99f075a5ab 100644
4224 --- a/sound/pci/hda/patch_realtek.c
4225 +++ b/sound/pci/hda/patch_realtek.c
4226 @@ -5118,6 +5118,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4227 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4228 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4229 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4230 + SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
4231 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4232 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4233 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
4234 diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
4235 index 477e13d30971..e7ba557979cb 100644
4236 --- a/sound/soc/codecs/pcm1681.c
4237 +++ b/sound/soc/codecs/pcm1681.c
4238 @@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec)
4239
4240 if (val != -1) {
4241 regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
4242 - PCM1681_DEEMPH_RATE_MASK, val);
4243 + PCM1681_DEEMPH_RATE_MASK, val << 3);
4244 enable = 1;
4245 } else
4246 enable = 0;
4247 diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
4248 index a984485108cd..f7549cc7ea85 100644
4249 --- a/sound/soc/codecs/ssm4567.c
4250 +++ b/sound/soc/codecs/ssm4567.c
4251 @@ -315,7 +315,13 @@ static int ssm4567_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
4252 if (invert_fclk)
4253 ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC;
4254
4255 - return regmap_write(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, ctrl1);
4256 + return regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1,
4257 + SSM4567_SAI_CTRL_1_BCLK |
4258 + SSM4567_SAI_CTRL_1_FSYNC |
4259 + SSM4567_SAI_CTRL_1_LJ |
4260 + SSM4567_SAI_CTRL_1_TDM |
4261 + SSM4567_SAI_CTRL_1_PDM,
4262 + ctrl1);
4263 }
4264
4265 static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
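
[regmap_update_bits() performs a read-modify-write restricted to the given mask, so register bits outside the listed SAI_CTRL_1 fields keep their current values, which the old unconditional regmap_write() destroyed. Roughly what it does internally, as a sketch:]

/* update only the masked field; everything else in the register survives */
static int update_bits_sketch(struct regmap *map, unsigned int reg,
			      unsigned int mask, unsigned int val)
{
	unsigned int old, new;
	int ret = regmap_read(map, reg, &old);

	if (ret)
		return ret;
	new = (old & ~mask) | (val & mask);
	return regmap_write(map, reg, new);
}
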
4266 diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c
4267 index 7b50a9d17ec1..edc186908358 100644
4268 --- a/sound/soc/intel/atom/sst/sst_drv_interface.c
4269 +++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
4270 @@ -42,6 +42,11 @@
4271 #define MIN_FRAGMENT_SIZE (50 * 1024)
4272 #define MAX_FRAGMENT_SIZE (1024 * 1024)
4273 #define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz) (((pcm_wd_sz + 15) >> 4) << 1)
4274 +#ifdef CONFIG_PM
4275 +#define GET_USAGE_COUNT(dev) (atomic_read(&dev->power.usage_count))
4276 +#else
4277 +#define GET_USAGE_COUNT(dev) 1
4278 +#endif
4279
4280 int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id)
4281 {
4282 @@ -141,15 +146,9 @@ static int sst_power_control(struct device *dev, bool state)
4283 int ret = 0;
4284 int usage_count = 0;
4285
4286 -#ifdef CONFIG_PM
4287 - usage_count = atomic_read(&dev->power.usage_count);
4288 -#else
4289 - usage_count = 1;
4290 -#endif
4291 -
4292 if (state == true) {
4293 ret = pm_runtime_get_sync(dev);
4294 -
4295 + usage_count = GET_USAGE_COUNT(dev);
4296 dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
4297 if (ret < 0) {
4298 dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
4299 @@ -164,6 +163,7 @@ static int sst_power_control(struct device *dev, bool state)
4300 }
4301 }
4302 } else {
4303 + usage_count = GET_USAGE_COUNT(dev);
4304 dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count);
4305 return sst_pm_runtime_put(ctx);
4306 }
4307 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
4308 index 158204d08924..b6c12dccb259 100644
4309 --- a/sound/soc/soc-dapm.c
4310 +++ b/sound/soc/soc-dapm.c
4311 @@ -1811,6 +1811,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
4312 size_t count, loff_t *ppos)
4313 {
4314 struct snd_soc_dapm_widget *w = file->private_data;
4315 + struct snd_soc_card *card = w->dapm->card;
4316 char *buf;
4317 int in, out;
4318 ssize_t ret;
4319 @@ -1820,6 +1821,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
4320 if (!buf)
4321 return -ENOMEM;
4322
4323 + mutex_lock(&card->dapm_mutex);
4324 +
4325 /* Supply widgets are not handled by is_connected_{input,output}_ep() */
4326 if (w->is_supply) {
4327 in = 0;
4328 @@ -1866,6 +1869,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
4329 p->sink->name);
4330 }
4331
4332 + mutex_unlock(&card->dapm_mutex);
4333 +
4334 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
4335
4336 kfree(buf);
4337 @@ -2140,11 +2145,15 @@ static ssize_t dapm_widget_show(struct device *dev,
4338 struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
4339 int i, count = 0;
4340
4341 + mutex_lock(&rtd->card->dapm_mutex);
4342 +
4343 for (i = 0; i < rtd->num_codecs; i++) {
4344 struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
4345 count += dapm_widget_show_codec(codec, buf + count);
4346 }
4347
4348 + mutex_unlock(&rtd->card->dapm_mutex);
4349 +
4350 return count;
4351 }
4352
4353 @@ -3100,16 +3109,10 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
4354 }
4355
4356 prefix = soc_dapm_prefix(dapm);
4357 - if (prefix) {
4358 + if (prefix)
4359 w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
4360 - if (widget->sname)
4361 - w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
4362 - widget->sname);
4363 - } else {
4364 + else
4365 w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
4366 - if (widget->sname)
4367 - w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
4368 - }
4369 if (w->name == NULL) {
4370 kfree(w);
4371 return NULL;
4372 @@ -3557,7 +3560,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
4373 break;
4374 }
4375
4376 - if (!w->sname || !strstr(w->sname, dai_w->name))
4377 + if (!w->sname || !strstr(w->sname, dai_w->sname))
4378 continue;
4379
4380 if (dai_w->id == snd_soc_dapm_dai_in) {