Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.12/0105-3.12.6-all-fixes.patch



Revision 2423
Tue Mar 25 12:29:50 2014 UTC by niro
File size: 148385 bytes
-added 3.12 branch
1 niro 2423 diff --git a/Makefile b/Makefile
2     index 986f3cdbad56..2b23383311ff 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 3
7     PATCHLEVEL = 12
8     -SUBLEVEL = 5
9     +SUBLEVEL = 6
10     EXTRAVERSION =
11     NAME = One Giant Leap for Frogkind
12    
13     diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
14     index f244f5f02365..9d1bfe445c3c 100644
15     --- a/arch/arm/boot/dts/sun6i-a31.dtsi
16     +++ b/arch/arm/boot/dts/sun6i-a31.dtsi
17     @@ -193,7 +193,10 @@
18     pio: pinctrl@01c20800 {
19     compatible = "allwinner,sun6i-a31-pinctrl";
20     reg = <0x01c20800 0x400>;
21     - interrupts = <0 11 1>, <0 15 1>, <0 16 1>, <0 17 1>;
22     + interrupts = <0 11 4>,
23     + <0 15 4>,
24     + <0 16 4>,
25     + <0 17 4>;
26     clocks = <&apb1_gates 5>;
27     gpio-controller;
28     interrupt-controller;
29     @@ -212,11 +215,11 @@
30     timer@01c20c00 {
31     compatible = "allwinner,sun4i-timer";
32     reg = <0x01c20c00 0xa0>;
33     - interrupts = <0 18 1>,
34     - <0 19 1>,
35     - <0 20 1>,
36     - <0 21 1>,
37     - <0 22 1>;
38     + interrupts = <0 18 4>,
39     + <0 19 4>,
40     + <0 20 4>,
41     + <0 21 4>,
42     + <0 22 4>;
43     clocks = <&osc24M>;
44     };
45    
46     @@ -228,7 +231,7 @@
47     uart0: serial@01c28000 {
48     compatible = "snps,dw-apb-uart";
49     reg = <0x01c28000 0x400>;
50     - interrupts = <0 0 1>;
51     + interrupts = <0 0 4>;
52     reg-shift = <2>;
53     reg-io-width = <4>;
54     clocks = <&apb2_gates 16>;
55     @@ -238,7 +241,7 @@
56     uart1: serial@01c28400 {
57     compatible = "snps,dw-apb-uart";
58     reg = <0x01c28400 0x400>;
59     - interrupts = <0 1 1>;
60     + interrupts = <0 1 4>;
61     reg-shift = <2>;
62     reg-io-width = <4>;
63     clocks = <&apb2_gates 17>;
64     @@ -248,7 +251,7 @@
65     uart2: serial@01c28800 {
66     compatible = "snps,dw-apb-uart";
67     reg = <0x01c28800 0x400>;
68     - interrupts = <0 2 1>;
69     + interrupts = <0 2 4>;
70     reg-shift = <2>;
71     reg-io-width = <4>;
72     clocks = <&apb2_gates 18>;
73     @@ -258,7 +261,7 @@
74     uart3: serial@01c28c00 {
75     compatible = "snps,dw-apb-uart";
76     reg = <0x01c28c00 0x400>;
77     - interrupts = <0 3 1>;
78     + interrupts = <0 3 4>;
79     reg-shift = <2>;
80     reg-io-width = <4>;
81     clocks = <&apb2_gates 19>;
82     @@ -268,7 +271,7 @@
83     uart4: serial@01c29000 {
84     compatible = "snps,dw-apb-uart";
85     reg = <0x01c29000 0x400>;
86     - interrupts = <0 4 1>;
87     + interrupts = <0 4 4>;
88     reg-shift = <2>;
89     reg-io-width = <4>;
90     clocks = <&apb2_gates 20>;
91     @@ -278,7 +281,7 @@
92     uart5: serial@01c29400 {
93     compatible = "snps,dw-apb-uart";
94     reg = <0x01c29400 0x400>;
95     - interrupts = <0 5 1>;
96     + interrupts = <0 5 4>;
97     reg-shift = <2>;
98     reg-io-width = <4>;
99     clocks = <&apb2_gates 21>;
100     diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
101     index 94f6b05f9e24..92f7b15dd221 100644
102     --- a/arch/arm/kernel/process.c
103     +++ b/arch/arm/kernel/process.c
104     @@ -404,6 +404,7 @@ EXPORT_SYMBOL(dump_fpu);
105     unsigned long get_wchan(struct task_struct *p)
106     {
107     struct stackframe frame;
108     + unsigned long stack_page;
109     int count = 0;
110     if (!p || p == current || p->state == TASK_RUNNING)
111     return 0;
112     @@ -412,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p)
113     frame.sp = thread_saved_sp(p);
114     frame.lr = 0; /* recovered from the stack */
115     frame.pc = thread_saved_pc(p);
116     + stack_page = (unsigned long)task_stack_page(p);
117     do {
118     - int ret = unwind_frame(&frame);
119     - if (ret < 0)
120     + if (frame.sp < stack_page ||
121     + frame.sp >= stack_page + THREAD_SIZE ||
122     + unwind_frame(&frame) < 0)
123     return 0;
124     if (!in_sched_functions(frame.pc))
125     return frame.pc;
126     diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
127     index 00f79e59985b..af4e8c8a5422 100644
128     --- a/arch/arm/kernel/stacktrace.c
129     +++ b/arch/arm/kernel/stacktrace.c
130     @@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
131     high = ALIGN(low, THREAD_SIZE);
132    
133     /* check current frame pointer is within bounds */
134     - if (fp < (low + 12) || fp + 4 >= high)
135     + if (fp < low + 12 || fp > high - 4)
136     return -EINVAL;
137    
138     /* restore the registers from the stack frame */
139     diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
140     index 8fcda140358d..65ed63f68ef8 100644
141     --- a/arch/arm/kernel/traps.c
142     +++ b/arch/arm/kernel/traps.c
143     @@ -503,9 +503,10 @@ static inline int
144     __do_cache_op(unsigned long start, unsigned long end)
145     {
146     int ret;
147     - unsigned long chunk = PAGE_SIZE;
148    
149     do {
150     + unsigned long chunk = min(PAGE_SIZE, end - start);
151     +
152     if (signal_pending(current)) {
153     struct thread_info *ti = current_thread_info();
154    
155     diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
156     index 8e63ccdb0de3..8e44973b0139 100644
157     --- a/arch/arm/mach-highbank/highbank.c
158     +++ b/arch/arm/mach-highbank/highbank.c
159     @@ -17,12 +17,15 @@
160     #include <linux/clkdev.h>
161     #include <linux/clocksource.h>
162     #include <linux/dma-mapping.h>
163     +#include <linux/input.h>
164     #include <linux/io.h>
165     #include <linux/irqchip.h>
166     +#include <linux/mailbox.h>
167     #include <linux/of.h>
168     #include <linux/of_irq.h>
169     #include <linux/of_platform.h>
170     #include <linux/of_address.h>
171     +#include <linux/reboot.h>
172     #include <linux/amba/bus.h>
173     #include <linux/clk-provider.h>
174    
175     @@ -153,6 +156,24 @@ static struct notifier_block highbank_platform_nb = {
176     .notifier_call = highbank_platform_notifier,
177     };
178    
179     +static int hb_keys_notifier(struct notifier_block *nb, unsigned long event, void *data)
180     +{
181     + u32 key = *(u32 *)data;
182     +
183     + if (event != 0x1000)
184     + return 0;
185     +
186     + if (key == KEY_POWER)
187     + orderly_poweroff(false);
188     + else if (key == 0xffff)
189     + ctrl_alt_del();
190     +
191     + return 0;
192     +}
193     +static struct notifier_block hb_keys_nb = {
194     + .notifier_call = hb_keys_notifier,
195     +};
196     +
197     static void __init highbank_init(void)
198     {
199     pm_power_off = highbank_power_off;
200     @@ -161,6 +182,8 @@ static void __init highbank_init(void)
201     bus_register_notifier(&platform_bus_type, &highbank_platform_nb);
202     bus_register_notifier(&amba_bustype, &highbank_amba_nb);
203    
204     + pl320_ipc_register_notifier(&hb_keys_nb);
205     +
206     of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
207     }
208    
209     diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
210     index 3d5db8c83b3c..832adb1a6dd2 100644
211     --- a/arch/arm/mach-omap2/omap_hwmod.c
212     +++ b/arch/arm/mach-omap2/omap_hwmod.c
213     @@ -399,7 +399,7 @@ static int _set_clockactivity(struct omap_hwmod *oh, u8 clockact, u32 *v)
214     }
215    
216     /**
217     - * _set_softreset: set OCP_SYSCONFIG.CLOCKACTIVITY bits in @v
218     + * _set_softreset: set OCP_SYSCONFIG.SOFTRESET bit in @v
219     * @oh: struct omap_hwmod *
220     * @v: pointer to register contents to modify
221     *
222     @@ -427,6 +427,36 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
223     }
224    
225     /**
226     + * _clear_softreset: clear OCP_SYSCONFIG.SOFTRESET bit in @v
227     + * @oh: struct omap_hwmod *
228     + * @v: pointer to register contents to modify
229     + *
230     + * Clear the SOFTRESET bit in @v for hwmod @oh. Returns -EINVAL upon
231     + * error or 0 upon success.
232     + */
233     +static int _clear_softreset(struct omap_hwmod *oh, u32 *v)
234     +{
235     + u32 softrst_mask;
236     +
237     + if (!oh->class->sysc ||
238     + !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET))
239     + return -EINVAL;
240     +
241     + if (!oh->class->sysc->sysc_fields) {
242     + WARN(1,
243     + "omap_hwmod: %s: sysc_fields absent for sysconfig class\n",
244     + oh->name);
245     + return -EINVAL;
246     + }
247     +
248     + softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
249     +
250     + *v &= ~softrst_mask;
251     +
252     + return 0;
253     +}
254     +
255     +/**
256     * _wait_softreset_complete - wait for an OCP softreset to complete
257     * @oh: struct omap_hwmod * to wait on
258     *
259     @@ -1911,6 +1941,12 @@ static int _ocp_softreset(struct omap_hwmod *oh)
260     ret = _set_softreset(oh, &v);
261     if (ret)
262     goto dis_opt_clks;
263     +
264     + _write_sysconfig(v, oh);
265     + ret = _clear_softreset(oh, &v);
266     + if (ret)
267     + goto dis_opt_clks;
268     +
269     _write_sysconfig(v, oh);
270    
271     if (oh->class->sysc->srst_udelay)
272     @@ -3159,6 +3195,11 @@ int omap_hwmod_softreset(struct omap_hwmod *oh)
273     goto error;
274     _write_sysconfig(v, oh);
275    
276     + ret = _clear_softreset(oh, &v);
277     + if (ret)
278     + goto error;
279     + _write_sysconfig(v, oh);
280     +
281     error:
282     return ret;
283     }
284     diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
285     index 0c3a427da544..f234cbec0cb9 100644
286     --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
287     +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
288     @@ -1943,7 +1943,8 @@ static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = {
289     .syss_offs = 0x0014,
290     .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
291     SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
292     - SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
293     + SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
294     + SYSS_HAS_RESET_STATUS),
295     .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
296     MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
297     .sysc_fields = &omap_hwmod_sysc_type1,
298     @@ -2021,15 +2022,7 @@ static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
299     * hence HWMOD_SWSUP_MSTANDBY
300     */
301    
302     - /*
303     - * During system boot; If the hwmod framework resets the module
304     - * the module will have smart idle settings; which can lead to deadlock
305     - * (above Errata Id:i660); so, dont reset the module during boot;
306     - * Use HWMOD_INIT_NO_RESET.
307     - */
308     -
309     - .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
310     - HWMOD_INIT_NO_RESET,
311     + .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
312     };
313    
314     /*
315     diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
316     index 0d5dd646f61f..263b15249b5b 100644
317     --- a/arch/arm/mach-pxa/reset.c
318     +++ b/arch/arm/mach-pxa/reset.c
319     @@ -13,6 +13,7 @@
320    
321     #include <mach/regs-ost.h>
322     #include <mach/reset.h>
323     +#include <mach/smemc.h>
324    
325     unsigned int reset_status;
326     EXPORT_SYMBOL(reset_status);
327     @@ -81,6 +82,12 @@ static void do_hw_reset(void)
328     writel_relaxed(OSSR_M3, OSSR);
329     /* ... in 100 ms */
330     writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3);
331     + /*
332     + * SDRAM hangs on watchdog reset on Marvell PXA270 (erratum 71)
333     + * we put SDRAM into self-refresh to prevent that
334     + */
335     + while (1)
336     + writel_relaxed(MDREFR_SLFRSH, MDREFR);
337     }
338    
339     void pxa_restart(enum reboot_mode mode, const char *cmd)
340     @@ -104,4 +111,3 @@ void pxa_restart(enum reboot_mode mode, const char *cmd)
341     break;
342     }
343     }
344     -
345     diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
346     index 0206b915a6f6..ef5557b807ed 100644
347     --- a/arch/arm/mach-pxa/tosa.c
348     +++ b/arch/arm/mach-pxa/tosa.c
349     @@ -425,57 +425,57 @@ static struct platform_device tosa_power_device = {
350     * Tosa Keyboard
351     */
352     static const uint32_t tosakbd_keymap[] = {
353     - KEY(0, 2, KEY_W),
354     - KEY(0, 6, KEY_K),
355     - KEY(0, 7, KEY_BACKSPACE),
356     - KEY(0, 8, KEY_P),
357     - KEY(1, 1, KEY_Q),
358     - KEY(1, 2, KEY_E),
359     - KEY(1, 3, KEY_T),
360     - KEY(1, 4, KEY_Y),
361     - KEY(1, 6, KEY_O),
362     - KEY(1, 7, KEY_I),
363     - KEY(1, 8, KEY_COMMA),
364     - KEY(2, 1, KEY_A),
365     - KEY(2, 2, KEY_D),
366     - KEY(2, 3, KEY_G),
367     - KEY(2, 4, KEY_U),
368     - KEY(2, 6, KEY_L),
369     - KEY(2, 7, KEY_ENTER),
370     - KEY(2, 8, KEY_DOT),
371     - KEY(3, 1, KEY_Z),
372     - KEY(3, 2, KEY_C),
373     - KEY(3, 3, KEY_V),
374     - KEY(3, 4, KEY_J),
375     - KEY(3, 5, TOSA_KEY_ADDRESSBOOK),
376     - KEY(3, 6, TOSA_KEY_CANCEL),
377     - KEY(3, 7, TOSA_KEY_CENTER),
378     - KEY(3, 8, TOSA_KEY_OK),
379     - KEY(3, 9, KEY_LEFTSHIFT),
380     - KEY(4, 1, KEY_S),
381     - KEY(4, 2, KEY_R),
382     - KEY(4, 3, KEY_B),
383     - KEY(4, 4, KEY_N),
384     - KEY(4, 5, TOSA_KEY_CALENDAR),
385     - KEY(4, 6, TOSA_KEY_HOMEPAGE),
386     - KEY(4, 7, KEY_LEFTCTRL),
387     - KEY(4, 8, TOSA_KEY_LIGHT),
388     - KEY(4, 10, KEY_RIGHTSHIFT),
389     - KEY(5, 1, KEY_TAB),
390     - KEY(5, 2, KEY_SLASH),
391     - KEY(5, 3, KEY_H),
392     - KEY(5, 4, KEY_M),
393     - KEY(5, 5, TOSA_KEY_MENU),
394     - KEY(5, 7, KEY_UP),
395     - KEY(5, 11, TOSA_KEY_FN),
396     - KEY(6, 1, KEY_X),
397     - KEY(6, 2, KEY_F),
398     - KEY(6, 3, KEY_SPACE),
399     - KEY(6, 4, KEY_APOSTROPHE),
400     - KEY(6, 5, TOSA_KEY_MAIL),
401     - KEY(6, 6, KEY_LEFT),
402     - KEY(6, 7, KEY_DOWN),
403     - KEY(6, 8, KEY_RIGHT),
404     + KEY(0, 1, KEY_W),
405     + KEY(0, 5, KEY_K),
406     + KEY(0, 6, KEY_BACKSPACE),
407     + KEY(0, 7, KEY_P),
408     + KEY(1, 0, KEY_Q),
409     + KEY(1, 1, KEY_E),
410     + KEY(1, 2, KEY_T),
411     + KEY(1, 3, KEY_Y),
412     + KEY(1, 5, KEY_O),
413     + KEY(1, 6, KEY_I),
414     + KEY(1, 7, KEY_COMMA),
415     + KEY(2, 0, KEY_A),
416     + KEY(2, 1, KEY_D),
417     + KEY(2, 2, KEY_G),
418     + KEY(2, 3, KEY_U),
419     + KEY(2, 5, KEY_L),
420     + KEY(2, 6, KEY_ENTER),
421     + KEY(2, 7, KEY_DOT),
422     + KEY(3, 0, KEY_Z),
423     + KEY(3, 1, KEY_C),
424     + KEY(3, 2, KEY_V),
425     + KEY(3, 3, KEY_J),
426     + KEY(3, 4, TOSA_KEY_ADDRESSBOOK),
427     + KEY(3, 5, TOSA_KEY_CANCEL),
428     + KEY(3, 6, TOSA_KEY_CENTER),
429     + KEY(3, 7, TOSA_KEY_OK),
430     + KEY(3, 8, KEY_LEFTSHIFT),
431     + KEY(4, 0, KEY_S),
432     + KEY(4, 1, KEY_R),
433     + KEY(4, 2, KEY_B),
434     + KEY(4, 3, KEY_N),
435     + KEY(4, 4, TOSA_KEY_CALENDAR),
436     + KEY(4, 5, TOSA_KEY_HOMEPAGE),
437     + KEY(4, 6, KEY_LEFTCTRL),
438     + KEY(4, 7, TOSA_KEY_LIGHT),
439     + KEY(4, 9, KEY_RIGHTSHIFT),
440     + KEY(5, 0, KEY_TAB),
441     + KEY(5, 1, KEY_SLASH),
442     + KEY(5, 2, KEY_H),
443     + KEY(5, 3, KEY_M),
444     + KEY(5, 4, TOSA_KEY_MENU),
445     + KEY(5, 6, KEY_UP),
446     + KEY(5, 10, TOSA_KEY_FN),
447     + KEY(6, 0, KEY_X),
448     + KEY(6, 1, KEY_F),
449     + KEY(6, 2, KEY_SPACE),
450     + KEY(6, 3, KEY_APOSTROPHE),
451     + KEY(6, 4, TOSA_KEY_MAIL),
452     + KEY(6, 5, KEY_LEFT),
453     + KEY(6, 6, KEY_DOWN),
454     + KEY(6, 7, KEY_RIGHT),
455     };
456    
457     static struct matrix_keymap_data tosakbd_keymap_data = {
458     diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
459     index d57e66845c86..2e9d83673ef6 100644
460     --- a/arch/arm64/include/asm/pgtable-hwdef.h
461     +++ b/arch/arm64/include/asm/pgtable-hwdef.h
462     @@ -43,7 +43,7 @@
463     * Section
464     */
465     #define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
466     -#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 2)
467     +#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
468     #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
469     #define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
470     #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
471     diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
472     index 27b2386f738a..842846c1b711 100644
473     --- a/arch/powerpc/include/asm/pgalloc-32.h
474     +++ b/arch/powerpc/include/asm/pgalloc-32.h
475     @@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
476     static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
477     unsigned long address)
478     {
479     - struct page *page = page_address(table);
480     -
481     tlb_flush_pgtable(tlb, address);
482     - pgtable_page_dtor(page);
483     - pgtable_free_tlb(tlb, page, 0);
484     + pgtable_page_dtor(table);
485     + pgtable_free_tlb(tlb, page_address(table), 0);
486     }
487     #endif /* _ASM_POWERPC_PGALLOC_32_H */
488     diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
489     index f65e27b09bd3..256d6f8a26a8 100644
490     --- a/arch/powerpc/include/asm/pgalloc-64.h
491     +++ b/arch/powerpc/include/asm/pgalloc-64.h
492     @@ -144,11 +144,9 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
493     static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
494     unsigned long address)
495     {
496     - struct page *page = page_address(table);
497     -
498     tlb_flush_pgtable(tlb, address);
499     - pgtable_page_dtor(page);
500     - pgtable_free_tlb(tlb, page, 0);
501     + pgtable_page_dtor(table);
502     + pgtable_free_tlb(tlb, page_address(table), 0);
503     }
504    
505     #else /* if CONFIG_PPC_64K_PAGES */
506     diff --git a/arch/x86/Makefile b/arch/x86/Makefile
507     index eda00f9be0cf..57d021507120 100644
508     --- a/arch/x86/Makefile
509     +++ b/arch/x86/Makefile
510     @@ -31,8 +31,8 @@ ifeq ($(CONFIG_X86_32),y)
511    
512     KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
513    
514     - # Don't autogenerate SSE instructions
515     - KBUILD_CFLAGS += -mno-sse
516     + # Don't autogenerate MMX or SSE instructions
517     + KBUILD_CFLAGS += -mno-mmx -mno-sse
518    
519     # Never want PIC in a 32-bit kernel, prevent breakage with GCC built
520     # with nonstandard options
521     @@ -60,8 +60,8 @@ else
522     KBUILD_AFLAGS += -m64
523     KBUILD_CFLAGS += -m64
524    
525     - # Don't autogenerate SSE instructions
526     - KBUILD_CFLAGS += -mno-sse
527     + # Don't autogenerate MMX or SSE instructions
528     + KBUILD_CFLAGS += -mno-mmx -mno-sse
529    
530     # Use -mpreferred-stack-boundary=3 if supported.
531     KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
532     diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
533     index 379814bc41e3..6cf0111783d3 100644
534     --- a/arch/x86/boot/Makefile
535     +++ b/arch/x86/boot/Makefile
536     @@ -53,18 +53,18 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
537    
538     # How to compile the 16-bit code. Note we always compile for -march=i386,
539     # that way we can complain to the user if the CPU is insufficient.
540     -KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
541     +KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
542     -DDISABLE_BRANCH_PROFILING \
543     -Wall -Wstrict-prototypes \
544     -march=i386 -mregparm=3 \
545     -include $(srctree)/$(src)/code16gcc.h \
546     -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
547     + -mno-mmx -mno-sse \
548     $(call cc-option, -ffreestanding) \
549     $(call cc-option, -fno-toplevel-reorder,\
550     - $(call cc-option, -fno-unit-at-a-time)) \
551     + $(call cc-option, -fno-unit-at-a-time)) \
552     $(call cc-option, -fno-stack-protector) \
553     $(call cc-option, -mpreferred-stack-boundary=2)
554     -KBUILD_CFLAGS += $(call cc-option, -m32)
555     KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
556     GCOV_PROFILE := n
557    
558     diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
559     index dcd90df10ab4..c8a6792e7842 100644
560     --- a/arch/x86/boot/compressed/Makefile
561     +++ b/arch/x86/boot/compressed/Makefile
562     @@ -13,6 +13,7 @@ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
563     cflags-$(CONFIG_X86_32) := -march=i386
564     cflags-$(CONFIG_X86_64) := -mcmodel=small
565     KBUILD_CFLAGS += $(cflags-y)
566     +KBUILD_CFLAGS += -mno-mmx -mno-sse
567     KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
568     KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
569    
570     diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
571     index 5439117d5c4c..dec48bfaddb8 100644
572     --- a/arch/x86/kvm/lapic.c
573     +++ b/arch/x86/kvm/lapic.c
574     @@ -143,6 +143,8 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
575     return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
576     }
577    
578     +#define KVM_X2APIC_CID_BITS 0
579     +
580     static void recalculate_apic_map(struct kvm *kvm)
581     {
582     struct kvm_apic_map *new, *old = NULL;
583     @@ -180,7 +182,8 @@ static void recalculate_apic_map(struct kvm *kvm)
584     if (apic_x2apic_mode(apic)) {
585     new->ldr_bits = 32;
586     new->cid_shift = 16;
587     - new->cid_mask = new->lid_mask = 0xffff;
588     + new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
589     + new->lid_mask = 0xffff;
590     } else if (kvm_apic_sw_enabled(apic) &&
591     !new->cid_mask /* flat mode */ &&
592     kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
593     @@ -841,7 +844,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
594     ASSERT(apic != NULL);
595    
596     /* if initial count is 0, current count should also be 0 */
597     - if (kvm_apic_get_reg(apic, APIC_TMICT) == 0)
598     + if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
599     + apic->lapic_timer.period == 0)
600     return 0;
601    
602     remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
603     @@ -1691,7 +1695,6 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
604     void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
605     {
606     u32 data;
607     - void *vapic;
608    
609     if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
610     apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
611     @@ -1699,9 +1702,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
612     if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
613     return;
614    
615     - vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
616     - data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
617     - kunmap_atomic(vapic);
618     + kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
619     + sizeof(u32));
620    
621     apic_set_tpr(vcpu->arch.apic, data & 0xff);
622     }
623     @@ -1737,7 +1739,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
624     u32 data, tpr;
625     int max_irr, max_isr;
626     struct kvm_lapic *apic = vcpu->arch.apic;
627     - void *vapic;
628    
629     apic_sync_pv_eoi_to_guest(vcpu, apic);
630    
631     @@ -1753,18 +1754,24 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
632     max_isr = 0;
633     data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
634    
635     - vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
636     - *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
637     - kunmap_atomic(vapic);
638     + kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
639     + sizeof(u32));
640     }
641    
642     -void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
643     +int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
644     {
645     - vcpu->arch.apic->vapic_addr = vapic_addr;
646     - if (vapic_addr)
647     + if (vapic_addr) {
648     + if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
649     + &vcpu->arch.apic->vapic_cache,
650     + vapic_addr, sizeof(u32)))
651     + return -EINVAL;
652     __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
653     - else
654     + } else {
655     __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
656     + }
657     +
658     + vcpu->arch.apic->vapic_addr = vapic_addr;
659     + return 0;
660     }
661    
662     int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
663     diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
664     index c730ac9fe801..c8b0d0d2da5c 100644
665     --- a/arch/x86/kvm/lapic.h
666     +++ b/arch/x86/kvm/lapic.h
667     @@ -34,7 +34,7 @@ struct kvm_lapic {
668     */
669     void *regs;
670     gpa_t vapic_addr;
671     - struct page *vapic_page;
672     + struct gfn_to_hva_cache vapic_cache;
673     unsigned long pending_events;
674     unsigned int sipi_vector;
675     };
676     @@ -76,7 +76,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
677     void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
678     void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
679    
680     -void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
681     +int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
682     void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
683     void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
684    
685     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
686     index e5ca72a5cdb6..eb9b9c9fc3d9 100644
687     --- a/arch/x86/kvm/x86.c
688     +++ b/arch/x86/kvm/x86.c
689     @@ -3192,8 +3192,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
690     r = -EFAULT;
691     if (copy_from_user(&va, argp, sizeof va))
692     goto out;
693     - r = 0;
694     - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
695     + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
696     break;
697     }
698     case KVM_X86_SETUP_MCE: {
699     @@ -5718,36 +5717,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
700     !kvm_event_needs_reinjection(vcpu);
701     }
702    
703     -static int vapic_enter(struct kvm_vcpu *vcpu)
704     -{
705     - struct kvm_lapic *apic = vcpu->arch.apic;
706     - struct page *page;
707     -
708     - if (!apic || !apic->vapic_addr)
709     - return 0;
710     -
711     - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
712     - if (is_error_page(page))
713     - return -EFAULT;
714     -
715     - vcpu->arch.apic->vapic_page = page;
716     - return 0;
717     -}
718     -
719     -static void vapic_exit(struct kvm_vcpu *vcpu)
720     -{
721     - struct kvm_lapic *apic = vcpu->arch.apic;
722     - int idx;
723     -
724     - if (!apic || !apic->vapic_addr)
725     - return;
726     -
727     - idx = srcu_read_lock(&vcpu->kvm->srcu);
728     - kvm_release_page_dirty(apic->vapic_page);
729     - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
730     - srcu_read_unlock(&vcpu->kvm->srcu, idx);
731     -}
732     -
733     static void update_cr8_intercept(struct kvm_vcpu *vcpu)
734     {
735     int max_irr, tpr;
736     @@ -6047,11 +6016,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
737     struct kvm *kvm = vcpu->kvm;
738    
739     vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
740     - r = vapic_enter(vcpu);
741     - if (r) {
742     - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
743     - return r;
744     - }
745    
746     r = 1;
747     while (r > 0) {
748     @@ -6110,8 +6074,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
749    
750     srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
751    
752     - vapic_exit(vcpu);
753     -
754     return r;
755     }
756    
757     diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
758     index c7e22ab29a5a..220fa52b9bd0 100644
759     --- a/arch/x86/platform/efi/efi.c
760     +++ b/arch/x86/platform/efi/efi.c
761     @@ -768,13 +768,6 @@ void __init efi_init(void)
762    
763     set_bit(EFI_MEMMAP, &x86_efi_facility);
764    
765     -#ifdef CONFIG_X86_32
766     - if (efi_is_native()) {
767     - x86_platform.get_wallclock = efi_get_time;
768     - x86_platform.set_wallclock = efi_set_rtc_mmss;
769     - }
770     -#endif
771     -
772     #if EFI_DEBUG
773     print_efi_memmap();
774     #endif
775     diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
776     index 0f92173a12b6..efe4d7220397 100644
777     --- a/arch/x86/platform/uv/tlb_uv.c
778     +++ b/arch/x86/platform/uv/tlb_uv.c
779     @@ -1070,12 +1070,13 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
780     unsigned long status;
781    
782     bcp = &per_cpu(bau_control, cpu);
783     - stat = bcp->statp;
784     - stat->s_enters++;
785    
786     if (bcp->nobau)
787     return cpumask;
788    
789     + stat = bcp->statp;
790     + stat->s_enters++;
791     +
792     if (bcp->busy) {
793     descriptor_status =
794     read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
795     diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
796     index 88692871823f..9cac82588cbc 100644
797     --- a/arch/x86/realmode/rm/Makefile
798     +++ b/arch/x86/realmode/rm/Makefile
799     @@ -73,9 +73,10 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
800     -march=i386 -mregparm=3 \
801     -include $(srctree)/$(src)/../../boot/code16gcc.h \
802     -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
803     + -mno-mmx -mno-sse \
804     $(call cc-option, -ffreestanding) \
805     $(call cc-option, -fno-toplevel-reorder,\
806     - $(call cc-option, -fno-unit-at-a-time)) \
807     + $(call cc-option, -fno-unit-at-a-time)) \
808     $(call cc-option, -fno-stack-protector) \
809     $(call cc-option, -mpreferred-stack-boundary=2)
810     KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
811     diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
812     index 585c3b279feb..850246206b12 100644
813     --- a/crypto/algif_hash.c
814     +++ b/crypto/algif_hash.c
815     @@ -117,9 +117,6 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
816     if (flags & MSG_SENDPAGE_NOTLAST)
817     flags |= MSG_MORE;
818    
819     - if (flags & MSG_SENDPAGE_NOTLAST)
820     - flags |= MSG_MORE;
821     -
822     lock_sock(sk);
823     sg_init_table(ctx->sgl.sg, 1);
824     sg_set_page(ctx->sgl.sg, page, size, offset);
825     diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
826     index 918a3b4148b8..a19c027b29bd 100644
827     --- a/crypto/algif_skcipher.c
828     +++ b/crypto/algif_skcipher.c
829     @@ -381,9 +381,6 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
830     if (flags & MSG_SENDPAGE_NOTLAST)
831     flags |= MSG_MORE;
832    
833     - if (flags & MSG_SENDPAGE_NOTLAST)
834     - flags |= MSG_MORE;
835     -
836     lock_sock(sk);
837     if (!ctx->more && ctx->used)
838     goto unlock;
839     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
840     index 333aa1bca13d..f535670b42d1 100644
841     --- a/drivers/gpu/drm/i915/intel_display.c
842     +++ b/drivers/gpu/drm/i915/intel_display.c
843     @@ -1429,6 +1429,20 @@ static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
844     POSTING_READ(DPLL(pipe));
845     }
846    
847     +static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
848     +{
849     + u32 val = 0;
850     +
851     + /* Make sure the pipe isn't still relying on us */
852     + assert_pipe_disabled(dev_priv, pipe);
853     +
854     + /* Leave integrated clock source enabled */
855     + if (pipe == PIPE_B)
856     + val = DPLL_INTEGRATED_CRI_CLK_VLV;
857     + I915_WRITE(DPLL(pipe), val);
858     + POSTING_READ(DPLL(pipe));
859     +}
860     +
861     void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
862     {
863     u32 port_mask;
864     @@ -3824,7 +3838,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
865     if (encoder->post_disable)
866     encoder->post_disable(encoder);
867    
868     - i9xx_disable_pll(dev_priv, pipe);
869     + if (IS_VALLEYVIEW(dev))
870     + vlv_disable_pll(dev_priv, pipe);
871     + else
872     + i9xx_disable_pll(dev_priv, pipe);
873    
874     intel_crtc->active = false;
875     intel_update_fbc(dev);
876     @@ -4553,9 +4570,9 @@ static void vlv_update_pll(struct intel_crtc *crtc)
877     /* Enable DPIO clock input */
878     dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
879     DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
880     - if (pipe)
881     + /* We should never disable this, set it here for state tracking */
882     + if (pipe == PIPE_B)
883     dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
884     -
885     dpll |= DPLL_VCO_ENABLE;
886     crtc->config.dpll_hw_state.dpll = dpll;
887    
888     @@ -5015,6 +5032,32 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
889     I915_READ(LVDS) & LVDS_BORDER_ENABLE;
890     }
891    
892     +static void vlv_crtc_clock_get(struct intel_crtc *crtc,
893     + struct intel_crtc_config *pipe_config)
894     +{
895     + struct drm_device *dev = crtc->base.dev;
896     + struct drm_i915_private *dev_priv = dev->dev_private;
897     + int pipe = pipe_config->cpu_transcoder;
898     + intel_clock_t clock;
899     + u32 mdiv;
900     + int refclk = 100000;
901     +
902     + mutex_lock(&dev_priv->dpio_lock);
903     + mdiv = vlv_dpio_read(dev_priv, DPIO_DIV(pipe));
904     + mutex_unlock(&dev_priv->dpio_lock);
905     +
906     + clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
907     + clock.m2 = mdiv & DPIO_M2DIV_MASK;
908     + clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
909     + clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
910     + clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
911     +
912     + clock.vco = refclk * clock.m1 * clock.m2 / clock.n;
913     + clock.dot = 2 * clock.vco / (clock.p1 * clock.p2);
914     +
915     + pipe_config->adjusted_mode.clock = clock.dot / 10;
916     +}
917     +
918     static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
919     struct intel_crtc_config *pipe_config)
920     {
921     @@ -5546,7 +5589,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
922     uint16_t postoff = 0;
923    
924     if (intel_crtc->config.limited_color_range)
925     - postoff = (16 * (1 << 13) / 255) & 0x1fff;
926     + postoff = (16 * (1 << 12) / 255) & 0x1fff;
927    
928     I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
929     I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
930     @@ -6062,7 +6105,7 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
931    
932     /* Make sure we're not on PC8 state before disabling PC8, otherwise
933     * we'll hang the machine! */
934     - dev_priv->uncore.funcs.force_wake_get(dev_priv);
935     + gen6_gt_force_wake_get(dev_priv);
936    
937     if (val & LCPLL_POWER_DOWN_ALLOW) {
938     val &= ~LCPLL_POWER_DOWN_ALLOW;
939     @@ -6093,7 +6136,7 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
940     DRM_ERROR("Switching back to LCPLL failed\n");
941     }
942    
943     - dev_priv->uncore.funcs.force_wake_put(dev_priv);
944     + gen6_gt_force_wake_put(dev_priv);
945     }
946    
947     void hsw_enable_pc8_work(struct work_struct *__work)
948     @@ -9832,7 +9875,7 @@ static void intel_init_display(struct drm_device *dev)
949     dev_priv->display.update_plane = ironlake_update_plane;
950     } else if (IS_VALLEYVIEW(dev)) {
951     dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
952     - dev_priv->display.get_clock = i9xx_crtc_clock_get;
953     + dev_priv->display.get_clock = vlv_crtc_clock_get;
954     dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
955     dev_priv->display.crtc_enable = valleyview_crtc_enable;
956     dev_priv->display.crtc_disable = i9xx_crtc_disable;
957     @@ -10088,12 +10131,19 @@ static void i915_disable_vga(struct drm_device *dev)
958    
959     void intel_modeset_init_hw(struct drm_device *dev)
960     {
961     + struct drm_i915_private *dev_priv = dev->dev_private;
962     +
963     intel_init_power_well(dev);
964    
965     intel_prepare_ddi(dev);
966    
967     intel_init_clock_gating(dev);
968    
969     + /* Enable the CRI clock source so we can get at the display */
970     + if (IS_VALLEYVIEW(dev))
971     + I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
972     + DPLL_INTEGRATED_CRI_CLK_VLV);
973     +
974     mutex_lock(&dev->struct_mutex);
975     intel_enable_gt_powersave(dev);
976     mutex_unlock(&dev->struct_mutex);
977     diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
978     index 0652ee0a2098..f685035dbe39 100644
979     --- a/drivers/gpu/drm/radeon/atombios_i2c.c
980     +++ b/drivers/gpu/drm/radeon/atombios_i2c.c
981     @@ -44,7 +44,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
982     PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
983     int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
984     unsigned char *base;
985     - u16 out;
986     + u16 out = cpu_to_le16(0);
987    
988     memset(&args, 0, sizeof(args));
989    
990     @@ -55,11 +55,14 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
991     DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
992     return -EINVAL;
993     }
994     - args.ucRegIndex = buf[0];
995     - if (num > 1) {
996     + if (buf == NULL)
997     + args.ucRegIndex = 0;
998     + else
999     + args.ucRegIndex = buf[0];
1000     + if (num)
1001     num--;
1002     + if (num)
1003     memcpy(&out, &buf[1], num);
1004     - }
1005     args.lpI2CDataOut = cpu_to_le16(out);
1006     } else {
1007     if (num > ATOM_MAX_HW_I2C_READ) {
1008     @@ -96,14 +99,14 @@ int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
1009     struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
1010     struct i2c_msg *p;
1011     int i, remaining, current_count, buffer_offset, max_bytes, ret;
1012     - u8 buf = 0, flags;
1013     + u8 flags;
1014    
1015     /* check for bus probe */
1016     p = &msgs[0];
1017     if ((num == 1) && (p->len == 0)) {
1018     ret = radeon_process_i2c_ch(i2c,
1019     p->addr, HW_I2C_WRITE,
1020     - &buf, 1);
1021     + NULL, 0);
1022     if (ret)
1023     return ret;
1024     else
1025     diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
1026     index 615c5b290e78..7f3b0d9aaada 100644
1027     --- a/drivers/gpu/drm/radeon/r600_hdmi.c
1028     +++ b/drivers/gpu/drm/radeon/r600_hdmi.c
1029     @@ -304,9 +304,9 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1030     WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
1031     WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
1032     }
1033     - } else if (ASIC_IS_DCE3(rdev)) {
1034     + } else {
1035     /* according to the reg specs, this should DCE3.2 only, but in
1036     - * practice it seems to cover DCE3.0/3.1 as well.
1037     + * practice it seems to cover DCE2.0/3.0/3.1 as well.
1038     */
1039     if (dig->dig_encoder == 0) {
1040     WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
1041     @@ -317,10 +317,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1042     WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
1043     WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
1044     }
1045     - } else {
1046     - /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
1047     - WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
1048     - AUDIO_DTO_MODULE(clock / 10));
1049     }
1050     }
1051    
1052     diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
1053     index f79ee184ffd5..5c39bf7c3d88 100644
1054     --- a/drivers/gpu/drm/radeon/radeon_atombios.c
1055     +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
1056     @@ -2918,7 +2918,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
1057     mpll_param->dll_speed = args.ucDllSpeed;
1058     mpll_param->bwcntl = args.ucBWCntl;
1059     mpll_param->vco_mode =
1060     - (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK) ? 1 : 0;
1061     + (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
1062     mpll_param->yclk_sel =
1063     (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
1064     mpll_param->qdr =
1065     diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
1066     index d96f7cbca0a1..fe0ec2cb2084 100644
1067     --- a/drivers/gpu/drm/radeon/si.c
1068     +++ b/drivers/gpu/drm/radeon/si.c
1069     @@ -3887,8 +3887,15 @@ static int si_mc_init(struct radeon_device *rdev)
1070     rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
1071     rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
1072     /* size in MB on si */
1073     - rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
1074     - rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
1075     + tmp = RREG32(CONFIG_MEMSIZE);
1076     + /* some boards may have garbage in the upper 16 bits */
1077     + if (tmp & 0xffff0000) {
1078     + DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
1079     + if (tmp & 0xffff)
1080     + tmp &= 0xffff;
1081     + }
1082     + rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
1083     + rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
1084     rdev->mc.visible_vram_size = rdev->mc.aper_size;
1085     si_vram_gtt_location(rdev, &rdev->mc);
1086     radeon_update_bandwidth_info(rdev);
1087     diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
1088     index 8bf646183bac..f738800c70cf 100644
1089     --- a/drivers/gpu/drm/udl/udl_gem.c
1090     +++ b/drivers/gpu/drm/udl/udl_gem.c
1091     @@ -132,6 +132,12 @@ static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
1092    
1093     static void udl_gem_put_pages(struct udl_gem_object *obj)
1094     {
1095     + if (obj->base.import_attach) {
1096     + drm_free_large(obj->pages);
1097     + obj->pages = NULL;
1098     + return;
1099     + }
1100     +
1101     drm_gem_put_pages(&obj->base, obj->pages, false, false);
1102     obj->pages = NULL;
1103     }
1104     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1105     index c08b5c14fece..aedfe50d557a 100644
1106     --- a/drivers/hid/hid-core.c
1107     +++ b/drivers/hid/hid-core.c
1108     @@ -1725,6 +1725,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1109     { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
1110     { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
1111     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
1112     + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
1113     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
1114     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
1115     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
1116     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1117     index 9480b425b254..aeeea796f595 100644
1118     --- a/drivers/hid/hid-ids.h
1119     +++ b/drivers/hid/hid-ids.h
1120     @@ -489,6 +489,7 @@
1121     #define USB_VENDOR_ID_KYE 0x0458
1122     #define USB_DEVICE_ID_KYE_ERGO_525V 0x0087
1123     #define USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE 0x0138
1124     +#define USB_DEVICE_ID_GENIUS_MANTICORE 0x0153
1125     #define USB_DEVICE_ID_GENIUS_GX_IMPERATOR 0x4018
1126     #define USB_DEVICE_ID_KYE_GPEN_560 0x5003
1127     #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
1128     diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
1129     index 73845120295e..d645caa690dd 100644
1130     --- a/drivers/hid/hid-kye.c
1131     +++ b/drivers/hid/hid-kye.c
1132     @@ -342,6 +342,10 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
1133     rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
1134     "Genius Gx Imperator Keyboard");
1135     break;
1136     + case USB_DEVICE_ID_GENIUS_MANTICORE:
1137     + rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
1138     + "Genius Manticore Keyboard");
1139     + break;
1140     }
1141     return rdesc;
1142     }
1143     @@ -439,6 +443,8 @@ static const struct hid_device_id kye_devices[] = {
1144     USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
1145     { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
1146     USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
1147     + { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
1148     + USB_DEVICE_ID_GENIUS_MANTICORE) },
1149     { }
1150     };
1151     MODULE_DEVICE_TABLE(hid, kye_devices);
1152     diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
1153     index 2dc37c7c6947..7d68a08baaa8 100644
1154     --- a/drivers/hwmon/hih6130.c
1155     +++ b/drivers/hwmon/hih6130.c
1156     @@ -43,6 +43,7 @@
1157     * @last_update: time of last update (jiffies)
1158     * @temperature: cached temperature measurement value
1159     * @humidity: cached humidity measurement value
1160     + * @write_length: length for I2C measurement request
1161     */
1162     struct hih6130 {
1163     struct device *hwmon_dev;
1164     @@ -51,6 +52,7 @@ struct hih6130 {
1165     unsigned long last_update;
1166     int temperature;
1167     int humidity;
1168     + size_t write_length;
1169     };
1170    
1171     /**
1172     @@ -121,8 +123,15 @@ static int hih6130_update_measurements(struct i2c_client *client)
1173     */
1174     if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) {
1175    
1176     - /* write to slave address, no data, to request a measurement */
1177     - ret = i2c_master_send(client, tmp, 0);
1178     + /*
1179     + * Write to slave address to request a measurement.
1180     + * According with the datasheet it should be with no data, but
1181     + * for systems with I2C bus drivers that do not allow zero
1182     + * length packets we write one dummy byte to allow sensor
1183     + * measurements on them.
1184     + */
1185     + tmp[0] = 0;
1186     + ret = i2c_master_send(client, tmp, hih6130->write_length);
1187     if (ret < 0)
1188     goto out;
1189    
1190     @@ -252,6 +261,9 @@ static int hih6130_probe(struct i2c_client *client,
1191     goto fail_remove_sysfs;
1192     }
1193    
1194     + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK))
1195     + hih6130->write_length = 1;
1196     +
1197     return 0;
1198    
1199     fail_remove_sysfs:
1200     diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
1201     index 6cf6bff79003..a2f3b4a365e4 100644
1202     --- a/drivers/hwmon/lm78.c
1203     +++ b/drivers/hwmon/lm78.c
1204     @@ -94,6 +94,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
1205     {
1206     if (rpm <= 0)
1207     return 255;
1208     + if (rpm > 1350000)
1209     + return 1;
1210     return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
1211     }
1212    
1213     diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
1214     index 1404e6319deb..72a889702f0d 100644
1215     --- a/drivers/hwmon/sis5595.c
1216     +++ b/drivers/hwmon/sis5595.c
1217     @@ -141,6 +141,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
1218     {
1219     if (rpm <= 0)
1220     return 255;
1221     + if (rpm > 1350000)
1222     + return 1;
1223     return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
1224     }
1225    
1226     diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
1227     index 0e7017841f7d..aee14e2192f8 100644
1228     --- a/drivers/hwmon/vt8231.c
1229     +++ b/drivers/hwmon/vt8231.c
1230     @@ -145,7 +145,7 @@ static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 };
1231     */
1232     static inline u8 FAN_TO_REG(long rpm, int div)
1233     {
1234     - if (rpm == 0)
1235     + if (rpm <= 0 || rpm > 1310720)
1236     return 0;
1237     return clamp_val(1310720 / (rpm * div), 1, 255);
1238     }
1239     diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
1240     index edb06cda5a68..6ed76ceb9270 100644
1241     --- a/drivers/hwmon/w83l786ng.c
1242     +++ b/drivers/hwmon/w83l786ng.c
1243     @@ -481,9 +481,11 @@ store_pwm(struct device *dev, struct device_attribute *attr,
1244     if (err)
1245     return err;
1246     val = clamp_val(val, 0, 255);
1247     + val = DIV_ROUND_CLOSEST(val, 0x11);
1248    
1249     mutex_lock(&data->update_lock);
1250     - data->pwm[nr] = val;
1251     + data->pwm[nr] = val * 0x11;
1252     + val |= w83l786ng_read_value(client, W83L786NG_REG_PWM[nr]) & 0xf0;
1253     w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val);
1254     mutex_unlock(&data->update_lock);
1255     return count;
1256     @@ -510,7 +512,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
1257     mutex_lock(&data->update_lock);
1258     reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG);
1259     data->pwm_enable[nr] = val;
1260     - reg &= ~(0x02 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
1261     + reg &= ~(0x03 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
1262     reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr];
1263     w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg);
1264     mutex_unlock(&data->update_lock);
1265     @@ -776,9 +778,10 @@ static struct w83l786ng_data *w83l786ng_update_device(struct device *dev)
1266     ((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1)
1267     ? 0 : 1;
1268     data->pwm_enable[i] =
1269     - ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 2) + 1;
1270     - data->pwm[i] = w83l786ng_read_value(client,
1271     - W83L786NG_REG_PWM[i]);
1272     + ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 3) + 1;
1273     + data->pwm[i] =
1274     + (w83l786ng_read_value(client, W83L786NG_REG_PWM[i])
1275     + & 0x0f) * 0x11;
1276     }
1277    
1278    
1279     diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
1280     index 8551dcaf24db..597e9b8fc18d 100644
1281     --- a/drivers/input/mouse/elantech.c
1282     +++ b/drivers/input/mouse/elantech.c
1283     @@ -1313,6 +1313,7 @@ static int elantech_set_properties(struct elantech_data *etd)
1284     break;
1285     case 6:
1286     case 7:
1287     + case 8:
1288     etd->hw_version = 4;
1289     break;
1290     default:
1291     diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
1292     index ae4b6b903629..5f87bed05467 100644
1293     --- a/drivers/input/touchscreen/usbtouchscreen.c
1294     +++ b/drivers/input/touchscreen/usbtouchscreen.c
1295     @@ -106,6 +106,7 @@ struct usbtouch_device_info {
1296     struct usbtouch_usb {
1297     unsigned char *data;
1298     dma_addr_t data_dma;
1299     + int data_size;
1300     unsigned char *buffer;
1301     int buf_len;
1302     struct urb *irq;
1303     @@ -1521,7 +1522,7 @@ static int usbtouch_reset_resume(struct usb_interface *intf)
1304     static void usbtouch_free_buffers(struct usb_device *udev,
1305     struct usbtouch_usb *usbtouch)
1306     {
1307     - usb_free_coherent(udev, usbtouch->type->rept_size,
1308     + usb_free_coherent(udev, usbtouch->data_size,
1309     usbtouch->data, usbtouch->data_dma);
1310     kfree(usbtouch->buffer);
1311     }
1312     @@ -1566,7 +1567,20 @@ static int usbtouch_probe(struct usb_interface *intf,
1313     if (!type->process_pkt)
1314     type->process_pkt = usbtouch_process_pkt;
1315    
1316     - usbtouch->data = usb_alloc_coherent(udev, type->rept_size,
1317     + usbtouch->data_size = type->rept_size;
1318     + if (type->get_pkt_len) {
1319     + /*
1320     + * When dealing with variable-length packets we should
1321     + * not request more than wMaxPacketSize bytes at once
1322     + * as we do not know if there is more data coming or
1323     + * we filled exactly wMaxPacketSize bytes and there is
1324     + * nothing else.
1325     + */
1326     + usbtouch->data_size = min(usbtouch->data_size,
1327     + usb_endpoint_maxp(endpoint));
1328     + }
1329     +
1330     + usbtouch->data = usb_alloc_coherent(udev, usbtouch->data_size,
1331     GFP_KERNEL, &usbtouch->data_dma);
1332     if (!usbtouch->data)
1333     goto out_free;
1334     @@ -1626,12 +1640,12 @@ static int usbtouch_probe(struct usb_interface *intf,
1335     if (usb_endpoint_type(endpoint) == USB_ENDPOINT_XFER_INT)
1336     usb_fill_int_urb(usbtouch->irq, udev,
1337     usb_rcvintpipe(udev, endpoint->bEndpointAddress),
1338     - usbtouch->data, type->rept_size,
1339     + usbtouch->data, usbtouch->data_size,
1340     usbtouch_irq, usbtouch, endpoint->bInterval);
1341     else
1342     usb_fill_bulk_urb(usbtouch->irq, udev,
1343     usb_rcvbulkpipe(udev, endpoint->bEndpointAddress),
1344     - usbtouch->data, type->rept_size,
1345     + usbtouch->data, usbtouch->data_size,
1346     usbtouch_irq, usbtouch);
1347    
1348     usbtouch->irq->dev = udev;
1349     diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
1350     index 181c9ba929cd..0046a619527d 100644
1351     --- a/drivers/iommu/arm-smmu.c
1352     +++ b/drivers/iommu/arm-smmu.c
1353     @@ -392,7 +392,7 @@ struct arm_smmu_domain {
1354     struct arm_smmu_cfg root_cfg;
1355     phys_addr_t output_mask;
1356    
1357     - spinlock_t lock;
1358     + struct mutex lock;
1359     };
1360    
1361     static DEFINE_SPINLOCK(arm_smmu_devices_lock);
1362     @@ -897,7 +897,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
1363     goto out_free_domain;
1364     smmu_domain->root_cfg.pgd = pgd;
1365    
1366     - spin_lock_init(&smmu_domain->lock);
1367     + mutex_init(&smmu_domain->lock);
1368     domain->priv = smmu_domain;
1369     return 0;
1370    
1371     @@ -1134,7 +1134,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1372     * Sanity check the domain. We don't currently support domains
1373     * that cross between different SMMU chains.
1374     */
1375     - spin_lock(&smmu_domain->lock);
1376     + mutex_lock(&smmu_domain->lock);
1377     if (!smmu_domain->leaf_smmu) {
1378     /* Now that we have a master, we can finalise the domain */
1379     ret = arm_smmu_init_domain_context(domain, dev);
1380     @@ -1149,7 +1149,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1381     dev_name(device_smmu->dev));
1382     goto err_unlock;
1383     }
1384     - spin_unlock(&smmu_domain->lock);
1385     + mutex_unlock(&smmu_domain->lock);
1386    
1387     /* Looks ok, so add the device to the domain */
1388     master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
1389     @@ -1159,7 +1159,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1390     return arm_smmu_domain_add_master(smmu_domain, master);
1391    
1392     err_unlock:
1393     - spin_unlock(&smmu_domain->lock);
1394     + mutex_unlock(&smmu_domain->lock);
1395     return ret;
1396     }
1397    
1398     @@ -1388,7 +1388,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
1399     if (paddr & ~output_mask)
1400     return -ERANGE;
1401    
1402     - spin_lock(&smmu_domain->lock);
1403     + mutex_lock(&smmu_domain->lock);
1404     pgd += pgd_index(iova);
1405     end = iova + size;
1406     do {
1407     @@ -1404,7 +1404,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
1408     } while (pgd++, iova != end);
1409    
1410     out_unlock:
1411     - spin_unlock(&smmu_domain->lock);
1412     + mutex_unlock(&smmu_domain->lock);
1413    
1414     /* Ensure new page tables are visible to the hardware walker */
1415     if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
1416     @@ -1443,44 +1443,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1417     static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1418     dma_addr_t iova)
1419     {
1420     - pgd_t *pgd;
1421     - pud_t *pud;
1422     - pmd_t *pmd;
1423     - pte_t *pte;
1424     + pgd_t *pgdp, pgd;
1425     + pud_t pud;
1426     + pmd_t pmd;
1427     + pte_t pte;
1428     struct arm_smmu_domain *smmu_domain = domain->priv;
1429     struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
1430     - struct arm_smmu_device *smmu = root_cfg->smmu;
1431    
1432     - spin_lock(&smmu_domain->lock);
1433     - pgd = root_cfg->pgd;
1434     - if (!pgd)
1435     - goto err_unlock;
1436     + pgdp = root_cfg->pgd;
1437     + if (!pgdp)
1438     + return 0;
1439    
1440     - pgd += pgd_index(iova);
1441     - if (pgd_none_or_clear_bad(pgd))
1442     - goto err_unlock;
1443     + pgd = *(pgdp + pgd_index(iova));
1444     + if (pgd_none(pgd))
1445     + return 0;
1446    
1447     - pud = pud_offset(pgd, iova);
1448     - if (pud_none_or_clear_bad(pud))
1449     - goto err_unlock;
1450     + pud = *pud_offset(&pgd, iova);
1451     + if (pud_none(pud))
1452     + return 0;
1453    
1454     - pmd = pmd_offset(pud, iova);
1455     - if (pmd_none_or_clear_bad(pmd))
1456     - goto err_unlock;
1457     + pmd = *pmd_offset(&pud, iova);
1458     + if (pmd_none(pmd))
1459     + return 0;
1460    
1461     - pte = pmd_page_vaddr(*pmd) + pte_index(iova);
1462     + pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
1463     if (pte_none(pte))
1464     - goto err_unlock;
1465     -
1466     - spin_unlock(&smmu_domain->lock);
1467     - return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
1468     + return 0;
1469    
1470     -err_unlock:
1471     - spin_unlock(&smmu_domain->lock);
1472     - dev_warn(smmu->dev,
1473     - "invalid (corrupt?) page tables detected for iova 0x%llx\n",
1474     - (unsigned long long)iova);
1475     - return -EINVAL;
1476     + return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
1477     }
1478    
1479     static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
1480     diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
1481     index 173cbb20d104..54bdd923316f 100644
1482     --- a/drivers/md/dm-bufio.c
1483     +++ b/drivers/md/dm-bufio.c
1484     @@ -1717,6 +1717,11 @@ static int __init dm_bufio_init(void)
1485     {
1486     __u64 mem;
1487    
1488     + dm_bufio_allocated_kmem_cache = 0;
1489     + dm_bufio_allocated_get_free_pages = 0;
1490     + dm_bufio_allocated_vmalloc = 0;
1491     + dm_bufio_current_allocated = 0;
1492     +
1493     memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1494     memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1495    
1496     diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
1497     index 496d5f3646a5..2f91d6d4a2cc 100644
1498     --- a/drivers/md/dm-delay.c
1499     +++ b/drivers/md/dm-delay.c
1500     @@ -20,6 +20,7 @@
1501     struct delay_c {
1502     struct timer_list delay_timer;
1503     struct mutex timer_lock;
1504     + struct workqueue_struct *kdelayd_wq;
1505     struct work_struct flush_expired_bios;
1506     struct list_head delayed_bios;
1507     atomic_t may_delay;
1508     @@ -45,14 +46,13 @@ struct dm_delay_info {
1509    
1510     static DEFINE_MUTEX(delayed_bios_lock);
1511    
1512     -static struct workqueue_struct *kdelayd_wq;
1513     static struct kmem_cache *delayed_cache;
1514    
1515     static void handle_delayed_timer(unsigned long data)
1516     {
1517     struct delay_c *dc = (struct delay_c *)data;
1518    
1519     - queue_work(kdelayd_wq, &dc->flush_expired_bios);
1520     + queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
1521     }
1522    
1523     static void queue_timeout(struct delay_c *dc, unsigned long expires)
1524     @@ -191,6 +191,12 @@ out:
1525     goto bad_dev_write;
1526     }
1527    
1528     + dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
1529     + if (!dc->kdelayd_wq) {
1530     + DMERR("Couldn't start kdelayd");
1531     + goto bad_queue;
1532     + }
1533     +
1534     setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
1535    
1536     INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
1537     @@ -203,6 +209,8 @@ out:
1538     ti->private = dc;
1539     return 0;
1540    
1541     +bad_queue:
1542     + mempool_destroy(dc->delayed_pool);
1543     bad_dev_write:
1544     if (dc->dev_write)
1545     dm_put_device(ti, dc->dev_write);
1546     @@ -217,7 +225,7 @@ static void delay_dtr(struct dm_target *ti)
1547     {
1548     struct delay_c *dc = ti->private;
1549    
1550     - flush_workqueue(kdelayd_wq);
1551     + destroy_workqueue(dc->kdelayd_wq);
1552    
1553     dm_put_device(ti, dc->dev_read);
1554    
1555     @@ -350,12 +358,6 @@ static int __init dm_delay_init(void)
1556     {
1557     int r = -ENOMEM;
1558    
1559     - kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
1560     - if (!kdelayd_wq) {
1561     - DMERR("Couldn't start kdelayd");
1562     - goto bad_queue;
1563     - }
1564     -
1565     delayed_cache = KMEM_CACHE(dm_delay_info, 0);
1566     if (!delayed_cache) {
1567     DMERR("Couldn't create delayed bio cache.");
1568     @@ -373,8 +375,6 @@ static int __init dm_delay_init(void)
1569     bad_register:
1570     kmem_cache_destroy(delayed_cache);
1571     bad_memcache:
1572     - destroy_workqueue(kdelayd_wq);
1573     -bad_queue:
1574     return r;
1575     }
1576    
1577     @@ -382,7 +382,6 @@ static void __exit dm_delay_exit(void)
1578     {
1579     dm_unregister_target(&delay_target);
1580     kmem_cache_destroy(delayed_cache);
1581     - destroy_workqueue(kdelayd_wq);
1582     }
1583    
1584     /* Module hooks */
1585     diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
1586     index aec57d76db5d..944690bafd93 100644
1587     --- a/drivers/md/dm-snap.c
1588     +++ b/drivers/md/dm-snap.c
1589     @@ -66,6 +66,18 @@ struct dm_snapshot {
1590    
1591     atomic_t pending_exceptions_count;
1592    
1593     + /* Protected by "lock" */
1594     + sector_t exception_start_sequence;
1595     +
1596     + /* Protected by kcopyd single-threaded callback */
1597     + sector_t exception_complete_sequence;
1598     +
1599     + /*
1600     + * A list of pending exceptions that completed out of order.
1601     + * Protected by kcopyd single-threaded callback.
1602     + */
1603     + struct list_head out_of_order_list;
1604     +
1605     mempool_t *pending_pool;
1606    
1607     struct dm_exception_table pending;
1608     @@ -173,6 +185,14 @@ struct dm_snap_pending_exception {
1609     */
1610     int started;
1611    
1612     + /* There was a copying error. */
1613     + int copy_error;
1614     +
1615     + /* A sequence number, used for in-order completion. */
1616     + sector_t exception_sequence;
1617     +
1618     + struct list_head out_of_order_entry;
1619     +
1620     /*
1621     * For writing a complete chunk, bypassing the copy.
1622     */
1623     @@ -1094,6 +1114,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1624     s->valid = 1;
1625     s->active = 0;
1626     atomic_set(&s->pending_exceptions_count, 0);
1627     + s->exception_start_sequence = 0;
1628     + s->exception_complete_sequence = 0;
1629     + INIT_LIST_HEAD(&s->out_of_order_list);
1630     init_rwsem(&s->lock);
1631     INIT_LIST_HEAD(&s->list);
1632     spin_lock_init(&s->pe_lock);
1633     @@ -1443,6 +1466,19 @@ static void commit_callback(void *context, int success)
1634     pending_complete(pe, success);
1635     }
1636    
1637     +static void complete_exception(struct dm_snap_pending_exception *pe)
1638     +{
1639     + struct dm_snapshot *s = pe->snap;
1640     +
1641     + if (unlikely(pe->copy_error))
1642     + pending_complete(pe, 0);
1643     +
1644     + else
1645     + /* Update the metadata if we are persistent */
1646     + s->store->type->commit_exception(s->store, &pe->e,
1647     + commit_callback, pe);
1648     +}
1649     +
1650     /*
1651     * Called when the copy I/O has finished. kcopyd actually runs
1652     * this code so don't block.
1653     @@ -1452,13 +1488,32 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
1654     struct dm_snap_pending_exception *pe = context;
1655     struct dm_snapshot *s = pe->snap;
1656    
1657     - if (read_err || write_err)
1658     - pending_complete(pe, 0);
1659     + pe->copy_error = read_err || write_err;
1660    
1661     - else
1662     - /* Update the metadata if we are persistent */
1663     - s->store->type->commit_exception(s->store, &pe->e,
1664     - commit_callback, pe);
1665     + if (pe->exception_sequence == s->exception_complete_sequence) {
1666     + s->exception_complete_sequence++;
1667     + complete_exception(pe);
1668     +
1669     + while (!list_empty(&s->out_of_order_list)) {
1670     + pe = list_entry(s->out_of_order_list.next,
1671     + struct dm_snap_pending_exception, out_of_order_entry);
1672     + if (pe->exception_sequence != s->exception_complete_sequence)
1673     + break;
1674     + s->exception_complete_sequence++;
1675     + list_del(&pe->out_of_order_entry);
1676     + complete_exception(pe);
1677     + }
1678     + } else {
1679     + struct list_head *lh;
1680     + struct dm_snap_pending_exception *pe2;
1681     +
1682     + list_for_each_prev(lh, &s->out_of_order_list) {
1683     + pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
1684     + if (pe2->exception_sequence < pe->exception_sequence)
1685     + break;
1686     + }
1687     + list_add(&pe->out_of_order_entry, lh);
1688     + }
1689     }
1690    
1691     /*
1692     @@ -1553,6 +1608,8 @@ __find_pending_exception(struct dm_snapshot *s,
1693     return NULL;
1694     }
1695    
1696     + pe->exception_sequence = s->exception_start_sequence++;
1697     +
1698     dm_insert_exception(&s->pending, &pe->e);
1699    
1700     return pe;
1701     @@ -2192,7 +2249,7 @@ static struct target_type origin_target = {
1702    
1703     static struct target_type snapshot_target = {
1704     .name = "snapshot",
1705     - .version = {1, 11, 1},
1706     + .version = {1, 12, 0},
1707     .module = THIS_MODULE,
1708     .ctr = snapshot_ctr,
1709     .dtr = snapshot_dtr,
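
The copy_callback() rework above completes pending exceptions strictly in sequence: each exception gets a number from exception_start_sequence when it is created, completions that arrive early are parked on out_of_order_list, and the callback drains that list as soon as the missing completion shows up. The following is a minimal userspace sketch of just that ordering logic, with hypothetical names (pending, copy_done, complete_one); it illustrates the technique and is not code from the patch.

/* Sketch: complete work items in sequence-number order even when their
 * callbacks arrive out of order. Hypothetical names, userspace only. */
#include <stdio.h>
#include <stdlib.h>

struct pending {
	unsigned long seq;
	struct pending *next;	/* sorted out-of-order list */
};

static struct pending *out_of_order;	/* ascending by seq */
static unsigned long complete_seq;	/* next sequence we may complete */

static void complete_one(unsigned long seq)
{
	printf("completing exception %lu\n", seq);
}

/* Called when the copy with the given sequence number finishes. */
static void copy_done(unsigned long seq)
{
	if (seq == complete_seq) {
		complete_one(complete_seq++);
		/* Drain any parked completions that are now in order. */
		while (out_of_order && out_of_order->seq == complete_seq) {
			struct pending *p = out_of_order;

			out_of_order = p->next;
			complete_one(complete_seq++);
			free(p);
		}
	} else {
		/* Park it, keeping the list sorted by sequence number. */
		struct pending **link = &out_of_order;
		struct pending *p = malloc(sizeof(*p));

		if (!p)
			return;
		p->seq = seq;
		while (*link && (*link)->seq < seq)
			link = &(*link)->next;
		p->next = *link;
		*link = p;
	}
}

int main(void)
{
	unsigned long order[] = { 0, 2, 3, 1, 4 };
	size_t i;

	for (i = 0; i < sizeof(order) / sizeof(order[0]); i++)
		copy_done(order[i]);
	return 0;
}

In dm-snap the same drain loop runs from the kcopyd callback, which is single-threaded, so the out-of-order list needs no extra locking; that is what the "Protected by kcopyd single-threaded callback" comments record.
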
1710     diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
1711     index 3d404c1371ed..28a90122a5a8 100644
1712     --- a/drivers/md/dm-stats.c
1713     +++ b/drivers/md/dm-stats.c
1714     @@ -964,6 +964,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
1715    
1716     int __init dm_statistics_init(void)
1717     {
1718     + shared_memory_amount = 0;
1719     dm_stat_need_rcu_barrier = 0;
1720     return 0;
1721     }
1722     diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
1723     index 41d907b58f7e..20a8cc0df7c6 100644
1724     --- a/drivers/md/dm-table.c
1725     +++ b/drivers/md/dm-table.c
1726     @@ -200,6 +200,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
1727    
1728     num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
1729    
1730     + if (!num_targets) {
1731     + kfree(t);
1732     + return -ENOMEM;
1733     + }
1734     +
1735     if (alloc_targets(t, num_targets)) {
1736     kfree(t);
1737     return -ENOMEM;
1738     diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
1739     index 60bce435f4fa..8a30ad54bd46 100644
1740     --- a/drivers/md/dm-thin-metadata.c
1741     +++ b/drivers/md/dm-thin-metadata.c
1742     @@ -1697,6 +1697,14 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
1743     up_write(&pmd->root_lock);
1744     }
1745    
1746     +void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
1747     +{
1748     + down_write(&pmd->root_lock);
1749     + pmd->read_only = false;
1750     + dm_bm_set_read_write(pmd->bm);
1751     + up_write(&pmd->root_lock);
1752     +}
1753     +
1754     int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
1755     dm_block_t threshold,
1756     dm_sm_threshold_fn fn,
1757     diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
1758     index 845ebbe589a9..7bcc0e1d6238 100644
1759     --- a/drivers/md/dm-thin-metadata.h
1760     +++ b/drivers/md/dm-thin-metadata.h
1761     @@ -193,6 +193,7 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_siz
1762     * that nothing is changing.
1763     */
1764     void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
1765     +void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd);
1766    
1767     int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
1768     dm_block_t threshold,
1769     diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1770     index 2c0cf511ec23..ee29037ffc2e 100644
1771     --- a/drivers/md/dm-thin.c
1772     +++ b/drivers/md/dm-thin.c
1773     @@ -640,7 +640,9 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
1774     */
1775     r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
1776     if (r) {
1777     - DMERR_LIMIT("dm_thin_insert_block() failed");
1778     + DMERR_LIMIT("%s: dm_thin_insert_block() failed: error = %d",
1779     + dm_device_name(pool->pool_md), r);
1780     + set_pool_mode(pool, PM_READ_ONLY);
1781     cell_error(pool, m->cell);
1782     goto out;
1783     }
1784     @@ -881,32 +883,23 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1785     }
1786     }
1787    
1788     -static int commit(struct pool *pool)
1789     -{
1790     - int r;
1791     -
1792     - r = dm_pool_commit_metadata(pool->pmd);
1793     - if (r)
1794     - DMERR_LIMIT("%s: commit failed: error = %d",
1795     - dm_device_name(pool->pool_md), r);
1796     -
1797     - return r;
1798     -}
1799     -
1800     /*
1801     * A non-zero return indicates read_only or fail_io mode.
1802     * Many callers don't care about the return value.
1803     */
1804     -static int commit_or_fallback(struct pool *pool)
1805     +static int commit(struct pool *pool)
1806     {
1807     int r;
1808    
1809     if (get_pool_mode(pool) != PM_WRITE)
1810     return -EINVAL;
1811    
1812     - r = commit(pool);
1813     - if (r)
1814     + r = dm_pool_commit_metadata(pool->pmd);
1815     + if (r) {
1816     + DMERR_LIMIT("%s: dm_pool_commit_metadata failed: error = %d",
1817     + dm_device_name(pool->pool_md), r);
1818     set_pool_mode(pool, PM_READ_ONLY);
1819     + }
1820    
1821     return r;
1822     }
1823     @@ -943,7 +936,9 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1824     * Try to commit to see if that will free up some
1825     * more space.
1826     */
1827     - (void) commit_or_fallback(pool);
1828     + r = commit(pool);
1829     + if (r)
1830     + return r;
1831    
1832     r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1833     if (r)
1834     @@ -957,7 +952,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1835     * table reload).
1836     */
1837     if (!free_blocks) {
1838     - DMWARN("%s: no free space available.",
1839     + DMWARN("%s: no free data space available.",
1840     dm_device_name(pool->pool_md));
1841     spin_lock_irqsave(&pool->lock, flags);
1842     pool->no_free_space = 1;
1843     @@ -967,8 +962,16 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1844     }
1845    
1846     r = dm_pool_alloc_data_block(pool->pmd, result);
1847     - if (r)
1848     + if (r) {
1849     + if (r == -ENOSPC &&
1850     + !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
1851     + !free_blocks) {
1852     + DMWARN("%s: no free metadata space available.",
1853     + dm_device_name(pool->pool_md));
1854     + set_pool_mode(pool, PM_READ_ONLY);
1855     + }
1856     return r;
1857     + }
1858    
1859     return 0;
1860     }
1861     @@ -1349,7 +1352,7 @@ static void process_deferred_bios(struct pool *pool)
1862     if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
1863     return;
1864    
1865     - if (commit_or_fallback(pool)) {
1866     + if (commit(pool)) {
1867     while ((bio = bio_list_pop(&bios)))
1868     bio_io_error(bio);
1869     return;
1870     @@ -1397,6 +1400,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
1871     case PM_FAIL:
1872     DMERR("%s: switching pool to failure mode",
1873     dm_device_name(pool->pool_md));
1874     + dm_pool_metadata_read_only(pool->pmd);
1875     pool->process_bio = process_bio_fail;
1876     pool->process_discard = process_bio_fail;
1877     pool->process_prepared_mapping = process_prepared_mapping_fail;
1878     @@ -1421,6 +1425,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
1879     break;
1880    
1881     case PM_WRITE:
1882     + dm_pool_metadata_read_write(pool->pmd);
1883     pool->process_bio = process_bio;
1884     pool->process_discard = process_discard;
1885     pool->process_prepared_mapping = process_prepared_mapping;
1886     @@ -1637,12 +1642,19 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
1887     struct pool_c *pt = ti->private;
1888    
1889     /*
1890     - * We want to make sure that degraded pools are never upgraded.
1891     + * We want to make sure that a pool in PM_FAIL mode is never upgraded.
1892     */
1893     enum pool_mode old_mode = pool->pf.mode;
1894     enum pool_mode new_mode = pt->adjusted_pf.mode;
1895    
1896     - if (old_mode > new_mode)
1897     + /*
1898     + * If we were in PM_FAIL mode, rollback of metadata failed. We're
1899     + * not going to recover without a thin_repair. So we never let the
1900     + * pool move out of the old mode. On the other hand a PM_READ_ONLY
1901     + * may have been due to a lack of metadata or data space, and may
1902     + * now work (ie. if the underlying devices have been resized).
1903     + */
1904     + if (old_mode == PM_FAIL)
1905     new_mode = old_mode;
1906    
1907     pool->ti = ti;
1908     @@ -2266,7 +2278,7 @@ static int pool_preresume(struct dm_target *ti)
1909     return r;
1910    
1911     if (need_commit1 || need_commit2)
1912     - (void) commit_or_fallback(pool);
1913     + (void) commit(pool);
1914    
1915     return 0;
1916     }
1917     @@ -2293,7 +2305,7 @@ static void pool_postsuspend(struct dm_target *ti)
1918    
1919     cancel_delayed_work(&pool->waker);
1920     flush_workqueue(pool->wq);
1921     - (void) commit_or_fallback(pool);
1922     + (void) commit(pool);
1923     }
1924    
1925     static int check_arg_count(unsigned argc, unsigned args_required)
1926     @@ -2427,7 +2439,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
1927     if (r)
1928     return r;
1929    
1930     - (void) commit_or_fallback(pool);
1931     + (void) commit(pool);
1932    
1933     r = dm_pool_reserve_metadata_snap(pool->pmd);
1934     if (r)
1935     @@ -2489,7 +2501,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
1936     DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
1937    
1938     if (!r)
1939     - (void) commit_or_fallback(pool);
1940     + (void) commit(pool);
1941    
1942     return r;
1943     }
1944     @@ -2544,7 +2556,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
1945    
1946     /* Commit to ensure statistics aren't out-of-date */
1947     if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
1948     - (void) commit_or_fallback(pool);
1949     + (void) commit(pool);
1950    
1951     r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
1952     if (r) {
1953     diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
1954     index af96e24ec328..1d75b1dc1e2e 100644
1955     --- a/drivers/md/persistent-data/dm-array.c
1956     +++ b/drivers/md/persistent-data/dm-array.c
1957     @@ -317,8 +317,16 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
1958     * The shadow op will often be a noop. Only insert if it really
1959     * copied data.
1960     */
1961     - if (dm_block_location(*block) != b)
1962     + if (dm_block_location(*block) != b) {
1963     + /*
1964     + * dm_tm_shadow_block will have already decremented the old
1965     + * block, but it is still referenced by the btree. We
1966     + * increment to stop the insert decrementing it below zero
1967     + * when overwriting the old value.
1968     + */
1969     + dm_tm_inc(info->btree_info.tm, b);
1970     r = insert_ablock(info, index, *block, root);
1971     + }
1972    
1973     return r;
1974     }
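
shadow_ablock() gains a dm_tm_inc() because, as the new comment says, dm_tm_shadow_block() has already dropped one reference on the old block while the btree still points at it; the insert that overwrites the btree entry will drop another, so without the extra increment the count would dip below zero. Roughly the same ownership dance, reduced to a plain array of counts with hypothetical names, looks like this:

/* Sketch: take an extra reference before an operation that will drop
 * the old one, so the count never falls below the references that
 * actually exist. Hypothetical refcounted block table. */
#include <stdio.h>

#define NBLOCKS 4

static int refcount[NBLOCKS];

static void get_block(int b)
{
	refcount[b]++;
}

static void put_block(int b)
{
	if (!refcount[b]) {
		printf("bug: block %d count already 0\n", b);
		return;
	}
	refcount[b]--;
}

/* Overwriting the slot drops one reference to the block it held. */
static void overwrite_slot(int *slot, int new_block)
{
	put_block(*slot);
	get_block(new_block);
	*slot = new_block;
}

int main(void)
{
	int slot;
	int old = 1, copy = 2;

	get_block(old);			/* reference held by the slot */
	slot = old;

	/* The shadow operation produced 'copy' and already dropped one
	 * count on 'old', even though the slot still points at it. */
	put_block(old);

	/* The fix: re-take a reference before the overwrite; skipping
	 * this line would trip the underflow check in put_block(). */
	get_block(old);
	overwrite_slot(&slot, copy);

	printf("old=%d copy=%d\n", refcount[old], refcount[copy]);
	return 0;
}
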
1975     diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
1976     index a7e8bf296388..064a3c271baa 100644
1977     --- a/drivers/md/persistent-data/dm-block-manager.c
1978     +++ b/drivers/md/persistent-data/dm-block-manager.c
1979     @@ -626,6 +626,12 @@ void dm_bm_set_read_only(struct dm_block_manager *bm)
1980     }
1981     EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
1982    
1983     +void dm_bm_set_read_write(struct dm_block_manager *bm)
1984     +{
1985     + bm->read_only = false;
1986     +}
1987     +EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
1988     +
1989     u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
1990     {
1991     return crc32c(~(u32) 0, data, len) ^ init_xor;
1992     diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
1993     index 9a82083a66b6..13cd58e1fe69 100644
1994     --- a/drivers/md/persistent-data/dm-block-manager.h
1995     +++ b/drivers/md/persistent-data/dm-block-manager.h
1996     @@ -108,9 +108,9 @@ int dm_bm_unlock(struct dm_block *b);
1997     int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
1998     struct dm_block *superblock);
1999    
2000     - /*
2001     - * Request data be prefetched into the cache.
2002     - */
2003     +/*
2004     + * Request data is prefetched into the cache.
2005     + */
2006     void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
2007    
2008     /*
2009     @@ -125,6 +125,7 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
2010     * be returned if you do.
2011     */
2012     void dm_bm_set_read_only(struct dm_block_manager *bm);
2013     +void dm_bm_set_read_write(struct dm_block_manager *bm);
2014    
2015     u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor);
2016    
2017     diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
2018     index 6058569fe86c..466a60bbd716 100644
2019     --- a/drivers/md/persistent-data/dm-space-map-common.c
2020     +++ b/drivers/md/persistent-data/dm-space-map-common.c
2021     @@ -381,7 +381,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
2022     }
2023    
2024     static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
2025     - uint32_t (*mutator)(void *context, uint32_t old),
2026     + int (*mutator)(void *context, uint32_t old, uint32_t *new),
2027     void *context, enum allocation_event *ev)
2028     {
2029     int r;
2030     @@ -410,11 +410,17 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
2031    
2032     if (old > 2) {
2033     r = sm_ll_lookup_big_ref_count(ll, b, &old);
2034     - if (r < 0)
2035     + if (r < 0) {
2036     + dm_tm_unlock(ll->tm, nb);
2037     return r;
2038     + }
2039     }
2040    
2041     - ref_count = mutator(context, old);
2042     + r = mutator(context, old, &ref_count);
2043     + if (r) {
2044     + dm_tm_unlock(ll->tm, nb);
2045     + return r;
2046     + }
2047    
2048     if (ref_count <= 2) {
2049     sm_set_bitmap(bm_le, bit, ref_count);
2050     @@ -465,9 +471,10 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
2051     return ll->save_ie(ll, index, &ie_disk);
2052     }
2053    
2054     -static uint32_t set_ref_count(void *context, uint32_t old)
2055     +static int set_ref_count(void *context, uint32_t old, uint32_t *new)
2056     {
2057     - return *((uint32_t *) context);
2058     + *new = *((uint32_t *) context);
2059     + return 0;
2060     }
2061    
2062     int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
2063     @@ -476,9 +483,10 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
2064     return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
2065     }
2066    
2067     -static uint32_t inc_ref_count(void *context, uint32_t old)
2068     +static int inc_ref_count(void *context, uint32_t old, uint32_t *new)
2069     {
2070     - return old + 1;
2071     + *new = old + 1;
2072     + return 0;
2073     }
2074    
2075     int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
2076     @@ -486,9 +494,15 @@ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
2077     return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
2078     }
2079    
2080     -static uint32_t dec_ref_count(void *context, uint32_t old)
2081     +static int dec_ref_count(void *context, uint32_t old, uint32_t *new)
2082     {
2083     - return old - 1;
2084     + if (!old) {
2085     + DMERR_LIMIT("unable to decrement a reference count below 0");
2086     + return -EINVAL;
2087     + }
2088     +
2089     + *new = old - 1;
2090     + return 0;
2091     }
2092    
2093     int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
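
sm_ll_mutate() now expects its mutator callbacks to return an error code and hand the new count back through an output parameter, which lets dec_ref_count() refuse to take a reference count below zero instead of wrapping a uint32_t around to 0xffffffff; the added dm_tm_unlock() calls make sure the new early-return paths drop the block that was locked earlier in the function. A standalone sketch of the callback-signature pattern, with hypothetical names, follows.

/* Sketch: report errors from a ref-count mutator via the return value,
 * passing the new count back through an output parameter. */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

static int inc_ref_count(uint32_t old, uint32_t *new_count)
{
	*new_count = old + 1;
	return 0;
}

static int dec_ref_count(uint32_t old, uint32_t *new_count)
{
	if (!old) {
		fprintf(stderr, "refusing to decrement a count below 0\n");
		return -EINVAL;
	}
	*new_count = old - 1;
	return 0;
}

static int mutate(uint32_t *count, int (*mutator)(uint32_t, uint32_t *))
{
	uint32_t new_count;
	int r = mutator(*count, &new_count);

	if (r)
		return r;	/* leave *count untouched on error */
	*count = new_count;
	return 0;
}

int main(void)
{
	uint32_t count = 1;

	mutate(&count, dec_ref_count);		/* 1 -> 0 */
	if (mutate(&count, dec_ref_count))	/* rejected, stays 0 */
		printf("second decrement rejected, count=%u\n", count);
	mutate(&count, inc_ref_count);		/* 0 -> 1 */
	printf("final count=%u\n", count);
	return 0;
}
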
2094     diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
2095     index 1c959684caef..58fc1eef7499 100644
2096     --- a/drivers/md/persistent-data/dm-space-map-metadata.c
2097     +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
2098     @@ -384,12 +384,16 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
2099     struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
2100    
2101     int r = sm_metadata_new_block_(sm, b);
2102     - if (r)
2103     + if (r) {
2104     DMERR("unable to allocate new metadata block");
2105     + return r;
2106     + }
2107    
2108     r = sm_metadata_get_nr_free(sm, &count);
2109     - if (r)
2110     + if (r) {
2111     DMERR("couldn't get free block count");
2112     + return r;
2113     + }
2114    
2115     check_threshold(&smm->threshold, count);
2116    
2117     diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
2118     index 30ee59052157..65728c25ea05 100644
2119     --- a/drivers/media/dvb-frontends/af9033.c
2120     +++ b/drivers/media/dvb-frontends/af9033.c
2121     @@ -170,18 +170,18 @@ static int af9033_rd_reg_mask(struct af9033_state *state, u32 reg, u8 *val,
2122     static int af9033_wr_reg_val_tab(struct af9033_state *state,
2123     const struct reg_val *tab, int tab_len)
2124     {
2125     +#define MAX_TAB_LEN 212
2126     int ret, i, j;
2127     - u8 buf[MAX_XFER_SIZE];
2128     + u8 buf[1 + MAX_TAB_LEN];
2129     +
2130     + dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
2131    
2132     if (tab_len > sizeof(buf)) {
2133     - dev_warn(&state->i2c->dev,
2134     - "%s: i2c wr len=%d is too big!\n",
2135     - KBUILD_MODNAME, tab_len);
2136     + dev_warn(&state->i2c->dev, "%s: tab len %d is too big\n",
2137     + KBUILD_MODNAME, tab_len);
2138     return -EINVAL;
2139     }
2140    
2141     - dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
2142     -
2143     for (i = 0, j = 0; i < tab_len; i++) {
2144     buf[j] = tab[i].val;
2145    
2146     diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
2147     index 51447a04d8f2..03930d5e9fea 100644
2148     --- a/drivers/media/dvb-frontends/cxd2820r_core.c
2149     +++ b/drivers/media/dvb-frontends/cxd2820r_core.c
2150     @@ -34,7 +34,7 @@ static int cxd2820r_wr_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
2151     {
2152     .addr = i2c,
2153     .flags = 0,
2154     - .len = sizeof(buf),
2155     + .len = len + 1,
2156     .buf = buf,
2157     }
2158     };
2159     @@ -75,7 +75,7 @@ static int cxd2820r_rd_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
2160     }, {
2161     .addr = i2c,
2162     .flags = I2C_M_RD,
2163     - .len = sizeof(buf),
2164     + .len = len,
2165     .buf = buf,
2166     }
2167     };
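
Both cxd2820r hunks replace a message length taken from sizeof() of a fixed scratch buffer with the number of bytes actually prepared (len + 1 for the register write, len for the read), so the transfer length matches the payload again. A minimal sketch of the difference, using a hypothetical xfer_msg type and build_write() helper:

/* Sketch: the message length must be the bytes actually used, not the
 * capacity of the scratch buffer. Hypothetical types and helper. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct xfer_msg {
	const uint8_t *buf;
	size_t len;		/* bytes to transfer on the wire */
};

#define BUF_SIZE 64

static struct xfer_msg build_write(uint8_t *buf, uint8_t reg,
				   const uint8_t *val, size_t len)
{
	struct xfer_msg msg;

	buf[0] = reg;
	memcpy(&buf[1], val, len);

	msg.buf = buf;
	/* Wrong: msg.len = BUF_SIZE;  -- would clock out 64 bytes,
	 * 61 of them stale stack contents, for a 3-byte register write. */
	msg.len = len + 1;	/* register address + payload */
	return msg;
}

int main(void)
{
	uint8_t buf[BUF_SIZE];
	uint8_t val[2] = { 0xaa, 0x55 };
	struct xfer_msg msg = build_write(buf, 0x10, val, sizeof(val));

	printf("transfer length: %zu bytes (buffer capacity %d)\n",
	       msg.len, BUF_SIZE);
	return 0;
}

The af9035 hunks further down make the same kind of correction for that driver's usb_req command lengths.
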
2168     diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
2169     index 3f584a7d0781..bee7946faa7c 100644
2170     --- a/drivers/media/i2c/wm8775.c
2171     +++ b/drivers/media/i2c/wm8775.c
2172     @@ -130,12 +130,10 @@ static int wm8775_s_routing(struct v4l2_subdev *sd,
2173     return -EINVAL;
2174     }
2175     state->input = input;
2176     - if (!v4l2_ctrl_g_ctrl(state->mute))
2177     + if (v4l2_ctrl_g_ctrl(state->mute))
2178     return 0;
2179     if (!v4l2_ctrl_g_ctrl(state->vol))
2180     return 0;
2181     - if (!v4l2_ctrl_g_ctrl(state->bal))
2182     - return 0;
2183     wm8775_set_audio(sd, 1);
2184     return 0;
2185     }
2186     diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
2187     index c6532de0eac7..4f0aaa51ae0d 100644
2188     --- a/drivers/media/pci/bt8xx/bttv-driver.c
2189     +++ b/drivers/media/pci/bt8xx/bttv-driver.c
2190     @@ -4182,7 +4182,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
2191     }
2192     btv->std = V4L2_STD_PAL;
2193     init_irqreg(btv);
2194     - v4l2_ctrl_handler_setup(hdl);
2195     + if (!bttv_tvcards[btv->c.type].no_video)
2196     + v4l2_ctrl_handler_setup(hdl);
2197     if (hdl->error) {
2198     result = hdl->error;
2199     goto fail2;
2200     diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
2201     index d37ee37aaefe..896bd8b974b5 100644
2202     --- a/drivers/media/pci/saa7164/saa7164-core.c
2203     +++ b/drivers/media/pci/saa7164/saa7164-core.c
2204     @@ -1354,9 +1354,11 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
2205     if (fw_debug) {
2206     dev->kthread = kthread_run(saa7164_thread_function, dev,
2207     "saa7164 debug");
2208     - if (!dev->kthread)
2209     + if (IS_ERR(dev->kthread)) {
2210     + dev->kthread = NULL;
2211     printk(KERN_ERR "%s() Failed to create "
2212     "debug kernel thread\n", __func__);
2213     + }
2214     }
2215    
2216     } /* != BOARD_UNKNOWN */
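
kthread_run() reports failure as an ERR_PTR-encoded pointer, never NULL, so the old `if (!dev->kthread)` test could not catch a failed thread start; the hunk switches to IS_ERR() and resets the pointer to NULL so later NULL checks keep working. A small userspace sketch of the ERR_PTR convention follows; the ERR_PTR/IS_ERR/PTR_ERR helpers here are simplified stand-ins for the kernel's, and start_worker() is hypothetical.

/* Sketch of the ERR_PTR convention: errors are encoded in the pointer
 * value itself, so failure must be tested with IS_ERR(), not !ptr. */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for a constructor that can fail, e.g. starting a thread. */
static void *start_worker(int fail)
{
	static int dummy;

	return fail ? ERR_PTR(-ENOMEM) : &dummy;
}

int main(void)
{
	void *worker = start_worker(1);

	if (!worker)
		printf("NULL check: never taken, the error goes unnoticed\n");

	if (IS_ERR(worker)) {
		printf("IS_ERR check: start failed, err=%ld\n",
		       PTR_ERR(worker));
		worker = NULL;	/* keep later NULL checks meaningful */
	}
	return 0;
}
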
2217     diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
2218     index 036e2f54f4db..3ed1f5669f79 100644
2219     --- a/drivers/media/radio/radio-tea5764.c
2220     +++ b/drivers/media/radio/radio-tea5764.c
2221     @@ -356,7 +356,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
2222     So we keep it as-is. */
2223     return -EINVAL;
2224     }
2225     - clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL);
2226     + freq = clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL);
2227     tea5764_power_up(radio);
2228     tea5764_tune(radio, (freq * 125) / 2);
2229     return 0;
2230     diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
2231     index 06ac69245ca1..f4bb456b9a23 100644
2232     --- a/drivers/media/radio/tef6862.c
2233     +++ b/drivers/media/radio/tef6862.c
2234     @@ -112,7 +112,7 @@ static int tef6862_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequen
2235     if (f->tuner != 0)
2236     return -EINVAL;
2237    
2238     - clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ);
2239     + freq = clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ);
2240     pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL;
2241     i2cmsg[0] = (MODE_PRESET << MODE_SHIFT) | WM_SUB_PLLM;
2242     i2cmsg[1] = (pll >> 8) & 0xff;
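
Both radio-frequency fixes correct the same misuse: clamp() evaluates to the clamped value and leaves its argument untouched, so the drivers were computing a bound and discarding it. The fixes assign the result back to freq. A one-file sketch of the difference, using a simplified clamp macro in place of the kernel's type-checked version and illustrative frequency limits:

/* Sketch: clamp() yields a value; discarding it leaves the input
 * unchanged. Simplified macro (evaluates arguments more than once),
 * standing in for the kernel's clamp(); limits are illustrative. */
#include <stdio.h>

#define clamp(val, lo, hi) \
	((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

#define FREQ_MIN	87500
#define FREQ_MAX	108000

int main(void)
{
	unsigned int freq = 150000;	/* out of range */

	clamp(freq, FREQ_MIN, FREQ_MAX);	/* the bug: result dropped */
	printf("after discarded clamp: %u\n", freq);	/* still 150000 */

	freq = clamp(freq, FREQ_MIN, FREQ_MAX);		/* the fix */
	printf("after assigned clamp:  %u\n", freq);	/* 108000 */
	return 0;
}
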
2243     diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
2244     index c8fcd78425bd..8f9b2cea88f0 100644
2245     --- a/drivers/media/usb/dvb-usb-v2/af9035.c
2246     +++ b/drivers/media/usb/dvb-usb-v2/af9035.c
2247     @@ -131,7 +131,7 @@ static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
2248     {
2249     u8 wbuf[MAX_XFER_SIZE];
2250     u8 mbox = (reg >> 16) & 0xff;
2251     - struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL };
2252     + struct usb_req req = { CMD_MEM_WR, mbox, 6 + len, wbuf, 0, NULL };
2253    
2254     if (6 + len > sizeof(wbuf)) {
2255     dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n",
2256     @@ -238,14 +238,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
2257     } else {
2258     /* I2C */
2259     u8 buf[MAX_XFER_SIZE];
2260     - struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf),
2261     + struct usb_req req = { CMD_I2C_RD, 0, 5 + msg[0].len,
2262     buf, msg[1].len, msg[1].buf };
2263    
2264     if (5 + msg[0].len > sizeof(buf)) {
2265     dev_warn(&d->udev->dev,
2266     "%s: i2c xfer: len=%d is too big!\n",
2267     KBUILD_MODNAME, msg[0].len);
2268     - return -EOPNOTSUPP;
2269     + ret = -EOPNOTSUPP;
2270     + goto unlock;
2271     }
2272     req.mbox |= ((msg[0].addr & 0x80) >> 3);
2273     buf[0] = msg[1].len;
2274     @@ -274,14 +275,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
2275     } else {
2276     /* I2C */
2277     u8 buf[MAX_XFER_SIZE];
2278     - struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf,
2279     - 0, NULL };
2280     + struct usb_req req = { CMD_I2C_WR, 0, 5 + msg[0].len,
2281     + buf, 0, NULL };
2282    
2283     if (5 + msg[0].len > sizeof(buf)) {
2284     dev_warn(&d->udev->dev,
2285     "%s: i2c xfer: len=%d is too big!\n",
2286     KBUILD_MODNAME, msg[0].len);
2287     - return -EOPNOTSUPP;
2288     + ret = -EOPNOTSUPP;
2289     + goto unlock;
2290     }
2291     req.mbox |= ((msg[0].addr & 0x80) >> 3);
2292     buf[0] = msg[0].len;
2293     @@ -319,6 +321,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
2294     ret = -EOPNOTSUPP;
2295     }
2296    
2297     +unlock:
2298     mutex_unlock(&d->i2c_mutex);
2299    
2300     if (ret < 0)
2301     @@ -1534,6 +1537,8 @@ static const struct usb_device_id af9035_id_table[] = {
2302     /* XXX: that same ID [0ccd:0099] is used by af9015 driver too */
2303     { DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099,
2304     &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
2305     + { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
2306     + &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
2307     { }
2308     };
2309     MODULE_DEVICE_TABLE(usb, af9035_id_table);
2310     diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
2311     index c28d4e29af1a..e76a733e8a28 100644
2312     --- a/drivers/mtd/nand/pxa3xx_nand.c
2313     +++ b/drivers/mtd/nand/pxa3xx_nand.c
2314     @@ -1241,10 +1241,6 @@ static struct of_device_id pxa3xx_nand_dt_ids[] = {
2315     .compatible = "marvell,pxa3xx-nand",
2316     .data = (void *)PXA3XX_NAND_VARIANT_PXA,
2317     },
2318     - {
2319     - .compatible = "marvell,armada370-nand",
2320     - .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
2321     - },
2322     {}
2323     };
2324     MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
2325     diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
2326     index 50b853a79d77..46dfb1378c17 100644
2327     --- a/drivers/net/ethernet/allwinner/sun4i-emac.c
2328     +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
2329     @@ -717,8 +717,7 @@ static int emac_open(struct net_device *dev)
2330     if (netif_msg_ifup(db))
2331     dev_dbg(db->dev, "enabling %s\n", dev->name);
2332    
2333     - if (devm_request_irq(db->dev, dev->irq, &emac_interrupt,
2334     - 0, dev->name, dev))
2335     + if (request_irq(dev->irq, &emac_interrupt, 0, dev->name, dev))
2336     return -EAGAIN;
2337    
2338     /* Initialize EMAC board */
2339     @@ -774,6 +773,8 @@ static int emac_stop(struct net_device *ndev)
2340    
2341     emac_shutdown(ndev);
2342    
2343     + free_irq(ndev->irq, ndev);
2344     +
2345     return 0;
2346     }
2347    
2348     diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
2349     index e7266759a10b..556da81ab092 100644
2350     --- a/drivers/net/ethernet/intel/igb/e1000_phy.c
2351     +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
2352     @@ -1730,7 +1730,10 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
2353     * ownership of the resources, wait and try again to
2354     * see if they have relinquished the resources yet.
2355     */
2356     - udelay(usec_interval);
2357     + if (usec_interval >= 1000)
2358     + mdelay(usec_interval/1000);
2359     + else
2360     + udelay(usec_interval);
2361     }
2362     ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
2363     if (ret_val)
2364     diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
2365     index f4864807e15b..e493150d50c3 100644
2366     --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
2367     +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
2368     @@ -3966,18 +3966,20 @@ static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
2369     int quick_drop;
2370     s32 t[3], f[3] = {5180, 5500, 5785};
2371    
2372     - if (!(pBase->miscConfiguration & BIT(1)))
2373     + if (!(pBase->miscConfiguration & BIT(4)))
2374     return;
2375    
2376     - if (freq < 4000)
2377     - quick_drop = eep->modalHeader2G.quick_drop;
2378     - else {
2379     - t[0] = eep->base_ext1.quick_drop_low;
2380     - t[1] = eep->modalHeader5G.quick_drop;
2381     - t[2] = eep->base_ext1.quick_drop_high;
2382     - quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
2383     + if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9340(ah)) {
2384     + if (freq < 4000) {
2385     + quick_drop = eep->modalHeader2G.quick_drop;
2386     + } else {
2387     + t[0] = eep->base_ext1.quick_drop_low;
2388     + t[1] = eep->modalHeader5G.quick_drop;
2389     + t[2] = eep->base_ext1.quick_drop_high;
2390     + quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
2391     + }
2392     + REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
2393     }
2394     - REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
2395     }
2396    
2397     static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, bool is2ghz)
2398     @@ -4017,7 +4019,7 @@ static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz)
2399     struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
2400     u8 bias;
2401    
2402     - if (!(eep->baseEepHeader.featureEnable & 0x40))
2403     + if (!(eep->baseEepHeader.miscConfiguration & 0x40))
2404     return;
2405    
2406     if (!AR_SREV_9300(ah))
2407     diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
2408     index dd30452df966..7fe6b5923a9c 100644
2409     --- a/drivers/net/wireless/ath/ath9k/xmit.c
2410     +++ b/drivers/net/wireless/ath/ath9k/xmit.c
2411     @@ -1275,6 +1275,10 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
2412     if (!rts_thresh || (len > rts_thresh))
2413     rts = true;
2414     }
2415     +
2416     + if (!aggr)
2417     + len = fi->framelen;
2418     +
2419     ath_buf_set_rate(sc, bf, &info, len, rts);
2420     }
2421    
2422     diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
2423     index 76e14c046d94..200f0d98471a 100644
2424     --- a/drivers/net/wireless/iwlwifi/iwl-7000.c
2425     +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
2426     @@ -125,6 +125,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
2427     .ht_params = &iwl7000_ht_params,
2428     .nvm_ver = IWL7260_NVM_VERSION,
2429     .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
2430     + .host_interrupt_operation_mode = true,
2431     };
2432    
2433     const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
2434     @@ -135,6 +136,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
2435     .nvm_ver = IWL7260_NVM_VERSION,
2436     .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
2437     .high_temp = true,
2438     + .host_interrupt_operation_mode = true,
2439     };
2440    
2441     const struct iwl_cfg iwl7260_2n_cfg = {
2442     @@ -144,6 +146,7 @@ const struct iwl_cfg iwl7260_2n_cfg = {
2443     .ht_params = &iwl7000_ht_params,
2444     .nvm_ver = IWL7260_NVM_VERSION,
2445     .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
2446     + .host_interrupt_operation_mode = true,
2447     };
2448    
2449     const struct iwl_cfg iwl7260_n_cfg = {
2450     @@ -153,6 +156,7 @@ const struct iwl_cfg iwl7260_n_cfg = {
2451     .ht_params = &iwl7000_ht_params,
2452     .nvm_ver = IWL7260_NVM_VERSION,
2453     .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
2454     + .host_interrupt_operation_mode = true,
2455     };
2456    
2457     const struct iwl_cfg iwl3160_2ac_cfg = {
2458     @@ -162,6 +166,7 @@ const struct iwl_cfg iwl3160_2ac_cfg = {
2459     .ht_params = &iwl7000_ht_params,
2460     .nvm_ver = IWL3160_NVM_VERSION,
2461     .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
2462     + .host_interrupt_operation_mode = true,
2463     };
2464    
2465     const struct iwl_cfg iwl3160_2n_cfg = {
2466     @@ -171,6 +176,7 @@ const struct iwl_cfg iwl3160_2n_cfg = {
2467     .ht_params = &iwl7000_ht_params,
2468     .nvm_ver = IWL3160_NVM_VERSION,
2469     .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
2470     + .host_interrupt_operation_mode = true,
2471     };
2472    
2473     const struct iwl_cfg iwl3160_n_cfg = {
2474     @@ -180,6 +186,7 @@ const struct iwl_cfg iwl3160_n_cfg = {
2475     .ht_params = &iwl7000_ht_params,
2476     .nvm_ver = IWL3160_NVM_VERSION,
2477     .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
2478     + .host_interrupt_operation_mode = true,
2479     };
2480    
2481     MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
2482     diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
2483     index b03c25e14903..028ae91edf55 100644
2484     --- a/drivers/net/wireless/iwlwifi/iwl-config.h
2485     +++ b/drivers/net/wireless/iwlwifi/iwl-config.h
2486     @@ -207,6 +207,8 @@ struct iwl_eeprom_params {
2487     * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
2488     * @internal_wimax_coex: internal wifi/wimax combo device
2489     * @high_temp: Is this NIC is designated to be in high temperature.
2490     + * @host_interrupt_operation_mode: device needs host interrupt operation
2491     + * mode set
2492     *
2493     * We enable the driver to be backward compatible wrt. hardware features.
2494     * API differences in uCode shouldn't be handled here but through TLVs
2495     @@ -235,6 +237,7 @@ struct iwl_cfg {
2496     enum iwl_led_mode led_mode;
2497     const bool rx_with_siso_diversity;
2498     const bool internal_wimax_coex;
2499     + const bool host_interrupt_operation_mode;
2500     bool high_temp;
2501     };
2502    
2503     diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
2504     index a276af476e2d..641420528771 100644
2505     --- a/drivers/net/wireless/iwlwifi/iwl-csr.h
2506     +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
2507     @@ -463,14 +463,11 @@
2508     * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
2509     *
2510     * default interrupt coalescing timer is 64 x 32 = 2048 usecs
2511     - * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
2512     */
2513     #define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
2514     #define IWL_HOST_INT_TIMEOUT_DEF (0x40)
2515     #define IWL_HOST_INT_TIMEOUT_MIN (0x0)
2516     -#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
2517     -#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
2518     -#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
2519     +#define IWL_HOST_INT_OPER_MODE BIT(31)
2520    
2521     /*****************************************************************************
2522     * 7000/3000 series SHR DTS addresses *
2523     diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
2524     index aac81b8984b0..c196425a6723 100644
2525     --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
2526     +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
2527     @@ -119,6 +119,10 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct file *file,
2528    
2529     if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
2530     return -EINVAL;
2531     + if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
2532     + return -EINVAL;
2533     + if (drain < 0 || drain > 1)
2534     + return -EINVAL;
2535    
2536     mutex_lock(&mvm->mutex);
2537    
2538     diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
2539     index 3f237b42eb36..83d28bcf0d48 100644
2540     --- a/drivers/net/wireless/iwlwifi/pcie/rx.c
2541     +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
2542     @@ -489,6 +489,10 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
2543    
2544     /* Set interrupt coalescing timer to default (2048 usecs) */
2545     iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
2546     +
2547     + /* W/A for interrupt coalescing bug in 7260 and 3160 */
2548     + if (trans->cfg->host_interrupt_operation_mode)
2549     + iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
2550     }
2551    
2552     static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
2553     diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
2554     index c3f904d422b0..6bc31003a32c 100644
2555     --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
2556     +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
2557     @@ -276,9 +276,6 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
2558     spin_lock_irqsave(&trans_pcie->irq_lock, flags);
2559     iwl_pcie_apm_init(trans);
2560    
2561     - /* Set interrupt coalescing calibration timer to default (512 usecs) */
2562     - iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
2563     -
2564     spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
2565    
2566     iwl_pcie_set_pwr(trans, false);
2567     diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
2568     index f084412eee0b..bf63e13d14c9 100644
2569     --- a/drivers/net/wireless/mwifiex/sta_ioctl.c
2570     +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
2571     @@ -319,8 +319,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
2572     if (bss_desc && bss_desc->ssid.ssid_len &&
2573     (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor.
2574     ssid, &bss_desc->ssid))) {
2575     - kfree(bss_desc);
2576     - return 0;
2577     + ret = 0;
2578     + goto done;
2579     }
2580    
2581     /* Exit Adhoc mode first */
2582     diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
2583     index 98f7b9b89507..53dc57127ca3 100644
2584     --- a/drivers/pci/pci-driver.c
2585     +++ b/drivers/pci/pci-driver.c
2586     @@ -19,6 +19,7 @@
2587     #include <linux/cpu.h>
2588     #include <linux/pm_runtime.h>
2589     #include <linux/suspend.h>
2590     +#include <linux/kexec.h>
2591     #include "pci.h"
2592    
2593     struct pci_dynid {
2594     @@ -388,12 +389,17 @@ static void pci_device_shutdown(struct device *dev)
2595     pci_msi_shutdown(pci_dev);
2596     pci_msix_shutdown(pci_dev);
2597    
2598     +#ifdef CONFIG_KEXEC
2599     /*
2600     - * Turn off Bus Master bit on the device to tell it to not
2601     - * continue to do DMA. Don't touch devices in D3cold or unknown states.
2602     + * If this is a kexec reboot, turn off Bus Master bit on the
2603     + * device to tell it to not continue to do DMA. Don't touch
2604     + * devices in D3cold or unknown states.
2605     + * If it is not a kexec reboot, firmware will hit the PCI
2606     + * devices with a big hammer and stop their DMA anyway.
2607     */
2608     - if (pci_dev->current_state <= PCI_D3hot)
2609     + if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
2610     pci_clear_master(pci_dev);
2611     +#endif
2612     }
2613    
2614     #ifdef CONFIG_PM
2615     diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
2616     index 032df3799efb..8b5e4c712a01 100644
2617     --- a/drivers/regulator/pfuze100-regulator.c
2618     +++ b/drivers/regulator/pfuze100-regulator.c
2619     @@ -38,7 +38,7 @@
2620    
2621     #define PFUZE100_DEVICEID 0x0
2622     #define PFUZE100_REVID 0x3
2623     -#define PFUZE100_FABID 0x3
2624     +#define PFUZE100_FABID 0x4
2625    
2626     #define PFUZE100_SW1ABVOL 0x20
2627     #define PFUZE100_SW1CVOL 0x2e
2628     diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
2629     index 741892632ae0..b86eec3ffba8 100644
2630     --- a/drivers/rtc/rtc-at91rm9200.c
2631     +++ b/drivers/rtc/rtc-at91rm9200.c
2632     @@ -220,6 +220,8 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
2633    
2634     at91_alarm_year = tm.tm_year;
2635    
2636     + tm.tm_mon = alrm->time.tm_mon;
2637     + tm.tm_mday = alrm->time.tm_mday;
2638     tm.tm_hour = alrm->time.tm_hour;
2639     tm.tm_min = alrm->time.tm_min;
2640     tm.tm_sec = alrm->time.tm_sec;
2641     diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
2642     index e7108045f553..d13c532b68cd 100644
2643     --- a/drivers/staging/comedi/drivers/amplc_pc263.c
2644     +++ b/drivers/staging/comedi/drivers/amplc_pc263.c
2645     @@ -68,6 +68,9 @@ static int pc263_do_insn_bits(struct comedi_device *dev,
2646     outb(s->state & 0xFF, dev->iobase);
2647     outb(s->state >> 8, dev->iobase + 1);
2648     }
2649     +
2650     + data[1] = s->state;
2651     +
2652     return insn->n;
2653     }
2654    
2655     diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
2656     index 145bb48f618e..a9c77af3b76d 100644
2657     --- a/drivers/staging/comedi/drivers/amplc_pci263.c
2658     +++ b/drivers/staging/comedi/drivers/amplc_pci263.c
2659     @@ -55,6 +55,9 @@ static int pci263_do_insn_bits(struct comedi_device *dev,
2660     outb(s->state & 0xFF, dev->iobase);
2661     outb(s->state >> 8, dev->iobase + 1);
2662     }
2663     +
2664     + data[1] = s->state;
2665     +
2666     return insn->n;
2667     }
2668    
2669     diff --git a/drivers/staging/comedi/drivers/ssv_dnp.c b/drivers/staging/comedi/drivers/ssv_dnp.c
2670     index 11758a515c1b..0e687de7848f 100644
2671     --- a/drivers/staging/comedi/drivers/ssv_dnp.c
2672     +++ b/drivers/staging/comedi/drivers/ssv_dnp.c
2673     @@ -83,11 +83,11 @@ static int dnp_dio_insn_bits(struct comedi_device *dev,
2674    
2675     /* on return, data[1] contains the value of the digital input lines. */
2676     outb(PADR, CSCIR);
2677     - data[0] = inb(CSCDR);
2678     + data[1] = inb(CSCDR);
2679     outb(PBDR, CSCIR);
2680     - data[0] += inb(CSCDR) << 8;
2681     + data[1] += inb(CSCDR) << 8;
2682     outb(PCDR, CSCIR);
2683     - data[0] += ((inb(CSCDR) & 0xF0) << 12);
2684     + data[1] += ((inb(CSCDR) & 0xF0) << 12);
2685    
2686     return insn->n;
2687    
2688     diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2689     index 243c6729c320..c5c366790e6a 100644
2690     --- a/drivers/usb/core/hub.c
2691     +++ b/drivers/usb/core/hub.c
2692     @@ -4836,8 +4836,9 @@ static void hub_events(void)
2693     hub->ports[i - 1]->child;
2694    
2695     dev_dbg(hub_dev, "warm reset port %d\n", i);
2696     - if (!udev || !(portstatus &
2697     - USB_PORT_STAT_CONNECTION)) {
2698     + if (!udev ||
2699     + !(portstatus & USB_PORT_STAT_CONNECTION) ||
2700     + udev->state == USB_STATE_NOTATTACHED) {
2701     status = hub_port_reset(hub, i,
2702     NULL, HUB_BH_RESET_TIME,
2703     true);
2704     diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
2705     index 7fa93f4bc507..056da977ebdf 100644
2706     --- a/drivers/usb/dwc3/ep0.c
2707     +++ b/drivers/usb/dwc3/ep0.c
2708     @@ -459,6 +459,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
2709     dep = dwc3_wIndex_to_dep(dwc, wIndex);
2710     if (!dep)
2711     return -EINVAL;
2712     + if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
2713     + break;
2714     ret = __dwc3_gadget_ep_set_halt(dep, set);
2715     if (ret)
2716     return -EINVAL;
2717     diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2718     index 5452c0fce360..02e44fcaf205 100644
2719     --- a/drivers/usb/dwc3/gadget.c
2720     +++ b/drivers/usb/dwc3/gadget.c
2721     @@ -1200,9 +1200,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
2722     else
2723     dep->flags |= DWC3_EP_STALL;
2724     } else {
2725     - if (dep->flags & DWC3_EP_WEDGE)
2726     - return 0;
2727     -
2728     ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2729     DWC3_DEPCMD_CLEARSTALL, &params);
2730     if (ret)
2731     @@ -1210,7 +1207,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
2732     value ? "set" : "clear",
2733     dep->name);
2734     else
2735     - dep->flags &= ~DWC3_EP_STALL;
2736     + dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
2737     }
2738    
2739     return ret;
2740     diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
2741     index d4f0f3305759..7c0adb9812aa 100644
2742     --- a/drivers/usb/gadget/composite.c
2743     +++ b/drivers/usb/gadget/composite.c
2744     @@ -593,6 +593,7 @@ static void reset_config(struct usb_composite_dev *cdev)
2745     bitmap_zero(f->endpoints, 32);
2746     }
2747     cdev->config = NULL;
2748     + cdev->delayed_status = 0;
2749     }
2750    
2751     static int set_config(struct usb_composite_dev *cdev,
2752     diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2753     index 6bfbd80ec2b9..55fc0c39b7e1 100644
2754     --- a/drivers/usb/host/xhci-ring.c
2755     +++ b/drivers/usb/host/xhci-ring.c
2756     @@ -2929,8 +2929,58 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2757     }
2758    
2759     while (1) {
2760     - if (room_on_ring(xhci, ep_ring, num_trbs))
2761     - break;
2762     + if (room_on_ring(xhci, ep_ring, num_trbs)) {
2763     + union xhci_trb *trb = ep_ring->enqueue;
2764     + unsigned int usable = ep_ring->enq_seg->trbs +
2765     + TRBS_PER_SEGMENT - 1 - trb;
2766     + u32 nop_cmd;
2767     +
2768     + /*
2769     + * Section 4.11.7.1 TD Fragments states that a link
2770     + * TRB must only occur at the boundary between
2771     + * data bursts (eg 512 bytes for 480M).
2772     + * While it is possible to split a large fragment
2773     + * we don't know the size yet.
2774     + * Simplest solution is to fill the trb before the
2775     + * LINK with nop commands.
2776     + */
2777     + if (num_trbs == 1 || num_trbs <= usable || usable == 0)
2778     + break;
2779     +
2780     + if (ep_ring->type != TYPE_BULK)
2781     + /*
2782     + * While isoc transfers might have a buffer that
2783     + * crosses a 64k boundary, it is unlikely.
2784     + * Since we can't add NOPs without generating
2785     + * gaps in the traffic just hope it never
2786     + * happens at the end of the ring.
2787     + * This could be fixed by writing a LINK TRB
2788     + * instead of the first NOP - however the
2789     + * TRB_TYPE_LINK_LE32() calls would all need
2790     + * changing to check the ring length.
2791     + */
2792     + break;
2793     +
2794     + if (num_trbs >= TRBS_PER_SEGMENT) {
2795     + xhci_err(xhci, "Too many fragments %d, max %d\n",
2796     + num_trbs, TRBS_PER_SEGMENT - 1);
2797     + return -ENOMEM;
2798     + }
2799     +
2800     + nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
2801     + ep_ring->cycle_state);
2802     + ep_ring->num_trbs_free -= usable;
2803     + do {
2804     + trb->generic.field[0] = 0;
2805     + trb->generic.field[1] = 0;
2806     + trb->generic.field[2] = 0;
2807     + trb->generic.field[3] = nop_cmd;
2808     + trb++;
2809     + } while (--usable);
2810     + ep_ring->enqueue = trb;
2811     + if (room_on_ring(xhci, ep_ring, num_trbs))
2812     + break;
2813     + }
2814    
2815     if (ep_ring == xhci->cmd_ring) {
2816     xhci_err(xhci, "Do not support expand command ring\n");
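
The prepare_ring() change above works around the TD-fragment rule (Section 4.11.7.1, quoted in the new comment) by refusing to start a multi-TRB bulk TD that would straddle the link TRB at the end of a ring segment: the remaining slots are filled with no-op TRBs and the TD starts at the top of the next segment. Reduced to a plain circular buffer with hypothetical names and a fixed array instead of linked segments, the pattern looks roughly like this:

/* Sketch: pad to the end of a ring segment with no-ops when a
 * multi-slot request must not be split across the segment boundary. */
#include <stdio.h>

#define SEG_SIZE	8	/* usable slots per segment (illustrative) */
#define NUM_SEGS	2

enum slot_type { SLOT_FREE, SLOT_NOP, SLOT_DATA };

static enum slot_type ring[NUM_SEGS * SEG_SIZE];
static unsigned int enqueue;	/* next free slot */

static void queue_request(unsigned int num_slots)
{
	unsigned int usable = SEG_SIZE - (enqueue % SEG_SIZE);
	unsigned int i;

	/* Mirrors the xhci error path: a TD needing more TRBs than a
	 * whole segment holds cannot be queued at all. */
	if (num_slots > SEG_SIZE) {
		fprintf(stderr, "request larger than a segment\n");
		return;
	}

	/* Pad out the segment so the request starts on a fresh one. */
	if (num_slots > 1 && num_slots > usable)
		for (i = 0; i < usable; i++)
			ring[enqueue++] = SLOT_NOP;

	for (i = 0; i < num_slots; i++)
		ring[enqueue++] = SLOT_DATA;
}

int main(void)
{
	unsigned int i;

	queue_request(5);	/* fits: slots 0..4 */
	queue_request(4);	/* would straddle: slots 5..7 become no-ops */

	for (i = 0; i < NUM_SEGS * SEG_SIZE; i++)
		printf("slot %u: %s\n", i,
		       ring[i] == SLOT_NOP ? "NOP" :
		       ring[i] == SLOT_DATA ? "DATA" : "free");
	return 0;
}

The real hunk additionally leaves non-bulk rings alone and keeps the existing room_on_ring() retry loop, as its comments explain.
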
2817     diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
2818     index ae959746f77f..0c593afc3185 100644
2819     --- a/drivers/usb/musb/musb_cppi41.c
2820     +++ b/drivers/usb/musb/musb_cppi41.c
2821     @@ -38,6 +38,7 @@ struct cppi41_dma_channel {
2822     u32 prog_len;
2823     u32 transferred;
2824     u32 packet_sz;
2825     + struct list_head tx_check;
2826     };
2827    
2828     #define MUSB_DMA_NUM_CHANNELS 15
2829     @@ -47,6 +48,8 @@ struct cppi41_dma_controller {
2830     struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
2831     struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
2832     struct musb *musb;
2833     + struct hrtimer early_tx;
2834     + struct list_head early_tx_list;
2835     u32 rx_mode;
2836     u32 tx_mode;
2837     u32 auto_req;
2838     @@ -96,31 +99,27 @@ static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
2839     cppi41_channel->usb_toggle = toggle;
2840     }
2841    
2842     -static void cppi41_dma_callback(void *private_data)
2843     +static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
2844     {
2845     - struct dma_channel *channel = private_data;
2846     - struct cppi41_dma_channel *cppi41_channel = channel->private_data;
2847     - struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
2848     - struct musb *musb = hw_ep->musb;
2849     - unsigned long flags;
2850     - struct dma_tx_state txstate;
2851     - u32 transferred;
2852     + u8 epnum = hw_ep->epnum;
2853     + struct musb *musb = hw_ep->musb;
2854     + void __iomem *epio = musb->endpoints[epnum].regs;
2855     + u16 csr;
2856    
2857     - spin_lock_irqsave(&musb->lock, flags);
2858     + csr = musb_readw(epio, MUSB_TXCSR);
2859     + if (csr & MUSB_TXCSR_TXPKTRDY)
2860     + return false;
2861     + return true;
2862     +}
2863    
2864     - dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
2865     - &txstate);
2866     - transferred = cppi41_channel->prog_len - txstate.residue;
2867     - cppi41_channel->transferred += transferred;
2868     +static void cppi41_dma_callback(void *private_data);
2869    
2870     - dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
2871     - hw_ep->epnum, cppi41_channel->transferred,
2872     - cppi41_channel->total_len);
2873     +static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
2874     +{
2875     + struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
2876     + struct musb *musb = hw_ep->musb;
2877    
2878     - update_rx_toggle(cppi41_channel);
2879     -
2880     - if (cppi41_channel->transferred == cppi41_channel->total_len ||
2881     - transferred < cppi41_channel->packet_sz) {
2882     + if (!cppi41_channel->prog_len) {
2883    
2884     /* done, complete */
2885     cppi41_channel->channel.actual_len =
2886     @@ -150,13 +149,11 @@ static void cppi41_dma_callback(void *private_data)
2887     remain_bytes,
2888     direction,
2889     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
2890     - if (WARN_ON(!dma_desc)) {
2891     - spin_unlock_irqrestore(&musb->lock, flags);
2892     + if (WARN_ON(!dma_desc))
2893     return;
2894     - }
2895    
2896     dma_desc->callback = cppi41_dma_callback;
2897     - dma_desc->callback_param = channel;
2898     + dma_desc->callback_param = &cppi41_channel->channel;
2899     cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
2900     dma_async_issue_pending(dc);
2901    
2902     @@ -166,6 +163,117 @@ static void cppi41_dma_callback(void *private_data)
2903     musb_writew(epio, MUSB_RXCSR, csr);
2904     }
2905     }
2906     +}
2907     +
2908     +static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
2909     +{
2910     + struct cppi41_dma_controller *controller;
2911     + struct cppi41_dma_channel *cppi41_channel, *n;
2912     + struct musb *musb;
2913     + unsigned long flags;
2914     + enum hrtimer_restart ret = HRTIMER_NORESTART;
2915     +
2916     + controller = container_of(timer, struct cppi41_dma_controller,
2917     + early_tx);
2918     + musb = controller->musb;
2919     +
2920     + spin_lock_irqsave(&musb->lock, flags);
2921     + list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
2922     + tx_check) {
2923     + bool empty;
2924     + struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
2925     +
2926     + empty = musb_is_tx_fifo_empty(hw_ep);
2927     + if (empty) {
2928     + list_del_init(&cppi41_channel->tx_check);
2929     + cppi41_trans_done(cppi41_channel);
2930     + }
2931     + }
2932     +
2933     + if (!list_empty(&controller->early_tx_list)) {
2934     + ret = HRTIMER_RESTART;
2935     + hrtimer_forward_now(&controller->early_tx,
2936     + ktime_set(0, 150 * NSEC_PER_USEC));
2937     + }
2938     +
2939     + spin_unlock_irqrestore(&musb->lock, flags);
2940     + return ret;
2941     +}
2942     +
2943     +static void cppi41_dma_callback(void *private_data)
2944     +{
2945     + struct dma_channel *channel = private_data;
2946     + struct cppi41_dma_channel *cppi41_channel = channel->private_data;
2947     + struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
2948     + struct musb *musb = hw_ep->musb;
2949     + unsigned long flags;
2950     + struct dma_tx_state txstate;
2951     + u32 transferred;
2952     + bool empty;
2953     +
2954     + spin_lock_irqsave(&musb->lock, flags);
2955     +
2956     + dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
2957     + &txstate);
2958     + transferred = cppi41_channel->prog_len - txstate.residue;
2959     + cppi41_channel->transferred += transferred;
2960     +
2961     + dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
2962     + hw_ep->epnum, cppi41_channel->transferred,
2963     + cppi41_channel->total_len);
2964     +
2965     + update_rx_toggle(cppi41_channel);
2966     +
2967     + if (cppi41_channel->transferred == cppi41_channel->total_len ||
2968     + transferred < cppi41_channel->packet_sz)
2969     + cppi41_channel->prog_len = 0;
2970     +
2971     + empty = musb_is_tx_fifo_empty(hw_ep);
2972     + if (empty) {
2973     + cppi41_trans_done(cppi41_channel);
2974     + } else {
2975     + struct cppi41_dma_controller *controller;
2976     + /*
2977     + * On AM335x it has been observed that the TX interrupt fires
2978     + * too early, which means the TXFIFO is not yet empty but the DMA
2979     + * engine says that it is done with the transfer. We don't
2980     + * receive a FIFO empty interrupt so the only thing we can do is
2981     + * to poll for the bit. On HS it usually takes 2us, on FS around
2982     + * 110us - 150us depending on the transfer size.
2983     + * We spin on HS (no longer than 25us) and set up a timer on
2984     + * FS to check for the bit and complete the transfer.
2985     + */
2986     + controller = cppi41_channel->controller;
2987     +
2988     + if (musb->g.speed == USB_SPEED_HIGH) {
2989     + unsigned wait = 25;
2990     +
2991     + do {
2992     + empty = musb_is_tx_fifo_empty(hw_ep);
2993     + if (empty)
2994     + break;
2995     + wait--;
2996     + if (!wait)
2997     + break;
2998     + udelay(1);
2999     + } while (1);
3000     +
3001     + empty = musb_is_tx_fifo_empty(hw_ep);
3002     + if (empty) {
3003     + cppi41_trans_done(cppi41_channel);
3004     + goto out;
3005     + }
3006     + }
3007     + list_add_tail(&cppi41_channel->tx_check,
3008     + &controller->early_tx_list);
3009     + if (!hrtimer_active(&controller->early_tx)) {
3010     + hrtimer_start_range_ns(&controller->early_tx,
3011     + ktime_set(0, 140 * NSEC_PER_USEC),
3012     + 40 * NSEC_PER_USEC,
3013     + HRTIMER_MODE_REL);
3014     + }
3015     + }
3016     +out:
3017     spin_unlock_irqrestore(&musb->lock, flags);
3018     }
3019    
3020     @@ -364,6 +472,8 @@ static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
3021     WARN_ON(1);
3022     return 1;
3023     }
3024     + if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
3025     + return 0;
3026     if (cppi41_channel->is_tx)
3027     return 1;
3028     /* AM335x Advisory 1.0.13. No workaround for device RX mode */
3029     @@ -388,6 +498,7 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
3030     if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
3031     return 0;
3032    
3033     + list_del_init(&cppi41_channel->tx_check);
3034     if (is_tx) {
3035     csr = musb_readw(epio, MUSB_TXCSR);
3036     csr &= ~MUSB_TXCSR_DMAENAB;
3037     @@ -494,6 +605,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
3038     cppi41_channel->controller = controller;
3039     cppi41_channel->port_num = port;
3040     cppi41_channel->is_tx = is_tx;
3041     + INIT_LIST_HEAD(&cppi41_channel->tx_check);
3042    
3043     musb_dma = &cppi41_channel->channel;
3044     musb_dma->private_data = cppi41_channel;
3045     @@ -518,6 +630,7 @@ void dma_controller_destroy(struct dma_controller *c)
3046     struct cppi41_dma_controller *controller = container_of(c,
3047     struct cppi41_dma_controller, controller);
3048    
3049     + hrtimer_cancel(&controller->early_tx);
3050     cppi41_dma_controller_stop(controller);
3051     kfree(controller);
3052     }
3053     @@ -537,6 +650,9 @@ struct dma_controller *dma_controller_create(struct musb *musb,
3054     if (!controller)
3055     goto kzalloc_fail;
3056    
3057     + hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3058     + controller->early_tx.function = cppi41_recheck_tx_req;
3059     + INIT_LIST_HEAD(&controller->early_tx_list);
3060     controller->musb = musb;
3061    
3062     controller->controller.channel_alloc = cppi41_dma_channel_allocate;
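
The cppi41 change above works around a TX completion that can arrive before the FIFO has drained: on high speed it spins for up to roughly 25us, otherwise it queues the channel on early_tx_list and lets an hrtimer re-poll the FIFO-empty bit. The following is a rough sketch of that "spin briefly, then re-arm a timer" pattern, assuming hypothetical helpers work_is_done() and finish_work() in place of the driver's FIFO check and completion handling.

/*
 * Hedged sketch of the spin-then-hrtimer polling pattern.  work_is_done()
 * and finish_work() are placeholders, not musb driver functions.
 */
#include <linux/types.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/delay.h>

extern bool work_is_done(void);
extern void finish_work(void);

static struct hrtimer poll_timer;

static enum hrtimer_restart poll_cb(struct hrtimer *t)
{
        if (work_is_done()) {
                finish_work();
                return HRTIMER_NORESTART;
        }
        /* not done yet, check again in ~150us */
        hrtimer_forward_now(t, ktime_set(0, 150 * NSEC_PER_USEC));
        return HRTIMER_RESTART;
}

static void wait_cheaply(bool high_speed)
{
        unsigned int spins = 25;

        if (high_speed) {
                while (spins-- && !work_is_done())
                        udelay(1);
                if (work_is_done()) {
                        finish_work();
                        return;
                }
        }
        /* in real code the timer would be initialised once at setup time */
        hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        poll_timer.function = poll_cb;
        hrtimer_start_range_ns(&poll_timer, ktime_set(0, 140 * NSEC_PER_USEC),
                               40 * NSEC_PER_USEC, HRTIMER_MODE_REL);
}

The driver version differs in that it holds musb->lock in the timer callback and walks a list of pending channels rather than polling a single piece of work.
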
3063     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3064     index c3d94853b4ab..496b7e39d5be 100644
3065     --- a/drivers/usb/serial/option.c
3066     +++ b/drivers/usb/serial/option.c
3067     @@ -85,6 +85,7 @@ static void option_instat_callback(struct urb *urb);
3068     #define HUAWEI_PRODUCT_K4505 0x1464
3069     #define HUAWEI_PRODUCT_K3765 0x1465
3070     #define HUAWEI_PRODUCT_K4605 0x14C6
3071     +#define HUAWEI_PRODUCT_E173S6 0x1C07
3072    
3073     #define QUANTA_VENDOR_ID 0x0408
3074     #define QUANTA_PRODUCT_Q101 0xEA02
3075     @@ -572,6 +573,8 @@ static const struct usb_device_id option_ids[] = {
3076     { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
3077     { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
3078     .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
3079     + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
3080     + .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
3081     { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
3082     .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
3083     { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
3084     @@ -634,6 +637,10 @@ static const struct usb_device_id option_ids[] = {
3085     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
3086     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
3087     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
3088     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x72) },
3089     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x73) },
3090     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x74) },
3091     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x75) },
3092     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
3093     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
3094     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
3095     @@ -688,6 +695,10 @@ static const struct usb_device_id option_ids[] = {
3096     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
3097     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
3098     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
3099     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x72) },
3100     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x73) },
3101     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x74) },
3102     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x75) },
3103     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
3104     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
3105     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
3106     @@ -742,6 +753,10 @@ static const struct usb_device_id option_ids[] = {
3107     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
3108     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
3109     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
3110     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x72) },
3111     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x73) },
3112     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x74) },
3113     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x75) },
3114     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
3115     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
3116     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
3117     @@ -796,6 +811,10 @@ static const struct usb_device_id option_ids[] = {
3118     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
3119     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
3120     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
3121     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x72) },
3122     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x73) },
3123     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x74) },
3124     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x75) },
3125     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
3126     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
3127     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
3128     @@ -850,6 +869,10 @@ static const struct usb_device_id option_ids[] = {
3129     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
3130     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
3131     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
3132     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x72) },
3133     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x73) },
3134     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x74) },
3135     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x75) },
3136     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
3137     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
3138     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
3139     @@ -904,6 +927,10 @@ static const struct usb_device_id option_ids[] = {
3140     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
3141     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
3142     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
3143     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x72) },
3144     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x73) },
3145     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x74) },
3146     + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x75) },
3147     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
3148     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
3149     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
3150     diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
3151     index 3fb83b0c28c2..ab6d3f56cbca 100644
3152     --- a/drivers/watchdog/sc1200wdt.c
3153     +++ b/drivers/watchdog/sc1200wdt.c
3154     @@ -409,8 +409,9 @@ static int __init sc1200wdt_init(void)
3155     #if defined CONFIG_PNP
3156     /* now that the user has specified an IO port and we haven't detected
3157     * any devices, disable pnp support */
3158     + if (isapnp)
3159     + pnp_unregister_driver(&scl200wdt_pnp_driver);
3160     isapnp = 0;
3161     - pnp_unregister_driver(&scl200wdt_pnp_driver);
3162     #endif
3163    
3164     if (!request_region(io, io_len, SC1200_MODULE_NAME)) {
3165     diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
3166     index e15d2b0d8d3b..0890c83643e9 100644
3167     --- a/fs/btrfs/acl.c
3168     +++ b/fs/btrfs/acl.c
3169     @@ -229,7 +229,7 @@ int btrfs_init_acl(struct btrfs_trans_handle *trans,
3170     if (ret > 0) {
3171     /* we need an acl */
3172     ret = btrfs_set_acl(trans, inode, acl, ACL_TYPE_ACCESS);
3173     - } else {
3174     + } else if (ret < 0) {
3175     cache_no_acl(inode);
3176     }
3177     } else {
3178     diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
3179     index 0552a599b28f..5eb50b5df777 100644
3180     --- a/fs/btrfs/backref.c
3181     +++ b/fs/btrfs/backref.c
3182     @@ -185,6 +185,9 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id,
3183     {
3184     struct __prelim_ref *ref;
3185    
3186     + if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
3187     + return 0;
3188     +
3189     ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
3190     if (!ref)
3191     return -ENOMEM;
3192     diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
3193     index 61b5bcd57b7e..b544a44d696e 100644
3194     --- a/fs/btrfs/ctree.c
3195     +++ b/fs/btrfs/ctree.c
3196     @@ -2758,7 +2758,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
3197     int level;
3198     int lowest_unlock = 1;
3199     u8 lowest_level = 0;
3200     - int prev_cmp;
3201     + int prev_cmp = -1;
3202    
3203     lowest_level = p->lowest_level;
3204     WARN_ON(p->nodes[0] != NULL);
3205     @@ -2769,7 +2769,6 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
3206     }
3207    
3208     again:
3209     - prev_cmp = -1;
3210     b = get_old_root(root, time_seq);
3211     level = btrfs_header_level(b);
3212     p->locks[level] = BTRFS_READ_LOCK;
3213     @@ -2787,6 +2786,11 @@ again:
3214     */
3215     btrfs_unlock_up_safe(p, level + 1);
3216    
3217     + /*
3218     + * Since we can unwind extent buffers we want to do a real search
3219     + * every time.
3220     + */
3221     + prev_cmp = -1;
3222     ret = key_search(b, key, level, &prev_cmp, &slot);
3223    
3224     if (level != 0) {
3225     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3226     index 51e3afa78354..8b8eff051493 100644
3227     --- a/fs/btrfs/inode.c
3228     +++ b/fs/btrfs/inode.c
3229     @@ -2367,10 +2367,23 @@ out_unlock:
3230     return ret;
3231     }
3232    
3233     +static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
3234     +{
3235     + struct old_sa_defrag_extent *old, *tmp;
3236     +
3237     + if (!new)
3238     + return;
3239     +
3240     + list_for_each_entry_safe(old, tmp, &new->head, list) {
3241     + list_del(&old->list);
3242     + kfree(old);
3243     + }
3244     + kfree(new);
3245     +}
3246     +
3247     static void relink_file_extents(struct new_sa_defrag_extent *new)
3248     {
3249     struct btrfs_path *path;
3250     - struct old_sa_defrag_extent *old, *tmp;
3251     struct sa_defrag_extent_backref *backref;
3252     struct sa_defrag_extent_backref *prev = NULL;
3253     struct inode *inode;
3254     @@ -2413,16 +2426,11 @@ static void relink_file_extents(struct new_sa_defrag_extent *new)
3255     kfree(prev);
3256    
3257     btrfs_free_path(path);
3258     -
3259     - list_for_each_entry_safe(old, tmp, &new->head, list) {
3260     - list_del(&old->list);
3261     - kfree(old);
3262     - }
3263     out:
3264     + free_sa_defrag_extent(new);
3265     +
3266     atomic_dec(&root->fs_info->defrag_running);
3267     wake_up(&root->fs_info->transaction_wait);
3268     -
3269     - kfree(new);
3270     }
3271    
3272     static struct new_sa_defrag_extent *
3273     @@ -2432,7 +2440,7 @@ record_old_file_extents(struct inode *inode,
3274     struct btrfs_root *root = BTRFS_I(inode)->root;
3275     struct btrfs_path *path;
3276     struct btrfs_key key;
3277     - struct old_sa_defrag_extent *old, *tmp;
3278     + struct old_sa_defrag_extent *old;
3279     struct new_sa_defrag_extent *new;
3280     int ret;
3281    
3282     @@ -2480,7 +2488,7 @@ record_old_file_extents(struct inode *inode,
3283     if (slot >= btrfs_header_nritems(l)) {
3284     ret = btrfs_next_leaf(root, path);
3285     if (ret < 0)
3286     - goto out_free_list;
3287     + goto out_free_path;
3288     else if (ret > 0)
3289     break;
3290     continue;
3291     @@ -2509,7 +2517,7 @@ record_old_file_extents(struct inode *inode,
3292    
3293     old = kmalloc(sizeof(*old), GFP_NOFS);
3294     if (!old)
3295     - goto out_free_list;
3296     + goto out_free_path;
3297    
3298     offset = max(new->file_pos, key.offset);
3299     end = min(new->file_pos + new->len, key.offset + num_bytes);
3300     @@ -2531,15 +2539,10 @@ next:
3301    
3302     return new;
3303    
3304     -out_free_list:
3305     - list_for_each_entry_safe(old, tmp, &new->head, list) {
3306     - list_del(&old->list);
3307     - kfree(old);
3308     - }
3309     out_free_path:
3310     btrfs_free_path(path);
3311     out_kfree:
3312     - kfree(new);
3313     + free_sa_defrag_extent(new);
3314     return NULL;
3315     }
3316    
3317     @@ -2710,8 +2713,14 @@ out:
3318     btrfs_remove_ordered_extent(inode, ordered_extent);
3319    
3320     /* for snapshot-aware defrag */
3321     - if (new)
3322     - relink_file_extents(new);
3323     + if (new) {
3324     + if (ret) {
3325     + free_sa_defrag_extent(new);
3326     + atomic_dec(&root->fs_info->defrag_running);
3327     + } else {
3328     + relink_file_extents(new);
3329     + }
3330     + }
3331    
3332     /* once for us */
3333     btrfs_put_ordered_extent(ordered_extent);
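
The inode.c hunks above factor the old-extent cleanup into free_sa_defrag_extent() so the same teardown runs on the allocation error path, the normal relink path, and the failed-ordered-extent path. The walk uses the _safe list iterator because entries are unlinked while iterating; a generic sketch of that pattern follows (struct old_item and free_all_old are illustrative names, not btrfs types).

/*
 * Hedged sketch of the cleanup-helper pattern: iterate with the _safe
 * variant so each entry can be unlinked and freed during the walk.
 */
#include <linux/list.h>
#include <linux/slab.h>

struct old_item {
        struct list_head list;
        /* payload ... */
};

static void free_all_old(struct list_head *head)
{
        struct old_item *old, *tmp;

        list_for_each_entry_safe(old, tmp, head, list) {
                list_del(&old->list);
                kfree(old);
        }
}
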
3334     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
3335     index 9d46f60cb943..8747feb77ec9 100644
3336     --- a/fs/btrfs/ioctl.c
3337     +++ b/fs/btrfs/ioctl.c
3338     @@ -2130,7 +2130,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
3339    
3340     err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
3341     if (err == -EINTR)
3342     - goto out;
3343     + goto out_drop_write;
3344     dentry = lookup_one_len(vol_args->name, parent, namelen);
3345     if (IS_ERR(dentry)) {
3346     err = PTR_ERR(dentry);
3347     @@ -2293,6 +2293,7 @@ out_dput:
3348     dput(dentry);
3349     out_unlock_dir:
3350     mutex_unlock(&dir->i_mutex);
3351     +out_drop_write:
3352     mnt_drop_write_file(file);
3353     out:
3354     kfree(vol_args);
3355     diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
3356     index c702cb62f78a..bda1cd84ee5f 100644
3357     --- a/fs/btrfs/ordered-data.c
3358     +++ b/fs/btrfs/ordered-data.c
3359     @@ -537,7 +537,9 @@ void btrfs_remove_ordered_extent(struct inode *inode,
3360     */
3361     if (RB_EMPTY_ROOT(&tree->tree) &&
3362     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
3363     + spin_lock(&root->fs_info->ordered_root_lock);
3364     list_del_init(&BTRFS_I(inode)->ordered_operations);
3365     + spin_unlock(&root->fs_info->ordered_root_lock);
3366     }
3367    
3368     if (!root->nr_ordered_extents) {
3369     diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
3370     index e46e0ed74925..741c839fa46a 100644
3371     --- a/fs/btrfs/send.c
3372     +++ b/fs/btrfs/send.c
3373     @@ -121,7 +121,6 @@ struct send_ctx {
3374     struct list_head name_cache_list;
3375     int name_cache_size;
3376    
3377     - struct file *cur_inode_filp;
3378     char *read_buf;
3379     };
3380    
3381     @@ -2120,77 +2119,6 @@ out:
3382     }
3383    
3384     /*
3385     - * Called for regular files when sending extents data. Opens a struct file
3386     - * to read from the file.
3387     - */
3388     -static int open_cur_inode_file(struct send_ctx *sctx)
3389     -{
3390     - int ret = 0;
3391     - struct btrfs_key key;
3392     - struct path path;
3393     - struct inode *inode;
3394     - struct dentry *dentry;
3395     - struct file *filp;
3396     - int new = 0;
3397     -
3398     - if (sctx->cur_inode_filp)
3399     - goto out;
3400     -
3401     - key.objectid = sctx->cur_ino;
3402     - key.type = BTRFS_INODE_ITEM_KEY;
3403     - key.offset = 0;
3404     -
3405     - inode = btrfs_iget(sctx->send_root->fs_info->sb, &key, sctx->send_root,
3406     - &new);
3407     - if (IS_ERR(inode)) {
3408     - ret = PTR_ERR(inode);
3409     - goto out;
3410     - }
3411     -
3412     - dentry = d_obtain_alias(inode);
3413     - inode = NULL;
3414     - if (IS_ERR(dentry)) {
3415     - ret = PTR_ERR(dentry);
3416     - goto out;
3417     - }
3418     -
3419     - path.mnt = sctx->mnt;
3420     - path.dentry = dentry;
3421     - filp = dentry_open(&path, O_RDONLY | O_LARGEFILE, current_cred());
3422     - dput(dentry);
3423     - dentry = NULL;
3424     - if (IS_ERR(filp)) {
3425     - ret = PTR_ERR(filp);
3426     - goto out;
3427     - }
3428     - sctx->cur_inode_filp = filp;
3429     -
3430     -out:
3431     - /*
3432     - * no xxxput required here as every vfs op
3433     - * does it by itself on failure
3434     - */
3435     - return ret;
3436     -}
3437     -
3438     -/*
3439     - * Closes the struct file that was created in open_cur_inode_file
3440     - */
3441     -static int close_cur_inode_file(struct send_ctx *sctx)
3442     -{
3443     - int ret = 0;
3444     -
3445     - if (!sctx->cur_inode_filp)
3446     - goto out;
3447     -
3448     - ret = filp_close(sctx->cur_inode_filp, NULL);
3449     - sctx->cur_inode_filp = NULL;
3450     -
3451     -out:
3452     - return ret;
3453     -}
3454     -
3455     -/*
3456     * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
3457     */
3458     static int send_subvol_begin(struct send_ctx *sctx)
3459     @@ -3622,6 +3550,72 @@ out:
3460     return ret;
3461     }
3462    
3463     +static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
3464     +{
3465     + struct btrfs_root *root = sctx->send_root;
3466     + struct btrfs_fs_info *fs_info = root->fs_info;
3467     + struct inode *inode;
3468     + struct page *page;
3469     + char *addr;
3470     + struct btrfs_key key;
3471     + pgoff_t index = offset >> PAGE_CACHE_SHIFT;
3472     + pgoff_t last_index;
3473     + unsigned pg_offset = offset & ~PAGE_CACHE_MASK;
3474     + ssize_t ret = 0;
3475     +
3476     + key.objectid = sctx->cur_ino;
3477     + key.type = BTRFS_INODE_ITEM_KEY;
3478     + key.offset = 0;
3479     +
3480     + inode = btrfs_iget(fs_info->sb, &key, root, NULL);
3481     + if (IS_ERR(inode))
3482     + return PTR_ERR(inode);
3483     +
3484     + if (offset + len > i_size_read(inode)) {
3485     + if (offset > i_size_read(inode))
3486     + len = 0;
3487     + else
3488     + len = i_size_read(inode) - offset;
3489     + }
3490     + if (len == 0)
3491     + goto out;
3492     +
3493     + last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;
3494     + while (index <= last_index) {
3495     + unsigned cur_len = min_t(unsigned, len,
3496     + PAGE_CACHE_SIZE - pg_offset);
3497     + page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
3498     + if (!page) {
3499     + ret = -ENOMEM;
3500     + break;
3501     + }
3502     +
3503     + if (!PageUptodate(page)) {
3504     + btrfs_readpage(NULL, page);
3505     + lock_page(page);
3506     + if (!PageUptodate(page)) {
3507     + unlock_page(page);
3508     + page_cache_release(page);
3509     + ret = -EIO;
3510     + break;
3511     + }
3512     + }
3513     +
3514     + addr = kmap(page);
3515     + memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
3516     + kunmap(page);
3517     + unlock_page(page);
3518     + page_cache_release(page);
3519     + index++;
3520     + pg_offset = 0;
3521     + len -= cur_len;
3522     + ret += cur_len;
3523     + }
3524     +out:
3525     + iput(inode);
3526     + return ret;
3527     +}
3528     +
3529     /*
3530     * Read some bytes from the current inode/file and send a write command to
3531     * user space.
3532     @@ -3630,35 +3624,20 @@ static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
3533     {
3534     int ret = 0;
3535     struct fs_path *p;
3536     - loff_t pos = offset;
3537     - int num_read = 0;
3538     - mm_segment_t old_fs;
3539     + ssize_t num_read = 0;
3540    
3541     p = fs_path_alloc();
3542     if (!p)
3543     return -ENOMEM;
3544    
3545     - /*
3546     - * vfs normally only accepts user space buffers for security reasons.
3547     - * we only read from the file and also only provide the read_buf buffer
3548     - * to vfs. As this buffer does not come from a user space call, it's
3549     - * ok to temporary allow kernel space buffers.
3550     - */
3551     - old_fs = get_fs();
3552     - set_fs(KERNEL_DS);
3553     -
3554     verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
3555    
3556     - ret = open_cur_inode_file(sctx);
3557     - if (ret < 0)
3558     - goto out;
3559     -
3560     - ret = vfs_read(sctx->cur_inode_filp, sctx->read_buf, len, &pos);
3561     - if (ret < 0)
3562     - goto out;
3563     - num_read = ret;
3564     - if (!num_read)
3565     + num_read = fill_read_buf(sctx, offset, len);
3566     + if (num_read <= 0) {
3567     + if (num_read < 0)
3568     + ret = num_read;
3569     goto out;
3570     + }
3571    
3572     ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
3573     if (ret < 0)
3574     @@ -3677,7 +3656,6 @@ verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
3575     tlv_put_failure:
3576     out:
3577     fs_path_free(p);
3578     - set_fs(old_fs);
3579     if (ret < 0)
3580     return ret;
3581     return num_read;
3582     @@ -4222,10 +4200,6 @@ static int changed_inode(struct send_ctx *sctx,
3583     u64 left_gen = 0;
3584     u64 right_gen = 0;
3585    
3586     - ret = close_cur_inode_file(sctx);
3587     - if (ret < 0)
3588     - goto out;
3589     -
3590     sctx->cur_ino = key->objectid;
3591     sctx->cur_inode_new_gen = 0;
3592    
3593     @@ -4686,11 +4660,6 @@ static int send_subvol(struct send_ctx *sctx)
3594     }
3595    
3596     out:
3597     - if (!ret)
3598     - ret = close_cur_inode_file(sctx);
3599     - else
3600     - close_cur_inode_file(sctx);
3601     -
3602     free_recorded_refs(sctx);
3603     return ret;
3604     }
3605     @@ -4756,8 +4725,8 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
3606     }
3607    
3608     if (!access_ok(VERIFY_READ, arg->clone_sources,
3609     - sizeof(*arg->clone_sources *
3610     - arg->clone_sources_count))) {
3611     + sizeof(*arg->clone_sources) *
3612     + arg->clone_sources_count)) {
3613     ret = -EFAULT;
3614     goto out;
3615     }
3616     diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
3617     index 8c81bdc1ef9b..b791cfb9a050 100644
3618     --- a/fs/btrfs/transaction.c
3619     +++ b/fs/btrfs/transaction.c
3620     @@ -1453,7 +1453,7 @@ static void do_async_commit(struct work_struct *work)
3621     * We've got freeze protection passed with the transaction.
3622     * Tell lockdep about it.
3623     */
3624     - if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
3625     + if (ac->newtrans->type & __TRANS_FREEZABLE)
3626     rwsem_acquire_read(
3627     &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
3628     0, 1, _THIS_IP_);
3629     @@ -1494,7 +1494,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
3630     * Tell lockdep we've released the freeze rwsem, since the
3631     * async commit thread will be the one to unlock it.
3632     */
3633     - if (trans->type < TRANS_JOIN_NOLOCK)
3634     + if (ac->newtrans->type & __TRANS_FREEZABLE)
3635     rwsem_release(
3636     &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
3637     1, _THIS_IP_);
3638     @@ -1552,6 +1552,8 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
3639     root->fs_info->running_transaction = NULL;
3640     spin_unlock(&root->fs_info->trans_lock);
3641    
3642     + if (trans->type & __TRANS_FREEZABLE)
3643     + sb_end_intwrite(root->fs_info->sb);
3644     put_transaction(cur_trans);
3645     put_transaction(cur_trans);
3646    
3647     diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3648     index 79f057c0619a..e14e1f7748e5 100644
3649     --- a/fs/btrfs/tree-log.c
3650     +++ b/fs/btrfs/tree-log.c
3651     @@ -3375,7 +3375,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
3652     btrfs_set_token_file_extent_type(leaf, fi,
3653     BTRFS_FILE_EXTENT_REG,
3654     &token);
3655     - if (em->block_start == 0)
3656     + if (em->block_start == EXTENT_MAP_HOLE)
3657     skip_csum = true;
3658     }
3659    
3660     diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
3661     index 043b215769c2..b691f375d837 100644
3662     --- a/fs/btrfs/volumes.c
3663     +++ b/fs/btrfs/volumes.c
3664     @@ -4488,6 +4488,7 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
3665     btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
3666     "%Lu-%Lu\n", logical, logical+len, em->start,
3667     em->start + em->len);
3668     + free_extent_map(em);
3669     return 1;
3670     }
3671    
3672     @@ -4668,6 +4669,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
3673     btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
3674     "found %Lu-%Lu\n", logical, em->start,
3675     em->start + em->len);
3676     + free_extent_map(em);
3677     return -EINVAL;
3678     }
3679    
3680     diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
3681     index a235f0016889..c43fe9b39ff2 100644
3682     --- a/fs/exportfs/expfs.c
3683     +++ b/fs/exportfs/expfs.c
3684     @@ -215,7 +215,7 @@ struct getdents_callback {
3685     struct dir_context ctx;
3686     char *name; /* name that was found. It already points to a
3687     buffer NAME_MAX+1 is size */
3688     - unsigned long ino; /* the inum we are looking for */
3689     + u64 ino; /* the inum we are looking for */
3690     int found; /* inode matched? */
3691     int sequence; /* sequence counter */
3692     };
3693     @@ -255,10 +255,14 @@ static int get_name(const struct path *path, char *name, struct dentry *child)
3694     struct inode *dir = path->dentry->d_inode;
3695     int error;
3696     struct file *file;
3697     + struct kstat stat;
3698     + struct path child_path = {
3699     + .mnt = path->mnt,
3700     + .dentry = child,
3701     + };
3702     struct getdents_callback buffer = {
3703     .ctx.actor = filldir_one,
3704     .name = name,
3705     - .ino = child->d_inode->i_ino
3706     };
3707    
3708     error = -ENOTDIR;
3709     @@ -268,6 +272,16 @@ static int get_name(const struct path *path, char *name, struct dentry *child)
3710     if (!dir->i_fop)
3711     goto out;
3712     /*
3713     + * inode->i_ino is unsigned long, kstat->ino is u64, so the
3714     + * former would be insufficient on 32-bit hosts when the
3715     + * filesystem supports 64-bit inode numbers. So we need to
3716     + * actually call ->getattr, not just read i_ino:
3717     + */
3718     + error = vfs_getattr_nosec(&child_path, &stat);
3719     + if (error)
3720     + return error;
3721     + buffer.ino = stat.ino;
3722     + /*
3723     * Open the directory ...
3724     */
3725     file = dentry_open(path, O_RDONLY, cred);
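
The exportfs comment above notes that inode->i_ino is an unsigned long while kstat.ino is a u64, so reading i_ino directly can truncate inode numbers on 32-bit hosts; get_name() therefore calls vfs_getattr_nosec() and compares against the full 64-bit value. A small userspace illustration of the truncation (the inode value is made up for the example):

/*
 * Hedged illustration: an unsigned long is 32 bits on a 32-bit host, so a
 * 64-bit inode number loses its upper half when stored in one.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t ino64 = 0x100000002ULL;   /* 64-bit inode number */
        uint32_t ino32 = (uint32_t)ino64;  /* what a 32-bit unsigned long keeps */

        printf("full: %llu  truncated: %u\n",
               (unsigned long long)ino64, ino32);
        /* the truncated value (2) could match a completely different inode,
         * which is why the lookup now compares against a u64 from getattr */
        return 0;
}
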
3726     diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
3727     index 9c3e117c3ed1..4d0161442565 100644
3728     --- a/fs/nfs/blocklayout/extents.c
3729     +++ b/fs/nfs/blocklayout/extents.c
3730     @@ -44,7 +44,7 @@
3731     static inline sector_t normalize(sector_t s, int base)
3732     {
3733     sector_t tmp = s; /* Since do_div modifies its argument */
3734     - return s - do_div(tmp, base);
3735     + return s - sector_div(tmp, base);
3736     }
3737    
3738     static inline sector_t normalize_up(sector_t s, int base)
3739     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3740     index 9be8021c70d8..dacb2979e8ac 100644
3741     --- a/fs/nfs/nfs4proc.c
3742     +++ b/fs/nfs/nfs4proc.c
3743     @@ -4752,8 +4752,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3744     dprintk("%s ERROR %d, Reset session\n", __func__,
3745     task->tk_status);
3746     nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
3747     - task->tk_status = 0;
3748     - return -EAGAIN;
3749     + goto wait_on_recovery;
3750     #endif /* CONFIG_NFS_V4_1 */
3751     case -NFS4ERR_DELAY:
3752     nfs_inc_server_stats(server, NFSIOS_DELAY);
3753     diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
3754     index 9186c7ce0b14..b6af150c96b8 100644
3755     --- a/fs/nfsd/nfscache.c
3756     +++ b/fs/nfsd/nfscache.c
3757     @@ -132,6 +132,13 @@ nfsd_reply_cache_alloc(void)
3758     }
3759    
3760     static void
3761     +nfsd_reply_cache_unhash(struct svc_cacherep *rp)
3762     +{
3763     + hlist_del_init(&rp->c_hash);
3764     + list_del_init(&rp->c_lru);
3765     +}
3766     +
3767     +static void
3768     nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
3769     {
3770     if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
3771     @@ -417,7 +424,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
3772     rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
3773     if (nfsd_cache_entry_expired(rp) ||
3774     num_drc_entries >= max_drc_entries) {
3775     - lru_put_end(rp);
3776     + nfsd_reply_cache_unhash(rp);
3777     prune_cache_entries();
3778     goto search_cache;
3779     }
3780     diff --git a/fs/stat.c b/fs/stat.c
3781     index d0ea7ef75e26..ae0c3cef9927 100644
3782     --- a/fs/stat.c
3783     +++ b/fs/stat.c
3784     @@ -37,14 +37,21 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
3785    
3786     EXPORT_SYMBOL(generic_fillattr);
3787    
3788     -int vfs_getattr(struct path *path, struct kstat *stat)
3789     +/**
3790     + * vfs_getattr_nosec - getattr without security checks
3791     + * @path: file to get attributes from
3792     + * @stat: structure to return attributes in
3793     + *
3794     + * Get attributes without calling security_inode_getattr.
3795     + *
3796     + * Currently the only caller other than vfs_getattr is internal to the
3797     + * filehandle lookup code, which uses only the inode number and returns
3798     + * no attributes to any user. Any other code probably wants
3799     + * vfs_getattr.
3800     + */
3801     +int vfs_getattr_nosec(struct path *path, struct kstat *stat)
3802     {
3803     struct inode *inode = path->dentry->d_inode;
3804     - int retval;
3805     -
3806     - retval = security_inode_getattr(path->mnt, path->dentry);
3807     - if (retval)
3808     - return retval;
3809    
3810     if (inode->i_op->getattr)
3811     return inode->i_op->getattr(path->mnt, path->dentry, stat);
3812     @@ -53,6 +60,18 @@ int vfs_getattr(struct path *path, struct kstat *stat)
3813     return 0;
3814     }
3815    
3816     +EXPORT_SYMBOL(vfs_getattr_nosec);
3817     +
3818     +int vfs_getattr(struct path *path, struct kstat *stat)
3819     +{
3820     + int retval;
3821     +
3822     + retval = security_inode_getattr(path->mnt, path->dentry);
3823     + if (retval)
3824     + return retval;
3825     + return vfs_getattr_nosec(path, stat);
3826     +}
3827     +
3828     EXPORT_SYMBOL(vfs_getattr);
3829    
3830     int vfs_fstat(unsigned int fd, struct kstat *stat)
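
With the fs/stat.c change, vfs_getattr() becomes a thin wrapper that runs the security_inode_getattr() check and then calls vfs_getattr_nosec() for the actual attribute read, so trusted in-kernel callers such as the filehandle code can skip the LSM hook. A hedged sketch of that "checked wrapper around an unchecked core" layering, with invented names (check_access, get_attrs_raw, struct stat_info):

/*
 * Hedged sketch of the wrapper layering: the public entry point performs
 * the permission check, the _raw core does the work.  Names are invented.
 */
struct stat_info;

extern int check_access(const char *path);
extern int get_attrs_raw(const char *path, struct stat_info *out);

int get_attrs(const char *path, struct stat_info *out)
{
        int err = check_access(path);      /* policy check first */

        if (err)
                return err;
        return get_attrs_raw(path, out);   /* then the unchecked core */
}
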
3831     diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
3832     index e64ee5288b86..c888040a1e93 100644
3833     --- a/fs/xfs/xfs_fsops.c
3834     +++ b/fs/xfs/xfs_fsops.c
3835     @@ -217,6 +217,8 @@ xfs_growfs_data_private(
3836     */
3837     nfree = 0;
3838     for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
3839     + __be32 *agfl_bno;
3840     +
3841     /*
3842     * AG freespace header block
3843     */
3844     @@ -276,8 +278,10 @@ xfs_growfs_data_private(
3845     agfl->agfl_seqno = cpu_to_be32(agno);
3846     uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
3847     }
3848     +
3849     + agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
3850     for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
3851     - agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
3852     + agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
3853    
3854     error = xfs_bwrite(bp);
3855     xfs_buf_relse(bp);
3856     diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
3857     index 2e1e6c33841d..8c8ef246c6b4 100644
3858     --- a/fs/xfs/xfs_ioctl.c
3859     +++ b/fs/xfs/xfs_ioctl.c
3860     @@ -443,7 +443,8 @@ xfs_attrlist_by_handle(
3861     return -XFS_ERROR(EPERM);
3862     if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
3863     return -XFS_ERROR(EFAULT);
3864     - if (al_hreq.buflen > XATTR_LIST_MAX)
3865     + if (al_hreq.buflen < sizeof(struct attrlist) ||
3866     + al_hreq.buflen > XATTR_LIST_MAX)
3867     return -XFS_ERROR(EINVAL);
3868    
3869     /*
3870     diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
3871     index f671f7e472ac..53365c6db2c2 100644
3872     --- a/fs/xfs/xfs_ioctl32.c
3873     +++ b/fs/xfs/xfs_ioctl32.c
3874     @@ -357,7 +357,8 @@ xfs_compat_attrlist_by_handle(
3875     if (copy_from_user(&al_hreq, arg,
3876     sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
3877     return -XFS_ERROR(EFAULT);
3878     - if (al_hreq.buflen > XATTR_LIST_MAX)
3879     + if (al_hreq.buflen < sizeof(struct attrlist) ||
3880     + al_hreq.buflen > XATTR_LIST_MAX)
3881     return -XFS_ERROR(EINVAL);
3882    
3883     /*
3884     diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
3885     index 973ce10c40b6..dc1bd3dcf11f 100644
3886     --- a/include/linux/compiler-intel.h
3887     +++ b/include/linux/compiler-intel.h
3888     @@ -28,8 +28,6 @@
3889    
3890     #endif
3891    
3892     -#define uninitialized_var(x) x
3893     -
3894     #ifndef __HAVE_BUILTIN_BSWAP16__
3895     /* icc has this, but it's called _bswap16 */
3896     #define __HAVE_BUILTIN_BSWAP16__
3897     diff --git a/include/linux/fs.h b/include/linux/fs.h
3898     index fefa7b00ba42..164d2a91667f 100644
3899     --- a/include/linux/fs.h
3900     +++ b/include/linux/fs.h
3901     @@ -2504,6 +2504,7 @@ extern int page_symlink(struct inode *inode, const char *symname, int len);
3902     extern const struct inode_operations page_symlink_inode_operations;
3903     extern int generic_readlink(struct dentry *, char __user *, int);
3904     extern void generic_fillattr(struct inode *, struct kstat *);
3905     +int vfs_getattr_nosec(struct path *path, struct kstat *stat);
3906     extern int vfs_getattr(struct path *, struct kstat *);
3907     void __inode_add_bytes(struct inode *inode, loff_t bytes);
3908     void inode_add_bytes(struct inode *inode, loff_t bytes);
3909     diff --git a/include/linux/kexec.h b/include/linux/kexec.h
3910     index d78d28a733b1..5fd33dc1fe3a 100644
3911     --- a/include/linux/kexec.h
3912     +++ b/include/linux/kexec.h
3913     @@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
3914     extern size_t vmcoreinfo_size;
3915     extern size_t vmcoreinfo_max_size;
3916    
3917     +/* flag to track if kexec reboot is in progress */
3918     +extern bool kexec_in_progress;
3919     +
3920     int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
3921     unsigned long long *crash_size, unsigned long long *crash_base);
3922     int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
3923     diff --git a/include/linux/usb.h b/include/linux/usb.h
3924     index 39cfa0aca91f..6b02370256e4 100644
3925     --- a/include/linux/usb.h
3926     +++ b/include/linux/usb.h
3927     @@ -1262,6 +1262,8 @@ typedef void (*usb_complete_t)(struct urb *);
3928     * @sg: scatter gather buffer list, the buffer size of each element in
3929     * the list (except the last) must be divisible by the endpoint's
3930     * max packet size if no_sg_constraint isn't set in 'struct usb_bus'
3931     + * (FIXME: scatter-gather under xHCI is broken for periodic transfers.
3932     + * Do not use urb->sg for interrupt endpoints for now, only bulk.)
3933     * @num_mapped_sgs: (internal) number of mapped sg entries
3934     * @num_sgs: number of entries in the sg list
3935     * @transfer_buffer_length: How big is transfer_buffer. The transfer may
3936     diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
3937     index cf15b8213df7..54aff2d73150 100644
3938     --- a/include/sound/memalloc.h
3939     +++ b/include/sound/memalloc.h
3940     @@ -103,7 +103,7 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
3941     {
3942     struct snd_sg_buf *sgbuf = dmab->private_data;
3943     dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
3944     - addr &= PAGE_MASK;
3945     + addr &= ~((dma_addr_t)PAGE_SIZE - 1);
3946     return addr + offset % PAGE_SIZE;
3947     }
3948    
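
The memalloc.h change avoids PAGE_MASK because it is an unsigned long: when dma_addr_t is wider than unsigned long (for example a 32-bit kernel with 64-bit DMA addressing), the 32-bit mask zero-extends and clears the upper address bits, while a mask built from dma_addr_t keeps them. A small illustration with made-up values:

/*
 * Hedged illustration: masking a wide DMA address with a 32-bit-wide page
 * mask wipes the high bits; a mask of the wide type does not.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

int main(void)
{
        uint64_t addr = 0x123456000ULL;                   /* > 4 GiB DMA address */
        uint32_t narrow_mask = (uint32_t)~(DEMO_PAGE_SIZE - 1); /* 32-bit unsigned long mask */
        uint64_t wide_mask = ~((uint64_t)DEMO_PAGE_SIZE - 1);   /* mask in the wide type */

        printf("narrow mask: %#llx\n",
               (unsigned long long)(addr & narrow_mask)); /* 0x23456000: high bits lost */
        printf("wide mask:   %#llx\n",
               (unsigned long long)(addr & wide_mask));   /* 0x123456000: preserved */
        return 0;
}
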
3949     diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
3950     index d630163b9a2e..5759810e1c1b 100644
3951     --- a/include/uapi/sound/compress_offload.h
3952     +++ b/include/uapi/sound/compress_offload.h
3953     @@ -30,7 +30,7 @@
3954     #include <sound/compress_params.h>
3955    
3956    
3957     -#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 1)
3958     +#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2)
3959     /**
3960     * struct snd_compressed_buffer: compressed buffer
3961     * @fragment_size: size of buffer fragment in bytes
3962     @@ -67,8 +67,8 @@ struct snd_compr_params {
3963     struct snd_compr_tstamp {
3964     __u32 byte_offset;
3965     __u32 copied_total;
3966     - snd_pcm_uframes_t pcm_frames;
3967     - snd_pcm_uframes_t pcm_io_frames;
3968     + __u32 pcm_frames;
3969     + __u32 pcm_io_frames;
3970     __u32 sampling_rate;
3971     };
3972    
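
The compress_offload change replaces snd_pcm_uframes_t (a long, whose size differs between 32- and 64-bit userspace) with __u32 so struct snd_compr_tstamp has a single layout for all ioctl callers, and bumps the protocol version to advertise the new layout. A hedged illustration of how long-sized fields change a structure's size across ABIs (the structs below are illustrative, not the ALSA definitions):

/*
 * Hedged illustration: long-based fields give different sizes/layouts on
 * 32- and 64-bit userspace, fixed-width fields do not.
 */
#include <stdint.h>
#include <stdio.h>

struct tstamp_long  { uint32_t a, b; long     frames, io_frames; uint32_t rate; };
struct tstamp_fixed { uint32_t a, b; uint32_t frames, io_frames; uint32_t rate; };

int main(void)
{
        /* typically 32 bytes on x86-64 (padding included) vs 20 on i386;
         * the fixed-width variant is 20 bytes on both */
        printf("long-based : %zu bytes\n", sizeof(struct tstamp_long));
        printf("fixed-width: %zu bytes\n", sizeof(struct tstamp_fixed));
        return 0;
}
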
3973     diff --git a/kernel/futex.c b/kernel/futex.c
3974     index c3a1a55a5214..221a58fc62f7 100644
3975     --- a/kernel/futex.c
3976     +++ b/kernel/futex.c
3977     @@ -288,7 +288,7 @@ again:
3978     put_page(page);
3979     /* serialize against __split_huge_page_splitting() */
3980     local_irq_disable();
3981     - if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
3982     + if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
3983     page_head = compound_head(page);
3984     /*
3985     * page_head is valid pointer but we must pin
3986     diff --git a/kernel/kexec.c b/kernel/kexec.c
3987     index 2a74f307c5ec..ecd783dda9ae 100644
3988     --- a/kernel/kexec.c
3989     +++ b/kernel/kexec.c
3990     @@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
3991     size_t vmcoreinfo_size;
3992     size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
3993    
3994     +/* Flag to indicate we are going to kexec a new kernel */
3995     +bool kexec_in_progress = false;
3996     +
3997     /* Location of the reserved area for the crash kernel */
3998     struct resource crashk_res = {
3999     .name = "Crash kernel",
4000     @@ -1675,6 +1678,7 @@ int kernel_kexec(void)
4001     } else
4002     #endif
4003     {
4004     + kexec_in_progress = true;
4005     kernel_restart_prepare(NULL);
4006     printk(KERN_EMERG "Starting new kernel\n");
4007     machine_shutdown();
4008     diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
4009     index 196559994f7c..fd9ca1de7559 100644
4010     --- a/kernel/sched/debug.c
4011     +++ b/kernel/sched/debug.c
4012     @@ -225,6 +225,14 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
4013     atomic_read(&cfs_rq->tg->runnable_avg));
4014     #endif
4015     #endif
4016     +#ifdef CONFIG_CFS_BANDWIDTH
4017     + SEQ_printf(m, " .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
4018     + cfs_rq->tg->cfs_bandwidth.timer_active);
4019     + SEQ_printf(m, " .%-30s: %d\n", "throttled",
4020     + cfs_rq->throttled);
4021     + SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
4022     + cfs_rq->throttle_count);
4023     +#endif
4024    
4025     #ifdef CONFIG_FAIR_GROUP_SCHED
4026     print_cfs_group_stats(m, cpu, cfs_rq->tg);
4027     diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
4028     index 7c70201fbc61..513fc2fd5109 100644
4029     --- a/kernel/sched/fair.c
4030     +++ b/kernel/sched/fair.c
4031     @@ -2335,6 +2335,8 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
4032     cfs_rq->throttled_clock = rq_clock(rq);
4033     raw_spin_lock(&cfs_b->lock);
4034     list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
4035     + if (!cfs_b->timer_active)
4036     + __start_cfs_bandwidth(cfs_b);
4037     raw_spin_unlock(&cfs_b->lock);
4038     }
4039    
4040     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4041     index 13b9d0f221b8..36cc2d0570ab 100644
4042     --- a/mm/memcontrol.c
4043     +++ b/mm/memcontrol.c
4044     @@ -2675,7 +2675,10 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
4045     goto bypass;
4046    
4047     if (unlikely(task_in_memcg_oom(current)))
4048     - goto bypass;
4049     + goto nomem;
4050     +
4051     + if (gfp_mask & __GFP_NOFAIL)
4052     + oom = false;
4053    
4054     /*
4055     * We always charge the cgroup the mm_struct belongs to.
4056     @@ -6338,6 +6341,42 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4057     static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4058     {
4059     struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4060     + /*
4061     + * XXX: css_offline() would be where we should reparent all
4062     + * memory to prepare the cgroup for destruction. However,
4063     + * memcg does not do css_tryget() and res_counter charging
4064     + * under the same RCU lock region, which means that charging
4065     + * could race with offlining. Offlining only happens to
4066     + * cgroups with no tasks in them but charges can show up
4067     + * without any tasks from the swapin path when the target
4068     + * memcg is looked up from the swapout record and not from the
4069     + * current task as it usually is. A race like this can leak
4070     + * charges and put pages with stale cgroup pointers into
4071     + * circulation:
4072     + *
4073     + * #0 #1
4074     + * lookup_swap_cgroup_id()
4075     + * rcu_read_lock()
4076     + * mem_cgroup_lookup()
4077     + * css_tryget()
4078     + * rcu_read_unlock()
4079     + * disable css_tryget()
4080     + * call_rcu()
4081     + * offline_css()
4082     + * reparent_charges()
4083     + * res_counter_charge()
4084     + * css_put()
4085     + * css_free()
4086     + * pc->mem_cgroup = dead memcg
4087     + * add page to lru
4088     + *
4089     + * The bulk of the charges are still moved in offline_css() to
4090     + * avoid pinning a lot of pages in case a long-term reference
4091     + * like a swapout record is deferring the css_free() to long
4092     + * after offlining. But this makes sure we catch any charges
4093     + * made after offlining:
4094     + */
4095     + mem_cgroup_reparent_charges(memcg);
4096    
4097     memcg_destroy_kmem(memcg);
4098     __mem_cgroup_free(memcg);
4099     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
4100     index f60b1eec3f87..5e2c2f1a075d 100644
4101     --- a/net/ipv4/udp.c
4102     +++ b/net/ipv4/udp.c
4103     @@ -1075,9 +1075,6 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
4104     if (flags & MSG_SENDPAGE_NOTLAST)
4105     flags |= MSG_MORE;
4106    
4107     - if (flags & MSG_SENDPAGE_NOTLAST)
4108     - flags |= MSG_MORE;
4109     -
4110     if (!up->pending) {
4111     struct msghdr msg = { .msg_flags = flags|MSG_MORE };
4112    
4113     diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
4114     index 629dee7ec9bf..9903ee585561 100644
4115     --- a/net/mac80211/cfg.c
4116     +++ b/net/mac80211/cfg.c
4117     @@ -2386,8 +2386,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
4118     struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4119     struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
4120    
4121     - if (sdata->vif.type != NL80211_IFTYPE_STATION &&
4122     - sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
4123     + if (sdata->vif.type != NL80211_IFTYPE_STATION)
4124     return -EOPNOTSUPP;
4125    
4126     if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
4127     diff --git a/net/mac80211/main.c b/net/mac80211/main.c
4128     index 21d5d44444d0..e765f77bb97a 100644
4129     --- a/net/mac80211/main.c
4130     +++ b/net/mac80211/main.c
4131     @@ -1047,6 +1047,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
4132    
4133     cancel_work_sync(&local->restart_work);
4134     cancel_work_sync(&local->reconfig_filter);
4135     + flush_work(&local->sched_scan_stopped_work);
4136    
4137     ieee80211_clear_tx_pending(local);
4138     rate_control_deinitialize(local);
4139     diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
4140     index 674eac1f996c..1e5bd0d75732 100644
4141     --- a/net/mac80211/rx.c
4142     +++ b/net/mac80211/rx.c
4143     @@ -911,7 +911,8 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
4144     u16 sc;
4145     u8 tid, ack_policy;
4146    
4147     - if (!ieee80211_is_data_qos(hdr->frame_control))
4148     + if (!ieee80211_is_data_qos(hdr->frame_control) ||
4149     + is_multicast_ether_addr(hdr->addr1))
4150     goto dont_reorder;
4151    
4152     /*
4153     diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
4154     index d2d17a449224..8f2f003afbb8 100644
4155     --- a/net/mac80211/scan.c
4156     +++ b/net/mac80211/scan.c
4157     @@ -1089,6 +1089,6 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
4158    
4159     trace_api_sched_scan_stopped(local);
4160    
4161     - ieee80211_queue_work(&local->hw, &local->sched_scan_stopped_work);
4162     + schedule_work(&local->sched_scan_stopped_work);
4163     }
4164     EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
4165     diff --git a/net/wireless/core.c b/net/wireless/core.c
4166     index aff959e5a1b3..00a65ba3aeaa 100644
4167     --- a/net/wireless/core.c
4168     +++ b/net/wireless/core.c
4169     @@ -451,6 +451,9 @@ int wiphy_register(struct wiphy *wiphy)
4170     int i;
4171     u16 ifmodes = wiphy->interface_modes;
4172    
4173     + /* support for 5/10 MHz is broken due to nl80211 API mess - disable */
4174     + wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_5_10_MHZ;
4175     +
4176     #ifdef CONFIG_PM
4177     if (WARN_ON(wiphy->wowlan &&
4178     (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
4179     diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
4180     index 5b5231068516..d9a78fd8a2e1 100644
4181     --- a/security/selinux/hooks.c
4182     +++ b/security/selinux/hooks.c
4183     @@ -53,6 +53,7 @@
4184     #include <net/ip.h> /* for local_port_range[] */
4185     #include <net/sock.h>
4186     #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
4187     +#include <net/inet_connection_sock.h>
4188     #include <net/net_namespace.h>
4189     #include <net/netlabel.h>
4190     #include <linux/uaccess.h>
4191     @@ -3805,6 +3806,30 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
4192     return 0;
4193     }
4194    
4195     +/**
4196     + * selinux_conn_sid - Determine the child socket label for a connection
4197     + * @sk_sid: the parent socket's SID
4198     + * @skb_sid: the packet's SID
4199     + * @conn_sid: the resulting connection SID
4200     + *
4201     + * If @skb_sid is valid then the user:role:type information from @sk_sid is
4202     + * combined with the MLS information from @skb_sid in order to create
4203     + * @conn_sid. If @skb_sid is not valid then then @conn_sid is simply a copy
4204     + * @conn_sid. If @skb_sid is not valid then @conn_sid is simply a copy
4205     + *
4206     + */
4207     +static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid)
4208     +{
4209     + int err = 0;
4210     +
4211     + if (skb_sid != SECSID_NULL)
4212     + err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid);
4213     + else
4214     + *conn_sid = sk_sid;
4215     +
4216     + return err;
4217     +}
4218     +
4219     /* socket security operations */
4220    
4221     static int socket_sockcreate_sid(const struct task_security_struct *tsec,
4222     @@ -4411,7 +4436,7 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
4223     struct sk_security_struct *sksec = sk->sk_security;
4224     int err;
4225     u16 family = sk->sk_family;
4226     - u32 newsid;
4227     + u32 connsid;
4228     u32 peersid;
4229    
4230     /* handle mapped IPv4 packets arriving via IPv6 sockets */
4231     @@ -4421,16 +4446,11 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
4232     err = selinux_skb_peerlbl_sid(skb, family, &peersid);
4233     if (err)
4234     return err;
4235     - if (peersid == SECSID_NULL) {
4236     - req->secid = sksec->sid;
4237     - req->peer_secid = SECSID_NULL;
4238     - } else {
4239     - err = security_sid_mls_copy(sksec->sid, peersid, &newsid);
4240     - if (err)
4241     - return err;
4242     - req->secid = newsid;
4243     - req->peer_secid = peersid;
4244     - }
4245     + err = selinux_conn_sid(sksec->sid, peersid, &connsid);
4246     + if (err)
4247     + return err;
4248     + req->secid = connsid;
4249     + req->peer_secid = peersid;
4250    
4251     return selinux_netlbl_inet_conn_request(req, family);
4252     }
4253     @@ -4690,6 +4710,7 @@ static unsigned int selinux_ipv6_forward(unsigned int hooknum,
4254     static unsigned int selinux_ip_output(struct sk_buff *skb,
4255     u16 family)
4256     {
4257     + struct sock *sk;
4258     u32 sid;
4259    
4260     if (!netlbl_enabled())
4261     @@ -4698,8 +4719,27 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
4262     /* we do this in the LOCAL_OUT path and not the POST_ROUTING path
4263     * because we want to make sure we apply the necessary labeling
4264     * before IPsec is applied so we can leverage AH protection */
4265     - if (skb->sk) {
4266     - struct sk_security_struct *sksec = skb->sk->sk_security;
4267     + sk = skb->sk;
4268     + if (sk) {
4269     + struct sk_security_struct *sksec;
4270     +
4271     + if (sk->sk_state == TCP_LISTEN)
4272     + /* if the socket is in the listening state then this
4273     + * packet is a SYN-ACK packet which means it needs to
4274     + * be labeled based on the connection/request_sock and
4275     + * not the parent socket. unfortunately, we can't
4276     + * lookup the request_sock yet as it isn't queued on
4277     + * the parent socket until after the SYN-ACK is sent.
4278     + * the "solution" is to simply pass the packet as-is
4279     + * as any IP option based labeling should be copied
4280     + * from the initial connection request (in the IP
4281     + * layer). it is far from ideal, but until we get a
4282     + * security label in the packet itself this is the
4283     + * best we can do. */
4284     + return NF_ACCEPT;
4285     +
4286     + /* standard practice, label using the parent socket */
4287     + sksec = sk->sk_security;
4288     sid = sksec->sid;
4289     } else
4290     sid = SECINITSID_KERNEL;
4291     @@ -4784,12 +4824,12 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4292     if (!secmark_active && !peerlbl_active)
4293     return NF_ACCEPT;
4294    
4295     - /* if the packet is being forwarded then get the peer label from the
4296     - * packet itself; otherwise check to see if it is from a local
4297     - * application or the kernel, if from an application get the peer label
4298     - * from the sending socket, otherwise use the kernel's sid */
4299     sk = skb->sk;
4300     if (sk == NULL) {
4301     + /* Without an associated socket the packet is either coming
4302     + * from the kernel or it is being forwarded; check the packet
4303     + * to determine which and if the packet is being forwarded
4304     + * query the packet directly to determine the security label. */
4305     if (skb->skb_iif) {
4306     secmark_perm = PACKET__FORWARD_OUT;
4307     if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
4308     @@ -4798,7 +4838,26 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4309     secmark_perm = PACKET__SEND;
4310     peer_sid = SECINITSID_KERNEL;
4311     }
4312     + } else if (sk->sk_state == TCP_LISTEN) {
4313     + /* Locally generated packet but the associated socket is in the
4314     + * listening state which means this is a SYN-ACK packet. In
4315     + * this particular case the correct security label is assigned
4316     + * to the connection/request_sock but unfortunately we can't
4317     + * query the request_sock as it isn't queued on the parent
4318     + * socket until after the SYN-ACK packet is sent; the only
4319     + * viable choice is to regenerate the label like we do in
4320     + * selinux_inet_conn_request(). See also selinux_ip_output()
4321     + * for similar problems. */
4322     + u32 skb_sid;
4323     + struct sk_security_struct *sksec = sk->sk_security;
4324     + if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
4325     + return NF_DROP;
4326     + if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
4327     + return NF_DROP;
4328     + secmark_perm = PACKET__SEND;
4329     } else {
4330     + /* Locally generated packet, fetch the security label from the
4331     + * associated socket. */
4332     struct sk_security_struct *sksec = sk->sk_security;
4333     peer_sid = sksec->sid;
4334     secmark_perm = PACKET__SEND;
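
Not part of the patch: a minimal userspace C sketch of the labeling rule that the selinux_conn_sid() helper added above implements, with security contexts modeled as plain "user:role:type:level" strings instead of kernel SIDs. In the kernel the combination is done by security_sid_mls_copy() on opaque SIDs; the names conn_context(), mls_of() and the sample contexts below are illustrative only and assume well-formed contexts.

    #include <stdio.h>
    #include <string.h>

    /* return a pointer just past the third ':' -- the start of the MLS
     * field (assumes a well-formed user:role:type:level context) */
    static const char *mls_of(const char *ctx)
    {
        int i;

        for (i = 0; i < 3; i++) {
            ctx = strchr(ctx, ':');
            if (!ctx)
                return NULL;
            ctx++;
        }
        return ctx;
    }

    /* keep user:role:type from the socket context and take the MLS level
     * from the packet context when the packet carries a label; a NULL
     * skb_ctx stands in for SECSID_NULL, in which case the socket label
     * is inherited unchanged */
    static void conn_context(const char *sk_ctx, const char *skb_ctx,
                             char *out, size_t len)
    {
        const char *sk_mls, *skb_mls;

        if (!skb_ctx) {
            snprintf(out, len, "%s", sk_ctx);
            return;
        }
        sk_mls = mls_of(sk_ctx);
        skb_mls = mls_of(skb_ctx);
        snprintf(out, len, "%.*s%s", (int)(sk_mls - sk_ctx), sk_ctx, skb_mls);
    }

    int main(void)
    {
        char buf[128];

        conn_context("system_u:system_r:httpd_t:s0",
                     "system_u:object_r:netlabel_peer_t:s0:c90",
                     buf, sizeof(buf));
        printf("labeled peer   -> %s\n", buf);

        conn_context("system_u:system_r:httpd_t:s0", NULL, buf, sizeof(buf));
        printf("unlabeled peer -> %s\n", buf);
        return 0;
    }

With a labeled peer the result keeps the socket's httpd_t identity but takes the packet's s0:c90 level; with an unlabeled peer the socket context comes back unchanged, mirroring the SECSID_NULL branch above.
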
4335     diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
4336     index c4671d00babd..c7f6d1cab606 100644
4337     --- a/sound/pci/hda/hda_generic.c
4338     +++ b/sound/pci/hda/hda_generic.c
4339     @@ -474,6 +474,20 @@ static void invalidate_nid_path(struct hda_codec *codec, int idx)
4340     memset(path, 0, sizeof(*path));
4341     }
4342    
4343     +/* return a DAC if paired to the given pin by codec driver */
4344     +static hda_nid_t get_preferred_dac(struct hda_codec *codec, hda_nid_t pin)
4345     +{
4346     + struct hda_gen_spec *spec = codec->spec;
4347     + const hda_nid_t *list = spec->preferred_dacs;
4348     +
4349     + if (!list)
4350     + return 0;
4351     + for (; *list; list += 2)
4352     + if (*list == pin)
4353     + return list[1];
4354     + return 0;
4355     +}
4356     +
4357     /* look for an empty DAC slot */
4358     static hda_nid_t look_for_dac(struct hda_codec *codec, hda_nid_t pin,
4359     bool is_digital)
4360     @@ -1192,7 +1206,14 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
4361     continue;
4362     }
4363    
4364     - dacs[i] = look_for_dac(codec, pin, false);
4365     + dacs[i] = get_preferred_dac(codec, pin);
4366     + if (dacs[i]) {
4367     + if (is_dac_already_used(codec, dacs[i]))
4368     + badness += bad->shared_primary;
4369     + }
4370     +
4371     + if (!dacs[i])
4372     + dacs[i] = look_for_dac(codec, pin, false);
4373     if (!dacs[i] && !i) {
4374     /* try to steal the DAC of surrounds for the front */
4375     for (j = 1; j < num_outs; j++) {
4376     @@ -4297,6 +4318,26 @@ static unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
4377     return AC_PWRST_D3;
4378     }
4379    
4380     +/* mute all aamix inputs initially; parse up to the first leaves */
4381     +static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
4382     +{
4383     + int i, nums;
4384     + const hda_nid_t *conn;
4385     + bool has_amp;
4386     +
4387     + nums = snd_hda_get_conn_list(codec, mix, &conn);
4388     + has_amp = nid_has_mute(codec, mix, HDA_INPUT);
4389     + for (i = 0; i < nums; i++) {
4390     + if (has_amp)
4391     + snd_hda_codec_amp_stereo(codec, mix,
4392     + HDA_INPUT, i,
4393     + 0xff, HDA_AMP_MUTE);
4394     + else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
4395     + snd_hda_codec_amp_stereo(codec, conn[i],
4396     + HDA_OUTPUT, 0,
4397     + 0xff, HDA_AMP_MUTE);
4398     + }
4399     +}
4400    
4401     /*
4402     * Parse the given BIOS configuration and set up the hda_gen_spec
4403     @@ -4435,6 +4476,10 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
4404     }
4405     }
4406    
4407     + /* mute all aamix input initially */
4408     + if (spec->mixer_nid)
4409     + mute_all_mixer_nid(codec, spec->mixer_nid);
4410     +
4411     dig_only:
4412     parse_digital(codec);
4413    
4414     diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
4415     index 7e45cb44d151..0929a06df812 100644
4416     --- a/sound/pci/hda/hda_generic.h
4417     +++ b/sound/pci/hda/hda_generic.h
4418     @@ -249,6 +249,9 @@ struct hda_gen_spec {
4419     const struct badness_table *main_out_badness;
4420     const struct badness_table *extra_out_badness;
4421    
4422     + /* preferred pin/DAC pairs; an array of paired NIDs */
4423     + const hda_nid_t *preferred_dacs;
4424     +
4425     /* loopback mixing mode */
4426     bool aamix_mode;
4427    
4428     diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
4429     index f684a4f8c797..38aa080681a3 100644
4430     --- a/sound/pci/hda/patch_analog.c
4431     +++ b/sound/pci/hda/patch_analog.c
4432     @@ -324,6 +324,14 @@ static int patch_ad1986a(struct hda_codec *codec)
4433     {
4434     int err;
4435     struct ad198x_spec *spec;
4436     + static hda_nid_t preferred_pairs[] = {
4437     + 0x1a, 0x03,
4438     + 0x1b, 0x03,
4439     + 0x1c, 0x04,
4440     + 0x1d, 0x05,
4441     + 0x1e, 0x03,
4442     + 0
4443     + };
4444    
4445     err = alloc_ad_spec(codec);
4446     if (err < 0)
4447     @@ -344,6 +352,8 @@ static int patch_ad1986a(struct hda_codec *codec)
4448     * So, let's disable the shared stream.
4449     */
4450     spec->gen.multiout.no_share_stream = 1;
4451     + /* give fixed DAC/pin pairs */
4452     + spec->gen.preferred_dacs = preferred_pairs;
4453    
4454     /* AD1986A can't manage the dynamic pin on/off smoothly */
4455     spec->gen.auto_mute_via_amp = 1;
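
Not part of the patch: a standalone sketch of how the zero-terminated preferred_dacs pair table wired up above is walked -- even entries name a pin NID, the following odd entry names the DAC the codec driver wants bound to that pin, and a pin of 0 ends the table, the same scan get_preferred_dac() performs in hda_generic.c. The nid_t typedef and the preferred_dac() name are illustrative stand-ins, not kernel symbols.

    #include <stdio.h>

    typedef unsigned short nid_t;          /* stand-in for hda_nid_t */

    static nid_t preferred_dac(const nid_t *pairs, nid_t pin)
    {
        if (!pairs)                        /* codec driver supplied no table */
            return 0;
        for (; *pairs; pairs += 2)         /* a pin entry of 0 ends the table */
            if (pairs[0] == pin)
                return pairs[1];           /* DAC paired with this pin */
        return 0;                          /* no fixed pairing for this pin */
    }

    int main(void)
    {
        /* the AD1986A table from patch_analog.c above */
        static const nid_t ad1986a_pairs[] = {
            0x1a, 0x03, 0x1b, 0x03, 0x1c, 0x04, 0x1d, 0x05, 0x1e, 0x03, 0
        };

        printf("pin 0x1c -> DAC 0x%02x\n", preferred_dac(ad1986a_pairs, 0x1c));
        printf("pin 0x21 -> DAC 0x%02x\n", preferred_dac(ad1986a_pairs, 0x21));
        return 0;
    }

A pin absent from the table (0x21 here) yields 0, so try_assign_dacs() falls back to the normal look_for_dac() search, as in the hda_generic.c hunk above.
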
4456     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
4457     index 8d2d01b0cf86..f26c42c92db7 100644
4458     --- a/sound/pci/hda/patch_hdmi.c
4459     +++ b/sound/pci/hda/patch_hdmi.c
4460     @@ -2085,8 +2085,9 @@ static int simple_playback_build_controls(struct hda_codec *codec)
4461     int err;
4462    
4463     per_cvt = get_cvt(spec, 0);
4464     - err = snd_hda_create_spdif_out_ctls(codec, per_cvt->cvt_nid,
4465     - per_cvt->cvt_nid);
4466     + err = snd_hda_create_dig_out_ctls(codec, per_cvt->cvt_nid,
4467     + per_cvt->cvt_nid,
4468     + HDA_PCM_TYPE_HDMI);
4469     if (err < 0)
4470     return err;
4471     return simple_hdmi_build_jack(codec, 0);
4472     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
4473     index 1cf9ccb01013..aac732d17c17 100644
4474     --- a/virt/kvm/kvm_main.c
4475     +++ b/virt/kvm/kvm_main.c
4476     @@ -1893,6 +1893,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
4477     int r;
4478     struct kvm_vcpu *vcpu, *v;
4479    
4480     + if (id >= KVM_MAX_VCPUS)
4481     + return -EINVAL;
4482     +
4483     vcpu = kvm_arch_vcpu_create(kvm, id);
4484     if (IS_ERR(vcpu))
4485     return PTR_ERR(vcpu);