Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.4/0102-4.4.3-all-fixes.patch



Revision 2769
Thu Mar 3 08:32:11 2016 UTC by niro
File size: 156476 bytes
-linux-4.4.3
1 niro 2769 diff --git a/MAINTAINERS b/MAINTAINERS
2     index 233f83464814..d826f1b9eb02 100644
3     --- a/MAINTAINERS
4     +++ b/MAINTAINERS
5     @@ -10289,9 +10289,11 @@ S: Maintained
6     F: drivers/net/ethernet/dlink/sundance.c
7    
8     SUPERH
9     +M: Yoshinori Sato <ysato@users.sourceforge.jp>
10     +M: Rich Felker <dalias@libc.org>
11     L: linux-sh@vger.kernel.org
12     Q: http://patchwork.kernel.org/project/linux-sh/list/
13     -S: Orphan
14     +S: Maintained
15     F: Documentation/sh/
16     F: arch/sh/
17     F: drivers/sh/
18     diff --git a/Makefile b/Makefile
19     index e7a2958eb771..802be10c40c5 100644
20     --- a/Makefile
21     +++ b/Makefile
22     @@ -1,6 +1,6 @@
23     VERSION = 4
24     PATCHLEVEL = 4
25     -SUBLEVEL = 2
26     +SUBLEVEL = 3
27     EXTRAVERSION =
28     NAME = Blurry Fish Butt
29    
30     diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
31     index a633be3defda..cd316021d6ce 100644
32     --- a/arch/arm/boot/dts/armada-388-gp.dts
33     +++ b/arch/arm/boot/dts/armada-388-gp.dts
34     @@ -303,16 +303,6 @@
35     gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
36     };
37    
38     - reg_usb2_1_vbus: v5-vbus1 {
39     - compatible = "regulator-fixed";
40     - regulator-name = "v5.0-vbus1";
41     - regulator-min-microvolt = <5000000>;
42     - regulator-max-microvolt = <5000000>;
43     - enable-active-high;
44     - regulator-always-on;
45     - gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
46     - };
47     -
48     reg_sata0: pwr-sata0 {
49     compatible = "regulator-fixed";
50     regulator-name = "pwr_en_sata0";
51     diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
52     index 131614f28e75..569026e8f96c 100644
53     --- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
54     +++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
55     @@ -86,10 +86,12 @@
56     macb0: ethernet@f8020000 {
57     phy-mode = "rmii";
58     status = "okay";
59     + pinctrl-names = "default";
60     + pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
61    
62     phy0: ethernet-phy@1 {
63     interrupt-parent = <&pioE>;
64     - interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
65     + interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
66     reg = <1>;
67     };
68     };
69     @@ -152,6 +154,10 @@
70     atmel,pins =
71     <AT91_PIOE 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
72     };
73     + pinctrl_macb0_phy_irq: macb0_phy_irq_0 {
74     + atmel,pins =
75     + <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
76     + };
77     };
78     };
79     };
80     diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
81     index 2d4a33100af6..4e98cda97403 100644
82     --- a/arch/arm/boot/dts/at91-sama5d4ek.dts
83     +++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
84     @@ -160,8 +160,15 @@
85     };
86    
87     macb0: ethernet@f8020000 {
88     + pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
89     phy-mode = "rmii";
90     status = "okay";
91     +
92     + ethernet-phy@1 {
93     + reg = <0x1>;
94     + interrupt-parent = <&pioE>;
95     + interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
96     + };
97     };
98    
99     mmc1: mmc@fc000000 {
100     @@ -193,6 +200,10 @@
101    
102     pinctrl@fc06a000 {
103     board {
104     + pinctrl_macb0_phy_irq: macb0_phy_irq {
105     + atmel,pins =
106     + <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
107     + };
108     pinctrl_mmc0_cd: mmc0_cd {
109     atmel,pins =
110     <AT91_PIOE 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
111     diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
112     index 36387b11451d..80f6c786a37e 100644
113     --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
114     +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
115     @@ -122,6 +122,7 @@
116     interrupt-parent = <&gpio5>;
117     interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */
118     ref-clock-frequency = <26000000>;
119     + tcxo-clock-frequency = <26000000>;
120     };
121     };
122    
123     diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
124     index 5cf76a1c5c75..41e80e7f20be 100644
125     --- a/arch/arm/boot/dts/omap5-board-common.dtsi
126     +++ b/arch/arm/boot/dts/omap5-board-common.dtsi
127     @@ -130,6 +130,16 @@
128     };
129     };
130    
131     +&gpio8 {
132     + /* TI trees use GPIO instead of msecure, see also muxing */
133     + p234 {
134     + gpio-hog;
135     + gpios = <10 GPIO_ACTIVE_HIGH>;
136     + output-high;
137     + line-name = "gpio8_234/msecure";
138     + };
139     +};
140     +
141     &omap5_pmx_core {
142     pinctrl-names = "default";
143     pinctrl-0 = <
144     @@ -213,6 +223,13 @@
145     >;
146     };
147    
148     + /* TI trees use GPIO mode; msecure mode does not work reliably? */
149     + palmas_msecure_pins: palmas_msecure_pins {
150     + pinctrl-single,pins = <
151     + OMAP5_IOPAD(0x180, PIN_OUTPUT | MUX_MODE6) /* gpio8_234 */
152     + >;
153     + };
154     +
155     usbhost_pins: pinmux_usbhost_pins {
156     pinctrl-single,pins = <
157     0x84 (PIN_INPUT | MUX_MODE0) /* usbb2_hsic_strobe */
158     @@ -278,6 +295,12 @@
159     &usbhost_wkup_pins
160     >;
161    
162     + palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
163     + pinctrl-single,pins = <
164     + OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
165     + >;
166     + };
167     +
168     usbhost_wkup_pins: pinmux_usbhost_wkup_pins {
169     pinctrl-single,pins = <
170     0x1A (PIN_OUTPUT | MUX_MODE0) /* fref_clk1_out, USB hub clk */
171     @@ -345,6 +368,8 @@
172     interrupt-controller;
173     #interrupt-cells = <2>;
174     ti,system-power-controller;
175     + pinctrl-names = "default";
176     + pinctrl-0 = <&palmas_sys_nirq_pins &palmas_msecure_pins>;
177    
178     extcon_usb3: palmas_usb {
179     compatible = "ti,palmas-usb-vid";
180     @@ -358,6 +383,14 @@
181     #clock-cells = <0>;
182     };
183    
184     + rtc {
185     + compatible = "ti,palmas-rtc";
186     + interrupt-parent = <&palmas>;
187     + interrupts = <8 IRQ_TYPE_NONE>;
188     + ti,backup-battery-chargeable;
189     + ti,backup-battery-charge-high-current;
190     + };
191     +
192     palmas_pmic {
193     compatible = "ti,palmas-pmic";
194     interrupt-parent = <&palmas>;
195     diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
196     index 2193637b9cd2..3daf8d5d7878 100644
197     --- a/arch/arm/boot/dts/sama5d4.dtsi
198     +++ b/arch/arm/boot/dts/sama5d4.dtsi
199     @@ -1342,7 +1342,7 @@
200     dbgu: serial@fc069000 {
201     compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
202     reg = <0xfc069000 0x200>;
203     - interrupts = <2 IRQ_TYPE_LEVEL_HIGH 7>;
204     + interrupts = <45 IRQ_TYPE_LEVEL_HIGH 7>;
205     pinctrl-names = "default";
206     pinctrl-0 = <&pinctrl_dbgu>;
207     clocks = <&dbgu_clk>;
208     diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
209     index d0c743853318..27a333eb8987 100644
210     --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
211     +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
212     @@ -127,22 +127,14 @@
213     };
214     mmcsd_default_mode: mmcsd_default {
215     mmcsd_default_cfg1 {
216     - /* MCCLK */
217     - pins = "GPIO8_B10";
218     - ste,output = <0>;
219     - };
220     - mmcsd_default_cfg2 {
221     - /* MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2 */
222     - pins = "GPIO10_C11", "GPIO15_A12",
223     - "GPIO16_C13", "GPIO23_D15";
224     - ste,output = <1>;
225     - };
226     - mmcsd_default_cfg3 {
227     - /* MCCMD, MCDAT3-0, MCMSFBCLK */
228     - pins = "GPIO9_A10", "GPIO11_B11",
229     - "GPIO12_A11", "GPIO13_C12",
230     - "GPIO14_B12", "GPIO24_C15";
231     - ste,input = <1>;
232     + /*
233     + * MCCLK, MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2
234     + * MCCMD, MCDAT3-0, MCMSFBCLK
235     + */
236     + pins = "GPIO8_B10", "GPIO9_A10", "GPIO10_C11", "GPIO11_B11",
237     + "GPIO12_A11", "GPIO13_C12", "GPIO14_B12", "GPIO15_A12",
238     + "GPIO16_C13", "GPIO23_D15", "GPIO24_C15";
239     + ste,output = <2>;
240     };
241     };
242     };
243     @@ -802,10 +794,21 @@
244     clock-names = "mclk", "apb_pclk";
245     interrupt-parent = <&vica>;
246     interrupts = <22>;
247     - max-frequency = <48000000>;
248     + max-frequency = <400000>;
249     bus-width = <4>;
250     cap-mmc-highspeed;
251     cap-sd-highspeed;
252     + full-pwr-cycle;
253     + /*
254     + * The STw4811 circuit used with the Nomadik strictly
255     + * requires that all of these signal direction pins be
256     + * routed and used for its 4-bit levelshifter.
257     + */
258     + st,sig-dir-dat0;
259     + st,sig-dir-dat2;
260     + st,sig-dir-dat31;
261     + st,sig-dir-cmd;
262     + st,sig-pin-fbclk;
263     pinctrl-names = "default";
264     pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
265     vmmc-supply = <&vmmc_regulator>;
266     diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
267     index 2dc6da70ae59..d7ed252708c5 100644
268     --- a/arch/arm/common/icst.c
269     +++ b/arch/arm/common/icst.c
270     @@ -16,7 +16,7 @@
271     */
272     #include <linux/module.h>
273     #include <linux/kernel.h>
274     -
275     +#include <asm/div64.h>
276     #include <asm/hardware/icst.h>
277    
278     /*
279     @@ -29,7 +29,11 @@ EXPORT_SYMBOL(icst525_s2div);
280    
281     unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
282     {
283     - return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
284     + u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
285     + u32 divisor = (vco.r + 2) * p->s2div[vco.s];
286     +
287     + do_div(dividend, divisor);
288     + return (unsigned long)dividend;
289     }
290    
291     EXPORT_SYMBOL(icst_hz);
292     @@ -58,6 +62,7 @@ icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
293    
294     if (f > p->vco_min && f <= p->vco_max)
295     break;
296     + i++;
297     } while (i < 8);
298    
299     if (i >= 8)
300     diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
301     index eafd120b53f1..1b9f0520dea9 100644
302     --- a/arch/arm/mach-omap2/sleep34xx.S
303     +++ b/arch/arm/mach-omap2/sleep34xx.S
304     @@ -86,13 +86,18 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
305     stmfd sp!, {lr} @ save registers on stack
306     /* Setup so that we will disable and enable l2 */
307     mov r1, #0x1
308     - adrl r2, l2dis_3630 @ may be too distant for plain adr
309     - str r1, [r2]
310     + adrl r3, l2dis_3630_offset @ may be too distant for plain adr
311     + ldr r2, [r3] @ value for offset
312     + str r1, [r2, r3] @ write to l2dis_3630
313     ldmfd sp!, {pc} @ restore regs and return
314     ENDPROC(enable_omap3630_toggle_l2_on_restore)
315    
316     - .text
317     -/* Function to call rom code to save secure ram context */
318     +/*
319     + * Function to call rom code to save secure ram context. This gets
320     + * relocated to SRAM, so it can be all in .data section. Otherwise
321     + * we need to initialize api_params separately.
322     + */
323     + .data
324     .align 3
325     ENTRY(save_secure_ram_context)
326     stmfd sp!, {r4 - r11, lr} @ save registers on stack
327     @@ -126,6 +131,8 @@ ENDPROC(save_secure_ram_context)
328     ENTRY(save_secure_ram_context_sz)
329     .word . - save_secure_ram_context
330    
331     + .text
332     +
333     /*
334     * ======================
335     * == Idle entry point ==
336     @@ -289,12 +296,6 @@ wait_sdrc_ready:
337     bic r5, r5, #0x40
338     str r5, [r4]
339    
340     -/*
341     - * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a
342     - * base instead.
343     - * Be careful not to clobber r7 when maintaing this code.
344     - */
345     -
346     is_dll_in_lock_mode:
347     /* Is dll in lock mode? */
348     ldr r4, sdrc_dlla_ctrl
349     @@ -302,11 +303,7 @@ is_dll_in_lock_mode:
350     tst r5, #0x4
351     bne exit_nonoff_modes @ Return if locked
352     /* wait till dll locks */
353     - adr r7, kick_counter
354     wait_dll_lock_timed:
355     - ldr r4, wait_dll_lock_counter
356     - add r4, r4, #1
357     - str r4, [r7, #wait_dll_lock_counter - kick_counter]
358     ldr r4, sdrc_dlla_status
359     /* Wait 20uS for lock */
360     mov r6, #8
361     @@ -330,9 +327,6 @@ kick_dll:
362     orr r6, r6, #(1<<3) @ enable dll
363     str r6, [r4]
364     dsb
365     - ldr r4, kick_counter
366     - add r4, r4, #1
367     - str r4, [r7] @ kick_counter
368     b wait_dll_lock_timed
369    
370     exit_nonoff_modes:
371     @@ -360,15 +354,6 @@ sdrc_dlla_status:
372     .word SDRC_DLLA_STATUS_V
373     sdrc_dlla_ctrl:
374     .word SDRC_DLLA_CTRL_V
375     - /*
376     - * When exporting to userspace while the counters are in SRAM,
377     - * these 2 words need to be at the end to facilitate retrival!
378     - */
379     -kick_counter:
380     - .word 0
381     -wait_dll_lock_counter:
382     - .word 0
383     -
384     ENTRY(omap3_do_wfi_sz)
385     .word . - omap3_do_wfi
386    
387     @@ -437,7 +422,9 @@ ENTRY(omap3_restore)
388     cmp r2, #0x0 @ Check if target power state was OFF or RET
389     bne logic_l1_restore
390    
391     - ldr r0, l2dis_3630
392     + adr r1, l2dis_3630_offset @ address for offset
393     + ldr r0, [r1] @ value for offset
394     + ldr r0, [r1, r0] @ value at l2dis_3630
395     cmp r0, #0x1 @ should we disable L2 on 3630?
396     bne skipl2dis
397     mrc p15, 0, r0, c1, c0, 1
398     @@ -449,12 +436,14 @@ skipl2dis:
399     and r1, #0x700
400     cmp r1, #0x300
401     beq l2_inv_gp
402     + adr r0, l2_inv_api_params_offset
403     + ldr r3, [r0]
404     + add r3, r3, r0 @ r3 points to dummy parameters
405     mov r0, #40 @ set service ID for PPA
406     mov r12, r0 @ copy secure Service ID in r12
407     mov r1, #0 @ set task id for ROM code in r1
408     mov r2, #4 @ set some flags in r2, r6
409     mov r6, #0xff
410     - adr r3, l2_inv_api_params @ r3 points to dummy parameters
411     dsb @ data write barrier
412     dmb @ data memory barrier
413     smc #1 @ call SMI monitor (smi #1)
414     @@ -488,8 +477,8 @@ skipl2dis:
415     b logic_l1_restore
416    
417     .align
418     -l2_inv_api_params:
419     - .word 0x1, 0x00
420     +l2_inv_api_params_offset:
421     + .long l2_inv_api_params - .
422     l2_inv_gp:
423     /* Execute smi to invalidate L2 cache */
424     mov r12, #0x1 @ set up to invalidate L2
425     @@ -506,7 +495,9 @@ l2_inv_gp:
426     mov r12, #0x2
427     smc #0 @ Call SMI monitor (smieq)
428     logic_l1_restore:
429     - ldr r1, l2dis_3630
430     + adr r0, l2dis_3630_offset @ adress for offset
431     + ldr r1, [r0] @ value for offset
432     + ldr r1, [r0, r1] @ value at l2dis_3630
433     cmp r1, #0x1 @ Test if L2 re-enable needed on 3630
434     bne skipl2reen
435     mrc p15, 0, r1, c1, c0, 1
436     @@ -535,9 +526,17 @@ control_stat:
437     .word CONTROL_STAT
438     control_mem_rta:
439     .word CONTROL_MEM_RTA_CTRL
440     +l2dis_3630_offset:
441     + .long l2dis_3630 - .
442     +
443     + .data
444     l2dis_3630:
445     .word 0
446    
447     + .data
448     +l2_inv_api_params:
449     + .word 0x1, 0x00
450     +
451     /*
452     * Internal functions
453     */
454     diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
455     index 9b09d85d811a..c7a3b4aab4b5 100644
456     --- a/arch/arm/mach-omap2/sleep44xx.S
457     +++ b/arch/arm/mach-omap2/sleep44xx.S
458     @@ -29,12 +29,6 @@
459     dsb
460     .endm
461    
462     -ppa_zero_params:
463     - .word 0x0
464     -
465     -ppa_por_params:
466     - .word 1, 0
467     -
468     #ifdef CONFIG_ARCH_OMAP4
469    
470     /*
471     @@ -266,7 +260,9 @@ ENTRY(omap4_cpu_resume)
472     beq skip_ns_smp_enable
473     ppa_actrl_retry:
474     mov r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
475     - adr r3, ppa_zero_params @ Pointer to parameters
476     + adr r1, ppa_zero_params_offset
477     + ldr r3, [r1]
478     + add r3, r3, r1 @ Pointer to ppa_zero_params
479     mov r1, #0x0 @ Process ID
480     mov r2, #0x4 @ Flag
481     mov r6, #0xff
482     @@ -303,7 +299,9 @@ skip_ns_smp_enable:
483     ldr r0, =OMAP4_PPA_L2_POR_INDEX
484     ldr r1, =OMAP44XX_SAR_RAM_BASE
485     ldr r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
486     - adr r3, ppa_por_params
487     + adr r1, ppa_por_params_offset
488     + ldr r3, [r1]
489     + add r3, r3, r1 @ Pointer to ppa_por_params
490     str r4, [r3, #0x04]
491     mov r1, #0x0 @ Process ID
492     mov r2, #0x4 @ Flag
493     @@ -328,6 +326,8 @@ skip_l2en:
494     #endif
495    
496     b cpu_resume @ Jump to generic resume
497     +ppa_por_params_offset:
498     + .long ppa_por_params - .
499     ENDPROC(omap4_cpu_resume)
500     #endif /* CONFIG_ARCH_OMAP4 */
501    
502     @@ -380,4 +380,13 @@ ENTRY(omap_do_wfi)
503     nop
504    
505     ldmfd sp!, {pc}
506     +ppa_zero_params_offset:
507     + .long ppa_zero_params - .
508     ENDPROC(omap_do_wfi)
509     +
510     + .data
511     +ppa_zero_params:
512     + .word 0
513     +
514     +ppa_por_params:
515     + .word 1, 0
516     diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
517     index 7963aa4b5d28..354144e33218 100644
518     --- a/arch/arm64/mm/dma-mapping.c
519     +++ b/arch/arm64/mm/dma-mapping.c
520     @@ -933,6 +933,10 @@ static int __init __iommu_dma_init(void)
521     ret = register_iommu_dma_ops_notifier(&platform_bus_type);
522     if (!ret)
523     ret = register_iommu_dma_ops_notifier(&amba_bustype);
524     +
525     + /* handle devices queued before this arch_initcall */
526     + if (!ret)
527     + __iommu_attach_notifier(NULL, BUS_NOTIFY_ADD_DEVICE, NULL);
528     return ret;
529     }
530     arch_initcall(__iommu_dma_init);
531     diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
532     index 3571c7309c5e..cf6240741134 100644
533     --- a/arch/arm64/mm/pageattr.c
534     +++ b/arch/arm64/mm/pageattr.c
535     @@ -57,6 +57,9 @@ static int change_memory_common(unsigned long addr, int numpages,
536     if (end < MODULES_VADDR || end >= MODULES_END)
537     return -EINVAL;
538    
539     + if (!numpages)
540     + return 0;
541     +
542     data.set_mask = set_mask;
543     data.clear_mask = clear_mask;
544    
545     diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
546     index 0392112a5d70..a5ecef7188ba 100644
547     --- a/arch/m32r/kernel/setup.c
548     +++ b/arch/m32r/kernel/setup.c
549     @@ -81,7 +81,10 @@ static struct resource code_resource = {
550     };
551    
552     unsigned long memory_start;
553     +EXPORT_SYMBOL(memory_start);
554     +
555     unsigned long memory_end;
556     +EXPORT_SYMBOL(memory_end);
557    
558     void __init setup_arch(char **);
559     int get_cpuinfo(char *);
560     diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
561     index c5eb86f3d452..867c39b45df6 100644
562     --- a/arch/powerpc/include/asm/eeh.h
563     +++ b/arch/powerpc/include/asm/eeh.h
564     @@ -81,6 +81,7 @@ struct pci_dn;
565     #define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
566     #define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */
567     #define EEH_PE_REMOVED (1 << 10) /* Removed permanently */
568     +#define EEH_PE_PRI_BUS (1 << 11) /* Cached primary bus */
569    
570     struct eeh_pe {
571     int type; /* PE type: PHB/Bus/Device */
572     diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
573     index 8d14feb40f12..f69ecaa7ce33 100644
574     --- a/arch/powerpc/kernel/eeh_driver.c
575     +++ b/arch/powerpc/kernel/eeh_driver.c
576     @@ -564,6 +564,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
577     */
578     eeh_pe_state_mark(pe, EEH_PE_KEEP);
579     if (bus) {
580     + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
581     pci_lock_rescan_remove();
582     pcibios_remove_pci_devices(bus);
583     pci_unlock_rescan_remove();
584     @@ -803,6 +804,7 @@ perm_error:
585     * the their PCI config any more.
586     */
587     if (frozen_bus) {
588     + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
589     eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
590    
591     pci_lock_rescan_remove();
592     @@ -886,6 +888,7 @@ static void eeh_handle_special_event(void)
593     continue;
594    
595     /* Notify all devices to be down */
596     + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
597     bus = eeh_pe_bus_get(phb_pe);
598     eeh_pe_dev_traverse(pe,
599     eeh_report_failure, NULL);
600     diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
601     index 8654cb166c19..98f81800e00c 100644
602     --- a/arch/powerpc/kernel/eeh_pe.c
603     +++ b/arch/powerpc/kernel/eeh_pe.c
604     @@ -883,32 +883,29 @@ void eeh_pe_restore_bars(struct eeh_pe *pe)
605     const char *eeh_pe_loc_get(struct eeh_pe *pe)
606     {
607     struct pci_bus *bus = eeh_pe_bus_get(pe);
608     - struct device_node *dn = pci_bus_to_OF_node(bus);
609     + struct device_node *dn;
610     const char *loc = NULL;
611    
612     - if (!dn)
613     - goto out;
614     + while (bus) {
615     + dn = pci_bus_to_OF_node(bus);
616     + if (!dn) {
617     + bus = bus->parent;
618     + continue;
619     + }
620    
621     - /* PHB PE or root PE ? */
622     - if (pci_is_root_bus(bus)) {
623     - loc = of_get_property(dn, "ibm,loc-code", NULL);
624     - if (!loc)
625     + if (pci_is_root_bus(bus))
626     loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
627     + else
628     + loc = of_get_property(dn, "ibm,slot-location-code",
629     + NULL);
630     +
631     if (loc)
632     - goto out;
633     + return loc;
634    
635     - /* Check the root port */
636     - dn = dn->child;
637     - if (!dn)
638     - goto out;
639     + bus = bus->parent;
640     }
641    
642     - loc = of_get_property(dn, "ibm,loc-code", NULL);
643     - if (!loc)
644     - loc = of_get_property(dn, "ibm,slot-location-code", NULL);
645     -
646     -out:
647     - return loc ? loc : "N/A";
648     + return "N/A";
649     }
650    
651     /**
652     @@ -931,7 +928,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
653     bus = pe->phb->bus;
654     } else if (pe->type & EEH_PE_BUS ||
655     pe->type & EEH_PE_DEVICE) {
656     - if (pe->bus) {
657     + if (pe->state & EEH_PE_PRI_BUS) {
658     bus = pe->bus;
659     goto out;
660     }
661     diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
662     index 3c6badcd53ef..e57cc383e5da 100644
663     --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
664     +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
665     @@ -2153,7 +2153,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
666    
667     /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
668     2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
669     - rlwimi r5, r4, 1, DAWRX_WT
670     + rlwimi r5, r4, 2, DAWRX_WT
671     clrrdi r4, r4, 3
672     std r4, VCPU_DAWR(r3)
673     std r5, VCPU_DAWRX(r3)
674     diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
675     index 6fd2405c7f4a..a3b182dcb823 100644
676     --- a/arch/powerpc/kvm/powerpc.c
677     +++ b/arch/powerpc/kvm/powerpc.c
678     @@ -919,21 +919,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
679     r = -ENXIO;
680     break;
681     }
682     - vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
683     + val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
684     break;
685     case KVM_REG_PPC_VSCR:
686     if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
687     r = -ENXIO;
688     break;
689     }
690     - vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
691     + val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
692     break;
693     case KVM_REG_PPC_VRSAVE:
694     - if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
695     - r = -ENXIO;
696     - break;
697     - }
698     - vcpu->arch.vrsave = set_reg_val(reg->id, val);
699     + val = get_reg_val(reg->id, vcpu->arch.vrsave);
700     break;
701     #endif /* CONFIG_ALTIVEC */
702     default:
703     @@ -974,17 +970,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
704     r = -ENXIO;
705     break;
706     }
707     - val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
708     + vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
709     break;
710     case KVM_REG_PPC_VSCR:
711     if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
712     r = -ENXIO;
713     break;
714     }
715     - val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
716     + vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
717     break;
718     case KVM_REG_PPC_VRSAVE:
719     - val = get_reg_val(reg->id, vcpu->arch.vrsave);
720     + if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
721     + r = -ENXIO;
722     + break;
723     + }
724     + vcpu->arch.vrsave = set_reg_val(reg->id, val);
725     break;
726     #endif /* CONFIG_ALTIVEC */
727     default:
728     diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
729     index e1c90725522a..2ba602591a20 100644
730     --- a/arch/powerpc/platforms/powernv/eeh-powernv.c
731     +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
732     @@ -444,9 +444,12 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
733     * PCI devices of the PE are expected to be removed prior
734     * to PE reset.
735     */
736     - if (!edev->pe->bus)
737     + if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
738     edev->pe->bus = pci_find_bus(hose->global_number,
739     pdn->busno);
740     + if (edev->pe->bus)
741     + edev->pe->state |= EEH_PE_PRI_BUS;
742     + }
743    
744     /*
745     * Enable EEH explicitly so that we will do EEH check
746     diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
747     index 414fd1a00fda..e40d0714679e 100644
748     --- a/arch/powerpc/platforms/powernv/pci-ioda.c
749     +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
750     @@ -3034,6 +3034,7 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
751    
752     static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
753     .dma_dev_setup = pnv_pci_dma_dev_setup,
754     + .dma_bus_setup = pnv_pci_dma_bus_setup,
755     #ifdef CONFIG_PCI_MSI
756     .setup_msi_irqs = pnv_setup_msi_irqs,
757     .teardown_msi_irqs = pnv_teardown_msi_irqs,
758     diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
759     index f2dd77234240..ad8c3f4a5e0b 100644
760     --- a/arch/powerpc/platforms/powernv/pci.c
761     +++ b/arch/powerpc/platforms/powernv/pci.c
762     @@ -601,6 +601,9 @@ int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
763     u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
764     long i;
765    
766     + if (proto_tce & TCE_PCI_WRITE)
767     + proto_tce |= TCE_PCI_READ;
768     +
769     for (i = 0; i < npages; i++) {
770     unsigned long newtce = proto_tce |
771     ((rpn + i) << tbl->it_page_shift);
772     @@ -622,6 +625,9 @@ int pnv_tce_xchg(struct iommu_table *tbl, long index,
773    
774     BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
775    
776     + if (newtce & TCE_PCI_WRITE)
777     + newtce |= TCE_PCI_READ;
778     +
779     oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce));
780     *hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE);
781     *direction = iommu_tce_direction(oldtce);
782     @@ -762,6 +768,26 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
783     phb->dma_dev_setup(phb, pdev);
784     }
785    
786     +void pnv_pci_dma_bus_setup(struct pci_bus *bus)
787     +{
788     + struct pci_controller *hose = bus->sysdata;
789     + struct pnv_phb *phb = hose->private_data;
790     + struct pnv_ioda_pe *pe;
791     +
792     + list_for_each_entry(pe, &phb->ioda.pe_list, list) {
793     + if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
794     + continue;
795     +
796     + if (!pe->pbus)
797     + continue;
798     +
799     + if (bus->number == ((pe->rid >> 8) & 0xFF)) {
800     + pe->pbus = bus;
801     + break;
802     + }
803     + }
804     +}
805     +
806     void pnv_pci_shutdown(void)
807     {
808     struct pci_controller *hose;
809     diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
810     index c8ff50e90766..36a99feab7d8 100644
811     --- a/arch/powerpc/platforms/powernv/pci.h
812     +++ b/arch/powerpc/platforms/powernv/pci.h
813     @@ -235,6 +235,7 @@ extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
814     extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
815    
816     extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
817     +extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
818     extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
819     extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
820    
821     diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
822     index a471cadb9630..79c91853e50e 100644
823     --- a/arch/x86/include/asm/pgtable_types.h
824     +++ b/arch/x86/include/asm/pgtable_types.h
825     @@ -363,20 +363,18 @@ static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
826     }
827     static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
828     {
829     + pgprotval_t val = pgprot_val(pgprot);
830     pgprot_t new;
831     - unsigned long val;
832    
833     - val = pgprot_val(pgprot);
834     pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
835     ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
836     return new;
837     }
838     static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
839     {
840     + pgprotval_t val = pgprot_val(pgprot);
841     pgprot_t new;
842     - unsigned long val;
843    
844     - val = pgprot_val(pgprot);
845     pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
846     ((val & _PAGE_PAT_LARGE) >>
847     (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
848     diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
849     index 982ce34f4a9b..27f89c79a44b 100644
850     --- a/arch/x86/lib/copy_user_64.S
851     +++ b/arch/x86/lib/copy_user_64.S
852     @@ -232,17 +232,31 @@ ENDPROC(copy_user_enhanced_fast_string)
853    
854     /*
855     * copy_user_nocache - Uncached memory copy with exception handling
856     - * This will force destination/source out of cache for more performance.
857     + * This will force destination out of cache for more performance.
858     + *
859     + * Note: Cached memory copy is used when destination or size is not
860     + * naturally aligned. That is:
861     + * - Require 8-byte alignment when size is 8 bytes or larger.
862     + * - Require 4-byte alignment when size is 4 bytes.
863     */
864     ENTRY(__copy_user_nocache)
865     ASM_STAC
866     +
867     + /* If size is less than 8 bytes, go to 4-byte copy */
868     cmpl $8,%edx
869     - jb 20f /* less then 8 bytes, go to byte copy loop */
870     + jb .L_4b_nocache_copy_entry
871     +
872     + /* If destination is not 8-byte aligned, "cache" copy to align it */
873     ALIGN_DESTINATION
874     +
875     + /* Set 4x8-byte copy count and remainder */
876     movl %edx,%ecx
877     andl $63,%edx
878     shrl $6,%ecx
879     - jz 17f
880     + jz .L_8b_nocache_copy_entry /* jump if count is 0 */
881     +
882     + /* Perform 4x8-byte nocache loop-copy */
883     +.L_4x8b_nocache_copy_loop:
884     1: movq (%rsi),%r8
885     2: movq 1*8(%rsi),%r9
886     3: movq 2*8(%rsi),%r10
887     @@ -262,60 +276,106 @@ ENTRY(__copy_user_nocache)
888     leaq 64(%rsi),%rsi
889     leaq 64(%rdi),%rdi
890     decl %ecx
891     - jnz 1b
892     -17: movl %edx,%ecx
893     + jnz .L_4x8b_nocache_copy_loop
894     +
895     + /* Set 8-byte copy count and remainder */
896     +.L_8b_nocache_copy_entry:
897     + movl %edx,%ecx
898     andl $7,%edx
899     shrl $3,%ecx
900     - jz 20f
901     -18: movq (%rsi),%r8
902     -19: movnti %r8,(%rdi)
903     + jz .L_4b_nocache_copy_entry /* jump if count is 0 */
904     +
905     + /* Perform 8-byte nocache loop-copy */
906     +.L_8b_nocache_copy_loop:
907     +20: movq (%rsi),%r8
908     +21: movnti %r8,(%rdi)
909     leaq 8(%rsi),%rsi
910     leaq 8(%rdi),%rdi
911     decl %ecx
912     - jnz 18b
913     -20: andl %edx,%edx
914     - jz 23f
915     + jnz .L_8b_nocache_copy_loop
916     +
917     + /* If no byte left, we're done */
918     +.L_4b_nocache_copy_entry:
919     + andl %edx,%edx
920     + jz .L_finish_copy
921     +
922     + /* If destination is not 4-byte aligned, go to byte copy: */
923     + movl %edi,%ecx
924     + andl $3,%ecx
925     + jnz .L_1b_cache_copy_entry
926     +
927     + /* Set 4-byte copy count (1 or 0) and remainder */
928     movl %edx,%ecx
929     -21: movb (%rsi),%al
930     -22: movb %al,(%rdi)
931     + andl $3,%edx
932     + shrl $2,%ecx
933     + jz .L_1b_cache_copy_entry /* jump if count is 0 */
934     +
935     + /* Perform 4-byte nocache copy: */
936     +30: movl (%rsi),%r8d
937     +31: movnti %r8d,(%rdi)
938     + leaq 4(%rsi),%rsi
939     + leaq 4(%rdi),%rdi
940     +
941     + /* If no bytes left, we're done: */
942     + andl %edx,%edx
943     + jz .L_finish_copy
944     +
945     + /* Perform byte "cache" loop-copy for the remainder */
946     +.L_1b_cache_copy_entry:
947     + movl %edx,%ecx
948     +.L_1b_cache_copy_loop:
949     +40: movb (%rsi),%al
950     +41: movb %al,(%rdi)
951     incq %rsi
952     incq %rdi
953     decl %ecx
954     - jnz 21b
955     -23: xorl %eax,%eax
956     + jnz .L_1b_cache_copy_loop
957     +
958     + /* Finished copying; fence the prior stores */
959     +.L_finish_copy:
960     + xorl %eax,%eax
961     ASM_CLAC
962     sfence
963     ret
964    
965     .section .fixup,"ax"
966     -30: shll $6,%ecx
967     +.L_fixup_4x8b_copy:
968     + shll $6,%ecx
969     addl %ecx,%edx
970     - jmp 60f
971     -40: lea (%rdx,%rcx,8),%rdx
972     - jmp 60f
973     -50: movl %ecx,%edx
974     -60: sfence
975     + jmp .L_fixup_handle_tail
976     +.L_fixup_8b_copy:
977     + lea (%rdx,%rcx,8),%rdx
978     + jmp .L_fixup_handle_tail
979     +.L_fixup_4b_copy:
980     + lea (%rdx,%rcx,4),%rdx
981     + jmp .L_fixup_handle_tail
982     +.L_fixup_1b_copy:
983     + movl %ecx,%edx
984     +.L_fixup_handle_tail:
985     + sfence
986     jmp copy_user_handle_tail
987     .previous
988    
989     - _ASM_EXTABLE(1b,30b)
990     - _ASM_EXTABLE(2b,30b)
991     - _ASM_EXTABLE(3b,30b)
992     - _ASM_EXTABLE(4b,30b)
993     - _ASM_EXTABLE(5b,30b)
994     - _ASM_EXTABLE(6b,30b)
995     - _ASM_EXTABLE(7b,30b)
996     - _ASM_EXTABLE(8b,30b)
997     - _ASM_EXTABLE(9b,30b)
998     - _ASM_EXTABLE(10b,30b)
999     - _ASM_EXTABLE(11b,30b)
1000     - _ASM_EXTABLE(12b,30b)
1001     - _ASM_EXTABLE(13b,30b)
1002     - _ASM_EXTABLE(14b,30b)
1003     - _ASM_EXTABLE(15b,30b)
1004     - _ASM_EXTABLE(16b,30b)
1005     - _ASM_EXTABLE(18b,40b)
1006     - _ASM_EXTABLE(19b,40b)
1007     - _ASM_EXTABLE(21b,50b)
1008     - _ASM_EXTABLE(22b,50b)
1009     + _ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
1010     + _ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
1011     + _ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
1012     + _ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
1013     + _ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
1014     + _ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
1015     + _ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
1016     + _ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
1017     + _ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
1018     + _ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
1019     + _ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
1020     + _ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
1021     + _ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
1022     + _ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
1023     + _ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
1024     + _ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
1025     + _ASM_EXTABLE(20b,.L_fixup_8b_copy)
1026     + _ASM_EXTABLE(21b,.L_fixup_8b_copy)
1027     + _ASM_EXTABLE(30b,.L_fixup_4b_copy)
1028     + _ASM_EXTABLE(31b,.L_fixup_4b_copy)
1029     + _ASM_EXTABLE(40b,.L_fixup_1b_copy)
1030     + _ASM_EXTABLE(41b,.L_fixup_1b_copy)
1031     ENDPROC(__copy_user_nocache)
1032     diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
1033     index eef44d9a3f77..e830c71a1323 100644
1034     --- a/arch/x86/mm/fault.c
1035     +++ b/arch/x86/mm/fault.c
1036     @@ -287,6 +287,9 @@ static noinline int vmalloc_fault(unsigned long address)
1037     if (!pmd_k)
1038     return -1;
1039    
1040     + if (pmd_huge(*pmd_k))
1041     + return 0;
1042     +
1043     pte_k = pte_offset_kernel(pmd_k, address);
1044     if (!pte_present(*pte_k))
1045     return -1;
1046     @@ -360,8 +363,6 @@ void vmalloc_sync_all(void)
1047     * 64-bit:
1048     *
1049     * Handle a fault on the vmalloc area
1050     - *
1051     - * This assumes no large pages in there.
1052     */
1053     static noinline int vmalloc_fault(unsigned long address)
1054     {
1055     @@ -403,17 +404,23 @@ static noinline int vmalloc_fault(unsigned long address)
1056     if (pud_none(*pud_ref))
1057     return -1;
1058    
1059     - if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
1060     + if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
1061     BUG();
1062    
1063     + if (pud_huge(*pud))
1064     + return 0;
1065     +
1066     pmd = pmd_offset(pud, address);
1067     pmd_ref = pmd_offset(pud_ref, address);
1068     if (pmd_none(*pmd_ref))
1069     return -1;
1070    
1071     - if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
1072     + if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
1073     BUG();
1074    
1075     + if (pmd_huge(*pmd))
1076     + return 0;
1077     +
1078     pte_ref = pte_offset_kernel(pmd_ref, address);
1079     if (!pte_present(*pte_ref))
1080     return -1;
1081     diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
1082     index a3137a4feed1..db20ee9a413a 100644
1083     --- a/arch/x86/mm/pageattr.c
1084     +++ b/arch/x86/mm/pageattr.c
1085     @@ -33,7 +33,7 @@ struct cpa_data {
1086     pgd_t *pgd;
1087     pgprot_t mask_set;
1088     pgprot_t mask_clr;
1089     - int numpages;
1090     + unsigned long numpages;
1091     int flags;
1092     unsigned long pfn;
1093     unsigned force_split : 1;
1094     @@ -1345,7 +1345,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
1095     * CPA operation. Either a large page has been
1096     * preserved or a single page update happened.
1097     */
1098     - BUG_ON(cpa->numpages > numpages);
1099     + BUG_ON(cpa->numpages > numpages || !cpa->numpages);
1100     numpages -= cpa->numpages;
1101     if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
1102     cpa->curpage++;
1103     diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
1104     index 52f708bcf77f..d50c701b19d6 100644
1105     --- a/drivers/hwspinlock/hwspinlock_core.c
1106     +++ b/drivers/hwspinlock/hwspinlock_core.c
1107     @@ -313,6 +313,10 @@ int of_hwspin_lock_get_id(struct device_node *np, int index)
1108     hwlock = radix_tree_deref_slot(slot);
1109     if (unlikely(!hwlock))
1110     continue;
1111     + if (radix_tree_is_indirect_ptr(hwlock)) {
1112     + slot = radix_tree_iter_retry(&iter);
1113     + continue;
1114     + }
1115    
1116     if (hwlock->bank->dev->of_node == args.np) {
1117     ret = 0;
1118     diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
1119     index 969428dd6329..16cc5c691a55 100644
1120     --- a/drivers/iio/accel/Kconfig
1121     +++ b/drivers/iio/accel/Kconfig
1122     @@ -173,6 +173,7 @@ config STK8312
1123     config STK8BA50
1124     tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver"
1125     depends on I2C
1126     + depends on IIO_TRIGGER
1127     help
1128     Say yes here to get support for the Sensortek STK8BA50 3-axis
1129     accelerometer.
1130     diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
1131     index 7868c744fd4b..1e7aded53117 100644
1132     --- a/drivers/iio/adc/Kconfig
1133     +++ b/drivers/iio/adc/Kconfig
1134     @@ -372,6 +372,7 @@ config TWL6030_GPADC
1135     config VF610_ADC
1136     tristate "Freescale vf610 ADC driver"
1137     depends on OF
1138     + depends on HAS_IOMEM
1139     select IIO_BUFFER
1140     select IIO_TRIGGERED_BUFFER
1141     help
1142     diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
1143     index 942320e32753..c1e05532d437 100644
1144     --- a/drivers/iio/adc/ti_am335x_adc.c
1145     +++ b/drivers/iio/adc/ti_am335x_adc.c
1146     @@ -289,7 +289,7 @@ static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev,
1147     goto error_kfifo_free;
1148    
1149     indio_dev->setup_ops = setup_ops;
1150     - indio_dev->modes |= INDIO_BUFFER_HARDWARE;
1151     + indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
1152    
1153     return 0;
1154    
1155     diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
1156     index 43d14588448d..b4dde8315210 100644
1157     --- a/drivers/iio/dac/mcp4725.c
1158     +++ b/drivers/iio/dac/mcp4725.c
1159     @@ -300,6 +300,7 @@ static int mcp4725_probe(struct i2c_client *client,
1160     data->client = client;
1161    
1162     indio_dev->dev.parent = &client->dev;
1163     + indio_dev->name = id->name;
1164     indio_dev->info = &mcp4725_info;
1165     indio_dev->channels = &mcp4725_channel;
1166     indio_dev->num_channels = 1;
1167     diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
1168     index cb32b593f1c5..36607d52fee0 100644
1169     --- a/drivers/iio/imu/adis_buffer.c
1170     +++ b/drivers/iio/imu/adis_buffer.c
1171     @@ -43,7 +43,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
1172     return -ENOMEM;
1173    
1174     rx = adis->buffer;
1175     - tx = rx + indio_dev->scan_bytes;
1176     + tx = rx + scan_count;
1177    
1178     spi_message_init(&adis->msg);
1179    
1180     diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
1181     index c8bad3cf891d..217e9306aa0f 100644
1182     --- a/drivers/iio/inkern.c
1183     +++ b/drivers/iio/inkern.c
1184     @@ -351,6 +351,8 @@ EXPORT_SYMBOL_GPL(iio_channel_get);
1185    
1186     void iio_channel_release(struct iio_channel *channel)
1187     {
1188     + if (!channel)
1189     + return;
1190     iio_device_put(channel->indio_dev);
1191     kfree(channel);
1192     }
1193     diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
1194     index 60537ec0c923..53201d99a16c 100644
1195     --- a/drivers/iio/light/acpi-als.c
1196     +++ b/drivers/iio/light/acpi-als.c
1197     @@ -54,7 +54,9 @@ static const struct iio_chan_spec acpi_als_channels[] = {
1198     .realbits = 32,
1199     .storagebits = 32,
1200     },
1201     - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
1202     + /* _RAW is here for backward ABI compatibility */
1203     + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
1204     + BIT(IIO_CHAN_INFO_PROCESSED),
1205     },
1206     };
1207    
1208     @@ -152,7 +154,7 @@ static int acpi_als_read_raw(struct iio_dev *indio_dev,
1209     s32 temp_val;
1210     int ret;
1211    
1212     - if (mask != IIO_CHAN_INFO_RAW)
1213     + if ((mask != IIO_CHAN_INFO_PROCESSED) && (mask != IIO_CHAN_INFO_RAW))
1214     return -EINVAL;
1215    
1216     /* we support only illumination (_ALI) so far. */
1217     diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
1218     index 809a961b9a7f..6bf89d8f3741 100644
1219     --- a/drivers/iio/light/ltr501.c
1220     +++ b/drivers/iio/light/ltr501.c
1221     @@ -180,7 +180,7 @@ static const struct ltr501_samp_table ltr501_ps_samp_table[] = {
1222     {500000, 2000000}
1223     };
1224    
1225     -static unsigned int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
1226     +static int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
1227     int len, int val, int val2)
1228     {
1229     int i, freq;
1230     diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
1231     index f5ecd6e19f5d..a0d7deeac62f 100644
1232     --- a/drivers/iio/pressure/mpl115.c
1233     +++ b/drivers/iio/pressure/mpl115.c
1234     @@ -117,7 +117,7 @@ static int mpl115_read_raw(struct iio_dev *indio_dev,
1235     *val = ret >> 6;
1236     return IIO_VAL_INT;
1237     case IIO_CHAN_INFO_OFFSET:
1238     - *val = 605;
1239     + *val = -605;
1240     *val2 = 750000;
1241     return IIO_VAL_INT_PLUS_MICRO;
1242     case IIO_CHAN_INFO_SCALE:
1243     diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
1244     index 537ebb0e193a..78f93cf68840 100644
1245     --- a/drivers/input/mouse/elantech.c
1246     +++ b/drivers/input/mouse/elantech.c
1247     @@ -1222,7 +1222,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1248     input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2,
1249     ETP_WMAX_V2, 0, 0);
1250     }
1251     - input_mt_init_slots(dev, 2, 0);
1252     + input_mt_init_slots(dev, 2, INPUT_MT_SEMI_MT);
1253     input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
1254     input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
1255     break;
1256     diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
1257     index e272f06258ce..a3f0f5a47490 100644
1258     --- a/drivers/input/mouse/vmmouse.c
1259     +++ b/drivers/input/mouse/vmmouse.c
1260     @@ -458,8 +458,6 @@ int vmmouse_init(struct psmouse *psmouse)
1261     priv->abs_dev = abs_dev;
1262     psmouse->private = priv;
1263    
1264     - input_set_capability(rel_dev, EV_REL, REL_WHEEL);
1265     -
1266     /* Set up and register absolute device */
1267     snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
1268     psmouse->ps2dev.serio->phys);
1269     @@ -475,10 +473,6 @@ int vmmouse_init(struct psmouse *psmouse)
1270     abs_dev->id.version = psmouse->model;
1271     abs_dev->dev.parent = &psmouse->ps2dev.serio->dev;
1272    
1273     - error = input_register_device(priv->abs_dev);
1274     - if (error)
1275     - goto init_fail;
1276     -
1277     /* Set absolute device capabilities */
1278     input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
1279     input_set_capability(abs_dev, EV_KEY, BTN_RIGHT);
1280     @@ -488,6 +482,13 @@ int vmmouse_init(struct psmouse *psmouse)
1281     input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
1282     input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0);
1283    
1284     + error = input_register_device(priv->abs_dev);
1285     + if (error)
1286     + goto init_fail;
1287     +
1288     + /* Add wheel capability to the relative device */
1289     + input_set_capability(rel_dev, EV_REL, REL_WHEEL);
1290     +
1291     psmouse->protocol_handler = vmmouse_process_byte;
1292     psmouse->disconnect = vmmouse_disconnect;
1293     psmouse->reconnect = vmmouse_reconnect;
1294     diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
1295     index c11556563ef0..68f5f4a0f1e7 100644
1296     --- a/drivers/input/serio/i8042-x86ia64io.h
1297     +++ b/drivers/input/serio/i8042-x86ia64io.h
1298     @@ -258,6 +258,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
1299     },
1300     },
1301     {
1302     + /* Fujitsu Lifebook U745 */
1303     + .matches = {
1304     + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1305     + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
1306     + },
1307     + },
1308     + {
1309     /* Fujitsu T70H */
1310     .matches = {
1311     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1312     diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1313     index 8b2be1e7714f..fc836f523afa 100644
1314     --- a/drivers/iommu/amd_iommu.c
1315     +++ b/drivers/iommu/amd_iommu.c
1316     @@ -1905,7 +1905,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
1317     /* Update device table */
1318     set_dte_entry(dev_data->devid, domain, ats);
1319     if (alias != dev_data->devid)
1320     - set_dte_entry(dev_data->devid, domain, ats);
1321     + set_dte_entry(alias, domain, ats);
1322    
1323     device_flush_dte(dev_data);
1324     }
1325     diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
1326     index 80e3c176008e..55a19e49205b 100644
1327     --- a/drivers/iommu/dmar.c
1328     +++ b/drivers/iommu/dmar.c
1329     @@ -1347,7 +1347,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
1330    
1331     raw_spin_lock_irqsave(&iommu->register_lock, flags);
1332    
1333     - sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
1334     + sts = readl(iommu->reg + DMAR_GSTS_REG);
1335     if (!(sts & DMA_GSTS_QIES))
1336     goto end;
1337    
1338     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1339     index ac7387686ddc..986a53e3eb96 100644
1340     --- a/drivers/iommu/intel-iommu.c
1341     +++ b/drivers/iommu/intel-iommu.c
1342     @@ -1489,7 +1489,7 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1343     {
1344     struct pci_dev *pdev;
1345    
1346     - if (dev_is_pci(info->dev))
1347     + if (!dev_is_pci(info->dev))
1348     return;
1349    
1350     pdev = to_pci_dev(info->dev);
1351     diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
1352     index 50464833d0b8..d9939fa9b588 100644
1353     --- a/drivers/iommu/intel-svm.c
1354     +++ b/drivers/iommu/intel-svm.c
1355     @@ -249,12 +249,30 @@ static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *s
1356     static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
1357     {
1358     struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
1359     + struct intel_svm_dev *sdev;
1360    
1361     + /* This might end up being called from exit_mmap(), *before* the page
1362     + * tables are cleared. And __mmu_notifier_release() will delete us from
1363     + * the list of notifiers so that our invalidate_range() callback doesn't
1364     + * get called when the page tables are cleared. So we need to protect
1365     + * against hardware accessing those page tables.
1366     + *
1367     + * We do it by clearing the entry in the PASID table and then flushing
1368     + * the IOTLB and the PASID table caches. This might upset hardware;
1369     + * perhaps we'll want to point the PASID to a dummy PGD (like the zero
1370     + * page) so that we end up taking a fault that the hardware really
1371     + * *has* to handle gracefully without affecting other processes.
1372     + */
1373     svm->iommu->pasid_table[svm->pasid].val = 0;
1374     + wmb();
1375     +
1376     + rcu_read_lock();
1377     + list_for_each_entry_rcu(sdev, &svm->devs, list) {
1378     + intel_flush_pasid_dev(svm, sdev, svm->pasid);
1379     + intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
1380     + }
1381     + rcu_read_unlock();
1382    
1383     - /* There's no need to do any flush because we can't get here if there
1384     - * are any devices left anyway. */
1385     - WARN_ON(!list_empty(&svm->devs));
1386     }
1387    
1388     static const struct mmu_notifier_ops intel_mmuops = {
1389     @@ -379,7 +397,6 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
1390     goto out;
1391     }
1392     iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
1393     - mm = NULL;
1394     } else
1395     iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
1396     wmb();
1397     @@ -442,11 +459,11 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
1398     kfree_rcu(sdev, rcu);
1399    
1400     if (list_empty(&svm->devs)) {
1401     - mmu_notifier_unregister(&svm->notifier, svm->mm);
1402    
1403     idr_remove(&svm->iommu->pasid_idr, svm->pasid);
1404     if (svm->mm)
1405     - mmput(svm->mm);
1406     + mmu_notifier_unregister(&svm->notifier, svm->mm);
1407     +
1408     /* We mandate that no page faults may be outstanding
1409     * for the PASID when intel_svm_unbind_mm() is called.
1410     * If that is not obeyed, subtle errors will happen.
1411     @@ -507,6 +524,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
1412     struct intel_svm *svm = NULL;
1413     int head, tail, handled = 0;
1414    
1415     + /* Clear PPR bit before reading head/tail registers, to
1416     + * ensure that we get a new interrupt if needed. */
1417     + writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
1418     +
1419     tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
1420     head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
1421     while (head != tail) {
1422     @@ -551,6 +572,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
1423     * any faults on kernel addresses. */
1424     if (!svm->mm)
1425     goto bad_req;
1426     + /* If the mm is already defunct, don't handle faults. */
1427     + if (!atomic_inc_not_zero(&svm->mm->mm_users))
1428     + goto bad_req;
1429     down_read(&svm->mm->mmap_sem);
1430     vma = find_extend_vma(svm->mm, address);
1431     if (!vma || address < vma->vm_start)
1432     @@ -567,6 +591,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
1433     result = QI_RESP_SUCCESS;
1434     invalid:
1435     up_read(&svm->mm->mmap_sem);
1436     + mmput(svm->mm);
1437     bad_req:
1438     /* Accounting for major/minor faults? */
1439     rcu_read_lock();
1440     diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
1441     index 1fae1881648c..e9b241b1c9dd 100644
1442     --- a/drivers/iommu/intel_irq_remapping.c
1443     +++ b/drivers/iommu/intel_irq_remapping.c
1444     @@ -629,7 +629,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
1445    
1446     raw_spin_lock_irqsave(&iommu->register_lock, flags);
1447    
1448     - sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
1449     + sts = readl(iommu->reg + DMAR_GSTS_REG);
1450     if (!(sts & DMA_GSTS_IRES))
1451     goto end;
1452    
1453     diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
1454     index 0955b2cb10fe..62120c38d56b 100644
1455     --- a/drivers/nvdimm/namespace_devs.c
1456     +++ b/drivers/nvdimm/namespace_devs.c
1457     @@ -77,6 +77,59 @@ static bool is_namespace_io(struct device *dev)
1458     return dev ? dev->type == &namespace_io_device_type : false;
1459     }
1460    
1461     +static int is_uuid_busy(struct device *dev, void *data)
1462     +{
1463     + u8 *uuid1 = data, *uuid2 = NULL;
1464     +
1465     + if (is_namespace_pmem(dev)) {
1466     + struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1467     +
1468     + uuid2 = nspm->uuid;
1469     + } else if (is_namespace_blk(dev)) {
1470     + struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1471     +
1472     + uuid2 = nsblk->uuid;
1473     + } else if (is_nd_btt(dev)) {
1474     + struct nd_btt *nd_btt = to_nd_btt(dev);
1475     +
1476     + uuid2 = nd_btt->uuid;
1477     + } else if (is_nd_pfn(dev)) {
1478     + struct nd_pfn *nd_pfn = to_nd_pfn(dev);
1479     +
1480     + uuid2 = nd_pfn->uuid;
1481     + }
1482     +
1483     + if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
1484     + return -EBUSY;
1485     +
1486     + return 0;
1487     +}
1488     +
1489     +static int is_namespace_uuid_busy(struct device *dev, void *data)
1490     +{
1491     + if (is_nd_pmem(dev) || is_nd_blk(dev))
1492     + return device_for_each_child(dev, data, is_uuid_busy);
1493     + return 0;
1494     +}
1495     +
1496     +/**
1497     + * nd_is_uuid_unique - verify that no other namespace has @uuid
1498     + * @dev: any device on a nvdimm_bus
1499     + * @uuid: uuid to check
1500     + */
1501     +bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
1502     +{
1503     + struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
1504     +
1505     + if (!nvdimm_bus)
1506     + return false;
1507     + WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
1508     + if (device_for_each_child(&nvdimm_bus->dev, uuid,
1509     + is_namespace_uuid_busy) != 0)
1510     + return false;
1511     + return true;
1512     +}
1513     +
1514     bool pmem_should_map_pages(struct device *dev)
1515     {
1516     struct nd_region *nd_region = to_nd_region(dev->parent);
1517     diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
1518     index 529f3f02e7b2..9521696c9385 100644
1519     --- a/drivers/nvdimm/region_devs.c
1520     +++ b/drivers/nvdimm/region_devs.c
1521     @@ -134,62 +134,6 @@ int nd_region_to_nstype(struct nd_region *nd_region)
1522     }
1523     EXPORT_SYMBOL(nd_region_to_nstype);
1524    
1525     -static int is_uuid_busy(struct device *dev, void *data)
1526     -{
1527     - struct nd_region *nd_region = to_nd_region(dev->parent);
1528     - u8 *uuid = data;
1529     -
1530     - switch (nd_region_to_nstype(nd_region)) {
1531     - case ND_DEVICE_NAMESPACE_PMEM: {
1532     - struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1533     -
1534     - if (!nspm->uuid)
1535     - break;
1536     - if (memcmp(uuid, nspm->uuid, NSLABEL_UUID_LEN) == 0)
1537     - return -EBUSY;
1538     - break;
1539     - }
1540     - case ND_DEVICE_NAMESPACE_BLK: {
1541     - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1542     -
1543     - if (!nsblk->uuid)
1544     - break;
1545     - if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) == 0)
1546     - return -EBUSY;
1547     - break;
1548     - }
1549     - default:
1550     - break;
1551     - }
1552     -
1553     - return 0;
1554     -}
1555     -
1556     -static int is_namespace_uuid_busy(struct device *dev, void *data)
1557     -{
1558     - if (is_nd_pmem(dev) || is_nd_blk(dev))
1559     - return device_for_each_child(dev, data, is_uuid_busy);
1560     - return 0;
1561     -}
1562     -
1563     -/**
1564     - * nd_is_uuid_unique - verify that no other namespace has @uuid
1565     - * @dev: any device on a nvdimm_bus
1566     - * @uuid: uuid to check
1567     - */
1568     -bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
1569     -{
1570     - struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
1571     -
1572     - if (!nvdimm_bus)
1573     - return false;
1574     - WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
1575     - if (device_for_each_child(&nvdimm_bus->dev, uuid,
1576     - is_namespace_uuid_busy) != 0)
1577     - return false;
1578     - return true;
1579     -}
1580     -
1581     static ssize_t size_show(struct device *dev,
1582     struct device_attribute *attr, char *buf)
1583     {
1584     diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
1585     index 3a707dd14238..f96065a81d1e 100644
1586     --- a/drivers/phy/phy-twl4030-usb.c
1587     +++ b/drivers/phy/phy-twl4030-usb.c
1588     @@ -715,6 +715,7 @@ static int twl4030_usb_probe(struct platform_device *pdev)
1589     pm_runtime_use_autosuspend(&pdev->dev);
1590     pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
1591     pm_runtime_enable(&pdev->dev);
1592     + pm_runtime_get_sync(&pdev->dev);
1593    
1594     /* Our job is to use irqs and status from the power module
1595     * to keep the transceiver disabled when nothing's connected.
1596     @@ -750,6 +751,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
1597     struct twl4030_usb *twl = platform_get_drvdata(pdev);
1598     int val;
1599    
1600     + usb_remove_phy(&twl->phy);
1601     pm_runtime_get_sync(twl->dev);
1602     cancel_delayed_work(&twl->id_workaround_work);
1603     device_remove_file(twl->dev, &dev_attr_vbus);
1604     @@ -757,6 +759,13 @@ static int twl4030_usb_remove(struct platform_device *pdev)
1605     /* set transceiver mode to power on defaults */
1606     twl4030_usb_set_mode(twl, -1);
1607    
1608     + /* idle ulpi before powering off */
1609     + if (cable_present(twl->linkstat))
1610     + pm_runtime_put_noidle(twl->dev);
1611     + pm_runtime_mark_last_busy(twl->dev);
1612     + pm_runtime_put_sync_suspend(twl->dev);
1613     + pm_runtime_disable(twl->dev);
1614     +
1615     /* autogate 60MHz ULPI clock,
1616     * clear dpll clock request for i2c access,
1617     * disable 32KHz
1618     @@ -771,11 +780,6 @@ static int twl4030_usb_remove(struct platform_device *pdev)
1619     /* disable complete OTG block */
1620     twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
1621    
1622     - if (cable_present(twl->linkstat))
1623     - pm_runtime_put_noidle(twl->dev);
1624     - pm_runtime_mark_last_busy(twl->dev);
1625     - pm_runtime_put(twl->dev);
1626     -
1627     return 0;
1628     }
1629    
1630     diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
1631     index 02bc5a6343c3..aa454241489c 100644
1632     --- a/drivers/platform/x86/intel_scu_ipcutil.c
1633     +++ b/drivers/platform/x86/intel_scu_ipcutil.c
1634     @@ -49,7 +49,7 @@ struct scu_ipc_data {
1635    
1636     static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
1637     {
1638     - int count = data->count;
1639     + unsigned int count = data->count;
1640    
1641     if (count == 0 || count == 3 || count > 4)
1642     return -EINVAL;
1643     diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
1644     index 361358134315..93880ed6291c 100644
1645     --- a/drivers/scsi/device_handler/scsi_dh_rdac.c
1646     +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
1647     @@ -562,7 +562,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
1648     /*
1649     * Command Lock contention
1650     */
1651     - err = SCSI_DH_RETRY;
1652     + err = SCSI_DH_IMM_RETRY;
1653     break;
1654     default:
1655     break;
1656     @@ -612,6 +612,8 @@ retry:
1657     err = mode_select_handle_sense(sdev, h->sense);
1658     if (err == SCSI_DH_RETRY && retry_cnt--)
1659     goto retry;
1660     + if (err == SCSI_DH_IMM_RETRY)
1661     + goto retry;
1662     }
1663     if (err == SCSI_DH_OK) {
1664     h->state = RDAC_STATE_ACTIVE;
1665     diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
1666     index 2c1160c7ec92..da2e068ee47d 100644
1667     --- a/drivers/scsi/scsi_devinfo.c
1668     +++ b/drivers/scsi/scsi_devinfo.c
1669     @@ -205,6 +205,7 @@ static struct {
1670     {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
1671     {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
1672     {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
1673     + {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
1674     {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
1675     {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
1676     {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
1677     @@ -227,6 +228,7 @@ static struct {
1678     {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
1679     {"Promise", "", NULL, BLIST_SPARSELUN},
1680     {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
1681     + {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
1682     {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
1683     {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
1684     {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
1685     diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
1686     index 21930c9ac9cd..c8115b4fe474 100644
1687     --- a/drivers/scsi/scsi_sysfs.c
1688     +++ b/drivers/scsi/scsi_sysfs.c
1689     @@ -1192,16 +1192,18 @@ static void __scsi_remove_target(struct scsi_target *starget)
1690     void scsi_remove_target(struct device *dev)
1691     {
1692     struct Scsi_Host *shost = dev_to_shost(dev->parent);
1693     - struct scsi_target *starget;
1694     + struct scsi_target *starget, *last_target = NULL;
1695     unsigned long flags;
1696    
1697     restart:
1698     spin_lock_irqsave(shost->host_lock, flags);
1699     list_for_each_entry(starget, &shost->__targets, siblings) {
1700     - if (starget->state == STARGET_DEL)
1701     + if (starget->state == STARGET_DEL ||
1702     + starget == last_target)
1703     continue;
1704     if (starget->dev.parent == dev || &starget->dev == dev) {
1705     kref_get(&starget->reap_ref);
1706     + last_target = starget;
1707     spin_unlock_irqrestore(shost->host_lock, flags);
1708     __scsi_remove_target(starget);
1709     scsi_target_reap(starget);
1710     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1711     index 4e08d1cd704d..84fa4c46eaa6 100644
1712     --- a/drivers/scsi/sd.c
1713     +++ b/drivers/scsi/sd.c
1714     @@ -3268,8 +3268,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
1715     struct scsi_disk *sdkp = dev_get_drvdata(dev);
1716     int ret = 0;
1717    
1718     - if (!sdkp)
1719     - return 0; /* this can happen */
1720     + if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
1721     + return 0;
1722    
1723     if (sdkp->WCE && sdkp->media_present) {
1724     sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
1725     @@ -3308,6 +3308,9 @@ static int sd_resume(struct device *dev)
1726     {
1727     struct scsi_disk *sdkp = dev_get_drvdata(dev);
1728    
1729     + if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
1730     + return 0;
1731     +
1732     if (!sdkp->device->manage_start_stop)
1733     return 0;
1734    
1735     diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
1736     index 503ab8b46c0b..5e820674432c 100644
1737     --- a/drivers/scsi/sg.c
1738     +++ b/drivers/scsi/sg.c
1739     @@ -1261,7 +1261,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1740     }
1741    
1742     sfp->mmap_called = 1;
1743     - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1744     + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
1745     vma->vm_private_data = sfp;
1746     vma->vm_ops = &sg_mmap_vm_ops;
1747     return 0;
1748     diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
1749     index 8bd54a64efd6..64c867405ad4 100644
1750     --- a/drivers/scsi/sr.c
1751     +++ b/drivers/scsi/sr.c
1752     @@ -144,6 +144,9 @@ static int sr_runtime_suspend(struct device *dev)
1753     {
1754     struct scsi_cd *cd = dev_get_drvdata(dev);
1755    
1756     + if (!cd) /* E.g.: runtime suspend following sr_remove() */
1757     + return 0;
1758     +
1759     if (cd->media_present)
1760     return -EBUSY;
1761     else
1762     @@ -985,6 +988,7 @@ static int sr_remove(struct device *dev)
1763     scsi_autopm_get_device(cd->device);
1764    
1765     del_gendisk(cd->disk);
1766     + dev_set_drvdata(dev, NULL);
1767    
1768     mutex_lock(&sr_ref_mutex);
1769     kref_put(&cd->kref, sr_kref_release);
1770     diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
1771     index aa5ab6c80ed4..41ef099b7aa6 100644
1772     --- a/drivers/staging/speakup/selection.c
1773     +++ b/drivers/staging/speakup/selection.c
1774     @@ -142,7 +142,9 @@ static void __speakup_paste_selection(struct work_struct *work)
1775     struct tty_ldisc *ld;
1776     DECLARE_WAITQUEUE(wait, current);
1777    
1778     - ld = tty_ldisc_ref_wait(tty);
1779     + ld = tty_ldisc_ref(tty);
1780     + if (!ld)
1781     + goto tty_unref;
1782     tty_buffer_lock_exclusive(&vc->port);
1783    
1784     add_wait_queue(&vc->paste_wait, &wait);
1785     @@ -162,6 +164,7 @@ static void __speakup_paste_selection(struct work_struct *work)
1786    
1787     tty_buffer_unlock_exclusive(&vc->port);
1788     tty_ldisc_deref(ld);
1789     +tty_unref:
1790     tty_kref_put(tty);
1791     }
1792    
1793     diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
1794     index 255204cc43e6..b4bfd706ac94 100644
1795     --- a/drivers/target/iscsi/iscsi_target_configfs.c
1796     +++ b/drivers/target/iscsi/iscsi_target_configfs.c
1797     @@ -1593,7 +1593,8 @@ static int lio_tpg_check_prot_fabric_only(
1798     }
1799    
1800     /*
1801     - * Called with spin_lock_bh(struct se_portal_group->session_lock) held..
1802     + * Called with spin_lock_irq(struct se_portal_group->session_lock) held
1803     + * or not held.
1804     *
1805     * Also, this function calls iscsit_inc_session_usage_count() on the
1806     * struct iscsi_session in question.
1807     @@ -1601,19 +1602,32 @@ static int lio_tpg_check_prot_fabric_only(
1808     static int lio_tpg_shutdown_session(struct se_session *se_sess)
1809     {
1810     struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1811     + struct se_portal_group *se_tpg = se_sess->se_tpg;
1812     + bool local_lock = false;
1813     +
1814     + if (!spin_is_locked(&se_tpg->session_lock)) {
1815     + spin_lock_irq(&se_tpg->session_lock);
1816     + local_lock = true;
1817     + }
1818    
1819     spin_lock(&sess->conn_lock);
1820     if (atomic_read(&sess->session_fall_back_to_erl0) ||
1821     atomic_read(&sess->session_logout) ||
1822     (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
1823     spin_unlock(&sess->conn_lock);
1824     + if (local_lock)
1825     + spin_unlock_irq(&sess->conn_lock);
1826     return 0;
1827     }
1828     atomic_set(&sess->session_reinstatement, 1);
1829     spin_unlock(&sess->conn_lock);
1830    
1831     iscsit_stop_time2retain_timer(sess);
1832     + spin_unlock_irq(&se_tpg->session_lock);
1833     +
1834     iscsit_stop_session(sess, 1, 1);
1835     + if (!local_lock)
1836     + spin_lock_irq(&se_tpg->session_lock);
1837    
1838     return 1;
1839     }
1840     diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
1841     index a45660f62db5..78e983677339 100644
1842     --- a/drivers/tty/pty.c
1843     +++ b/drivers/tty/pty.c
1844     @@ -681,7 +681,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
1845     /* this is called once with whichever end is closed last */
1846     static void pty_unix98_shutdown(struct tty_struct *tty)
1847     {
1848     - devpts_kill_index(tty->driver_data, tty->index);
1849     + struct inode *ptmx_inode;
1850     +
1851     + if (tty->driver->subtype == PTY_TYPE_MASTER)
1852     + ptmx_inode = tty->driver_data;
1853     + else
1854     + ptmx_inode = tty->link->driver_data;
1855     + devpts_kill_index(ptmx_inode, tty->index);
1856     + devpts_del_ref(ptmx_inode);
1857     }
1858    
1859     static const struct tty_operations ptm_unix98_ops = {
1860     @@ -773,6 +780,18 @@ static int ptmx_open(struct inode *inode, struct file *filp)
1861     set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
1862     tty->driver_data = inode;
1863    
1864     + /*
1865     + * In the case where all references to ptmx inode are dropped and we
1866     + * still have /dev/tty opened pointing to the master/slave pair (ptmx
1867     + * is closed/released before /dev/tty), we must make sure that the inode
1868     + * is still valid when we call the final pty_unix98_shutdown, thus we
1869     + * hold an additional reference to the ptmx inode. For the same /dev/tty
1870     + * last close case, we also need to make sure the super_block isn't
1871     + * destroyed (devpts instance unmounted), before /dev/tty is closed and
1872     + * on its release devpts_kill_index is called.
1873     + */
1874     + devpts_add_ref(inode);
1875     +
1876     tty_add_file(tty, filp);
1877    
1878     slave_inode = devpts_pty_new(inode,
1879     diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
1880     index 4097f3f65b3b..7cd6f9a90542 100644
1881     --- a/drivers/tty/serial/8250/8250_pci.c
1882     +++ b/drivers/tty/serial/8250/8250_pci.c
1883     @@ -1379,6 +1379,9 @@ ce4100_serial_setup(struct serial_private *priv,
1884     #define PCI_DEVICE_ID_INTEL_BSW_UART1 0x228a
1885     #define PCI_DEVICE_ID_INTEL_BSW_UART2 0x228c
1886    
1887     +#define PCI_DEVICE_ID_INTEL_BDW_UART1 0x9ce3
1888     +#define PCI_DEVICE_ID_INTEL_BDW_UART2 0x9ce4
1889     +
1890     #define BYT_PRV_CLK 0x800
1891     #define BYT_PRV_CLK_EN (1 << 0)
1892     #define BYT_PRV_CLK_M_VAL_SHIFT 1
1893     @@ -1461,11 +1464,13 @@ byt_serial_setup(struct serial_private *priv,
1894     switch (pdev->device) {
1895     case PCI_DEVICE_ID_INTEL_BYT_UART1:
1896     case PCI_DEVICE_ID_INTEL_BSW_UART1:
1897     + case PCI_DEVICE_ID_INTEL_BDW_UART1:
1898     rx_param->src_id = 3;
1899     tx_param->dst_id = 2;
1900     break;
1901     case PCI_DEVICE_ID_INTEL_BYT_UART2:
1902     case PCI_DEVICE_ID_INTEL_BSW_UART2:
1903     + case PCI_DEVICE_ID_INTEL_BDW_UART2:
1904     rx_param->src_id = 5;
1905     tx_param->dst_id = 4;
1906     break;
1907     @@ -1936,6 +1941,7 @@ pci_wch_ch38x_setup(struct serial_private *priv,
1908     #define PCIE_VENDOR_ID_WCH 0x1c00
1909     #define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
1910     #define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
1911     +#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
1912    
1913     #define PCI_VENDOR_ID_PERICOM 0x12D8
1914     #define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
1915     @@ -2062,6 +2068,20 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
1916     .subdevice = PCI_ANY_ID,
1917     .setup = byt_serial_setup,
1918     },
1919     + {
1920     + .vendor = PCI_VENDOR_ID_INTEL,
1921     + .device = PCI_DEVICE_ID_INTEL_BDW_UART1,
1922     + .subvendor = PCI_ANY_ID,
1923     + .subdevice = PCI_ANY_ID,
1924     + .setup = byt_serial_setup,
1925     + },
1926     + {
1927     + .vendor = PCI_VENDOR_ID_INTEL,
1928     + .device = PCI_DEVICE_ID_INTEL_BDW_UART2,
1929     + .subvendor = PCI_ANY_ID,
1930     + .subdevice = PCI_ANY_ID,
1931     + .setup = byt_serial_setup,
1932     + },
1933     /*
1934     * ITE
1935     */
1936     @@ -2618,6 +2638,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
1937     .subdevice = PCI_ANY_ID,
1938     .setup = pci_wch_ch353_setup,
1939     },
1940     + /* WCH CH382 2S card (16850 clone) */
1941     + {
1942     + .vendor = PCIE_VENDOR_ID_WCH,
1943     + .device = PCIE_DEVICE_ID_WCH_CH382_2S,
1944     + .subvendor = PCI_ANY_ID,
1945     + .subdevice = PCI_ANY_ID,
1946     + .setup = pci_wch_ch38x_setup,
1947     + },
1948     /* WCH CH382 2S1P card (16850 clone) */
1949     {
1950     .vendor = PCIE_VENDOR_ID_WCH,
1951     @@ -2936,6 +2964,7 @@ enum pci_board_num_t {
1952     pbn_fintek_4,
1953     pbn_fintek_8,
1954     pbn_fintek_12,
1955     + pbn_wch382_2,
1956     pbn_wch384_4,
1957     pbn_pericom_PI7C9X7951,
1958     pbn_pericom_PI7C9X7952,
1959     @@ -3756,6 +3785,13 @@ static struct pciserial_board pci_boards[] = {
1960     .base_baud = 115200,
1961     .first_offset = 0x40,
1962     },
1963     + [pbn_wch382_2] = {
1964     + .flags = FL_BASE0,
1965     + .num_ports = 2,
1966     + .base_baud = 115200,
1967     + .uart_offset = 8,
1968     + .first_offset = 0xC0,
1969     + },
1970     [pbn_wch384_4] = {
1971     .flags = FL_BASE0,
1972     .num_ports = 4,
1973     @@ -5506,6 +5542,16 @@ static struct pci_device_id serial_pci_tbl[] = {
1974     PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
1975     pbn_byt },
1976    
1977     + /* Intel Broadwell */
1978     + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART1,
1979     + PCI_ANY_ID, PCI_ANY_ID,
1980     + PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
1981     + pbn_byt },
1982     + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART2,
1983     + PCI_ANY_ID, PCI_ANY_ID,
1984     + PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
1985     + pbn_byt },
1986     +
1987     /*
1988     * Intel Quark x1000
1989     */
1990     @@ -5545,6 +5591,10 @@ static struct pci_device_id serial_pci_tbl[] = {
1991     PCI_ANY_ID, PCI_ANY_ID,
1992     0, 0, pbn_b0_bt_2_115200 },
1993    
1994     + { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
1995     + PCI_ANY_ID, PCI_ANY_ID,
1996     + 0, 0, pbn_wch382_2 },
1997     +
1998     { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
1999     PCI_ANY_ID, PCI_ANY_ID,
2000     0, 0, pbn_wch384_4 },
2001     diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
2002     index 9d4c84f7485f..24280d9a05e9 100644
2003     --- a/drivers/tty/serial/omap-serial.c
2004     +++ b/drivers/tty/serial/omap-serial.c
2005     @@ -1343,7 +1343,7 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
2006    
2007     /* Enable or disable the rs485 support */
2008     static int
2009     -serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
2010     +serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
2011     {
2012     struct uart_omap_port *up = to_uart_omap_port(port);
2013     unsigned int mode;
2014     @@ -1356,8 +1356,12 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
2015     up->ier = 0;
2016     serial_out(up, UART_IER, 0);
2017    
2018     + /* Clamp the delays to [0, 100ms] */
2019     + rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
2020     + rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
2021     +
2022     /* store new config */
2023     - port->rs485 = *rs485conf;
2024     + port->rs485 = *rs485;
2025    
2026     /*
2027     * Just as a precaution, only allow rs485
2028     diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2029     index eeaa6c6bd540..db0f0831b94f 100644
2030     --- a/drivers/usb/host/xhci-ring.c
2031     +++ b/drivers/usb/host/xhci-ring.c
2032     @@ -2192,10 +2192,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2033     }
2034     /* Fast path - was this the last TRB in the TD for this URB? */
2035     } else if (event_trb == td->last_trb) {
2036     - if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX)
2037     - return finish_td(xhci, td, event_trb, event, ep,
2038     - status, false);
2039     -
2040     if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2041     td->urb->actual_length =
2042     td->urb->transfer_buffer_length -
2043     @@ -2247,12 +2243,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2044     td->urb->actual_length +=
2045     TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2046     EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2047     -
2048     - if (trb_comp_code == COMP_SHORT_TX) {
2049     - xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n");
2050     - td->urb_length_set = true;
2051     - return 0;
2052     - }
2053     }
2054    
2055     return finish_td(xhci, td, event_trb, event, ep, status, false);
2056     diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
2057     index dca0a4692f08..776d59c32bc5 100644
2058     --- a/drivers/usb/host/xhci.c
2059     +++ b/drivers/usb/host/xhci.c
2060     @@ -1549,7 +1549,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2061     xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
2062     "HW died, freeing TD.");
2063     urb_priv = urb->hcpriv;
2064     - for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
2065     + for (i = urb_priv->td_cnt;
2066     + i < urb_priv->length && xhci->devs[urb->dev->slot_id];
2067     + i++) {
2068     td = urb_priv->td[i];
2069     if (!list_empty(&td->td_list))
2070     list_del_init(&td->td_list);
2071     diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
2072     index d453d62ab0c6..e2f659dc5745 100644
2073     --- a/fs/btrfs/backref.c
2074     +++ b/fs/btrfs/backref.c
2075     @@ -1417,7 +1417,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
2076     read_extent_buffer(eb, dest + bytes_left,
2077     name_off, name_len);
2078     if (eb != eb_in) {
2079     - btrfs_tree_read_unlock_blocking(eb);
2080     + if (!path->skip_locking)
2081     + btrfs_tree_read_unlock_blocking(eb);
2082     free_extent_buffer(eb);
2083     }
2084     ret = btrfs_find_item(fs_root, path, parent, 0,
2085     @@ -1437,9 +1438,10 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
2086     eb = path->nodes[0];
2087     /* make sure we can use eb after releasing the path */
2088     if (eb != eb_in) {
2089     - atomic_inc(&eb->refs);
2090     - btrfs_tree_read_lock(eb);
2091     - btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
2092     + if (!path->skip_locking)
2093     + btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
2094     + path->nodes[0] = NULL;
2095     + path->locks[0] = 0;
2096     }
2097     btrfs_release_path(path);
2098     iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2099     diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
2100     index e0941fbb913c..02b934d0ee65 100644
2101     --- a/fs/btrfs/delayed-inode.c
2102     +++ b/fs/btrfs/delayed-inode.c
2103     @@ -1694,7 +1694,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
2104     *
2105     */
2106     int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
2107     - struct list_head *ins_list)
2108     + struct list_head *ins_list, bool *emitted)
2109     {
2110     struct btrfs_dir_item *di;
2111     struct btrfs_delayed_item *curr, *next;
2112     @@ -1738,6 +1738,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
2113    
2114     if (over)
2115     return 1;
2116     + *emitted = true;
2117     }
2118     return 0;
2119     }
2120     diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
2121     index f70119f25421..0167853c84ae 100644
2122     --- a/fs/btrfs/delayed-inode.h
2123     +++ b/fs/btrfs/delayed-inode.h
2124     @@ -144,7 +144,7 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
2125     int btrfs_should_delete_dir_index(struct list_head *del_list,
2126     u64 index);
2127     int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
2128     - struct list_head *ins_list);
2129     + struct list_head *ins_list, bool *emitted);
2130    
2131     /* for init */
2132     int __init btrfs_delayed_inode_init(void);
2133     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2134     index 974be09e7556..0ddca6734494 100644
2135     --- a/fs/btrfs/disk-io.c
2136     +++ b/fs/btrfs/disk-io.c
2137     @@ -1762,7 +1762,6 @@ static int cleaner_kthread(void *arg)
2138     int again;
2139     struct btrfs_trans_handle *trans;
2140    
2141     - set_freezable();
2142     do {
2143     again = 0;
2144    
2145     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2146     index a70c5790f8f5..54b5f0de623b 100644
2147     --- a/fs/btrfs/inode.c
2148     +++ b/fs/btrfs/inode.c
2149     @@ -5741,6 +5741,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
2150     char *name_ptr;
2151     int name_len;
2152     int is_curr = 0; /* ctx->pos points to the current index? */
2153     + bool emitted;
2154    
2155     /* FIXME, use a real flag for deciding about the key type */
2156     if (root->fs_info->tree_root == root)
2157     @@ -5769,6 +5770,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
2158     if (ret < 0)
2159     goto err;
2160    
2161     + emitted = false;
2162     while (1) {
2163     leaf = path->nodes[0];
2164     slot = path->slots[0];
2165     @@ -5848,6 +5850,7 @@ skip:
2166    
2167     if (over)
2168     goto nopos;
2169     + emitted = true;
2170     di_len = btrfs_dir_name_len(leaf, di) +
2171     btrfs_dir_data_len(leaf, di) + sizeof(*di);
2172     di_cur += di_len;
2173     @@ -5860,11 +5863,20 @@ next:
2174     if (key_type == BTRFS_DIR_INDEX_KEY) {
2175     if (is_curr)
2176     ctx->pos++;
2177     - ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
2178     + ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
2179     if (ret)
2180     goto nopos;
2181     }
2182    
2183     + /*
2184     + * If we haven't emitted any dir entry, we must not touch ctx->pos as
2185     + * it was was set to the termination value in previous call. We assume
2186     + * that "." and ".." were emitted if we reach this point and set the
2187     + * termination value as well for an empty directory.
2188     + */
2189     + if (ctx->pos > 2 && !emitted)
2190     + goto nopos;
2191     +
2192     /* Reached end of directory/root. Bump pos past the last item. */
2193     ctx->pos++;
2194    
2195     @@ -7985,6 +7997,7 @@ static void btrfs_endio_direct_read(struct bio *bio)
2196    
2197     kfree(dip);
2198    
2199     + dio_bio->bi_error = bio->bi_error;
2200     dio_end_io(dio_bio, bio->bi_error);
2201    
2202     if (io_bio->end_io)
2203     @@ -8030,6 +8043,7 @@ out_test:
2204    
2205     kfree(dip);
2206    
2207     + dio_bio->bi_error = bio->bi_error;
2208     dio_end_io(dio_bio, bio->bi_error);
2209     bio_put(bio);
2210     }
2211     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2212     index da94138eb85e..08fd3f0f34fd 100644
2213     --- a/fs/btrfs/ioctl.c
2214     +++ b/fs/btrfs/ioctl.c
2215     @@ -2782,24 +2782,29 @@ out:
2216     static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
2217     {
2218     struct page *page;
2219     - struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2220    
2221     page = grab_cache_page(inode->i_mapping, index);
2222     if (!page)
2223     - return NULL;
2224     + return ERR_PTR(-ENOMEM);
2225    
2226     if (!PageUptodate(page)) {
2227     - if (extent_read_full_page_nolock(tree, page, btrfs_get_extent,
2228     - 0))
2229     - return NULL;
2230     + int ret;
2231     +
2232     + ret = btrfs_readpage(NULL, page);
2233     + if (ret)
2234     + return ERR_PTR(ret);
2235     lock_page(page);
2236     if (!PageUptodate(page)) {
2237     unlock_page(page);
2238     page_cache_release(page);
2239     - return NULL;
2240     + return ERR_PTR(-EIO);
2241     + }
2242     + if (page->mapping != inode->i_mapping) {
2243     + unlock_page(page);
2244     + page_cache_release(page);
2245     + return ERR_PTR(-EAGAIN);
2246     }
2247     }
2248     - unlock_page(page);
2249    
2250     return page;
2251     }
2252     @@ -2811,17 +2816,31 @@ static int gather_extent_pages(struct inode *inode, struct page **pages,
2253     pgoff_t index = off >> PAGE_CACHE_SHIFT;
2254    
2255     for (i = 0; i < num_pages; i++) {
2256     +again:
2257     pages[i] = extent_same_get_page(inode, index + i);
2258     - if (!pages[i])
2259     - return -ENOMEM;
2260     + if (IS_ERR(pages[i])) {
2261     + int err = PTR_ERR(pages[i]);
2262     +
2263     + if (err == -EAGAIN)
2264     + goto again;
2265     + pages[i] = NULL;
2266     + return err;
2267     + }
2268     }
2269     return 0;
2270     }
2271    
2272     -static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
2273     +static int lock_extent_range(struct inode *inode, u64 off, u64 len,
2274     + bool retry_range_locking)
2275     {
2276     - /* do any pending delalloc/csum calc on src, one way or
2277     - another, and lock file content */
2278     + /*
2279     + * Do any pending delalloc/csum calculations on inode, one way or
2280     + * another, and lock file content.
2281     + * The locking order is:
2282     + *
2283     + * 1) pages
2284     + * 2) range in the inode's io tree
2285     + */
2286     while (1) {
2287     struct btrfs_ordered_extent *ordered;
2288     lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
2289     @@ -2839,8 +2858,11 @@ static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
2290     unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
2291     if (ordered)
2292     btrfs_put_ordered_extent(ordered);
2293     + if (!retry_range_locking)
2294     + return -EAGAIN;
2295     btrfs_wait_ordered_range(inode, off, len);
2296     }
2297     + return 0;
2298     }
2299    
2300     static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
2301     @@ -2865,15 +2887,24 @@ static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
2302     unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
2303     }
2304    
2305     -static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
2306     - struct inode *inode2, u64 loff2, u64 len)
2307     +static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
2308     + struct inode *inode2, u64 loff2, u64 len,
2309     + bool retry_range_locking)
2310     {
2311     + int ret;
2312     +
2313     if (inode1 < inode2) {
2314     swap(inode1, inode2);
2315     swap(loff1, loff2);
2316     }
2317     - lock_extent_range(inode1, loff1, len);
2318     - lock_extent_range(inode2, loff2, len);
2319     + ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
2320     + if (ret)
2321     + return ret;
2322     + ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
2323     + if (ret)
2324     + unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
2325     + loff1 + len - 1);
2326     + return ret;
2327     }
2328    
2329     struct cmp_pages {
2330     @@ -2889,11 +2920,15 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp)
2331    
2332     for (i = 0; i < cmp->num_pages; i++) {
2333     pg = cmp->src_pages[i];
2334     - if (pg)
2335     + if (pg) {
2336     + unlock_page(pg);
2337     page_cache_release(pg);
2338     + }
2339     pg = cmp->dst_pages[i];
2340     - if (pg)
2341     + if (pg) {
2342     + unlock_page(pg);
2343     page_cache_release(pg);
2344     + }
2345     }
2346     kfree(cmp->src_pages);
2347     kfree(cmp->dst_pages);
2348     @@ -2954,6 +2989,8 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
2349    
2350     src_page = cmp->src_pages[i];
2351     dst_page = cmp->dst_pages[i];
2352     + ASSERT(PageLocked(src_page));
2353     + ASSERT(PageLocked(dst_page));
2354    
2355     addr = kmap_atomic(src_page);
2356     dst_addr = kmap_atomic(dst_page);
2357     @@ -3066,14 +3103,46 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
2358     goto out_unlock;
2359     }
2360    
2361     +again:
2362     ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
2363     if (ret)
2364     goto out_unlock;
2365    
2366     if (same_inode)
2367     - lock_extent_range(src, same_lock_start, same_lock_len);
2368     + ret = lock_extent_range(src, same_lock_start, same_lock_len,
2369     + false);
2370     else
2371     - btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
2372     + ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
2373     + false);
2374     + /*
2375     + * If one of the inodes has dirty pages in the respective range or
2376     + * ordered extents, we need to flush dellaloc and wait for all ordered
2377     + * extents in the range. We must unlock the pages and the ranges in the
2378     + * io trees to avoid deadlocks when flushing delalloc (requires locking
2379     + * pages) and when waiting for ordered extents to complete (they require
2380     + * range locking).
2381     + */
2382     + if (ret == -EAGAIN) {
2383     + /*
2384     + * Ranges in the io trees already unlocked. Now unlock all
2385     + * pages before waiting for all IO to complete.
2386     + */
2387     + btrfs_cmp_data_free(&cmp);
2388     + if (same_inode) {
2389     + btrfs_wait_ordered_range(src, same_lock_start,
2390     + same_lock_len);
2391     + } else {
2392     + btrfs_wait_ordered_range(src, loff, len);
2393     + btrfs_wait_ordered_range(dst, dst_loff, len);
2394     + }
2395     + goto again;
2396     + }
2397     + ASSERT(ret == 0);
2398     + if (WARN_ON(ret)) {
2399     + /* ranges in the io trees already unlocked */
2400     + btrfs_cmp_data_free(&cmp);
2401     + return ret;
2402     + }
2403    
2404     /* pass original length for comparison so we stay within i_size */
2405     ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
2406     @@ -3895,9 +3964,15 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2407     u64 lock_start = min_t(u64, off, destoff);
2408     u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
2409    
2410     - lock_extent_range(src, lock_start, lock_len);
2411     + ret = lock_extent_range(src, lock_start, lock_len, true);
2412     } else {
2413     - btrfs_double_extent_lock(src, off, inode, destoff, len);
2414     + ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
2415     + true);
2416     + }
2417     + ASSERT(ret == 0);
2418     + if (WARN_ON(ret)) {
2419     + /* ranges in the io trees already unlocked */
2420     + goto out_unlock;
2421     }
2422    
2423     ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
2424     diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
2425     index a23399e8e3ab..9e084477d320 100644
2426     --- a/fs/btrfs/volumes.c
2427     +++ b/fs/btrfs/volumes.c
2428     @@ -1257,6 +1257,15 @@ int find_free_dev_extent_start(struct btrfs_transaction *transaction,
2429     int ret;
2430     int slot;
2431     struct extent_buffer *l;
2432     + u64 min_search_start;
2433     +
2434     + /*
2435     + * We don't want to overwrite the superblock on the drive nor any area
2436     + * used by the boot loader (grub for example), so we make sure to start
2437     + * at an offset of at least 1MB.
2438     + */
2439     + min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
2440     + search_start = max(search_start, min_search_start);
2441    
2442     path = btrfs_alloc_path();
2443     if (!path)
2444     @@ -1397,18 +1406,9 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
2445     struct btrfs_device *device, u64 num_bytes,
2446     u64 *start, u64 *len)
2447     {
2448     - struct btrfs_root *root = device->dev_root;
2449     - u64 search_start;
2450     -
2451     /* FIXME use last free of some kind */
2452     -
2453     - /*
2454     - * we don't want to overwrite the superblock on the drive,
2455     - * so we make sure to start at an offset of at least 1MB
2456     - */
2457     - search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
2458     return find_free_dev_extent_start(trans->transaction, device,
2459     - num_bytes, search_start, start, len);
2460     + num_bytes, 0, start, len);
2461     }
2462    
2463     static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
2464     @@ -6512,6 +6512,14 @@ int btrfs_read_sys_array(struct btrfs_root *root)
2465     goto out_short_read;
2466    
2467     num_stripes = btrfs_chunk_num_stripes(sb, chunk);
2468     + if (!num_stripes) {
2469     + printk(KERN_ERR
2470     + "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
2471     + num_stripes, cur_offset);
2472     + ret = -EIO;
2473     + break;
2474     + }
2475     +
2476     len = btrfs_chunk_item_size(num_stripes);
2477     if (cur_offset + len > array_size)
2478     goto out_short_read;
2479     diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
2480     index 7febcf2475c5..50b268483302 100644
2481     --- a/fs/cifs/cifs_debug.c
2482     +++ b/fs/cifs/cifs_debug.c
2483     @@ -50,7 +50,7 @@ void cifs_vfs_err(const char *fmt, ...)
2484     vaf.fmt = fmt;
2485     vaf.va = &args;
2486    
2487     - pr_err("CIFS VFS: %pV", &vaf);
2488     + pr_err_ratelimited("CIFS VFS: %pV", &vaf);
2489    
2490     va_end(args);
2491     }
2492     diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
2493     index f40fbaca1b2a..66cf0f9fff89 100644
2494     --- a/fs/cifs/cifs_debug.h
2495     +++ b/fs/cifs/cifs_debug.h
2496     @@ -51,14 +51,13 @@ __printf(1, 2) void cifs_vfs_err(const char *fmt, ...);
2497     /* information message: e.g., configuration, major event */
2498     #define cifs_dbg(type, fmt, ...) \
2499     do { \
2500     - if (type == FYI) { \
2501     - if (cifsFYI & CIFS_INFO) { \
2502     - pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__); \
2503     - } \
2504     + if (type == FYI && cifsFYI & CIFS_INFO) { \
2505     + pr_debug_ratelimited("%s: " \
2506     + fmt, __FILE__, ##__VA_ARGS__); \
2507     } else if (type == VFS) { \
2508     cifs_vfs_err(fmt, ##__VA_ARGS__); \
2509     } else if (type == NOISY && type != 0) { \
2510     - pr_debug(fmt, ##__VA_ARGS__); \
2511     + pr_debug_ratelimited(fmt, ##__VA_ARGS__); \
2512     } \
2513     } while (0)
2514    
2515     diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
2516     index afa09fce8151..e682b36a210f 100644
2517     --- a/fs/cifs/cifsencrypt.c
2518     +++ b/fs/cifs/cifsencrypt.c
2519     @@ -714,7 +714,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
2520    
2521     ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
2522     if (!ses->auth_key.response) {
2523     - rc = ENOMEM;
2524     + rc = -ENOMEM;
2525     ses->auth_key.len = 0;
2526     goto setup_ntlmv2_rsp_ret;
2527     }
2528     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
2529     index ecb0803bdb0e..3c194ff0d2f0 100644
2530     --- a/fs/cifs/connect.c
2531     +++ b/fs/cifs/connect.c
2532     @@ -368,7 +368,6 @@ cifs_reconnect(struct TCP_Server_Info *server)
2533     server->session_key.response = NULL;
2534     server->session_key.len = 0;
2535     server->lstrp = jiffies;
2536     - mutex_unlock(&server->srv_mutex);
2537    
2538     /* mark submitted MIDs for retry and issue callback */
2539     INIT_LIST_HEAD(&retry_list);
2540     @@ -381,6 +380,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
2541     list_move(&mid_entry->qhead, &retry_list);
2542     }
2543     spin_unlock(&GlobalMid_Lock);
2544     + mutex_unlock(&server->srv_mutex);
2545    
2546     cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
2547     list_for_each_safe(tmp, tmp2, &retry_list) {
2548     diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
2549     index 0557c45e9c33..b30a4a6d98a0 100644
2550     --- a/fs/cifs/readdir.c
2551     +++ b/fs/cifs/readdir.c
2552     @@ -847,6 +847,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
2553     * if buggy server returns . and .. late do we want to
2554     * check for that here?
2555     */
2556     + *tmp_buf = 0;
2557     rc = cifs_filldir(current_entry, file, ctx,
2558     tmp_buf, max_len);
2559     if (rc) {
2560     diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
2561     index 2a24c524fb9a..87abe8ed074c 100644
2562     --- a/fs/cifs/transport.c
2563     +++ b/fs/cifs/transport.c
2564     @@ -576,14 +576,16 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
2565     cifs_in_send_dec(server);
2566     cifs_save_when_sent(mid);
2567    
2568     - if (rc < 0)
2569     + if (rc < 0) {
2570     server->sequence_number -= 2;
2571     + cifs_delete_mid(mid);
2572     + }
2573     +
2574     mutex_unlock(&server->srv_mutex);
2575    
2576     if (rc == 0)
2577     return 0;
2578    
2579     - cifs_delete_mid(mid);
2580     add_credits_and_wake_if(server, credits, optype);
2581     return rc;
2582     }
2583     diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
2584     index c35ffdc12bba..706de324f2a6 100644
2585     --- a/fs/devpts/inode.c
2586     +++ b/fs/devpts/inode.c
2587     @@ -575,6 +575,26 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
2588     mutex_unlock(&allocated_ptys_lock);
2589     }
2590    
2591     +/*
2592     + * pty code needs to hold extra references in case of last /dev/tty close
2593     + */
2594     +
2595     +void devpts_add_ref(struct inode *ptmx_inode)
2596     +{
2597     + struct super_block *sb = pts_sb_from_inode(ptmx_inode);
2598     +
2599     + atomic_inc(&sb->s_active);
2600     + ihold(ptmx_inode);
2601     +}
2602     +
2603     +void devpts_del_ref(struct inode *ptmx_inode)
2604     +{
2605     + struct super_block *sb = pts_sb_from_inode(ptmx_inode);
2606     +
2607     + iput(ptmx_inode);
2608     + deactivate_super(sb);
2609     +}
2610     +
2611     /**
2612     * devpts_pty_new -- create a new inode in /dev/pts/
2613     * @ptmx_inode: inode of the master
2614     diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
2615     index ec0668a60678..fe1f50fe764f 100644
2616     --- a/fs/ext4/balloc.c
2617     +++ b/fs/ext4/balloc.c
2618     @@ -191,7 +191,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
2619     /* If checksum is bad mark all blocks used to prevent allocation
2620     * essentially implementing a per-group read-only flag. */
2621     if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
2622     - ext4_error(sb, "Checksum bad for group %u", block_group);
2623     grp = ext4_get_group_info(sb, block_group);
2624     if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
2625     percpu_counter_sub(&sbi->s_freeclusters_counter,
2626     @@ -442,14 +441,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
2627     }
2628     ext4_lock_group(sb, block_group);
2629     if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2630     -
2631     err = ext4_init_block_bitmap(sb, bh, block_group, desc);
2632     set_bitmap_uptodate(bh);
2633     set_buffer_uptodate(bh);
2634     ext4_unlock_group(sb, block_group);
2635     unlock_buffer(bh);
2636     - if (err)
2637     + if (err) {
2638     + ext4_error(sb, "Failed to init block bitmap for group "
2639     + "%u: %d", block_group, err);
2640     goto out;
2641     + }
2642     goto verify;
2643     }
2644     ext4_unlock_group(sb, block_group);
2645     diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
2646     index 1b8024d26f65..53f2b98a69f3 100644
2647     --- a/fs/ext4/ialloc.c
2648     +++ b/fs/ext4/ialloc.c
2649     @@ -76,7 +76,6 @@ static int ext4_init_inode_bitmap(struct super_block *sb,
2650     /* If checksum is bad mark all blocks and inodes use to prevent
2651     * allocation, essentially implementing a per-group read-only flag. */
2652     if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
2653     - ext4_error(sb, "Checksum bad for group %u", block_group);
2654     grp = ext4_get_group_info(sb, block_group);
2655     if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
2656     percpu_counter_sub(&sbi->s_freeclusters_counter,
2657     @@ -191,8 +190,11 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
2658     set_buffer_verified(bh);
2659     ext4_unlock_group(sb, block_group);
2660     unlock_buffer(bh);
2661     - if (err)
2662     + if (err) {
2663     + ext4_error(sb, "Failed to init inode bitmap for group "
2664     + "%u: %d", block_group, err);
2665     goto out;
2666     + }
2667     return bh;
2668     }
2669     ext4_unlock_group(sb, block_group);
2670     diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
2671     index fb6f11709ae6..e032a0423e35 100644
2672     --- a/fs/ext4/move_extent.c
2673     +++ b/fs/ext4/move_extent.c
2674     @@ -265,11 +265,12 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
2675     ext4_lblk_t orig_blk_offset, donor_blk_offset;
2676     unsigned long blocksize = orig_inode->i_sb->s_blocksize;
2677     unsigned int tmp_data_size, data_size, replaced_size;
2678     - int err2, jblocks, retries = 0;
2679     + int i, err2, jblocks, retries = 0;
2680     int replaced_count = 0;
2681     int from = data_offset_in_page << orig_inode->i_blkbits;
2682     int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
2683     struct super_block *sb = orig_inode->i_sb;
2684     + struct buffer_head *bh = NULL;
2685    
2686     /*
2687     * It needs twice the amount of ordinary journal buffers because
2688     @@ -380,8 +381,16 @@ data_copy:
2689     }
2690     /* Perform all necessary steps similar write_begin()/write_end()
2691     * but keeping in mind that i_size will not change */
2692     - *err = __block_write_begin(pagep[0], from, replaced_size,
2693     - ext4_get_block);
2694     + if (!page_has_buffers(pagep[0]))
2695     + create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
2696     + bh = page_buffers(pagep[0]);
2697     + for (i = 0; i < data_offset_in_page; i++)
2698     + bh = bh->b_this_page;
2699     + for (i = 0; i < block_len_in_page; i++) {
2700     + *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
2701     + if (*err < 0)
2702     + break;
2703     + }
2704     if (!*err)
2705     *err = block_commit_write(pagep[0], from, from + replaced_size);
2706    
2707     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
2708     index ad62d7acc315..34038e3598d5 100644
2709     --- a/fs/ext4/resize.c
2710     +++ b/fs/ext4/resize.c
2711     @@ -198,7 +198,7 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
2712     if (flex_gd == NULL)
2713     goto out3;
2714    
2715     - if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
2716     + if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
2717     goto out2;
2718     flex_gd->count = flexbg_size;
2719    
2720     diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
2721     index de4bdfac0cec..595ebdb41846 100644
2722     --- a/fs/hugetlbfs/inode.c
2723     +++ b/fs/hugetlbfs/inode.c
2724     @@ -463,6 +463,7 @@ hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end)
2725     */
2726     vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
2727     unsigned long v_offset;
2728     + unsigned long v_end;
2729    
2730     /*
2731     * Can the expression below overflow on 32-bit arches?
2732     @@ -475,15 +476,17 @@ hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end)
2733     else
2734     v_offset = 0;
2735    
2736     - if (end) {
2737     - end = ((end - start) << PAGE_SHIFT) +
2738     - vma->vm_start + v_offset;
2739     - if (end > vma->vm_end)
2740     - end = vma->vm_end;
2741     - } else
2742     - end = vma->vm_end;
2743     + if (!end)
2744     + v_end = vma->vm_end;
2745     + else {
2746     + v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
2747     + + vma->vm_start;
2748     + if (v_end > vma->vm_end)
2749     + v_end = vma->vm_end;
2750     + }
2751    
2752     - unmap_hugepage_range(vma, vma->vm_start + v_offset, end, NULL);
2753     + unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
2754     + NULL);
2755     }
2756     }
2757    
2758     diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
2759     index 03516c80855a..2a2e2d8ddee5 100644
2760     --- a/fs/nfs/flexfilelayout/flexfilelayout.c
2761     +++ b/fs/nfs/flexfilelayout/flexfilelayout.c
2762     @@ -145,7 +145,7 @@ static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
2763     return false;
2764     for (i = 0; i < m1->fh_versions_cnt; i++) {
2765     bool found_fh = false;
2766     - for (j = 0; j < m2->fh_versions_cnt; i++) {
2767     + for (j = 0; j < m2->fh_versions_cnt; j++) {
2768     if (nfs_compare_fh(&m1->fh_versions[i],
2769     &m2->fh_versions[j]) == 0) {
2770     found_fh = true;
2771     @@ -1859,11 +1859,9 @@ ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
2772     start = xdr_reserve_space(xdr, 4);
2773     BUG_ON(!start);
2774    
2775     - if (ff_layout_encode_ioerr(flo, xdr, args))
2776     - goto out;
2777     -
2778     + ff_layout_encode_ioerr(flo, xdr, args);
2779     ff_layout_encode_iostats(flo, xdr, args);
2780     -out:
2781     +
2782     *start = cpu_to_be32((xdr->p - start - 1) * 4);
2783     dprintk("%s: Return\n", __func__);
2784     }
2785     diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
2786     index c7e8b87da5b2..3e2071a177fd 100644
2787     --- a/fs/nfs/inode.c
2788     +++ b/fs/nfs/inode.c
2789     @@ -1641,6 +1641,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2790     unsigned long invalid = 0;
2791     unsigned long now = jiffies;
2792     unsigned long save_cache_validity;
2793     + bool cache_revalidated = true;
2794    
2795     dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n",
2796     __func__, inode->i_sb->s_id, inode->i_ino,
2797     @@ -1702,22 +1703,28 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2798     nfs_force_lookup_revalidate(inode);
2799     inode->i_version = fattr->change_attr;
2800     }
2801     - } else
2802     + } else {
2803     nfsi->cache_validity |= save_cache_validity;
2804     + cache_revalidated = false;
2805     + }
2806    
2807     if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
2808     memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
2809     - } else if (server->caps & NFS_CAP_MTIME)
2810     + } else if (server->caps & NFS_CAP_MTIME) {
2811     nfsi->cache_validity |= save_cache_validity &
2812     (NFS_INO_INVALID_ATTR
2813     | NFS_INO_REVAL_FORCED);
2814     + cache_revalidated = false;
2815     + }
2816    
2817     if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
2818     memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
2819     - } else if (server->caps & NFS_CAP_CTIME)
2820     + } else if (server->caps & NFS_CAP_CTIME) {
2821     nfsi->cache_validity |= save_cache_validity &
2822     (NFS_INO_INVALID_ATTR
2823     | NFS_INO_REVAL_FORCED);
2824     + cache_revalidated = false;
2825     + }
2826    
2827     /* Check if our cached file size is stale */
2828     if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
2829     @@ -1737,19 +1744,23 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2830     (long long)cur_isize,
2831     (long long)new_isize);
2832     }
2833     - } else
2834     + } else {
2835     nfsi->cache_validity |= save_cache_validity &
2836     (NFS_INO_INVALID_ATTR
2837     | NFS_INO_REVAL_PAGECACHE
2838     | NFS_INO_REVAL_FORCED);
2839     + cache_revalidated = false;
2840     + }
2841    
2842    
2843     if (fattr->valid & NFS_ATTR_FATTR_ATIME)
2844     memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
2845     - else if (server->caps & NFS_CAP_ATIME)
2846     + else if (server->caps & NFS_CAP_ATIME) {
2847     nfsi->cache_validity |= save_cache_validity &
2848     (NFS_INO_INVALID_ATIME
2849     | NFS_INO_REVAL_FORCED);
2850     + cache_revalidated = false;
2851     + }
2852    
2853     if (fattr->valid & NFS_ATTR_FATTR_MODE) {
2854     if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
2855     @@ -1758,36 +1769,42 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2856     inode->i_mode = newmode;
2857     invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
2858     }
2859     - } else if (server->caps & NFS_CAP_MODE)
2860     + } else if (server->caps & NFS_CAP_MODE) {
2861     nfsi->cache_validity |= save_cache_validity &
2862     (NFS_INO_INVALID_ATTR
2863     | NFS_INO_INVALID_ACCESS
2864     | NFS_INO_INVALID_ACL
2865     | NFS_INO_REVAL_FORCED);
2866     + cache_revalidated = false;
2867     + }
2868    
2869     if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
2870     if (!uid_eq(inode->i_uid, fattr->uid)) {
2871     invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
2872     inode->i_uid = fattr->uid;
2873     }
2874     - } else if (server->caps & NFS_CAP_OWNER)
2875     + } else if (server->caps & NFS_CAP_OWNER) {
2876     nfsi->cache_validity |= save_cache_validity &
2877     (NFS_INO_INVALID_ATTR
2878     | NFS_INO_INVALID_ACCESS
2879     | NFS_INO_INVALID_ACL
2880     | NFS_INO_REVAL_FORCED);
2881     + cache_revalidated = false;
2882     + }
2883    
2884     if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
2885     if (!gid_eq(inode->i_gid, fattr->gid)) {
2886     invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
2887     inode->i_gid = fattr->gid;
2888     }
2889     - } else if (server->caps & NFS_CAP_OWNER_GROUP)
2890     + } else if (server->caps & NFS_CAP_OWNER_GROUP) {
2891     nfsi->cache_validity |= save_cache_validity &
2892     (NFS_INO_INVALID_ATTR
2893     | NFS_INO_INVALID_ACCESS
2894     | NFS_INO_INVALID_ACL
2895     | NFS_INO_REVAL_FORCED);
2896     + cache_revalidated = false;
2897     + }
2898    
2899     if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
2900     if (inode->i_nlink != fattr->nlink) {
2901     @@ -1796,19 +1813,22 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2902     invalid |= NFS_INO_INVALID_DATA;
2903     set_nlink(inode, fattr->nlink);
2904     }
2905     - } else if (server->caps & NFS_CAP_NLINK)
2906     + } else if (server->caps & NFS_CAP_NLINK) {
2907     nfsi->cache_validity |= save_cache_validity &
2908     (NFS_INO_INVALID_ATTR
2909     | NFS_INO_REVAL_FORCED);
2910     + cache_revalidated = false;
2911     + }
2912    
2913     if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
2914     /*
2915     * report the blocks in 512byte units
2916     */
2917     inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
2918     - }
2919     - if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
2920     + } else if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
2921     inode->i_blocks = fattr->du.nfs2.blocks;
2922     + else
2923     + cache_revalidated = false;
2924    
2925     /* Update attrtimeo value if we're out of the unstable period */
2926     if (invalid & NFS_INO_INVALID_ATTR) {
2927     @@ -1818,9 +1838,13 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2928     /* Set barrier to be more recent than all outstanding updates */
2929     nfsi->attr_gencount = nfs_inc_attr_generation_counter();
2930     } else {
2931     - if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
2932     - if ((nfsi->attrtimeo <<= 1) > NFS_MAXATTRTIMEO(inode))
2933     - nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
2934     + if (cache_revalidated) {
2935     + if (!time_in_range_open(now, nfsi->attrtimeo_timestamp,
2936     + nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
2937     + nfsi->attrtimeo <<= 1;
2938     + if (nfsi->attrtimeo > NFS_MAXATTRTIMEO(inode))
2939     + nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
2940     + }
2941     nfsi->attrtimeo_timestamp = now;
2942     }
2943     /* Set the barrier to be more recent than this fattr */
2944     @@ -1829,7 +1853,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2945     }
2946    
2947     /* Don't declare attrcache up to date if there were no attrs! */
2948     - if (fattr->valid != 0)
2949     + if (cache_revalidated)
2950     invalid &= ~NFS_INO_INVALID_ATTR;
2951    
2952     /* Don't invalidate the data if we were to blame */
2953     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2954     index 343b0f1f15b1..f496ed721d27 100644
2955     --- a/fs/nfs/nfs4proc.c
2956     +++ b/fs/nfs/nfs4proc.c
2957     @@ -1385,6 +1385,7 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s
2958     * Protect the call to nfs4_state_set_mode_locked and
2959     * serialise the stateid update
2960     */
2961     + spin_lock(&state->owner->so_lock);
2962     write_seqlock(&state->seqlock);
2963     if (deleg_stateid != NULL) {
2964     nfs4_stateid_copy(&state->stateid, deleg_stateid);
2965     @@ -1393,7 +1394,6 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s
2966     if (open_stateid != NULL)
2967     nfs_set_open_stateid_locked(state, open_stateid, fmode);
2968     write_sequnlock(&state->seqlock);
2969     - spin_lock(&state->owner->so_lock);
2970     update_open_stateflags(state, fmode);
2971     spin_unlock(&state->owner->so_lock);
2972     }
2973     diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
2974     index 0a8983492d91..eff6319d5037 100644
2975     --- a/fs/overlayfs/copy_up.c
2976     +++ b/fs/overlayfs/copy_up.c
2977     @@ -22,9 +22,9 @@
2978    
2979     int ovl_copy_xattr(struct dentry *old, struct dentry *new)
2980     {
2981     - ssize_t list_size, size;
2982     - char *buf, *name, *value;
2983     - int error;
2984     + ssize_t list_size, size, value_size = 0;
2985     + char *buf, *name, *value = NULL;
2986     + int uninitialized_var(error);
2987    
2988     if (!old->d_inode->i_op->getxattr ||
2989     !new->d_inode->i_op->getxattr)
2990     @@ -41,29 +41,40 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
2991     if (!buf)
2992     return -ENOMEM;
2993    
2994     - error = -ENOMEM;
2995     - value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL);
2996     - if (!value)
2997     - goto out;
2998     -
2999     list_size = vfs_listxattr(old, buf, list_size);
3000     if (list_size <= 0) {
3001     error = list_size;
3002     - goto out_free_value;
3003     + goto out;
3004     }
3005    
3006     for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
3007     - size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX);
3008     - if (size <= 0) {
3009     +retry:
3010     + size = vfs_getxattr(old, name, value, value_size);
3011     + if (size == -ERANGE)
3012     + size = vfs_getxattr(old, name, NULL, 0);
3013     +
3014     + if (size < 0) {
3015     error = size;
3016     - goto out_free_value;
3017     + break;
3018     + }
3019     +
3020     + if (size > value_size) {
3021     + void *new;
3022     +
3023     + new = krealloc(value, size, GFP_KERNEL);
3024     + if (!new) {
3025     + error = -ENOMEM;
3026     + break;
3027     + }
3028     + value = new;
3029     + value_size = size;
3030     + goto retry;
3031     }
3032     +
3033     error = vfs_setxattr(new, name, value, size, 0);
3034     if (error)
3035     - goto out_free_value;
3036     + break;
3037     }
3038     -
3039     -out_free_value:
3040     kfree(value);
3041     out:
3042     kfree(buf);
3043     diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
3044     index 4060ffde8722..b29036aa8d7c 100644
3045     --- a/fs/overlayfs/inode.c
3046     +++ b/fs/overlayfs/inode.c
3047     @@ -42,6 +42,19 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
3048     int err;
3049     struct dentry *upperdentry;
3050    
3051     + /*
3052     + * Check for permissions before trying to copy-up. This is redundant
3053     + * since it will be rechecked later by ->setattr() on upper dentry. But
3054     + * without this, copy-up can be triggered by just about anybody.
3055     + *
3056     + * We don't initialize inode->size, which just means that
3057     + * inode_newsize_ok() will always check against MAX_LFS_FILESIZE and not
3058     + * check for a swapfile (which this won't be anyway).
3059     + */
3060     + err = inode_change_ok(dentry->d_inode, attr);
3061     + if (err)
3062     + return err;
3063     +
3064     err = ovl_want_write(dentry);
3065     if (err)
3066     goto out;
3067     diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
3068     index 70e9af551600..adcb1398c481 100644
3069     --- a/fs/overlayfs/readdir.c
3070     +++ b/fs/overlayfs/readdir.c
3071     @@ -571,7 +571,8 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
3072     (int) PTR_ERR(dentry));
3073     continue;
3074     }
3075     - ovl_cleanup(upper->d_inode, dentry);
3076     + if (dentry->d_inode)
3077     + ovl_cleanup(upper->d_inode, dentry);
3078     dput(dentry);
3079     }
3080     mutex_unlock(&upper->d_inode->i_mutex);
3081     diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
3082     index e38ee0fed24a..f42c9407fbad 100644
3083     --- a/fs/overlayfs/super.c
3084     +++ b/fs/overlayfs/super.c
3085     @@ -9,6 +9,7 @@
3086    
3087     #include <linux/fs.h>
3088     #include <linux/namei.h>
3089     +#include <linux/pagemap.h>
3090     #include <linux/xattr.h>
3091     #include <linux/security.h>
3092     #include <linux/mount.h>
3093     @@ -910,6 +911,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
3094     }
3095    
3096     sb->s_stack_depth = 0;
3097     + sb->s_maxbytes = MAX_LFS_FILESIZE;
3098     if (ufs->config.upperdir) {
3099     if (!ufs->config.workdir) {
3100     pr_err("overlayfs: missing 'workdir'\n");
3101     @@ -1053,6 +1055,9 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
3102    
3103     root_dentry->d_fsdata = oe;
3104    
3105     + ovl_copyattr(ovl_dentry_real(root_dentry)->d_inode,
3106     + root_dentry->d_inode);
3107     +
3108     sb->s_magic = OVERLAYFS_SUPER_MAGIC;
3109     sb->s_op = &ovl_super_operations;
3110     sb->s_root = root_dentry;
3111     diff --git a/fs/proc/array.c b/fs/proc/array.c
3112     index d73291f5f0fc..b6c00ce0e29e 100644
3113     --- a/fs/proc/array.c
3114     +++ b/fs/proc/array.c
3115     @@ -395,7 +395,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
3116    
3117     state = *get_task_state(task);
3118     vsize = eip = esp = 0;
3119     - permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
3120     + permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT);
3121     mm = get_task_mm(task);
3122     if (mm) {
3123     vsize = task_vsize(mm);
3124     diff --git a/fs/proc/base.c b/fs/proc/base.c
3125     index 4bd5d3118acd..b7de324bec11 100644
3126     --- a/fs/proc/base.c
3127     +++ b/fs/proc/base.c
3128     @@ -403,7 +403,7 @@ static const struct file_operations proc_pid_cmdline_ops = {
3129     static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
3130     struct pid *pid, struct task_struct *task)
3131     {
3132     - struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
3133     + struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
3134     if (mm && !IS_ERR(mm)) {
3135     unsigned int nwords = 0;
3136     do {
3137     @@ -430,7 +430,8 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
3138    
3139     wchan = get_wchan(task);
3140    
3141     - if (wchan && ptrace_may_access(task, PTRACE_MODE_READ) && !lookup_symbol_name(wchan, symname))
3142     + if (wchan && ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)
3143     + && !lookup_symbol_name(wchan, symname))
3144     seq_printf(m, "%s", symname);
3145     else
3146     seq_putc(m, '0');
3147     @@ -444,7 +445,7 @@ static int lock_trace(struct task_struct *task)
3148     int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
3149     if (err)
3150     return err;
3151     - if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
3152     + if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
3153     mutex_unlock(&task->signal->cred_guard_mutex);
3154     return -EPERM;
3155     }
3156     @@ -697,7 +698,7 @@ static int proc_fd_access_allowed(struct inode *inode)
3157     */
3158     task = get_proc_task(inode);
3159     if (task) {
3160     - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
3161     + allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
3162     put_task_struct(task);
3163     }
3164     return allowed;
3165     @@ -732,7 +733,7 @@ static bool has_pid_permissions(struct pid_namespace *pid,
3166     return true;
3167     if (in_group_p(pid->pid_gid))
3168     return true;
3169     - return ptrace_may_access(task, PTRACE_MODE_READ);
3170     + return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
3171     }
3172    
3173    
3174     @@ -809,7 +810,7 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
3175     struct mm_struct *mm = ERR_PTR(-ESRCH);
3176    
3177     if (task) {
3178     - mm = mm_access(task, mode);
3179     + mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
3180     put_task_struct(task);
3181    
3182     if (!IS_ERR_OR_NULL(mm)) {
3183     @@ -1856,7 +1857,7 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
3184     if (!task)
3185     goto out_notask;
3186    
3187     - mm = mm_access(task, PTRACE_MODE_READ);
3188     + mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
3189     if (IS_ERR_OR_NULL(mm))
3190     goto out;
3191    
3192     @@ -2007,7 +2008,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
3193     goto out;
3194    
3195     result = -EACCES;
3196     - if (!ptrace_may_access(task, PTRACE_MODE_READ))
3197     + if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
3198     goto out_put_task;
3199    
3200     result = -ENOENT;
3201     @@ -2060,7 +2061,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
3202     goto out;
3203    
3204     ret = -EACCES;
3205     - if (!ptrace_may_access(task, PTRACE_MODE_READ))
3206     + if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
3207     goto out_put_task;
3208    
3209     ret = 0;
3210     @@ -2530,7 +2531,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
3211     if (result)
3212     return result;
3213    
3214     - if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
3215     + if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
3216     result = -EACCES;
3217     goto out_unlock;
3218     }
3219     diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
3220     index f6e8354b8cea..1b0ea4a5d89e 100644
3221     --- a/fs/proc/namespaces.c
3222     +++ b/fs/proc/namespaces.c
3223     @@ -42,7 +42,7 @@ static const char *proc_ns_follow_link(struct dentry *dentry, void **cookie)
3224     if (!task)
3225     return error;
3226    
3227     - if (ptrace_may_access(task, PTRACE_MODE_READ)) {
3228     + if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
3229     error = ns_get_path(&ns_path, task, ns_ops);
3230     if (!error)
3231     nd_jump_link(&ns_path);
3232     @@ -63,7 +63,7 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl
3233     if (!task)
3234     return res;
3235    
3236     - if (ptrace_may_access(task, PTRACE_MODE_READ)) {
3237     + if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
3238     res = ns_get_name(name, sizeof(name), task, ns_ops);
3239     if (res >= 0)
3240     res = readlink_copy(buffer, buflen, name);
3241     diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
3242     index 187b3b5f242e..09cd3edde08a 100644
3243     --- a/fs/proc/task_mmu.c
3244     +++ b/fs/proc/task_mmu.c
3245     @@ -1473,18 +1473,19 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
3246     static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
3247     unsigned long addr, unsigned long end, struct mm_walk *walk)
3248     {
3249     + pte_t huge_pte = huge_ptep_get(pte);
3250     struct numa_maps *md;
3251     struct page *page;
3252    
3253     - if (!pte_present(*pte))
3254     + if (!pte_present(huge_pte))
3255     return 0;
3256    
3257     - page = pte_page(*pte);
3258     + page = pte_page(huge_pte);
3259     if (!page)
3260     return 0;
3261    
3262     md = walk->private;
3263     - gather_stats(page, md, pte_dirty(*pte), 1);
3264     + gather_stats(page, md, pte_dirty(huge_pte), 1);
3265     return 0;
3266     }
3267    
3268     diff --git a/fs/timerfd.c b/fs/timerfd.c
3269     index b94fa6c3c6eb..053818dd6c18 100644
3270     --- a/fs/timerfd.c
3271     +++ b/fs/timerfd.c
3272     @@ -153,7 +153,7 @@ static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
3273     if (isalarm(ctx))
3274     remaining = alarm_expires_remaining(&ctx->t.alarm);
3275     else
3276     - remaining = hrtimer_expires_remaining(&ctx->t.tmr);
3277     + remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr);
3278    
3279     return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
3280     }
3281     diff --git a/fs/udf/inode.c b/fs/udf/inode.c
3282     index 8d0b3ade0ff0..566df9b5a6cb 100644
3283     --- a/fs/udf/inode.c
3284     +++ b/fs/udf/inode.c
3285     @@ -2047,14 +2047,29 @@ void udf_write_aext(struct inode *inode, struct extent_position *epos,
3286     epos->offset += adsize;
3287     }
3288    
3289     +/*
3290     + * Only 1 indirect extent in a row really makes sense but allow upto 16 in case
3291     + * someone does some weird stuff.
3292     + */
3293     +#define UDF_MAX_INDIR_EXTS 16
3294     +
3295     int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
3296     struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
3297     {
3298     int8_t etype;
3299     + unsigned int indirections = 0;
3300    
3301     while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
3302     (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
3303     int block;
3304     +
3305     + if (++indirections > UDF_MAX_INDIR_EXTS) {
3306     + udf_err(inode->i_sb,
3307     + "too many indirect extents in inode %lu\n",
3308     + inode->i_ino);
3309     + return -1;
3310     + }
3311     +
3312     epos->block = *eloc;
3313     epos->offset = sizeof(struct allocExtDesc);
3314     brelse(epos->bh);
3315     diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
3316     index ab478e62baae..e788a05aab83 100644
3317     --- a/fs/udf/unicode.c
3318     +++ b/fs/udf/unicode.c
3319     @@ -128,11 +128,15 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
3320     if (c < 0x80U)
3321     utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
3322     else if (c < 0x800U) {
3323     + if (utf_o->u_len > (UDF_NAME_LEN - 4))
3324     + break;
3325     utf_o->u_name[utf_o->u_len++] =
3326     (uint8_t)(0xc0 | (c >> 6));
3327     utf_o->u_name[utf_o->u_len++] =
3328     (uint8_t)(0x80 | (c & 0x3f));
3329     } else {
3330     + if (utf_o->u_len > (UDF_NAME_LEN - 5))
3331     + break;
3332     utf_o->u_name[utf_o->u_len++] =
3333     (uint8_t)(0xe0 | (c >> 12));
3334     utf_o->u_name[utf_o->u_len++] =
3335     @@ -173,17 +177,22 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
3336     static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
3337     {
3338     unsigned c, i, max_val, utf_char;
3339     - int utf_cnt, u_len;
3340     + int utf_cnt, u_len, u_ch;
3341    
3342     memset(ocu, 0, sizeof(dstring) * length);
3343     ocu[0] = 8;
3344     max_val = 0xffU;
3345     + u_ch = 1;
3346    
3347     try_again:
3348     u_len = 0U;
3349     utf_char = 0U;
3350     utf_cnt = 0U;
3351     for (i = 0U; i < utf->u_len; i++) {
3352     + /* Name didn't fit? */
3353     + if (u_len + 1 + u_ch >= length)
3354     + return 0;
3355     +
3356     c = (uint8_t)utf->u_name[i];
3357    
3358     /* Complete a multi-byte UTF-8 character */
3359     @@ -225,6 +234,7 @@ try_again:
3360     if (max_val == 0xffU) {
3361     max_val = 0xffffU;
3362     ocu[0] = (uint8_t)0x10U;
3363     + u_ch = 2;
3364     goto try_again;
3365     }
3366     goto error_out;
3367     @@ -277,7 +287,7 @@ static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
3368     c = (c << 8) | ocu[i++];
3369    
3370     len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
3371     - UDF_NAME_LEN - utf_o->u_len);
3372     + UDF_NAME_LEN - 2 - utf_o->u_len);
3373     /* Valid character? */
3374     if (len >= 0)
3375     utf_o->u_len += len;
3376     @@ -295,15 +305,19 @@ static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni,
3377     int len;
3378     unsigned i, max_val;
3379     uint16_t uni_char;
3380     - int u_len;
3381     + int u_len, u_ch;
3382    
3383     memset(ocu, 0, sizeof(dstring) * length);
3384     ocu[0] = 8;
3385     max_val = 0xffU;
3386     + u_ch = 1;
3387    
3388     try_again:
3389     u_len = 0U;
3390     for (i = 0U; i < uni->u_len; i++) {
3391     + /* Name didn't fit? */
3392     + if (u_len + 1 + u_ch >= length)
3393     + return 0;
3394     len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
3395     if (!len)
3396     continue;
3397     @@ -316,6 +330,7 @@ try_again:
3398     if (uni_char > max_val) {
3399     max_val = 0xffffU;
3400     ocu[0] = (uint8_t)0x10U;
3401     + u_ch = 2;
3402     goto try_again;
3403     }
3404    
3405     diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
3406     index 8774498ce0ff..e2536bb1c760 100644
3407     --- a/fs/xfs/libxfs/xfs_format.h
3408     +++ b/fs/xfs/libxfs/xfs_format.h
3409     @@ -786,7 +786,7 @@ typedef struct xfs_agfl {
3410     __be64 agfl_lsn;
3411     __be32 agfl_crc;
3412     __be32 agfl_bno[]; /* actually XFS_AGFL_SIZE(mp) */
3413     -} xfs_agfl_t;
3414     +} __attribute__((packed)) xfs_agfl_t;
3415    
3416     #define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc)
3417    
3418     diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
3419     index 268c00f4f83a..65485cfc4ade 100644
3420     --- a/fs/xfs/libxfs/xfs_inode_buf.c
3421     +++ b/fs/xfs/libxfs/xfs_inode_buf.c
3422     @@ -62,11 +62,12 @@ xfs_inobp_check(
3423     * has not had the inode cores stamped into it. Hence for readahead, the buffer
3424     * may be potentially invalid.
3425     *
3426     - * If the readahead buffer is invalid, we don't want to mark it with an error,
3427     - * but we do want to clear the DONE status of the buffer so that a followup read
3428     - * will re-read it from disk. This will ensure that we don't get an unnecessary
3429     - * warnings during log recovery and we don't get unnecssary panics on debug
3430     - * kernels.
3431     + * If the readahead buffer is invalid, we need to mark it with an error and
3432     + * clear the DONE status of the buffer so that a followup read will re-read it
3433     + * from disk. We don't report the error otherwise to avoid warnings during log
3434     + * recovery and we don't get unnecessary panics on debug kernels. We use EIO here
3435     + * because all we want to do is say readahead failed; there is no-one to report
3436     + * the error to, so this will distinguish it from a non-ra verifier failure.
3437     */
3438     static void
3439     xfs_inode_buf_verify(
3440     @@ -93,6 +94,7 @@ xfs_inode_buf_verify(
3441     XFS_RANDOM_ITOBP_INOTOBP))) {
3442     if (readahead) {
3443     bp->b_flags &= ~XBF_DONE;
3444     + xfs_buf_ioerror(bp, -EIO);
3445     return;
3446     }
3447    
3448     diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
3449     index 3243cdf97f33..39090fc56f09 100644
3450     --- a/fs/xfs/xfs_buf.c
3451     +++ b/fs/xfs/xfs_buf.c
3452     @@ -604,6 +604,13 @@ found:
3453     }
3454     }
3455    
3456     + /*
3457     + * Clear b_error if this is a lookup from a caller that doesn't expect
3458     + * valid data to be found in the buffer.
3459     + */
3460     + if (!(flags & XBF_READ))
3461     + xfs_buf_ioerror(bp, 0);
3462     +
3463     XFS_STATS_INC(target->bt_mount, xb_get);
3464     trace_xfs_buf_get(bp, flags, _RET_IP_);
3465     return bp;
3466     @@ -1520,6 +1527,16 @@ xfs_wait_buftarg(
3467     LIST_HEAD(dispose);
3468     int loop = 0;
3469    
3470     + /*
3471     + * We need to flush the buffer workqueue to ensure that all IO
3472     + * completion processing is 100% done. Just waiting on buffer locks is
3473     + * not sufficient for async IO as the reference count held over IO is
3474     + * not released until after the buffer lock is dropped. Hence we need to
3475     + * ensure here that all reference counts have been dropped before we
3476     + * start walking the LRU list.
3477     + */
3478     + drain_workqueue(btp->bt_mount->m_buf_workqueue);
3479     +
3480     /* loop until there is nothing left on the lru list. */
3481     while (list_lru_count(&btp->bt_lru)) {
3482     list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
3483     diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
3484     index aa67339b9537..4f18fd92ca13 100644
3485     --- a/fs/xfs/xfs_trans_ail.c
3486     +++ b/fs/xfs/xfs_trans_ail.c
3487     @@ -497,7 +497,6 @@ xfsaild(
3488     long tout = 0; /* milliseconds */
3489    
3490     current->flags |= PF_MEMALLOC;
3491     - set_freezable();
3492    
3493     while (!kthread_should_stop()) {
3494     if (tout && tout <= 20)
3495     diff --git a/include/linux/compiler.h b/include/linux/compiler.h
3496     index 4dac1036594f..6fc9a6dd5ed2 100644
3497     --- a/include/linux/compiler.h
3498     +++ b/include/linux/compiler.h
3499     @@ -144,7 +144,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
3500     */
3501     #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
3502     #define __trace_if(cond) \
3503     - if (__builtin_constant_p((cond)) ? !!(cond) : \
3504     + if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
3505     ({ \
3506     int ______r; \
3507     static struct ftrace_branch_data \
3508     diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
3509     index 251a2090a554..e0ee0b3000b2 100644
3510     --- a/include/linux/devpts_fs.h
3511     +++ b/include/linux/devpts_fs.h
3512     @@ -19,6 +19,8 @@
3513    
3514     int devpts_new_index(struct inode *ptmx_inode);
3515     void devpts_kill_index(struct inode *ptmx_inode, int idx);
3516     +void devpts_add_ref(struct inode *ptmx_inode);
3517     +void devpts_del_ref(struct inode *ptmx_inode);
3518     /* mknod in devpts */
3519     struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
3520     void *priv);
3521     @@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode);
3522     /* Dummy stubs in the no-pty case */
3523     static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
3524     static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
3525     +static inline void devpts_add_ref(struct inode *ptmx_inode) { }
3526     +static inline void devpts_del_ref(struct inode *ptmx_inode) { }
3527     static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
3528     dev_t device, int index, void *priv)
3529     {
3530     diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
3531     index 821273ca4873..2d9b650047a5 100644
3532     --- a/include/linux/intel-iommu.h
3533     +++ b/include/linux/intel-iommu.h
3534     @@ -235,6 +235,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
3535     /* low 64 bit */
3536     #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
3537    
3538     +/* PRS_REG */
3539     +#define DMA_PRS_PPR ((u32)1)
3540     +
3541     #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
3542     do { \
3543     cycles_t start_time = get_cycles(); \
3544     diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
3545     index 061265f92876..504c98a278d4 100644
3546     --- a/include/linux/ptrace.h
3547     +++ b/include/linux/ptrace.h
3548     @@ -57,7 +57,29 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
3549     #define PTRACE_MODE_READ 0x01
3550     #define PTRACE_MODE_ATTACH 0x02
3551     #define PTRACE_MODE_NOAUDIT 0x04
3552     -/* Returns true on success, false on denial. */
3553     +#define PTRACE_MODE_FSCREDS 0x08
3554     +#define PTRACE_MODE_REALCREDS 0x10
3555     +
3556     +/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
3557     +#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
3558     +#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
3559     +#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
3560     +#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
3561     +
3562     +/**
3563     + * ptrace_may_access - check whether the caller is permitted to access
3564     + * a target task.
3565     + * @task: target task
3566     + * @mode: selects type of access and caller credentials
3567     + *
3568     + * Returns true on success, false on denial.
3569     + *
3570     + * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
3571     + * be set in @mode to specify whether the access was requested through
3572     + * a filesystem syscall (should use effective capabilities and fsuid
3573     + * of the caller) or through an explicit syscall such as
3574     + * process_vm_writev or ptrace (and should use the real credentials).
3575     + */
3576     extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
3577    
3578     static inline int ptrace_reparented(struct task_struct *child)
3579     diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
3580     index 33170dbd9db4..5d5174b59802 100644
3581     --- a/include/linux/radix-tree.h
3582     +++ b/include/linux/radix-tree.h
3583     @@ -370,12 +370,28 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
3584     struct radix_tree_iter *iter, unsigned flags);
3585    
3586     /**
3587     + * radix_tree_iter_retry - retry this chunk of the iteration
3588     + * @iter: iterator state
3589     + *
3590     + * If we iterate over a tree protected only by the RCU lock, a race
3591     + * against deletion or creation may result in seeing a slot for which
3592     + * radix_tree_deref_retry() returns true. If so, call this function
3593     + * and continue the iteration.
3594     + */
3595     +static inline __must_check
3596     +void **radix_tree_iter_retry(struct radix_tree_iter *iter)
3597     +{
3598     + iter->next_index = iter->index;
3599     + return NULL;
3600     +}
3601     +
3602     +/**
3603     * radix_tree_chunk_size - get current chunk size
3604     *
3605     * @iter: pointer to radix tree iterator
3606     * Returns: current chunk size
3607     */
3608     -static __always_inline unsigned
3609     +static __always_inline long
3610     radix_tree_chunk_size(struct radix_tree_iter *iter)
3611     {
3612     return iter->next_index - iter->index;
3613     @@ -409,9 +425,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
3614     return slot + offset + 1;
3615     }
3616     } else {
3617     - unsigned size = radix_tree_chunk_size(iter) - 1;
3618     + long size = radix_tree_chunk_size(iter);
3619    
3620     - while (size--) {
3621     + while (--size > 0) {
3622     slot++;
3623     iter->index++;
3624     if (likely(*slot))
3625     diff --git a/include/linux/rmap.h b/include/linux/rmap.h
3626     index 29446aeef36e..ddda2ac3446e 100644
3627     --- a/include/linux/rmap.h
3628     +++ b/include/linux/rmap.h
3629     @@ -108,20 +108,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
3630     __put_anon_vma(anon_vma);
3631     }
3632    
3633     -static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
3634     -{
3635     - struct anon_vma *anon_vma = vma->anon_vma;
3636     - if (anon_vma)
3637     - down_write(&anon_vma->root->rwsem);
3638     -}
3639     -
3640     -static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
3641     -{
3642     - struct anon_vma *anon_vma = vma->anon_vma;
3643     - if (anon_vma)
3644     - up_write(&anon_vma->root->rwsem);
3645     -}
3646     -
3647     static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
3648     {
3649     down_write(&anon_vma->root->rwsem);
3650     diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
3651     index 696a339c592c..03c7efb60c91 100644
3652     --- a/include/linux/tracepoint.h
3653     +++ b/include/linux/tracepoint.h
3654     @@ -14,8 +14,10 @@
3655     * See the file COPYING for more details.
3656     */
3657    
3658     +#include <linux/smp.h>
3659     #include <linux/errno.h>
3660     #include <linux/types.h>
3661     +#include <linux/cpumask.h>
3662     #include <linux/rcupdate.h>
3663     #include <linux/static_key.h>
3664    
3665     @@ -146,6 +148,9 @@ extern void syscall_unregfunc(void);
3666     void *it_func; \
3667     void *__data; \
3668     \
3669     + if (!cpu_online(raw_smp_processor_id())) \
3670     + return; \
3671     + \
3672     if (!(cond)) \
3673     return; \
3674     prercu; \
3675     diff --git a/ipc/shm.c b/ipc/shm.c
3676     index 41787276e141..3174634ca4e5 100644
3677     --- a/ipc/shm.c
3678     +++ b/ipc/shm.c
3679     @@ -156,11 +156,12 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
3680     struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
3681    
3682     /*
3683     - * We raced in the idr lookup or with shm_destroy(). Either way, the
3684     - * ID is busted.
3685     + * Callers of shm_lock() must validate the status of the returned ipc
3686     + * object pointer (as returned by ipc_lock()), and error out as
3687     + * appropriate.
3688     */
3689     - WARN_ON(IS_ERR(ipcp));
3690     -
3691     + if (IS_ERR(ipcp))
3692     + return (void *)ipcp;
3693     return container_of(ipcp, struct shmid_kernel, shm_perm);
3694     }
3695    
3696     @@ -186,18 +187,33 @@ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
3697     }
3698    
3699    
3700     -/* This is called by fork, once for every shm attach. */
3701     -static void shm_open(struct vm_area_struct *vma)
3702     +static int __shm_open(struct vm_area_struct *vma)
3703     {
3704     struct file *file = vma->vm_file;
3705     struct shm_file_data *sfd = shm_file_data(file);
3706     struct shmid_kernel *shp;
3707    
3708     shp = shm_lock(sfd->ns, sfd->id);
3709     +
3710     + if (IS_ERR(shp))
3711     + return PTR_ERR(shp);
3712     +
3713     shp->shm_atim = get_seconds();
3714     shp->shm_lprid = task_tgid_vnr(current);
3715     shp->shm_nattch++;
3716     shm_unlock(shp);
3717     + return 0;
3718     +}
3719     +
3720     +/* This is called by fork, once for every shm attach. */
3721     +static void shm_open(struct vm_area_struct *vma)
3722     +{
3723     + int err = __shm_open(vma);
3724     + /*
3725     + * We raced in the idr lookup or with shm_destroy().
3726     + * Either way, the ID is busted.
3727     + */
3728     + WARN_ON_ONCE(err);
3729     }
3730    
3731     /*
3732     @@ -260,6 +276,14 @@ static void shm_close(struct vm_area_struct *vma)
3733     down_write(&shm_ids(ns).rwsem);
3734     /* remove from the list of attaches of the shm segment */
3735     shp = shm_lock(ns, sfd->id);
3736     +
3737     + /*
3738     + * We raced in the idr lookup or with shm_destroy().
3739     + * Either way, the ID is busted.
3740     + */
3741     + if (WARN_ON_ONCE(IS_ERR(shp)))
3742     + goto done; /* no-op */
3743     +
3744     shp->shm_lprid = task_tgid_vnr(current);
3745     shp->shm_dtim = get_seconds();
3746     shp->shm_nattch--;
3747     @@ -267,6 +291,7 @@ static void shm_close(struct vm_area_struct *vma)
3748     shm_destroy(ns, shp);
3749     else
3750     shm_unlock(shp);
3751     +done:
3752     up_write(&shm_ids(ns).rwsem);
3753     }
3754    
3755     @@ -388,17 +413,25 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
3756     struct shm_file_data *sfd = shm_file_data(file);
3757     int ret;
3758    
3759     + /*
3760     + * In case of remap_file_pages() emulation, the file can represent a
3761     + * removed IPC ID: propagate shm_lock() error to caller.
3762     + */
3763     + ret = __shm_open(vma);
3764     + if (ret)
3765     + return ret;
3766     +
3767     ret = sfd->file->f_op->mmap(sfd->file, vma);
3768     - if (ret != 0)
3769     + if (ret) {
3770     + shm_close(vma);
3771     return ret;
3772     + }
3773     sfd->vm_ops = vma->vm_ops;
3774     #ifdef CONFIG_MMU
3775     WARN_ON(!sfd->vm_ops->fault);
3776     #endif
3777     vma->vm_ops = &shm_vm_ops;
3778     - shm_open(vma);
3779     -
3780     - return ret;
3781     + return 0;
3782     }
3783    
3784     static int shm_release(struct inode *ino, struct file *file)
3785     diff --git a/kernel/events/core.c b/kernel/events/core.c
3786     index cfc227ccfceb..1087bbeb152b 100644
3787     --- a/kernel/events/core.c
3788     +++ b/kernel/events/core.c
3789     @@ -3434,7 +3434,7 @@ find_lively_task_by_vpid(pid_t vpid)
3790    
3791     /* Reuse ptrace permission checks for now. */
3792     err = -EACCES;
3793     - if (!ptrace_may_access(task, PTRACE_MODE_READ))
3794     + if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
3795     goto errout;
3796    
3797     return task;
3798     diff --git a/kernel/futex.c b/kernel/futex.c
3799     index 684d7549825a..461c72b2dac2 100644
3800     --- a/kernel/futex.c
3801     +++ b/kernel/futex.c
3802     @@ -2755,6 +2755,11 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
3803     if (q.pi_state && (q.pi_state->owner != current)) {
3804     spin_lock(q.lock_ptr);
3805     ret = fixup_pi_state_owner(uaddr2, &q, current);
3806     + /*
3807     + * Drop the reference to the pi state which
3808     + * the requeue_pi() code acquired for us.
3809     + */
3810     + free_pi_state(q.pi_state);
3811     spin_unlock(q.lock_ptr);
3812     }
3813     } else {
3814     @@ -2881,7 +2886,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
3815     }
3816    
3817     ret = -EPERM;
3818     - if (!ptrace_may_access(p, PTRACE_MODE_READ))
3819     + if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3820     goto err_unlock;
3821    
3822     head = p->robust_list;
3823     diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
3824     index 55c8c9349cfe..4ae3232e7a28 100644
3825     --- a/kernel/futex_compat.c
3826     +++ b/kernel/futex_compat.c
3827     @@ -155,7 +155,7 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
3828     }
3829    
3830     ret = -EPERM;
3831     - if (!ptrace_may_access(p, PTRACE_MODE_READ))
3832     + if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3833     goto err_unlock;
3834    
3835     head = p->compat_robust_list;
3836     diff --git a/kernel/kcmp.c b/kernel/kcmp.c
3837     index 0aa69ea1d8fd..3a47fa998fe0 100644
3838     --- a/kernel/kcmp.c
3839     +++ b/kernel/kcmp.c
3840     @@ -122,8 +122,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
3841     &task2->signal->cred_guard_mutex);
3842     if (ret)
3843     goto err;
3844     - if (!ptrace_may_access(task1, PTRACE_MODE_READ) ||
3845     - !ptrace_may_access(task2, PTRACE_MODE_READ)) {
3846     + if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
3847     + !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
3848     ret = -EPERM;
3849     goto err_unlock;
3850     }
3851     diff --git a/kernel/memremap.c b/kernel/memremap.c
3852     index 7658d32c5c78..7a4e473cea4d 100644
3853     --- a/kernel/memremap.c
3854     +++ b/kernel/memremap.c
3855     @@ -111,7 +111,7 @@ EXPORT_SYMBOL(memunmap);
3856    
3857     static void devm_memremap_release(struct device *dev, void *res)
3858     {
3859     - memunmap(res);
3860     + memunmap(*(void **)res);
3861     }
3862    
3863     static int devm_memremap_match(struct device *dev, void *res, void *match_data)
3864     diff --git a/kernel/module.c b/kernel/module.c
3865     index 38c7bd5583ff..14833e6d5e37 100644
3866     --- a/kernel/module.c
3867     +++ b/kernel/module.c
3868     @@ -3515,7 +3515,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
3869    
3870     /* Module is ready to execute: parsing args may do that. */
3871     after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3872     - -32768, 32767, NULL,
3873     + -32768, 32767, mod,
3874     unknown_module_param_cb);
3875     if (IS_ERR(after_dashes)) {
3876     err = PTR_ERR(after_dashes);
3877     @@ -3646,6 +3646,11 @@ static inline int is_arm_mapping_symbol(const char *str)
3878     && (str[2] == '\0' || str[2] == '.');
3879     }
3880    
3881     +static const char *symname(struct module *mod, unsigned int symnum)
3882     +{
3883     + return mod->strtab + mod->symtab[symnum].st_name;
3884     +}
3885     +
3886     static const char *get_ksymbol(struct module *mod,
3887     unsigned long addr,
3888     unsigned long *size,
3889     @@ -3668,15 +3673,15 @@ static const char *get_ksymbol(struct module *mod,
3890    
3891     /* We ignore unnamed symbols: they're uninformative
3892     * and inserted at a whim. */
3893     + if (*symname(mod, i) == '\0'
3894     + || is_arm_mapping_symbol(symname(mod, i)))
3895     + continue;
3896     +
3897     if (mod->symtab[i].st_value <= addr
3898     - && mod->symtab[i].st_value > mod->symtab[best].st_value
3899     - && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3900     - && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3901     + && mod->symtab[i].st_value > mod->symtab[best].st_value)
3902     best = i;
3903     if (mod->symtab[i].st_value > addr
3904     - && mod->symtab[i].st_value < nextval
3905     - && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3906     - && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3907     + && mod->symtab[i].st_value < nextval)
3908     nextval = mod->symtab[i].st_value;
3909     }
3910    
3911     @@ -3687,7 +3692,7 @@ static const char *get_ksymbol(struct module *mod,
3912     *size = nextval - mod->symtab[best].st_value;
3913     if (offset)
3914     *offset = addr - mod->symtab[best].st_value;
3915     - return mod->strtab + mod->symtab[best].st_name;
3916     + return symname(mod, best);
3917     }
3918    
3919     /* For kallsyms to ask for address resolution. NULL means not found. Careful
3920     @@ -3782,8 +3787,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3921     if (symnum < mod->num_symtab) {
3922     *value = mod->symtab[symnum].st_value;
3923     *type = mod->symtab[symnum].st_info;
3924     - strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
3925     - KSYM_NAME_LEN);
3926     + strlcpy(name, symname(mod, symnum), KSYM_NAME_LEN);
3927     strlcpy(module_name, mod->name, MODULE_NAME_LEN);
3928     *exported = is_exported(name, *value, mod);
3929     preempt_enable();
3930     @@ -3800,7 +3804,7 @@ static unsigned long mod_find_symname(struct module *mod, const char *name)
3931     unsigned int i;
3932    
3933     for (i = 0; i < mod->num_symtab; i++)
3934     - if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
3935     + if (strcmp(name, symname(mod, i)) == 0 &&
3936     mod->symtab[i].st_info != 'U')
3937     return mod->symtab[i].st_value;
3938     return 0;
3939     @@ -3844,7 +3848,7 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3940     if (mod->state == MODULE_STATE_UNFORMED)
3941     continue;
3942     for (i = 0; i < mod->num_symtab; i++) {
3943     - ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3944     + ret = fn(data, symname(mod, i),
3945     mod, mod->symtab[i].st_value);
3946     if (ret != 0)
3947     return ret;
3948     diff --git a/kernel/ptrace.c b/kernel/ptrace.c
3949     index b760bae64cf1..3189e51db7e8 100644
3950     --- a/kernel/ptrace.c
3951     +++ b/kernel/ptrace.c
3952     @@ -219,6 +219,14 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
3953     static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
3954     {
3955     const struct cred *cred = current_cred(), *tcred;
3956     + int dumpable = 0;
3957     + kuid_t caller_uid;
3958     + kgid_t caller_gid;
3959     +
3960     + if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
3961     + WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
3962     + return -EPERM;
3963     + }
3964    
3965     /* May we inspect the given task?
3966     * This check is used both for attaching with ptrace
3967     @@ -228,18 +236,33 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
3968     * because setting up the necessary parent/child relationship
3969     * or halting the specified task is impossible.
3970     */
3971     - int dumpable = 0;
3972     +
3973     /* Don't let security modules deny introspection */
3974     if (same_thread_group(task, current))
3975     return 0;
3976     rcu_read_lock();
3977     + if (mode & PTRACE_MODE_FSCREDS) {
3978     + caller_uid = cred->fsuid;
3979     + caller_gid = cred->fsgid;
3980     + } else {
3981     + /*
3982     + * Using the euid would make more sense here, but something
3983     + * in userland might rely on the old behavior, and this
3984     + * shouldn't be a security problem since
3985     + * PTRACE_MODE_REALCREDS implies that the caller explicitly
3986     + * used a syscall that requests access to another process
3987     + * (and not a filesystem syscall to procfs).
3988     + */
3989     + caller_uid = cred->uid;
3990     + caller_gid = cred->gid;
3991     + }
3992     tcred = __task_cred(task);
3993     - if (uid_eq(cred->uid, tcred->euid) &&
3994     - uid_eq(cred->uid, tcred->suid) &&
3995     - uid_eq(cred->uid, tcred->uid) &&
3996     - gid_eq(cred->gid, tcred->egid) &&
3997     - gid_eq(cred->gid, tcred->sgid) &&
3998     - gid_eq(cred->gid, tcred->gid))
3999     + if (uid_eq(caller_uid, tcred->euid) &&
4000     + uid_eq(caller_uid, tcred->suid) &&
4001     + uid_eq(caller_uid, tcred->uid) &&
4002     + gid_eq(caller_gid, tcred->egid) &&
4003     + gid_eq(caller_gid, tcred->sgid) &&
4004     + gid_eq(caller_gid, tcred->gid))
4005     goto ok;
4006     if (ptrace_has_cap(tcred->user_ns, mode))
4007     goto ok;
4008     @@ -306,7 +329,7 @@ static int ptrace_attach(struct task_struct *task, long request,
4009     goto out;
4010    
4011     task_lock(task);
4012     - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
4013     + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
4014     task_unlock(task);
4015     if (retval)
4016     goto unlock_creds;
4017     diff --git a/kernel/sys.c b/kernel/sys.c
4018     index 6af9212ab5aa..78947de6f969 100644
4019     --- a/kernel/sys.c
4020     +++ b/kernel/sys.c
4021     @@ -1853,11 +1853,13 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
4022     user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
4023     }
4024    
4025     - if (prctl_map.exe_fd != (u32)-1)
4026     + if (prctl_map.exe_fd != (u32)-1) {
4027     error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
4028     - down_read(&mm->mmap_sem);
4029     - if (error)
4030     - goto out;
4031     + if (error)
4032     + return error;
4033     + }
4034     +
4035     + down_write(&mm->mmap_sem);
4036    
4037     /*
4038     * We don't validate if these members are pointing to
4039     @@ -1894,10 +1896,8 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
4040     if (prctl_map.auxv_size)
4041     memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
4042    
4043     - error = 0;
4044     -out:
4045     - up_read(&mm->mmap_sem);
4046     - return error;
4047     + up_write(&mm->mmap_sem);
4048     + return 0;
4049     }
4050     #endif /* CONFIG_CHECKPOINT_RESTORE */
4051    
4052     @@ -1963,7 +1963,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
4053    
4054     error = -EINVAL;
4055    
4056     - down_read(&mm->mmap_sem);
4057     + down_write(&mm->mmap_sem);
4058     vma = find_vma(mm, addr);
4059    
4060     prctl_map.start_code = mm->start_code;
4061     @@ -2056,7 +2056,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
4062    
4063     error = 0;
4064     out:
4065     - up_read(&mm->mmap_sem);
4066     + up_write(&mm->mmap_sem);
4067     return error;
4068     }
4069    
4070     diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
4071     index 8d262b467573..1d5c7204ddc9 100644
4072     --- a/kernel/time/itimer.c
4073     +++ b/kernel/time/itimer.c
4074     @@ -26,7 +26,7 @@
4075     */
4076     static struct timeval itimer_get_remtime(struct hrtimer *timer)
4077     {
4078     - ktime_t rem = hrtimer_get_remaining(timer);
4079     + ktime_t rem = __hrtimer_get_remaining(timer, true);
4080    
4081     /*
4082     * Racy but safe: if the itimer expires after the above
4083     diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
4084     index 31d11ac9fa47..f2826c35e918 100644
4085     --- a/kernel/time/posix-timers.c
4086     +++ b/kernel/time/posix-timers.c
4087     @@ -760,7 +760,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
4088     (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
4089     timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
4090    
4091     - remaining = ktime_sub(hrtimer_get_expires(timer), now);
4092     + remaining = __hrtimer_expires_remaining_adjusted(timer, now);
4093     /* Return 0 only, when the timer is expired and not pending */
4094     if (remaining.tv64 <= 0) {
4095     /*
4096     diff --git a/lib/dma-debug.c b/lib/dma-debug.c
4097     index d34bd24c2c84..4a1515f4b452 100644
4098     --- a/lib/dma-debug.c
4099     +++ b/lib/dma-debug.c
4100     @@ -1181,7 +1181,7 @@ static inline bool overlap(void *addr, unsigned long len, void *start, void *end
4101    
4102     static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
4103     {
4104     - if (overlap(addr, len, _text, _etext) ||
4105     + if (overlap(addr, len, _stext, _etext) ||
4106     overlap(addr, len, __start_rodata, __end_rodata))
4107     err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
4108     }
4109     diff --git a/lib/dump_stack.c b/lib/dump_stack.c
4110     index 6745c6230db3..c30d07e99dba 100644
4111     --- a/lib/dump_stack.c
4112     +++ b/lib/dump_stack.c
4113     @@ -25,6 +25,7 @@ static atomic_t dump_lock = ATOMIC_INIT(-1);
4114    
4115     asmlinkage __visible void dump_stack(void)
4116     {
4117     + unsigned long flags;
4118     int was_locked;
4119     int old;
4120     int cpu;
4121     @@ -33,9 +34,8 @@ asmlinkage __visible void dump_stack(void)
4122     * Permit this cpu to perform nested stack dumps while serialising
4123     * against other CPUs
4124     */
4125     - preempt_disable();
4126     -
4127     retry:
4128     + local_irq_save(flags);
4129     cpu = smp_processor_id();
4130     old = atomic_cmpxchg(&dump_lock, -1, cpu);
4131     if (old == -1) {
4132     @@ -43,6 +43,7 @@ retry:
4133     } else if (old == cpu) {
4134     was_locked = 1;
4135     } else {
4136     + local_irq_restore(flags);
4137     cpu_relax();
4138     goto retry;
4139     }
4140     @@ -52,7 +53,7 @@ retry:
4141     if (!was_locked)
4142     atomic_set(&dump_lock, -1);
4143    
4144     - preempt_enable();
4145     + local_irq_restore(flags);
4146     }
4147     #else
4148     asmlinkage __visible void dump_stack(void)
4149     diff --git a/lib/klist.c b/lib/klist.c
4150     index d74cf7a29afd..0507fa5d84c5 100644
4151     --- a/lib/klist.c
4152     +++ b/lib/klist.c
4153     @@ -282,9 +282,9 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
4154     struct klist_node *n)
4155     {
4156     i->i_klist = k;
4157     - i->i_cur = n;
4158     - if (n)
4159     - kref_get(&n->n_ref);
4160     + i->i_cur = NULL;
4161     + if (n && kref_get_unless_zero(&n->n_ref))
4162     + i->i_cur = n;
4163     }
4164     EXPORT_SYMBOL_GPL(klist_iter_init_node);
4165    
4166     diff --git a/lib/radix-tree.c b/lib/radix-tree.c
4167     index fcf5d98574ce..6b79e9026e24 100644
4168     --- a/lib/radix-tree.c
4169     +++ b/lib/radix-tree.c
4170     @@ -1019,9 +1019,13 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
4171     return 0;
4172    
4173     radix_tree_for_each_slot(slot, root, &iter, first_index) {
4174     - results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
4175     + results[ret] = rcu_dereference_raw(*slot);
4176     if (!results[ret])
4177     continue;
4178     + if (radix_tree_is_indirect_ptr(results[ret])) {
4179     + slot = radix_tree_iter_retry(&iter);
4180     + continue;
4181     + }
4182     if (++ret == max_items)
4183     break;
4184     }
4185     @@ -1098,9 +1102,13 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
4186     return 0;
4187    
4188     radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
4189     - results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
4190     + results[ret] = rcu_dereference_raw(*slot);
4191     if (!results[ret])
4192     continue;
4193     + if (radix_tree_is_indirect_ptr(results[ret])) {
4194     + slot = radix_tree_iter_retry(&iter);
4195     + continue;
4196     + }
4197     if (++ret == max_items)
4198     break;
4199     }
4200     diff --git a/lib/string_helpers.c b/lib/string_helpers.c
4201     index 5939f63d90cd..5c88204b6f1f 100644
4202     --- a/lib/string_helpers.c
4203     +++ b/lib/string_helpers.c
4204     @@ -43,50 +43,73 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
4205     [STRING_UNITS_10] = 1000,
4206     [STRING_UNITS_2] = 1024,
4207     };
4208     - int i, j;
4209     - u32 remainder = 0, sf_cap, exp;
4210     + static const unsigned int rounding[] = { 500, 50, 5 };
4211     + int i = 0, j;
4212     + u32 remainder = 0, sf_cap;
4213     char tmp[8];
4214     const char *unit;
4215    
4216     tmp[0] = '\0';
4217     - i = 0;
4218     - if (!size)
4219     +
4220     + if (blk_size == 0)
4221     + size = 0;
4222     + if (size == 0)
4223     goto out;
4224    
4225     - while (blk_size >= divisor[units]) {
4226     - remainder = do_div(blk_size, divisor[units]);
4227     + /* This is Napier's algorithm. Reduce the original block size to
4228     + *
4229     + * coefficient * divisor[units]^i
4230     + *
4231     + * we do the reduction so both coefficients are just under 32 bits so
4232     + * that multiplying them together won't overflow 64 bits and we keep
4233     + * as much precision as possible in the numbers.
4234     + *
4235     + * Note: it's safe to throw away the remainders here because all the
4236     + * precision is in the coefficients.
4237     + */
4238     + while (blk_size >> 32) {
4239     + do_div(blk_size, divisor[units]);
4240     i++;
4241     }
4242    
4243     - exp = divisor[units] / (u32)blk_size;
4244     - /*
4245     - * size must be strictly greater than exp here to ensure that remainder
4246     - * is greater than divisor[units] coming out of the if below.
4247     - */
4248     - if (size > exp) {
4249     - remainder = do_div(size, divisor[units]);
4250     - remainder *= blk_size;
4251     + while (size >> 32) {
4252     + do_div(size, divisor[units]);
4253     i++;
4254     - } else {
4255     - remainder *= size;
4256     }
4257    
4258     + /* now perform the actual multiplication keeping i as the sum of the
4259     + * two logarithms */
4260     size *= blk_size;
4261     - size += remainder / divisor[units];
4262     - remainder %= divisor[units];
4263    
4264     + /* and logarithmically reduce it until it's just under the divisor */
4265     while (size >= divisor[units]) {
4266     remainder = do_div(size, divisor[units]);
4267     i++;
4268     }
4269    
4270     + /* work out in j how many digits of precision we need from the
4271     + * remainder */
4272     sf_cap = size;
4273     for (j = 0; sf_cap*10 < 1000; j++)
4274     sf_cap *= 10;
4275    
4276     - if (j) {
4277     + if (units == STRING_UNITS_2) {
4278     + /* express the remainder as a decimal. It's currently the
4279     + * numerator of a fraction whose denominator is
4280     + * divisor[units], which is 1 << 10 for STRING_UNITS_2 */
4281     remainder *= 1000;
4282     - remainder /= divisor[units];
4283     + remainder >>= 10;
4284     + }
4285     +
4286     + /* add a 5 to the digit below what will be printed to ensure
4287     + * an arithmetical round up and carry it through to size */
4288     + remainder += rounding[j];
4289     + if (remainder >= 1000) {
4290     + remainder -= 1000;
4291     + size += 1;
4292     + }
4293     +
4294     + if (j) {
4295     snprintf(tmp, sizeof(tmp), ".%03u", remainder);
4296     tmp[j+1] = '\0';
4297     }
4298     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4299     index fc10620967c7..ee6acd279953 100644
4300     --- a/mm/memcontrol.c
4301     +++ b/mm/memcontrol.c
4302     @@ -3522,16 +3522,17 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4303     swap_buffers:
4304     /* Swap primary and spare array */
4305     thresholds->spare = thresholds->primary;
4306     - /* If all events are unregistered, free the spare array */
4307     - if (!new) {
4308     - kfree(thresholds->spare);
4309     - thresholds->spare = NULL;
4310     - }
4311    
4312     rcu_assign_pointer(thresholds->primary, new);
4313    
4314     /* To be sure that nobody uses thresholds */
4315     synchronize_rcu();
4316     +
4317     + /* If all events are unregistered, free the spare array */
4318     + if (!new) {
4319     + kfree(thresholds->spare);
4320     + thresholds->spare = NULL;
4321     + }
4322     unlock:
4323     mutex_unlock(&memcg->thresholds_lock);
4324     }
4325     diff --git a/mm/memory-failure.c b/mm/memory-failure.c
4326     index 8424b64711ac..750b7893ee3a 100644
4327     --- a/mm/memory-failure.c
4328     +++ b/mm/memory-failure.c
4329     @@ -1572,7 +1572,7 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
4330     * Did it turn free?
4331     */
4332     ret = __get_any_page(page, pfn, 0);
4333     - if (!PageLRU(page)) {
4334     + if (ret == 1 && !PageLRU(page)) {
4335     /* Drop page reference which is from __get_any_page() */
4336     put_hwpoison_page(page);
4337     pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
4338     diff --git a/mm/mlock.c b/mm/mlock.c
4339     index 339d9e0949b6..d6006b146fea 100644
4340     --- a/mm/mlock.c
4341     +++ b/mm/mlock.c
4342     @@ -172,7 +172,7 @@ static void __munlock_isolation_failed(struct page *page)
4343     */
4344     unsigned int munlock_vma_page(struct page *page)
4345     {
4346     - unsigned int nr_pages;
4347     + int nr_pages;
4348     struct zone *zone = page_zone(page);
4349    
4350     /* For try_to_munlock() and to serialize with page migration */
4351     diff --git a/mm/mmap.c b/mm/mmap.c
4352     index 2ce04a649f6b..455772a05e54 100644
4353     --- a/mm/mmap.c
4354     +++ b/mm/mmap.c
4355     @@ -441,12 +441,16 @@ static void validate_mm(struct mm_struct *mm)
4356     struct vm_area_struct *vma = mm->mmap;
4357    
4358     while (vma) {
4359     + struct anon_vma *anon_vma = vma->anon_vma;
4360     struct anon_vma_chain *avc;
4361    
4362     - vma_lock_anon_vma(vma);
4363     - list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
4364     - anon_vma_interval_tree_verify(avc);
4365     - vma_unlock_anon_vma(vma);
4366     + if (anon_vma) {
4367     + anon_vma_lock_read(anon_vma);
4368     + list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
4369     + anon_vma_interval_tree_verify(avc);
4370     + anon_vma_unlock_read(anon_vma);
4371     + }
4372     +
4373     highest_address = vma->vm_end;
4374     vma = vma->vm_next;
4375     i++;
4376     @@ -2147,32 +2151,27 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
4377     int expand_upwards(struct vm_area_struct *vma, unsigned long address)
4378     {
4379     struct mm_struct *mm = vma->vm_mm;
4380     - int error;
4381     + int error = 0;
4382    
4383     if (!(vma->vm_flags & VM_GROWSUP))
4384     return -EFAULT;
4385    
4386     - /*
4387     - * We must make sure the anon_vma is allocated
4388     - * so that the anon_vma locking is not a noop.
4389     - */
4390     + /* Guard against wrapping around to address 0. */
4391     + if (address < PAGE_ALIGN(address+4))
4392     + address = PAGE_ALIGN(address+4);
4393     + else
4394     + return -ENOMEM;
4395     +
4396     + /* We must make sure the anon_vma is allocated. */
4397     if (unlikely(anon_vma_prepare(vma)))
4398     return -ENOMEM;
4399     - vma_lock_anon_vma(vma);
4400    
4401     /*
4402     * vma->vm_start/vm_end cannot change under us because the caller
4403     * is required to hold the mmap_sem in read mode. We need the
4404     * anon_vma lock to serialize against concurrent expand_stacks.
4405     - * Also guard against wrapping around to address 0.
4406     */
4407     - if (address < PAGE_ALIGN(address+4))
4408     - address = PAGE_ALIGN(address+4);
4409     - else {
4410     - vma_unlock_anon_vma(vma);
4411     - return -ENOMEM;
4412     - }
4413     - error = 0;
4414     + anon_vma_lock_write(vma->anon_vma);
4415    
4416     /* Somebody else might have raced and expanded it already */
4417     if (address > vma->vm_end) {
4418     @@ -2190,7 +2189,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
4419     * updates, but we only hold a shared mmap_sem
4420     * lock here, so we need to protect against
4421     * concurrent vma expansions.
4422     - * vma_lock_anon_vma() doesn't help here, as
4423     + * anon_vma_lock_write() doesn't help here, as
4424     * we don't guarantee that all growable vmas
4425     * in a mm share the same root anon vma.
4426     * So, we reuse mm->page_table_lock to guard
4427     @@ -2214,7 +2213,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
4428     }
4429     }
4430     }
4431     - vma_unlock_anon_vma(vma);
4432     + anon_vma_unlock_write(vma->anon_vma);
4433     khugepaged_enter_vma_merge(vma, vma->vm_flags);
4434     validate_mm(mm);
4435     return error;
4436     @@ -2230,25 +2229,21 @@ int expand_downwards(struct vm_area_struct *vma,
4437     struct mm_struct *mm = vma->vm_mm;
4438     int error;
4439    
4440     - /*
4441     - * We must make sure the anon_vma is allocated
4442     - * so that the anon_vma locking is not a noop.
4443     - */
4444     - if (unlikely(anon_vma_prepare(vma)))
4445     - return -ENOMEM;
4446     -
4447     address &= PAGE_MASK;
4448     error = security_mmap_addr(address);
4449     if (error)
4450     return error;
4451    
4452     - vma_lock_anon_vma(vma);
4453     + /* We must make sure the anon_vma is allocated. */
4454     + if (unlikely(anon_vma_prepare(vma)))
4455     + return -ENOMEM;
4456    
4457     /*
4458     * vma->vm_start/vm_end cannot change under us because the caller
4459     * is required to hold the mmap_sem in read mode. We need the
4460     * anon_vma lock to serialize against concurrent expand_stacks.
4461     */
4462     + anon_vma_lock_write(vma->anon_vma);
4463    
4464     /* Somebody else might have raced and expanded it already */
4465     if (address < vma->vm_start) {
4466     @@ -2266,7 +2261,7 @@ int expand_downwards(struct vm_area_struct *vma,
4467     * updates, but we only hold a shared mmap_sem
4468     * lock here, so we need to protect against
4469     * concurrent vma expansions.
4470     - * vma_lock_anon_vma() doesn't help here, as
4471     + * anon_vma_lock_write() doesn't help here, as
4472     * we don't guarantee that all growable vmas
4473     * in a mm share the same root anon vma.
4474     * So, we reuse mm->page_table_lock to guard
4475     @@ -2288,7 +2283,7 @@ int expand_downwards(struct vm_area_struct *vma,
4476     }
4477     }
4478     }
4479     - vma_unlock_anon_vma(vma);
4480     + anon_vma_unlock_write(vma->anon_vma);
4481     khugepaged_enter_vma_merge(vma, vma->vm_flags);
4482     validate_mm(mm);
4483     return error;
4484     @@ -2673,12 +2668,29 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
4485     if (!vma || !(vma->vm_flags & VM_SHARED))
4486     goto out;
4487    
4488     - if (start < vma->vm_start || start + size > vma->vm_end)
4489     + if (start < vma->vm_start)
4490     goto out;
4491    
4492     - if (pgoff == linear_page_index(vma, start)) {
4493     - ret = 0;
4494     - goto out;
4495     + if (start + size > vma->vm_end) {
4496     + struct vm_area_struct *next;
4497     +
4498     + for (next = vma->vm_next; next; next = next->vm_next) {
4499     + /* hole between vmas ? */
4500     + if (next->vm_start != next->vm_prev->vm_end)
4501     + goto out;
4502     +
4503     + if (next->vm_file != vma->vm_file)
4504     + goto out;
4505     +
4506     + if (next->vm_flags != vma->vm_flags)
4507     + goto out;
4508     +
4509     + if (start + size <= next->vm_end)
4510     + break;
4511     + }
4512     +
4513     + if (!next)
4514     + goto out;
4515     }
4516    
4517     prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
4518     @@ -2688,9 +2700,16 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
4519     flags &= MAP_NONBLOCK;
4520     flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
4521     if (vma->vm_flags & VM_LOCKED) {
4522     + struct vm_area_struct *tmp;
4523     flags |= MAP_LOCKED;
4524     +
4525     /* drop PG_Mlocked flag for over-mapped range */
4526     - munlock_vma_pages_range(vma, start, start + size);
4527     + for (tmp = vma; tmp->vm_start >= start + size;
4528     + tmp = tmp->vm_next) {
4529     + munlock_vma_pages_range(tmp,
4530     + max(tmp->vm_start, start),
4531     + min(tmp->vm_end, start + size));
4532     + }
4533     }
4534    
4535     file = get_file(vma->vm_file);
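One detail worth isolating from the mm/mmap.c hunk above is the wrap-around guard now placed at the top of expand_upwards(): aligning "address + 4" up to a page boundary must yield a larger value, otherwise the arithmetic wrapped past the top of the address space and the expansion is refused. A standalone sketch of that check follows; PAGE_SIZE, the helper name, and the return convention are illustrative assumptions.

/*
 * Standalone sketch of the overflow check used in expand_upwards().
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE ((uintptr_t)4096)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* returns 0 and writes the aligned address, or -1 on wrap-around */
static int align_up_checked(uintptr_t address, uintptr_t *aligned)
{
        if (address < PAGE_ALIGN(address + 4)) {
                *aligned = PAGE_ALIGN(address + 4);
                return 0;
        }
        return -1;      /* PAGE_ALIGN(address + 4) wrapped back below address */
}

int main(void)
{
        uintptr_t out;

        printf("%d\n", align_up_checked((uintptr_t)0x7fff0000, &out)); /* 0  */
        printf("%d\n", align_up_checked(UINTPTR_MAX - 2, &out));       /* -1 */
        return 0;
}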
4536     diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
4537     index 7d3db0247983..1ba58213ad65 100644
4538     --- a/mm/pgtable-generic.c
4539     +++ b/mm/pgtable-generic.c
4540     @@ -210,7 +210,9 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
4541     VM_BUG_ON(address & ~HPAGE_PMD_MASK);
4542     VM_BUG_ON(pmd_trans_huge(*pmdp));
4543     pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
4544     - flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
4545     +
4546     + /* collapse entails shooting down ptes not pmd */
4547     + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
4548     return pmd;
4549     }
4550     #endif
4551     diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
4552     index e88d071648c2..5d453e58ddbf 100644
4553     --- a/mm/process_vm_access.c
4554     +++ b/mm/process_vm_access.c
4555     @@ -194,7 +194,7 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
4556     goto free_proc_pages;
4557     }
4558    
4559     - mm = mm_access(task, PTRACE_MODE_ATTACH);
4560     + mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
4561     if (!mm || IS_ERR(mm)) {
4562     rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
4563     /*
4564     diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
4565     index 2ffaf6a79499..027c9ef8a263 100644
4566     --- a/net/sunrpc/xprtsock.c
4567     +++ b/net/sunrpc/xprtsock.c
4568     @@ -398,7 +398,6 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
4569     if (unlikely(!sock))
4570     return -ENOTSOCK;
4571    
4572     - clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags);
4573     if (base != 0) {
4574     addr = NULL;
4575     addrlen = 0;
4576     @@ -442,7 +441,6 @@ static void xs_nospace_callback(struct rpc_task *task)
4577     struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
4578    
4579     transport->inet->sk_write_pending--;
4580     - clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
4581     }
4582    
4583     /**
4584     @@ -467,20 +465,11 @@ static int xs_nospace(struct rpc_task *task)
4585    
4586     /* Don't race with disconnect */
4587     if (xprt_connected(xprt)) {
4588     - if (test_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags)) {
4589     - /*
4590     - * Notify TCP that we're limited by the application
4591     - * window size
4592     - */
4593     - set_bit(SOCK_NOSPACE, &transport->sock->flags);
4594     - sk->sk_write_pending++;
4595     - /* ...and wait for more buffer space */
4596     - xprt_wait_for_buffer_space(task, xs_nospace_callback);
4597     - }
4598     - } else {
4599     - clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
4600     + /* wait for more buffer space */
4601     + sk->sk_write_pending++;
4602     + xprt_wait_for_buffer_space(task, xs_nospace_callback);
4603     + } else
4604     ret = -ENOTCONN;
4605     - }
4606    
4607     spin_unlock_bh(&xprt->transport_lock);
4608    
4609     @@ -616,9 +605,6 @@ process_status:
4610     case -EAGAIN:
4611     status = xs_nospace(task);
4612     break;
4613     - default:
4614     - dprintk("RPC: sendmsg returned unrecognized error %d\n",
4615     - -status);
4616     case -ENETUNREACH:
4617     case -ENOBUFS:
4618     case -EPIPE:
4619     @@ -626,7 +612,10 @@ process_status:
4620     case -EPERM:
4621     /* When the server has died, an ICMP port unreachable message
4622     * prompts ECONNREFUSED. */
4623     - clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
4624     + break;
4625     + default:
4626     + dprintk("RPC: sendmsg returned unrecognized error %d\n",
4627     + -status);
4628     }
4629    
4630     return status;
4631     @@ -706,16 +695,16 @@ static int xs_tcp_send_request(struct rpc_task *task)
4632     case -EAGAIN:
4633     status = xs_nospace(task);
4634     break;
4635     - default:
4636     - dprintk("RPC: sendmsg returned unrecognized error %d\n",
4637     - -status);
4638     case -ECONNRESET:
4639     case -ECONNREFUSED:
4640     case -ENOTCONN:
4641     case -EADDRINUSE:
4642     case -ENOBUFS:
4643     case -EPIPE:
4644     - clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
4645     + break;
4646     + default:
4647     + dprintk("RPC: sendmsg returned unrecognized error %d\n",
4648     + -status);
4649     }
4650    
4651     return status;
4652     @@ -1609,19 +1598,23 @@ static void xs_tcp_state_change(struct sock *sk)
4653    
4654     static void xs_write_space(struct sock *sk)
4655     {
4656     - struct socket *sock;
4657     + struct socket_wq *wq;
4658     struct rpc_xprt *xprt;
4659    
4660     - if (unlikely(!(sock = sk->sk_socket)))
4661     + if (!sk->sk_socket)
4662     return;
4663     - clear_bit(SOCK_NOSPACE, &sock->flags);
4664     + clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
4665    
4666     if (unlikely(!(xprt = xprt_from_sock(sk))))
4667     return;
4668     - if (test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags) == 0)
4669     - return;
4670     + rcu_read_lock();
4671     + wq = rcu_dereference(sk->sk_wq);
4672     + if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
4673     + goto out;
4674    
4675     xprt_write_space(xprt);
4676     +out:
4677     + rcu_read_unlock();
4678     }
4679    
4680     /**
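In the net/sunrpc/xprtsock.c hunk above, xs_write_space() now dereferences sk->sk_wq under rcu_read_lock() and tolerates a NULL result instead of poking the socket's flag word directly. The sketch below shows the reader-side half of that RCU pattern (the writer-side half was sketched after the mm/memcontrol.c hunk); everything except the RCU primitives is an illustrative name.

/*
 * Reader-side RCU sketch: dereference once under the read lock, handle
 * NULL, and do not use the pointer after rcu_read_unlock().
 */
#include <linux/rcupdate.h>

struct settings {
        int val;
};

static struct settings __rcu *active_settings;

static int read_settings_val(int fallback)
{
        struct settings *s;
        int val = fallback;

        rcu_read_lock();
        s = rcu_dereference(active_settings); /* may be NULL or just swapped */
        if (s)
                val = s->val;
        rcu_read_unlock();                    /* 's' must not be used past here */

        return val;
}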
4681     diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
4682     index 23e78dcd12bf..38b64f487315 100755
4683     --- a/scripts/bloat-o-meter
4684     +++ b/scripts/bloat-o-meter
4685     @@ -58,8 +58,8 @@ for name in common:
4686     delta.sort()
4687     delta.reverse()
4688    
4689     -print "add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
4690     - (add, remove, grow, shrink, up, -down, up-down)
4691     -print "%-40s %7s %7s %+7s" % ("function", "old", "new", "delta")
4692     +print("add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
4693     + (add, remove, grow, shrink, up, -down, up-down))
4694     +print("%-40s %7s %7s %+7s" % ("function", "old", "new", "delta"))
4695     for d, n in delta:
4696     - if d: print "%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d)
4697     + if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
4698     diff --git a/security/commoncap.c b/security/commoncap.c
4699     index 1832cf701c3d..48071ed7c445 100644
4700     --- a/security/commoncap.c
4701     +++ b/security/commoncap.c
4702     @@ -137,12 +137,17 @@ int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
4703     {
4704     int ret = 0;
4705     const struct cred *cred, *child_cred;
4706     + const kernel_cap_t *caller_caps;
4707    
4708     rcu_read_lock();
4709     cred = current_cred();
4710     child_cred = __task_cred(child);
4711     + if (mode & PTRACE_MODE_FSCREDS)
4712     + caller_caps = &cred->cap_effective;
4713     + else
4714     + caller_caps = &cred->cap_permitted;
4715     if (cred->user_ns == child_cred->user_ns &&
4716     - cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
4717     + cap_issubset(child_cred->cap_permitted, *caller_caps))
4718     goto out;
4719     if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE))
4720     goto out;
4721     diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
4722     index a8b27cdc2844..4ba64fd49759 100644
4723     --- a/sound/core/pcm_native.c
4724     +++ b/sound/core/pcm_native.c
4725     @@ -74,6 +74,18 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
4726     static DEFINE_RWLOCK(snd_pcm_link_rwlock);
4727     static DECLARE_RWSEM(snd_pcm_link_rwsem);
4728    
4729     +/* Writer in rwsem may block readers even during its waiting in queue,
4730     + * and this may lead to a deadlock when the code path takes read sem
4731     + * twice (e.g. one in snd_pcm_action_nonatomic() and another in
4732     + * snd_pcm_stream_lock()). As a (suboptimal) workaround, let writer to
4733     + * spin until it gets the lock.
4734     + */
4735     +static inline void down_write_nonblock(struct rw_semaphore *lock)
4736     +{
4737     + while (!down_write_trylock(lock))
4738     + cond_resched();
4739     +}
4740     +
4741     /**
4742     * snd_pcm_stream_lock - Lock the PCM stream
4743     * @substream: PCM substream
4744     @@ -1813,7 +1825,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
4745     res = -ENOMEM;
4746     goto _nolock;
4747     }
4748     - down_write(&snd_pcm_link_rwsem);
4749     + down_write_nonblock(&snd_pcm_link_rwsem);
4750     write_lock_irq(&snd_pcm_link_rwlock);
4751     if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
4752     substream->runtime->status->state != substream1->runtime->status->state ||
4753     @@ -1860,7 +1872,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
4754     struct snd_pcm_substream *s;
4755     int res = 0;
4756    
4757     - down_write(&snd_pcm_link_rwsem);
4758     + down_write_nonblock(&snd_pcm_link_rwsem);
4759     write_lock_irq(&snd_pcm_link_rwlock);
4760     if (!snd_pcm_stream_linked(substream)) {
4761     res = -EALREADY;
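The down_write_nonblock() helper added above avoids queueing as a writer, because a queued writer can block readers and deadlock a path that takes the read side twice. A userspace analogue of the same retry-and-yield idea is sketched below using POSIX rwlocks; the function names and the placeholder body are illustrative, and pthread_rwlock_trywrlock()/sched_yield() stand in for down_write_trylock()/cond_resched().

#include <pthread.h>
#include <sched.h>

static pthread_rwlock_t link_lock = PTHREAD_RWLOCK_INITIALIZER;

/* acquire the write lock without ever sitting in the writer queue */
static void write_lock_nonblock(pthread_rwlock_t *lock)
{
        while (pthread_rwlock_trywrlock(lock) != 0)
                sched_yield();          /* analogue of cond_resched() */
}

static void unlink_streams(void)
{
        write_lock_nonblock(&link_lock);
        /* ... modify the shared list here ... */
        pthread_rwlock_unlock(&link_lock);
}

int main(void)
{
        unlink_streams();
        return 0;
}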
4762     diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
4763     index 801076687bb1..c850345c43b5 100644
4764     --- a/sound/core/seq/seq_memory.c
4765     +++ b/sound/core/seq/seq_memory.c
4766     @@ -383,15 +383,20 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
4767    
4768     if (snd_BUG_ON(!pool))
4769     return -EINVAL;
4770     - if (pool->ptr) /* should be atomic? */
4771     - return 0;
4772    
4773     - pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
4774     - if (!pool->ptr)
4775     + cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
4776     + if (!cellptr)
4777     return -ENOMEM;
4778    
4779     /* add new cells to the free cell list */
4780     spin_lock_irqsave(&pool->lock, flags);
4781     + if (pool->ptr) {
4782     + spin_unlock_irqrestore(&pool->lock, flags);
4783     + vfree(cellptr);
4784     + return 0;
4785     + }
4786     +
4787     + pool->ptr = cellptr;
4788     pool->free = NULL;
4789    
4790     for (cell = 0; cell < pool->size; cell++) {
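The sound/core/seq/seq_memory.c hunk above fixes a pool-initialisation race by allocating outside the spinlock (vmalloc may sleep), re-checking the pointer under the lock, and discarding the fresh allocation if another caller won the race. A kernel-style sketch of that pattern follows; 'struct my_pool' and the function name are made up for illustration.

#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/errno.h>

struct my_pool {
        spinlock_t lock;
        void *ptr;
        size_t size;
};

static int my_pool_init(struct my_pool *pool)
{
        unsigned long flags;
        void *buf;

        buf = vmalloc(pool->size);      /* may sleep: must not hold the lock */
        if (!buf)
                return -ENOMEM;

        spin_lock_irqsave(&pool->lock, flags);
        if (pool->ptr) {                /* somebody else already set it up */
                spin_unlock_irqrestore(&pool->lock, flags);
                vfree(buf);
                return 0;
        }
        pool->ptr = buf;
        spin_unlock_irqrestore(&pool->lock, flags);

        return 0;
}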
4791     diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
4792     index 921fb2bd8fad..fe686ee41c6d 100644
4793     --- a/sound/core/seq/seq_ports.c
4794     +++ b/sound/core/seq/seq_ports.c
4795     @@ -535,19 +535,22 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
4796     bool is_src, bool ack)
4797     {
4798     struct snd_seq_port_subs_info *grp;
4799     + struct list_head *list;
4800     + bool empty;
4801    
4802     grp = is_src ? &port->c_src : &port->c_dest;
4803     + list = is_src ? &subs->src_list : &subs->dest_list;
4804     down_write(&grp->list_mutex);
4805     write_lock_irq(&grp->list_lock);
4806     - if (is_src)
4807     - list_del(&subs->src_list);
4808     - else
4809     - list_del(&subs->dest_list);
4810     + empty = list_empty(list);
4811     + if (!empty)
4812     + list_del_init(list);
4813     grp->exclusive = 0;
4814     write_unlock_irq(&grp->list_lock);
4815     up_write(&grp->list_mutex);
4816    
4817     - unsubscribe_port(client, port, grp, &subs->info, ack);
4818     + if (!empty)
4819     + unsubscribe_port(client, port, grp, &subs->info, ack);
4820     }
4821    
4822     /* connect two ports */
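The seq_ports.c hunk above makes unsubscription idempotent: list_del_init() leaves the entry pointing at itself, so list_empty() on the entry tells a second caller that the removal and its follow-up work were already done. A small kernel-style sketch of that idiom follows; the type and lock names are illustrative.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct subscription {
        struct list_head node;
};

static DEFINE_SPINLOCK(subs_lock);

/* returns true only for the caller that actually removed the entry */
static bool unlink_subscription(struct subscription *subs)
{
        bool removed;

        spin_lock(&subs_lock);
        removed = !list_empty(&subs->node);
        if (removed)
                list_del_init(&subs->node);
        spin_unlock(&subs_lock);

        return removed;         /* only the winner does the follow-up work */
}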
4823     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4824     index 02a86ba5ba22..2c13298e80b7 100644
4825     --- a/sound/pci/hda/hda_intel.c
4826     +++ b/sound/pci/hda/hda_intel.c
4827     @@ -2143,10 +2143,10 @@ static void azx_remove(struct pci_dev *pci)
4828     struct hda_intel *hda;
4829    
4830     if (card) {
4831     - /* flush the pending probing work */
4832     + /* cancel the pending probing work */
4833     chip = card->private_data;
4834     hda = container_of(chip, struct hda_intel, chip);
4835     - flush_work(&hda->probe_work);
4836     + cancel_work_sync(&hda->probe_work);
4837    
4838     snd_card_free(card);
4839     }
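The hda_intel.c hunk above switches the remove path from flush_work() to cancel_work_sync(): flushing would still let a probe that has not started yet run against hardware being torn down, while cancelling either waits for a running instance or prevents a queued one from starting at all. A brief kernel-style sketch of a remove path using that call follows; the device structure is a made-up example.

#include <linux/workqueue.h>

struct my_dev {
        struct work_struct probe_work;
        /* ... */
};

static void my_dev_remove(struct my_dev *dev)
{
        /* don't let deferred probing touch a device that is going away */
        cancel_work_sync(&dev->probe_work);

        /* safe to release 'dev' now: the work item is neither running
         * nor still pending from our earlier queue_work() */
}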
4840     diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
4841     index 2a912df6771b..68276f35e323 100644
4842     --- a/tools/lib/traceevent/event-parse.c
4843     +++ b/tools/lib/traceevent/event-parse.c
4844     @@ -4968,13 +4968,12 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
4845     sizeof(long) != 8) {
4846     char *p;
4847    
4848     - ls = 2;
4849     /* make %l into %ll */
4850     - p = strchr(format, 'l');
4851     - if (p)
4852     + if (ls == 1 && (p = strchr(format, 'l')))
4853     memmove(p+1, p, strlen(p)+1);
4854     else if (strcmp(format, "%p") == 0)
4855     strcpy(format, "0x%llx");
4856     + ls = 2;
4857     }
4858     switch (ls) {
4859     case -2:
4860     diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
4861     index 6fc8cd753e1a..b48e87693aa5 100644
4862     --- a/tools/perf/util/parse-events.c
4863     +++ b/tools/perf/util/parse-events.c
4864     @@ -399,6 +399,9 @@ static void tracepoint_error(struct parse_events_error *e, int err,
4865     {
4866     char help[BUFSIZ];
4867    
4868     + if (!e)
4869     + return;
4870     +
4871     /*
4872     * We get error directly from syscall errno ( > 0),
4873     * or from encoded pointer's error ( < 0).
4874     diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
4875     index c35ffdd360fe..468de95bc8bb 100644
4876     --- a/tools/perf/util/session.c
4877     +++ b/tools/perf/util/session.c
4878     @@ -972,7 +972,7 @@ static struct machine *machines__find_for_cpumode(struct machines *machines,
4879    
4880     machine = machines__find(machines, pid);
4881     if (!machine)
4882     - machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
4883     + machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
4884     return machine;
4885     }
4886    
4887     diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
4888     index 69bca185c471..ea6064696fe4 100644
4889     --- a/virt/kvm/arm/arch_timer.c
4890     +++ b/virt/kvm/arm/arch_timer.c
4891     @@ -143,7 +143,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
4892     * Check if there was a change in the timer state (should we raise or lower
4893     * the line level to the GIC).
4894     */
4895     -static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
4896     +static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
4897     {
4898     struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
4899    
4900     @@ -154,10 +154,12 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
4901     * until we call this function from kvm_timer_flush_hwstate.
4902     */
4903     if (!vgic_initialized(vcpu->kvm))
4904     - return;
4905     + return -ENODEV;
4906    
4907     if (kvm_timer_should_fire(vcpu) != timer->irq.level)
4908     kvm_timer_update_irq(vcpu, !timer->irq.level);
4909     +
4910     + return 0;
4911     }
4912    
4913     /*
4914     @@ -218,7 +220,8 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
4915     bool phys_active;
4916     int ret;
4917    
4918     - kvm_timer_update_state(vcpu);
4919     + if (kvm_timer_update_state(vcpu))
4920     + return;
4921    
4922     /*
4923     * If we enter the guest with the virtual input level to the VGIC