Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.1/0101-4.1.2-all-fixes.patch

Parent Directory | Revision Log


Revision 2748 - (hide annotations) (download)
Mon Jan 11 12:00:45 2016 UTC (8 years, 5 months ago) by niro
File size: 72997 byte(s)
-linux-4.1 patches up to 4.1.15
1 niro 2748 diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
2     index 750d577e8083..f5a8ca29aff0 100644
3     --- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
4     +++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
5     @@ -1,7 +1,7 @@
6     * Marvell Armada 370 / Armada XP Ethernet Controller (NETA)
7    
8     Required properties:
9     -- compatible: should be "marvell,armada-370-neta".
10     +- compatible: "marvell,armada-370-neta" or "marvell,armada-xp-neta".
11     - reg: address and length of the register set for the device.
12     - interrupts: interrupt for the device
13     - phy: See ethernet.txt file in the same directory.
14     diff --git a/Makefile b/Makefile
15     index 1caf4ad3eb8a..cef84c061f02 100644
16     --- a/Makefile
17     +++ b/Makefile
18     @@ -1,6 +1,6 @@
19     VERSION = 4
20     PATCHLEVEL = 1
21     -SUBLEVEL = 1
22     +SUBLEVEL = 2
23     EXTRAVERSION =
24     NAME = Series 4800
25    
26     diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
27     index ec96f0b36346..06a2f2ae9d1e 100644
28     --- a/arch/arm/boot/dts/armada-370-xp.dtsi
29     +++ b/arch/arm/boot/dts/armada-370-xp.dtsi
30     @@ -270,7 +270,6 @@
31     };
32    
33     eth0: ethernet@70000 {
34     - compatible = "marvell,armada-370-neta";
35     reg = <0x70000 0x4000>;
36     interrupts = <8>;
37     clocks = <&gateclk 4>;
38     @@ -286,7 +285,6 @@
39     };
40    
41     eth1: ethernet@74000 {
42     - compatible = "marvell,armada-370-neta";
43     reg = <0x74000 0x4000>;
44     interrupts = <10>;
45     clocks = <&gateclk 3>;
46     diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
47     index 00b50db57c9c..ca4257b2f77d 100644
48     --- a/arch/arm/boot/dts/armada-370.dtsi
49     +++ b/arch/arm/boot/dts/armada-370.dtsi
50     @@ -307,6 +307,14 @@
51     dmacap,memset;
52     };
53     };
54     +
55     + ethernet@70000 {
56     + compatible = "marvell,armada-370-neta";
57     + };
58     +
59     + ethernet@74000 {
60     + compatible = "marvell,armada-370-neta";
61     + };
62     };
63     };
64     };
65     diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
66     index 8479fdc9e9c2..c5fdc99f0dbe 100644
67     --- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
68     +++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
69     @@ -318,7 +318,7 @@
70     };
71    
72     eth3: ethernet@34000 {
73     - compatible = "marvell,armada-370-neta";
74     + compatible = "marvell,armada-xp-neta";
75     reg = <0x34000 0x4000>;
76     interrupts = <14>;
77     clocks = <&gateclk 1>;
78     diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
79     index 661d54c81580..0e24f1a38540 100644
80     --- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi
81     +++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
82     @@ -356,7 +356,7 @@
83     };
84    
85     eth3: ethernet@34000 {
86     - compatible = "marvell,armada-370-neta";
87     + compatible = "marvell,armada-xp-neta";
88     reg = <0x34000 0x4000>;
89     interrupts = <14>;
90     clocks = <&gateclk 1>;
91     diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
92     index 013d63f69e36..8fdd6d7c0ab1 100644
93     --- a/arch/arm/boot/dts/armada-xp.dtsi
94     +++ b/arch/arm/boot/dts/armada-xp.dtsi
95     @@ -177,7 +177,7 @@
96     };
97    
98     eth2: ethernet@30000 {
99     - compatible = "marvell,armada-370-neta";
100     + compatible = "marvell,armada-xp-neta";
101     reg = <0x30000 0x4000>;
102     interrupts = <12>;
103     clocks = <&gateclk 2>;
104     @@ -220,6 +220,14 @@
105     };
106     };
107    
108     + ethernet@70000 {
109     + compatible = "marvell,armada-xp-neta";
110     + };
111     +
112     + ethernet@74000 {
113     + compatible = "marvell,armada-xp-neta";
114     + };
115     +
116     xor@f0900 {
117     compatible = "marvell,orion-xor";
118     reg = <0xF0900 0x100
119     diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
120     index 2fd8988f310c..3794ca16499d 100644
121     --- a/arch/arm/boot/dts/sun5i-a10s.dtsi
122     +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
123     @@ -573,7 +573,7 @@
124     };
125    
126     rtp: rtp@01c25000 {
127     - compatible = "allwinner,sun4i-a10-ts";
128     + compatible = "allwinner,sun5i-a13-ts";
129     reg = <0x01c25000 0x100>;
130     interrupts = <29>;
131     #thermal-sensor-cells = <0>;
132     diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
133     index 883cb4873688..5098185abde6 100644
134     --- a/arch/arm/boot/dts/sun5i-a13.dtsi
135     +++ b/arch/arm/boot/dts/sun5i-a13.dtsi
136     @@ -555,7 +555,7 @@
137     };
138    
139     rtp: rtp@01c25000 {
140     - compatible = "allwinner,sun4i-a10-ts";
141     + compatible = "allwinner,sun5i-a13-ts";
142     reg = <0x01c25000 0x100>;
143     interrupts = <29>;
144     #thermal-sensor-cells = <0>;
145     diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
146     index fdd181792b4b..2b4847c7cbd4 100644
147     --- a/arch/arm/boot/dts/sun7i-a20.dtsi
148     +++ b/arch/arm/boot/dts/sun7i-a20.dtsi
149     @@ -1042,7 +1042,7 @@
150     };
151    
152     rtp: rtp@01c25000 {
153     - compatible = "allwinner,sun4i-a10-ts";
154     + compatible = "allwinner,sun5i-a13-ts";
155     reg = <0x01c25000 0x100>;
156     interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
157     #thermal-sensor-cells = <0>;
158     diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
159     index 79caf79b304a..f7db3a5d80e3 100644
160     --- a/arch/arm/kvm/interrupts.S
161     +++ b/arch/arm/kvm/interrupts.S
162     @@ -170,13 +170,9 @@ __kvm_vcpu_return:
163     @ Don't trap coprocessor accesses for host kernel
164     set_hstr vmexit
165     set_hdcr vmexit
166     - set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
167     + set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore
168    
169     #ifdef CONFIG_VFPv3
170     - @ Save floating point registers we if let guest use them.
171     - tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
172     - bne after_vfp_restore
173     -
174     @ Switch VFP/NEON hardware state to the host's
175     add r7, vcpu, #VCPU_VFP_GUEST
176     store_vfp_state r7
177     @@ -188,6 +184,8 @@ after_vfp_restore:
178     @ Restore FPEXC_EN which we clobbered on entry
179     pop {r2}
180     VFPFMXR FPEXC, r2
181     +#else
182     +after_vfp_restore:
183     #endif
184    
185     @ Reset Hyp-role
186     @@ -483,7 +481,7 @@ switch_to_guest_vfp:
187     push {r3-r7}
188    
189     @ NEON/VFP used. Turn on VFP access.
190     - set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
191     + set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))
192    
193     @ Switch VFP/NEON hardware state to the guest's
194     add r7, r0, #VCPU_VFP_HOST
195     diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
196     index 35e4a3a0c476..48efe2ee452c 100644
197     --- a/arch/arm/kvm/interrupts_head.S
198     +++ b/arch/arm/kvm/interrupts_head.S
199     @@ -591,8 +591,13 @@ ARM_BE8(rev r6, r6 )
200     .endm
201    
202     /* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
203     - * (hardware reset value is 0). Keep previous value in r2. */
204     -.macro set_hcptr operation, mask
205     + * (hardware reset value is 0). Keep previous value in r2.
206     + * An ISB is emited on vmexit/vmtrap, but executed on vmexit only if
207     + * VFP wasn't already enabled (always executed on vmtrap).
208     + * If a label is specified with vmexit, it is branched to if VFP wasn't
209     + * enabled.
210     + */
211     +.macro set_hcptr operation, mask, label = none
212     mrc p15, 4, r2, c1, c1, 2
213     ldr r3, =\mask
214     .if \operation == vmentry
215     @@ -601,6 +606,17 @@ ARM_BE8(rev r6, r6 )
216     bic r3, r2, r3 @ Don't trap defined coproc-accesses
217     .endif
218     mcr p15, 4, r3, c1, c1, 2
219     + .if \operation != vmentry
220     + .if \operation == vmexit
221     + tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
222     + beq 1f
223     + .endif
224     + isb
225     + .if \label != none
226     + b \label
227     + .endif
228     +1:
229     + .endif
230     .endm
231    
232     /* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
233     diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
234     index 02fa8eff6ae1..531e922486b2 100644
235     --- a/arch/arm/kvm/psci.c
236     +++ b/arch/arm/kvm/psci.c
237     @@ -230,10 +230,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
238     case PSCI_0_2_FN64_AFFINITY_INFO:
239     val = kvm_psci_vcpu_affinity_info(vcpu);
240     break;
241     - case PSCI_0_2_FN_MIGRATE:
242     - case PSCI_0_2_FN64_MIGRATE:
243     - val = PSCI_RET_NOT_SUPPORTED;
244     - break;
245     case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
246     /*
247     * Trusted OS is MP hence does not require migration
248     @@ -242,10 +238,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
249     */
250     val = PSCI_0_2_TOS_MP;
251     break;
252     - case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
253     - case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
254     - val = PSCI_RET_NOT_SUPPORTED;
255     - break;
256     case PSCI_0_2_FN_SYSTEM_OFF:
257     kvm_psci_system_off(vcpu);
258     /*
259     @@ -271,7 +263,8 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
260     ret = 0;
261     break;
262     default:
263     - return -EINVAL;
264     + val = PSCI_RET_NOT_SUPPORTED;
265     + break;
266     }
267    
268     *vcpu_reg(vcpu, 0) = val;
269     @@ -291,12 +284,9 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
270     case KVM_PSCI_FN_CPU_ON:
271     val = kvm_psci_vcpu_on(vcpu);
272     break;
273     - case KVM_PSCI_FN_CPU_SUSPEND:
274     - case KVM_PSCI_FN_MIGRATE:
275     + default:
276     val = PSCI_RET_NOT_SUPPORTED;
277     break;
278     - default:
279     - return -EINVAL;
280     }
281    
282     *vcpu_reg(vcpu, 0) = val;
283     diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
284     index 469a150bf98f..a2e8ef3c0bd9 100644
285     --- a/arch/arm/mach-imx/clk-imx6q.c
286     +++ b/arch/arm/mach-imx/clk-imx6q.c
287     @@ -443,7 +443,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
288     clk[IMX6QDL_CLK_GPMI_IO] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
289     clk[IMX6QDL_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
290     clk[IMX6QDL_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
291     - clk[IMX6QDL_CLK_SATA] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4);
292     + clk[IMX6QDL_CLK_SATA] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4);
293     clk[IMX6QDL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
294     clk[IMX6QDL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
295     clk[IMX6QDL_CLK_SPDIF] = imx_clk_gate2("spdif", "spdif_podf", base + 0x7c, 14);
296     diff --git a/arch/arm/mach-mvebu/pm-board.c b/arch/arm/mach-mvebu/pm-board.c
297     index 6dfd4ab97b2a..301ab38d38ba 100644
298     --- a/arch/arm/mach-mvebu/pm-board.c
299     +++ b/arch/arm/mach-mvebu/pm-board.c
300     @@ -43,6 +43,9 @@ static void mvebu_armada_xp_gp_pm_enter(void __iomem *sdram_reg, u32 srcmd)
301     for (i = 0; i < ARMADA_XP_GP_PIC_NR_GPIOS; i++)
302     ackcmd |= BIT(pic_raw_gpios[i]);
303    
304     + srcmd = cpu_to_le32(srcmd);
305     + ackcmd = cpu_to_le32(ackcmd);
306     +
307     /*
308     * Wait a while, the PIC needs quite a bit of time between the
309     * two GPIO commands.
310     diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
311     index 88de2dce2e87..7469347b1749 100644
312     --- a/arch/arm/mach-tegra/cpuidle-tegra20.c
313     +++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
314     @@ -34,6 +34,7 @@
315     #include "iomap.h"
316     #include "irq.h"
317     #include "pm.h"
318     +#include "reset.h"
319     #include "sleep.h"
320    
321     #ifdef CONFIG_PM_SLEEP
322     @@ -70,15 +71,13 @@ static struct cpuidle_driver tegra_idle_driver = {
323    
324     #ifdef CONFIG_PM_SLEEP
325     #ifdef CONFIG_SMP
326     -static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
327     -
328     static int tegra20_reset_sleeping_cpu_1(void)
329     {
330     int ret = 0;
331    
332     tegra_pen_lock();
333    
334     - if (readl(pmc + PMC_SCRATCH41) == CPU_RESETTABLE)
335     + if (readb(tegra20_cpu1_resettable_status) == CPU_RESETTABLE)
336     tegra20_cpu_shutdown(1);
337     else
338     ret = -EINVAL;
339     diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
340     index 71be4af5e975..e3070fdab80b 100644
341     --- a/arch/arm/mach-tegra/reset-handler.S
342     +++ b/arch/arm/mach-tegra/reset-handler.S
343     @@ -169,10 +169,10 @@ after_errata:
344     cmp r6, #TEGRA20
345     bne 1f
346     /* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
347     - mov32 r5, TEGRA_PMC_BASE
348     - mov r0, #0
349     + mov32 r5, TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET
350     + mov r0, #CPU_NOT_RESETTABLE
351     cmp r10, #0
352     - strne r0, [r5, #PMC_SCRATCH41]
353     + strneb r0, [r5, #__tegra20_cpu1_resettable_status_offset]
354     1:
355     #endif
356    
357     @@ -281,6 +281,10 @@ __tegra_cpu_reset_handler_data:
358     .rept TEGRA_RESET_DATA_SIZE
359     .long 0
360     .endr
361     + .globl __tegra20_cpu1_resettable_status_offset
362     + .equ __tegra20_cpu1_resettable_status_offset, \
363     + . - __tegra_cpu_reset_handler_start
364     + .byte 0
365     .align L1_CACHE_SHIFT
366    
367     ENTRY(__tegra_cpu_reset_handler_end)
368     diff --git a/arch/arm/mach-tegra/reset.h b/arch/arm/mach-tegra/reset.h
369     index 76a93434c6ee..29c3dec0126a 100644
370     --- a/arch/arm/mach-tegra/reset.h
371     +++ b/arch/arm/mach-tegra/reset.h
372     @@ -35,6 +35,7 @@ extern unsigned long __tegra_cpu_reset_handler_data[TEGRA_RESET_DATA_SIZE];
373    
374     void __tegra_cpu_reset_handler_start(void);
375     void __tegra_cpu_reset_handler(void);
376     +void __tegra20_cpu1_resettable_status_offset(void);
377     void __tegra_cpu_reset_handler_end(void);
378     void tegra_secondary_startup(void);
379    
380     @@ -47,6 +48,9 @@ void tegra_secondary_startup(void);
381     (IO_ADDRESS(TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET + \
382     ((u32)&__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_LP2] - \
383     (u32)__tegra_cpu_reset_handler_start)))
384     +#define tegra20_cpu1_resettable_status \
385     + (IO_ADDRESS(TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET + \
386     + (u32)__tegra20_cpu1_resettable_status_offset))
387     #endif
388    
389     #define tegra_cpu_reset_handler_offset \
390     diff --git a/arch/arm/mach-tegra/sleep-tegra20.S b/arch/arm/mach-tegra/sleep-tegra20.S
391     index be4bc5f853f5..e6b684e14322 100644
392     --- a/arch/arm/mach-tegra/sleep-tegra20.S
393     +++ b/arch/arm/mach-tegra/sleep-tegra20.S
394     @@ -97,9 +97,10 @@ ENDPROC(tegra20_hotplug_shutdown)
395     ENTRY(tegra20_cpu_shutdown)
396     cmp r0, #0
397     reteq lr @ must not be called for CPU 0
398     - mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
399     + mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT
400     + ldr r2, =__tegra20_cpu1_resettable_status_offset
401     mov r12, #CPU_RESETTABLE
402     - str r12, [r1]
403     + strb r12, [r1, r2]
404    
405     cpu_to_halt_reg r1, r0
406     ldr r3, =TEGRA_FLOW_CTRL_VIRT
407     @@ -182,38 +183,41 @@ ENDPROC(tegra_pen_unlock)
408     /*
409     * tegra20_cpu_clear_resettable(void)
410     *
411     - * Called to clear the "resettable soon" flag in PMC_SCRATCH41 when
412     + * Called to clear the "resettable soon" flag in IRAM variable when
413     * it is expected that the secondary CPU will be idle soon.
414     */
415     ENTRY(tegra20_cpu_clear_resettable)
416     - mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
417     + mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT
418     + ldr r2, =__tegra20_cpu1_resettable_status_offset
419     mov r12, #CPU_NOT_RESETTABLE
420     - str r12, [r1]
421     + strb r12, [r1, r2]
422     ret lr
423     ENDPROC(tegra20_cpu_clear_resettable)
424    
425     /*
426     * tegra20_cpu_set_resettable_soon(void)
427     *
428     - * Called to set the "resettable soon" flag in PMC_SCRATCH41 when
429     + * Called to set the "resettable soon" flag in IRAM variable when
430     * it is expected that the secondary CPU will be idle soon.
431     */
432     ENTRY(tegra20_cpu_set_resettable_soon)
433     - mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
434     + mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT
435     + ldr r2, =__tegra20_cpu1_resettable_status_offset
436     mov r12, #CPU_RESETTABLE_SOON
437     - str r12, [r1]
438     + strb r12, [r1, r2]
439     ret lr
440     ENDPROC(tegra20_cpu_set_resettable_soon)
441    
442     /*
443     * tegra20_cpu_is_resettable_soon(void)
444     *
445     - * Returns true if the "resettable soon" flag in PMC_SCRATCH41 has been
446     + * Returns true if the "resettable soon" flag in IRAM variable has been
447     * set because it is expected that the secondary CPU will be idle soon.
448     */
449     ENTRY(tegra20_cpu_is_resettable_soon)
450     - mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
451     - ldr r12, [r1]
452     + mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT
453     + ldr r2, =__tegra20_cpu1_resettable_status_offset
454     + ldrb r12, [r1, r2]
455     cmp r12, #CPU_RESETTABLE_SOON
456     moveq r0, #1
457     movne r0, #0
458     @@ -256,9 +260,10 @@ ENTRY(tegra20_sleep_cpu_secondary_finish)
459     mov r0, #TEGRA_FLUSH_CACHE_LOUIS
460     bl tegra_disable_clean_inv_dcache
461    
462     - mov32 r0, TEGRA_PMC_VIRT + PMC_SCRATCH41
463     + mov32 r0, TEGRA_IRAM_RESET_BASE_VIRT
464     + ldr r4, =__tegra20_cpu1_resettable_status_offset
465     mov r3, #CPU_RESETTABLE
466     - str r3, [r0]
467     + strb r3, [r0, r4]
468    
469     bl tegra_cpu_do_idle
470    
471     @@ -274,10 +279,10 @@ ENTRY(tegra20_sleep_cpu_secondary_finish)
472    
473     bl tegra_pen_lock
474    
475     - mov32 r3, TEGRA_PMC_VIRT
476     - add r0, r3, #PMC_SCRATCH41
477     + mov32 r0, TEGRA_IRAM_RESET_BASE_VIRT
478     + ldr r4, =__tegra20_cpu1_resettable_status_offset
479     mov r3, #CPU_NOT_RESETTABLE
480     - str r3, [r0]
481     + strb r3, [r0, r4]
482    
483     bl tegra_pen_unlock
484    
485     diff --git a/arch/arm/mach-tegra/sleep.h b/arch/arm/mach-tegra/sleep.h
486     index 92d46ec1361a..0d59360d891d 100644
487     --- a/arch/arm/mach-tegra/sleep.h
488     +++ b/arch/arm/mach-tegra/sleep.h
489     @@ -18,6 +18,7 @@
490     #define __MACH_TEGRA_SLEEP_H
491    
492     #include "iomap.h"
493     +#include "irammap.h"
494    
495     #define TEGRA_ARM_PERIF_VIRT (TEGRA_ARM_PERIF_BASE - IO_CPU_PHYS \
496     + IO_CPU_VIRT)
497     @@ -29,6 +30,9 @@
498     + IO_APB_VIRT)
499     #define TEGRA_PMC_VIRT (TEGRA_PMC_BASE - IO_APB_PHYS + IO_APB_VIRT)
500    
501     +#define TEGRA_IRAM_RESET_BASE_VIRT (IO_IRAM_VIRT + \
502     + TEGRA_IRAM_RESET_HANDLER_OFFSET)
503     +
504     /* PMC_SCRATCH37-39 and 41 are used for tegra_pen_lock and idle */
505     #define PMC_SCRATCH37 0x130
506     #define PMC_SCRATCH38 0x134
507     diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
508     index 9488fa5f8866..afc96ecb9004 100644
509     --- a/arch/mips/include/asm/mach-generic/spaces.h
510     +++ b/arch/mips/include/asm/mach-generic/spaces.h
511     @@ -94,7 +94,11 @@
512     #endif
513    
514     #ifndef FIXADDR_TOP
515     +#ifdef CONFIG_KVM_GUEST
516     +#define FIXADDR_TOP ((unsigned long)(long)(int)0x7ffe0000)
517     +#else
518     #define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
519     #endif
520     +#endif
521    
522     #endif /* __ASM_MACH_GENERIC_SPACES_H */
523     diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
524     index bb68e8d520e8..52f205ae1281 100644
525     --- a/arch/mips/kvm/mips.c
526     +++ b/arch/mips/kvm/mips.c
527     @@ -982,7 +982,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
528    
529     /* If nothing is dirty, don't bother messing with page tables. */
530     if (is_dirty) {
531     - memslot = &kvm->memslots->memslots[log->slot];
532     + memslot = id_to_memslot(kvm->memslots, log->slot);
533    
534     ga = memslot->base_gfn << PAGE_SHIFT;
535     ga_end = ga + (memslot->npages << PAGE_SHIFT);
536     diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
537     index 12b638425bb9..d90893b76e7c 100644
538     --- a/arch/powerpc/perf/core-book3s.c
539     +++ b/arch/powerpc/perf/core-book3s.c
540     @@ -131,7 +131,16 @@ static void pmao_restore_workaround(bool ebb) { }
541    
542     static bool regs_use_siar(struct pt_regs *regs)
543     {
544     - return !!regs->result;
545     + /*
546     + * When we take a performance monitor exception the regs are setup
547     + * using perf_read_regs() which overloads some fields, in particular
548     + * regs->result to tell us whether to use SIAR.
549     + *
550     + * However if the regs are from another exception, eg. a syscall, then
551     + * they have not been setup using perf_read_regs() and so regs->result
552     + * is something random.
553     + */
554     + return ((TRAP(regs) == 0xf00) && regs->result);
555     }
556    
557     /*
558     diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
559     index 9f73c8059022..49b74454d7ee 100644
560     --- a/arch/s390/kernel/crash_dump.c
561     +++ b/arch/s390/kernel/crash_dump.c
562     @@ -415,7 +415,7 @@ static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs)
563     ptr += len;
564     /* Copy lower halves of SIMD registers 0-15 */
565     for (i = 0; i < 16; i++) {
566     - memcpy(ptr, &vx_regs[i], 8);
567     + memcpy(ptr, &vx_regs[i].u[2], 8);
568     ptr += 8;
569     }
570     return ptr;
571     diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
572     index 9de47265ef73..b745a109bfc1 100644
573     --- a/arch/s390/kvm/interrupt.c
574     +++ b/arch/s390/kvm/interrupt.c
575     @@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
576     if (sclp_has_sigpif())
577     return __inject_extcall_sigpif(vcpu, src_id);
578    
579     - if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
580     + if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
581     return -EBUSY;
582     *extcall = irq->u.extcall;
583     atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
584     @@ -1606,6 +1606,9 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
585     int i;
586    
587     spin_lock(&fi->lock);
588     + fi->pending_irqs = 0;
589     + memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
590     + memset(&fi->mchk, 0, sizeof(fi->mchk));
591     for (i = 0; i < FIRQ_LIST_COUNT; i++)
592     clear_irq_list(&fi->lists[i]);
593     for (i = 0; i < FIRQ_MAX_COUNT; i++)
594     diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
595     index 55423d8be580..9afb9d602f84 100644
596     --- a/arch/s390/net/bpf_jit_comp.c
597     +++ b/arch/s390/net/bpf_jit_comp.c
598     @@ -227,7 +227,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
599     ({ \
600     /* Branch instruction needs 6 bytes */ \
601     int rel = (addrs[i + off + 1] - (addrs[i + 1] - 6)) / 2;\
602     - _EMIT6(op1 | reg(b1, b2) << 16 | rel, op2 | mask); \
603     + _EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), op2 | mask); \
604     REG_SET_SEEN(b1); \
605     REG_SET_SEEN(b2); \
606     })
607     diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
608     index f4a555beef19..41b06fca39f7 100644
609     --- a/arch/x86/include/asm/kvm_host.h
610     +++ b/arch/x86/include/asm/kvm_host.h
611     @@ -591,7 +591,7 @@ struct kvm_arch {
612     struct kvm_pic *vpic;
613     struct kvm_ioapic *vioapic;
614     struct kvm_pit *vpit;
615     - int vapics_in_nmi_mode;
616     + atomic_t vapics_in_nmi_mode;
617     struct mutex apic_map_lock;
618     struct kvm_apic_map *apic_map;
619    
620     diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
621     index 4dce6f8b6129..f90952f64e79 100644
622     --- a/arch/x86/kvm/i8254.c
623     +++ b/arch/x86/kvm/i8254.c
624     @@ -305,7 +305,7 @@ static void pit_do_work(struct kthread_work *work)
625     * LVT0 to NMI delivery. Other PIC interrupts are just sent to
626     * VCPU0, and only if its LVT0 is in EXTINT mode.
627     */
628     - if (kvm->arch.vapics_in_nmi_mode > 0)
629     + if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
630     kvm_for_each_vcpu(i, vcpu, kvm)
631     kvm_apic_nmi_wd_deliver(vcpu);
632     }
633     diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
634     index 4c7deb4f78a1..67d07e051436 100644
635     --- a/arch/x86/kvm/lapic.c
636     +++ b/arch/x86/kvm/lapic.c
637     @@ -1250,10 +1250,10 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
638     if (!nmi_wd_enabled) {
639     apic_debug("Receive NMI setting on APIC_LVT0 "
640     "for cpu %d\n", apic->vcpu->vcpu_id);
641     - apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
642     + atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
643     }
644     } else if (nmi_wd_enabled)
645     - apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
646     + atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
647     }
648    
649     static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
650     @@ -1808,6 +1808,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
651     apic_update_ppr(apic);
652     hrtimer_cancel(&apic->lapic_timer.timer);
653     apic_update_lvtt(apic);
654     + apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
655     update_divide_count(apic);
656     start_apic_timer(apic);
657     apic->irr_pending = true;
658     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
659     index 9afa233b5482..4911bf19122b 100644
660     --- a/arch/x86/kvm/svm.c
661     +++ b/arch/x86/kvm/svm.c
662     @@ -511,8 +511,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
663     {
664     struct vcpu_svm *svm = to_svm(vcpu);
665    
666     - if (svm->vmcb->control.next_rip != 0)
667     + if (svm->vmcb->control.next_rip != 0) {
668     + WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
669     svm->next_rip = svm->vmcb->control.next_rip;
670     + }
671    
672     if (!svm->next_rip) {
673     if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
674     @@ -4317,7 +4319,9 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
675     break;
676     }
677    
678     - vmcb->control.next_rip = info->next_rip;
679     + /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
680     + if (static_cpu_has(X86_FEATURE_NRIPS))
681     + vmcb->control.next_rip = info->next_rip;
682     vmcb->control.exit_code = icpt_info.exit_code;
683     vmexit = nested_svm_exit_handled(svm);
684    
685     diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
686     index 14a63ed6fe09..ff9911707160 100644
687     --- a/arch/x86/pci/acpi.c
688     +++ b/arch/x86/pci/acpi.c
689     @@ -81,6 +81,17 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
690     DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
691     },
692     },
693     + /* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */
694     + /* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */
695     + {
696     + .callback = set_use_crs,
697     + .ident = "Foxconn K8M890-8237A",
698     + .matches = {
699     + DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"),
700     + DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"),
701     + DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
702     + },
703     + },
704    
705     /* Now for the blacklist.. */
706    
707     @@ -121,8 +132,10 @@ void __init pci_acpi_crs_quirks(void)
708     {
709     int year;
710    
711     - if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
712     - pci_use_crs = false;
713     + if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) {
714     + if (iomem_resource.end <= 0xffffffff)
715     + pci_use_crs = false;
716     + }
717    
718     dmi_check_system(pci_crs_quirks);
719    
720     diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
721     index 6414661ac1c4..c45d274a75c8 100644
722     --- a/drivers/cpufreq/intel_pstate.c
723     +++ b/drivers/cpufreq/intel_pstate.c
724     @@ -535,7 +535,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
725    
726     val |= vid;
727    
728     - wrmsrl(MSR_IA32_PERF_CTL, val);
729     + wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
730     }
731    
732     #define BYT_BCLK_FREQS 5
733     diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
734     index 59372077ec7c..3442764a5293 100644
735     --- a/drivers/cpuidle/cpuidle-powernv.c
736     +++ b/drivers/cpuidle/cpuidle-powernv.c
737     @@ -60,6 +60,8 @@ static int nap_loop(struct cpuidle_device *dev,
738     return index;
739     }
740    
741     +/* Register for fastsleep only in oneshot mode of broadcast */
742     +#ifdef CONFIG_TICK_ONESHOT
743     static int fastsleep_loop(struct cpuidle_device *dev,
744     struct cpuidle_driver *drv,
745     int index)
746     @@ -83,7 +85,7 @@ static int fastsleep_loop(struct cpuidle_device *dev,
747    
748     return index;
749     }
750     -
751     +#endif
752     /*
753     * States for dedicated partition case.
754     */
755     @@ -209,7 +211,14 @@ static int powernv_add_idle_states(void)
756     powernv_states[nr_idle_states].flags = 0;
757     powernv_states[nr_idle_states].target_residency = 100;
758     powernv_states[nr_idle_states].enter = &nap_loop;
759     - } else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
760     + }
761     +
762     + /*
763     + * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come
764     + * within this config dependency check.
765     + */
766     +#ifdef CONFIG_TICK_ONESHOT
767     + if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
768     flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
769     /* Add FASTSLEEP state */
770     strcpy(powernv_states[nr_idle_states].name, "FastSleep");
771     @@ -218,7 +227,7 @@ static int powernv_add_idle_states(void)
772     powernv_states[nr_idle_states].target_residency = 300000;
773     powernv_states[nr_idle_states].enter = &fastsleep_loop;
774     }
775     -
776     +#endif
777     powernv_states[nr_idle_states].exit_latency =
778     ((unsigned int)latency_ns[i]) / 1000;
779    
780     diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
781     index 857414afa29a..f062158d4dc9 100644
782     --- a/drivers/crypto/talitos.c
783     +++ b/drivers/crypto/talitos.c
784     @@ -925,7 +925,8 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
785     sg_count--;
786     link_tbl_ptr--;
787     }
788     - be16_add_cpu(&link_tbl_ptr->len, cryptlen);
789     + link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
790     + + cryptlen);
791    
792     /* tag end of link table */
793     link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
794     @@ -2561,6 +2562,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
795     break;
796     default:
797     dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
798     + kfree(t_alg);
799     return ERR_PTR(-EINVAL);
800     }
801    
802     diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
803     index e1c7e9e51045..ca9f4edbb940 100644
804     --- a/drivers/iommu/amd_iommu.c
805     +++ b/drivers/iommu/amd_iommu.c
806     @@ -1869,9 +1869,15 @@ static void free_pt_##LVL (unsigned long __pt) \
807     pt = (u64 *)__pt; \
808     \
809     for (i = 0; i < 512; ++i) { \
810     + /* PTE present? */ \
811     if (!IOMMU_PTE_PRESENT(pt[i])) \
812     continue; \
813     \
814     + /* Large PTE? */ \
815     + if (PM_PTE_LEVEL(pt[i]) == 0 || \
816     + PM_PTE_LEVEL(pt[i]) == 7) \
817     + continue; \
818     + \
819     p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
820     FN(p); \
821     } \
822     diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
823     index 66a803b9dd3a..65075ef75e2a 100644
824     --- a/drivers/iommu/arm-smmu.c
825     +++ b/drivers/iommu/arm-smmu.c
826     @@ -1567,7 +1567,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
827     return -ENODEV;
828     }
829    
830     - if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) {
831     + if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) {
832     smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
833     dev_notice(smmu->dev, "\taddress translation ops\n");
834     }
835     diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
836     index c80287a02735..9231cdfe2757 100644
837     --- a/drivers/mmc/host/sdhci.c
838     +++ b/drivers/mmc/host/sdhci.c
839     @@ -848,7 +848,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
840     int sg_cnt;
841    
842     sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
843     - if (sg_cnt == 0) {
844     + if (sg_cnt <= 0) {
845     /*
846     * This only happens when someone fed
847     * us an invalid request.
848     diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
849     index b0f69248cb71..e9b1810d319f 100644
850     --- a/drivers/net/can/dev.c
851     +++ b/drivers/net/can/dev.c
852     @@ -440,6 +440,9 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
853     struct can_frame *cf = (struct can_frame *)skb->data;
854     u8 dlc = cf->can_dlc;
855    
856     + if (!(skb->tstamp.tv64))
857     + __net_timestamp(skb);
858     +
859     netif_rx(priv->echo_skb[idx]);
860     priv->echo_skb[idx] = NULL;
861    
862     @@ -575,6 +578,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
863     if (unlikely(!skb))
864     return NULL;
865    
866     + __net_timestamp(skb);
867     skb->protocol = htons(ETH_P_CAN);
868     skb->pkt_type = PACKET_BROADCAST;
869     skb->ip_summed = CHECKSUM_UNNECESSARY;
870     @@ -603,6 +607,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
871     if (unlikely(!skb))
872     return NULL;
873    
874     + __net_timestamp(skb);
875     skb->protocol = htons(ETH_P_CANFD);
876     skb->pkt_type = PACKET_BROADCAST;
877     skb->ip_summed = CHECKSUM_UNNECESSARY;
878     diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
879     index c837eb91d43e..f64f5290d6f8 100644
880     --- a/drivers/net/can/slcan.c
881     +++ b/drivers/net/can/slcan.c
882     @@ -207,6 +207,7 @@ static void slc_bump(struct slcan *sl)
883     if (!skb)
884     return;
885    
886     + __net_timestamp(skb);
887     skb->dev = sl->dev;
888     skb->protocol = htons(ETH_P_CAN);
889     skb->pkt_type = PACKET_BROADCAST;
890     diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
891     index 674f367087c5..0ce868de855d 100644
892     --- a/drivers/net/can/vcan.c
893     +++ b/drivers/net/can/vcan.c
894     @@ -78,6 +78,9 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
895     skb->dev = dev;
896     skb->ip_summed = CHECKSUM_UNNECESSARY;
897    
898     + if (!(skb->tstamp.tv64))
899     + __net_timestamp(skb);
900     +
901     netif_rx_ni(skb);
902     }
903    
904     diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
905     index d81fc6bd4759..5c92fb71b37e 100644
906     --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
907     +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
908     @@ -263,7 +263,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
909     int ret;
910    
911     /* Try to obtain pages, decreasing order if necessary */
912     - gfp |= __GFP_COLD | __GFP_COMP;
913     + gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
914     while (order >= 0) {
915     pages = alloc_pages(gfp, order);
916     if (pages)
917     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
918     index 33501bcddc48..8a97d28f3d65 100644
919     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
920     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
921     @@ -9323,7 +9323,8 @@ unload_error:
922     * function stop ramrod is sent, since as part of this ramrod FW access
923     * PTP registers.
924     */
925     - bnx2x_stop_ptp(bp);
926     + if (bp->flags & PTP_SUPPORTED)
927     + bnx2x_stop_ptp(bp);
928    
929     /* Disable HW interrupts, NAPI */
930     bnx2x_netif_stop(bp, 1);
931     diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
932     index ce5f7f9cff06..74d0389bf233 100644
933     --- a/drivers/net/ethernet/marvell/mvneta.c
934     +++ b/drivers/net/ethernet/marvell/mvneta.c
935     @@ -310,6 +310,7 @@ struct mvneta_port {
936     unsigned int link;
937     unsigned int duplex;
938     unsigned int speed;
939     + unsigned int tx_csum_limit;
940     int use_inband_status:1;
941     };
942    
943     @@ -1013,6 +1014,12 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
944     val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
945     val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
946     mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
947     + } else {
948     + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
949     + val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
950     + MVNETA_GMAC_AN_SPEED_EN |
951     + MVNETA_GMAC_AN_DUPLEX_EN);
952     + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
953     }
954    
955     mvneta_set_ucast_table(pp, -1);
956     @@ -2502,8 +2509,10 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
957    
958     dev->mtu = mtu;
959    
960     - if (!netif_running(dev))
961     + if (!netif_running(dev)) {
962     + netdev_update_features(dev);
963     return 0;
964     + }
965    
966     /* The interface is running, so we have to force a
967     * reallocation of the queues
968     @@ -2532,9 +2541,26 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
969     mvneta_start_dev(pp);
970     mvneta_port_up(pp);
971    
972     + netdev_update_features(dev);
973     +
974     return 0;
975     }
976    
977     +static netdev_features_t mvneta_fix_features(struct net_device *dev,
978     + netdev_features_t features)
979     +{
980     + struct mvneta_port *pp = netdev_priv(dev);
981     +
982     + if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
983     + features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
984     + netdev_info(dev,
985     + "Disable IP checksum for MTU greater than %dB\n",
986     + pp->tx_csum_limit);
987     + }
988     +
989     + return features;
990     +}
991     +
992     /* Get mac address */
993     static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
994     {
995     @@ -2856,6 +2882,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
996     .ndo_set_rx_mode = mvneta_set_rx_mode,
997     .ndo_set_mac_address = mvneta_set_mac_addr,
998     .ndo_change_mtu = mvneta_change_mtu,
999     + .ndo_fix_features = mvneta_fix_features,
1000     .ndo_get_stats64 = mvneta_get_stats64,
1001     .ndo_do_ioctl = mvneta_ioctl,
1002     };
1003     @@ -3101,6 +3128,9 @@ static int mvneta_probe(struct platform_device *pdev)
1004     }
1005     }
1006    
1007     + if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
1008     + pp->tx_csum_limit = 1600;
1009     +
1010     pp->tx_ring_size = MVNETA_MAX_TXD;
1011     pp->rx_ring_size = MVNETA_MAX_RXD;
1012    
1013     @@ -3179,6 +3209,7 @@ static int mvneta_remove(struct platform_device *pdev)
1014    
1015     static const struct of_device_id mvneta_match[] = {
1016     { .compatible = "marvell,armada-370-neta" },
1017     + { .compatible = "marvell,armada-xp-neta" },
1018     { }
1019     };
1020     MODULE_DEVICE_TABLE(of, mvneta_match);
1021     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1022     index cf467a9f6cc7..a5a0b8420d26 100644
1023     --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1024     +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1025     @@ -1973,10 +1973,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
1026     mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
1027     }
1028    
1029     - if (priv->base_tx_qpn) {
1030     - mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
1031     - priv->base_tx_qpn = 0;
1032     - }
1033     }
1034    
1035     int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
1036     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1037     index 2a77a6b19121..eab4e080ebd2 100644
1038     --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1039     +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1040     @@ -723,7 +723,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
1041     }
1042     #endif
1043     static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
1044     - int hwtstamp_rx_filter)
1045     + netdev_features_t dev_features)
1046     {
1047     __wsum hw_checksum = 0;
1048    
1049     @@ -731,14 +731,8 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
1050    
1051     hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
1052    
1053     - if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) &&
1054     - hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) {
1055     - /* next protocol non IPv4 or IPv6 */
1056     - if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
1057     - != htons(ETH_P_IP) &&
1058     - ((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
1059     - != htons(ETH_P_IPV6))
1060     - return -1;
1061     + if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
1062     + !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
1063     hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
1064     hdr += sizeof(struct vlan_hdr);
1065     }
1066     @@ -901,7 +895,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
1067    
1068     if (ip_summed == CHECKSUM_COMPLETE) {
1069     void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
1070     - if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) {
1071     + if (check_csum(cqe, gro_skb, va,
1072     + dev->features)) {
1073     ip_summed = CHECKSUM_NONE;
1074     ring->csum_none++;
1075     ring->csum_complete--;
1076     @@ -956,7 +951,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
1077     }
1078    
1079     if (ip_summed == CHECKSUM_COMPLETE) {
1080     - if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) {
1081     + if (check_csum(cqe, skb, skb->data, dev->features)) {
1082     ip_summed = CHECKSUM_NONE;
1083     ring->csum_complete--;
1084     ring->csum_none++;
1085     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
1086     index 7bed3a88579f..c10d98f6ad96 100644
1087     --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
1088     +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
1089     @@ -66,6 +66,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
1090     ring->size = size;
1091     ring->size_mask = size - 1;
1092     ring->stride = stride;
1093     + ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
1094    
1095     tmp = size * sizeof(struct mlx4_en_tx_info);
1096     ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node);
1097     @@ -180,6 +181,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
1098     mlx4_bf_free(mdev->dev, &ring->bf);
1099     mlx4_qp_remove(mdev->dev, &ring->qp);
1100     mlx4_qp_free(mdev->dev, &ring->qp);
1101     + mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
1102     mlx4_en_unmap_buffer(&ring->wqres.buf);
1103     mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
1104     kfree(ring->bounce_buf);
1105     @@ -231,6 +233,11 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
1106     MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
1107     }
1108    
1109     +static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
1110     +{
1111     + return ring->prod - ring->cons > ring->full_size;
1112     +}
1113     +
1114     static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
1115     struct mlx4_en_tx_ring *ring, int index,
1116     u8 owner)
1117     @@ -473,11 +480,10 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
1118    
1119     netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
1120    
1121     - /*
1122     - * Wakeup Tx queue if this stopped, and at least 1 packet
1123     - * was completed
1124     + /* Wakeup Tx queue if this stopped, and ring is not full.
1125     */
1126     - if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
1127     + if (netif_tx_queue_stopped(ring->tx_queue) &&
1128     + !mlx4_en_is_tx_ring_full(ring)) {
1129     netif_tx_wake_queue(ring->tx_queue);
1130     ring->wake_queue++;
1131     }
1132     @@ -921,8 +927,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
1133     skb_tx_timestamp(skb);
1134    
1135     /* Check available TXBBs And 2K spare for prefetch */
1136     - stop_queue = (int)(ring->prod - ring_cons) >
1137     - ring->size - HEADROOM - MAX_DESC_TXBBS;
1138     + stop_queue = mlx4_en_is_tx_ring_full(ring);
1139     if (unlikely(stop_queue)) {
1140     netif_tx_stop_queue(ring->tx_queue);
1141     ring->queue_stopped++;
1142     @@ -991,8 +996,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
1143     smp_rmb();
1144    
1145     ring_cons = ACCESS_ONCE(ring->cons);
1146     - if (unlikely(((int)(ring->prod - ring_cons)) <=
1147     - ring->size - HEADROOM - MAX_DESC_TXBBS)) {
1148     + if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
1149     netif_tx_wake_queue(ring->tx_queue);
1150     ring->wake_queue++;
1151     }
1152     diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
1153     index 6fce58718837..0d80aed59043 100644
1154     --- a/drivers/net/ethernet/mellanox/mlx4/intf.c
1155     +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
1156     @@ -93,8 +93,14 @@ int mlx4_register_interface(struct mlx4_interface *intf)
1157     mutex_lock(&intf_mutex);
1158    
1159     list_add_tail(&intf->list, &intf_list);
1160     - list_for_each_entry(priv, &dev_list, dev_list)
1161     + list_for_each_entry(priv, &dev_list, dev_list) {
1162     + if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
1163     + mlx4_dbg(&priv->dev,
1164     + "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
1165     + intf->flags &= ~MLX4_INTFF_BONDING;
1166     + }
1167     mlx4_add_device(intf, priv);
1168     + }
1169    
1170     mutex_unlock(&intf_mutex);
1171    
1172     diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
1173     index d021f079f181..909fcf803c54 100644
1174     --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
1175     +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
1176     @@ -279,6 +279,7 @@ struct mlx4_en_tx_ring {
1177     u32 size; /* number of TXBBs */
1178     u32 size_mask;
1179     u16 stride;
1180     + u32 full_size;
1181     u16 cqn; /* index of port CQ associated with this ring */
1182     u32 buf_size;
1183     __be32 doorbell_qpn;
1184     @@ -579,7 +580,6 @@ struct mlx4_en_priv {
1185     int vids[128];
1186     bool wol;
1187     struct device *ddev;
1188     - int base_tx_qpn;
1189     struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
1190     struct hwtstamp_config hwtstamp_config;
1191    
1192     diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
1193     index bdfe51fc3a65..d551df62e61a 100644
1194     --- a/drivers/net/phy/phy_device.c
1195     +++ b/drivers/net/phy/phy_device.c
1196     @@ -796,10 +796,11 @@ static int genphy_config_advert(struct phy_device *phydev)
1197     if (phydev->supported & (SUPPORTED_1000baseT_Half |
1198     SUPPORTED_1000baseT_Full)) {
1199     adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
1200     - if (adv != oldadv)
1201     - changed = 1;
1202     }
1203    
1204     + if (adv != oldadv)
1205     + changed = 1;
1206     +
1207     err = phy_write(phydev, MII_CTRL1000, adv);
1208     if (err < 0)
1209     return err;
1210     diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
1211     index 968787abf78d..ec383b0f5443 100644
1212     --- a/drivers/net/xen-netback/xenbus.c
1213     +++ b/drivers/net/xen-netback/xenbus.c
1214     @@ -681,6 +681,9 @@ static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
1215     char *node;
1216     unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");
1217    
1218     + if (vif->credit_watch.node)
1219     + return -EADDRINUSE;
1220     +
1221     node = kmalloc(maxlen, GFP_KERNEL);
1222     if (!node)
1223     return -ENOMEM;
1224     @@ -770,6 +773,7 @@ static void connect(struct backend_info *be)
1225     }
1226    
1227     xen_net_read_rate(dev, &credit_bytes, &credit_usec);
1228     + xen_unregister_watchers(be->vif);
1229     xen_register_watchers(dev, be->vif);
1230     read_xenbus_vif_flags(be);
1231    
1232     diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
1233     index 6f1fa1773e76..f8d8fdb26b72 100644
1234     --- a/drivers/s390/kvm/virtio_ccw.c
1235     +++ b/drivers/s390/kvm/virtio_ccw.c
1236     @@ -65,6 +65,7 @@ struct virtio_ccw_device {
1237     bool is_thinint;
1238     bool going_away;
1239     bool device_lost;
1240     + unsigned int config_ready;
1241     void *airq_info;
1242     };
1243    
1244     @@ -833,8 +834,11 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
1245     if (ret)
1246     goto out_free;
1247    
1248     - memcpy(vcdev->config, config_area, sizeof(vcdev->config));
1249     - memcpy(buf, &vcdev->config[offset], len);
1250     + memcpy(vcdev->config, config_area, offset + len);
1251     + if (buf)
1252     + memcpy(buf, &vcdev->config[offset], len);
1253     + if (vcdev->config_ready < offset + len)
1254     + vcdev->config_ready = offset + len;
1255    
1256     out_free:
1257     kfree(config_area);
1258     @@ -857,6 +861,9 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
1259     if (!config_area)
1260     goto out_free;
1261    
1262     + /* Make sure we don't overwrite fields. */
1263     + if (vcdev->config_ready < offset)
1264     + virtio_ccw_get_config(vdev, 0, NULL, offset);
1265     memcpy(&vcdev->config[offset], buf, len);
1266     /* Write the config area to the host. */
1267     memcpy(config_area, vcdev->config, sizeof(vcdev->config));
1268     diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
1269     index 3507f880eb74..45b8c8b338df 100644
1270     --- a/drivers/usb/gadget/function/f_fs.c
1271     +++ b/drivers/usb/gadget/function/f_fs.c
1272     @@ -3435,6 +3435,7 @@ done:
1273     static void ffs_closed(struct ffs_data *ffs)
1274     {
1275     struct ffs_dev *ffs_obj;
1276     + struct f_fs_opts *opts;
1277    
1278     ENTER();
1279     ffs_dev_lock();
1280     @@ -3449,8 +3450,13 @@ static void ffs_closed(struct ffs_data *ffs)
1281     ffs_obj->ffs_closed_callback)
1282     ffs_obj->ffs_closed_callback(ffs);
1283    
1284     - if (!ffs_obj->opts || ffs_obj->opts->no_configfs
1285     - || !ffs_obj->opts->func_inst.group.cg_item.ci_parent)
1286     + if (ffs_obj->opts)
1287     + opts = ffs_obj->opts;
1288     + else
1289     + goto done;
1290     +
1291     + if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
1292     + || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
1293     goto done;
1294    
1295     unregister_gadget_item(ffs_obj->opts->
1296     diff --git a/fs/dcache.c b/fs/dcache.c
1297     index 37b5afdaf698..50bb3c207621 100644
1298     --- a/fs/dcache.c
1299     +++ b/fs/dcache.c
1300     @@ -2927,17 +2927,6 @@ restart:
1301     vfsmnt = &mnt->mnt;
1302     continue;
1303     }
1304     - /*
1305     - * Filesystems needing to implement special "root names"
1306     - * should do so with ->d_dname()
1307     - */
1308     - if (IS_ROOT(dentry) &&
1309     - (dentry->d_name.len != 1 ||
1310     - dentry->d_name.name[0] != '/')) {
1311     - WARN(1, "Root dentry has weird name <%.*s>\n",
1312     - (int) dentry->d_name.len,
1313     - dentry->d_name.name);
1314     - }
1315     if (!error)
1316     error = is_mounted(vfsmnt) ? 1 : 2;
1317     break;
1318     diff --git a/fs/inode.c b/fs/inode.c
1319     index ea37cd17b53f..6e342cadef81 100644
1320     --- a/fs/inode.c
1321     +++ b/fs/inode.c
1322     @@ -1693,8 +1693,8 @@ int file_remove_suid(struct file *file)
1323     error = security_inode_killpriv(dentry);
1324     if (!error && killsuid)
1325     error = __remove_suid(dentry, killsuid);
1326     - if (!error && (inode->i_sb->s_flags & MS_NOSEC))
1327     - inode->i_flags |= S_NOSEC;
1328     + if (!error)
1329     + inode_has_no_xattr(inode);
1330    
1331     return error;
1332     }
1333     diff --git a/fs/namespace.c b/fs/namespace.c
1334     index 1b9e11167bae..1d4a97c573e0 100644
1335     --- a/fs/namespace.c
1336     +++ b/fs/namespace.c
1337     @@ -3185,11 +3185,15 @@ bool fs_fully_visible(struct file_system_type *type)
1338     if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
1339     continue;
1340    
1341     - /* This mount is not fully visible if there are any child mounts
1342     - * that cover anything except for empty directories.
1343     + /* This mount is not fully visible if there are any
1344     + * locked child mounts that cover anything except for
1345     + * empty directories.
1346     */
1347     list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
1348     struct inode *inode = child->mnt_mountpoint->d_inode;
1349     + /* Only worry about locked mounts */
1350     + if (!(mnt->mnt.mnt_flags & MNT_LOCKED))
1351     + continue;
1352     if (!S_ISDIR(inode->i_mode))
1353     goto next;
1354     if (inode->i_nlink > 2)
1355     diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
1356     index 2c1036080d52..a7106eda5024 100644
1357     --- a/fs/ufs/balloc.c
1358     +++ b/fs/ufs/balloc.c
1359     @@ -51,8 +51,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
1360    
1361     if (ufs_fragnum(fragment) + count > uspi->s_fpg)
1362     ufs_error (sb, "ufs_free_fragments", "internal error");
1363     -
1364     - lock_ufs(sb);
1365     +
1366     + mutex_lock(&UFS_SB(sb)->s_lock);
1367    
1368     cgno = ufs_dtog(uspi, fragment);
1369     bit = ufs_dtogd(uspi, fragment);
1370     @@ -115,13 +115,13 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
1371     if (sb->s_flags & MS_SYNCHRONOUS)
1372     ubh_sync_block(UCPI_UBH(ucpi));
1373     ufs_mark_sb_dirty(sb);
1374     -
1375     - unlock_ufs(sb);
1376     +
1377     + mutex_unlock(&UFS_SB(sb)->s_lock);
1378     UFSD("EXIT\n");
1379     return;
1380    
1381     failed:
1382     - unlock_ufs(sb);
1383     + mutex_unlock(&UFS_SB(sb)->s_lock);
1384     UFSD("EXIT (FAILED)\n");
1385     return;
1386     }
1387     @@ -151,7 +151,7 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
1388     goto failed;
1389     }
1390    
1391     - lock_ufs(sb);
1392     + mutex_lock(&UFS_SB(sb)->s_lock);
1393    
1394     do_more:
1395     overflow = 0;
1396     @@ -211,12 +211,12 @@ do_more:
1397     }
1398    
1399     ufs_mark_sb_dirty(sb);
1400     - unlock_ufs(sb);
1401     + mutex_unlock(&UFS_SB(sb)->s_lock);
1402     UFSD("EXIT\n");
1403     return;
1404    
1405     failed_unlock:
1406     - unlock_ufs(sb);
1407     + mutex_unlock(&UFS_SB(sb)->s_lock);
1408     failed:
1409     UFSD("EXIT (FAILED)\n");
1410     return;
1411     @@ -357,7 +357,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
1412     usb1 = ubh_get_usb_first(uspi);
1413     *err = -ENOSPC;
1414    
1415     - lock_ufs(sb);
1416     + mutex_lock(&UFS_SB(sb)->s_lock);
1417     tmp = ufs_data_ptr_to_cpu(sb, p);
1418    
1419     if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
1420     @@ -378,19 +378,19 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
1421     "fragment %llu, tmp %llu\n",
1422     (unsigned long long)fragment,
1423     (unsigned long long)tmp);
1424     - unlock_ufs(sb);
1425     + mutex_unlock(&UFS_SB(sb)->s_lock);
1426     return INVBLOCK;
1427     }
1428     if (fragment < UFS_I(inode)->i_lastfrag) {
1429     UFSD("EXIT (ALREADY ALLOCATED)\n");
1430     - unlock_ufs(sb);
1431     + mutex_unlock(&UFS_SB(sb)->s_lock);
1432     return 0;
1433     }
1434     }
1435     else {
1436     if (tmp) {
1437     UFSD("EXIT (ALREADY ALLOCATED)\n");
1438     - unlock_ufs(sb);
1439     + mutex_unlock(&UFS_SB(sb)->s_lock);
1440     return 0;
1441     }
1442     }
1443     @@ -399,7 +399,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
1444     * There is not enough space for user on the device
1445     */
1446     if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
1447     - unlock_ufs(sb);
1448     + mutex_unlock(&UFS_SB(sb)->s_lock);
1449     UFSD("EXIT (FAILED)\n");
1450     return 0;
1451     }
1452     @@ -424,7 +424,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
1453     ufs_clear_frags(inode, result + oldcount,
1454     newcount - oldcount, locked_page != NULL);
1455     }
1456     - unlock_ufs(sb);
1457     + mutex_unlock(&UFS_SB(sb)->s_lock);
1458     UFSD("EXIT, result %llu\n", (unsigned long long)result);
1459     return result;
1460     }
1461     @@ -439,7 +439,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
1462     fragment + count);
1463     ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
1464     locked_page != NULL);
1465     - unlock_ufs(sb);
1466     + mutex_unlock(&UFS_SB(sb)->s_lock);
1467     UFSD("EXIT, result %llu\n", (unsigned long long)result);
1468     return result;
1469     }
1470     @@ -477,7 +477,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
1471     *err = 0;
1472     UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
1473     fragment + count);
1474     - unlock_ufs(sb);
1475     + mutex_unlock(&UFS_SB(sb)->s_lock);
1476     if (newcount < request)
1477     ufs_free_fragments (inode, result + newcount, request - newcount);
1478     ufs_free_fragments (inode, tmp, oldcount);
1479     @@ -485,7 +485,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
1480     return result;
1481     }
1482    
1483     - unlock_ufs(sb);
1484     + mutex_unlock(&UFS_SB(sb)->s_lock);
1485     UFSD("EXIT (FAILED)\n");
1486     return 0;
1487     }
1488     diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
1489     index 7caa01652888..fd0203ce1f7f 100644
1490     --- a/fs/ufs/ialloc.c
1491     +++ b/fs/ufs/ialloc.c
1492     @@ -69,11 +69,11 @@ void ufs_free_inode (struct inode * inode)
1493    
1494     ino = inode->i_ino;
1495    
1496     - lock_ufs(sb);
1497     + mutex_lock(&UFS_SB(sb)->s_lock);
1498    
1499     if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
1500     ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
1501     - unlock_ufs(sb);
1502     + mutex_unlock(&UFS_SB(sb)->s_lock);
1503     return;
1504     }
1505    
1506     @@ -81,7 +81,7 @@ void ufs_free_inode (struct inode * inode)
1507     bit = ufs_inotocgoff (ino);
1508     ucpi = ufs_load_cylinder (sb, cg);
1509     if (!ucpi) {
1510     - unlock_ufs(sb);
1511     + mutex_unlock(&UFS_SB(sb)->s_lock);
1512     return;
1513     }
1514     ucg = ubh_get_ucg(UCPI_UBH(ucpi));
1515     @@ -115,7 +115,7 @@ void ufs_free_inode (struct inode * inode)
1516     ubh_sync_block(UCPI_UBH(ucpi));
1517    
1518     ufs_mark_sb_dirty(sb);
1519     - unlock_ufs(sb);
1520     + mutex_unlock(&UFS_SB(sb)->s_lock);
1521     UFSD("EXIT\n");
1522     }
1523    
1524     @@ -193,7 +193,7 @@ struct inode *ufs_new_inode(struct inode *dir, umode_t mode)
1525     sbi = UFS_SB(sb);
1526     uspi = sbi->s_uspi;
1527    
1528     - lock_ufs(sb);
1529     + mutex_lock(&sbi->s_lock);
1530    
1531     /*
1532     * Try to place the inode in its parent directory
1533     @@ -331,21 +331,21 @@ cg_found:
1534     sync_dirty_buffer(bh);
1535     brelse(bh);
1536     }
1537     - unlock_ufs(sb);
1538     + mutex_unlock(&sbi->s_lock);
1539    
1540     UFSD("allocating inode %lu\n", inode->i_ino);
1541     UFSD("EXIT\n");
1542     return inode;
1543    
1544     fail_remove_inode:
1545     - unlock_ufs(sb);
1546     + mutex_unlock(&sbi->s_lock);
1547     clear_nlink(inode);
1548     unlock_new_inode(inode);
1549     iput(inode);
1550     UFSD("EXIT (FAILED): err %d\n", err);
1551     return ERR_PTR(err);
1552     failed:
1553     - unlock_ufs(sb);
1554     + mutex_unlock(&sbi->s_lock);
1555     make_bad_inode(inode);
1556     iput (inode);
1557     UFSD("EXIT (FAILED): err %d\n", err);
1558     diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
1559     index be7d42c7d938..2d93ab07da8a 100644
1560     --- a/fs/ufs/inode.c
1561     +++ b/fs/ufs/inode.c
1562     @@ -902,6 +902,9 @@ void ufs_evict_inode(struct inode * inode)
1563     invalidate_inode_buffers(inode);
1564     clear_inode(inode);
1565    
1566     - if (want_delete)
1567     + if (want_delete) {
1568     + lock_ufs(inode->i_sb);
1569     ufs_free_inode(inode);
1570     + unlock_ufs(inode->i_sb);
1571     + }
1572     }
1573     diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
1574     index e491a93a7e9a..60ee32249b72 100644
1575     --- a/fs/ufs/namei.c
1576     +++ b/fs/ufs/namei.c
1577     @@ -128,12 +128,12 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
1578     if (l > sb->s_blocksize)
1579     goto out_notlocked;
1580    
1581     + lock_ufs(dir->i_sb);
1582     inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO);
1583     err = PTR_ERR(inode);
1584     if (IS_ERR(inode))
1585     - goto out_notlocked;
1586     + goto out;
1587    
1588     - lock_ufs(dir->i_sb);
1589     if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
1590     /* slow symlink */
1591     inode->i_op = &ufs_symlink_inode_operations;
1592     @@ -174,7 +174,12 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
1593     inode_inc_link_count(inode);
1594     ihold(inode);
1595    
1596     - error = ufs_add_nondir(dentry, inode);
1597     + error = ufs_add_link(dentry, inode);
1598     + if (error) {
1599     + inode_dec_link_count(inode);
1600     + iput(inode);
1601     + } else
1602     + d_instantiate(dentry, inode);
1603     unlock_ufs(dir->i_sb);
1604     return error;
1605     }
1606     @@ -184,9 +189,13 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
1607     struct inode * inode;
1608     int err;
1609    
1610     + lock_ufs(dir->i_sb);
1611     + inode_inc_link_count(dir);
1612     +
1613     inode = ufs_new_inode(dir, S_IFDIR|mode);
1614     + err = PTR_ERR(inode);
1615     if (IS_ERR(inode))
1616     - return PTR_ERR(inode);
1617     + goto out_dir;
1618    
1619     inode->i_op = &ufs_dir_inode_operations;
1620     inode->i_fop = &ufs_dir_operations;
1621     @@ -194,9 +203,6 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
1622    
1623     inode_inc_link_count(inode);
1624    
1625     - lock_ufs(dir->i_sb);
1626     - inode_inc_link_count(dir);
1627     -
1628     err = ufs_make_empty(inode, dir);
1629     if (err)
1630     goto out_fail;
1631     @@ -206,6 +212,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
1632     goto out_fail;
1633     unlock_ufs(dir->i_sb);
1634    
1635     + unlock_new_inode(inode);
1636     d_instantiate(dentry, inode);
1637     out:
1638     return err;
1639     @@ -215,6 +222,7 @@ out_fail:
1640     inode_dec_link_count(inode);
1641     unlock_new_inode(inode);
1642     iput (inode);
1643     +out_dir:
1644     inode_dec_link_count(dir);
1645     unlock_ufs(dir->i_sb);
1646     goto out;
1647     diff --git a/fs/ufs/super.c b/fs/ufs/super.c
1648     index b3bc3e7ae79d..dc33f9416340 100644
1649     --- a/fs/ufs/super.c
1650     +++ b/fs/ufs/super.c
1651     @@ -694,6 +694,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
1652     unsigned flags;
1653    
1654     lock_ufs(sb);
1655     + mutex_lock(&UFS_SB(sb)->s_lock);
1656    
1657     UFSD("ENTER\n");
1658    
1659     @@ -711,6 +712,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
1660     ufs_put_cstotal(sb);
1661    
1662     UFSD("EXIT\n");
1663     + mutex_unlock(&UFS_SB(sb)->s_lock);
1664     unlock_ufs(sb);
1665    
1666     return 0;
1667     @@ -799,6 +801,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
1668     UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY));
1669    
1670     mutex_init(&sbi->mutex);
1671     + mutex_init(&sbi->s_lock);
1672     spin_lock_init(&sbi->work_lock);
1673     INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
1674     /*
1675     @@ -1277,6 +1280,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
1676    
1677     sync_filesystem(sb);
1678     lock_ufs(sb);
1679     + mutex_lock(&UFS_SB(sb)->s_lock);
1680     uspi = UFS_SB(sb)->s_uspi;
1681     flags = UFS_SB(sb)->s_flags;
1682     usb1 = ubh_get_usb_first(uspi);
1683     @@ -1290,6 +1294,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
1684     new_mount_opt = 0;
1685     ufs_set_opt (new_mount_opt, ONERROR_LOCK);
1686     if (!ufs_parse_options (data, &new_mount_opt)) {
1687     + mutex_unlock(&UFS_SB(sb)->s_lock);
1688     unlock_ufs(sb);
1689     return -EINVAL;
1690     }
1691     @@ -1297,12 +1302,14 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
1692     new_mount_opt |= ufstype;
1693     } else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
1694     pr_err("ufstype can't be changed during remount\n");
1695     + mutex_unlock(&UFS_SB(sb)->s_lock);
1696     unlock_ufs(sb);
1697     return -EINVAL;
1698     }
1699    
1700     if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
1701     UFS_SB(sb)->s_mount_opt = new_mount_opt;
1702     + mutex_unlock(&UFS_SB(sb)->s_lock);
1703     unlock_ufs(sb);
1704     return 0;
1705     }
1706     @@ -1326,6 +1333,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
1707     */
1708     #ifndef CONFIG_UFS_FS_WRITE
1709     pr_err("ufs was compiled with read-only support, can't be mounted as read-write\n");
1710     + mutex_unlock(&UFS_SB(sb)->s_lock);
1711     unlock_ufs(sb);
1712     return -EINVAL;
1713     #else
1714     @@ -1335,11 +1343,13 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
1715     ufstype != UFS_MOUNT_UFSTYPE_SUNx86 &&
1716     ufstype != UFS_MOUNT_UFSTYPE_UFS2) {
1717     pr_err("this ufstype is read-only supported\n");
1718     + mutex_unlock(&UFS_SB(sb)->s_lock);
1719     unlock_ufs(sb);
1720     return -EINVAL;
1721     }
1722     if (!ufs_read_cylinder_structures(sb)) {
1723     pr_err("failed during remounting\n");
1724     + mutex_unlock(&UFS_SB(sb)->s_lock);
1725     unlock_ufs(sb);
1726     return -EPERM;
1727     }
1728     @@ -1347,6 +1357,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
1729     #endif
1730     }
1731     UFS_SB(sb)->s_mount_opt = new_mount_opt;
1732     + mutex_unlock(&UFS_SB(sb)->s_lock);
1733     unlock_ufs(sb);
1734     return 0;
1735     }
1736     diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
1737     index 2a07396d5f9e..cf6368d42d4a 100644
1738     --- a/fs/ufs/ufs.h
1739     +++ b/fs/ufs/ufs.h
1740     @@ -30,6 +30,7 @@ struct ufs_sb_info {
1741     int work_queued; /* non-zero if the delayed work is queued */
1742     struct delayed_work sync_work; /* FS sync delayed work */
1743     spinlock_t work_lock; /* protects sync_work and work_queued */
1744     + struct mutex s_lock;
1745     };
1746    
1747     struct ufs_inode_info {
1748     diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
1749     index 3573a81815ad..8ba379f9e467 100644
1750     --- a/include/net/netns/sctp.h
1751     +++ b/include/net/netns/sctp.h
1752     @@ -31,6 +31,7 @@ struct netns_sctp {
1753     struct list_head addr_waitq;
1754     struct timer_list addr_wq_timer;
1755     struct list_head auto_asconf_splist;
1756     + /* Lock that protects both addr_waitq and auto_asconf_splist */
1757     spinlock_t addr_wq_lock;
1758    
1759     /* Lock that protects the local_addr_list writers */
1760     diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
1761     index 2bb2fcf5b11f..495c87e367b3 100644
1762     --- a/include/net/sctp/structs.h
1763     +++ b/include/net/sctp/structs.h
1764     @@ -223,6 +223,10 @@ struct sctp_sock {
1765     atomic_t pd_mode;
1766     /* Receive to here while partial delivery is in effect. */
1767     struct sk_buff_head pd_lobby;
1768     +
1769     + /* These must be the last fields, as they will skipped on copies,
1770     + * like on accept and peeloff operations
1771     + */
1772     struct list_head auto_asconf_list;
1773     int do_auto_asconf;
1774     };
1775     diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
1776     index a9a4a1b7863d..8d423bc649b9 100644
1777     --- a/net/bridge/br_ioctl.c
1778     +++ b/net/bridge/br_ioctl.c
1779     @@ -247,9 +247,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1780     if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
1781     return -EPERM;
1782    
1783     - spin_lock_bh(&br->lock);
1784     br_stp_set_bridge_priority(br, args[1]);
1785     - spin_unlock_bh(&br->lock);
1786     return 0;
1787    
1788     case BRCTL_SET_PORT_PRIORITY:
1789     diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
1790     index 41146872c1b4..7832d07f48f6 100644
1791     --- a/net/bridge/br_stp_if.c
1792     +++ b/net/bridge/br_stp_if.c
1793     @@ -243,12 +243,13 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
1794     return true;
1795     }
1796    
1797     -/* called under bridge lock */
1798     +/* Acquires and releases bridge lock */
1799     void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
1800     {
1801     struct net_bridge_port *p;
1802     int wasroot;
1803    
1804     + spin_lock_bh(&br->lock);
1805     wasroot = br_is_root_bridge(br);
1806    
1807     list_for_each_entry(p, &br->port_list, list) {
1808     @@ -266,6 +267,7 @@ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
1809     br_port_state_selection(br);
1810     if (br_is_root_bridge(br) && !wasroot)
1811     br_become_root_bridge(br);
1812     + spin_unlock_bh(&br->lock);
1813     }
1814    
1815     /* called under bridge lock */
1816     diff --git a/net/can/af_can.c b/net/can/af_can.c
1817     index 32d710eaf1fc..689c818ed007 100644
1818     --- a/net/can/af_can.c
1819     +++ b/net/can/af_can.c
1820     @@ -310,8 +310,12 @@ int can_send(struct sk_buff *skb, int loop)
1821     return err;
1822     }
1823    
1824     - if (newskb)
1825     + if (newskb) {
1826     + if (!(newskb->tstamp.tv64))
1827     + __net_timestamp(newskb);
1828     +
1829     netif_rx_ni(newskb);
1830     + }
1831    
1832     /* update statistics */
1833     can_stats.tx_frames++;
1834     diff --git a/net/core/neighbour.c b/net/core/neighbour.c
1835     index 3de654256028..2237c1b3cdd2 100644
1836     --- a/net/core/neighbour.c
1837     +++ b/net/core/neighbour.c
1838     @@ -957,6 +957,8 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
1839     rc = 0;
1840     if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1841     goto out_unlock_bh;
1842     + if (neigh->dead)
1843     + goto out_dead;
1844    
1845     if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1846     if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1847     @@ -1013,6 +1015,13 @@ out_unlock_bh:
1848     write_unlock(&neigh->lock);
1849     local_bh_enable();
1850     return rc;
1851     +
1852     +out_dead:
1853     + if (neigh->nud_state & NUD_STALE)
1854     + goto out_unlock_bh;
1855     + write_unlock_bh(&neigh->lock);
1856     + kfree_skb(skb);
1857     + return 1;
1858     }
1859     EXPORT_SYMBOL(__neigh_event_send);
1860    
1861     @@ -1076,6 +1085,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1862     if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1863     (old & (NUD_NOARP | NUD_PERMANENT)))
1864     goto out;
1865     + if (neigh->dead)
1866     + goto out;
1867    
1868     if (!(new & NUD_VALID)) {
1869     neigh_del_timer(neigh);
1870     @@ -1225,6 +1236,8 @@ EXPORT_SYMBOL(neigh_update);
1871     */
1872     void __neigh_set_probe_once(struct neighbour *neigh)
1873     {
1874     + if (neigh->dead)
1875     + return;
1876     neigh->updated = jiffies;
1877     if (!(neigh->nud_state & NUD_FAILED))
1878     return;
1879     diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
1880     index 8b47a4d79d04..a5aa54ea6533 100644
1881     --- a/net/ipv4/af_inet.c
1882     +++ b/net/ipv4/af_inet.c
1883     @@ -228,6 +228,8 @@ int inet_listen(struct socket *sock, int backlog)
1884     err = 0;
1885     if (err)
1886     goto out;
1887     +
1888     + tcp_fastopen_init_key_once(true);
1889     }
1890     err = inet_csk_listen_start(sk, backlog);
1891     if (err)
1892     diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
1893     index 7cfb0893f263..6ddde89996f4 100644
1894     --- a/net/ipv4/ip_sockglue.c
1895     +++ b/net/ipv4/ip_sockglue.c
1896     @@ -432,6 +432,15 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
1897     kfree_skb(skb);
1898     }
1899    
1900     +/* For some errors we have valid addr_offset even with zero payload and
1901     + * zero port. Also, addr_offset should be supported if port is set.
1902     + */
1903     +static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
1904     +{
1905     + return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
1906     + serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
1907     +}
1908     +
1909     /* IPv4 supports cmsg on all imcp errors and some timestamps
1910     *
1911     * Timestamp code paths do not initialize the fields expected by cmsg:
1912     @@ -498,7 +507,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
1913    
1914     serr = SKB_EXT_ERR(skb);
1915    
1916     - if (sin && serr->port) {
1917     + if (sin && ipv4_datagram_support_addr(serr)) {
1918     sin->sin_family = AF_INET;
1919     sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
1920     serr->addr_offset);
1921     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1922     index f1377f2a0472..bb2ce74f6004 100644
1923     --- a/net/ipv4/tcp.c
1924     +++ b/net/ipv4/tcp.c
1925     @@ -2545,10 +2545,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
1926    
1927     case TCP_FASTOPEN:
1928     if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
1929     - TCPF_LISTEN)))
1930     + TCPF_LISTEN))) {
1931     + tcp_fastopen_init_key_once(true);
1932     +
1933     err = fastopen_init_queue(sk, val);
1934     - else
1935     + } else {
1936     err = -EINVAL;
1937     + }
1938     break;
1939     case TCP_TIMESTAMP:
1940     if (!tp->repair)
1941     diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
1942     index 46b087a27503..f9c0fb84e435 100644
1943     --- a/net/ipv4/tcp_fastopen.c
1944     +++ b/net/ipv4/tcp_fastopen.c
1945     @@ -78,8 +78,6 @@ static bool __tcp_fastopen_cookie_gen(const void *path,
1946     struct tcp_fastopen_context *ctx;
1947     bool ok = false;
1948    
1949     - tcp_fastopen_init_key_once(true);
1950     -
1951     rcu_read_lock();
1952     ctx = rcu_dereference(tcp_fastopen_ctx);
1953     if (ctx) {
1954     diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
1955     index 762a58c772b8..62d908e64eeb 100644
1956     --- a/net/ipv6/datagram.c
1957     +++ b/net/ipv6/datagram.c
1958     @@ -325,6 +325,16 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
1959     kfree_skb(skb);
1960     }
1961    
1962     +/* For some errors we have valid addr_offset even with zero payload and
1963     + * zero port. Also, addr_offset should be supported if port is set.
1964     + */
1965     +static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
1966     +{
1967     + return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6 ||
1968     + serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
1969     + serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
1970     +}
1971     +
1972     /* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
1973     *
1974     * At one point, excluding local errors was a quick test to identify icmp/icmp6
1975     @@ -389,7 +399,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
1976    
1977     serr = SKB_EXT_ERR(skb);
1978    
1979     - if (sin && serr->port) {
1980     + if (sin && ipv6_datagram_support_addr(serr)) {
1981     const unsigned char *nh = skb_network_header(skb);
1982     sin->sin6_family = AF_INET6;
1983     sin->sin6_flowinfo = 0;
1984     diff --git a/net/mac80211/key.c b/net/mac80211/key.c
1985     index a907f2d5c12d..81e9785f38bc 100644
1986     --- a/net/mac80211/key.c
1987     +++ b/net/mac80211/key.c
1988     @@ -66,12 +66,15 @@ update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta)
1989     if (sdata->vif.type != NL80211_IFTYPE_AP)
1990     return;
1991    
1992     - mutex_lock(&sdata->local->mtx);
1993     + /* crypto_tx_tailroom_needed_cnt is protected by this */
1994     + assert_key_lock(sdata->local);
1995     +
1996     + rcu_read_lock();
1997    
1998     - list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
1999     + list_for_each_entry_rcu(vlan, &sdata->u.ap.vlans, u.vlan.list)
2000     vlan->crypto_tx_tailroom_needed_cnt += delta;
2001    
2002     - mutex_unlock(&sdata->local->mtx);
2003     + rcu_read_unlock();
2004     }
2005    
2006     static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
2007     @@ -95,6 +98,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
2008     * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
2009     */
2010    
2011     + assert_key_lock(sdata->local);
2012     +
2013     update_vlan_tailroom_need_count(sdata, 1);
2014    
2015     if (!sdata->crypto_tx_tailroom_needed_cnt++) {
2016     @@ -109,6 +114,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
2017     static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
2018     int delta)
2019     {
2020     + assert_key_lock(sdata->local);
2021     +
2022     WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
2023    
2024     update_vlan_tailroom_need_count(sdata, -delta);
2025     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2026     index b5989c6ee551..fe1610ddeacf 100644
2027     --- a/net/packet/af_packet.c
2028     +++ b/net/packet/af_packet.c
2029     @@ -1272,16 +1272,6 @@ static void packet_sock_destruct(struct sock *sk)
2030     sk_refcnt_debug_dec(sk);
2031     }
2032    
2033     -static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
2034     -{
2035     - int x = atomic_read(&f->rr_cur) + 1;
2036     -
2037     - if (x >= num)
2038     - x = 0;
2039     -
2040     - return x;
2041     -}
2042     -
2043     static unsigned int fanout_demux_hash(struct packet_fanout *f,
2044     struct sk_buff *skb,
2045     unsigned int num)
2046     @@ -1293,13 +1283,9 @@ static unsigned int fanout_demux_lb(struct packet_fanout *f,
2047     struct sk_buff *skb,
2048     unsigned int num)
2049     {
2050     - int cur, old;
2051     + unsigned int val = atomic_inc_return(&f->rr_cur);
2052    
2053     - cur = atomic_read(&f->rr_cur);
2054     - while ((old = atomic_cmpxchg(&f->rr_cur, cur,
2055     - fanout_rr_next(f, num))) != cur)
2056     - cur = old;
2057     - return cur;
2058     + return val % num;
2059     }
2060    
2061     static unsigned int fanout_demux_cpu(struct packet_fanout *f,
2062     @@ -1353,7 +1339,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
2063     struct packet_type *pt, struct net_device *orig_dev)
2064     {
2065     struct packet_fanout *f = pt->af_packet_priv;
2066     - unsigned int num = f->num_members;
2067     + unsigned int num = READ_ONCE(f->num_members);
2068     struct packet_sock *po;
2069     unsigned int idx;
2070    
2071     diff --git a/net/sctp/output.c b/net/sctp/output.c
2072     index fc5e45b8a832..abe7c2db2412 100644
2073     --- a/net/sctp/output.c
2074     +++ b/net/sctp/output.c
2075     @@ -599,7 +599,9 @@ out:
2076     return err;
2077     no_route:
2078     kfree_skb(nskb);
2079     - IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
2080     +
2081     + if (asoc)
2082     + IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
2083    
2084     /* FIXME: Returning the 'err' will effect all the associations
2085     * associated with a socket, although only one of the paths of the
2086     diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2087     index f09de7fac2e6..5f6c4e61325b 100644
2088     --- a/net/sctp/socket.c
2089     +++ b/net/sctp/socket.c
2090     @@ -1528,8 +1528,10 @@ static void sctp_close(struct sock *sk, long timeout)
2091    
2092     /* Supposedly, no process has access to the socket, but
2093     * the net layers still may.
2094     + * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
2095     + * held and that should be grabbed before socket lock.
2096     */
2097     - local_bh_disable();
2098     + spin_lock_bh(&net->sctp.addr_wq_lock);
2099     bh_lock_sock(sk);
2100    
2101     /* Hold the sock, since sk_common_release() will put sock_put()
2102     @@ -1539,7 +1541,7 @@ static void sctp_close(struct sock *sk, long timeout)
2103     sk_common_release(sk);
2104    
2105     bh_unlock_sock(sk);
2106     - local_bh_enable();
2107     + spin_unlock_bh(&net->sctp.addr_wq_lock);
2108    
2109     sock_put(sk);
2110    
2111     @@ -3580,6 +3582,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
2112     if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
2113     return 0;
2114    
2115     + spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
2116     if (val == 0 && sp->do_auto_asconf) {
2117     list_del(&sp->auto_asconf_list);
2118     sp->do_auto_asconf = 0;
2119     @@ -3588,6 +3591,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
2120     &sock_net(sk)->sctp.auto_asconf_splist);
2121     sp->do_auto_asconf = 1;
2122     }
2123     + spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
2124     return 0;
2125     }
2126    
2127     @@ -4121,18 +4125,28 @@ static int sctp_init_sock(struct sock *sk)
2128     local_bh_disable();
2129     percpu_counter_inc(&sctp_sockets_allocated);
2130     sock_prot_inuse_add(net, sk->sk_prot, 1);
2131     +
2132     + /* Nothing can fail after this block, otherwise
2133     + * sctp_destroy_sock() will be called without addr_wq_lock held
2134     + */
2135     if (net->sctp.default_auto_asconf) {
2136     + spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
2137     list_add_tail(&sp->auto_asconf_list,
2138     &net->sctp.auto_asconf_splist);
2139     sp->do_auto_asconf = 1;
2140     - } else
2141     + spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
2142     + } else {
2143     sp->do_auto_asconf = 0;
2144     + }
2145     +
2146     local_bh_enable();
2147    
2148     return 0;
2149     }
2150    
2151     -/* Cleanup any SCTP per socket resources. */
2152     +/* Cleanup any SCTP per socket resources. Must be called with
2153     + * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
2154     + */
2155     static void sctp_destroy_sock(struct sock *sk)
2156     {
2157     struct sctp_sock *sp;
2158     @@ -7195,6 +7209,19 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
2159     newinet->mc_list = NULL;
2160     }
2161    
2162     +static inline void sctp_copy_descendant(struct sock *sk_to,
2163     + const struct sock *sk_from)
2164     +{
2165     + int ancestor_size = sizeof(struct inet_sock) +
2166     + sizeof(struct sctp_sock) -
2167     + offsetof(struct sctp_sock, auto_asconf_list);
2168     +
2169     + if (sk_from->sk_family == PF_INET6)
2170     + ancestor_size += sizeof(struct ipv6_pinfo);
2171     +
2172     + __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
2173     +}
2174     +
2175     /* Populate the fields of the newsk from the oldsk and migrate the assoc
2176     * and its messages to the newsk.
2177     */
2178     @@ -7209,7 +7236,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
2179     struct sk_buff *skb, *tmp;
2180     struct sctp_ulpevent *event;
2181     struct sctp_bind_hashbucket *head;
2182     - struct list_head tmplist;
2183    
2184     /* Migrate socket buffer sizes and all the socket level options to the
2185     * new socket.
2186     @@ -7217,12 +7243,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
2187     newsk->sk_sndbuf = oldsk->sk_sndbuf;
2188     newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
2189     /* Brute force copy old sctp opt. */
2190     - if (oldsp->do_auto_asconf) {
2191     - memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
2192     - inet_sk_copy_descendant(newsk, oldsk);
2193     - memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
2194     - } else
2195     - inet_sk_copy_descendant(newsk, oldsk);
2196     + sctp_copy_descendant(newsk, oldsk);
2197    
2198     /* Restore the ep value that was overwritten with the above structure
2199     * copy.
2200     diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
2201     index 7dade28affba..212070e1de1a 100644
2202     --- a/security/selinux/hooks.c
2203     +++ b/security/selinux/hooks.c
2204     @@ -403,6 +403,7 @@ static int selinux_is_sblabel_mnt(struct super_block *sb)
2205     return sbsec->behavior == SECURITY_FS_USE_XATTR ||
2206     sbsec->behavior == SECURITY_FS_USE_TRANS ||
2207     sbsec->behavior == SECURITY_FS_USE_TASK ||
2208     + sbsec->behavior == SECURITY_FS_USE_NATIVE ||
2209     /* Special handling. Genfs but also in-core setxattr handler */
2210     !strcmp(sb->s_type->name, "sysfs") ||
2211     !strcmp(sb->s_type->name, "pstore") ||
2212     diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
2213     index 78fb8201014f..950064a0942d 100644
2214     --- a/virt/kvm/arm/vgic.c
2215     +++ b/virt/kvm/arm/vgic.c
2216     @@ -1561,7 +1561,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
2217     goto out;
2218     }
2219    
2220     - if (irq_num >= kvm->arch.vgic.nr_irqs)
2221     + if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
2222     return -EINVAL;
2223    
2224     vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
2225     @@ -2161,10 +2161,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id,
2226    
2227     BUG_ON(!vgic_initialized(kvm));
2228    
2229     - if (spi > kvm->arch.vgic.nr_irqs)
2230     - return -EINVAL;
2231     return kvm_vgic_inject_irq(kvm, 0, spi, level);
2232     -
2233     }
2234    
2235     /* MSI not implemented yet */