Contents of /trunk/kernel-alx/patches-5.4/0196-5.4.97-all-fixes.patch
Parent Directory | Revision Log
Revision 3635 -
(show annotations)
(download)
Mon Oct 24 12:34:12 2022 UTC (23 months ago) by niro
File size: 77123 byte(s)
-sync kernel patches
1 | diff --git a/Makefile b/Makefile |
2 | index 7a47a2594f957..032751f6be0c1 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 5 |
8 | PATCHLEVEL = 4 |
9 | -SUBLEVEL = 96 |
10 | +SUBLEVEL = 97 |
11 | EXTRAVERSION = |
12 | NAME = Kleptomaniac Octopus |
13 | |
14 | @@ -920,12 +920,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) |
15 | # change __FILE__ to the relative path from the srctree |
16 | KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) |
17 | |
18 | -# ensure -fcf-protection is disabled when using retpoline as it is |
19 | -# incompatible with -mindirect-branch=thunk-extern |
20 | -ifdef CONFIG_RETPOLINE |
21 | -KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) |
22 | -endif |
23 | - |
24 | include scripts/Makefile.kasan |
25 | include scripts/Makefile.extrawarn |
26 | include scripts/Makefile.ubsan |
27 | diff --git a/arch/arm/boot/dts/sun7i-a20-bananapro.dts b/arch/arm/boot/dts/sun7i-a20-bananapro.dts |
28 | index 01ccff756996d..5740f9442705c 100644 |
29 | --- a/arch/arm/boot/dts/sun7i-a20-bananapro.dts |
30 | +++ b/arch/arm/boot/dts/sun7i-a20-bananapro.dts |
31 | @@ -110,7 +110,7 @@ |
32 | pinctrl-names = "default"; |
33 | pinctrl-0 = <&gmac_rgmii_pins>; |
34 | phy-handle = <&phy1>; |
35 | - phy-mode = "rgmii"; |
36 | + phy-mode = "rgmii-id"; |
37 | phy-supply = <&reg_gmac_3v3>; |
38 | status = "okay"; |
39 | }; |
40 | diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c |
41 | index 8b81a17f675d9..e17ec92b90dd8 100644 |
42 | --- a/arch/arm/mach-footbridge/dc21285.c |
43 | +++ b/arch/arm/mach-footbridge/dc21285.c |
44 | @@ -66,15 +66,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where, |
45 | if (addr) |
46 | switch (size) { |
47 | case 1: |
48 | - asm("ldrb %0, [%1, %2]" |
49 | + asm volatile("ldrb %0, [%1, %2]" |
50 | : "=r" (v) : "r" (addr), "r" (where) : "cc"); |
51 | break; |
52 | case 2: |
53 | - asm("ldrh %0, [%1, %2]" |
54 | + asm volatile("ldrh %0, [%1, %2]" |
55 | : "=r" (v) : "r" (addr), "r" (where) : "cc"); |
56 | break; |
57 | case 4: |
58 | - asm("ldr %0, [%1, %2]" |
59 | + asm volatile("ldr %0, [%1, %2]" |
60 | : "=r" (v) : "r" (addr), "r" (where) : "cc"); |
61 | break; |
62 | } |
63 | @@ -100,17 +100,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where, |
64 | if (addr) |
65 | switch (size) { |
66 | case 1: |
67 | - asm("strb %0, [%1, %2]" |
68 | + asm volatile("strb %0, [%1, %2]" |
69 | : : "r" (value), "r" (addr), "r" (where) |
70 | : "cc"); |
71 | break; |
72 | case 2: |
73 | - asm("strh %0, [%1, %2]" |
74 | + asm volatile("strh %0, [%1, %2]" |
75 | : : "r" (value), "r" (addr), "r" (where) |
76 | : "cc"); |
77 | break; |
78 | case 4: |
79 | - asm("str %0, [%1, %2]" |
80 | + asm volatile("str %0, [%1, %2]" |
81 | : : "r" (value), "r" (addr), "r" (where) |
82 | : "cc"); |
83 | break; |
84 | diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi |
85 | index 354ef2f3eac67..9533c85fb0a30 100644 |
86 | --- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi |
87 | +++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi |
88 | @@ -2382,7 +2382,7 @@ |
89 | interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>; |
90 | dr_mode = "host"; |
91 | snps,dis_u2_susphy_quirk; |
92 | - snps,quirk-frame-length-adjustment; |
93 | + snps,quirk-frame-length-adjustment = <0x20>; |
94 | snps,parkmode-disable-ss-quirk; |
95 | }; |
96 | }; |
97 | diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi |
98 | index d4c1da3d4bde2..04d4b1b11a00a 100644 |
99 | --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi |
100 | +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi |
101 | @@ -304,7 +304,7 @@ |
102 | |
103 | dcfg: dcfg@1ee0000 { |
104 | compatible = "fsl,ls1046a-dcfg", "syscon"; |
105 | - reg = <0x0 0x1ee0000 0x0 0x10000>; |
106 | + reg = <0x0 0x1ee0000 0x0 0x1000>; |
107 | big-endian; |
108 | }; |
109 | |
110 | diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts |
111 | index f539b3655f6b9..e638f216dbfb3 100644 |
112 | --- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts |
113 | +++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts |
114 | @@ -243,6 +243,8 @@ |
115 | &i2c3 { |
116 | status = "okay"; |
117 | clock-frequency = <400000>; |
118 | + /* Overwrite pinctrl-0 from sdm845.dtsi */ |
119 | + pinctrl-0 = <&qup_i2c3_default &i2c3_hid_active>; |
120 | |
121 | tsel: hid@15 { |
122 | compatible = "hid-over-i2c"; |
123 | @@ -250,9 +252,6 @@ |
124 | hid-descr-addr = <0x1>; |
125 | |
126 | interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>; |
127 | - |
128 | - pinctrl-names = "default"; |
129 | - pinctrl-0 = <&i2c3_hid_active>; |
130 | }; |
131 | |
132 | tsc2: hid@2c { |
133 | @@ -261,11 +260,6 @@ |
134 | hid-descr-addr = <0x20>; |
135 | |
136 | interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>; |
137 | - |
138 | - pinctrl-names = "default"; |
139 | - pinctrl-0 = <&i2c3_hid_active>; |
140 | - |
141 | - status = "disabled"; |
142 | }; |
143 | }; |
144 | |
145 | diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi |
146 | index 9e09909a510a1..98b014a8f9165 100644 |
147 | --- a/arch/arm64/boot/dts/rockchip/px30.dtsi |
148 | +++ b/arch/arm64/boot/dts/rockchip/px30.dtsi |
149 | @@ -860,7 +860,7 @@ |
150 | vopl_mmu: iommu@ff470f00 { |
151 | compatible = "rockchip,iommu"; |
152 | reg = <0x0 0xff470f00 0x0 0x100>; |
153 | - interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>; |
154 | + interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>; |
155 | interrupt-names = "vopl_mmu"; |
156 | clocks = <&cru ACLK_VOPL>, <&cru HCLK_VOPL>; |
157 | clock-names = "aclk", "hclk"; |
158 | diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c |
159 | index 179b41ad63baf..18618af3835f9 100644 |
160 | --- a/arch/um/drivers/virtio_uml.c |
161 | +++ b/arch/um/drivers/virtio_uml.c |
162 | @@ -959,6 +959,7 @@ static void virtio_uml_release_dev(struct device *d) |
163 | } |
164 | |
165 | os_close_file(vu_dev->sock); |
166 | + kfree(vu_dev); |
167 | } |
168 | |
169 | /* Platform device */ |
170 | @@ -977,7 +978,7 @@ static int virtio_uml_probe(struct platform_device *pdev) |
171 | if (!pdata) |
172 | return -EINVAL; |
173 | |
174 | - vu_dev = devm_kzalloc(&pdev->dev, sizeof(*vu_dev), GFP_KERNEL); |
175 | + vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL); |
176 | if (!vu_dev) |
177 | return -ENOMEM; |
178 | |
179 | diff --git a/arch/x86/Makefile b/arch/x86/Makefile |
180 | index 94df0868804bc..b5e3bfd4facea 100644 |
181 | --- a/arch/x86/Makefile |
182 | +++ b/arch/x86/Makefile |
183 | @@ -131,6 +131,9 @@ else |
184 | |
185 | KBUILD_CFLAGS += -mno-red-zone |
186 | KBUILD_CFLAGS += -mcmodel=kernel |
187 | + |
188 | + # Intel CET isn't enabled in the kernel |
189 | + KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) |
190 | endif |
191 | |
192 | ifdef CONFIG_X86_X32 |
193 | diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h |
194 | index 6016559ed1713..5bef1575708dc 100644 |
195 | --- a/arch/x86/include/asm/apic.h |
196 | +++ b/arch/x86/include/asm/apic.h |
197 | @@ -197,16 +197,6 @@ static inline bool apic_needs_pit(void) { return true; } |
198 | #endif /* !CONFIG_X86_LOCAL_APIC */ |
199 | |
200 | #ifdef CONFIG_X86_X2APIC |
201 | -/* |
202 | - * Make previous memory operations globally visible before |
203 | - * sending the IPI through x2apic wrmsr. We need a serializing instruction or |
204 | - * mfence for this. |
205 | - */ |
206 | -static inline void x2apic_wrmsr_fence(void) |
207 | -{ |
208 | - asm volatile("mfence" : : : "memory"); |
209 | -} |
210 | - |
211 | static inline void native_apic_msr_write(u32 reg, u32 v) |
212 | { |
213 | if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || |
214 | diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h |
215 | index 7f828fe497978..4819d5e5a3353 100644 |
216 | --- a/arch/x86/include/asm/barrier.h |
217 | +++ b/arch/x86/include/asm/barrier.h |
218 | @@ -84,4 +84,22 @@ do { \ |
219 | |
220 | #include <asm-generic/barrier.h> |
221 | |
222 | +/* |
223 | + * Make previous memory operations globally visible before |
224 | + * a WRMSR. |
225 | + * |
226 | + * MFENCE makes writes visible, but only affects load/store |
227 | + * instructions. WRMSR is unfortunately not a load/store |
228 | + * instruction and is unaffected by MFENCE. The LFENCE ensures |
229 | + * that the WRMSR is not reordered. |
230 | + * |
231 | + * Most WRMSRs are full serializing instructions themselves and |
232 | + * do not require this barrier. This is only required for the |
233 | + * IA32_TSC_DEADLINE and X2APIC MSRs. |
234 | + */ |
235 | +static inline void weak_wrmsr_fence(void) |
236 | +{ |
237 | + asm volatile("mfence; lfence" : : : "memory"); |
238 | +} |
239 | + |
240 | #endif /* _ASM_X86_BARRIER_H */ |
241 | diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c |
242 | index 06fa808d72032..3dca7b8642e9c 100644 |
243 | --- a/arch/x86/kernel/apic/apic.c |
244 | +++ b/arch/x86/kernel/apic/apic.c |
245 | @@ -42,6 +42,7 @@ |
246 | #include <asm/x86_init.h> |
247 | #include <asm/pgalloc.h> |
248 | #include <linux/atomic.h> |
249 | +#include <asm/barrier.h> |
250 | #include <asm/mpspec.h> |
251 | #include <asm/i8259.h> |
252 | #include <asm/proto.h> |
253 | @@ -472,6 +473,9 @@ static int lapic_next_deadline(unsigned long delta, |
254 | { |
255 | u64 tsc; |
256 | |
257 | + /* This MSR is special and need a special fence: */ |
258 | + weak_wrmsr_fence(); |
259 | + |
260 | tsc = rdtsc(); |
261 | wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); |
262 | return 0; |
263 | diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c |
264 | index b0889c48a2ac5..7eec3c154fa24 100644 |
265 | --- a/arch/x86/kernel/apic/x2apic_cluster.c |
266 | +++ b/arch/x86/kernel/apic/x2apic_cluster.c |
267 | @@ -29,7 +29,8 @@ static void x2apic_send_IPI(int cpu, int vector) |
268 | { |
269 | u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu); |
270 | |
271 | - x2apic_wrmsr_fence(); |
272 | + /* x2apic MSRs are special and need a special fence: */ |
273 | + weak_wrmsr_fence(); |
274 | __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL); |
275 | } |
276 | |
277 | @@ -41,7 +42,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) |
278 | unsigned long flags; |
279 | u32 dest; |
280 | |
281 | - x2apic_wrmsr_fence(); |
282 | + /* x2apic MSRs are special and need a special fence: */ |
283 | + weak_wrmsr_fence(); |
284 | local_irq_save(flags); |
285 | |
286 | tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask); |
287 | diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c |
288 | index e14eae6d6ea71..032a00e5d9fa6 100644 |
289 | --- a/arch/x86/kernel/apic/x2apic_phys.c |
290 | +++ b/arch/x86/kernel/apic/x2apic_phys.c |
291 | @@ -43,7 +43,8 @@ static void x2apic_send_IPI(int cpu, int vector) |
292 | { |
293 | u32 dest = per_cpu(x86_cpu_to_apicid, cpu); |
294 | |
295 | - x2apic_wrmsr_fence(); |
296 | + /* x2apic MSRs are special and need a special fence: */ |
297 | + weak_wrmsr_fence(); |
298 | __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL); |
299 | } |
300 | |
301 | @@ -54,7 +55,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) |
302 | unsigned long this_cpu; |
303 | unsigned long flags; |
304 | |
305 | - x2apic_wrmsr_fence(); |
306 | + /* x2apic MSRs are special and need a special fence: */ |
307 | + weak_wrmsr_fence(); |
308 | |
309 | local_irq_save(flags); |
310 | |
311 | @@ -125,7 +127,8 @@ void __x2apic_send_IPI_shorthand(int vector, u32 which) |
312 | { |
313 | unsigned long cfg = __prepare_ICR(which, vector, 0); |
314 | |
315 | - x2apic_wrmsr_fence(); |
316 | + /* x2apic MSRs are special and need a special fence: */ |
317 | + weak_wrmsr_fence(); |
318 | native_x2apic_icr_write(cfg, 0); |
319 | } |
320 | |
321 | diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c |
322 | index 39265b55929d2..60c8dcb907a50 100644 |
323 | --- a/arch/x86/kvm/emulate.c |
324 | +++ b/arch/x86/kvm/emulate.c |
325 | @@ -2890,6 +2890,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt) |
326 | ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); |
327 | *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data : |
328 | (u32)msr_data; |
329 | + if (efer & EFER_LMA) |
330 | + ctxt->mode = X86EMUL_MODE_PROT64; |
331 | |
332 | return X86EMUL_CONTINUE; |
333 | } |
334 | diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
335 | index 2b506904be024..4906e480b5bb6 100644 |
336 | --- a/arch/x86/kvm/svm.c |
337 | +++ b/arch/x86/kvm/svm.c |
338 | @@ -889,6 +889,11 @@ static int has_svm(void) |
339 | return 0; |
340 | } |
341 | |
342 | + if (sev_active()) { |
343 | + pr_info("KVM is unsupported when running as an SEV guest\n"); |
344 | + return 0; |
345 | + } |
346 | + |
347 | return 1; |
348 | } |
349 | |
350 | diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c |
351 | index 9268c12458c84..dfa01bcdc3694 100644 |
352 | --- a/arch/x86/mm/mem_encrypt.c |
353 | +++ b/arch/x86/mm/mem_encrypt.c |
354 | @@ -375,6 +375,7 @@ bool force_dma_unencrypted(struct device *dev) |
355 | |
356 | return false; |
357 | } |
358 | +EXPORT_SYMBOL_GPL(sev_active); |
359 | |
360 | /* Architecture __weak replacement functions */ |
361 | void __init mem_encrypt_free_decrypted_mem(void) |
362 | diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
363 | index d2dd387c95d86..de06ee7d2ad46 100644 |
364 | --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
365 | +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
366 | @@ -1434,8 +1434,6 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) |
367 | |
368 | drm_connector_update_edid_property(connector, |
369 | aconnector->edid); |
370 | - drm_add_edid_modes(connector, aconnector->edid); |
371 | - |
372 | if (aconnector->dc_link->aux_mode) |
373 | drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, |
374 | aconnector->edid); |
375 | diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c |
376 | index 8c73377ac82ca..3d004ca76b6ed 100644 |
377 | --- a/drivers/input/joystick/xpad.c |
378 | +++ b/drivers/input/joystick/xpad.c |
379 | @@ -215,9 +215,17 @@ static const struct xpad_device { |
380 | { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, |
381 | { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, |
382 | { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, |
383 | - { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, |
384 | + { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, |
385 | + { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, |
386 | + { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE }, |
387 | { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, |
388 | { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE }, |
389 | + { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, |
390 | + { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, |
391 | + { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, |
392 | + { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, |
393 | + { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, |
394 | + { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, |
395 | { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, |
396 | { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, |
397 | { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, |
398 | @@ -296,6 +304,9 @@ static const struct xpad_device { |
399 | { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 }, |
400 | { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, |
401 | { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, |
402 | + { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, |
403 | + { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, |
404 | + { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, |
405 | { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, |
406 | { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, |
407 | { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, |
408 | @@ -429,8 +440,12 @@ static const struct usb_device_id xpad_table[] = { |
409 | XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ |
410 | XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ |
411 | XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ |
412 | + XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */ |
413 | + XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */ |
414 | XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ |
415 | XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */ |
416 | + XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */ |
417 | + XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */ |
418 | { } |
419 | }; |
420 | |
421 | diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h |
422 | index eca931da76c3a..b7dbcbac3a1a5 100644 |
423 | --- a/drivers/input/serio/i8042-x86ia64io.h |
424 | +++ b/drivers/input/serio/i8042-x86ia64io.h |
425 | @@ -219,6 +219,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { |
426 | DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), |
427 | DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), |
428 | }, |
429 | + }, |
430 | + { |
431 | .matches = { |
432 | DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"), |
433 | DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"), |
434 | diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c |
435 | index 984c7a6ea4fe8..953d86ca6d2b2 100644 |
436 | --- a/drivers/iommu/intel-iommu.c |
437 | +++ b/drivers/iommu/intel-iommu.c |
438 | @@ -3285,6 +3285,12 @@ static int __init init_dmars(void) |
439 | |
440 | if (!ecap_pass_through(iommu->ecap)) |
441 | hw_pass_through = 0; |
442 | + |
443 | + if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) { |
444 | + pr_info("Disable batched IOTLB flush due to virtualization"); |
445 | + intel_iommu_strict = 1; |
446 | + } |
447 | + |
448 | #ifdef CONFIG_INTEL_IOMMU_SVM |
449 | if (pasid_supported(iommu)) |
450 | intel_svm_init(iommu); |
451 | diff --git a/drivers/md/md.c b/drivers/md/md.c |
452 | index ec5dfb7ae4e16..cc38530804c90 100644 |
453 | --- a/drivers/md/md.c |
454 | +++ b/drivers/md/md.c |
455 | @@ -538,8 +538,10 @@ static void md_submit_flush_data(struct work_struct *ws) |
456 | * could wait for this and below md_handle_request could wait for those |
457 | * bios because of suspend check |
458 | */ |
459 | + spin_lock_irq(&mddev->lock); |
460 | mddev->last_flush = mddev->start_flush; |
461 | mddev->flush_bio = NULL; |
462 | + spin_unlock_irq(&mddev->lock); |
463 | wake_up(&mddev->sb_wait); |
464 | |
465 | if (bio->bi_iter.bi_size == 0) { |
466 | diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c |
467 | index 3efaa9534a777..9a5aaac29099b 100644 |
468 | --- a/drivers/mmc/core/sdio_cis.c |
469 | +++ b/drivers/mmc/core/sdio_cis.c |
470 | @@ -20,6 +20,8 @@ |
471 | #include "sdio_cis.h" |
472 | #include "sdio_ops.h" |
473 | |
474 | +#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */ |
475 | + |
476 | static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, |
477 | const unsigned char *buf, unsigned size) |
478 | { |
479 | @@ -266,6 +268,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) |
480 | |
481 | do { |
482 | unsigned char tpl_code, tpl_link; |
483 | + unsigned long timeout = jiffies + |
484 | + msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS); |
485 | |
486 | ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code); |
487 | if (ret) |
488 | @@ -318,6 +322,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) |
489 | prev = &this->next; |
490 | |
491 | if (ret == -ENOENT) { |
492 | + if (time_after(jiffies, timeout)) |
493 | + break; |
494 | /* warn about unknown tuples */ |
495 | pr_warn_ratelimited("%s: queuing unknown" |
496 | " CIS tuple 0x%02x (%u bytes)\n", |
497 | diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c |
498 | index 469b155df4885..1af09fd3fed1c 100644 |
499 | --- a/drivers/net/dsa/mv88e6xxx/chip.c |
500 | +++ b/drivers/net/dsa/mv88e6xxx/chip.c |
501 | @@ -1517,7 +1517,11 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, |
502 | if (!entry.portvec) |
503 | entry.state = 0; |
504 | } else { |
505 | - entry.portvec |= BIT(port); |
506 | + if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC) |
507 | + entry.portvec = BIT(port); |
508 | + else |
509 | + entry.portvec |= BIT(port); |
510 | + |
511 | entry.state = state; |
512 | } |
513 | |
514 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c |
515 | index c20dc689698ed..5acd599d6b9af 100644 |
516 | --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c |
517 | +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c |
518 | @@ -55,12 +55,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) |
519 | |
520 | pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; |
521 | pfe.severity = PF_EVENT_SEVERITY_INFO; |
522 | - |
523 | - /* Always report link is down if the VF queues aren't enabled */ |
524 | - if (!vf->queues_enabled) { |
525 | - pfe.event_data.link_event.link_status = false; |
526 | - pfe.event_data.link_event.link_speed = 0; |
527 | - } else if (vf->link_forced) { |
528 | + if (vf->link_forced) { |
529 | pfe.event_data.link_event.link_status = vf->link_up; |
530 | pfe.event_data.link_event.link_speed = |
531 | (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0); |
532 | @@ -70,7 +65,6 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) |
533 | pfe.event_data.link_event.link_speed = |
534 | i40e_virtchnl_link_speed(ls->link_speed); |
535 | } |
536 | - |
537 | i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, |
538 | 0, (u8 *)&pfe, sizeof(pfe), NULL); |
539 | } |
540 | @@ -2393,8 +2387,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) |
541 | } |
542 | } |
543 | |
544 | - vf->queues_enabled = true; |
545 | - |
546 | error_param: |
547 | /* send the response to the VF */ |
548 | return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, |
549 | @@ -2416,9 +2408,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) |
550 | struct i40e_pf *pf = vf->pf; |
551 | i40e_status aq_ret = 0; |
552 | |
553 | - /* Immediately mark queues as disabled */ |
554 | - vf->queues_enabled = false; |
555 | - |
556 | if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { |
557 | aq_ret = I40E_ERR_PARAM; |
558 | goto error_param; |
559 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h |
560 | index 7164b9bb294ff..f65cc0c165502 100644 |
561 | --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h |
562 | +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h |
563 | @@ -99,7 +99,6 @@ struct i40e_vf { |
564 | unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ |
565 | bool link_forced; |
566 | bool link_up; /* only valid if VF link is forced */ |
567 | - bool queues_enabled; /* true if the VF queues are enabled */ |
568 | bool spoofchk; |
569 | u16 num_mac; |
570 | u16 num_vlan; |
571 | diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c |
572 | index 0303eeb760505..0365bf2b480e3 100644 |
573 | --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c |
574 | +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c |
575 | @@ -1709,7 +1709,8 @@ static int igc_get_link_ksettings(struct net_device *netdev, |
576 | Asym_Pause); |
577 | } |
578 | |
579 | - status = rd32(IGC_STATUS); |
580 | + status = pm_runtime_suspended(&adapter->pdev->dev) ? |
581 | + 0 : rd32(IGC_STATUS); |
582 | |
583 | if (status & IGC_STATUS_LU) { |
584 | if (status & IGC_STATUS_SPEED_1000) { |
585 | diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c |
586 | index c25f555aaf822..ed5d09c11c389 100644 |
587 | --- a/drivers/net/ethernet/intel/igc/igc_i225.c |
588 | +++ b/drivers/net/ethernet/intel/igc/igc_i225.c |
589 | @@ -219,9 +219,9 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, |
590 | u16 *data) |
591 | { |
592 | struct igc_nvm_info *nvm = &hw->nvm; |
593 | + s32 ret_val = -IGC_ERR_NVM; |
594 | u32 attempts = 100000; |
595 | u32 i, k, eewr = 0; |
596 | - s32 ret_val = 0; |
597 | |
598 | /* A check for invalid values: offset too large, too many words, |
599 | * too many words for the offset, and not enough words. |
600 | @@ -229,7 +229,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, |
601 | if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || |
602 | words == 0) { |
603 | hw_dbg("nvm parameter(s) out of bounds\n"); |
604 | - ret_val = -IGC_ERR_NVM; |
605 | goto out; |
606 | } |
607 | |
608 | diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c |
609 | index 5eeb4c8caf4ae..08adf103e90b4 100644 |
610 | --- a/drivers/net/ethernet/intel/igc/igc_mac.c |
611 | +++ b/drivers/net/ethernet/intel/igc/igc_mac.c |
612 | @@ -647,7 +647,7 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw) |
613 | } |
614 | |
615 | out: |
616 | - return 0; |
617 | + return ret_val; |
618 | } |
619 | |
620 | /** |
621 | diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c |
622 | index a30eb90ba3d28..dd590086fe6a5 100644 |
623 | --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c |
624 | +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c |
625 | @@ -29,16 +29,16 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) |
626 | /* Clear entry invalidation bit */ |
627 | pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; |
628 | |
629 | - /* Write tcam index - indirect access */ |
630 | - mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); |
631 | - for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) |
632 | - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); |
633 | - |
634 | /* Write sram index - indirect access */ |
635 | mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); |
636 | for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) |
637 | mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]); |
638 | |
639 | + /* Write tcam index - indirect access */ |
640 | + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); |
641 | + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) |
642 | + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); |
643 | + |
644 | return 0; |
645 | } |
646 | |
647 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c |
648 | index 4944c40436f08..11e12761b0a6e 100644 |
649 | --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c |
650 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c |
651 | @@ -1697,6 +1697,7 @@ search_again_locked: |
652 | if (!fte_tmp) |
653 | continue; |
654 | rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp); |
655 | + /* No error check needed here, because insert_fte() is not called */ |
656 | up_write_ref_node(&fte_tmp->node, false); |
657 | tree_put_node(&fte_tmp->node, false); |
658 | kmem_cache_free(steering->ftes_cache, fte); |
659 | @@ -1745,6 +1746,8 @@ skip_search: |
660 | up_write_ref_node(&g->node, false); |
661 | rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); |
662 | up_write_ref_node(&fte->node, false); |
663 | + if (IS_ERR(rule)) |
664 | + tree_put_node(&fte->node, false); |
665 | return rule; |
666 | } |
667 | rule = ERR_PTR(-ENOENT); |
668 | @@ -1844,6 +1847,8 @@ search_again_locked: |
669 | up_write_ref_node(&g->node, false); |
670 | rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); |
671 | up_write_ref_node(&fte->node, false); |
672 | + if (IS_ERR(rule)) |
673 | + tree_put_node(&fte->node, false); |
674 | tree_put_node(&g->node, false); |
675 | return rule; |
676 | |
677 | diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c |
678 | index 366ca1b5da5cc..1e8244ec5b332 100644 |
679 | --- a/drivers/net/ethernet/realtek/r8169_main.c |
680 | +++ b/drivers/net/ethernet/realtek/r8169_main.c |
681 | @@ -6419,10 +6419,10 @@ static int rtl8169_close(struct net_device *dev) |
682 | |
683 | cancel_work_sync(&tp->wk.work); |
684 | |
685 | - phy_disconnect(tp->phydev); |
686 | - |
687 | free_irq(pci_irq_vector(pdev, 0), tp); |
688 | |
689 | + phy_disconnect(tp->phydev); |
690 | + |
691 | dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, |
692 | tp->RxPhyAddr); |
693 | dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, |
694 | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c |
695 | index c54fe6650018e..7272d8522a9e9 100644 |
696 | --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c |
697 | +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c |
698 | @@ -134,7 +134,14 @@ static int iwl_configure_rxq(struct iwl_mvm *mvm) |
699 | .dataflags[0] = IWL_HCMD_DFL_NOCOPY, |
700 | }; |
701 | |
702 | - /* Do not configure default queue, it is configured via context info */ |
703 | + /* |
704 | + * The default queue is configured via context info, so if we |
705 | + * have a single queue, there's nothing to do here. |
706 | + */ |
707 | + if (mvm->trans->num_rx_queues == 1) |
708 | + return 0; |
709 | + |
710 | + /* skip the default queue */ |
711 | num_queues = mvm->trans->num_rx_queues - 1; |
712 | |
713 | size = struct_size(cmd, data, num_queues); |
714 | diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c |
715 | index 196aa44c4936a..e0f411021c59d 100644 |
716 | --- a/drivers/nvdimm/dimm_devs.c |
717 | +++ b/drivers/nvdimm/dimm_devs.c |
718 | @@ -344,16 +344,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr, |
719 | } |
720 | static DEVICE_ATTR_RO(state); |
721 | |
722 | -static ssize_t available_slots_show(struct device *dev, |
723 | - struct device_attribute *attr, char *buf) |
724 | +static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf) |
725 | { |
726 | - struct nvdimm_drvdata *ndd = dev_get_drvdata(dev); |
727 | + struct device *dev; |
728 | ssize_t rc; |
729 | u32 nfree; |
730 | |
731 | if (!ndd) |
732 | return -ENXIO; |
733 | |
734 | + dev = ndd->dev; |
735 | nvdimm_bus_lock(dev); |
736 | nfree = nd_label_nfree(ndd); |
737 | if (nfree - 1 > nfree) { |
738 | @@ -365,6 +365,18 @@ static ssize_t available_slots_show(struct device *dev, |
739 | nvdimm_bus_unlock(dev); |
740 | return rc; |
741 | } |
742 | + |
743 | +static ssize_t available_slots_show(struct device *dev, |
744 | + struct device_attribute *attr, char *buf) |
745 | +{ |
746 | + ssize_t rc; |
747 | + |
748 | + nd_device_lock(dev); |
749 | + rc = __available_slots_show(dev_get_drvdata(dev), buf); |
750 | + nd_device_unlock(dev); |
751 | + |
752 | + return rc; |
753 | +} |
754 | static DEVICE_ATTR_RO(available_slots); |
755 | |
756 | __weak ssize_t security_show(struct device *dev, |
757 | diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c |
758 | index ef93bd3ed339c..434d3f21f0e13 100644 |
759 | --- a/drivers/nvme/host/pci.c |
760 | +++ b/drivers/nvme/host/pci.c |
761 | @@ -3161,6 +3161,8 @@ static const struct pci_device_id nvme_id_table[] = { |
762 | { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ |
763 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
764 | { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, |
765 | + { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ |
766 | + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, |
767 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), |
768 | .driver_data = NVME_QUIRK_SINGLE_VECTOR }, |
769 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, |
770 | diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c |
771 | index e31823f19a0fa..9242224156f5b 100644 |
772 | --- a/drivers/nvme/target/tcp.c |
773 | +++ b/drivers/nvme/target/tcp.c |
774 | @@ -292,7 +292,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) |
775 | length = cmd->pdu_len; |
776 | cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE); |
777 | offset = cmd->rbytes_done; |
778 | - cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE); |
779 | + cmd->sg_idx = offset / PAGE_SIZE; |
780 | sg_offset = offset % PAGE_SIZE; |
781 | sg = &cmd->req.sg[cmd->sg_idx]; |
782 | |
783 | @@ -305,6 +305,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) |
784 | length -= iov_len; |
785 | sg = sg_next(sg); |
786 | iov++; |
787 | + sg_offset = 0; |
788 | } |
789 | |
790 | iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov, |
791 | diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c |
792 | index 134dc2005ce97..c9f6e97582885 100644 |
793 | --- a/drivers/usb/class/usblp.c |
794 | +++ b/drivers/usb/class/usblp.c |
795 | @@ -1329,14 +1329,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol) |
796 | if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL) |
797 | return -EINVAL; |
798 | |
799 | - alts = usblp->protocol[protocol].alt_setting; |
800 | - if (alts < 0) |
801 | - return -EINVAL; |
802 | - r = usb_set_interface(usblp->dev, usblp->ifnum, alts); |
803 | - if (r < 0) { |
804 | - printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", |
805 | - alts, usblp->ifnum); |
806 | - return r; |
807 | + /* Don't unnecessarily set the interface if there's a single alt. */ |
808 | + if (usblp->intf->num_altsetting > 1) { |
809 | + alts = usblp->protocol[protocol].alt_setting; |
810 | + if (alts < 0) |
811 | + return -EINVAL; |
812 | + r = usb_set_interface(usblp->dev, usblp->ifnum, alts); |
813 | + if (r < 0) { |
814 | + printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", |
815 | + alts, usblp->ifnum); |
816 | + return r; |
817 | + } |
818 | } |
819 | |
820 | usblp->bidir = (usblp->protocol[protocol].epread != NULL); |
821 | diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c |
822 | index 70ac47a341ac2..e3f1f20c49221 100644 |
823 | --- a/drivers/usb/dwc2/gadget.c |
824 | +++ b/drivers/usb/dwc2/gadget.c |
825 | @@ -1543,7 +1543,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep, |
826 | static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, |
827 | u32 windex) |
828 | { |
829 | - struct dwc2_hsotg_ep *ep; |
830 | int dir = (windex & USB_DIR_IN) ? 1 : 0; |
831 | int idx = windex & 0x7F; |
832 | |
833 | @@ -1553,12 +1552,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, |
834 | if (idx > hsotg->num_of_eps) |
835 | return NULL; |
836 | |
837 | - ep = index_to_ep(hsotg, idx, dir); |
838 | - |
839 | - if (idx && ep->dir_in != dir) |
840 | - return NULL; |
841 | - |
842 | - return ep; |
843 | + return index_to_ep(hsotg, idx, dir); |
844 | } |
845 | |
846 | /** |
847 | diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c |
848 | index 440dbf55ddf70..90ec65d31059f 100644 |
849 | --- a/drivers/usb/dwc3/core.c |
850 | +++ b/drivers/usb/dwc3/core.c |
851 | @@ -1718,7 +1718,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) |
852 | if (PMSG_IS_AUTO(msg)) |
853 | break; |
854 | |
855 | - ret = dwc3_core_init(dwc); |
856 | + ret = dwc3_core_init_for_resume(dwc); |
857 | if (ret) |
858 | return ret; |
859 | |
860 | diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c |
861 | index 30313b233680d..99c7fc0d1d597 100644 |
862 | --- a/drivers/usb/gadget/legacy/ether.c |
863 | +++ b/drivers/usb/gadget/legacy/ether.c |
864 | @@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev) |
865 | struct usb_descriptor_header *usb_desc; |
866 | |
867 | usb_desc = usb_otg_descriptor_alloc(gadget); |
868 | - if (!usb_desc) |
869 | + if (!usb_desc) { |
870 | + status = -ENOMEM; |
871 | goto fail1; |
872 | + } |
873 | usb_otg_descriptor_init(gadget, usb_desc); |
874 | otg_desc[0] = usb_desc; |
875 | otg_desc[1] = NULL; |
876 | diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c |
877 | index 45c54d56ecbd5..b45e5bf089979 100644 |
878 | --- a/drivers/usb/host/xhci-mtk-sch.c |
879 | +++ b/drivers/usb/host/xhci-mtk-sch.c |
880 | @@ -200,6 +200,8 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev, |
881 | |
882 | sch_ep->sch_tt = tt; |
883 | sch_ep->ep = ep; |
884 | + INIT_LIST_HEAD(&sch_ep->endpoint); |
885 | + INIT_LIST_HEAD(&sch_ep->tt_endpoint); |
886 | |
887 | return sch_ep; |
888 | } |
889 | @@ -373,6 +375,7 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw, |
890 | sch_ep->bw_budget_table[j]; |
891 | } |
892 | } |
893 | + sch_ep->allocated = used; |
894 | } |
895 | |
896 | static int check_sch_tt(struct usb_device *udev, |
897 | @@ -541,6 +544,22 @@ static int check_sch_bw(struct usb_device *udev, |
898 | return 0; |
899 | } |
900 | |
901 | +static void destroy_sch_ep(struct usb_device *udev, |
902 | + struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep) |
903 | +{ |
904 | + /* only release ep bw check passed by check_sch_bw() */ |
905 | + if (sch_ep->allocated) |
906 | + update_bus_bw(sch_bw, sch_ep, 0); |
907 | + |
908 | + list_del(&sch_ep->endpoint); |
909 | + |
910 | + if (sch_ep->sch_tt) { |
911 | + list_del(&sch_ep->tt_endpoint); |
912 | + drop_tt(udev); |
913 | + } |
914 | + kfree(sch_ep); |
915 | +} |
916 | + |
917 | static bool need_bw_sch(struct usb_host_endpoint *ep, |
918 | enum usb_device_speed speed, int has_tt) |
919 | { |
920 | @@ -583,6 +602,8 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk) |
921 | |
922 | mtk->sch_array = sch_array; |
923 | |
924 | + INIT_LIST_HEAD(&mtk->bw_ep_chk_list); |
925 | + |
926 | return 0; |
927 | } |
928 | EXPORT_SYMBOL_GPL(xhci_mtk_sch_init); |
929 | @@ -601,19 +622,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, |
930 | struct xhci_ep_ctx *ep_ctx; |
931 | struct xhci_slot_ctx *slot_ctx; |
932 | struct xhci_virt_device *virt_dev; |
933 | - struct mu3h_sch_bw_info *sch_bw; |
934 | struct mu3h_sch_ep_info *sch_ep; |
935 | - struct mu3h_sch_bw_info *sch_array; |
936 | unsigned int ep_index; |
937 | - int bw_index; |
938 | - int ret = 0; |
939 | |
940 | xhci = hcd_to_xhci(hcd); |
941 | virt_dev = xhci->devs[udev->slot_id]; |
942 | ep_index = xhci_get_endpoint_index(&ep->desc); |
943 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
944 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
945 | - sch_array = mtk->sch_array; |
946 | |
947 | xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n", |
948 | __func__, usb_endpoint_type(&ep->desc), udev->speed, |
949 | @@ -632,35 +648,13 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, |
950 | return 0; |
951 | } |
952 | |
953 | - bw_index = get_bw_index(xhci, udev, ep); |
954 | - sch_bw = &sch_array[bw_index]; |
955 | - |
956 | sch_ep = create_sch_ep(udev, ep, ep_ctx); |
957 | if (IS_ERR_OR_NULL(sch_ep)) |
958 | return -ENOMEM; |
959 | |
960 | setup_sch_info(udev, ep_ctx, sch_ep); |
961 | |
962 | - ret = check_sch_bw(udev, sch_bw, sch_ep); |
963 | - if (ret) { |
964 | - xhci_err(xhci, "Not enough bandwidth!\n"); |
965 | - if (is_fs_or_ls(udev->speed)) |
966 | - drop_tt(udev); |
967 | - |
968 | - kfree(sch_ep); |
969 | - return -ENOSPC; |
970 | - } |
971 | - |
972 | - list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list); |
973 | - |
974 | - ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts) |
975 | - | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode)); |
976 | - ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset) |
977 | - | EP_BREPEAT(sch_ep->repeat)); |
978 | - |
979 | - xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n", |
980 | - sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode, |
981 | - sch_ep->offset, sch_ep->repeat); |
982 | + list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list); |
983 | |
984 | return 0; |
985 | } |
986 | @@ -675,7 +669,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, |
987 | struct xhci_virt_device *virt_dev; |
988 | struct mu3h_sch_bw_info *sch_array; |
989 | struct mu3h_sch_bw_info *sch_bw; |
990 | - struct mu3h_sch_ep_info *sch_ep; |
991 | + struct mu3h_sch_ep_info *sch_ep, *tmp; |
992 | int bw_index; |
993 | |
994 | xhci = hcd_to_xhci(hcd); |
995 | @@ -694,17 +688,79 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, |
996 | bw_index = get_bw_index(xhci, udev, ep); |
997 | sch_bw = &sch_array[bw_index]; |
998 | |
999 | - list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) { |
1000 | + list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) { |
1001 | if (sch_ep->ep == ep) { |
1002 | - update_bus_bw(sch_bw, sch_ep, 0); |
1003 | - list_del(&sch_ep->endpoint); |
1004 | - if (is_fs_or_ls(udev->speed)) { |
1005 | - list_del(&sch_ep->tt_endpoint); |
1006 | - drop_tt(udev); |
1007 | - } |
1008 | - kfree(sch_ep); |
1009 | + destroy_sch_ep(udev, sch_bw, sch_ep); |
1010 | break; |
1011 | } |
1012 | } |
1013 | } |
1014 | EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk); |
1015 | + |
1016 | +int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
1017 | +{ |
1018 | + struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); |
1019 | + struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
1020 | + struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; |
1021 | + struct mu3h_sch_bw_info *sch_bw; |
1022 | + struct mu3h_sch_ep_info *sch_ep, *tmp; |
1023 | + int bw_index, ret; |
1024 | + |
1025 | + xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev)); |
1026 | + |
1027 | + list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) { |
1028 | + bw_index = get_bw_index(xhci, udev, sch_ep->ep); |
1029 | + sch_bw = &mtk->sch_array[bw_index]; |
1030 | + |
1031 | + ret = check_sch_bw(udev, sch_bw, sch_ep); |
1032 | + if (ret) { |
1033 | + xhci_err(xhci, "Not enough bandwidth!\n"); |
1034 | + return -ENOSPC; |
1035 | + } |
1036 | + } |
1037 | + |
1038 | + list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) { |
1039 | + struct xhci_ep_ctx *ep_ctx; |
1040 | + struct usb_host_endpoint *ep = sch_ep->ep; |
1041 | + unsigned int ep_index = xhci_get_endpoint_index(&ep->desc); |
1042 | + |
1043 | + bw_index = get_bw_index(xhci, udev, ep); |
1044 | + sch_bw = &mtk->sch_array[bw_index]; |
1045 | + |
1046 | + list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list); |
1047 | + |
1048 | + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
1049 | + ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts) |
1050 | + | EP_BCSCOUNT(sch_ep->cs_count) |
1051 | + | EP_BBM(sch_ep->burst_mode)); |
1052 | + ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset) |
1053 | + | EP_BREPEAT(sch_ep->repeat)); |
1054 | + |
1055 | + xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n", |
1056 | + sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode, |
1057 | + sch_ep->offset, sch_ep->repeat); |
1058 | + } |
1059 | + |
1060 | + return xhci_check_bandwidth(hcd, udev); |
1061 | +} |
1062 | +EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth); |
1063 | + |
1064 | +void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
1065 | +{ |
1066 | + struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); |
1067 | + struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
1068 | + struct mu3h_sch_bw_info *sch_bw; |
1069 | + struct mu3h_sch_ep_info *sch_ep, *tmp; |
1070 | + int bw_index; |
1071 | + |
1072 | + xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev)); |
1073 | + |
1074 | + list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) { |
1075 | + bw_index = get_bw_index(xhci, udev, sch_ep->ep); |
1076 | + sch_bw = &mtk->sch_array[bw_index]; |
1077 | + destroy_sch_ep(udev, sch_bw, sch_ep); |
1078 | + } |
1079 | + |
1080 | + xhci_reset_bandwidth(hcd, udev); |
1081 | +} |
1082 | +EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth); |
1083 | diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c |
1084 | index 85f1ff0399a9c..09b67219fd146 100644 |
1085 | --- a/drivers/usb/host/xhci-mtk.c |
1086 | +++ b/drivers/usb/host/xhci-mtk.c |
1087 | @@ -347,6 +347,8 @@ static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable) |
1088 | static int xhci_mtk_setup(struct usb_hcd *hcd); |
1089 | static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = { |
1090 | .reset = xhci_mtk_setup, |
1091 | + .check_bandwidth = xhci_mtk_check_bandwidth, |
1092 | + .reset_bandwidth = xhci_mtk_reset_bandwidth, |
1093 | }; |
1094 | |
1095 | static struct hc_driver __read_mostly xhci_mtk_hc_driver; |
1096 | diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h |
1097 | index 5ac458b7d2e0e..734c5513aa1bf 100644 |
1098 | --- a/drivers/usb/host/xhci-mtk.h |
1099 | +++ b/drivers/usb/host/xhci-mtk.h |
1100 | @@ -59,6 +59,7 @@ struct mu3h_sch_bw_info { |
1101 | * @ep_type: endpoint type |
1102 | * @maxpkt: max packet size of endpoint |
1103 | * @ep: address of usb_host_endpoint struct |
1104 | + * @allocated: the bandwidth is aready allocated from bus_bw |
1105 | * @offset: which uframe of the interval that transfer should be |
1106 | * scheduled first time within the interval |
1107 | * @repeat: the time gap between two uframes that transfers are |
1108 | @@ -86,6 +87,7 @@ struct mu3h_sch_ep_info { |
1109 | u32 ep_type; |
1110 | u32 maxpkt; |
1111 | void *ep; |
1112 | + bool allocated; |
1113 | /* |
1114 | * mtk xHCI scheduling information put into reserved DWs |
1115 | * in ep context |
1116 | @@ -131,6 +133,7 @@ struct xhci_hcd_mtk { |
1117 | struct device *dev; |
1118 | struct usb_hcd *hcd; |
1119 | struct mu3h_sch_bw_info *sch_array; |
1120 | + struct list_head bw_ep_chk_list; |
1121 | struct mu3c_ippc_regs __iomem *ippc_regs; |
1122 | bool has_ippc; |
1123 | int num_u2_ports; |
1124 | @@ -166,6 +169,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, |
1125 | struct usb_host_endpoint *ep); |
1126 | void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, |
1127 | struct usb_host_endpoint *ep); |
1128 | +int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); |
1129 | +void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); |
1130 | |
1131 | #else |
1132 | static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, |
1133 | @@ -179,6 +184,16 @@ static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, |
1134 | { |
1135 | } |
1136 | |
1137 | +static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, |
1138 | + struct usb_device *udev) |
1139 | +{ |
1140 | + return 0; |
1141 | +} |
1142 | + |
1143 | +static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, |
1144 | + struct usb_device *udev) |
1145 | +{ |
1146 | +} |
1147 | #endif |
1148 | |
1149 | #endif /* _XHCI_MTK_H_ */ |
1150 | diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c |
1151 | index 60651a50770f9..f27d5c2c42f31 100644 |
1152 | --- a/drivers/usb/host/xhci-mvebu.c |
1153 | +++ b/drivers/usb/host/xhci-mvebu.c |
1154 | @@ -8,6 +8,7 @@ |
1155 | #include <linux/mbus.h> |
1156 | #include <linux/of.h> |
1157 | #include <linux/platform_device.h> |
1158 | +#include <linux/phy/phy.h> |
1159 | |
1160 | #include <linux/usb.h> |
1161 | #include <linux/usb/hcd.h> |
1162 | @@ -74,6 +75,47 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) |
1163 | return 0; |
1164 | } |
1165 | |
1166 | +int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) |
1167 | +{ |
1168 | + struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
1169 | + struct device *dev = hcd->self.controller; |
1170 | + struct phy *phy; |
1171 | + int ret; |
1172 | + |
1173 | + /* Old bindings miss the PHY handle */ |
1174 | + phy = of_phy_get(dev->of_node, "usb3-phy"); |
1175 | + if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER) |
1176 | + return -EPROBE_DEFER; |
1177 | + else if (IS_ERR(phy)) |
1178 | + goto phy_out; |
1179 | + |
1180 | + ret = phy_init(phy); |
1181 | + if (ret) |
1182 | + goto phy_put; |
1183 | + |
1184 | + ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS); |
1185 | + if (ret) |
1186 | + goto phy_exit; |
1187 | + |
1188 | + ret = phy_power_on(phy); |
1189 | + if (ret == -EOPNOTSUPP) { |
1190 | + /* Skip initializatin of XHCI PHY when it is unsupported by firmware */ |
1191 | + dev_warn(dev, "PHY unsupported by firmware\n"); |
1192 | + xhci->quirks |= XHCI_SKIP_PHY_INIT; |
1193 | + } |
1194 | + if (ret) |
1195 | + goto phy_exit; |
1196 | + |
1197 | + phy_power_off(phy); |
1198 | +phy_exit: |
1199 | + phy_exit(phy); |
1200 | +phy_put: |
1201 | + phy_put(phy); |
1202 | +phy_out: |
1203 | + |
1204 | + return 0; |
1205 | +} |
1206 | + |
1207 | int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) |
1208 | { |
1209 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
1210 | diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h |
1211 | index ca0a3a5721dd7..74b4d21a498a0 100644 |
1212 | --- a/drivers/usb/host/xhci-mvebu.h |
1213 | +++ b/drivers/usb/host/xhci-mvebu.h |
1214 | @@ -12,6 +12,7 @@ struct usb_hcd; |
1215 | |
1216 | #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU) |
1217 | int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd); |
1218 | +int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd); |
1219 | int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd); |
1220 | #else |
1221 | static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) |
1222 | @@ -19,6 +20,11 @@ static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) |
1223 | return 0; |
1224 | } |
1225 | |
1226 | +static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) |
1227 | +{ |
1228 | + return 0; |
1229 | +} |
1230 | + |
1231 | static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) |
1232 | { |
1233 | return 0; |
1234 | diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c |
1235 | index 52c625c023410..84cfa85442852 100644 |
1236 | --- a/drivers/usb/host/xhci-plat.c |
1237 | +++ b/drivers/usb/host/xhci-plat.c |
1238 | @@ -44,6 +44,16 @@ static void xhci_priv_plat_start(struct usb_hcd *hcd) |
1239 | priv->plat_start(hcd); |
1240 | } |
1241 | |
1242 | +static int xhci_priv_plat_setup(struct usb_hcd *hcd) |
1243 | +{ |
1244 | + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); |
1245 | + |
1246 | + if (!priv->plat_setup) |
1247 | + return 0; |
1248 | + |
1249 | + return priv->plat_setup(hcd); |
1250 | +} |
1251 | + |
1252 | static int xhci_priv_init_quirk(struct usb_hcd *hcd) |
1253 | { |
1254 | struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); |
1255 | @@ -101,6 +111,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = { |
1256 | }; |
1257 | |
1258 | static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = { |
1259 | + .plat_setup = xhci_mvebu_a3700_plat_setup, |
1260 | .init_quirk = xhci_mvebu_a3700_init_quirk, |
1261 | }; |
1262 | |
1263 | @@ -163,6 +174,8 @@ static int xhci_plat_probe(struct platform_device *pdev) |
1264 | struct usb_hcd *hcd; |
1265 | int ret; |
1266 | int irq; |
1267 | + struct xhci_plat_priv *priv = NULL; |
1268 | + |
1269 | |
1270 | if (usb_disabled()) |
1271 | return -ENODEV; |
1272 | @@ -257,8 +270,7 @@ static int xhci_plat_probe(struct platform_device *pdev) |
1273 | |
1274 | priv_match = of_device_get_match_data(&pdev->dev); |
1275 | if (priv_match) { |
1276 | - struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); |
1277 | - |
1278 | + priv = hcd_to_xhci_priv(hcd); |
1279 | /* Just copy data for now */ |
1280 | if (priv_match) |
1281 | *priv = *priv_match; |
1282 | @@ -307,6 +319,16 @@ static int xhci_plat_probe(struct platform_device *pdev) |
1283 | |
1284 | hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node); |
1285 | xhci->shared_hcd->tpl_support = hcd->tpl_support; |
1286 | + |
1287 | + if (priv) { |
1288 | + ret = xhci_priv_plat_setup(hcd); |
1289 | + if (ret) |
1290 | + goto disable_usb_phy; |
1291 | + } |
1292 | + |
1293 | + if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))) |
1294 | + hcd->skip_phy_initialization = 1; |
1295 | + |
1296 | ret = usb_add_hcd(hcd, irq, IRQF_SHARED); |
1297 | if (ret) |
1298 | goto disable_usb_phy; |
1299 | diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h |
1300 | index 5681723fc9cd7..b7749151bdfb8 100644 |
1301 | --- a/drivers/usb/host/xhci-plat.h |
1302 | +++ b/drivers/usb/host/xhci-plat.h |
1303 | @@ -13,6 +13,7 @@ |
1304 | struct xhci_plat_priv { |
1305 | const char *firmware_name; |
1306 | unsigned long long quirks; |
1307 | + int (*plat_setup)(struct usb_hcd *); |
1308 | void (*plat_start)(struct usb_hcd *); |
1309 | int (*init_quirk)(struct usb_hcd *); |
1310 | int (*resume_quirk)(struct usb_hcd *); |
1311 | diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c |
1312 | index 52e156c018042..900ea91fb3c6b 100644 |
1313 | --- a/drivers/usb/host/xhci-ring.c |
1314 | +++ b/drivers/usb/host/xhci-ring.c |
1315 | @@ -695,11 +695,16 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, |
1316 | dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, |
1317 | DMA_FROM_DEVICE); |
1318 | /* for in tranfers we need to copy the data from bounce to sg */ |
1319 | - len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, |
1320 | - seg->bounce_len, seg->bounce_offs); |
1321 | - if (len != seg->bounce_len) |
1322 | - xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", |
1323 | - len, seg->bounce_len); |
1324 | + if (urb->num_sgs) { |
1325 | + len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, |
1326 | + seg->bounce_len, seg->bounce_offs); |
1327 | + if (len != seg->bounce_len) |
1328 | + xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", |
1329 | + len, seg->bounce_len); |
1330 | + } else { |
1331 | + memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf, |
1332 | + seg->bounce_len); |
1333 | + } |
1334 | seg->bounce_len = 0; |
1335 | seg->bounce_offs = 0; |
1336 | } |
1337 | @@ -3263,12 +3268,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, |
1338 | |
1339 | /* create a max max_pkt sized bounce buffer pointed to by last trb */ |
1340 | if (usb_urb_dir_out(urb)) { |
1341 | - len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, |
1342 | - seg->bounce_buf, new_buff_len, enqd_len); |
1343 | - if (len != new_buff_len) |
1344 | - xhci_warn(xhci, |
1345 | - "WARN Wrong bounce buffer write length: %zu != %d\n", |
1346 | - len, new_buff_len); |
1347 | + if (urb->num_sgs) { |
1348 | + len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, |
1349 | + seg->bounce_buf, new_buff_len, enqd_len); |
1350 | + if (len != new_buff_len) |
1351 | + xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", |
1352 | + len, new_buff_len); |
1353 | + } else { |
1354 | + memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); |
1355 | + } |
1356 | + |
1357 | seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, |
1358 | max_pkt, DMA_TO_DEVICE); |
1359 | } else { |
1360 | diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c |
1361 | index 70aa3055c41e7..91330517444e7 100644 |
1362 | --- a/drivers/usb/host/xhci.c |
1363 | +++ b/drivers/usb/host/xhci.c |
1364 | @@ -2861,7 +2861,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, |
1365 | * else should be touching the xhci->devs[slot_id] structure, so we |
1366 | * don't need to take the xhci->lock for manipulating that. |
1367 | */ |
1368 | -static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
1369 | +int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
1370 | { |
1371 | int i; |
1372 | int ret = 0; |
1373 | @@ -2959,7 +2959,7 @@ command_cleanup: |
1374 | return ret; |
1375 | } |
1376 | |
1377 | -static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
1378 | +void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
1379 | { |
1380 | struct xhci_hcd *xhci; |
1381 | struct xhci_virt_device *virt_dev; |
1382 | @@ -5380,6 +5380,10 @@ void xhci_init_driver(struct hc_driver *drv, |
1383 | drv->reset = over->reset; |
1384 | if (over->start) |
1385 | drv->start = over->start; |
1386 | + if (over->check_bandwidth) |
1387 | + drv->check_bandwidth = over->check_bandwidth; |
1388 | + if (over->reset_bandwidth) |
1389 | + drv->reset_bandwidth = over->reset_bandwidth; |
1390 | } |
1391 | } |
1392 | EXPORT_SYMBOL_GPL(xhci_init_driver); |
1393 | diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h |
1394 | index b483317bcb17b..1ad1d6e9e9979 100644 |
1395 | --- a/drivers/usb/host/xhci.h |
1396 | +++ b/drivers/usb/host/xhci.h |
1397 | @@ -1873,6 +1873,7 @@ struct xhci_hcd { |
1398 | #define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33) |
1399 | #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) |
1400 | #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35) |
1401 | +#define XHCI_SKIP_PHY_INIT BIT_ULL(37) |
1402 | #define XHCI_DISABLE_SPARSE BIT_ULL(38) |
1403 | |
1404 | unsigned int num_active_eps; |
1405 | @@ -1911,6 +1912,8 @@ struct xhci_driver_overrides { |
1406 | size_t extra_priv_size; |
1407 | int (*reset)(struct usb_hcd *hcd); |
1408 | int (*start)(struct usb_hcd *hcd); |
1409 | + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); |
1410 | + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); |
1411 | }; |
1412 | |
1413 | #define XHCI_CFC_DELAY 10 |
1414 | @@ -2063,6 +2066,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); |
1415 | void xhci_shutdown(struct usb_hcd *hcd); |
1416 | void xhci_init_driver(struct hc_driver *drv, |
1417 | const struct xhci_driver_overrides *over); |
1418 | +int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); |
1419 | +void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); |
1420 | int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); |
1421 | int xhci_ext_cap_init(struct xhci_hcd *xhci); |
1422 | |
1423 | diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c |
1424 | index 05cdad13933b1..cfc16943979d5 100644 |
1425 | --- a/drivers/usb/renesas_usbhs/fifo.c |
1426 | +++ b/drivers/usb/renesas_usbhs/fifo.c |
1427 | @@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) |
1428 | } |
1429 | |
1430 | usbhs_pipe_clear_without_sequence(pipe, 0, 0); |
1431 | + usbhs_pipe_running(pipe, 0); |
1432 | |
1433 | __usbhsf_pkt_del(pkt); |
1434 | } |
1435 | diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
1436 | index a90801ef00554..361a2e3ccad8d 100644 |
1437 | --- a/drivers/usb/serial/cp210x.c |
1438 | +++ b/drivers/usb/serial/cp210x.c |
1439 | @@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = { |
1440 | { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ |
1441 | { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ |
1442 | { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ |
1443 | + { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */ |
1444 | { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */ |
1445 | { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ |
1446 | { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ |
1447 | @@ -201,6 +202,7 @@ static const struct usb_device_id id_table[] = { |
1448 | { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ |
1449 | { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ |
1450 | { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ |
1451 | + { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ |
1452 | { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ |
1453 | { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ |
1454 | { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ |
1455 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
1456 | index fd41b07b5aaf1..f49eae18500cc 100644 |
1457 | --- a/drivers/usb/serial/option.c |
1458 | +++ b/drivers/usb/serial/option.c |
1459 | @@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb); |
1460 | #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 |
1461 | #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085 |
1462 | #define CINTERION_PRODUCT_CLS8 0x00b0 |
1463 | +#define CINTERION_PRODUCT_MV31_MBIM 0x00b3 |
1464 | +#define CINTERION_PRODUCT_MV31_RMNET 0x00b7 |
1465 | |
1466 | /* Olivetti products */ |
1467 | #define OLIVETTI_VENDOR_ID 0x0b3c |
1468 | @@ -1914,6 +1916,10 @@ static const struct usb_device_id option_ids[] = { |
1469 | { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, |
1470 | { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ |
1471 | { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, |
1472 | + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff), |
1473 | + .driver_info = RSVD(3)}, |
1474 | + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), |
1475 | + .driver_info = RSVD(0)}, |
1476 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), |
1477 | .driver_info = RSVD(4) }, |
1478 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), |
1479 | diff --git a/fs/afs/main.c b/fs/afs/main.c |
1480 | index c9c45d7078bd1..5cd26af2464c9 100644 |
1481 | --- a/fs/afs/main.c |
1482 | +++ b/fs/afs/main.c |
1483 | @@ -186,7 +186,7 @@ static int __init afs_init(void) |
1484 | goto error_cache; |
1485 | #endif |
1486 | |
1487 | - ret = register_pernet_subsys(&afs_net_ops); |
1488 | + ret = register_pernet_device(&afs_net_ops); |
1489 | if (ret < 0) |
1490 | goto error_net; |
1491 | |
1492 | @@ -206,7 +206,7 @@ static int __init afs_init(void) |
1493 | error_proc: |
1494 | afs_fs_exit(); |
1495 | error_fs: |
1496 | - unregister_pernet_subsys(&afs_net_ops); |
1497 | + unregister_pernet_device(&afs_net_ops); |
1498 | error_net: |
1499 | #ifdef CONFIG_AFS_FSCACHE |
1500 | fscache_unregister_netfs(&afs_cache_netfs); |
1501 | @@ -237,7 +237,7 @@ static void __exit afs_exit(void) |
1502 | |
1503 | proc_remove(afs_proc_symlink); |
1504 | afs_fs_exit(); |
1505 | - unregister_pernet_subsys(&afs_net_ops); |
1506 | + unregister_pernet_device(&afs_net_ops); |
1507 | #ifdef CONFIG_AFS_FSCACHE |
1508 | fscache_unregister_netfs(&afs_cache_netfs); |
1509 | #endif |
1510 | diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c |
1511 | index 5a35850ccb1ab..9ae9a514676c3 100644 |
1512 | --- a/fs/cifs/dir.c |
1513 | +++ b/fs/cifs/dir.c |
1514 | @@ -738,6 +738,7 @@ static int |
1515 | cifs_d_revalidate(struct dentry *direntry, unsigned int flags) |
1516 | { |
1517 | struct inode *inode; |
1518 | + int rc; |
1519 | |
1520 | if (flags & LOOKUP_RCU) |
1521 | return -ECHILD; |
1522 | @@ -747,8 +748,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags) |
1523 | if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode))) |
1524 | CIFS_I(inode)->time = 0; /* force reval */ |
1525 | |
1526 | - if (cifs_revalidate_dentry(direntry)) |
1527 | - return 0; |
1528 | + rc = cifs_revalidate_dentry(direntry); |
1529 | + if (rc) { |
1530 | + cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc); |
1531 | + switch (rc) { |
1532 | + case -ENOENT: |
1533 | + case -ESTALE: |
1534 | + /* |
1535 | + * Those errors mean the dentry is invalid |
1536 | + * (file was deleted or recreated) |
1537 | + */ |
1538 | + return 0; |
1539 | + default: |
1540 | + /* |
1541 | + * Otherwise some unexpected error happened |
1542 | + * report it as-is to VFS layer |
1543 | + */ |
1544 | + return rc; |
1545 | + } |
1546 | + } |
1547 | else { |
1548 | /* |
1549 | * If the inode wasn't known to be a dfs entry when |
1550 | diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h |
1551 | index 2482978f09486..739556e385be8 100644 |
1552 | --- a/fs/cifs/smb2pdu.h |
1553 | +++ b/fs/cifs/smb2pdu.h |
1554 | @@ -227,7 +227,7 @@ struct smb2_negotiate_req { |
1555 | __le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */ |
1556 | __le16 NegotiateContextCount; /* SMB3.1.1 only. MBZ earlier */ |
1557 | __le16 Reserved2; |
1558 | - __le16 Dialects[1]; /* One dialect (vers=) at a time for now */ |
1559 | + __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */ |
1560 | } __packed; |
1561 | |
1562 | /* Dialects */ |
1563 | diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c |
1564 | index 4ffbf8f965814..eab7940bfebef 100644 |
1565 | --- a/fs/cifs/transport.c |
1566 | +++ b/fs/cifs/transport.c |
1567 | @@ -659,10 +659,22 @@ wait_for_compound_request(struct TCP_Server_Info *server, int num, |
1568 | spin_lock(&server->req_lock); |
1569 | if (*credits < num) { |
1570 | /* |
1571 | - * Return immediately if not too many requests in flight since |
1572 | - * we will likely be stuck on waiting for credits. |
1573 | + * If the server is tight on resources or just gives us less |
1574 | + * credits for other reasons (e.g. requests are coming out of |
1575 | + * order and the server delays granting more credits until it |
1576 | + * processes a missing mid) and we exhausted most available |
1577 | + * credits there may be situations when we try to send |
1578 | + * a compound request but we don't have enough credits. At this |
1579 | + * point the client needs to decide if it should wait for |
1580 | + * additional credits or fail the request. If at least one |
1581 | + * request is in flight there is a high probability that the |
1582 | + * server will return enough credits to satisfy this compound |
1583 | + * request. |
1584 | + * |
1585 | + * Return immediately if no requests in flight since we will be |
1586 | + * stuck on waiting for credits. |
1587 | */ |
1588 | - if (server->in_flight < num - *credits) { |
1589 | + if (server->in_flight == 0) { |
1590 | spin_unlock(&server->req_lock); |
1591 | return -ENOTSUPP; |
1592 | } |
1593 | diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c |
1594 | index 5fff7cb3582f0..cf3af2140c3d8 100644 |
1595 | --- a/fs/hugetlbfs/inode.c |
1596 | +++ b/fs/hugetlbfs/inode.c |
1597 | @@ -675,9 +675,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, |
1598 | |
1599 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); |
1600 | |
1601 | + set_page_huge_active(page); |
1602 | /* |
1603 | * unlock_page because locked by add_to_page_cache() |
1604 | - * page_put due to reference from alloc_huge_page() |
1605 | + * put_page() due to reference from alloc_huge_page() |
1606 | */ |
1607 | unlock_page(page); |
1608 | put_page(page); |
1609 | diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c |
1610 | index 29abdb1d3b5c6..6509ec3cb3730 100644 |
1611 | --- a/fs/overlayfs/dir.c |
1612 | +++ b/fs/overlayfs/dir.c |
1613 | @@ -940,8 +940,8 @@ static char *ovl_get_redirect(struct dentry *dentry, bool abs_redirect) |
1614 | |
1615 | buflen -= thislen; |
1616 | memcpy(&buf[buflen], name, thislen); |
1617 | - tmp = dget_dlock(d->d_parent); |
1618 | spin_unlock(&d->d_lock); |
1619 | + tmp = dget_parent(d); |
1620 | |
1621 | dput(d); |
1622 | d = tmp; |
1623 | diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h |
1624 | index 8a03f392f3680..0e080ba5efbcc 100644 |
1625 | --- a/include/linux/hugetlb.h |
1626 | +++ b/include/linux/hugetlb.h |
1627 | @@ -590,6 +590,8 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, |
1628 | } |
1629 | #endif |
1630 | |
1631 | +void set_page_huge_active(struct page *page); |
1632 | + |
1633 | #else /* CONFIG_HUGETLB_PAGE */ |
1634 | struct hstate {}; |
1635 | |
1636 | diff --git a/include/linux/msi.h b/include/linux/msi.h |
1637 | index 8ad679e9d9c04..d695e2eb2092d 100644 |
1638 | --- a/include/linux/msi.h |
1639 | +++ b/include/linux/msi.h |
1640 | @@ -139,6 +139,12 @@ struct msi_desc { |
1641 | list_for_each_entry((desc), dev_to_msi_list((dev)), list) |
1642 | #define for_each_msi_entry_safe(desc, tmp, dev) \ |
1643 | list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) |
1644 | +#define for_each_msi_vector(desc, __irq, dev) \ |
1645 | + for_each_msi_entry((desc), (dev)) \ |
1646 | + if ((desc)->irq) \ |
1647 | + for (__irq = (desc)->irq; \ |
1648 | + __irq < ((desc)->irq + (desc)->nvec_used); \ |
1649 | + __irq++) |
1650 | |
1651 | #ifdef CONFIG_IRQ_MSI_IOMMU |
1652 | static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) |
1653 | diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h |
1654 | index 3d03756e10699..b2ceec7b280d4 100644 |
1655 | --- a/include/net/sch_generic.h |
1656 | +++ b/include/net/sch_generic.h |
1657 | @@ -1158,7 +1158,7 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, |
1658 | old = *pold; |
1659 | *pold = new; |
1660 | if (old != NULL) |
1661 | - qdisc_tree_flush_backlog(old); |
1662 | + qdisc_purge_queue(old); |
1663 | sch_tree_unlock(sch); |
1664 | |
1665 | return old; |
1666 | diff --git a/init/init_task.c b/init/init_task.c |
1667 | index df7041be96fca..5d8359c44564a 100644 |
1668 | --- a/init/init_task.c |
1669 | +++ b/init/init_task.c |
1670 | @@ -171,7 +171,8 @@ struct task_struct init_task |
1671 | .lockdep_recursion = 0, |
1672 | #endif |
1673 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1674 | - .ret_stack = NULL, |
1675 | + .ret_stack = NULL, |
1676 | + .tracing_graph_pause = ATOMIC_INIT(0), |
1677 | #endif |
1678 | #if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION) |
1679 | .trace_recursion = 0, |
1680 | diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c |
1681 | index 5a8b4dfdb1419..c2f0aa818b7af 100644 |
1682 | --- a/kernel/bpf/cgroup.c |
1683 | +++ b/kernel/bpf/cgroup.c |
1684 | @@ -1109,6 +1109,11 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, |
1685 | goto out; |
1686 | } |
1687 | |
1688 | + if (ctx.optlen < 0) { |
1689 | + ret = -EFAULT; |
1690 | + goto out; |
1691 | + } |
1692 | + |
1693 | if (copy_from_user(ctx.optval, optval, |
1694 | min(ctx.optlen, max_optlen)) != 0) { |
1695 | ret = -EFAULT; |
1696 | @@ -1126,7 +1131,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, |
1697 | goto out; |
1698 | } |
1699 | |
1700 | - if (ctx.optlen > max_optlen) { |
1701 | + if (ctx.optlen > max_optlen || ctx.optlen < 0) { |
1702 | ret = -EFAULT; |
1703 | goto out; |
1704 | } |
1705 | diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c |
1706 | index eb95f6106a1ee..5d3da0db092ff 100644 |
1707 | --- a/kernel/irq/msi.c |
1708 | +++ b/kernel/irq/msi.c |
1709 | @@ -437,22 +437,22 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, |
1710 | |
1711 | can_reserve = msi_check_reservation_mode(domain, info, dev); |
1712 | |
1713 | - for_each_msi_entry(desc, dev) { |
1714 | - virq = desc->irq; |
1715 | - if (desc->nvec_used == 1) |
1716 | - dev_dbg(dev, "irq %d for MSI\n", virq); |
1717 | - else |
1718 | + /* |
1719 | + * This flag is set by the PCI layer as we need to activate |
1720 | + * the MSI entries before the PCI layer enables MSI in the |
1721 | + * card. Otherwise the card latches a random msi message. |
1722 | + */ |
1723 | + if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY)) |
1724 | + goto skip_activate; |
1725 | + |
1726 | + for_each_msi_vector(desc, i, dev) { |
1727 | + if (desc->irq == i) { |
1728 | + virq = desc->irq; |
1729 | dev_dbg(dev, "irq [%d-%d] for MSI\n", |
1730 | virq, virq + desc->nvec_used - 1); |
1731 | - /* |
1732 | - * This flag is set by the PCI layer as we need to activate |
1733 | - * the MSI entries before the PCI layer enables MSI in the |
1734 | - * card. Otherwise the card latches a random msi message. |
1735 | - */ |
1736 | - if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY)) |
1737 | - continue; |
1738 | + } |
1739 | |
1740 | - irq_data = irq_domain_get_irq_data(domain, desc->irq); |
1741 | + irq_data = irq_domain_get_irq_data(domain, i); |
1742 | if (!can_reserve) { |
1743 | irqd_clr_can_reserve(irq_data); |
1744 | if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK) |
1745 | @@ -463,28 +463,24 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, |
1746 | goto cleanup; |
1747 | } |
1748 | |
1749 | +skip_activate: |
1750 | /* |
1751 | * If these interrupts use reservation mode, clear the activated bit |
1752 | * so request_irq() will assign the final vector. |
1753 | */ |
1754 | if (can_reserve) { |
1755 | - for_each_msi_entry(desc, dev) { |
1756 | - irq_data = irq_domain_get_irq_data(domain, desc->irq); |
1757 | + for_each_msi_vector(desc, i, dev) { |
1758 | + irq_data = irq_domain_get_irq_data(domain, i); |
1759 | irqd_clr_activated(irq_data); |
1760 | } |
1761 | } |
1762 | return 0; |
1763 | |
1764 | cleanup: |
1765 | - for_each_msi_entry(desc, dev) { |
1766 | - struct irq_data *irqd; |
1767 | - |
1768 | - if (desc->irq == virq) |
1769 | - break; |
1770 | - |
1771 | - irqd = irq_domain_get_irq_data(domain, desc->irq); |
1772 | - if (irqd_is_activated(irqd)) |
1773 | - irq_domain_deactivate_irq(irqd); |
1774 | + for_each_msi_vector(desc, i, dev) { |
1775 | + irq_data = irq_domain_get_irq_data(domain, i); |
1776 | + if (irqd_is_activated(irq_data)) |
1777 | + irq_domain_deactivate_irq(irq_data); |
1778 | } |
1779 | msi_domain_free_irqs(domain, dev); |
1780 | return ret; |
1781 | diff --git a/kernel/kprobes.c b/kernel/kprobes.c |
1782 | index 283c8b01ce789..26ae92c12fc22 100644 |
1783 | --- a/kernel/kprobes.c |
1784 | +++ b/kernel/kprobes.c |
1785 | @@ -1972,6 +1972,10 @@ int register_kretprobe(struct kretprobe *rp) |
1786 | if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset)) |
1787 | return -EINVAL; |
1788 | |
1789 | + /* If only rp->kp.addr is specified, check reregistering kprobes */ |
1790 | + if (rp->kp.addr && check_kprobe_rereg(&rp->kp)) |
1791 | + return -EINVAL; |
1792 | + |
1793 | if (kretprobe_blacklist_size) { |
1794 | addr = kprobe_addr(&rp->kp); |
1795 | if (IS_ERR(addr)) |
1796 | diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c |
1797 | index 7950a0356042a..888cd00174fe3 100644 |
1798 | --- a/kernel/trace/fgraph.c |
1799 | +++ b/kernel/trace/fgraph.c |
1800 | @@ -367,7 +367,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) |
1801 | } |
1802 | |
1803 | if (t->ret_stack == NULL) { |
1804 | - atomic_set(&t->tracing_graph_pause, 0); |
1805 | atomic_set(&t->trace_overrun, 0); |
1806 | t->curr_ret_stack = -1; |
1807 | t->curr_ret_depth = -1; |
1808 | @@ -462,7 +461,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); |
1809 | static void |
1810 | graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) |
1811 | { |
1812 | - atomic_set(&t->tracing_graph_pause, 0); |
1813 | atomic_set(&t->trace_overrun, 0); |
1814 | t->ftrace_timestamp = 0; |
1815 | /* make curr_ret_stack visible before we add the ret_stack */ |
1816 | diff --git a/mm/compaction.c b/mm/compaction.c |
1817 | index 92470625f0b1e..88c3f6bad1aba 100644 |
1818 | --- a/mm/compaction.c |
1819 | +++ b/mm/compaction.c |
1820 | @@ -1276,7 +1276,7 @@ fast_isolate_freepages(struct compact_control *cc) |
1821 | { |
1822 | unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1); |
1823 | unsigned int nr_scanned = 0; |
1824 | - unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0; |
1825 | + unsigned long low_pfn, min_pfn, highest = 0; |
1826 | unsigned long nr_isolated = 0; |
1827 | unsigned long distance; |
1828 | struct page *page = NULL; |
1829 | @@ -1321,6 +1321,7 @@ fast_isolate_freepages(struct compact_control *cc) |
1830 | struct page *freepage; |
1831 | unsigned long flags; |
1832 | unsigned int order_scanned = 0; |
1833 | + unsigned long high_pfn = 0; |
1834 | |
1835 | if (!area->nr_free) |
1836 | continue; |
1837 | diff --git a/mm/huge_memory.c b/mm/huge_memory.c |
1838 | index 11aa763a31440..7bbf419bb86d6 100644 |
1839 | --- a/mm/huge_memory.c |
1840 | +++ b/mm/huge_memory.c |
1841 | @@ -2306,7 +2306,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
1842 | { |
1843 | spinlock_t *ptl; |
1844 | struct mmu_notifier_range range; |
1845 | - bool was_locked = false; |
1846 | + bool do_unlock_page = false; |
1847 | pmd_t _pmd; |
1848 | |
1849 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, |
1850 | @@ -2322,7 +2322,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
1851 | VM_BUG_ON(freeze && !page); |
1852 | if (page) { |
1853 | VM_WARN_ON_ONCE(!PageLocked(page)); |
1854 | - was_locked = true; |
1855 | if (page != pmd_page(*pmd)) |
1856 | goto out; |
1857 | } |
1858 | @@ -2331,19 +2330,29 @@ repeat: |
1859 | if (pmd_trans_huge(*pmd)) { |
1860 | if (!page) { |
1861 | page = pmd_page(*pmd); |
1862 | - if (unlikely(!trylock_page(page))) { |
1863 | - get_page(page); |
1864 | - _pmd = *pmd; |
1865 | - spin_unlock(ptl); |
1866 | - lock_page(page); |
1867 | - spin_lock(ptl); |
1868 | - if (unlikely(!pmd_same(*pmd, _pmd))) { |
1869 | - unlock_page(page); |
1870 | + /* |
1871 | + * An anonymous page must be locked, to ensure that a |
1872 | + * concurrent reuse_swap_page() sees stable mapcount; |
1873 | + * but reuse_swap_page() is not used on shmem or file, |
1874 | + * and page lock must not be taken when zap_pmd_range() |
1875 | + * calls __split_huge_pmd() while i_mmap_lock is held. |
1876 | + */ |
1877 | + if (PageAnon(page)) { |
1878 | + if (unlikely(!trylock_page(page))) { |
1879 | + get_page(page); |
1880 | + _pmd = *pmd; |
1881 | + spin_unlock(ptl); |
1882 | + lock_page(page); |
1883 | + spin_lock(ptl); |
1884 | + if (unlikely(!pmd_same(*pmd, _pmd))) { |
1885 | + unlock_page(page); |
1886 | + put_page(page); |
1887 | + page = NULL; |
1888 | + goto repeat; |
1889 | + } |
1890 | put_page(page); |
1891 | - page = NULL; |
1892 | - goto repeat; |
1893 | } |
1894 | - put_page(page); |
1895 | + do_unlock_page = true; |
1896 | } |
1897 | } |
1898 | if (PageMlocked(page)) |
1899 | @@ -2353,7 +2362,7 @@ repeat: |
1900 | __split_huge_pmd_locked(vma, pmd, range.start, freeze); |
1901 | out: |
1902 | spin_unlock(ptl); |
1903 | - if (!was_locked && page) |
1904 | + if (do_unlock_page) |
1905 | unlock_page(page); |
1906 | /* |
1907 | * No need to double call mmu_notifier->invalidate_range() callback. |
1908 | diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
1909 | index 3bc33fa838177..d5b03b9262d4f 100644 |
1910 | --- a/mm/hugetlb.c |
1911 | +++ b/mm/hugetlb.c |
1912 | @@ -71,6 +71,21 @@ DEFINE_SPINLOCK(hugetlb_lock); |
1913 | static int num_fault_mutexes; |
1914 | struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp; |
1915 | |
1916 | +static inline bool PageHugeFreed(struct page *head) |
1917 | +{ |
1918 | + return page_private(head + 4) == -1UL; |
1919 | +} |
1920 | + |
1921 | +static inline void SetPageHugeFreed(struct page *head) |
1922 | +{ |
1923 | + set_page_private(head + 4, -1UL); |
1924 | +} |
1925 | + |
1926 | +static inline void ClearPageHugeFreed(struct page *head) |
1927 | +{ |
1928 | + set_page_private(head + 4, 0); |
1929 | +} |
1930 | + |
1931 | /* Forward declaration */ |
1932 | static int hugetlb_acct_memory(struct hstate *h, long delta); |
1933 | |
1934 | @@ -869,6 +884,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page) |
1935 | list_move(&page->lru, &h->hugepage_freelists[nid]); |
1936 | h->free_huge_pages++; |
1937 | h->free_huge_pages_node[nid]++; |
1938 | + SetPageHugeFreed(page); |
1939 | } |
1940 | |
1941 | static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) |
1942 | @@ -886,6 +902,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) |
1943 | return NULL; |
1944 | list_move(&page->lru, &h->hugepage_activelist); |
1945 | set_page_refcounted(page); |
1946 | + ClearPageHugeFreed(page); |
1947 | h->free_huge_pages--; |
1948 | h->free_huge_pages_node[nid]--; |
1949 | return page; |
1950 | @@ -1217,12 +1234,11 @@ struct hstate *size_to_hstate(unsigned long size) |
1951 | */ |
1952 | bool page_huge_active(struct page *page) |
1953 | { |
1954 | - VM_BUG_ON_PAGE(!PageHuge(page), page); |
1955 | - return PageHead(page) && PagePrivate(&page[1]); |
1956 | + return PageHeadHuge(page) && PagePrivate(&page[1]); |
1957 | } |
1958 | |
1959 | /* never called for tail page */ |
1960 | -static void set_page_huge_active(struct page *page) |
1961 | +void set_page_huge_active(struct page *page) |
1962 | { |
1963 | VM_BUG_ON_PAGE(!PageHeadHuge(page), page); |
1964 | SetPagePrivate(&page[1]); |
1965 | @@ -1375,6 +1391,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) |
1966 | set_hugetlb_cgroup(page, NULL); |
1967 | h->nr_huge_pages++; |
1968 | h->nr_huge_pages_node[nid]++; |
1969 | + ClearPageHugeFreed(page); |
1970 | spin_unlock(&hugetlb_lock); |
1971 | } |
1972 | |
1973 | @@ -1602,6 +1619,7 @@ int dissolve_free_huge_page(struct page *page) |
1974 | { |
1975 | int rc = -EBUSY; |
1976 | |
1977 | +retry: |
1978 | /* Not to disrupt normal path by vainly holding hugetlb_lock */ |
1979 | if (!PageHuge(page)) |
1980 | return 0; |
1981 | @@ -1618,6 +1636,26 @@ int dissolve_free_huge_page(struct page *page) |
1982 | int nid = page_to_nid(head); |
1983 | if (h->free_huge_pages - h->resv_huge_pages == 0) |
1984 | goto out; |
1985 | + |
1986 | + /* |
1987 | + * We should make sure that the page is already on the free list |
1988 | + * when it is dissolved. |
1989 | + */ |
1990 | + if (unlikely(!PageHugeFreed(head))) { |
1991 | + spin_unlock(&hugetlb_lock); |
1992 | + cond_resched(); |
1993 | + |
1994 | + /* |
1995 | + * Theoretically, we should return -EBUSY when we |
1996 | + * encounter this race. In fact, we have a chance |
1997 | + * to successfully dissolve the page if we do a |
1998 | + * retry. Because the race window is quite small. |
1999 | + * If we seize this opportunity, it is an optimization |
2000 | + * for increasing the success rate of dissolving page. |
2001 | + */ |
2002 | + goto retry; |
2003 | + } |
2004 | + |
2005 | /* |
2006 | * Move PageHWPoison flag from head page to the raw error page, |
2007 | * which makes any subpages rather than the error page reusable. |
2008 | @@ -5136,9 +5174,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list) |
2009 | { |
2010 | bool ret = true; |
2011 | |
2012 | - VM_BUG_ON_PAGE(!PageHead(page), page); |
2013 | spin_lock(&hugetlb_lock); |
2014 | - if (!page_huge_active(page) || !get_page_unless_zero(page)) { |
2015 | + if (!PageHeadHuge(page) || !page_huge_active(page) || |
2016 | + !get_page_unless_zero(page)) { |
2017 | ret = false; |
2018 | goto unlock; |
2019 | } |
2020 | diff --git a/mm/memblock.c b/mm/memblock.c |
2021 | index c4b16cae2bc9b..11f6ae37d6699 100644 |
2022 | --- a/mm/memblock.c |
2023 | +++ b/mm/memblock.c |
2024 | @@ -257,14 +257,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, |
2025 | * |
2026 | * Find @size free area aligned to @align in the specified range and node. |
2027 | * |
2028 | - * When allocation direction is bottom-up, the @start should be greater |
2029 | - * than the end of the kernel image. Otherwise, it will be trimmed. The |
2030 | - * reason is that we want the bottom-up allocation just near the kernel |
2031 | - * image so it is highly likely that the allocated memory and the kernel |
2032 | - * will reside in the same node. |
2033 | - * |
2034 | - * If bottom-up allocation failed, will try to allocate memory top-down. |
2035 | - * |
2036 | * Return: |
2037 | * Found address on success, 0 on failure. |
2038 | */ |
2039 | @@ -273,8 +265,6 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, |
2040 | phys_addr_t end, int nid, |
2041 | enum memblock_flags flags) |
2042 | { |
2043 | - phys_addr_t kernel_end, ret; |
2044 | - |
2045 | /* pump up @end */ |
2046 | if (end == MEMBLOCK_ALLOC_ACCESSIBLE || |
2047 | end == MEMBLOCK_ALLOC_KASAN) |
2048 | @@ -283,40 +273,13 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, |
2049 | /* avoid allocating the first page */ |
2050 | start = max_t(phys_addr_t, start, PAGE_SIZE); |
2051 | end = max(start, end); |
2052 | - kernel_end = __pa_symbol(_end); |
2053 | - |
2054 | - /* |
2055 | - * try bottom-up allocation only when bottom-up mode |
2056 | - * is set and @end is above the kernel image. |
2057 | - */ |
2058 | - if (memblock_bottom_up() && end > kernel_end) { |
2059 | - phys_addr_t bottom_up_start; |
2060 | - |
2061 | - /* make sure we will allocate above the kernel */ |
2062 | - bottom_up_start = max(start, kernel_end); |
2063 | |
2064 | - /* ok, try bottom-up allocation first */ |
2065 | - ret = __memblock_find_range_bottom_up(bottom_up_start, end, |
2066 | - size, align, nid, flags); |
2067 | - if (ret) |
2068 | - return ret; |
2069 | - |
2070 | - /* |
2071 | - * we always limit bottom-up allocation above the kernel, |
2072 | - * but top-down allocation doesn't have the limit, so |
2073 | - * retrying top-down allocation may succeed when bottom-up |
2074 | - * allocation failed. |
2075 | - * |
2076 | - * bottom-up allocation is expected to be fail very rarely, |
2077 | - * so we use WARN_ONCE() here to see the stack trace if |
2078 | - * fail happens. |
2079 | - */ |
2080 | - WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE), |
2081 | - "memblock: bottom-up allocation failed, memory hotremove may be affected\n"); |
2082 | - } |
2083 | - |
2084 | - return __memblock_find_range_top_down(start, end, size, align, nid, |
2085 | - flags); |
2086 | + if (memblock_bottom_up()) |
2087 | + return __memblock_find_range_bottom_up(start, end, size, align, |
2088 | + nid, flags); |
2089 | + else |
2090 | + return __memblock_find_range_top_down(start, end, size, align, |
2091 | + nid, flags); |
2092 | } |
2093 | |
2094 | /** |
2095 | diff --git a/net/core/neighbour.c b/net/core/neighbour.c |
2096 | index 6c270fce200f4..7080d708b7d08 100644 |
2097 | --- a/net/core/neighbour.c |
2098 | +++ b/net/core/neighbour.c |
2099 | @@ -1244,13 +1244,14 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr, |
2100 | old = neigh->nud_state; |
2101 | err = -EPERM; |
2102 | |
2103 | - if (!(flags & NEIGH_UPDATE_F_ADMIN) && |
2104 | - (old & (NUD_NOARP | NUD_PERMANENT))) |
2105 | - goto out; |
2106 | if (neigh->dead) { |
2107 | NL_SET_ERR_MSG(extack, "Neighbor entry is now dead"); |
2108 | + new = old; |
2109 | goto out; |
2110 | } |
2111 | + if (!(flags & NEIGH_UPDATE_F_ADMIN) && |
2112 | + (old & (NUD_NOARP | NUD_PERMANENT))) |
2113 | + goto out; |
2114 | |
2115 | ext_learn_change = neigh_update_ext_learned(neigh, flags, ¬ify); |
2116 | |
2117 | diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c |
2118 | index ca525cf681a4e..f64d1743b86d6 100644 |
2119 | --- a/net/ipv4/ip_tunnel.c |
2120 | +++ b/net/ipv4/ip_tunnel.c |
2121 | @@ -317,7 +317,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev) |
2122 | } |
2123 | |
2124 | dev->needed_headroom = t_hlen + hlen; |
2125 | - mtu -= (dev->hard_header_len + t_hlen); |
2126 | + mtu -= t_hlen; |
2127 | |
2128 | if (mtu < IPV4_MIN_MTU) |
2129 | mtu = IPV4_MIN_MTU; |
2130 | @@ -347,7 +347,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net, |
2131 | nt = netdev_priv(dev); |
2132 | t_hlen = nt->hlen + sizeof(struct iphdr); |
2133 | dev->min_mtu = ETH_MIN_MTU; |
2134 | - dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen; |
2135 | + dev->max_mtu = IP_MAX_MTU - t_hlen; |
2136 | ip_tunnel_add(itn, nt); |
2137 | return nt; |
2138 | |
2139 | @@ -494,11 +494,10 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, |
2140 | int mtu; |
2141 | |
2142 | tunnel_hlen = md ? tunnel_hlen : tunnel->hlen; |
2143 | - pkt_size = skb->len - tunnel_hlen - dev->hard_header_len; |
2144 | + pkt_size = skb->len - tunnel_hlen; |
2145 | |
2146 | if (df) |
2147 | - mtu = dst_mtu(&rt->dst) - dev->hard_header_len |
2148 | - - sizeof(struct iphdr) - tunnel_hlen; |
2149 | + mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen); |
2150 | else |
2151 | mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; |
2152 | |
2153 | @@ -964,7 +963,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict) |
2154 | { |
2155 | struct ip_tunnel *tunnel = netdev_priv(dev); |
2156 | int t_hlen = tunnel->hlen + sizeof(struct iphdr); |
2157 | - int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen; |
2158 | + int max_mtu = IP_MAX_MTU - t_hlen; |
2159 | |
2160 | if (new_mtu < ETH_MIN_MTU) |
2161 | return -EINVAL; |
2162 | @@ -1141,10 +1140,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], |
2163 | |
2164 | mtu = ip_tunnel_bind_dev(dev); |
2165 | if (tb[IFLA_MTU]) { |
2166 | - unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen; |
2167 | + unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr)); |
2168 | |
2169 | - mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, |
2170 | - (unsigned int)(max - sizeof(struct iphdr))); |
2171 | + mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max); |
2172 | } |
2173 | |
2174 | err = dev_set_mtu(dev, mtu); |
2175 | diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c |
2176 | index 7a4d0715d1c32..a966d29c772d9 100644 |
2177 | --- a/net/lapb/lapb_out.c |
2178 | +++ b/net/lapb/lapb_out.c |
2179 | @@ -82,7 +82,8 @@ void lapb_kick(struct lapb_cb *lapb) |
2180 | skb = skb_dequeue(&lapb->write_queue); |
2181 | |
2182 | do { |
2183 | - if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { |
2184 | + skbn = skb_copy(skb, GFP_ATOMIC); |
2185 | + if (!skbn) { |
2186 | skb_queue_head(&lapb->write_queue, skb); |
2187 | break; |
2188 | } |
2189 | diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c |
2190 | index c9a8a2433e8ac..48322e45e7ddb 100644 |
2191 | --- a/net/mac80211/driver-ops.c |
2192 | +++ b/net/mac80211/driver-ops.c |
2193 | @@ -125,8 +125,11 @@ int drv_sta_state(struct ieee80211_local *local, |
2194 | } else if (old_state == IEEE80211_STA_AUTH && |
2195 | new_state == IEEE80211_STA_ASSOC) { |
2196 | ret = drv_sta_add(local, sdata, &sta->sta); |
2197 | - if (ret == 0) |
2198 | + if (ret == 0) { |
2199 | sta->uploaded = true; |
2200 | + if (rcu_access_pointer(sta->sta.rates)) |
2201 | + drv_sta_rate_tbl_update(local, sdata, &sta->sta); |
2202 | + } |
2203 | } else if (old_state == IEEE80211_STA_ASSOC && |
2204 | new_state == IEEE80211_STA_AUTH) { |
2205 | drv_sta_remove(local, sdata, &sta->sta); |
2206 | diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c |
2207 | index b051f125d3af2..9841db84bce0a 100644 |
2208 | --- a/net/mac80211/rate.c |
2209 | +++ b/net/mac80211/rate.c |
2210 | @@ -934,7 +934,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw, |
2211 | if (old) |
2212 | kfree_rcu(old, rcu_head); |
2213 | |
2214 | - drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); |
2215 | + if (sta->uploaded) |
2216 | + drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); |
2217 | |
2218 | ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta)); |
2219 | |
2220 | diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c |
2221 | index 2921fc2767134..9bacec6653bac 100644 |
2222 | --- a/net/rxrpc/af_rxrpc.c |
2223 | +++ b/net/rxrpc/af_rxrpc.c |
2224 | @@ -976,7 +976,7 @@ static int __init af_rxrpc_init(void) |
2225 | goto error_security; |
2226 | } |
2227 | |
2228 | - ret = register_pernet_subsys(&rxrpc_net_ops); |
2229 | + ret = register_pernet_device(&rxrpc_net_ops); |
2230 | if (ret) |
2231 | goto error_pernet; |
2232 | |
2233 | @@ -1021,7 +1021,7 @@ error_key_type: |
2234 | error_sock: |
2235 | proto_unregister(&rxrpc_proto); |
2236 | error_proto: |
2237 | - unregister_pernet_subsys(&rxrpc_net_ops); |
2238 | + unregister_pernet_device(&rxrpc_net_ops); |
2239 | error_pernet: |
2240 | rxrpc_exit_security(); |
2241 | error_security: |
2242 | @@ -1043,7 +1043,7 @@ static void __exit af_rxrpc_exit(void) |
2243 | unregister_key_type(&key_type_rxrpc); |
2244 | sock_unregister(PF_RXRPC); |
2245 | proto_unregister(&rxrpc_proto); |
2246 | - unregister_pernet_subsys(&rxrpc_net_ops); |
2247 | + unregister_pernet_device(&rxrpc_net_ops); |
2248 | ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0); |
2249 | ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0); |
2250 |