Contents of /trunk/kernel-alx/patches-4.14/0123-4.14.24-all-fixes.patch
Parent Directory | Revision Log
Revision 3238 -
(show annotations)
(download)
Fri Nov 9 12:14:58 2018 UTC (5 years, 10 months ago) by niro
File size: 142003 byte(s)
-added up to patches-4.14.79
1 | diff --git a/Makefile b/Makefile |
2 | index 169f3199274f..38acc6047d7d 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 4 |
8 | PATCHLEVEL = 14 |
9 | -SUBLEVEL = 23 |
10 | +SUBLEVEL = 24 |
11 | EXTRAVERSION = |
12 | NAME = Petit Gorille |
13 | |
14 | diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts |
15 | index 940875316d0f..67b4de0e3439 100644 |
16 | --- a/arch/arm/boot/dts/ls1021a-qds.dts |
17 | +++ b/arch/arm/boot/dts/ls1021a-qds.dts |
18 | @@ -215,7 +215,7 @@ |
19 | reg = <0x2a>; |
20 | VDDA-supply = <®_3p3v>; |
21 | VDDIO-supply = <®_3p3v>; |
22 | - clocks = <&sys_mclk 1>; |
23 | + clocks = <&sys_mclk>; |
24 | }; |
25 | }; |
26 | }; |
27 | diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts |
28 | index a8b148ad1dd2..44715c8ef756 100644 |
29 | --- a/arch/arm/boot/dts/ls1021a-twr.dts |
30 | +++ b/arch/arm/boot/dts/ls1021a-twr.dts |
31 | @@ -187,7 +187,7 @@ |
32 | reg = <0x0a>; |
33 | VDDA-supply = <®_3p3v>; |
34 | VDDIO-supply = <®_3p3v>; |
35 | - clocks = <&sys_mclk 1>; |
36 | + clocks = <&sys_mclk>; |
37 | }; |
38 | }; |
39 | |
40 | diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S |
41 | index 1712f132b80d..b83fdc06286a 100644 |
42 | --- a/arch/arm/lib/csumpartialcopyuser.S |
43 | +++ b/arch/arm/lib/csumpartialcopyuser.S |
44 | @@ -85,7 +85,11 @@ |
45 | .pushsection .text.fixup,"ax" |
46 | .align 4 |
47 | 9001: mov r4, #-EFAULT |
48 | +#ifdef CONFIG_CPU_SW_DOMAIN_PAN |
49 | + ldr r5, [sp, #9*4] @ *err_ptr |
50 | +#else |
51 | ldr r5, [sp, #8*4] @ *err_ptr |
52 | +#endif |
53 | str r4, [r5] |
54 | ldmia sp, {r1, r2} @ retrieve dst, len |
55 | add r2, r2, r1 |
56 | diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts |
57 | index 2b6b792dab93..e6ee7443b530 100644 |
58 | --- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts |
59 | +++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts |
60 | @@ -228,8 +228,11 @@ |
61 | |
62 | &cpm_eth0 { |
63 | status = "okay"; |
64 | + /* Network PHY */ |
65 | phy = <&phy0>; |
66 | phy-mode = "10gbase-kr"; |
67 | + /* Generic PHY, providing serdes lanes */ |
68 | + phys = <&cpm_comphy4 0>; |
69 | }; |
70 | |
71 | &cpm_sata0 { |
72 | @@ -263,15 +266,21 @@ |
73 | |
74 | &cps_eth0 { |
75 | status = "okay"; |
76 | + /* Network PHY */ |
77 | phy = <&phy8>; |
78 | phy-mode = "10gbase-kr"; |
79 | + /* Generic PHY, providing serdes lanes */ |
80 | + phys = <&cps_comphy4 0>; |
81 | }; |
82 | |
83 | &cps_eth1 { |
84 | /* CPS Lane 0 - J5 (Gigabit RJ45) */ |
85 | status = "okay"; |
86 | + /* Network PHY */ |
87 | phy = <&ge_phy>; |
88 | phy-mode = "sgmii"; |
89 | + /* Generic PHY, providing serdes lanes */ |
90 | + phys = <&cps_comphy0 1>; |
91 | }; |
92 | |
93 | &cps_pinctrl { |
94 | diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi |
95 | index 32690107c1cc..9a7b63cd63a3 100644 |
96 | --- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi |
97 | +++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi |
98 | @@ -111,6 +111,44 @@ |
99 | }; |
100 | }; |
101 | |
102 | + cpm_comphy: phy@120000 { |
103 | + compatible = "marvell,comphy-cp110"; |
104 | + reg = <0x120000 0x6000>; |
105 | + marvell,system-controller = <&cpm_syscon0>; |
106 | + #address-cells = <1>; |
107 | + #size-cells = <0>; |
108 | + |
109 | + cpm_comphy0: phy@0 { |
110 | + reg = <0>; |
111 | + #phy-cells = <1>; |
112 | + }; |
113 | + |
114 | + cpm_comphy1: phy@1 { |
115 | + reg = <1>; |
116 | + #phy-cells = <1>; |
117 | + }; |
118 | + |
119 | + cpm_comphy2: phy@2 { |
120 | + reg = <2>; |
121 | + #phy-cells = <1>; |
122 | + }; |
123 | + |
124 | + cpm_comphy3: phy@3 { |
125 | + reg = <3>; |
126 | + #phy-cells = <1>; |
127 | + }; |
128 | + |
129 | + cpm_comphy4: phy@4 { |
130 | + reg = <4>; |
131 | + #phy-cells = <1>; |
132 | + }; |
133 | + |
134 | + cpm_comphy5: phy@5 { |
135 | + reg = <5>; |
136 | + #phy-cells = <1>; |
137 | + }; |
138 | + }; |
139 | + |
140 | cpm_mdio: mdio@12a200 { |
141 | #address-cells = <1>; |
142 | #size-cells = <0>; |
143 | diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi |
144 | index 14e47c5c3816..faf28633a309 100644 |
145 | --- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi |
146 | +++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi |
147 | @@ -111,6 +111,44 @@ |
148 | }; |
149 | }; |
150 | |
151 | + cps_comphy: phy@120000 { |
152 | + compatible = "marvell,comphy-cp110"; |
153 | + reg = <0x120000 0x6000>; |
154 | + marvell,system-controller = <&cps_syscon0>; |
155 | + #address-cells = <1>; |
156 | + #size-cells = <0>; |
157 | + |
158 | + cps_comphy0: phy@0 { |
159 | + reg = <0>; |
160 | + #phy-cells = <1>; |
161 | + }; |
162 | + |
163 | + cps_comphy1: phy@1 { |
164 | + reg = <1>; |
165 | + #phy-cells = <1>; |
166 | + }; |
167 | + |
168 | + cps_comphy2: phy@2 { |
169 | + reg = <2>; |
170 | + #phy-cells = <1>; |
171 | + }; |
172 | + |
173 | + cps_comphy3: phy@3 { |
174 | + reg = <3>; |
175 | + #phy-cells = <1>; |
176 | + }; |
177 | + |
178 | + cps_comphy4: phy@4 { |
179 | + reg = <4>; |
180 | + #phy-cells = <1>; |
181 | + }; |
182 | + |
183 | + cps_comphy5: phy@5 { |
184 | + reg = <5>; |
185 | + #phy-cells = <1>; |
186 | + }; |
187 | + }; |
188 | + |
189 | cps_mdio: mdio@12a200 { |
190 | #address-cells = <1>; |
191 | #size-cells = <0>; |
192 | diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi |
193 | index 1b868df2393f..e95d99265af9 100644 |
194 | --- a/arch/arm64/boot/dts/renesas/ulcb.dtsi |
195 | +++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi |
196 | @@ -145,7 +145,6 @@ |
197 | &avb { |
198 | pinctrl-0 = <&avb_pins>; |
199 | pinctrl-names = "default"; |
200 | - renesas,no-ether-link; |
201 | phy-handle = <&phy0>; |
202 | status = "okay"; |
203 | |
204 | diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c |
205 | index aa7be020a904..c954523d00fe 100644 |
206 | --- a/arch/ia64/kernel/time.c |
207 | +++ b/arch/ia64/kernel/time.c |
208 | @@ -88,7 +88,7 @@ void vtime_flush(struct task_struct *tsk) |
209 | } |
210 | |
211 | if (ti->softirq_time) { |
212 | - delta = cycle_to_nsec(ti->softirq_time)); |
213 | + delta = cycle_to_nsec(ti->softirq_time); |
214 | account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ); |
215 | } |
216 | |
217 | diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile |
218 | index 78c2affeabf8..e84e12655fa8 100644 |
219 | --- a/arch/mips/lib/Makefile |
220 | +++ b/arch/mips/lib/Makefile |
221 | @@ -16,4 +16,5 @@ obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o |
222 | obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o |
223 | |
224 | # libgcc-style stuff needed in the kernel |
225 | -obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o ucmpdi2.o |
226 | +obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o multi3.o \ |
227 | + ucmpdi2.o |
228 | diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h |
229 | index 28002ed90c2c..199a7f96282f 100644 |
230 | --- a/arch/mips/lib/libgcc.h |
231 | +++ b/arch/mips/lib/libgcc.h |
232 | @@ -10,10 +10,18 @@ typedef int word_type __attribute__ ((mode (__word__))); |
233 | struct DWstruct { |
234 | int high, low; |
235 | }; |
236 | + |
237 | +struct TWstruct { |
238 | + long long high, low; |
239 | +}; |
240 | #elif defined(__LITTLE_ENDIAN) |
241 | struct DWstruct { |
242 | int low, high; |
243 | }; |
244 | + |
245 | +struct TWstruct { |
246 | + long long low, high; |
247 | +}; |
248 | #else |
249 | #error I feel sick. |
250 | #endif |
251 | @@ -23,4 +31,13 @@ typedef union { |
252 | long long ll; |
253 | } DWunion; |
254 | |
255 | +#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) |
256 | +typedef int ti_type __attribute__((mode(TI))); |
257 | + |
258 | +typedef union { |
259 | + struct TWstruct s; |
260 | + ti_type ti; |
261 | +} TWunion; |
262 | +#endif |
263 | + |
264 | #endif /* __ASM_LIBGCC_H */ |
265 | diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c |
266 | new file mode 100644 |
267 | index 000000000000..111ad475aa0c |
268 | --- /dev/null |
269 | +++ b/arch/mips/lib/multi3.c |
270 | @@ -0,0 +1,54 @@ |
271 | +// SPDX-License-Identifier: GPL-2.0 |
272 | +#include <linux/export.h> |
273 | + |
274 | +#include "libgcc.h" |
275 | + |
276 | +/* |
277 | + * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that |
278 | + * specific case only we'll implement it here. |
279 | + * |
280 | + * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981 |
281 | + */ |
282 | +#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7) |
283 | + |
284 | +/* multiply 64-bit values, low 64-bits returned */ |
285 | +static inline long long notrace dmulu(long long a, long long b) |
286 | +{ |
287 | + long long res; |
288 | + |
289 | + asm ("dmulu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b)); |
290 | + return res; |
291 | +} |
292 | + |
293 | +/* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */ |
294 | +static inline long long notrace dmuhu(long long a, long long b) |
295 | +{ |
296 | + long long res; |
297 | + |
298 | + asm ("dmuhu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b)); |
299 | + return res; |
300 | +} |
301 | + |
302 | +/* multiply 128-bit values, low 128-bits returned */ |
303 | +ti_type notrace __multi3(ti_type a, ti_type b) |
304 | +{ |
305 | + TWunion res, aa, bb; |
306 | + |
307 | + aa.ti = a; |
308 | + bb.ti = b; |
309 | + |
310 | + /* |
311 | + * a * b = (a.lo * b.lo) |
312 | + * + 2^64 * (a.hi * b.lo + a.lo * b.hi) |
313 | + * [+ 2^128 * (a.hi * b.hi)] |
314 | + */ |
315 | + res.s.low = dmulu(aa.s.low, bb.s.low); |
316 | + res.s.high = dmuhu(aa.s.low, bb.s.low); |
317 | + res.s.high += dmulu(aa.s.high, bb.s.low); |
318 | + res.s.high += dmulu(aa.s.low, bb.s.high); |
319 | + |
320 | + return res.ti; |
321 | +} |
322 | +EXPORT_SYMBOL(__multi3); |
323 | + |
324 | +#endif /* 64BIT && CPU_MIPSR6 && GCC7 */ |
325 | diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h |
326 | index c980a02a52bc..598c8d60fa5e 100644 |
327 | --- a/arch/parisc/include/asm/thread_info.h |
328 | +++ b/arch/parisc/include/asm/thread_info.h |
329 | @@ -35,7 +35,12 @@ struct thread_info { |
330 | |
331 | /* thread information allocation */ |
332 | |
333 | +#ifdef CONFIG_IRQSTACKS |
334 | +#define THREAD_SIZE_ORDER 2 /* PA-RISC requires at least 16k stack */ |
335 | +#else |
336 | #define THREAD_SIZE_ORDER 3 /* PA-RISC requires at least 32k stack */ |
337 | +#endif |
338 | + |
339 | /* Be sure to hunt all references to this down when you change the size of |
340 | * the kernel stack */ |
341 | #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) |
342 | diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c |
343 | index e45b5f10645a..e9149d05d30b 100644 |
344 | --- a/arch/powerpc/platforms/pseries/dlpar.c |
345 | +++ b/arch/powerpc/platforms/pseries/dlpar.c |
346 | @@ -586,11 +586,26 @@ static ssize_t dlpar_show(struct class *class, struct class_attribute *attr, |
347 | |
348 | static CLASS_ATTR_RW(dlpar); |
349 | |
350 | -static int __init pseries_dlpar_init(void) |
351 | +int __init dlpar_workqueue_init(void) |
352 | { |
353 | + if (pseries_hp_wq) |
354 | + return 0; |
355 | + |
356 | pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue", |
357 | - WQ_UNBOUND, 1); |
358 | + WQ_UNBOUND, 1); |
359 | + |
360 | + return pseries_hp_wq ? 0 : -ENOMEM; |
361 | +} |
362 | + |
363 | +static int __init dlpar_sysfs_init(void) |
364 | +{ |
365 | + int rc; |
366 | + |
367 | + rc = dlpar_workqueue_init(); |
368 | + if (rc) |
369 | + return rc; |
370 | + |
371 | return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr); |
372 | } |
373 | -machine_device_initcall(pseries, pseries_dlpar_init); |
374 | +machine_device_initcall(pseries, dlpar_sysfs_init); |
375 | |
376 | diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h |
377 | index 4470a3194311..1ae1d9f4dbe9 100644 |
378 | --- a/arch/powerpc/platforms/pseries/pseries.h |
379 | +++ b/arch/powerpc/platforms/pseries/pseries.h |
380 | @@ -98,4 +98,6 @@ static inline unsigned long cmo_get_page_size(void) |
381 | return CMO_PageSize; |
382 | } |
383 | |
384 | +int dlpar_workqueue_init(void); |
385 | + |
386 | #endif /* _PSERIES_PSERIES_H */ |
387 | diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c |
388 | index 4923ffe230cf..5e1ef9150182 100644 |
389 | --- a/arch/powerpc/platforms/pseries/ras.c |
390 | +++ b/arch/powerpc/platforms/pseries/ras.c |
391 | @@ -48,6 +48,28 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id); |
392 | static irqreturn_t ras_error_interrupt(int irq, void *dev_id); |
393 | |
394 | |
395 | +/* |
396 | + * Enable the hotplug interrupt late because processing them may touch other |
397 | + * devices or systems (e.g. hugepages) that have not been initialized at the |
398 | + * subsys stage. |
399 | + */ |
400 | +int __init init_ras_hotplug_IRQ(void) |
401 | +{ |
402 | + struct device_node *np; |
403 | + |
404 | + /* Hotplug Events */ |
405 | + np = of_find_node_by_path("/event-sources/hot-plug-events"); |
406 | + if (np != NULL) { |
407 | + if (dlpar_workqueue_init() == 0) |
408 | + request_event_sources_irqs(np, ras_hotplug_interrupt, |
409 | + "RAS_HOTPLUG"); |
410 | + of_node_put(np); |
411 | + } |
412 | + |
413 | + return 0; |
414 | +} |
415 | +machine_late_initcall(pseries, init_ras_hotplug_IRQ); |
416 | + |
417 | /* |
418 | * Initialize handlers for the set of interrupts caused by hardware errors |
419 | * and power system events. |
420 | @@ -66,14 +88,6 @@ static int __init init_ras_IRQ(void) |
421 | of_node_put(np); |
422 | } |
423 | |
424 | - /* Hotplug Events */ |
425 | - np = of_find_node_by_path("/event-sources/hot-plug-events"); |
426 | - if (np != NULL) { |
427 | - request_event_sources_irqs(np, ras_hotplug_interrupt, |
428 | - "RAS_HOTPLUG"); |
429 | - of_node_put(np); |
430 | - } |
431 | - |
432 | /* EPOW Events */ |
433 | np = of_find_node_by_path("/event-sources/epow-events"); |
434 | if (np != NULL) { |
435 | diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c |
436 | index 77c35350ee77..b7fa7a87e946 100644 |
437 | --- a/arch/sh/boards/mach-se/770x/setup.c |
438 | +++ b/arch/sh/boards/mach-se/770x/setup.c |
439 | @@ -9,6 +9,7 @@ |
440 | */ |
441 | #include <linux/init.h> |
442 | #include <linux/platform_device.h> |
443 | +#include <linux/sh_eth.h> |
444 | #include <mach-se/mach/se.h> |
445 | #include <mach-se/mach/mrshpc.h> |
446 | #include <asm/machvec.h> |
447 | @@ -115,6 +116,11 @@ static struct platform_device heartbeat_device = { |
448 | #if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\ |
449 | defined(CONFIG_CPU_SUBTYPE_SH7712) |
450 | /* SH771X Ethernet driver */ |
451 | +static struct sh_eth_plat_data sh_eth_plat = { |
452 | + .phy = PHY_ID, |
453 | + .phy_interface = PHY_INTERFACE_MODE_MII, |
454 | +}; |
455 | + |
456 | static struct resource sh_eth0_resources[] = { |
457 | [0] = { |
458 | .start = SH_ETH0_BASE, |
459 | @@ -132,7 +138,7 @@ static struct platform_device sh_eth0_device = { |
460 | .name = "sh771x-ether", |
461 | .id = 0, |
462 | .dev = { |
463 | - .platform_data = PHY_ID, |
464 | + .platform_data = &sh_eth_plat, |
465 | }, |
466 | .num_resources = ARRAY_SIZE(sh_eth0_resources), |
467 | .resource = sh_eth0_resources, |
468 | @@ -155,7 +161,7 @@ static struct platform_device sh_eth1_device = { |
469 | .name = "sh771x-ether", |
470 | .id = 1, |
471 | .dev = { |
472 | - .platform_data = PHY_ID, |
473 | + .platform_data = &sh_eth_plat, |
474 | }, |
475 | .num_resources = ARRAY_SIZE(sh_eth1_resources), |
476 | .resource = sh_eth1_resources, |
477 | diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c |
478 | index 1c2558430cf0..56457cb73448 100644 |
479 | --- a/arch/x86/events/intel/core.c |
480 | +++ b/arch/x86/events/intel/core.c |
481 | @@ -3847,6 +3847,8 @@ static struct attribute *intel_pmu_attrs[] = { |
482 | |
483 | __init int intel_pmu_init(void) |
484 | { |
485 | + struct attribute **extra_attr = NULL; |
486 | + struct attribute **to_free = NULL; |
487 | union cpuid10_edx edx; |
488 | union cpuid10_eax eax; |
489 | union cpuid10_ebx ebx; |
490 | @@ -3854,7 +3856,6 @@ __init int intel_pmu_init(void) |
491 | unsigned int unused; |
492 | struct extra_reg *er; |
493 | int version, i; |
494 | - struct attribute **extra_attr = NULL; |
495 | char *name; |
496 | |
497 | if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { |
498 | @@ -4294,6 +4295,7 @@ __init int intel_pmu_init(void) |
499 | extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? |
500 | hsw_format_attr : nhm_format_attr; |
501 | extra_attr = merge_attr(extra_attr, skl_format_attr); |
502 | + to_free = extra_attr; |
503 | x86_pmu.cpu_events = get_hsw_events_attrs(); |
504 | intel_pmu_pebs_data_source_skl( |
505 | boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X); |
506 | @@ -4401,6 +4403,7 @@ __init int intel_pmu_init(void) |
507 | pr_cont("full-width counters, "); |
508 | } |
509 | |
510 | + kfree(to_free); |
511 | return 0; |
512 | } |
513 | |
514 | diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h |
515 | index 219faaec51df..386a6900e206 100644 |
516 | --- a/arch/x86/include/asm/asm.h |
517 | +++ b/arch/x86/include/asm/asm.h |
518 | @@ -136,6 +136,7 @@ |
519 | #endif |
520 | |
521 | #ifndef __ASSEMBLY__ |
522 | +#ifndef __BPF__ |
523 | /* |
524 | * This output constraint should be used for any inline asm which has a "call" |
525 | * instruction. Otherwise the asm may be inserted before the frame pointer |
526 | @@ -145,5 +146,6 @@ |
527 | register unsigned long current_stack_pointer asm(_ASM_SP); |
528 | #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) |
529 | #endif |
530 | +#endif |
531 | |
532 | #endif /* _ASM_X86_ASM_H */ |
533 | diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c |
534 | index e84cb4c75cd0..c54361a22f59 100644 |
535 | --- a/arch/x86/kernel/setup.c |
536 | +++ b/arch/x86/kernel/setup.c |
537 | @@ -928,9 +928,6 @@ void __init setup_arch(char **cmdline_p) |
538 | set_bit(EFI_BOOT, &efi.flags); |
539 | set_bit(EFI_64BIT, &efi.flags); |
540 | } |
541 | - |
542 | - if (efi_enabled(EFI_BOOT)) |
543 | - efi_memblock_x86_reserve_range(); |
544 | #endif |
545 | |
546 | x86_init.oem.arch_setup(); |
547 | @@ -984,6 +981,8 @@ void __init setup_arch(char **cmdline_p) |
548 | |
549 | parse_early_param(); |
550 | |
551 | + if (efi_enabled(EFI_BOOT)) |
552 | + efi_memblock_x86_reserve_range(); |
553 | #ifdef CONFIG_MEMORY_HOTPLUG |
554 | /* |
555 | * Memory used by the kernel cannot be hot-removed because Linux |
556 | diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c |
557 | index 60244bfaf88f..4565f31bd398 100644 |
558 | --- a/arch/x86/kernel/stacktrace.c |
559 | +++ b/arch/x86/kernel/stacktrace.c |
560 | @@ -160,8 +160,12 @@ int save_stack_trace_tsk_reliable(struct task_struct *tsk, |
561 | { |
562 | int ret; |
563 | |
564 | + /* |
565 | + * If the task doesn't have a stack (e.g., a zombie), the stack is |
566 | + * "reliably" empty. |
567 | + */ |
568 | if (!try_get_task_stack(tsk)) |
569 | - return -EINVAL; |
570 | + return 0; |
571 | |
572 | ret = __save_stack_trace_reliable(trace, tsk); |
573 | |
574 | diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c |
575 | index dc036e511f48..5a0483e7bf66 100644 |
576 | --- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c |
577 | +++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c |
578 | @@ -60,7 +60,7 @@ static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata) |
579 | return 0; |
580 | } |
581 | |
582 | -static const struct bt_sfi_data tng_bt_sfi_data __initdata = { |
583 | +static struct bt_sfi_data tng_bt_sfi_data __initdata = { |
584 | .setup = tng_bt_sfi_setup, |
585 | }; |
586 | |
587 | diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c |
588 | index d669e9d89001..c9081c6671f0 100644 |
589 | --- a/arch/x86/xen/enlighten.c |
590 | +++ b/arch/x86/xen/enlighten.c |
591 | @@ -1,8 +1,12 @@ |
592 | +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG |
593 | +#include <linux/bootmem.h> |
594 | +#endif |
595 | #include <linux/cpu.h> |
596 | #include <linux/kexec.h> |
597 | |
598 | #include <xen/features.h> |
599 | #include <xen/page.h> |
600 | +#include <xen/interface/memory.h> |
601 | |
602 | #include <asm/xen/hypercall.h> |
603 | #include <asm/xen/hypervisor.h> |
604 | @@ -331,3 +335,80 @@ void xen_arch_unregister_cpu(int num) |
605 | } |
606 | EXPORT_SYMBOL(xen_arch_unregister_cpu); |
607 | #endif |
608 | + |
609 | +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG |
610 | +void __init arch_xen_balloon_init(struct resource *hostmem_resource) |
611 | +{ |
612 | + struct xen_memory_map memmap; |
613 | + int rc; |
614 | + unsigned int i, last_guest_ram; |
615 | + phys_addr_t max_addr = PFN_PHYS(max_pfn); |
616 | + struct e820_table *xen_e820_table; |
617 | + const struct e820_entry *entry; |
618 | + struct resource *res; |
619 | + |
620 | + if (!xen_initial_domain()) |
621 | + return; |
622 | + |
623 | + xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL); |
624 | + if (!xen_e820_table) |
625 | + return; |
626 | + |
627 | + memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries); |
628 | + set_xen_guest_handle(memmap.buffer, xen_e820_table->entries); |
629 | + rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap); |
630 | + if (rc) { |
631 | + pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc); |
632 | + goto out; |
633 | + } |
634 | + |
635 | + last_guest_ram = 0; |
636 | + for (i = 0; i < memmap.nr_entries; i++) { |
637 | + if (xen_e820_table->entries[i].addr >= max_addr) |
638 | + break; |
639 | + if (xen_e820_table->entries[i].type == E820_TYPE_RAM) |
640 | + last_guest_ram = i; |
641 | + } |
642 | + |
643 | + entry = &xen_e820_table->entries[last_guest_ram]; |
644 | + if (max_addr >= entry->addr + entry->size) |
645 | + goto out; /* No unallocated host RAM. */ |
646 | + |
647 | + hostmem_resource->start = max_addr; |
648 | + hostmem_resource->end = entry->addr + entry->size; |
649 | + |
650 | + /* |
651 | + * Mark non-RAM regions between the end of dom0 RAM and end of host RAM |
652 | + * as unavailable. The rest of that region can be used for hotplug-based |
653 | + * ballooning. |
654 | + */ |
655 | + for (; i < memmap.nr_entries; i++) { |
656 | + entry = &xen_e820_table->entries[i]; |
657 | + |
658 | + if (entry->type == E820_TYPE_RAM) |
659 | + continue; |
660 | + |
661 | + if (entry->addr >= hostmem_resource->end) |
662 | + break; |
663 | + |
664 | + res = kzalloc(sizeof(*res), GFP_KERNEL); |
665 | + if (!res) |
666 | + goto out; |
667 | + |
668 | + res->name = "Unavailable host RAM"; |
669 | + res->start = entry->addr; |
670 | + res->end = (entry->addr + entry->size < hostmem_resource->end) ? |
671 | + entry->addr + entry->size : hostmem_resource->end; |
672 | + rc = insert_resource(hostmem_resource, res); |
673 | + if (rc) { |
674 | + pr_warn("%s: Can't insert [%llx - %llx) (%d)\n", |
675 | + __func__, res->start, res->end, rc); |
676 | + kfree(res); |
677 | + goto out; |
678 | + } |
679 | + } |
680 | + |
681 | + out: |
682 | + kfree(xen_e820_table); |
683 | +} |
684 | +#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */ |
685 | diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c |
686 | index 899a22a02e95..f896c2975545 100644 |
687 | --- a/arch/x86/xen/enlighten_pv.c |
688 | +++ b/arch/x86/xen/enlighten_pv.c |
689 | @@ -88,6 +88,8 @@ |
690 | #include "multicalls.h" |
691 | #include "pmu.h" |
692 | |
693 | +#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */ |
694 | + |
695 | void *xen_initial_gdt; |
696 | |
697 | static int xen_cpu_up_prepare_pv(unsigned int cpu); |
698 | @@ -1257,6 +1259,7 @@ asmlinkage __visible void __init xen_start_kernel(void) |
699 | __userpte_alloc_gfp &= ~__GFP_HIGHMEM; |
700 | |
701 | /* Work out if we support NX */ |
702 | + get_cpu_cap(&boot_cpu_data); |
703 | x86_configure_nx(); |
704 | |
705 | /* Get mfn list */ |
706 | diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c |
707 | index d0b943a6b117..042e9c422b21 100644 |
708 | --- a/arch/x86/xen/mmu_pv.c |
709 | +++ b/arch/x86/xen/mmu_pv.c |
710 | @@ -1902,6 +1902,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) |
711 | /* Graft it onto L4[511][510] */ |
712 | copy_page(level2_kernel_pgt, l2); |
713 | |
714 | + /* |
715 | + * Zap execute permission from the ident map. Due to the sharing of |
716 | + * L1 entries we need to do this in the L2. |
717 | + */ |
718 | + if (__supported_pte_mask & _PAGE_NX) { |
719 | + for (i = 0; i < PTRS_PER_PMD; ++i) { |
720 | + if (pmd_none(level2_ident_pgt[i])) |
721 | + continue; |
722 | + level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX); |
723 | + } |
724 | + } |
725 | + |
726 | /* Copy the initial P->M table mappings if necessary. */ |
727 | i = pgd_index(xen_start_info->mfn_list); |
728 | if (i && i < pgd_index(__START_KERNEL_map)) |
729 | diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c |
730 | index c114ca767b3b..6e0d2086eacb 100644 |
731 | --- a/arch/x86/xen/setup.c |
732 | +++ b/arch/x86/xen/setup.c |
733 | @@ -808,7 +808,6 @@ char * __init xen_memory_setup(void) |
734 | addr = xen_e820_table.entries[0].addr; |
735 | size = xen_e820_table.entries[0].size; |
736 | while (i < xen_e820_table.nr_entries) { |
737 | - bool discard = false; |
738 | |
739 | chunk_size = size; |
740 | type = xen_e820_table.entries[i].type; |
741 | @@ -824,11 +823,10 @@ char * __init xen_memory_setup(void) |
742 | xen_add_extra_mem(pfn_s, n_pfns); |
743 | xen_max_p2m_pfn = pfn_s + n_pfns; |
744 | } else |
745 | - discard = true; |
746 | + type = E820_TYPE_UNUSABLE; |
747 | } |
748 | |
749 | - if (!discard) |
750 | - xen_align_and_add_e820_region(addr, chunk_size, type); |
751 | + xen_align_and_add_e820_region(addr, chunk_size, type); |
752 | |
753 | addr += chunk_size; |
754 | size -= chunk_size; |
755 | diff --git a/block/blk-core.c b/block/blk-core.c |
756 | index f3750389e351..95b7ea996ac2 100644 |
757 | --- a/block/blk-core.c |
758 | +++ b/block/blk-core.c |
759 | @@ -531,6 +531,13 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all) |
760 | } |
761 | } |
762 | |
763 | +void blk_drain_queue(struct request_queue *q) |
764 | +{ |
765 | + spin_lock_irq(q->queue_lock); |
766 | + __blk_drain_queue(q, true); |
767 | + spin_unlock_irq(q->queue_lock); |
768 | +} |
769 | + |
770 | /** |
771 | * blk_queue_bypass_start - enter queue bypass mode |
772 | * @q: queue of interest |
773 | @@ -655,8 +662,6 @@ void blk_cleanup_queue(struct request_queue *q) |
774 | */ |
775 | blk_freeze_queue(q); |
776 | spin_lock_irq(lock); |
777 | - if (!q->mq_ops) |
778 | - __blk_drain_queue(q, true); |
779 | queue_flag_set(QUEUE_FLAG_DEAD, q); |
780 | spin_unlock_irq(lock); |
781 | |
782 | diff --git a/block/blk-mq.c b/block/blk-mq.c |
783 | index 98a18609755e..b60798a30ea2 100644 |
784 | --- a/block/blk-mq.c |
785 | +++ b/block/blk-mq.c |
786 | @@ -159,6 +159,8 @@ void blk_freeze_queue(struct request_queue *q) |
787 | * exported to drivers as the only user for unfreeze is blk_mq. |
788 | */ |
789 | blk_freeze_queue_start(q); |
790 | + if (!q->mq_ops) |
791 | + blk_drain_queue(q); |
792 | blk_mq_freeze_queue_wait(q); |
793 | } |
794 | |
795 | diff --git a/block/blk.h b/block/blk.h |
796 | index 85be8b232b37..b2c287c2c6a3 100644 |
797 | --- a/block/blk.h |
798 | +++ b/block/blk.h |
799 | @@ -362,4 +362,6 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) |
800 | } |
801 | #endif /* CONFIG_BOUNCE */ |
802 | |
803 | +extern void blk_drain_queue(struct request_queue *q); |
804 | + |
805 | #endif /* BLK_INTERNAL_H */ |
806 | diff --git a/crypto/af_alg.c b/crypto/af_alg.c |
807 | index 53b7fa4cf4ab..4e4640bb82b9 100644 |
808 | --- a/crypto/af_alg.c |
809 | +++ b/crypto/af_alg.c |
810 | @@ -693,7 +693,7 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq) |
811 | unsigned int i; |
812 | |
813 | list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) { |
814 | - ctx->rcvused -= rsgl->sg_num_bytes; |
815 | + atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused); |
816 | af_alg_free_sg(&rsgl->sgl); |
817 | list_del(&rsgl->list); |
818 | if (rsgl != &areq->first_rsgl) |
819 | @@ -1192,7 +1192,7 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, |
820 | |
821 | areq->last_rsgl = rsgl; |
822 | len += err; |
823 | - ctx->rcvused += err; |
824 | + atomic_add(err, &ctx->rcvused); |
825 | rsgl->sg_num_bytes = err; |
826 | iov_iter_advance(&msg->msg_iter, err); |
827 | } |
828 | diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c |
829 | index 782cb8fec323..f138af18b500 100644 |
830 | --- a/crypto/algif_aead.c |
831 | +++ b/crypto/algif_aead.c |
832 | @@ -571,7 +571,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) |
833 | INIT_LIST_HEAD(&ctx->tsgl_list); |
834 | ctx->len = len; |
835 | ctx->used = 0; |
836 | - ctx->rcvused = 0; |
837 | + atomic_set(&ctx->rcvused, 0); |
838 | ctx->more = 0; |
839 | ctx->merge = 0; |
840 | ctx->enc = 0; |
841 | diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c |
842 | index 7a3e663d54d5..90bc4e0f0785 100644 |
843 | --- a/crypto/algif_skcipher.c |
844 | +++ b/crypto/algif_skcipher.c |
845 | @@ -391,7 +391,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) |
846 | INIT_LIST_HEAD(&ctx->tsgl_list); |
847 | ctx->len = len; |
848 | ctx->used = 0; |
849 | - ctx->rcvused = 0; |
850 | + atomic_set(&ctx->rcvused, 0); |
851 | ctx->more = 0; |
852 | ctx->merge = 0; |
853 | ctx->enc = 0; |
854 | diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c |
855 | index 89ba9e85c0f3..4bcef78a08aa 100644 |
856 | --- a/drivers/crypto/inside-secure/safexcel.c |
857 | +++ b/drivers/crypto/inside-secure/safexcel.c |
858 | @@ -607,6 +607,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv |
859 | ndesc = ctx->handle_result(priv, ring, sreq->req, |
860 | &should_complete, &ret); |
861 | if (ndesc < 0) { |
862 | + kfree(sreq); |
863 | dev_err(priv->dev, "failed to handle result (%d)", ndesc); |
864 | return; |
865 | } |
866 | diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c |
867 | index 5438552bc6d7..fcc0a606d748 100644 |
868 | --- a/drivers/crypto/inside-secure/safexcel_cipher.c |
869 | +++ b/drivers/crypto/inside-secure/safexcel_cipher.c |
870 | @@ -14,6 +14,7 @@ |
871 | |
872 | #include <crypto/aes.h> |
873 | #include <crypto/skcipher.h> |
874 | +#include <crypto/internal/skcipher.h> |
875 | |
876 | #include "safexcel.h" |
877 | |
878 | @@ -33,6 +34,10 @@ struct safexcel_cipher_ctx { |
879 | unsigned int key_len; |
880 | }; |
881 | |
882 | +struct safexcel_cipher_req { |
883 | + bool needs_inv; |
884 | +}; |
885 | + |
886 | static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, |
887 | struct crypto_async_request *async, |
888 | struct safexcel_command_desc *cdesc, |
889 | @@ -126,9 +131,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, |
890 | return 0; |
891 | } |
892 | |
893 | -static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, |
894 | - struct crypto_async_request *async, |
895 | - bool *should_complete, int *ret) |
896 | +static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring, |
897 | + struct crypto_async_request *async, |
898 | + bool *should_complete, int *ret) |
899 | { |
900 | struct skcipher_request *req = skcipher_request_cast(async); |
901 | struct safexcel_result_desc *rdesc; |
902 | @@ -265,7 +270,6 @@ static int safexcel_aes_send(struct crypto_async_request *async, |
903 | spin_unlock_bh(&priv->ring[ring].egress_lock); |
904 | |
905 | request->req = &req->base; |
906 | - ctx->base.handle_result = safexcel_handle_result; |
907 | |
908 | *commands = n_cdesc; |
909 | *results = n_rdesc; |
910 | @@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, |
911 | |
912 | ring = safexcel_select_ring(priv); |
913 | ctx->base.ring = ring; |
914 | - ctx->base.needs_inv = false; |
915 | - ctx->base.send = safexcel_aes_send; |
916 | |
917 | spin_lock_bh(&priv->ring[ring].queue_lock); |
918 | enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); |
919 | @@ -359,6 +361,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, |
920 | return ndesc; |
921 | } |
922 | |
923 | +static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, |
924 | + struct crypto_async_request *async, |
925 | + bool *should_complete, int *ret) |
926 | +{ |
927 | + struct skcipher_request *req = skcipher_request_cast(async); |
928 | + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); |
929 | + int err; |
930 | + |
931 | + if (sreq->needs_inv) { |
932 | + sreq->needs_inv = false; |
933 | + err = safexcel_handle_inv_result(priv, ring, async, |
934 | + should_complete, ret); |
935 | + } else { |
936 | + err = safexcel_handle_req_result(priv, ring, async, |
937 | + should_complete, ret); |
938 | + } |
939 | + |
940 | + return err; |
941 | +} |
942 | + |
943 | static int safexcel_cipher_send_inv(struct crypto_async_request *async, |
944 | int ring, struct safexcel_request *request, |
945 | int *commands, int *results) |
946 | @@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async, |
947 | struct safexcel_crypto_priv *priv = ctx->priv; |
948 | int ret; |
949 | |
950 | - ctx->base.handle_result = safexcel_handle_inv_result; |
951 | - |
952 | ret = safexcel_invalidate_cache(async, &ctx->base, priv, |
953 | ctx->base.ctxr_dma, ring, request); |
954 | if (unlikely(ret)) |
955 | @@ -381,28 +401,46 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async, |
956 | return 0; |
957 | } |
958 | |
959 | +static int safexcel_send(struct crypto_async_request *async, |
960 | + int ring, struct safexcel_request *request, |
961 | + int *commands, int *results) |
962 | +{ |
963 | + struct skcipher_request *req = skcipher_request_cast(async); |
964 | + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); |
965 | + int ret; |
966 | + |
967 | + if (sreq->needs_inv) |
968 | + ret = safexcel_cipher_send_inv(async, ring, request, |
969 | + commands, results); |
970 | + else |
971 | + ret = safexcel_aes_send(async, ring, request, |
972 | + commands, results); |
973 | + return ret; |
974 | +} |
975 | + |
976 | static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm) |
977 | { |
978 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); |
979 | struct safexcel_crypto_priv *priv = ctx->priv; |
980 | - struct skcipher_request req; |
981 | + SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm)); |
982 | + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); |
983 | struct safexcel_inv_result result = {}; |
984 | int ring = ctx->base.ring; |
985 | |
986 | - memset(&req, 0, sizeof(struct skcipher_request)); |
987 | + memset(req, 0, sizeof(struct skcipher_request)); |
988 | |
989 | /* create invalidation request */ |
990 | init_completion(&result.completion); |
991 | - skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
992 | - safexcel_inv_complete, &result); |
993 | + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
994 | + safexcel_inv_complete, &result); |
995 | |
996 | - skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm)); |
997 | - ctx = crypto_tfm_ctx(req.base.tfm); |
998 | + skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm)); |
999 | + ctx = crypto_tfm_ctx(req->base.tfm); |
1000 | ctx->base.exit_inv = true; |
1001 | - ctx->base.send = safexcel_cipher_send_inv; |
1002 | + sreq->needs_inv = true; |
1003 | |
1004 | spin_lock_bh(&priv->ring[ring].queue_lock); |
1005 | - crypto_enqueue_request(&priv->ring[ring].queue, &req.base); |
1006 | + crypto_enqueue_request(&priv->ring[ring].queue, &req->base); |
1007 | spin_unlock_bh(&priv->ring[ring].queue_lock); |
1008 | |
1009 | if (!priv->ring[ring].need_dequeue) |
1010 | @@ -424,19 +462,21 @@ static int safexcel_aes(struct skcipher_request *req, |
1011 | enum safexcel_cipher_direction dir, u32 mode) |
1012 | { |
1013 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); |
1014 | + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); |
1015 | struct safexcel_crypto_priv *priv = ctx->priv; |
1016 | int ret, ring; |
1017 | |
1018 | + sreq->needs_inv = false; |
1019 | ctx->direction = dir; |
1020 | ctx->mode = mode; |
1021 | |
1022 | if (ctx->base.ctxr) { |
1023 | - if (ctx->base.needs_inv) |
1024 | - ctx->base.send = safexcel_cipher_send_inv; |
1025 | + if (ctx->base.needs_inv) { |
1026 | + sreq->needs_inv = true; |
1027 | + ctx->base.needs_inv = false; |
1028 | + } |
1029 | } else { |
1030 | ctx->base.ring = safexcel_select_ring(priv); |
1031 | - ctx->base.send = safexcel_aes_send; |
1032 | - |
1033 | ctx->base.ctxr = dma_pool_zalloc(priv->context_pool, |
1034 | EIP197_GFP_FLAGS(req->base), |
1035 | &ctx->base.ctxr_dma); |
1036 | @@ -476,6 +516,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) |
1037 | alg.skcipher.base); |
1038 | |
1039 | ctx->priv = tmpl->priv; |
1040 | + ctx->base.send = safexcel_send; |
1041 | + ctx->base.handle_result = safexcel_handle_result; |
1042 | + |
1043 | + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), |
1044 | + sizeof(struct safexcel_cipher_req)); |
1045 | |
1046 | return 0; |
1047 | } |
1048 | diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c |
1049 | index 0626b33d2886..d626aa485a76 100644 |
1050 | --- a/drivers/crypto/inside-secure/safexcel_hash.c |
1051 | +++ b/drivers/crypto/inside-secure/safexcel_hash.c |
1052 | @@ -32,6 +32,7 @@ struct safexcel_ahash_req { |
1053 | bool last_req; |
1054 | bool finish; |
1055 | bool hmac; |
1056 | + bool needs_inv; |
1057 | |
1058 | int nents; |
1059 | |
1060 | @@ -121,9 +122,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, |
1061 | } |
1062 | } |
1063 | |
1064 | -static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, |
1065 | - struct crypto_async_request *async, |
1066 | - bool *should_complete, int *ret) |
1067 | +static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring, |
1068 | + struct crypto_async_request *async, |
1069 | + bool *should_complete, int *ret) |
1070 | { |
1071 | struct safexcel_result_desc *rdesc; |
1072 | struct ahash_request *areq = ahash_request_cast(async); |
1073 | @@ -169,9 +170,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, |
1074 | return 1; |
1075 | } |
1076 | |
1077 | -static int safexcel_ahash_send(struct crypto_async_request *async, int ring, |
1078 | - struct safexcel_request *request, int *commands, |
1079 | - int *results) |
1080 | +static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, |
1081 | + struct safexcel_request *request, |
1082 | + int *commands, int *results) |
1083 | { |
1084 | struct ahash_request *areq = ahash_request_cast(async); |
1085 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); |
1086 | @@ -310,7 +311,6 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, |
1087 | |
1088 | req->processed += len; |
1089 | request->req = &areq->base; |
1090 | - ctx->base.handle_result = safexcel_handle_result; |
1091 | |
1092 | *commands = n_cdesc; |
1093 | *results = 1; |
1094 | @@ -394,8 +394,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, |
1095 | |
1096 | ring = safexcel_select_ring(priv); |
1097 | ctx->base.ring = ring; |
1098 | - ctx->base.needs_inv = false; |
1099 | - ctx->base.send = safexcel_ahash_send; |
1100 | |
1101 | spin_lock_bh(&priv->ring[ring].queue_lock); |
1102 | enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); |
1103 | @@ -412,6 +410,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, |
1104 | return 1; |
1105 | } |
1106 | |
1107 | +static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, |
1108 | + struct crypto_async_request *async, |
1109 | + bool *should_complete, int *ret) |
1110 | +{ |
1111 | + struct ahash_request *areq = ahash_request_cast(async); |
1112 | + struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
1113 | + int err; |
1114 | + |
1115 | + if (req->needs_inv) { |
1116 | + req->needs_inv = false; |
1117 | + err = safexcel_handle_inv_result(priv, ring, async, |
1118 | + should_complete, ret); |
1119 | + } else { |
1120 | + err = safexcel_handle_req_result(priv, ring, async, |
1121 | + should_complete, ret); |
1122 | + } |
1123 | + |
1124 | + return err; |
1125 | +} |
1126 | + |
1127 | static int safexcel_ahash_send_inv(struct crypto_async_request *async, |
1128 | int ring, struct safexcel_request *request, |
1129 | int *commands, int *results) |
1130 | @@ -420,7 +438,6 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async, |
1131 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); |
1132 | int ret; |
1133 | |
1134 | - ctx->base.handle_result = safexcel_handle_inv_result; |
1135 | ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv, |
1136 | ctx->base.ctxr_dma, ring, request); |
1137 | if (unlikely(ret)) |
1138 | @@ -432,28 +449,46 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async, |
1139 | return 0; |
1140 | } |
1141 | |
1142 | +static int safexcel_ahash_send(struct crypto_async_request *async, |
1143 | + int ring, struct safexcel_request *request, |
1144 | + int *commands, int *results) |
1145 | +{ |
1146 | + struct ahash_request *areq = ahash_request_cast(async); |
1147 | + struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
1148 | + int ret; |
1149 | + |
1150 | + if (req->needs_inv) |
1151 | + ret = safexcel_ahash_send_inv(async, ring, request, |
1152 | + commands, results); |
1153 | + else |
1154 | + ret = safexcel_ahash_send_req(async, ring, request, |
1155 | + commands, results); |
1156 | + return ret; |
1157 | +} |
1158 | + |
1159 | static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) |
1160 | { |
1161 | struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); |
1162 | struct safexcel_crypto_priv *priv = ctx->priv; |
1163 | - struct ahash_request req; |
1164 | + AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm)); |
1165 | + struct safexcel_ahash_req *rctx = ahash_request_ctx(req); |
1166 | struct safexcel_inv_result result = {}; |
1167 | int ring = ctx->base.ring; |
1168 | |
1169 | - memset(&req, 0, sizeof(struct ahash_request)); |
1170 | + memset(req, 0, sizeof(struct ahash_request)); |
1171 | |
1172 | /* create invalidation request */ |
1173 | init_completion(&result.completion); |
1174 | - ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
1175 | + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
1176 | safexcel_inv_complete, &result); |
1177 | |
1178 | - ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm)); |
1179 | - ctx = crypto_tfm_ctx(req.base.tfm); |
1180 | + ahash_request_set_tfm(req, __crypto_ahash_cast(tfm)); |
1181 | + ctx = crypto_tfm_ctx(req->base.tfm); |
1182 | ctx->base.exit_inv = true; |
1183 | - ctx->base.send = safexcel_ahash_send_inv; |
1184 | + rctx->needs_inv = true; |
1185 | |
1186 | spin_lock_bh(&priv->ring[ring].queue_lock); |
1187 | - crypto_enqueue_request(&priv->ring[ring].queue, &req.base); |
1188 | + crypto_enqueue_request(&priv->ring[ring].queue, &req->base); |
1189 | spin_unlock_bh(&priv->ring[ring].queue_lock); |
1190 | |
1191 | if (!priv->ring[ring].need_dequeue) |
1192 | @@ -501,14 +536,16 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq) |
1193 | struct safexcel_crypto_priv *priv = ctx->priv; |
1194 | int ret, ring; |
1195 | |
1196 | - ctx->base.send = safexcel_ahash_send; |
1197 | + req->needs_inv = false; |
1198 | |
1199 | if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) |
1200 | ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq); |
1201 | |
1202 | if (ctx->base.ctxr) { |
1203 | - if (ctx->base.needs_inv) |
1204 | - ctx->base.send = safexcel_ahash_send_inv; |
1205 | + if (ctx->base.needs_inv) { |
1206 | + ctx->base.needs_inv = false; |
1207 | + req->needs_inv = true; |
1208 | + } |
1209 | } else { |
1210 | ctx->base.ring = safexcel_select_ring(priv); |
1211 | ctx->base.ctxr = dma_pool_zalloc(priv->context_pool, |
1212 | @@ -642,6 +679,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm) |
1213 | struct safexcel_alg_template, alg.ahash); |
1214 | |
1215 | ctx->priv = tmpl->priv; |
1216 | + ctx->base.send = safexcel_ahash_send; |
1217 | + ctx->base.handle_result = safexcel_handle_result; |
1218 | |
1219 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
1220 | sizeof(struct safexcel_ahash_req)); |
1221 | diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c |
1222 | index 6775f2c74e25..c7568869284e 100644 |
1223 | --- a/drivers/dma/fsl-edma.c |
1224 | +++ b/drivers/dma/fsl-edma.c |
1225 | @@ -863,11 +863,11 @@ static void fsl_edma_irq_exit( |
1226 | } |
1227 | } |
1228 | |
1229 | -static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma) |
1230 | +static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks) |
1231 | { |
1232 | int i; |
1233 | |
1234 | - for (i = 0; i < DMAMUX_NR; i++) |
1235 | + for (i = 0; i < nr_clocks; i++) |
1236 | clk_disable_unprepare(fsl_edma->muxclk[i]); |
1237 | } |
1238 | |
1239 | @@ -904,25 +904,25 @@ static int fsl_edma_probe(struct platform_device *pdev) |
1240 | |
1241 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i); |
1242 | fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res); |
1243 | - if (IS_ERR(fsl_edma->muxbase[i])) |
1244 | + if (IS_ERR(fsl_edma->muxbase[i])) { |
1245 | + /* on error: disable all previously enabled clks */ |
1246 | + fsl_disable_clocks(fsl_edma, i); |
1247 | return PTR_ERR(fsl_edma->muxbase[i]); |
1248 | + } |
1249 | |
1250 | sprintf(clkname, "dmamux%d", i); |
1251 | fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname); |
1252 | if (IS_ERR(fsl_edma->muxclk[i])) { |
1253 | dev_err(&pdev->dev, "Missing DMAMUX block clock.\n"); |
1254 | + /* on error: disable all previously enabled clks */ |
1255 | + fsl_disable_clocks(fsl_edma, i); |
1256 | return PTR_ERR(fsl_edma->muxclk[i]); |
1257 | } |
1258 | |
1259 | ret = clk_prepare_enable(fsl_edma->muxclk[i]); |
1260 | - if (ret) { |
1261 | - /* disable only clks which were enabled on error */ |
1262 | - for (; i >= 0; i--) |
1263 | - clk_disable_unprepare(fsl_edma->muxclk[i]); |
1264 | - |
1265 | - dev_err(&pdev->dev, "DMAMUX clk block failed.\n"); |
1266 | - return ret; |
1267 | - } |
1268 | + if (ret) |
1269 | + /* on error: disable all previously enabled clks */ |
1270 | + fsl_disable_clocks(fsl_edma, i); |
1271 | |
1272 | } |
1273 | |
1274 | @@ -976,7 +976,7 @@ static int fsl_edma_probe(struct platform_device *pdev) |
1275 | if (ret) { |
1276 | dev_err(&pdev->dev, |
1277 | "Can't register Freescale eDMA engine. (%d)\n", ret); |
1278 | - fsl_disable_clocks(fsl_edma); |
1279 | + fsl_disable_clocks(fsl_edma, DMAMUX_NR); |
1280 | return ret; |
1281 | } |
1282 | |
1283 | @@ -985,7 +985,7 @@ static int fsl_edma_probe(struct platform_device *pdev) |
1284 | dev_err(&pdev->dev, |
1285 | "Can't register Freescale eDMA of_dma. (%d)\n", ret); |
1286 | dma_async_device_unregister(&fsl_edma->dma_dev); |
1287 | - fsl_disable_clocks(fsl_edma); |
1288 | + fsl_disable_clocks(fsl_edma, DMAMUX_NR); |
1289 | return ret; |
1290 | } |
1291 | |
1292 | @@ -1015,7 +1015,7 @@ static int fsl_edma_remove(struct platform_device *pdev) |
1293 | fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); |
1294 | of_dma_controller_free(np); |
1295 | dma_async_device_unregister(&fsl_edma->dma_dev); |
1296 | - fsl_disable_clocks(fsl_edma); |
1297 | + fsl_disable_clocks(fsl_edma, DMAMUX_NR); |
1298 | |
1299 | return 0; |
1300 | } |
1301 | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c |
1302 | index 46485692db48..059db50109bc 100644 |
1303 | --- a/drivers/gpu/drm/i915/intel_display.c |
1304 | +++ b/drivers/gpu/drm/i915/intel_display.c |
1305 | @@ -13240,7 +13240,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) |
1306 | primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); |
1307 | primary->check_plane = intel_check_primary_plane; |
1308 | |
1309 | - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { |
1310 | + if (INTEL_GEN(dev_priv) >= 10) { |
1311 | intel_primary_formats = skl_primary_formats; |
1312 | num_formats = ARRAY_SIZE(skl_primary_formats); |
1313 | modifiers = skl_format_modifiers_ccs; |
1314 | diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c |
1315 | index a4cb82495cee..245c946ea661 100644 |
1316 | --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c |
1317 | +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c |
1318 | @@ -136,6 +136,13 @@ nvkm_pci_init(struct nvkm_subdev *subdev) |
1319 | return ret; |
1320 | |
1321 | pci->irq = pdev->irq; |
1322 | + |
1323 | + /* Ensure MSI interrupts are armed, for the case where there are |
1324 | + * already interrupts pending (for whatever reason) at load time. |
1325 | + */ |
1326 | + if (pci->msi) |
1327 | + pci->func->msi_rearm(pci); |
1328 | + |
1329 | return ret; |
1330 | } |
1331 | |
1332 | diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c |
1333 | index 871599826773..91f9263f3c3b 100644 |
1334 | --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c |
1335 | +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c |
1336 | @@ -821,6 +821,8 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) |
1337 | pr_info("Initializing pool allocator\n"); |
1338 | |
1339 | _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); |
1340 | + if (!_manager) |
1341 | + return -ENOMEM; |
1342 | |
1343 | ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc"); |
1344 | |
1345 | diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h |
1346 | index a1d687a664f8..66f0268f37a6 100644 |
1347 | --- a/drivers/infiniband/core/core_priv.h |
1348 | +++ b/drivers/infiniband/core/core_priv.h |
1349 | @@ -314,7 +314,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map, |
1350 | } |
1351 | #endif |
1352 | |
1353 | -struct ib_device *__ib_device_get_by_index(u32 ifindex); |
1354 | +struct ib_device *ib_device_get_by_index(u32 ifindex); |
1355 | /* RDMA device netlink */ |
1356 | void nldev_init(void); |
1357 | void nldev_exit(void); |
1358 | diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c |
1359 | index b4b28ff8b7dc..d7d042a20ab4 100644 |
1360 | --- a/drivers/infiniband/core/device.c |
1361 | +++ b/drivers/infiniband/core/device.c |
1362 | @@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device) |
1363 | return 0; |
1364 | } |
1365 | |
1366 | -struct ib_device *__ib_device_get_by_index(u32 index) |
1367 | +static struct ib_device *__ib_device_get_by_index(u32 index) |
1368 | { |
1369 | struct ib_device *device; |
1370 | |
1371 | @@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index) |
1372 | return NULL; |
1373 | } |
1374 | |
1375 | +/* |
1376 | + * Caller is responsible to return refrerence count by calling put_device() |
1377 | + */ |
1378 | +struct ib_device *ib_device_get_by_index(u32 index) |
1379 | +{ |
1380 | + struct ib_device *device; |
1381 | + |
1382 | + down_read(&lists_rwsem); |
1383 | + device = __ib_device_get_by_index(index); |
1384 | + if (device) |
1385 | + get_device(&device->dev); |
1386 | + |
1387 | + up_read(&lists_rwsem); |
1388 | + return device; |
1389 | +} |
1390 | + |
1391 | static struct ib_device *__ib_device_get_by_name(const char *name) |
1392 | { |
1393 | struct ib_device *device; |
1394 | diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c |
1395 | index 9a05245a1acf..0dcd1aa6f683 100644 |
1396 | --- a/drivers/infiniband/core/nldev.c |
1397 | +++ b/drivers/infiniband/core/nldev.c |
1398 | @@ -142,27 +142,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, |
1399 | |
1400 | index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); |
1401 | |
1402 | - device = __ib_device_get_by_index(index); |
1403 | + device = ib_device_get_by_index(index); |
1404 | if (!device) |
1405 | return -EINVAL; |
1406 | |
1407 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
1408 | - if (!msg) |
1409 | - return -ENOMEM; |
1410 | + if (!msg) { |
1411 | + err = -ENOMEM; |
1412 | + goto err; |
1413 | + } |
1414 | |
1415 | nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, |
1416 | RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), |
1417 | 0, 0); |
1418 | |
1419 | err = fill_dev_info(msg, device); |
1420 | - if (err) { |
1421 | - nlmsg_free(msg); |
1422 | - return err; |
1423 | - } |
1424 | + if (err) |
1425 | + goto err_free; |
1426 | |
1427 | nlmsg_end(msg, nlh); |
1428 | |
1429 | + put_device(&device->dev); |
1430 | return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); |
1431 | + |
1432 | +err_free: |
1433 | + nlmsg_free(msg); |
1434 | +err: |
1435 | + put_device(&device->dev); |
1436 | + return err; |
1437 | } |
1438 | |
1439 | static int _nldev_get_dumpit(struct ib_device *device, |
1440 | @@ -220,31 +227,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, |
1441 | return -EINVAL; |
1442 | |
1443 | index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); |
1444 | - device = __ib_device_get_by_index(index); |
1445 | + device = ib_device_get_by_index(index); |
1446 | if (!device) |
1447 | return -EINVAL; |
1448 | |
1449 | port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); |
1450 | - if (!rdma_is_port_valid(device, port)) |
1451 | - return -EINVAL; |
1452 | + if (!rdma_is_port_valid(device, port)) { |
1453 | + err = -EINVAL; |
1454 | + goto err; |
1455 | + } |
1456 | |
1457 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
1458 | - if (!msg) |
1459 | - return -ENOMEM; |
1460 | + if (!msg) { |
1461 | + err = -ENOMEM; |
1462 | + goto err; |
1463 | + } |
1464 | |
1465 | nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, |
1466 | RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), |
1467 | 0, 0); |
1468 | |
1469 | err = fill_port_info(msg, device, port); |
1470 | - if (err) { |
1471 | - nlmsg_free(msg); |
1472 | - return err; |
1473 | - } |
1474 | + if (err) |
1475 | + goto err_free; |
1476 | |
1477 | nlmsg_end(msg, nlh); |
1478 | + put_device(&device->dev); |
1479 | |
1480 | return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); |
1481 | + |
1482 | +err_free: |
1483 | + nlmsg_free(msg); |
1484 | +err: |
1485 | + put_device(&device->dev); |
1486 | + return err; |
1487 | } |
1488 | |
1489 | static int nldev_port_get_dumpit(struct sk_buff *skb, |
1490 | @@ -265,7 +281,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb, |
1491 | return -EINVAL; |
1492 | |
1493 | ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); |
1494 | - device = __ib_device_get_by_index(ifindex); |
1495 | + device = ib_device_get_by_index(ifindex); |
1496 | if (!device) |
1497 | return -EINVAL; |
1498 | |
1499 | @@ -299,7 +315,9 @@ static int nldev_port_get_dumpit(struct sk_buff *skb, |
1500 | nlmsg_end(skb, nlh); |
1501 | } |
1502 | |
1503 | -out: cb->args[0] = idx; |
1504 | +out: |
1505 | + put_device(&device->dev); |
1506 | + cb->args[0] = idx; |
1507 | return skb->len; |
1508 | } |
1509 | |
1510 | diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c |
1511 | index e6f77f63da75..e80a7f764a74 100644 |
1512 | --- a/drivers/infiniband/hw/mlx4/mr.c |
1513 | +++ b/drivers/infiniband/hw/mlx4/mr.c |
1514 | @@ -406,7 +406,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, |
1515 | goto err_free_mr; |
1516 | |
1517 | mr->max_pages = max_num_sg; |
1518 | - |
1519 | err = mlx4_mr_enable(dev->dev, &mr->mmr); |
1520 | if (err) |
1521 | goto err_free_pl; |
1522 | @@ -417,6 +416,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, |
1523 | return &mr->ibmr; |
1524 | |
1525 | err_free_pl: |
1526 | + mr->ibmr.device = pd->device; |
1527 | mlx4_free_priv_pages(mr); |
1528 | err_free_mr: |
1529 | (void) mlx4_mr_free(dev->dev, &mr->mmr); |
1530 | diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c |
1531 | index 37bbc543847a..231b043e2806 100644 |
1532 | --- a/drivers/infiniband/hw/mlx5/mr.c |
1533 | +++ b/drivers/infiniband/hw/mlx5/mr.c |
1534 | @@ -1637,6 +1637,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, |
1535 | MLX5_SET(mkc, mkc, access_mode, mr->access_mode); |
1536 | MLX5_SET(mkc, mkc, umr_en, 1); |
1537 | |
1538 | + mr->ibmr.device = pd->device; |
1539 | err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); |
1540 | if (err) |
1541 | goto err_destroy_psv; |
1542 | diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c |
1543 | index ed34d5a581fa..d7162f2b7979 100644 |
1544 | --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c |
1545 | +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c |
1546 | @@ -406,6 +406,13 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp) |
1547 | atomic_dec(&qp->refcnt); |
1548 | wait_event(qp->wait, !atomic_read(&qp->refcnt)); |
1549 | |
1550 | + if (!qp->is_kernel) { |
1551 | + if (qp->rumem) |
1552 | + ib_umem_release(qp->rumem); |
1553 | + if (qp->sumem) |
1554 | + ib_umem_release(qp->sumem); |
1555 | + } |
1556 | + |
1557 | pvrdma_page_dir_cleanup(dev, &qp->pdir); |
1558 | |
1559 | kfree(qp); |
1560 | diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c |
1561 | index dcc77014018d..f6935811ef3f 100644 |
1562 | --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c |
1563 | +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c |
1564 | @@ -903,8 +903,8 @@ static int path_rec_start(struct net_device *dev, |
1565 | return 0; |
1566 | } |
1567 | |
1568 | -static void neigh_add_path(struct sk_buff *skb, u8 *daddr, |
1569 | - struct net_device *dev) |
1570 | +static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr, |
1571 | + struct net_device *dev) |
1572 | { |
1573 | struct ipoib_dev_priv *priv = ipoib_priv(dev); |
1574 | struct rdma_netdev *rn = netdev_priv(dev); |
1575 | @@ -918,7 +918,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, |
1576 | spin_unlock_irqrestore(&priv->lock, flags); |
1577 | ++dev->stats.tx_dropped; |
1578 | dev_kfree_skb_any(skb); |
1579 | - return; |
1580 | + return NULL; |
1581 | + } |
1582 | + |
1583 | + /* To avoid race condition, make sure that the |
1584 | + * neigh will be added only once. |
1585 | + */ |
1586 | + if (unlikely(!list_empty(&neigh->list))) { |
1587 | + spin_unlock_irqrestore(&priv->lock, flags); |
1588 | + return neigh; |
1589 | } |
1590 | |
1591 | path = __path_find(dev, daddr + 4); |
1592 | @@ -957,7 +965,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, |
1593 | path->ah->last_send = rn->send(dev, skb, path->ah->ah, |
1594 | IPOIB_QPN(daddr)); |
1595 | ipoib_neigh_put(neigh); |
1596 | - return; |
1597 | + return NULL; |
1598 | } |
1599 | } else { |
1600 | neigh->ah = NULL; |
1601 | @@ -974,7 +982,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, |
1602 | |
1603 | spin_unlock_irqrestore(&priv->lock, flags); |
1604 | ipoib_neigh_put(neigh); |
1605 | - return; |
1606 | + return NULL; |
1607 | |
1608 | err_path: |
1609 | ipoib_neigh_free(neigh); |
1610 | @@ -984,6 +992,8 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, |
1611 | |
1612 | spin_unlock_irqrestore(&priv->lock, flags); |
1613 | ipoib_neigh_put(neigh); |
1614 | + |
1615 | + return NULL; |
1616 | } |
1617 | |
1618 | static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, |
1619 | @@ -1092,8 +1102,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1620 | case htons(ETH_P_TIPC): |
1621 | neigh = ipoib_neigh_get(dev, phdr->hwaddr); |
1622 | if (unlikely(!neigh)) { |
1623 | - neigh_add_path(skb, phdr->hwaddr, dev); |
1624 | - return NETDEV_TX_OK; |
1625 | + neigh = neigh_add_path(skb, phdr->hwaddr, dev); |
1626 | + if (likely(!neigh)) |
1627 | + return NETDEV_TX_OK; |
1628 | } |
1629 | break; |
1630 | case htons(ETH_P_ARP): |
1631 | diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c |
1632 | index 93e149efc1f5..9b3f47ae2016 100644 |
1633 | --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c |
1634 | +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c |
1635 | @@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) |
1636 | spin_lock_irqsave(&priv->lock, flags); |
1637 | if (!neigh) { |
1638 | neigh = ipoib_neigh_alloc(daddr, dev); |
1639 | - if (neigh) { |
1640 | + /* Make sure that the neigh will be added only |
1641 | + * once to mcast list. |
1642 | + */ |
1643 | + if (neigh && list_empty(&neigh->list)) { |
1644 | kref_get(&mcast->ah->ref); |
1645 | neigh->ah = mcast->ah; |
1646 | list_add_tail(&neigh->list, &mcast->neigh_list); |
1647 | diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c |
1648 | index 6bf56bb5f8d9..d91f3b1c5375 100644 |
1649 | --- a/drivers/input/misc/xen-kbdfront.c |
1650 | +++ b/drivers/input/misc/xen-kbdfront.c |
1651 | @@ -326,8 +326,6 @@ static int xenkbd_probe(struct xenbus_device *dev, |
1652 | 0, width, 0, 0); |
1653 | input_set_abs_params(mtouch, ABS_MT_POSITION_Y, |
1654 | 0, height, 0, 0); |
1655 | - input_set_abs_params(mtouch, ABS_MT_PRESSURE, |
1656 | - 0, 255, 0, 0); |
1657 | |
1658 | ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT); |
1659 | if (ret) { |
1660 | diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c |
1661 | index ef1360445413..9ce6b32f52a1 100644 |
1662 | --- a/drivers/leds/led-core.c |
1663 | +++ b/drivers/leds/led-core.c |
1664 | @@ -189,6 +189,7 @@ void led_blink_set(struct led_classdev *led_cdev, |
1665 | { |
1666 | del_timer_sync(&led_cdev->blink_timer); |
1667 | |
1668 | + clear_bit(LED_BLINK_SW, &led_cdev->work_flags); |
1669 | clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags); |
1670 | clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); |
1671 | |
1672 | diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c |
1673 | index edf24c148fa6..2a978d9832a7 100644 |
1674 | --- a/drivers/mtd/nand/brcmnand/brcmnand.c |
1675 | +++ b/drivers/mtd/nand/brcmnand/brcmnand.c |
1676 | @@ -1763,7 +1763,7 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, |
1677 | err = brcmstb_nand_verify_erased_page(mtd, chip, buf, |
1678 | addr); |
1679 | /* erased page bitflips corrected */ |
1680 | - if (err > 0) |
1681 | + if (err >= 0) |
1682 | return err; |
1683 | } |
1684 | |
1685 | diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c |
1686 | index 50f8d4a1b983..d4d824ef64e9 100644 |
1687 | --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c |
1688 | +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c |
1689 | @@ -1067,9 +1067,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, |
1690 | return ret; |
1691 | } |
1692 | |
1693 | - /* handle the block mark swapping */ |
1694 | - block_mark_swapping(this, payload_virt, auxiliary_virt); |
1695 | - |
1696 | /* Loop over status bytes, accumulating ECC status. */ |
1697 | status = auxiliary_virt + nfc_geo->auxiliary_status_offset; |
1698 | |
1699 | @@ -1158,6 +1155,9 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, |
1700 | max_bitflips = max_t(unsigned int, max_bitflips, *status); |
1701 | } |
1702 | |
1703 | + /* handle the block mark swapping */ |
1704 | + block_mark_swapping(this, buf, auxiliary_virt); |
1705 | + |
1706 | if (oob_required) { |
1707 | /* |
1708 | * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() |
1709 | diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c |
1710 | index c4d1140116ea..ed8a2a7ce500 100644 |
1711 | --- a/drivers/net/can/flexcan.c |
1712 | +++ b/drivers/net/can/flexcan.c |
1713 | @@ -526,7 +526,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1714 | data = be32_to_cpup((__be32 *)&cf->data[0]); |
1715 | flexcan_write(data, &priv->tx_mb->data[0]); |
1716 | } |
1717 | - if (cf->can_dlc > 3) { |
1718 | + if (cf->can_dlc > 4) { |
1719 | data = be32_to_cpup((__be32 *)&cf->data[4]); |
1720 | flexcan_write(data, &priv->tx_mb->data[1]); |
1721 | } |
1722 | diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
1723 | index c6bd5e24005d..67df5053dc30 100644 |
1724 | --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c |
1725 | +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
1726 | @@ -1565,7 +1565,7 @@ static int ena_rss_configure(struct ena_adapter *adapter) |
1727 | |
1728 | static int ena_up_complete(struct ena_adapter *adapter) |
1729 | { |
1730 | - int rc, i; |
1731 | + int rc; |
1732 | |
1733 | rc = ena_rss_configure(adapter); |
1734 | if (rc) |
1735 | @@ -1584,17 +1584,6 @@ static int ena_up_complete(struct ena_adapter *adapter) |
1736 | |
1737 | ena_napi_enable_all(adapter); |
1738 | |
1739 | - /* Enable completion queues interrupt */ |
1740 | - for (i = 0; i < adapter->num_queues; i++) |
1741 | - ena_unmask_interrupt(&adapter->tx_ring[i], |
1742 | - &adapter->rx_ring[i]); |
1743 | - |
1744 | - /* schedule napi in case we had pending packets |
1745 | - * from the last time we disable napi |
1746 | - */ |
1747 | - for (i = 0; i < adapter->num_queues; i++) |
1748 | - napi_schedule(&adapter->ena_napi[i].napi); |
1749 | - |
1750 | return 0; |
1751 | } |
1752 | |
1753 | @@ -1731,7 +1720,7 @@ static int ena_create_all_io_rx_queues(struct ena_adapter *adapter) |
1754 | |
1755 | static int ena_up(struct ena_adapter *adapter) |
1756 | { |
1757 | - int rc; |
1758 | + int rc, i; |
1759 | |
1760 | netdev_dbg(adapter->netdev, "%s\n", __func__); |
1761 | |
1762 | @@ -1774,6 +1763,17 @@ static int ena_up(struct ena_adapter *adapter) |
1763 | |
1764 | set_bit(ENA_FLAG_DEV_UP, &adapter->flags); |
1765 | |
1766 | + /* Enable completion queues interrupt */ |
1767 | + for (i = 0; i < adapter->num_queues; i++) |
1768 | + ena_unmask_interrupt(&adapter->tx_ring[i], |
1769 | + &adapter->rx_ring[i]); |
1770 | + |
1771 | + /* schedule napi in case we had pending packets |
1772 | + * from the last time we disable napi |
1773 | + */ |
1774 | + for (i = 0; i < adapter->num_queues; i++) |
1775 | + napi_schedule(&adapter->ena_napi[i].napi); |
1776 | + |
1777 | return rc; |
1778 | |
1779 | err_up: |
1780 | diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h |
1781 | index 0207927dc8a6..4ebd53b3c7da 100644 |
1782 | --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h |
1783 | +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h |
1784 | @@ -85,7 +85,9 @@ struct aq_hw_ops { |
1785 | void (*destroy)(struct aq_hw_s *self); |
1786 | |
1787 | int (*get_hw_caps)(struct aq_hw_s *self, |
1788 | - struct aq_hw_caps_s *aq_hw_caps); |
1789 | + struct aq_hw_caps_s *aq_hw_caps, |
1790 | + unsigned short device, |
1791 | + unsigned short subsystem_device); |
1792 | |
1793 | int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, |
1794 | unsigned int frags); |
1795 | diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c |
1796 | index 483e97691eea..c93e5613d4cc 100644 |
1797 | --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c |
1798 | +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c |
1799 | @@ -222,7 +222,7 @@ static struct net_device *aq_nic_ndev_alloc(void) |
1800 | |
1801 | struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, |
1802 | const struct ethtool_ops *et_ops, |
1803 | - struct device *dev, |
1804 | + struct pci_dev *pdev, |
1805 | struct aq_pci_func_s *aq_pci_func, |
1806 | unsigned int port, |
1807 | const struct aq_hw_ops *aq_hw_ops) |
1808 | @@ -242,7 +242,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, |
1809 | ndev->netdev_ops = ndev_ops; |
1810 | ndev->ethtool_ops = et_ops; |
1811 | |
1812 | - SET_NETDEV_DEV(ndev, dev); |
1813 | + SET_NETDEV_DEV(ndev, &pdev->dev); |
1814 | |
1815 | ndev->if_port = port; |
1816 | self->ndev = ndev; |
1817 | @@ -254,7 +254,8 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, |
1818 | |
1819 | self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, |
1820 | &self->aq_hw_ops); |
1821 | - err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps); |
1822 | + err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps, |
1823 | + pdev->device, pdev->subsystem_device); |
1824 | if (err < 0) |
1825 | goto err_exit; |
1826 | |
1827 | diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h |
1828 | index 4309983acdd6..3c9f8db03d5f 100644 |
1829 | --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h |
1830 | +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h |
1831 | @@ -71,7 +71,7 @@ struct aq_nic_cfg_s { |
1832 | |
1833 | struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, |
1834 | const struct ethtool_ops *et_ops, |
1835 | - struct device *dev, |
1836 | + struct pci_dev *pdev, |
1837 | struct aq_pci_func_s *aq_pci_func, |
1838 | unsigned int port, |
1839 | const struct aq_hw_ops *aq_hw_ops); |
1840 | diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c |
1841 | index cadaa646c89f..58c29d04b186 100644 |
1842 | --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c |
1843 | +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c |
1844 | @@ -51,7 +51,8 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops, |
1845 | pci_set_drvdata(pdev, self); |
1846 | self->pdev = pdev; |
1847 | |
1848 | - err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps); |
1849 | + err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device, |
1850 | + pdev->subsystem_device); |
1851 | if (err < 0) |
1852 | goto err_exit; |
1853 | |
1854 | @@ -59,7 +60,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops, |
1855 | |
1856 | for (port = 0; port < self->ports; ++port) { |
1857 | struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, |
1858 | - &pdev->dev, self, |
1859 | + pdev, self, |
1860 | port, aq_hw_ops); |
1861 | |
1862 | if (!aq_nic) { |
1863 | diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c |
1864 | index 07b3c49a16a4..b0abd187cead 100644 |
1865 | --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c |
1866 | +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c |
1867 | @@ -18,9 +18,20 @@ |
1868 | #include "hw_atl_a0_internal.h" |
1869 | |
1870 | static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, |
1871 | - struct aq_hw_caps_s *aq_hw_caps) |
1872 | + struct aq_hw_caps_s *aq_hw_caps, |
1873 | + unsigned short device, |
1874 | + unsigned short subsystem_device) |
1875 | { |
1876 | memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); |
1877 | + |
1878 | + if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001) |
1879 | + aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G; |
1880 | + |
1881 | + if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) { |
1882 | + aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G; |
1883 | + aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G; |
1884 | + } |
1885 | + |
1886 | return 0; |
1887 | } |
1888 | |
1889 | diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c |
1890 | index ec68c20efcbd..36fddb199160 100644 |
1891 | --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c |
1892 | +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c |
1893 | @@ -16,11 +16,23 @@ |
1894 | #include "hw_atl_utils.h" |
1895 | #include "hw_atl_llh.h" |
1896 | #include "hw_atl_b0_internal.h" |
1897 | +#include "hw_atl_llh_internal.h" |
1898 | |
1899 | static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, |
1900 | - struct aq_hw_caps_s *aq_hw_caps) |
1901 | + struct aq_hw_caps_s *aq_hw_caps, |
1902 | + unsigned short device, |
1903 | + unsigned short subsystem_device) |
1904 | { |
1905 | memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); |
1906 | + |
1907 | + if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001) |
1908 | + aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G; |
1909 | + |
1910 | + if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) { |
1911 | + aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G; |
1912 | + aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G; |
1913 | + } |
1914 | + |
1915 | return 0; |
1916 | } |
1917 | |
1918 | @@ -357,6 +369,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, |
1919 | }; |
1920 | |
1921 | int err = 0; |
1922 | + u32 val; |
1923 | |
1924 | self->aq_nic_cfg = aq_nic_cfg; |
1925 | |
1926 | @@ -374,6 +387,16 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, |
1927 | hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); |
1928 | hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); |
1929 | |
1930 | + /* Force limit MRRS on RDM/TDM to 2K */ |
1931 | + val = aq_hw_read_reg(self, pci_reg_control6_adr); |
1932 | + aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404); |
1933 | + |
1934 | + /* TX DMA total request limit. B0 hardware is not capable to |
1935 | + * handle more than (8K-MRRS) incoming DMA data. |
1936 | + * Value 24 in 256byte units |
1937 | + */ |
1938 | + aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24); |
1939 | + |
1940 | err = aq_hw_err_from_flags(self); |
1941 | if (err < 0) |
1942 | goto err_exit; |
1943 | diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h |
1944 | index 5527fc0e5942..93450ec930e8 100644 |
1945 | --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h |
1946 | +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h |
1947 | @@ -2343,6 +2343,9 @@ |
1948 | #define tx_dma_desc_base_addrmsw_adr(descriptor) \ |
1949 | (0x00007c04u + (descriptor) * 0x40) |
1950 | |
1951 | +/* tx dma total request limit */ |
1952 | +#define tx_dma_total_req_limit_adr 0x00007b20u |
1953 | + |
1954 | /* tx interrupt moderation control register definitions |
1955 | * Preprocessor definitions for TX Interrupt Moderation Control Register |
1956 | * Base Address: 0x00008980 |
1957 | @@ -2369,6 +2372,9 @@ |
1958 | /* default value of bitfield reg_res_dsbl */ |
1959 | #define pci_reg_res_dsbl_default 0x1 |
1960 | |
1961 | +/* PCI core control register */ |
1962 | +#define pci_reg_control6_adr 0x1014u |
1963 | + |
1964 | /* global microprocessor scratch pad definitions */ |
1965 | #define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) |
1966 | |
1967 | diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c |
1968 | index 3241af1ce718..5b422be56165 100644 |
1969 | --- a/drivers/net/ethernet/arc/emac_main.c |
1970 | +++ b/drivers/net/ethernet/arc/emac_main.c |
1971 | @@ -210,39 +210,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget) |
1972 | continue; |
1973 | } |
1974 | |
1975 | - pktlen = info & LEN_MASK; |
1976 | - stats->rx_packets++; |
1977 | - stats->rx_bytes += pktlen; |
1978 | - skb = rx_buff->skb; |
1979 | - skb_put(skb, pktlen); |
1980 | - skb->dev = ndev; |
1981 | - skb->protocol = eth_type_trans(skb, ndev); |
1982 | - |
1983 | - dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), |
1984 | - dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); |
1985 | - |
1986 | - /* Prepare the BD for next cycle */ |
1987 | - rx_buff->skb = netdev_alloc_skb_ip_align(ndev, |
1988 | - EMAC_BUFFER_SIZE); |
1989 | - if (unlikely(!rx_buff->skb)) { |
1990 | + /* Prepare the BD for next cycle. netif_receive_skb() |
1991 | + * only if new skb was allocated and mapped to avoid holes |
1992 | + * in the RX fifo. |
1993 | + */ |
1994 | + skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE); |
1995 | + if (unlikely(!skb)) { |
1996 | + if (net_ratelimit()) |
1997 | + netdev_err(ndev, "cannot allocate skb\n"); |
1998 | + /* Return ownership to EMAC */ |
1999 | + rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); |
2000 | stats->rx_errors++; |
2001 | - /* Because receive_skb is below, increment rx_dropped */ |
2002 | stats->rx_dropped++; |
2003 | continue; |
2004 | } |
2005 | |
2006 | - /* receive_skb only if new skb was allocated to avoid holes */ |
2007 | - netif_receive_skb(skb); |
2008 | - |
2009 | - addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, |
2010 | + addr = dma_map_single(&ndev->dev, (void *)skb->data, |
2011 | EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); |
2012 | if (dma_mapping_error(&ndev->dev, addr)) { |
2013 | if (net_ratelimit()) |
2014 | - netdev_err(ndev, "cannot dma map\n"); |
2015 | - dev_kfree_skb(rx_buff->skb); |
2016 | + netdev_err(ndev, "cannot map dma buffer\n"); |
2017 | + dev_kfree_skb(skb); |
2018 | + /* Return ownership to EMAC */ |
2019 | + rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); |
2020 | stats->rx_errors++; |
2021 | + stats->rx_dropped++; |
2022 | continue; |
2023 | } |
2024 | + |
2025 | + /* unmap previosly mapped skb */ |
2026 | + dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), |
2027 | + dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); |
2028 | + |
2029 | + pktlen = info & LEN_MASK; |
2030 | + stats->rx_packets++; |
2031 | + stats->rx_bytes += pktlen; |
2032 | + skb_put(rx_buff->skb, pktlen); |
2033 | + rx_buff->skb->dev = ndev; |
2034 | + rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev); |
2035 | + |
2036 | + netif_receive_skb(rx_buff->skb); |
2037 | + |
2038 | + rx_buff->skb = skb; |
2039 | dma_unmap_addr_set(rx_buff, addr, addr); |
2040 | dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); |
2041 | |
2042 | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
2043 | index 1216c1f1e052..6465414dad74 100644 |
2044 | --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
2045 | +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
2046 | @@ -3030,7 +3030,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) |
2047 | |
2048 | del_timer_sync(&bp->timer); |
2049 | |
2050 | - if (IS_PF(bp)) { |
2051 | + if (IS_PF(bp) && !BP_NOMCP(bp)) { |
2052 | /* Set ALWAYS_ALIVE bit in shmem */ |
2053 | bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; |
2054 | bnx2x_drv_pulse(bp); |
2055 | @@ -3116,7 +3116,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) |
2056 | bp->cnic_loaded = false; |
2057 | |
2058 | /* Clear driver version indication in shmem */ |
2059 | - if (IS_PF(bp)) |
2060 | + if (IS_PF(bp) && !BP_NOMCP(bp)) |
2061 | bnx2x_update_mng_version(bp); |
2062 | |
2063 | /* Check if there are pending parity attentions. If there are - set |
2064 | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c |
2065 | index c12b4d3e946e..e855a271db48 100644 |
2066 | --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c |
2067 | +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c |
2068 | @@ -9578,6 +9578,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp) |
2069 | |
2070 | do { |
2071 | bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); |
2072 | + |
2073 | + /* If we read all 0xFFs, means we are in PCI error state and |
2074 | + * should bail out to avoid crashes on adapter's FW reads. |
2075 | + */ |
2076 | + if (bp->common.shmem_base == 0xFFFFFFFF) { |
2077 | + bp->flags |= NO_MCP_FLAG; |
2078 | + return -ENODEV; |
2079 | + } |
2080 | + |
2081 | if (bp->common.shmem_base) { |
2082 | val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); |
2083 | if (val & SHR_MEM_VALIDITY_MB) |
2084 | @@ -14315,7 +14324,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) |
2085 | BNX2X_ERR("IO slot reset --> driver unload\n"); |
2086 | |
2087 | /* MCP should have been reset; Need to wait for validity */ |
2088 | - bnx2x_init_shmem(bp); |
2089 | + if (bnx2x_init_shmem(bp)) { |
2090 | + rtnl_unlock(); |
2091 | + return PCI_ERS_RESULT_DISCONNECT; |
2092 | + } |
2093 | |
2094 | if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { |
2095 | u32 v; |
2096 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c |
2097 | index 5ee18660bc33..c9617675f934 100644 |
2098 | --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c |
2099 | +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c |
2100 | @@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) |
2101 | netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); |
2102 | return -EINVAL; |
2103 | } |
2104 | - if (vf_id >= bp->pf.max_vfs) { |
2105 | + if (vf_id >= bp->pf.active_vfs) { |
2106 | netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); |
2107 | return -EINVAL; |
2108 | } |
2109 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c |
2110 | index 7dd3d131043a..6a185344b378 100644 |
2111 | --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c |
2112 | +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c |
2113 | @@ -327,7 +327,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, |
2114 | } |
2115 | |
2116 | /* If all IP and L4 fields are wildcarded then this is an L2 flow */ |
2117 | - if (is_wildcard(&l3_mask, sizeof(l3_mask)) && |
2118 | + if (is_wildcard(l3_mask, sizeof(*l3_mask)) && |
2119 | is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) { |
2120 | flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2; |
2121 | } else { |
2122 | diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c |
2123 | index aef3fcf2f5b9..48738eb27806 100644 |
2124 | --- a/drivers/net/ethernet/broadcom/tg3.c |
2125 | +++ b/drivers/net/ethernet/broadcom/tg3.c |
2126 | @@ -10052,6 +10052,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) |
2127 | |
2128 | tw32(GRC_MODE, tp->grc_mode | val); |
2129 | |
2130 | + /* On one of the AMD platform, MRRS is restricted to 4000 because of |
2131 | + * south bridge limitation. As a workaround, Driver is setting MRRS |
2132 | + * to 2048 instead of default 4096. |
2133 | + */ |
2134 | + if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && |
2135 | + tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) { |
2136 | + val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK; |
2137 | + tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048); |
2138 | + } |
2139 | + |
2140 | /* Setup the timer prescalar register. Clock is always 66Mhz. */ |
2141 | val = tr32(GRC_MISC_CFG); |
2142 | val &= ~0xff; |
2143 | @@ -14229,7 +14239,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) |
2144 | */ |
2145 | if (tg3_asic_rev(tp) == ASIC_REV_57766 || |
2146 | tg3_asic_rev(tp) == ASIC_REV_5717 || |
2147 | - tg3_asic_rev(tp) == ASIC_REV_5719) |
2148 | + tg3_asic_rev(tp) == ASIC_REV_5719 || |
2149 | + tg3_asic_rev(tp) == ASIC_REV_5720) |
2150 | reset_phy = true; |
2151 | |
2152 | err = tg3_restart_hw(tp, reset_phy); |
2153 | diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h |
2154 | index c2d02d02d1e6..b057f71aed48 100644 |
2155 | --- a/drivers/net/ethernet/broadcom/tg3.h |
2156 | +++ b/drivers/net/ethernet/broadcom/tg3.h |
2157 | @@ -96,6 +96,7 @@ |
2158 | #define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106 |
2159 | #define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109 |
2160 | #define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a |
2161 | +#define TG3PCI_SUBDEVICE_ID_DELL_5762 0x07f0 |
2162 | #define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ |
2163 | #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c |
2164 | #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a |
2165 | @@ -281,6 +282,9 @@ |
2166 | #define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */ |
2167 | #define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */ |
2168 | /* 0xa8 --> 0xb8 unused */ |
2169 | +#define TG3PCI_DEV_STATUS_CTRL 0x000000b4 |
2170 | +#define MAX_READ_REQ_SIZE_2048 0x00004000 |
2171 | +#define MAX_READ_REQ_MASK 0x00007000 |
2172 | #define TG3PCI_DUAL_MAC_CTRL 0x000000b8 |
2173 | #define DUAL_MAC_CTRL_CH_MASK 0x00000003 |
2174 | #define DUAL_MAC_CTRL_ID 0x00000004 |
2175 | diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c |
2176 | index 544114281ea7..9f8d4f8e57e3 100644 |
2177 | --- a/drivers/net/ethernet/freescale/gianfar_ptp.c |
2178 | +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c |
2179 | @@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta) |
2180 | now = tmr_cnt_read(etsects); |
2181 | now += delta; |
2182 | tmr_cnt_write(etsects, now); |
2183 | + set_fipers(etsects); |
2184 | |
2185 | spin_unlock_irqrestore(&etsects->lock, flags); |
2186 | |
2187 | - set_fipers(etsects); |
2188 | - |
2189 | return 0; |
2190 | } |
2191 | |
2192 | diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h |
2193 | index d7bdea79e9fa..8fd2458060a0 100644 |
2194 | --- a/drivers/net/ethernet/intel/e1000/e1000.h |
2195 | +++ b/drivers/net/ethernet/intel/e1000/e1000.h |
2196 | @@ -331,7 +331,8 @@ struct e1000_adapter { |
2197 | enum e1000_state_t { |
2198 | __E1000_TESTING, |
2199 | __E1000_RESETTING, |
2200 | - __E1000_DOWN |
2201 | + __E1000_DOWN, |
2202 | + __E1000_DISABLED |
2203 | }; |
2204 | |
2205 | #undef pr_fmt |
2206 | diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c |
2207 | index 1982f7917a8d..3dd4aeb2706d 100644 |
2208 | --- a/drivers/net/ethernet/intel/e1000/e1000_main.c |
2209 | +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c |
2210 | @@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter, |
2211 | static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
2212 | { |
2213 | struct net_device *netdev; |
2214 | - struct e1000_adapter *adapter; |
2215 | + struct e1000_adapter *adapter = NULL; |
2216 | struct e1000_hw *hw; |
2217 | |
2218 | static int cards_found; |
2219 | @@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
2220 | u16 tmp = 0; |
2221 | u16 eeprom_apme_mask = E1000_EEPROM_APME; |
2222 | int bars, need_ioport; |
2223 | + bool disable_dev = false; |
2224 | |
2225 | /* do not allocate ioport bars when not needed */ |
2226 | need_ioport = e1000_is_need_ioport(pdev); |
2227 | @@ -1259,11 +1260,13 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
2228 | iounmap(hw->ce4100_gbe_mdio_base_virt); |
2229 | iounmap(hw->hw_addr); |
2230 | err_ioremap: |
2231 | + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); |
2232 | free_netdev(netdev); |
2233 | err_alloc_etherdev: |
2234 | pci_release_selected_regions(pdev, bars); |
2235 | err_pci_reg: |
2236 | - pci_disable_device(pdev); |
2237 | + if (!adapter || disable_dev) |
2238 | + pci_disable_device(pdev); |
2239 | return err; |
2240 | } |
2241 | |
2242 | @@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev) |
2243 | struct net_device *netdev = pci_get_drvdata(pdev); |
2244 | struct e1000_adapter *adapter = netdev_priv(netdev); |
2245 | struct e1000_hw *hw = &adapter->hw; |
2246 | + bool disable_dev; |
2247 | |
2248 | e1000_down_and_stop(adapter); |
2249 | e1000_release_manageability(adapter); |
2250 | @@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev) |
2251 | iounmap(hw->flash_address); |
2252 | pci_release_selected_regions(pdev, adapter->bars); |
2253 | |
2254 | + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); |
2255 | free_netdev(netdev); |
2256 | |
2257 | - pci_disable_device(pdev); |
2258 | + if (disable_dev) |
2259 | + pci_disable_device(pdev); |
2260 | } |
2261 | |
2262 | /** |
2263 | @@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) |
2264 | if (netif_running(netdev)) |
2265 | e1000_free_irq(adapter); |
2266 | |
2267 | - pci_disable_device(pdev); |
2268 | + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) |
2269 | + pci_disable_device(pdev); |
2270 | |
2271 | return 0; |
2272 | } |
2273 | @@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev) |
2274 | pr_err("Cannot enable PCI device from suspend\n"); |
2275 | return err; |
2276 | } |
2277 | + |
2278 | + /* flush memory to make sure state is correct */ |
2279 | + smp_mb__before_atomic(); |
2280 | + clear_bit(__E1000_DISABLED, &adapter->flags); |
2281 | pci_set_master(pdev); |
2282 | |
2283 | pci_enable_wake(pdev, PCI_D3hot, 0); |
2284 | @@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, |
2285 | |
2286 | if (netif_running(netdev)) |
2287 | e1000_down(adapter); |
2288 | - pci_disable_device(pdev); |
2289 | + |
2290 | + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) |
2291 | + pci_disable_device(pdev); |
2292 | |
2293 | /* Request a slot slot reset. */ |
2294 | return PCI_ERS_RESULT_NEED_RESET; |
2295 | @@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) |
2296 | pr_err("Cannot re-enable PCI device after reset.\n"); |
2297 | return PCI_ERS_RESULT_DISCONNECT; |
2298 | } |
2299 | + |
2300 | + /* flush memory to make sure state is correct */ |
2301 | + smp_mb__before_atomic(); |
2302 | + clear_bit(__E1000_DISABLED, &adapter->flags); |
2303 | pci_set_master(pdev); |
2304 | |
2305 | pci_enable_wake(pdev, PCI_D3hot, 0); |
2306 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c |
2307 | index b2cde9b16d82..b1cde1b051a4 100644 |
2308 | --- a/drivers/net/ethernet/intel/i40e/i40e_main.c |
2309 | +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c |
2310 | @@ -1553,11 +1553,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p) |
2311 | else |
2312 | netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); |
2313 | |
2314 | + /* Copy the address first, so that we avoid a possible race with |
2315 | + * .set_rx_mode(). If we copy after changing the address in the filter |
2316 | + * list, we might open ourselves to a narrow race window where |
2317 | + * .set_rx_mode could delete our dev_addr filter and prevent traffic |
2318 | + * from passing. |
2319 | + */ |
2320 | + ether_addr_copy(netdev->dev_addr, addr->sa_data); |
2321 | + |
2322 | spin_lock_bh(&vsi->mac_filter_hash_lock); |
2323 | i40e_del_mac_filter(vsi, netdev->dev_addr); |
2324 | i40e_add_mac_filter(vsi, addr->sa_data); |
2325 | spin_unlock_bh(&vsi->mac_filter_hash_lock); |
2326 | - ether_addr_copy(netdev->dev_addr, addr->sa_data); |
2327 | if (vsi->type == I40E_VSI_MAIN) { |
2328 | i40e_status ret; |
2329 | |
2330 | @@ -1739,6 +1746,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr) |
2331 | struct i40e_netdev_priv *np = netdev_priv(netdev); |
2332 | struct i40e_vsi *vsi = np->vsi; |
2333 | |
2334 | + /* Under some circumstances, we might receive a request to delete |
2335 | + * our own device address from our uc list. Because we store the |
2336 | + * device address in the VSI's MAC/VLAN filter list, we need to ignore |
2337 | + * such requests and not delete our device address from this list. |
2338 | + */ |
2339 | + if (ether_addr_equal(addr, netdev->dev_addr)) |
2340 | + return 0; |
2341 | + |
2342 | i40e_del_mac_filter(vsi, addr); |
2343 | |
2344 | return 0; |
2345 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c |
2346 | index 3c07ff171ddc..542c00b1c823 100644 |
2347 | --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c |
2348 | +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c |
2349 | @@ -3048,10 +3048,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb) |
2350 | /* Walk through fragments adding latest fragment, testing it, and |
2351 | * then removing stale fragments from the sum. |
2352 | */ |
2353 | - stale = &skb_shinfo(skb)->frags[0]; |
2354 | - for (;;) { |
2355 | + for (stale = &skb_shinfo(skb)->frags[0];; stale++) { |
2356 | + int stale_size = skb_frag_size(stale); |
2357 | + |
2358 | sum += skb_frag_size(frag++); |
2359 | |
2360 | + /* The stale fragment may present us with a smaller |
2361 | + * descriptor than the actual fragment size. To account |
2362 | + * for that we need to remove all the data on the front and |
2363 | + * figure out what the remainder would be in the last |
2364 | + * descriptor associated with the fragment. |
2365 | + */ |
2366 | + if (stale_size > I40E_MAX_DATA_PER_TXD) { |
2367 | + int align_pad = -(stale->page_offset) & |
2368 | + (I40E_MAX_READ_REQ_SIZE - 1); |
2369 | + |
2370 | + sum -= align_pad; |
2371 | + stale_size -= align_pad; |
2372 | + |
2373 | + do { |
2374 | + sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; |
2375 | + stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; |
2376 | + } while (stale_size > I40E_MAX_DATA_PER_TXD); |
2377 | + } |
2378 | + |
2379 | /* if sum is negative we failed to make sufficient progress */ |
2380 | if (sum < 0) |
2381 | return true; |
2382 | @@ -3059,7 +3079,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb) |
2383 | if (!nr_frags--) |
2384 | break; |
2385 | |
2386 | - sum -= skb_frag_size(stale++); |
2387 | + sum -= stale_size; |
2388 | } |
2389 | |
2390 | return false; |
2391 | diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c |
2392 | index 07a4e6e13925..7368b0dc3af8 100644 |
2393 | --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c |
2394 | +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c |
2395 | @@ -2014,10 +2014,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb) |
2396 | /* Walk through fragments adding latest fragment, testing it, and |
2397 | * then removing stale fragments from the sum. |
2398 | */ |
2399 | - stale = &skb_shinfo(skb)->frags[0]; |
2400 | - for (;;) { |
2401 | + for (stale = &skb_shinfo(skb)->frags[0];; stale++) { |
2402 | + int stale_size = skb_frag_size(stale); |
2403 | + |
2404 | sum += skb_frag_size(frag++); |
2405 | |
2406 | + /* The stale fragment may present us with a smaller |
2407 | + * descriptor than the actual fragment size. To account |
2408 | + * for that we need to remove all the data on the front and |
2409 | + * figure out what the remainder would be in the last |
2410 | + * descriptor associated with the fragment. |
2411 | + */ |
2412 | + if (stale_size > I40E_MAX_DATA_PER_TXD) { |
2413 | + int align_pad = -(stale->page_offset) & |
2414 | + (I40E_MAX_READ_REQ_SIZE - 1); |
2415 | + |
2416 | + sum -= align_pad; |
2417 | + stale_size -= align_pad; |
2418 | + |
2419 | + do { |
2420 | + sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; |
2421 | + stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; |
2422 | + } while (stale_size > I40E_MAX_DATA_PER_TXD); |
2423 | + } |
2424 | + |
2425 | /* if sum is negative we failed to make sufficient progress */ |
2426 | if (sum < 0) |
2427 | return true; |
2428 | @@ -2025,7 +2045,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb) |
2429 | if (!nr_frags--) |
2430 | break; |
2431 | |
2432 | - sum -= skb_frag_size(stale++); |
2433 | + sum -= stale_size; |
2434 | } |
2435 | |
2436 | return false; |
2437 | diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c |
2438 | index 5e81a7263654..3fd71cf5cd60 100644 |
2439 | --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c |
2440 | +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c |
2441 | @@ -1959,11 +1959,12 @@ static int mtk_hw_init(struct mtk_eth *eth) |
2442 | /* set GE2 TUNE */ |
2443 | regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); |
2444 | |
2445 | - /* GE1, Force 1000M/FD, FC ON */ |
2446 | - mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0)); |
2447 | - |
2448 | - /* GE2, Force 1000M/FD, FC ON */ |
2449 | - mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1)); |
2450 | + /* Set linkdown as the default for each GMAC. Its own MCR would be set |
2451 | + * up with the more appropriate value when mtk_phy_link_adjust call is |
2452 | + * being invoked. |
2453 | + */ |
2454 | + for (i = 0; i < MTK_MAC_COUNT; i++) |
2455 | + mtk_w32(eth, 0, MTK_MAC_MCR(i)); |
2456 | |
2457 | /* Indicates CDM to parse the MTK special tag from CPU |
2458 | * which also is working out for untag packets. |
2459 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c |
2460 | index 51c4cc00a186..9d64d0759ee9 100644 |
2461 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c |
2462 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c |
2463 | @@ -259,6 +259,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) |
2464 | static int mlx5e_dbcnl_validate_ets(struct net_device *netdev, |
2465 | struct ieee_ets *ets) |
2466 | { |
2467 | + bool have_ets_tc = false; |
2468 | int bw_sum = 0; |
2469 | int i; |
2470 | |
2471 | @@ -273,11 +274,14 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev, |
2472 | } |
2473 | |
2474 | /* Validate Bandwidth Sum */ |
2475 | - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) |
2476 | - if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) |
2477 | + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { |
2478 | + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { |
2479 | + have_ets_tc = true; |
2480 | bw_sum += ets->tc_tx_bw[i]; |
2481 | + } |
2482 | + } |
2483 | |
2484 | - if (bw_sum != 0 && bw_sum != 100) { |
2485 | + if (have_ets_tc && bw_sum != 100) { |
2486 | netdev_err(netdev, |
2487 | "Failed to validate ETS: BW sum is illegal\n"); |
2488 | return -EINVAL; |
2489 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c |
2490 | index fc606bfd1d6e..eb91de86202b 100644 |
2491 | --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c |
2492 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c |
2493 | @@ -776,7 +776,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) |
2494 | return err; |
2495 | } |
2496 | |
2497 | -int mlx5_stop_eqs(struct mlx5_core_dev *dev) |
2498 | +void mlx5_stop_eqs(struct mlx5_core_dev *dev) |
2499 | { |
2500 | struct mlx5_eq_table *table = &dev->priv.eq_table; |
2501 | int err; |
2502 | @@ -785,22 +785,26 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev) |
2503 | if (MLX5_CAP_GEN(dev, pg)) { |
2504 | err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq); |
2505 | if (err) |
2506 | - return err; |
2507 | + mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n", |
2508 | + err); |
2509 | } |
2510 | #endif |
2511 | |
2512 | err = mlx5_destroy_unmap_eq(dev, &table->pages_eq); |
2513 | if (err) |
2514 | - return err; |
2515 | + mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n", |
2516 | + err); |
2517 | |
2518 | - mlx5_destroy_unmap_eq(dev, &table->async_eq); |
2519 | + err = mlx5_destroy_unmap_eq(dev, &table->async_eq); |
2520 | + if (err) |
2521 | + mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n", |
2522 | + err); |
2523 | mlx5_cmd_use_polling(dev); |
2524 | |
2525 | err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq); |
2526 | if (err) |
2527 | - mlx5_cmd_use_events(dev); |
2528 | - |
2529 | - return err; |
2530 | + mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n", |
2531 | + err); |
2532 | } |
2533 | |
2534 | int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, |
2535 | diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c |
2536 | index 23f7d828cf67..6ef20e5cc77d 100644 |
2537 | --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c |
2538 | +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c |
2539 | @@ -1643,7 +1643,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, |
2540 | return 0; |
2541 | } |
2542 | |
2543 | - wmb(); /* reset needs to be written before we read control register */ |
2544 | + /* Reset needs to be written before we read control register, and |
2545 | + * we must wait for the HW to become responsive once again |
2546 | + */ |
2547 | + wmb(); |
2548 | + msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS); |
2549 | + |
2550 | end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); |
2551 | do { |
2552 | u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); |
2553 | diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h |
2554 | index a6441208e9d9..fb082ad21b00 100644 |
2555 | --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h |
2556 | +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h |
2557 | @@ -59,6 +59,7 @@ |
2558 | #define MLXSW_PCI_SW_RESET 0xF0010 |
2559 | #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) |
2560 | #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 |
2561 | +#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 |
2562 | #define MLXSW_PCI_FW_READY 0xA1844 |
2563 | #define MLXSW_PCI_FW_READY_MASK 0xFFFF |
2564 | #define MLXSW_PCI_FW_READY_MAGIC 0x5E |
2565 | diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c |
2566 | index e118b5f23996..8d53a593fb27 100644 |
2567 | --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c |
2568 | +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c |
2569 | @@ -568,6 +568,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset, |
2570 | return err; |
2571 | } |
2572 | nn_writeb(nn, ctrl_offset, entry->entry); |
2573 | + nfp_net_irq_unmask(nn, entry->entry); |
2574 | |
2575 | return 0; |
2576 | } |
2577 | @@ -582,6 +583,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset, |
2578 | unsigned int vector_idx) |
2579 | { |
2580 | nn_writeb(nn, ctrl_offset, 0xff); |
2581 | + nn_pci_flush(nn); |
2582 | free_irq(nn->irq_entries[vector_idx].vector, nn); |
2583 | } |
2584 | |
2585 | diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h |
2586 | index e82b4b70b7be..627fec210e2f 100644 |
2587 | --- a/drivers/net/ethernet/stmicro/stmmac/common.h |
2588 | +++ b/drivers/net/ethernet/stmicro/stmmac/common.h |
2589 | @@ -409,7 +409,7 @@ struct stmmac_desc_ops { |
2590 | /* get timestamp value */ |
2591 | u64(*get_timestamp) (void *desc, u32 ats); |
2592 | /* get rx timestamp status */ |
2593 | - int (*get_rx_timestamp_status) (void *desc, u32 ats); |
2594 | + int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats); |
2595 | /* Display ring */ |
2596 | void (*display_ring)(void *head, unsigned int size, bool rx); |
2597 | /* set MSS via context descriptor */ |
2598 | diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c |
2599 | index 4b286e27c4ca..7e089bf906b4 100644 |
2600 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c |
2601 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c |
2602 | @@ -258,7 +258,8 @@ static int dwmac4_rx_check_timestamp(void *desc) |
2603 | return ret; |
2604 | } |
2605 | |
2606 | -static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) |
2607 | +static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc, |
2608 | + u32 ats) |
2609 | { |
2610 | struct dma_desc *p = (struct dma_desc *)desc; |
2611 | int ret = -EINVAL; |
2612 | @@ -270,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) |
2613 | |
2614 | /* Check if timestamp is OK from context descriptor */ |
2615 | do { |
2616 | - ret = dwmac4_rx_check_timestamp(desc); |
2617 | + ret = dwmac4_rx_check_timestamp(next_desc); |
2618 | if (ret < 0) |
2619 | goto exit; |
2620 | i++; |
2621 | diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c |
2622 | index 7546b3664113..2a828a312814 100644 |
2623 | --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c |
2624 | +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c |
2625 | @@ -400,7 +400,8 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats) |
2626 | return ns; |
2627 | } |
2628 | |
2629 | -static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats) |
2630 | +static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc, |
2631 | + u32 ats) |
2632 | { |
2633 | if (ats) { |
2634 | struct dma_extended_desc *p = (struct dma_extended_desc *)desc; |
2635 | diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c |
2636 | index f817f8f36569..db4cee57bb24 100644 |
2637 | --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c |
2638 | +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c |
2639 | @@ -265,7 +265,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats) |
2640 | return ns; |
2641 | } |
2642 | |
2643 | -static int ndesc_get_rx_timestamp_status(void *desc, u32 ats) |
2644 | +static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats) |
2645 | { |
2646 | struct dma_desc *p = (struct dma_desc *)desc; |
2647 | |
2648 | diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c |
2649 | index 721b61655261..08c19ebd5306 100644 |
2650 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c |
2651 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c |
2652 | @@ -34,6 +34,7 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, |
2653 | { |
2654 | u32 value = readl(ioaddr + PTP_TCR); |
2655 | unsigned long data; |
2656 | + u32 reg_value; |
2657 | |
2658 | /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second |
2659 | * formula = (1/ptp_clock) * 1000000000 |
2660 | @@ -50,10 +51,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, |
2661 | |
2662 | data &= PTP_SSIR_SSINC_MASK; |
2663 | |
2664 | + reg_value = data; |
2665 | if (gmac4) |
2666 | - data = data << GMAC4_PTP_SSIR_SSINC_SHIFT; |
2667 | + reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT; |
2668 | |
2669 | - writel(data, ioaddr + PTP_SSIR); |
2670 | + writel(reg_value, ioaddr + PTP_SSIR); |
2671 | |
2672 | return data; |
2673 | } |
2674 | diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
2675 | index 0ad12c81a9e4..d0cc73795056 100644 |
2676 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
2677 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
2678 | @@ -489,7 +489,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, |
2679 | desc = np; |
2680 | |
2681 | /* Check if timestamp is available */ |
2682 | - if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) { |
2683 | + if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) { |
2684 | ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); |
2685 | netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); |
2686 | shhwtstamp = skb_hwtstamps(skb); |
2687 | diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c |
2688 | index fb1c9e095d0c..176fc0906bfe 100644 |
2689 | --- a/drivers/net/macvlan.c |
2690 | +++ b/drivers/net/macvlan.c |
2691 | @@ -1441,9 +1441,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, |
2692 | return 0; |
2693 | |
2694 | unregister_netdev: |
2695 | + /* macvlan_uninit would free the macvlan port */ |
2696 | unregister_netdevice(dev); |
2697 | + return err; |
2698 | destroy_macvlan_port: |
2699 | - if (create) |
2700 | + /* the macvlan port may be freed by macvlan_uninit when fail to register. |
2701 | + * so we destroy the macvlan port only when it's valid. |
2702 | + */ |
2703 | + if (create && macvlan_port_get_rtnl(dev)) |
2704 | macvlan_port_destroy(port->dev); |
2705 | return err; |
2706 | } |
2707 | diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c |
2708 | index 135296508a7e..6425ce04d3f9 100644 |
2709 | --- a/drivers/net/phy/mdio-sun4i.c |
2710 | +++ b/drivers/net/phy/mdio-sun4i.c |
2711 | @@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev) |
2712 | |
2713 | data->regulator = devm_regulator_get(&pdev->dev, "phy"); |
2714 | if (IS_ERR(data->regulator)) { |
2715 | - if (PTR_ERR(data->regulator) == -EPROBE_DEFER) |
2716 | - return -EPROBE_DEFER; |
2717 | + if (PTR_ERR(data->regulator) == -EPROBE_DEFER) { |
2718 | + ret = -EPROBE_DEFER; |
2719 | + goto err_out_free_mdiobus; |
2720 | + } |
2721 | |
2722 | dev_info(&pdev->dev, "no regulator found\n"); |
2723 | data->regulator = NULL; |
2724 | diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c |
2725 | index bfd3090fb055..07c6048200c6 100644 |
2726 | --- a/drivers/net/phy/mdio-xgene.c |
2727 | +++ b/drivers/net/phy/mdio-xgene.c |
2728 | @@ -194,8 +194,11 @@ static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata) |
2729 | } |
2730 | |
2731 | ret = xgene_enet_ecc_init(pdata); |
2732 | - if (ret) |
2733 | + if (ret) { |
2734 | + if (pdata->dev->of_node) |
2735 | + clk_disable_unprepare(pdata->clk); |
2736 | return ret; |
2737 | + } |
2738 | xgene_gmac_reset(pdata); |
2739 | |
2740 | return 0; |
2741 | @@ -388,8 +391,10 @@ static int xgene_mdio_probe(struct platform_device *pdev) |
2742 | return ret; |
2743 | |
2744 | mdio_bus = mdiobus_alloc(); |
2745 | - if (!mdio_bus) |
2746 | - return -ENOMEM; |
2747 | + if (!mdio_bus) { |
2748 | + ret = -ENOMEM; |
2749 | + goto out_clk; |
2750 | + } |
2751 | |
2752 | mdio_bus->name = "APM X-Gene MDIO bus"; |
2753 | |
2754 | @@ -418,7 +423,7 @@ static int xgene_mdio_probe(struct platform_device *pdev) |
2755 | mdio_bus->phy_mask = ~0; |
2756 | ret = mdiobus_register(mdio_bus); |
2757 | if (ret) |
2758 | - goto out; |
2759 | + goto out_mdiobus; |
2760 | |
2761 | acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1, |
2762 | acpi_register_phy, NULL, mdio_bus, NULL); |
2763 | @@ -426,16 +431,20 @@ static int xgene_mdio_probe(struct platform_device *pdev) |
2764 | } |
2765 | |
2766 | if (ret) |
2767 | - goto out; |
2768 | + goto out_mdiobus; |
2769 | |
2770 | pdata->mdio_bus = mdio_bus; |
2771 | xgene_mdio_status = true; |
2772 | |
2773 | return 0; |
2774 | |
2775 | -out: |
2776 | +out_mdiobus: |
2777 | mdiobus_free(mdio_bus); |
2778 | |
2779 | +out_clk: |
2780 | + if (dev->of_node) |
2781 | + clk_disable_unprepare(pdata->clk); |
2782 | + |
2783 | return ret; |
2784 | } |
2785 | |
2786 | diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c |
2787 | index 8d9f02b7a71f..b1632294174f 100644 |
2788 | --- a/drivers/net/usb/qmi_wwan.c |
2789 | +++ b/drivers/net/usb/qmi_wwan.c |
2790 | @@ -1100,6 +1100,7 @@ static const struct usb_device_id products[] = { |
2791 | {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, |
2792 | {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, |
2793 | {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, |
2794 | + {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ |
2795 | {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, |
2796 | {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ |
2797 | {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ |
2798 | @@ -1211,6 +1212,7 @@ static const struct usb_device_id products[] = { |
2799 | {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ |
2800 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ |
2801 | {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ |
2802 | + {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ |
2803 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ |
2804 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ |
2805 | {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ |
2806 | diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c |
2807 | index 9e9202b50e73..bb44f0c6891f 100644 |
2808 | --- a/drivers/net/vxlan.c |
2809 | +++ b/drivers/net/vxlan.c |
2810 | @@ -2155,6 +2155,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, |
2811 | } |
2812 | |
2813 | ndst = &rt->dst; |
2814 | + if (skb_dst(skb)) { |
2815 | + int mtu = dst_mtu(ndst) - VXLAN_HEADROOM; |
2816 | + |
2817 | + skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, |
2818 | + skb, mtu); |
2819 | + } |
2820 | + |
2821 | tos = ip_tunnel_ecn_encap(tos, old_iph, skb); |
2822 | ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); |
2823 | err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), |
2824 | @@ -2190,6 +2197,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, |
2825 | goto out_unlock; |
2826 | } |
2827 | |
2828 | + if (skb_dst(skb)) { |
2829 | + int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM; |
2830 | + |
2831 | + skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, |
2832 | + skb, mtu); |
2833 | + } |
2834 | + |
2835 | tos = ip_tunnel_ecn_encap(tos, old_iph, skb); |
2836 | ttl = ttl ? : ip6_dst_hoplimit(ndst); |
2837 | skb_scrub_packet(skb, xnet); |
2838 | diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c |
2839 | index b83f01d6e3dd..af37c19dbfd7 100644 |
2840 | --- a/drivers/net/wireless/ath/wcn36xx/main.c |
2841 | +++ b/drivers/net/wireless/ath/wcn36xx/main.c |
2842 | @@ -384,6 +384,18 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) |
2843 | } |
2844 | } |
2845 | |
2846 | + if (changed & IEEE80211_CONF_CHANGE_PS) { |
2847 | + list_for_each_entry(tmp, &wcn->vif_list, list) { |
2848 | + vif = wcn36xx_priv_to_vif(tmp); |
2849 | + if (hw->conf.flags & IEEE80211_CONF_PS) { |
2850 | + if (vif->bss_conf.ps) /* ps allowed ? */ |
2851 | + wcn36xx_pmc_enter_bmps_state(wcn, vif); |
2852 | + } else { |
2853 | + wcn36xx_pmc_exit_bmps_state(wcn, vif); |
2854 | + } |
2855 | + } |
2856 | + } |
2857 | + |
2858 | mutex_unlock(&wcn->conf_mutex); |
2859 | |
2860 | return 0; |
2861 | @@ -747,17 +759,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, |
2862 | vif_priv->dtim_period = bss_conf->dtim_period; |
2863 | } |
2864 | |
2865 | - if (changed & BSS_CHANGED_PS) { |
2866 | - wcn36xx_dbg(WCN36XX_DBG_MAC, |
2867 | - "mac bss PS set %d\n", |
2868 | - bss_conf->ps); |
2869 | - if (bss_conf->ps) { |
2870 | - wcn36xx_pmc_enter_bmps_state(wcn, vif); |
2871 | - } else { |
2872 | - wcn36xx_pmc_exit_bmps_state(wcn, vif); |
2873 | - } |
2874 | - } |
2875 | - |
2876 | if (changed & BSS_CHANGED_BSSID) { |
2877 | wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n", |
2878 | bss_conf->bssid); |
2879 | diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c |
2880 | index 589fe5f70971..1976b80c235f 100644 |
2881 | --- a/drivers/net/wireless/ath/wcn36xx/pmc.c |
2882 | +++ b/drivers/net/wireless/ath/wcn36xx/pmc.c |
2883 | @@ -45,8 +45,10 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn, |
2884 | struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); |
2885 | |
2886 | if (WCN36XX_BMPS != vif_priv->pw_state) { |
2887 | - wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n"); |
2888 | - return -EINVAL; |
2889 | + /* Unbalanced call or last BMPS enter failed */ |
2890 | + wcn36xx_dbg(WCN36XX_DBG_PMC, |
2891 | + "Not in BMPS mode, no need to exit\n"); |
2892 | + return -EALREADY; |
2893 | } |
2894 | wcn36xx_smd_exit_bmps(wcn, vif); |
2895 | vif_priv->pw_state = WCN36XX_FULL_POWER; |
2896 | diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c |
2897 | index 052e67bce6b3..710efe7b65f9 100644 |
2898 | --- a/drivers/net/wireless/mac80211_hwsim.c |
2899 | +++ b/drivers/net/wireless/mac80211_hwsim.c |
2900 | @@ -3220,7 +3220,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) |
2901 | if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info))) |
2902 | continue; |
2903 | |
2904 | - skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
2905 | + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); |
2906 | if (!skb) { |
2907 | res = -ENOMEM; |
2908 | goto out_err; |
2909 | diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c |
2910 | index 391432e2725d..c980cdbd6e53 100644 |
2911 | --- a/drivers/net/xen-netfront.c |
2912 | +++ b/drivers/net/xen-netfront.c |
2913 | @@ -1326,6 +1326,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) |
2914 | |
2915 | netif_carrier_off(netdev); |
2916 | |
2917 | + xenbus_switch_state(dev, XenbusStateInitialising); |
2918 | return netdev; |
2919 | |
2920 | exit: |
2921 | diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c |
2922 | index 0655f45643d9..dd956311a85a 100644 |
2923 | --- a/drivers/nvme/host/core.c |
2924 | +++ b/drivers/nvme/host/core.c |
2925 | @@ -1515,7 +1515,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, |
2926 | blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); |
2927 | blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); |
2928 | } |
2929 | - if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) |
2930 | + if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && |
2931 | + is_power_of_2(ctrl->max_hw_sectors)) |
2932 | blk_queue_chunk_sectors(q, ctrl->max_hw_sectors); |
2933 | blk_queue_virt_boundary(q, ctrl->page_size - 1); |
2934 | if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) |
2935 | diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c |
2936 | index 555c976cc2ee..8cd42544c90e 100644 |
2937 | --- a/drivers/nvme/host/fabrics.c |
2938 | +++ b/drivers/nvme/host/fabrics.c |
2939 | @@ -74,6 +74,7 @@ static struct nvmf_host *nvmf_host_default(void) |
2940 | return NULL; |
2941 | |
2942 | kref_init(&host->ref); |
2943 | + uuid_gen(&host->id); |
2944 | snprintf(host->nqn, NVMF_NQN_SIZE, |
2945 | "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id); |
2946 | |
2947 | diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c |
2948 | index 3148d760d825..7deb7b5d8683 100644 |
2949 | --- a/drivers/nvme/host/fc.c |
2950 | +++ b/drivers/nvme/host/fc.c |
2951 | @@ -2876,7 +2876,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, |
2952 | |
2953 | /* initiate nvme ctrl ref counting teardown */ |
2954 | nvme_uninit_ctrl(&ctrl->ctrl); |
2955 | - nvme_put_ctrl(&ctrl->ctrl); |
2956 | |
2957 | /* Remove core ctrl ref. */ |
2958 | nvme_put_ctrl(&ctrl->ctrl); |
2959 | diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c |
2960 | index 98258583abb0..8c1819230ed2 100644 |
2961 | --- a/drivers/of/of_mdio.c |
2962 | +++ b/drivers/of/of_mdio.c |
2963 | @@ -228,7 +228,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) |
2964 | rc = of_mdiobus_register_phy(mdio, child, addr); |
2965 | else |
2966 | rc = of_mdiobus_register_device(mdio, child, addr); |
2967 | - if (rc) |
2968 | + |
2969 | + if (rc == -ENODEV) |
2970 | + dev_err(&mdio->dev, |
2971 | + "MDIO device at address %d is missing.\n", |
2972 | + addr); |
2973 | + else if (rc) |
2974 | goto unregister; |
2975 | } |
2976 | |
2977 | @@ -252,7 +257,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) |
2978 | |
2979 | if (of_mdiobus_child_is_phy(child)) { |
2980 | rc = of_mdiobus_register_phy(mdio, child, addr); |
2981 | - if (rc) |
2982 | + if (rc && rc != -ENODEV) |
2983 | goto unregister; |
2984 | } |
2985 | } |
2986 | diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c |
2987 | index accaaaccb662..6601ad0dfb3a 100644 |
2988 | --- a/drivers/phy/motorola/phy-cpcap-usb.c |
2989 | +++ b/drivers/phy/motorola/phy-cpcap-usb.c |
2990 | @@ -310,7 +310,7 @@ static int cpcap_usb_init_irq(struct platform_device *pdev, |
2991 | int irq, error; |
2992 | |
2993 | irq = platform_get_irq_byname(pdev, name); |
2994 | - if (!irq) |
2995 | + if (irq < 0) |
2996 | return -ENODEV; |
2997 | |
2998 | error = devm_request_threaded_irq(ddata->dev, irq, NULL, |
2999 | diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c |
3000 | index c94b606e0df8..ee14d8e45c97 100644 |
3001 | --- a/drivers/s390/block/dasd_3990_erp.c |
3002 | +++ b/drivers/s390/block/dasd_3990_erp.c |
3003 | @@ -2803,6 +2803,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) |
3004 | erp = dasd_3990_erp_handle_match_erp(cqr, erp); |
3005 | } |
3006 | |
3007 | + |
3008 | + /* |
3009 | + * For path verification work we need to stick with the path that was |
3010 | + * originally chosen so that the per path configuration data is |
3011 | + * assigned correctly. |
3012 | + */ |
3013 | + if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) { |
3014 | + erp->lpm = cqr->lpm; |
3015 | + } |
3016 | + |
3017 | if (device->features & DASD_FEATURE_ERPLOG) { |
3018 | /* print current erp_chain */ |
3019 | dev_err(&device->cdev->dev, |
3020 | diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h |
3021 | index 403a639574e5..b0b290f7b8dc 100644 |
3022 | --- a/drivers/scsi/aacraid/aacraid.h |
3023 | +++ b/drivers/scsi/aacraid/aacraid.h |
3024 | @@ -1724,6 +1724,7 @@ struct aac_dev |
3025 | #define FIB_CONTEXT_FLAG_NATIVE_HBA (0x00000010) |
3026 | #define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF (0x00000020) |
3027 | #define FIB_CONTEXT_FLAG_SCSI_CMD (0x00000040) |
3028 | +#define FIB_CONTEXT_FLAG_EH_RESET (0x00000080) |
3029 | |
3030 | /* |
3031 | * Define the command values |
3032 | diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c |
3033 | index c9252b138c1f..509fe23fafe1 100644 |
3034 | --- a/drivers/scsi/aacraid/linit.c |
3035 | +++ b/drivers/scsi/aacraid/linit.c |
3036 | @@ -1037,7 +1037,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd) |
3037 | info = &aac->hba_map[bus][cid]; |
3038 | if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || |
3039 | info->devtype != AAC_DEVTYPE_NATIVE_RAW) { |
3040 | - fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; |
3041 | + fib->flags |= FIB_CONTEXT_FLAG_EH_RESET; |
3042 | cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER; |
3043 | } |
3044 | } |
3045 | diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c |
3046 | index c17ccb913fde..a3e480e7a257 100644 |
3047 | --- a/drivers/scsi/storvsc_drv.c |
3048 | +++ b/drivers/scsi/storvsc_drv.c |
3049 | @@ -952,10 +952,11 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, |
3050 | case TEST_UNIT_READY: |
3051 | break; |
3052 | default: |
3053 | - set_host_byte(scmnd, DID_TARGET_FAILURE); |
3054 | + set_host_byte(scmnd, DID_ERROR); |
3055 | } |
3056 | break; |
3057 | case SRB_STATUS_INVALID_LUN: |
3058 | + set_host_byte(scmnd, DID_NO_CONNECT); |
3059 | do_work = true; |
3060 | process_err_fn = storvsc_remove_lun; |
3061 | break; |
3062 | diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c |
3063 | index f95da364c283..669470971023 100644 |
3064 | --- a/drivers/spi/spi-atmel.c |
3065 | +++ b/drivers/spi/spi-atmel.c |
3066 | @@ -1661,12 +1661,12 @@ static int atmel_spi_remove(struct platform_device *pdev) |
3067 | pm_runtime_get_sync(&pdev->dev); |
3068 | |
3069 | /* reset the hardware and block queue progress */ |
3070 | - spin_lock_irq(&as->lock); |
3071 | if (as->use_dma) { |
3072 | atmel_spi_stop_dma(master); |
3073 | atmel_spi_release_dma(master); |
3074 | } |
3075 | |
3076 | + spin_lock_irq(&as->lock); |
3077 | spi_writel(as, CR, SPI_BIT(SWRST)); |
3078 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ |
3079 | spi_readl(as, SR); |
3080 | diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig |
3081 | index a517b2d29f1b..8f6494158d3d 100644 |
3082 | --- a/drivers/staging/android/ion/Kconfig |
3083 | +++ b/drivers/staging/android/ion/Kconfig |
3084 | @@ -37,7 +37,7 @@ config ION_CHUNK_HEAP |
3085 | |
3086 | config ION_CMA_HEAP |
3087 | bool "Ion CMA heap support" |
3088 | - depends on ION && CMA |
3089 | + depends on ION && DMA_CMA |
3090 | help |
3091 | Choose this option to enable CMA heaps with Ion. This heap is backed |
3092 | by the Contiguous Memory Allocator (CMA). If your system has these |
3093 | diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c |
3094 | index dd5545d9990a..86196ffd2faf 100644 |
3095 | --- a/drivers/staging/android/ion/ion_cma_heap.c |
3096 | +++ b/drivers/staging/android/ion/ion_cma_heap.c |
3097 | @@ -39,9 +39,15 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, |
3098 | struct ion_cma_heap *cma_heap = to_cma_heap(heap); |
3099 | struct sg_table *table; |
3100 | struct page *pages; |
3101 | + unsigned long size = PAGE_ALIGN(len); |
3102 | + unsigned long nr_pages = size >> PAGE_SHIFT; |
3103 | + unsigned long align = get_order(size); |
3104 | int ret; |
3105 | |
3106 | - pages = cma_alloc(cma_heap->cma, len, 0, GFP_KERNEL); |
3107 | + if (align > CONFIG_CMA_ALIGNMENT) |
3108 | + align = CONFIG_CMA_ALIGNMENT; |
3109 | + |
3110 | + pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL); |
3111 | if (!pages) |
3112 | return -ENOMEM; |
3113 | |
3114 | @@ -53,7 +59,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, |
3115 | if (ret) |
3116 | goto free_mem; |
3117 | |
3118 | - sg_set_page(table->sgl, pages, len, 0); |
3119 | + sg_set_page(table->sgl, pages, size, 0); |
3120 | |
3121 | buffer->priv_virt = pages; |
3122 | buffer->sg_table = table; |
3123 | @@ -62,7 +68,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, |
3124 | free_mem: |
3125 | kfree(table); |
3126 | err: |
3127 | - cma_release(cma_heap->cma, pages, buffer->size); |
3128 | + cma_release(cma_heap->cma, pages, nr_pages); |
3129 | return -ENOMEM; |
3130 | } |
3131 | |
3132 | @@ -70,9 +76,10 @@ static void ion_cma_free(struct ion_buffer *buffer) |
3133 | { |
3134 | struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); |
3135 | struct page *pages = buffer->priv_virt; |
3136 | + unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; |
3137 | |
3138 | /* release memory */ |
3139 | - cma_release(cma_heap->cma, pages, buffer->size); |
3140 | + cma_release(cma_heap->cma, pages, nr_pages); |
3141 | /* release sg table */ |
3142 | sg_free_table(buffer->sg_table); |
3143 | kfree(buffer->sg_table); |
3144 | diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c |
3145 | index f77e499afddd..065f0b607373 100644 |
3146 | --- a/drivers/xen/balloon.c |
3147 | +++ b/drivers/xen/balloon.c |
3148 | @@ -257,10 +257,25 @@ static void release_memory_resource(struct resource *resource) |
3149 | kfree(resource); |
3150 | } |
3151 | |
3152 | +/* |
3153 | + * Host memory not allocated to dom0. We can use this range for hotplug-based |
3154 | + * ballooning. |
3155 | + * |
3156 | + * It's a type-less resource. Setting IORESOURCE_MEM will make resource |
3157 | + * management algorithms (arch_remove_reservations()) look into guest e820, |
3158 | + * which we don't want. |
3159 | + */ |
3160 | +static struct resource hostmem_resource = { |
3161 | + .name = "Host RAM", |
3162 | +}; |
3163 | + |
3164 | +void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res) |
3165 | +{} |
3166 | + |
3167 | static struct resource *additional_memory_resource(phys_addr_t size) |
3168 | { |
3169 | - struct resource *res; |
3170 | - int ret; |
3171 | + struct resource *res, *res_hostmem; |
3172 | + int ret = -ENOMEM; |
3173 | |
3174 | res = kzalloc(sizeof(*res), GFP_KERNEL); |
3175 | if (!res) |
3176 | @@ -269,13 +284,42 @@ static struct resource *additional_memory_resource(phys_addr_t size) |
3177 | res->name = "System RAM"; |
3178 | res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; |
3179 | |
3180 | - ret = allocate_resource(&iomem_resource, res, |
3181 | - size, 0, -1, |
3182 | - PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); |
3183 | - if (ret < 0) { |
3184 | - pr_err("Cannot allocate new System RAM resource\n"); |
3185 | - kfree(res); |
3186 | - return NULL; |
3187 | + res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL); |
3188 | + if (res_hostmem) { |
3189 | + /* Try to grab a range from hostmem */ |
3190 | + res_hostmem->name = "Host memory"; |
3191 | + ret = allocate_resource(&hostmem_resource, res_hostmem, |
3192 | + size, 0, -1, |
3193 | + PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); |
3194 | + } |
3195 | + |
3196 | + if (!ret) { |
3197 | + /* |
3198 | + * Insert this resource into iomem. Because hostmem_resource |
3199 | + * tracks portion of guest e820 marked as UNUSABLE noone else |
3200 | + * should try to use it. |
3201 | + */ |
3202 | + res->start = res_hostmem->start; |
3203 | + res->end = res_hostmem->end; |
3204 | + ret = insert_resource(&iomem_resource, res); |
3205 | + if (ret < 0) { |
3206 | + pr_err("Can't insert iomem_resource [%llx - %llx]\n", |
3207 | + res->start, res->end); |
3208 | + release_memory_resource(res_hostmem); |
3209 | + res_hostmem = NULL; |
3210 | + res->start = res->end = 0; |
3211 | + } |
3212 | + } |
3213 | + |
3214 | + if (ret) { |
3215 | + ret = allocate_resource(&iomem_resource, res, |
3216 | + size, 0, -1, |
3217 | + PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); |
3218 | + if (ret < 0) { |
3219 | + pr_err("Cannot allocate new System RAM resource\n"); |
3220 | + kfree(res); |
3221 | + return NULL; |
3222 | + } |
3223 | } |
3224 | |
3225 | #ifdef CONFIG_SPARSEMEM |
3226 | @@ -287,6 +331,7 @@ static struct resource *additional_memory_resource(phys_addr_t size) |
3227 | pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n", |
3228 | pfn, limit); |
3229 | release_memory_resource(res); |
3230 | + release_memory_resource(res_hostmem); |
3231 | return NULL; |
3232 | } |
3233 | } |
3234 | @@ -765,6 +810,8 @@ static int __init balloon_init(void) |
3235 | set_online_page_callback(&xen_online_page); |
3236 | register_memory_notifier(&xen_memory_nb); |
3237 | register_sysctl_table(xen_root); |
3238 | + |
3239 | + arch_xen_balloon_init(&hostmem_resource); |
3240 | #endif |
3241 | |
3242 | #ifdef CONFIG_XEN_PV |
3243 | diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c |
3244 | index 57efbd3b053b..bd56653b9bbc 100644 |
3245 | --- a/drivers/xen/gntdev.c |
3246 | +++ b/drivers/xen/gntdev.c |
3247 | @@ -380,10 +380,8 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages) |
3248 | } |
3249 | range = 0; |
3250 | while (range < pages) { |
3251 | - if (map->unmap_ops[offset+range].handle == -1) { |
3252 | - range--; |
3253 | + if (map->unmap_ops[offset+range].handle == -1) |
3254 | break; |
3255 | - } |
3256 | range++; |
3257 | } |
3258 | err = __unmap_grant_pages(map, offset, range); |
3259 | @@ -1073,8 +1071,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) |
3260 | out_unlock_put: |
3261 | mutex_unlock(&priv->lock); |
3262 | out_put_map: |
3263 | - if (use_ptemod) |
3264 | + if (use_ptemod) { |
3265 | map->vma = NULL; |
3266 | + unmap_grant_pages(map, 0, map->count); |
3267 | + } |
3268 | gntdev_put_map(priv, map); |
3269 | return err; |
3270 | } |
3271 | diff --git a/fs/afs/write.c b/fs/afs/write.c |
3272 | index 106e43db1115..926d4d68f791 100644 |
3273 | --- a/fs/afs/write.c |
3274 | +++ b/fs/afs/write.c |
3275 | @@ -282,7 +282,7 @@ int afs_write_end(struct file *file, struct address_space *mapping, |
3276 | ret = afs_fill_page(vnode, key, pos + copied, |
3277 | len - copied, page); |
3278 | if (ret < 0) |
3279 | - return ret; |
3280 | + goto out; |
3281 | } |
3282 | SetPageUptodate(page); |
3283 | } |
3284 | @@ -290,10 +290,12 @@ int afs_write_end(struct file *file, struct address_space *mapping, |
3285 | set_page_dirty(page); |
3286 | if (PageDirty(page)) |
3287 | _debug("dirtied"); |
3288 | + ret = copied; |
3289 | + |
3290 | +out: |
3291 | unlock_page(page); |
3292 | put_page(page); |
3293 | - |
3294 | - return copied; |
3295 | + return ret; |
3296 | } |
3297 | |
3298 | /* |
3299 | diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c |
3300 | index 4006b2a1233d..bc534fafacf9 100644 |
3301 | --- a/fs/btrfs/volumes.c |
3302 | +++ b/fs/btrfs/volumes.c |
3303 | @@ -236,7 +236,6 @@ static struct btrfs_device *__alloc_device(void) |
3304 | kfree(dev); |
3305 | return ERR_PTR(-ENOMEM); |
3306 | } |
3307 | - bio_get(dev->flush_bio); |
3308 | |
3309 | INIT_LIST_HEAD(&dev->dev_list); |
3310 | INIT_LIST_HEAD(&dev->dev_alloc_list); |
3311 | diff --git a/fs/exec.c b/fs/exec.c |
3312 | index acec119fcc31..0da4d748b4e6 100644 |
3313 | --- a/fs/exec.c |
3314 | +++ b/fs/exec.c |
3315 | @@ -1216,15 +1216,14 @@ static int de_thread(struct task_struct *tsk) |
3316 | return -EAGAIN; |
3317 | } |
3318 | |
3319 | -char *get_task_comm(char *buf, struct task_struct *tsk) |
3320 | +char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk) |
3321 | { |
3322 | - /* buf must be at least sizeof(tsk->comm) in size */ |
3323 | task_lock(tsk); |
3324 | - strncpy(buf, tsk->comm, sizeof(tsk->comm)); |
3325 | + strncpy(buf, tsk->comm, buf_size); |
3326 | task_unlock(tsk); |
3327 | return buf; |
3328 | } |
3329 | -EXPORT_SYMBOL_GPL(get_task_comm); |
3330 | +EXPORT_SYMBOL_GPL(__get_task_comm); |
3331 | |
3332 | /* |
3333 | * These functions flushes out all traces of the currently running executable |
3334 | diff --git a/fs/super.c b/fs/super.c |
3335 | index 994db21f59bf..79d7fc5e0ddd 100644 |
3336 | --- a/fs/super.c |
3337 | +++ b/fs/super.c |
3338 | @@ -522,7 +522,11 @@ struct super_block *sget_userns(struct file_system_type *type, |
3339 | hlist_add_head(&s->s_instances, &type->fs_supers); |
3340 | spin_unlock(&sb_lock); |
3341 | get_filesystem(type); |
3342 | - register_shrinker(&s->s_shrink); |
3343 | + err = register_shrinker(&s->s_shrink); |
3344 | + if (err) { |
3345 | + deactivate_locked_super(s); |
3346 | + s = ERR_PTR(err); |
3347 | + } |
3348 | return s; |
3349 | } |
3350 | |
3351 | diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c |
3352 | index 010a13a201aa..659ed6f8c484 100644 |
3353 | --- a/fs/xfs/xfs_qm.c |
3354 | +++ b/fs/xfs/xfs_qm.c |
3355 | @@ -48,7 +48,7 @@ |
3356 | STATIC int xfs_qm_init_quotainos(xfs_mount_t *); |
3357 | STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); |
3358 | |
3359 | - |
3360 | +STATIC void xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi); |
3361 | STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp); |
3362 | /* |
3363 | * We use the batch lookup interface to iterate over the dquots as it |
3364 | @@ -695,9 +695,17 @@ xfs_qm_init_quotainfo( |
3365 | qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan; |
3366 | qinf->qi_shrinker.seeks = DEFAULT_SEEKS; |
3367 | qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE; |
3368 | - register_shrinker(&qinf->qi_shrinker); |
3369 | + |
3370 | + error = register_shrinker(&qinf->qi_shrinker); |
3371 | + if (error) |
3372 | + goto out_free_inos; |
3373 | + |
3374 | return 0; |
3375 | |
3376 | +out_free_inos: |
3377 | + mutex_destroy(&qinf->qi_quotaofflock); |
3378 | + mutex_destroy(&qinf->qi_tree_lock); |
3379 | + xfs_qm_destroy_quotainos(qinf); |
3380 | out_free_lru: |
3381 | list_lru_destroy(&qinf->qi_lru); |
3382 | out_free_qinf: |
3383 | @@ -706,7 +714,6 @@ xfs_qm_init_quotainfo( |
3384 | return error; |
3385 | } |
3386 | |
3387 | - |
3388 | /* |
3389 | * Gets called when unmounting a filesystem or when all quotas get |
3390 | * turned off. |
3391 | @@ -723,19 +730,8 @@ xfs_qm_destroy_quotainfo( |
3392 | |
3393 | unregister_shrinker(&qi->qi_shrinker); |
3394 | list_lru_destroy(&qi->qi_lru); |
3395 | - |
3396 | - if (qi->qi_uquotaip) { |
3397 | - IRELE(qi->qi_uquotaip); |
3398 | - qi->qi_uquotaip = NULL; /* paranoia */ |
3399 | - } |
3400 | - if (qi->qi_gquotaip) { |
3401 | - IRELE(qi->qi_gquotaip); |
3402 | - qi->qi_gquotaip = NULL; |
3403 | - } |
3404 | - if (qi->qi_pquotaip) { |
3405 | - IRELE(qi->qi_pquotaip); |
3406 | - qi->qi_pquotaip = NULL; |
3407 | - } |
3408 | + xfs_qm_destroy_quotainos(qi); |
3409 | + mutex_destroy(&qi->qi_tree_lock); |
3410 | mutex_destroy(&qi->qi_quotaofflock); |
3411 | kmem_free(qi); |
3412 | mp->m_quotainfo = NULL; |
3413 | @@ -1599,6 +1595,24 @@ xfs_qm_init_quotainos( |
3414 | return error; |
3415 | } |
3416 | |
3417 | +STATIC void |
3418 | +xfs_qm_destroy_quotainos( |
3419 | + xfs_quotainfo_t *qi) |
3420 | +{ |
3421 | + if (qi->qi_uquotaip) { |
3422 | + IRELE(qi->qi_uquotaip); |
3423 | + qi->qi_uquotaip = NULL; /* paranoia */ |
3424 | + } |
3425 | + if (qi->qi_gquotaip) { |
3426 | + IRELE(qi->qi_gquotaip); |
3427 | + qi->qi_gquotaip = NULL; |
3428 | + } |
3429 | + if (qi->qi_pquotaip) { |
3430 | + IRELE(qi->qi_pquotaip); |
3431 | + qi->qi_pquotaip = NULL; |
3432 | + } |
3433 | +} |
3434 | + |
3435 | STATIC void |
3436 | xfs_qm_dqfree_one( |
3437 | struct xfs_dquot *dqp) |
3438 | diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h |
3439 | index aeec003a566b..ac0eae8372ab 100644 |
3440 | --- a/include/crypto/if_alg.h |
3441 | +++ b/include/crypto/if_alg.h |
3442 | @@ -18,6 +18,7 @@ |
3443 | #include <linux/if_alg.h> |
3444 | #include <linux/scatterlist.h> |
3445 | #include <linux/types.h> |
3446 | +#include <linux/atomic.h> |
3447 | #include <net/sock.h> |
3448 | |
3449 | #include <crypto/aead.h> |
3450 | @@ -155,7 +156,7 @@ struct af_alg_ctx { |
3451 | struct af_alg_completion completion; |
3452 | |
3453 | size_t used; |
3454 | - size_t rcvused; |
3455 | + atomic_t rcvused; |
3456 | |
3457 | bool more; |
3458 | bool merge; |
3459 | @@ -228,7 +229,7 @@ static inline int af_alg_rcvbuf(struct sock *sk) |
3460 | struct af_alg_ctx *ctx = ask->private; |
3461 | |
3462 | return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - |
3463 | - ctx->rcvused, 0); |
3464 | + atomic_read(&ctx->rcvused), 0); |
3465 | } |
3466 | |
3467 | /** |
3468 | diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h |
3469 | index ae15864c8708..8f9fc6e5539a 100644 |
3470 | --- a/include/linux/mlx5/driver.h |
3471 | +++ b/include/linux/mlx5/driver.h |
3472 | @@ -1017,7 +1017,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, |
3473 | enum mlx5_eq_type type); |
3474 | int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); |
3475 | int mlx5_start_eqs(struct mlx5_core_dev *dev); |
3476 | -int mlx5_stop_eqs(struct mlx5_core_dev *dev); |
3477 | +void mlx5_stop_eqs(struct mlx5_core_dev *dev); |
3478 | int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, |
3479 | unsigned int *irqn); |
3480 | int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); |
3481 | diff --git a/include/linux/sched.h b/include/linux/sched.h |
3482 | index fdf74f27acf1..41354690e4e3 100644 |
3483 | --- a/include/linux/sched.h |
3484 | +++ b/include/linux/sched.h |
3485 | @@ -1502,7 +1502,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from) |
3486 | __set_task_comm(tsk, from, false); |
3487 | } |
3488 | |
3489 | -extern char *get_task_comm(char *to, struct task_struct *tsk); |
3490 | +extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); |
3491 | +#define get_task_comm(buf, tsk) ({ \ |
3492 | + BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \ |
3493 | + __get_task_comm(buf, sizeof(buf), tsk); \ |
3494 | +}) |
3495 | |
3496 | #ifdef CONFIG_SMP |
3497 | void scheduler_ipi(void); |
3498 | diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h |
3499 | index 236bfe5b2ffe..6073e8bae025 100644 |
3500 | --- a/include/net/sch_generic.h |
3501 | +++ b/include/net/sch_generic.h |
3502 | @@ -273,7 +273,6 @@ struct tcf_chain { |
3503 | |
3504 | struct tcf_block { |
3505 | struct list_head chain_list; |
3506 | - struct work_struct work; |
3507 | }; |
3508 | |
3509 | static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) |
3510 | diff --git a/include/net/xfrm.h b/include/net/xfrm.h |
3511 | index e015e164bac0..db99efb2d1d0 100644 |
3512 | --- a/include/net/xfrm.h |
3513 | +++ b/include/net/xfrm.h |
3514 | @@ -1570,6 +1570,9 @@ int xfrm_init_state(struct xfrm_state *x); |
3515 | int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb); |
3516 | int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); |
3517 | int xfrm_input_resume(struct sk_buff *skb, int nexthdr); |
3518 | +int xfrm_trans_queue(struct sk_buff *skb, |
3519 | + int (*finish)(struct net *, struct sock *, |
3520 | + struct sk_buff *)); |
3521 | int xfrm_output_resume(struct sk_buff *skb, int err); |
3522 | int xfrm_output(struct sock *sk, struct sk_buff *skb); |
3523 | int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); |
3524 | diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h |
3525 | index 282875cf8056..8254c937c9f4 100644 |
3526 | --- a/include/uapi/linux/libc-compat.h |
3527 | +++ b/include/uapi/linux/libc-compat.h |
3528 | @@ -168,46 +168,99 @@ |
3529 | |
3530 | /* If we did not see any headers from any supported C libraries, |
3531 | * or we are being included in the kernel, then define everything |
3532 | - * that we need. */ |
3533 | + * that we need. Check for previous __UAPI_* definitions to give |
3534 | + * unsupported C libraries a way to opt out of any kernel definition. */ |
3535 | #else /* !defined(__GLIBC__) */ |
3536 | |
3537 | /* Definitions for if.h */ |
3538 | +#ifndef __UAPI_DEF_IF_IFCONF |
3539 | #define __UAPI_DEF_IF_IFCONF 1 |
3540 | +#endif |
3541 | +#ifndef __UAPI_DEF_IF_IFMAP |
3542 | #define __UAPI_DEF_IF_IFMAP 1 |
3543 | +#endif |
3544 | +#ifndef __UAPI_DEF_IF_IFNAMSIZ |
3545 | #define __UAPI_DEF_IF_IFNAMSIZ 1 |
3546 | +#endif |
3547 | +#ifndef __UAPI_DEF_IF_IFREQ |
3548 | #define __UAPI_DEF_IF_IFREQ 1 |
3549 | +#endif |
3550 | /* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */ |
3551 | +#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS |
3552 | #define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1 |
3553 | +#endif |
3554 | /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */ |
3555 | +#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO |
3556 | #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1 |
3557 | +#endif |
3558 | |
3559 | /* Definitions for in.h */ |
3560 | +#ifndef __UAPI_DEF_IN_ADDR |
3561 | #define __UAPI_DEF_IN_ADDR 1 |
3562 | +#endif |
3563 | +#ifndef __UAPI_DEF_IN_IPPROTO |
3564 | #define __UAPI_DEF_IN_IPPROTO 1 |
3565 | +#endif |
3566 | +#ifndef __UAPI_DEF_IN_PKTINFO |
3567 | #define __UAPI_DEF_IN_PKTINFO 1 |
3568 | +#endif |
3569 | +#ifndef __UAPI_DEF_IP_MREQ |
3570 | #define __UAPI_DEF_IP_MREQ 1 |
3571 | +#endif |
3572 | +#ifndef __UAPI_DEF_SOCKADDR_IN |
3573 | #define __UAPI_DEF_SOCKADDR_IN 1 |
3574 | +#endif |
3575 | +#ifndef __UAPI_DEF_IN_CLASS |
3576 | #define __UAPI_DEF_IN_CLASS 1 |
3577 | +#endif |
3578 | |
3579 | /* Definitions for in6.h */ |
3580 | +#ifndef __UAPI_DEF_IN6_ADDR |
3581 | #define __UAPI_DEF_IN6_ADDR 1 |
3582 | +#endif |
3583 | +#ifndef __UAPI_DEF_IN6_ADDR_ALT |
3584 | #define __UAPI_DEF_IN6_ADDR_ALT 1 |
3585 | +#endif |
3586 | +#ifndef __UAPI_DEF_SOCKADDR_IN6 |
3587 | #define __UAPI_DEF_SOCKADDR_IN6 1 |
3588 | +#endif |
3589 | +#ifndef __UAPI_DEF_IPV6_MREQ |
3590 | #define __UAPI_DEF_IPV6_MREQ 1 |
3591 | +#endif |
3592 | +#ifndef __UAPI_DEF_IPPROTO_V6 |
3593 | #define __UAPI_DEF_IPPROTO_V6 1 |
3594 | +#endif |
3595 | +#ifndef __UAPI_DEF_IPV6_OPTIONS |
3596 | #define __UAPI_DEF_IPV6_OPTIONS 1 |
3597 | +#endif |
3598 | +#ifndef __UAPI_DEF_IN6_PKTINFO |
3599 | #define __UAPI_DEF_IN6_PKTINFO 1 |
3600 | +#endif |
3601 | +#ifndef __UAPI_DEF_IP6_MTUINFO |
3602 | #define __UAPI_DEF_IP6_MTUINFO 1 |
3603 | +#endif |
3604 | |
3605 | /* Definitions for ipx.h */ |
3606 | +#ifndef __UAPI_DEF_SOCKADDR_IPX |
3607 | #define __UAPI_DEF_SOCKADDR_IPX 1 |
3608 | +#endif |
3609 | +#ifndef __UAPI_DEF_IPX_ROUTE_DEFINITION |
3610 | #define __UAPI_DEF_IPX_ROUTE_DEFINITION 1 |
3611 | +#endif |
3612 | +#ifndef __UAPI_DEF_IPX_INTERFACE_DEFINITION |
3613 | #define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1 |
3614 | +#endif |
3615 | +#ifndef __UAPI_DEF_IPX_CONFIG_DATA |
3616 | #define __UAPI_DEF_IPX_CONFIG_DATA 1 |
3617 | +#endif |
3618 | +#ifndef __UAPI_DEF_IPX_ROUTE_DEF |
3619 | #define __UAPI_DEF_IPX_ROUTE_DEF 1 |
3620 | +#endif |
3621 | |
3622 | /* Definitions for xattr.h */ |
3623 | +#ifndef __UAPI_DEF_XATTR |
3624 | #define __UAPI_DEF_XATTR 1 |
3625 | +#endif |
3626 | |
3627 | #endif /* __GLIBC__ */ |
3628 | |
3629 | diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h |
3630 | index 3fea7709a441..57ccfb32e87f 100644 |
3631 | --- a/include/uapi/linux/netfilter/nf_conntrack_common.h |
3632 | +++ b/include/uapi/linux/netfilter/nf_conntrack_common.h |
3633 | @@ -36,7 +36,7 @@ enum ip_conntrack_info { |
3634 | |
3635 | #define NF_CT_STATE_INVALID_BIT (1 << 0) |
3636 | #define NF_CT_STATE_BIT(ctinfo) (1 << ((ctinfo) % IP_CT_IS_REPLY + 1)) |
3637 | -#define NF_CT_STATE_UNTRACKED_BIT (1 << (IP_CT_UNTRACKED + 1)) |
3638 | +#define NF_CT_STATE_UNTRACKED_BIT (1 << 6) |
3639 | |
3640 | /* Bitset representing status of connection. */ |
3641 | enum ip_conntrack_status { |
3642 | diff --git a/include/xen/balloon.h b/include/xen/balloon.h |
3643 | index 4914b93a23f2..61f410fd74e4 100644 |
3644 | --- a/include/xen/balloon.h |
3645 | +++ b/include/xen/balloon.h |
3646 | @@ -44,3 +44,8 @@ static inline void xen_balloon_init(void) |
3647 | { |
3648 | } |
3649 | #endif |
3650 | + |
3651 | +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG |
3652 | +struct resource; |
3653 | +void arch_xen_balloon_init(struct resource *hostmem_resource); |
3654 | +#endif |
3655 | diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c |
3656 | index dbd7b322a86b..1890be7ea9cd 100644 |
3657 | --- a/kernel/bpf/sockmap.c |
3658 | +++ b/kernel/bpf/sockmap.c |
3659 | @@ -588,8 +588,15 @@ static void sock_map_free(struct bpf_map *map) |
3660 | |
3661 | write_lock_bh(&sock->sk_callback_lock); |
3662 | psock = smap_psock_sk(sock); |
3663 | - smap_list_remove(psock, &stab->sock_map[i]); |
3664 | - smap_release_sock(psock, sock); |
3665 | + /* This check handles a racing sock event that can get the |
3666 | + * sk_callback_lock before this case but after xchg happens |
3667 | + * causing the refcnt to hit zero and sock user data (psock) |
3668 | + * to be null and queued for garbage collection. |
3669 | + */ |
3670 | + if (likely(psock)) { |
3671 | + smap_list_remove(psock, &stab->sock_map[i]); |
3672 | + smap_release_sock(psock, sock); |
3673 | + } |
3674 | write_unlock_bh(&sock->sk_callback_lock); |
3675 | } |
3676 | rcu_read_unlock(); |
3677 | diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c |
3678 | index 024085daab1a..a2c05d2476ac 100644 |
3679 | --- a/kernel/cgroup/cgroup-v1.c |
3680 | +++ b/kernel/cgroup/cgroup-v1.c |
3681 | @@ -123,7 +123,11 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from) |
3682 | */ |
3683 | do { |
3684 | css_task_iter_start(&from->self, 0, &it); |
3685 | - task = css_task_iter_next(&it); |
3686 | + |
3687 | + do { |
3688 | + task = css_task_iter_next(&it); |
3689 | + } while (task && (task->flags & PF_EXITING)); |
3690 | + |
3691 | if (task) |
3692 | get_task_struct(task); |
3693 | css_task_iter_end(&it); |
3694 | diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h |
3695 | index 17f05ef8f575..e4d3819a91cc 100644 |
3696 | --- a/kernel/irq/debug.h |
3697 | +++ b/kernel/irq/debug.h |
3698 | @@ -12,6 +12,11 @@ |
3699 | |
3700 | static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) |
3701 | { |
3702 | + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); |
3703 | + |
3704 | + if (!__ratelimit(&ratelimit)) |
3705 | + return; |
3706 | + |
3707 | printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n", |
3708 | irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); |
3709 | printk("->handle_irq(): %p, ", desc->handle_irq); |
3710 | diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c |
3711 | index 052773df9f03..d00e85ac10d6 100644 |
3712 | --- a/kernel/time/hrtimer.c |
3713 | +++ b/kernel/time/hrtimer.c |
3714 | @@ -1106,7 +1106,12 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, |
3715 | |
3716 | cpu_base = raw_cpu_ptr(&hrtimer_bases); |
3717 | |
3718 | - if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS) |
3719 | + /* |
3720 | + * POSIX magic: Relative CLOCK_REALTIME timers are not affected by |
3721 | + * clock modifications, so they needs to become CLOCK_MONOTONIC to |
3722 | + * ensure POSIX compliance. |
3723 | + */ |
3724 | + if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL) |
3725 | clock_id = CLOCK_MONOTONIC; |
3726 | |
3727 | base = hrtimer_clockid_to_base(clock_id); |
3728 | diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h |
3729 | index 57fd45ab7af1..08c60d10747f 100644 |
3730 | --- a/lib/mpi/longlong.h |
3731 | +++ b/lib/mpi/longlong.h |
3732 | @@ -671,7 +671,23 @@ do { \ |
3733 | ************** MIPS/64 ************** |
3734 | ***************************************/ |
3735 | #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 |
3736 | -#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4) |
3737 | +#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 |
3738 | +/* |
3739 | + * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C |
3740 | + * code below, so we special case MIPS64r6 until the compiler can do better. |
3741 | + */ |
3742 | +#define umul_ppmm(w1, w0, u, v) \ |
3743 | +do { \ |
3744 | + __asm__ ("dmulu %0,%1,%2" \ |
3745 | + : "=d" ((UDItype)(w0)) \ |
3746 | + : "d" ((UDItype)(u)), \ |
3747 | + "d" ((UDItype)(v))); \ |
3748 | + __asm__ ("dmuhu %0,%1,%2" \ |
3749 | + : "=d" ((UDItype)(w1)) \ |
3750 | + : "d" ((UDItype)(u)), \ |
3751 | + "d" ((UDItype)(v))); \ |
3752 | +} while (0) |
3753 | +#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4) |
3754 | #define umul_ppmm(w1, w0, u, v) \ |
3755 | do { \ |
3756 | typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \ |
3757 | diff --git a/mm/frame_vector.c b/mm/frame_vector.c |
3758 | index 297c7238f7d4..c64dca6e27c2 100644 |
3759 | --- a/mm/frame_vector.c |
3760 | +++ b/mm/frame_vector.c |
3761 | @@ -62,8 +62,10 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, |
3762 | * get_user_pages_longterm() and disallow it for filesystem-dax |
3763 | * mappings. |
3764 | */ |
3765 | - if (vma_is_fsdax(vma)) |
3766 | - return -EOPNOTSUPP; |
3767 | + if (vma_is_fsdax(vma)) { |
3768 | + ret = -EOPNOTSUPP; |
3769 | + goto out; |
3770 | + } |
3771 | |
3772 | if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) { |
3773 | vec->got_ref = true; |
3774 | diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c |
3775 | index 045331204097..1933654007c4 100644 |
3776 | --- a/net/ipv4/ip_gre.c |
3777 | +++ b/net/ipv4/ip_gre.c |
3778 | @@ -1274,6 +1274,7 @@ static const struct net_device_ops erspan_netdev_ops = { |
3779 | static void ipgre_tap_setup(struct net_device *dev) |
3780 | { |
3781 | ether_setup(dev); |
3782 | + dev->max_mtu = 0; |
3783 | dev->netdev_ops = &gre_tap_netdev_ops; |
3784 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
3785 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
3786 | diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c |
3787 | index e50b7fea57ee..bcfc00e88756 100644 |
3788 | --- a/net/ipv4/xfrm4_input.c |
3789 | +++ b/net/ipv4/xfrm4_input.c |
3790 | @@ -23,6 +23,12 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb) |
3791 | return xfrm4_extract_header(skb); |
3792 | } |
3793 | |
3794 | +static int xfrm4_rcv_encap_finish2(struct net *net, struct sock *sk, |
3795 | + struct sk_buff *skb) |
3796 | +{ |
3797 | + return dst_input(skb); |
3798 | +} |
3799 | + |
3800 | static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk, |
3801 | struct sk_buff *skb) |
3802 | { |
3803 | @@ -33,7 +39,11 @@ static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk, |
3804 | iph->tos, skb->dev)) |
3805 | goto drop; |
3806 | } |
3807 | - return dst_input(skb); |
3808 | + |
3809 | + if (xfrm_trans_queue(skb, xfrm4_rcv_encap_finish2)) |
3810 | + goto drop; |
3811 | + |
3812 | + return 0; |
3813 | drop: |
3814 | kfree_skb(skb); |
3815 | return NET_RX_DROP; |
3816 | diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c |
3817 | index 5b4870caf268..e8ab306794d8 100644 |
3818 | --- a/net/ipv6/ip6_gre.c |
3819 | +++ b/net/ipv6/ip6_gre.c |
3820 | @@ -1335,6 +1335,7 @@ static void ip6gre_tap_setup(struct net_device *dev) |
3821 | |
3822 | ether_setup(dev); |
3823 | |
3824 | + dev->max_mtu = 0; |
3825 | dev->netdev_ops = &ip6gre_tap_netdev_ops; |
3826 | dev->needs_free_netdev = true; |
3827 | dev->priv_destructor = ip6gre_dev_free; |
3828 | diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c |
3829 | index 3f46121ad139..1161fd5630c1 100644 |
3830 | --- a/net/ipv6/ip6_tunnel.c |
3831 | +++ b/net/ipv6/ip6_tunnel.c |
3832 | @@ -1131,8 +1131,13 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, |
3833 | max_headroom += 8; |
3834 | mtu -= 8; |
3835 | } |
3836 | - if (mtu < IPV6_MIN_MTU) |
3837 | - mtu = IPV6_MIN_MTU; |
3838 | + if (skb->protocol == htons(ETH_P_IPV6)) { |
3839 | + if (mtu < IPV6_MIN_MTU) |
3840 | + mtu = IPV6_MIN_MTU; |
3841 | + } else if (mtu < 576) { |
3842 | + mtu = 576; |
3843 | + } |
3844 | + |
3845 | if (skb_dst(skb) && !t->parms.collect_md) |
3846 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); |
3847 | if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) { |
3848 | @@ -1679,11 +1684,11 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) |
3849 | { |
3850 | struct ip6_tnl *tnl = netdev_priv(dev); |
3851 | |
3852 | - if (tnl->parms.proto == IPPROTO_IPIP) { |
3853 | - if (new_mtu < ETH_MIN_MTU) |
3854 | + if (tnl->parms.proto == IPPROTO_IPV6) { |
3855 | + if (new_mtu < IPV6_MIN_MTU) |
3856 | return -EINVAL; |
3857 | } else { |
3858 | - if (new_mtu < IPV6_MIN_MTU) |
3859 | + if (new_mtu < ETH_MIN_MTU) |
3860 | return -EINVAL; |
3861 | } |
3862 | if (new_mtu > 0xFFF8 - dev->hard_header_len) |
3863 | diff --git a/net/ipv6/route.c b/net/ipv6/route.c |
3864 | index ca8d3266e92e..a4a865c8a23c 100644 |
3865 | --- a/net/ipv6/route.c |
3866 | +++ b/net/ipv6/route.c |
3867 | @@ -1755,6 +1755,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, |
3868 | } |
3869 | |
3870 | rt->dst.flags |= DST_HOST; |
3871 | + rt->dst.input = ip6_input; |
3872 | rt->dst.output = ip6_output; |
3873 | rt->rt6i_gateway = fl6->daddr; |
3874 | rt->rt6i_dst.addr = fl6->daddr; |
3875 | diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c |
3876 | index fe04e23af986..841f4a07438e 100644 |
3877 | --- a/net/ipv6/xfrm6_input.c |
3878 | +++ b/net/ipv6/xfrm6_input.c |
3879 | @@ -32,6 +32,14 @@ int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi, |
3880 | } |
3881 | EXPORT_SYMBOL(xfrm6_rcv_spi); |
3882 | |
3883 | +static int xfrm6_transport_finish2(struct net *net, struct sock *sk, |
3884 | + struct sk_buff *skb) |
3885 | +{ |
3886 | + if (xfrm_trans_queue(skb, ip6_rcv_finish)) |
3887 | + __kfree_skb(skb); |
3888 | + return -1; |
3889 | +} |
3890 | + |
3891 | int xfrm6_transport_finish(struct sk_buff *skb, int async) |
3892 | { |
3893 | struct xfrm_offload *xo = xfrm_offload(skb); |
3894 | @@ -56,7 +64,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async) |
3895 | |
3896 | NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, |
3897 | dev_net(skb->dev), NULL, skb, skb->dev, NULL, |
3898 | - ip6_rcv_finish); |
3899 | + xfrm6_transport_finish2); |
3900 | return -1; |
3901 | } |
3902 | |
3903 | diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c |
3904 | index 70e9d2ca8bbe..4daafb07602f 100644 |
3905 | --- a/net/mac80211/rx.c |
3906 | +++ b/net/mac80211/rx.c |
3907 | @@ -3632,6 +3632,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) |
3908 | } |
3909 | return true; |
3910 | case NL80211_IFTYPE_MESH_POINT: |
3911 | + if (ether_addr_equal(sdata->vif.addr, hdr->addr2)) |
3912 | + return false; |
3913 | if (multicast) |
3914 | return true; |
3915 | return ether_addr_equal(sdata->vif.addr, hdr->addr1); |
3916 | diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c |
3917 | index 64e1ee091225..5b504aa653f5 100644 |
3918 | --- a/net/netfilter/nf_tables_api.c |
3919 | +++ b/net/netfilter/nf_tables_api.c |
3920 | @@ -2072,7 +2072,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb, |
3921 | continue; |
3922 | |
3923 | list_for_each_entry_rcu(chain, &table->chains, list) { |
3924 | - if (ctx && ctx->chain[0] && |
3925 | + if (ctx && ctx->chain && |
3926 | strcmp(ctx->chain, chain->name) != 0) |
3927 | continue; |
3928 | |
3929 | @@ -4596,8 +4596,10 @@ static int nf_tables_dump_obj_done(struct netlink_callback *cb) |
3930 | { |
3931 | struct nft_obj_filter *filter = cb->data; |
3932 | |
3933 | - kfree(filter->table); |
3934 | - kfree(filter); |
3935 | + if (filter) { |
3936 | + kfree(filter->table); |
3937 | + kfree(filter); |
3938 | + } |
3939 | |
3940 | return 0; |
3941 | } |
3942 | diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c |
3943 | index ecbb019efcbd..934c239cf98d 100644 |
3944 | --- a/net/sched/cls_api.c |
3945 | +++ b/net/sched/cls_api.c |
3946 | @@ -197,21 +197,26 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block, |
3947 | |
3948 | static void tcf_chain_flush(struct tcf_chain *chain) |
3949 | { |
3950 | - struct tcf_proto *tp; |
3951 | + struct tcf_proto *tp = rtnl_dereference(chain->filter_chain); |
3952 | |
3953 | if (chain->p_filter_chain) |
3954 | RCU_INIT_POINTER(*chain->p_filter_chain, NULL); |
3955 | - while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) { |
3956 | + while (tp) { |
3957 | RCU_INIT_POINTER(chain->filter_chain, tp->next); |
3958 | - tcf_chain_put(chain); |
3959 | tcf_proto_destroy(tp); |
3960 | + tp = rtnl_dereference(chain->filter_chain); |
3961 | + tcf_chain_put(chain); |
3962 | } |
3963 | } |
3964 | |
3965 | static void tcf_chain_destroy(struct tcf_chain *chain) |
3966 | { |
3967 | + struct tcf_block *block = chain->block; |
3968 | + |
3969 | list_del(&chain->list); |
3970 | kfree(chain); |
3971 | + if (list_empty(&block->chain_list)) |
3972 | + kfree(block); |
3973 | } |
3974 | |
3975 | static void tcf_chain_hold(struct tcf_chain *chain) |
3976 | @@ -275,22 +280,8 @@ int tcf_block_get(struct tcf_block **p_block, |
3977 | } |
3978 | EXPORT_SYMBOL(tcf_block_get); |
3979 | |
3980 | -static void tcf_block_put_final(struct work_struct *work) |
3981 | -{ |
3982 | - struct tcf_block *block = container_of(work, struct tcf_block, work); |
3983 | - struct tcf_chain *chain, *tmp; |
3984 | - |
3985 | - rtnl_lock(); |
3986 | - /* Only chain 0 should be still here. */ |
3987 | - list_for_each_entry_safe(chain, tmp, &block->chain_list, list) |
3988 | - tcf_chain_put(chain); |
3989 | - rtnl_unlock(); |
3990 | - kfree(block); |
3991 | -} |
3992 | - |
3993 | /* XXX: Standalone actions are not allowed to jump to any chain, and bound |
3994 | - * actions should be all removed after flushing. However, filters are now |
3995 | - * destroyed in tc filter workqueue with RTNL lock, they can not race here. |
3996 | + * actions should be all removed after flushing. |
3997 | */ |
3998 | void tcf_block_put(struct tcf_block *block) |
3999 | { |
4000 | @@ -299,15 +290,22 @@ void tcf_block_put(struct tcf_block *block) |
4001 | if (!block) |
4002 | return; |
4003 | |
4004 | - list_for_each_entry_safe(chain, tmp, &block->chain_list, list) |
4005 | + /* Hold a refcnt for all chains, so that they don't disappear |
4006 | + * while we are iterating. |
4007 | + */ |
4008 | + list_for_each_entry(chain, &block->chain_list, list) |
4009 | + tcf_chain_hold(chain); |
4010 | + |
4011 | + list_for_each_entry(chain, &block->chain_list, list) |
4012 | tcf_chain_flush(chain); |
4013 | |
4014 | - INIT_WORK(&block->work, tcf_block_put_final); |
4015 | - /* Wait for RCU callbacks to release the reference count and make |
4016 | - * sure their works have been queued before this. |
4017 | - */ |
4018 | - rcu_barrier(); |
4019 | - tcf_queue_work(&block->work); |
4020 | + /* At this point, all the chains should have refcnt >= 1. */ |
4021 | + list_for_each_entry_safe(chain, tmp, &block->chain_list, list) |
4022 | + tcf_chain_put(chain); |
4023 | + |
4024 | + /* Finally, put chain 0 and allow block to be freed. */ |
4025 | + chain = list_first_entry(&block->chain_list, struct tcf_chain, list); |
4026 | + tcf_chain_put(chain); |
4027 | } |
4028 | EXPORT_SYMBOL(tcf_block_put); |
4029 | |
4030 | diff --git a/net/sctp/socket.c b/net/sctp/socket.c |
4031 | index 3c8b92667866..6b3a862706de 100644 |
4032 | --- a/net/sctp/socket.c |
4033 | +++ b/net/sctp/socket.c |
4034 | @@ -3494,6 +3494,8 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk, |
4035 | |
4036 | if (optlen < sizeof(struct sctp_hmacalgo)) |
4037 | return -EINVAL; |
4038 | + optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) + |
4039 | + SCTP_AUTH_NUM_HMACS * sizeof(u16)); |
4040 | |
4041 | hmacs = memdup_user(optval, optlen); |
4042 | if (IS_ERR(hmacs)) |
4043 | @@ -3532,6 +3534,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk, |
4044 | |
4045 | if (optlen <= sizeof(struct sctp_authkey)) |
4046 | return -EINVAL; |
4047 | + /* authkey->sca_keylength is u16, so optlen can't be bigger than |
4048 | + * this. |
4049 | + */ |
4050 | + optlen = min_t(unsigned int, optlen, USHRT_MAX + |
4051 | + sizeof(struct sctp_authkey)); |
4052 | |
4053 | authkey = memdup_user(optval, optlen); |
4054 | if (IS_ERR(authkey)) |
4055 | @@ -3889,6 +3896,9 @@ static int sctp_setsockopt_reset_streams(struct sock *sk, |
4056 | |
4057 | if (optlen < sizeof(*params)) |
4058 | return -EINVAL; |
4059 | + /* srs_number_streams is u16, so optlen can't be bigger than this. */ |
4060 | + optlen = min_t(unsigned int, optlen, USHRT_MAX + |
4061 | + sizeof(__u16) * sizeof(*params)); |
4062 | |
4063 | params = memdup_user(optval, optlen); |
4064 | if (IS_ERR(params)) |
4065 | @@ -4947,7 +4957,7 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv |
4066 | len = sizeof(int); |
4067 | if (put_user(len, optlen)) |
4068 | return -EFAULT; |
4069 | - if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) |
4070 | + if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len)) |
4071 | return -EFAULT; |
4072 | return 0; |
4073 | } |
4074 | @@ -5578,6 +5588,9 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, |
4075 | err = -EFAULT; |
4076 | goto out; |
4077 | } |
4078 | + /* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too, |
4079 | + * but we can't change it anymore. |
4080 | + */ |
4081 | if (put_user(bytes_copied, optlen)) |
4082 | err = -EFAULT; |
4083 | out: |
4084 | @@ -6014,7 +6027,7 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len, |
4085 | params.assoc_id = 0; |
4086 | } else if (len >= sizeof(struct sctp_assoc_value)) { |
4087 | len = sizeof(struct sctp_assoc_value); |
4088 | - if (copy_from_user(¶ms, optval, sizeof(params))) |
4089 | + if (copy_from_user(¶ms, optval, len)) |
4090 | return -EFAULT; |
4091 | } else |
4092 | return -EINVAL; |
4093 | @@ -6184,7 +6197,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len, |
4094 | |
4095 | if (len < sizeof(struct sctp_authkeyid)) |
4096 | return -EINVAL; |
4097 | - if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) |
4098 | + |
4099 | + len = sizeof(struct sctp_authkeyid); |
4100 | + if (copy_from_user(&val, optval, len)) |
4101 | return -EFAULT; |
4102 | |
4103 | asoc = sctp_id2assoc(sk, val.scact_assoc_id); |
4104 | @@ -6196,7 +6211,6 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len, |
4105 | else |
4106 | val.scact_keynumber = ep->active_key_id; |
4107 | |
4108 | - len = sizeof(struct sctp_authkeyid); |
4109 | if (put_user(len, optlen)) |
4110 | return -EFAULT; |
4111 | if (copy_to_user(optval, &val, len)) |
4112 | @@ -6222,7 +6236,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, |
4113 | if (len < sizeof(struct sctp_authchunks)) |
4114 | return -EINVAL; |
4115 | |
4116 | - if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) |
4117 | + if (copy_from_user(&val, optval, sizeof(val))) |
4118 | return -EFAULT; |
4119 | |
4120 | to = p->gauth_chunks; |
4121 | @@ -6267,7 +6281,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, |
4122 | if (len < sizeof(struct sctp_authchunks)) |
4123 | return -EINVAL; |
4124 | |
4125 | - if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) |
4126 | + if (copy_from_user(&val, optval, sizeof(val))) |
4127 | return -EFAULT; |
4128 | |
4129 | to = p->gauth_chunks; |
4130 | diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c |
4131 | index a71be33f3afe..e36ec5dd64c6 100644 |
4132 | --- a/net/sctp/ulpqueue.c |
4133 | +++ b/net/sctp/ulpqueue.c |
4134 | @@ -1084,29 +1084,21 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, |
4135 | void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, |
4136 | gfp_t gfp) |
4137 | { |
4138 | - struct sctp_association *asoc; |
4139 | - __u16 needed, freed; |
4140 | - |
4141 | - asoc = ulpq->asoc; |
4142 | + struct sctp_association *asoc = ulpq->asoc; |
4143 | + __u32 freed = 0; |
4144 | + __u16 needed; |
4145 | |
4146 | - if (chunk) { |
4147 | - needed = ntohs(chunk->chunk_hdr->length); |
4148 | - needed -= sizeof(struct sctp_data_chunk); |
4149 | - } else |
4150 | - needed = SCTP_DEFAULT_MAXWINDOW; |
4151 | - |
4152 | - freed = 0; |
4153 | + needed = ntohs(chunk->chunk_hdr->length) - |
4154 | + sizeof(struct sctp_data_chunk); |
4155 | |
4156 | if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) { |
4157 | freed = sctp_ulpq_renege_order(ulpq, needed); |
4158 | - if (freed < needed) { |
4159 | + if (freed < needed) |
4160 | freed += sctp_ulpq_renege_frags(ulpq, needed - freed); |
4161 | - } |
4162 | } |
4163 | /* If able to free enough room, accept this chunk. */ |
4164 | - if (chunk && (freed >= needed)) { |
4165 | - int retval; |
4166 | - retval = sctp_ulpq_tail_data(ulpq, chunk, gfp); |
4167 | + if (freed >= needed) { |
4168 | + int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp); |
4169 | /* |
4170 | * Enter partial delivery if chunk has not been |
4171 | * delivered; otherwise, drain the reassembly queue. |
4172 | diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c |
4173 | index 47ec121574ce..c8001471da6c 100644 |
4174 | --- a/net/tipc/bearer.c |
4175 | +++ b/net/tipc/bearer.c |
4176 | @@ -324,6 +324,7 @@ static int tipc_enable_bearer(struct net *net, const char *name, |
4177 | if (res) { |
4178 | pr_warn("Bearer <%s> rejected, enable failure (%d)\n", |
4179 | name, -res); |
4180 | + kfree(b); |
4181 | return -EINVAL; |
4182 | } |
4183 | |
4184 | @@ -347,8 +348,10 @@ static int tipc_enable_bearer(struct net *net, const char *name, |
4185 | if (skb) |
4186 | tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); |
4187 | |
4188 | - if (tipc_mon_create(net, bearer_id)) |
4189 | + if (tipc_mon_create(net, bearer_id)) { |
4190 | + bearer_disable(net, b); |
4191 | return -ENOMEM; |
4192 | + } |
4193 | |
4194 | pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", |
4195 | name, |
4196 | diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c |
4197 | index 9e109bb1a207..0fcfb3916dcf 100644 |
4198 | --- a/net/tipc/monitor.c |
4199 | +++ b/net/tipc/monitor.c |
4200 | @@ -633,9 +633,13 @@ void tipc_mon_delete(struct net *net, int bearer_id) |
4201 | { |
4202 | struct tipc_net *tn = tipc_net(net); |
4203 | struct tipc_monitor *mon = tipc_monitor(net, bearer_id); |
4204 | - struct tipc_peer *self = get_self(net, bearer_id); |
4205 | + struct tipc_peer *self; |
4206 | struct tipc_peer *peer, *tmp; |
4207 | |
4208 | + if (!mon) |
4209 | + return; |
4210 | + |
4211 | + self = get_self(net, bearer_id); |
4212 | write_lock_bh(&mon->lock); |
4213 | tn->monitors[bearer_id] = NULL; |
4214 | list_for_each_entry_safe(peer, tmp, &self->list, list) { |
4215 | diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c |
4216 | index 81bef0676e1d..ea28aa505302 100644 |
4217 | --- a/net/wireless/nl80211.c |
4218 | +++ b/net/wireless/nl80211.c |
4219 | @@ -11301,7 +11301,8 @@ static int nl80211_nan_add_func(struct sk_buff *skb, |
4220 | break; |
4221 | case NL80211_NAN_FUNC_FOLLOW_UP: |
4222 | if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] || |
4223 | - !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]) { |
4224 | + !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] || |
4225 | + !tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]) { |
4226 | err = -EINVAL; |
4227 | goto out; |
4228 | } |
4229 | diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c |
4230 | index da6447389ffb..3f6f6f8c9fa5 100644 |
4231 | --- a/net/xfrm/xfrm_input.c |
4232 | +++ b/net/xfrm/xfrm_input.c |
4233 | @@ -8,15 +8,29 @@ |
4234 | * |
4235 | */ |
4236 | |
4237 | +#include <linux/bottom_half.h> |
4238 | +#include <linux/interrupt.h> |
4239 | #include <linux/slab.h> |
4240 | #include <linux/module.h> |
4241 | #include <linux/netdevice.h> |
4242 | +#include <linux/percpu.h> |
4243 | #include <net/dst.h> |
4244 | #include <net/ip.h> |
4245 | #include <net/xfrm.h> |
4246 | #include <net/ip_tunnels.h> |
4247 | #include <net/ip6_tunnel.h> |
4248 | |
4249 | +struct xfrm_trans_tasklet { |
4250 | + struct tasklet_struct tasklet; |
4251 | + struct sk_buff_head queue; |
4252 | +}; |
4253 | + |
4254 | +struct xfrm_trans_cb { |
4255 | + int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb); |
4256 | +}; |
4257 | + |
4258 | +#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0])) |
4259 | + |
4260 | static struct kmem_cache *secpath_cachep __read_mostly; |
4261 | |
4262 | static DEFINE_SPINLOCK(xfrm_input_afinfo_lock); |
4263 | @@ -25,6 +39,8 @@ static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1]; |
4264 | static struct gro_cells gro_cells; |
4265 | static struct net_device xfrm_napi_dev; |
4266 | |
4267 | +static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet); |
4268 | + |
4269 | int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo) |
4270 | { |
4271 | int err = 0; |
4272 | @@ -477,9 +493,41 @@ int xfrm_input_resume(struct sk_buff *skb, int nexthdr) |
4273 | } |
4274 | EXPORT_SYMBOL(xfrm_input_resume); |
4275 | |
4276 | +static void xfrm_trans_reinject(unsigned long data) |
4277 | +{ |
4278 | + struct xfrm_trans_tasklet *trans = (void *)data; |
4279 | + struct sk_buff_head queue; |
4280 | + struct sk_buff *skb; |
4281 | + |
4282 | + __skb_queue_head_init(&queue); |
4283 | + skb_queue_splice_init(&trans->queue, &queue); |
4284 | + |
4285 | + while ((skb = __skb_dequeue(&queue))) |
4286 | + XFRM_TRANS_SKB_CB(skb)->finish(dev_net(skb->dev), NULL, skb); |
4287 | +} |
4288 | + |
4289 | +int xfrm_trans_queue(struct sk_buff *skb, |
4290 | + int (*finish)(struct net *, struct sock *, |
4291 | + struct sk_buff *)) |
4292 | +{ |
4293 | + struct xfrm_trans_tasklet *trans; |
4294 | + |
4295 | + trans = this_cpu_ptr(&xfrm_trans_tasklet); |
4296 | + |
4297 | + if (skb_queue_len(&trans->queue) >= netdev_max_backlog) |
4298 | + return -ENOBUFS; |
4299 | + |
4300 | + XFRM_TRANS_SKB_CB(skb)->finish = finish; |
4301 | + skb_queue_tail(&trans->queue, skb); |
4302 | + tasklet_schedule(&trans->tasklet); |
4303 | + return 0; |
4304 | +} |
4305 | +EXPORT_SYMBOL(xfrm_trans_queue); |
4306 | + |
4307 | void __init xfrm_input_init(void) |
4308 | { |
4309 | int err; |
4310 | + int i; |
4311 | |
4312 | init_dummy_netdev(&xfrm_napi_dev); |
4313 | err = gro_cells_init(&gro_cells, &xfrm_napi_dev); |
4314 | @@ -490,4 +538,13 @@ void __init xfrm_input_init(void) |
4315 | sizeof(struct sec_path), |
4316 | 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
4317 | NULL); |
4318 | + |
4319 | + for_each_possible_cpu(i) { |
4320 | + struct xfrm_trans_tasklet *trans; |
4321 | + |
4322 | + trans = &per_cpu(xfrm_trans_tasklet, i); |
4323 | + __skb_queue_head_init(&trans->queue); |
4324 | + tasklet_init(&trans->tasklet, xfrm_trans_reinject, |
4325 | + (unsigned long)trans); |
4326 | + } |
4327 | } |
4328 | diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c |
4329 | index 714ce17da717..e853a6dfd33b 100644 |
4330 | --- a/sound/soc/codecs/nau8825.c |
4331 | +++ b/sound/soc/codecs/nau8825.c |
4332 | @@ -905,6 +905,7 @@ static int nau8825_adc_event(struct snd_soc_dapm_widget *w, |
4333 | |
4334 | switch (event) { |
4335 | case SND_SOC_DAPM_POST_PMU: |
4336 | + msleep(125); |
4337 | regmap_update_bits(nau8825->regmap, NAU8825_REG_ENA_CTRL, |
4338 | NAU8825_ENABLE_ADC, NAU8825_ENABLE_ADC); |
4339 | break; |
4340 | diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c |
4341 | index 938baff86ef2..2684a2ba33cd 100644 |
4342 | --- a/sound/soc/sh/rcar/adg.c |
4343 | +++ b/sound/soc/sh/rcar/adg.c |
4344 | @@ -216,7 +216,7 @@ int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_mod *cmd_mod, |
4345 | NULL, &val, NULL); |
4346 | |
4347 | val = val << shift; |
4348 | - mask = 0xffff << shift; |
4349 | + mask = 0x0f1f << shift; |
4350 | |
4351 | rsnd_mod_bset(adg_mod, CMDOUT_TIMSEL, mask, val); |
4352 | |
4353 | @@ -244,7 +244,7 @@ int rsnd_adg_set_src_timesel_gen2(struct rsnd_mod *src_mod, |
4354 | |
4355 | in = in << shift; |
4356 | out = out << shift; |
4357 | - mask = 0xffff << shift; |
4358 | + mask = 0x0f1f << shift; |
4359 | |
4360 | switch (id / 2) { |
4361 | case 0: |
4362 | @@ -374,7 +374,7 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate) |
4363 | ckr = 0x80000000; |
4364 | } |
4365 | |
4366 | - rsnd_mod_bset(adg_mod, BRGCKR, 0x80FF0000, adg->ckr | ckr); |
4367 | + rsnd_mod_bset(adg_mod, BRGCKR, 0x80770000, adg->ckr | ckr); |
4368 | rsnd_mod_write(adg_mod, BRRA, adg->rbga); |
4369 | rsnd_mod_write(adg_mod, BRRB, adg->rbgb); |
4370 |