Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0203-4.9.104-all-fixes.patch



Revision 3180
Wed Aug 8 14:17:33 2018 UTC by niro
File size: 392059 bytes
-linux-4.9.104
1 diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
2 index 1699a55b7b70..ef639960b272 100644
3 --- a/Documentation/device-mapper/thin-provisioning.txt
4 +++ b/Documentation/device-mapper/thin-provisioning.txt
5 @@ -112,9 +112,11 @@ $low_water_mark is expressed in blocks of size $data_block_size. If
6 free space on the data device drops below this level then a dm event
7 will be triggered which a userspace daemon should catch allowing it to
8 extend the pool device. Only one such event will be sent.
9 -Resuming a device with a new table itself triggers an event so the
10 -userspace daemon can use this to detect a situation where a new table
11 -already exceeds the threshold.
12 +
13 +No special event is triggered if a just resumed device's free space is below
14 +the low water mark. However, resuming a device always triggers an
15 +event; a userspace daemon should verify that free space exceeds the low
16 +water mark when handling this event.
17
18 A low water mark for the metadata device is maintained in the kernel and
19 will trigger a dm event if free space on the metadata device drops below
20 diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
21 index 217a90eaabe7..9c38bbe7e6d7 100644
22 --- a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
23 +++ b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
24 @@ -11,7 +11,11 @@ Required properties:
25 interrupts.
26
27 Optional properties:
28 -- clocks: Optional reference to the clock used by the XOR engine.
29 +- clocks: Optional reference to the clocks used by the XOR engine.
30 +- clock-names: mandatory if there is a second clock, in this case the
31 + name must be "core" for the first clock and "reg" for the second
32 + one
33 +
34
35 Example:
36
37 diff --git a/Makefile b/Makefile
38 index 6090f655fb32..780dcc8033b2 100644
39 --- a/Makefile
40 +++ b/Makefile
41 @@ -1,6 +1,6 @@
42 VERSION = 4
43 PATCHLEVEL = 9
44 -SUBLEVEL = 103
45 +SUBLEVEL = 104
46 EXTRAVERSION =
47 NAME = Roaring Lionus
48
49 diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
50 index 0ca9724597c1..7081e52291d0 100644
51 --- a/arch/alpha/include/asm/xchg.h
52 +++ b/arch/alpha/include/asm/xchg.h
53 @@ -11,6 +11,10 @@
54 * Atomic exchange.
55 * Since it can be used to implement critical sections
56 * it must clobber "memory" (also for interrupts in UP).
57 + *
58 + * The leading and the trailing memory barriers guarantee that these
59 + * operations are fully ordered.
60 + *
61 */
62
63 static inline unsigned long
64 @@ -18,6 +22,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
65 {
66 unsigned long ret, tmp, addr64;
67
68 + smp_mb();
69 __asm__ __volatile__(
70 " andnot %4,7,%3\n"
71 " insbl %1,%4,%1\n"
72 @@ -42,6 +47,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
73 {
74 unsigned long ret, tmp, addr64;
75
76 + smp_mb();
77 __asm__ __volatile__(
78 " andnot %4,7,%3\n"
79 " inswl %1,%4,%1\n"
80 @@ -66,6 +72,7 @@ ____xchg(_u32, volatile int *m, unsigned long val)
81 {
82 unsigned long dummy;
83
84 + smp_mb();
85 __asm__ __volatile__(
86 "1: ldl_l %0,%4\n"
87 " bis $31,%3,%1\n"
88 @@ -86,6 +93,7 @@ ____xchg(_u64, volatile long *m, unsigned long val)
89 {
90 unsigned long dummy;
91
92 + smp_mb();
93 __asm__ __volatile__(
94 "1: ldq_l %0,%4\n"
95 " bis $31,%3,%1\n"
96 @@ -127,10 +135,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
97 * store NEW in MEM. Return the initial value in MEM. Success is
98 * indicated by comparing RETURN with OLD.
99 *
100 - * The memory barrier should be placed in SMP only when we actually
101 - * make the change. If we don't change anything (so if the returned
102 - * prev is equal to old) then we aren't acquiring anything new and
103 - * we don't need any memory barrier as far I can tell.
104 + * The leading and the trailing memory barriers guarantee that these
105 + * operations are fully ordered.
106 + *
107 + * The trailing memory barrier is placed in SMP unconditionally, in
108 + * order to guarantee that dependency ordering is preserved when a
109 + * dependency is headed by an unsuccessful operation.
110 */
111
112 static inline unsigned long
113 @@ -138,6 +148,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
114 {
115 unsigned long prev, tmp, cmp, addr64;
116
117 + smp_mb();
118 __asm__ __volatile__(
119 " andnot %5,7,%4\n"
120 " insbl %1,%5,%1\n"
121 @@ -149,8 +160,8 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
122 " or %1,%2,%2\n"
123 " stq_c %2,0(%4)\n"
124 " beq %2,3f\n"
125 - __ASM__MB
126 "2:\n"
127 + __ASM__MB
128 ".subsection 2\n"
129 "3: br 1b\n"
130 ".previous"
131 @@ -165,6 +176,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
132 {
133 unsigned long prev, tmp, cmp, addr64;
134
135 + smp_mb();
136 __asm__ __volatile__(
137 " andnot %5,7,%4\n"
138 " inswl %1,%5,%1\n"
139 @@ -176,8 +188,8 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
140 " or %1,%2,%2\n"
141 " stq_c %2,0(%4)\n"
142 " beq %2,3f\n"
143 - __ASM__MB
144 "2:\n"
145 + __ASM__MB
146 ".subsection 2\n"
147 "3: br 1b\n"
148 ".previous"
149 @@ -192,6 +204,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
150 {
151 unsigned long prev, cmp;
152
153 + smp_mb();
154 __asm__ __volatile__(
155 "1: ldl_l %0,%5\n"
156 " cmpeq %0,%3,%1\n"
157 @@ -199,8 +212,8 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
158 " mov %4,%1\n"
159 " stl_c %1,%2\n"
160 " beq %1,3f\n"
161 - __ASM__MB
162 "2:\n"
163 + __ASM__MB
164 ".subsection 2\n"
165 "3: br 1b\n"
166 ".previous"
167 @@ -215,6 +228,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
168 {
169 unsigned long prev, cmp;
170
171 + smp_mb();
172 __asm__ __volatile__(
173 "1: ldq_l %0,%5\n"
174 " cmpeq %0,%3,%1\n"
175 @@ -222,8 +236,8 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
176 " mov %4,%1\n"
177 " stq_c %1,%2\n"
178 " beq %1,3f\n"
179 - __ASM__MB
180 "2:\n"
181 + __ASM__MB
182 ".subsection 2\n"
183 "3: br 1b\n"
184 ".previous"
185 diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
186 index 249e10190d20..b7b78cb09a37 100644
187 --- a/arch/arc/Kconfig
188 +++ b/arch/arc/Kconfig
189 @@ -495,7 +495,6 @@ config ARC_CURR_IN_REG
190
191 config ARC_EMUL_UNALIGNED
192 bool "Emulate unaligned memory access (userspace only)"
193 - default N
194 select SYSCTL_ARCH_UNALIGN_NO_WARN
195 select SYSCTL_ARCH_UNALIGN_ALLOW
196 depends on ISA_ARCOMPACT
197 diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
198 index 74dd21b7373c..c51b88ee3cec 100644
199 --- a/arch/arm/boot/dts/bcm283x.dtsi
200 +++ b/arch/arm/boot/dts/bcm283x.dtsi
201 @@ -146,8 +146,8 @@
202
203 i2s: i2s@7e203000 {
204 compatible = "brcm,bcm2835-i2s";
205 - reg = <0x7e203000 0x20>,
206 - <0x7e101098 0x02>;
207 + reg = <0x7e203000 0x24>;
208 + clocks = <&clocks BCM2835_CLOCK_PCM>;
209
210 dmas = <&dma 2>,
211 <&dma 3>;
212 diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
213 index a1658d0721b8..cf0de77f09c4 100644
214 --- a/arch/arm/boot/dts/bcm958625hr.dts
215 +++ b/arch/arm/boot/dts/bcm958625hr.dts
216 @@ -49,7 +49,7 @@
217
218 memory {
219 device_type = "memory";
220 - reg = <0x60000000 0x80000000>;
221 + reg = <0x60000000 0x20000000>;
222 };
223
224 gpio-restart {
225 diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
226 index 58b09bf1ba2d..205130600853 100644
227 --- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
228 +++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
229 @@ -213,37 +213,37 @@
230 &iomuxc {
231 pinctrl_enet1: enet1grp {
232 fsl,pins = <
233 - MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x3
234 - MX7D_PAD_SD2_WP__ENET1_MDC 0x3
235 - MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x1
236 - MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x1
237 - MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x1
238 - MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x1
239 - MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x1
240 - MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x1
241 - MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x1
242 - MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x1
243 - MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x1
244 - MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x1
245 - MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x1
246 - MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x1
247 + MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x30
248 + MX7D_PAD_SD2_WP__ENET1_MDC 0x30
249 + MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x11
250 + MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x11
251 + MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x11
252 + MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x11
253 + MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x11
254 + MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x11
255 + MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x11
256 + MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x11
257 + MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x11
258 + MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x11
259 + MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x11
260 + MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x11
261 >;
262 };
263
264 pinctrl_enet2: enet2grp {
265 fsl,pins = <
266 - MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x1
267 - MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x1
268 - MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x1
269 - MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x1
270 - MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x1
271 - MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x1
272 - MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x1
273 - MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x1
274 - MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x1
275 - MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x1
276 - MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x1
277 - MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x1
278 + MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x11
279 + MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x11
280 + MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x11
281 + MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x11
282 + MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x11
283 + MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x11
284 + MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x11
285 + MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x11
286 + MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x11
287 + MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x11
288 + MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x11
289 + MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x11
290 >;
291 };
292
293 diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
294 index 6761d11d3f9e..db0239c7e6c7 100644
295 --- a/arch/arm/boot/dts/r8a7791-porter.dts
296 +++ b/arch/arm/boot/dts/r8a7791-porter.dts
297 @@ -428,7 +428,7 @@
298 "dclkin.0", "dclkin.1";
299
300 ports {
301 - port@1 {
302 + port@0 {
303 endpoint {
304 remote-endpoint = <&adv7511_in>;
305 };
306 diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
307 index 9f48141270b8..f0702d8063d9 100644
308 --- a/arch/arm/boot/dts/socfpga.dtsi
309 +++ b/arch/arm/boot/dts/socfpga.dtsi
310 @@ -759,7 +759,7 @@
311 timer@fffec600 {
312 compatible = "arm,cortex-a9-twd-timer";
313 reg = <0xfffec600 0x100>;
314 - interrupts = <1 13 0xf04>;
315 + interrupts = <1 13 0xf01>;
316 clocks = <&mpu_periph_clk>;
317 };
318
319 diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
320 index d0295f1dd1a3..ff65b6d96c7e 100644
321 --- a/arch/arm/include/asm/vdso.h
322 +++ b/arch/arm/include/asm/vdso.h
323 @@ -11,8 +11,6 @@ struct mm_struct;
324
325 void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
326
327 -extern char vdso_start, vdso_end;
328 -
329 extern unsigned int vdso_total_pages;
330
331 #else /* CONFIG_VDSO */
332 diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
333 index 53cf86cf2d1a..890439737374 100644
334 --- a/arch/arm/kernel/vdso.c
335 +++ b/arch/arm/kernel/vdso.c
336 @@ -39,6 +39,8 @@
337
338 static struct page **vdso_text_pagelist;
339
340 +extern char vdso_start[], vdso_end[];
341 +
342 /* Total number of pages needed for the data and text portions of the VDSO. */
343 unsigned int vdso_total_pages __ro_after_init;
344
345 @@ -179,13 +181,13 @@ static int __init vdso_init(void)
346 unsigned int text_pages;
347 int i;
348
349 - if (memcmp(&vdso_start, "\177ELF", 4)) {
350 + if (memcmp(vdso_start, "\177ELF", 4)) {
351 pr_err("VDSO is not a valid ELF object!\n");
352 return -ENOEXEC;
353 }
354
355 - text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
356 - pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
357 + text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
358 + pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start);
359
360 /* Allocate the VDSO text pagelist */
361 vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
362 @@ -200,7 +202,7 @@ static int __init vdso_init(void)
363 for (i = 0; i < text_pages; i++) {
364 struct page *page;
365
366 - page = virt_to_page(&vdso_start + i * PAGE_SIZE);
367 + page = virt_to_page(vdso_start + i * PAGE_SIZE);
368 vdso_text_pagelist[i] = page;
369 }
370
371 @@ -211,7 +213,7 @@ static int __init vdso_init(void)
372
373 cntvct_ok = cntvct_functional();
374
375 - patch_vdso(&vdso_start);
376 + patch_vdso(vdso_start);
377
378 return 0;
379 }
380 diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
381 index 4f5fd4a084c0..034b89499bd7 100644
382 --- a/arch/arm/mach-omap1/clock.c
383 +++ b/arch/arm/mach-omap1/clock.c
384 @@ -1031,17 +1031,17 @@ static int clk_debugfs_register_one(struct clk *c)
385 return -ENOMEM;
386 c->dent = d;
387
388 - d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
389 + d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount);
390 if (!d) {
391 err = -ENOMEM;
392 goto err_out;
393 }
394 - d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
395 + d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate);
396 if (!d) {
397 err = -ENOMEM;
398 goto err_out;
399 }
400 - d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
401 + d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags);
402 if (!d) {
403 err = -ENOMEM;
404 goto err_out;
405 diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
406 index 678d2a31dcb8..3202015ecb83 100644
407 --- a/arch/arm/mach-omap2/pm.c
408 +++ b/arch/arm/mach-omap2/pm.c
409 @@ -225,7 +225,7 @@ static void omap_pm_end(void)
410 cpu_idle_poll_ctrl(false);
411 }
412
413 -static void omap_pm_finish(void)
414 +static void omap_pm_wake(void)
415 {
416 if (cpu_is_omap34xx())
417 omap_prcm_irq_complete();
418 @@ -235,7 +235,7 @@ static const struct platform_suspend_ops omap_pm_ops = {
419 .begin = omap_pm_begin,
420 .end = omap_pm_end,
421 .enter = omap_pm_enter,
422 - .finish = omap_pm_finish,
423 + .wake = omap_pm_wake,
424 .valid = suspend_valid_only_mem,
425 };
426
427 diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
428 index b2f2448bfa6d..a4cab2814655 100644
429 --- a/arch/arm/mach-omap2/timer.c
430 +++ b/arch/arm/mach-omap2/timer.c
431 @@ -136,12 +136,6 @@ static struct clock_event_device clockevent_gpt = {
432 .tick_resume = omap2_gp_timer_shutdown,
433 };
434
435 -static struct property device_disabled = {
436 - .name = "status",
437 - .length = sizeof("disabled"),
438 - .value = "disabled",
439 -};
440 -
441 static const struct of_device_id omap_timer_match[] __initconst = {
442 { .compatible = "ti,omap2420-timer", },
443 { .compatible = "ti,omap3430-timer", },
444 @@ -183,8 +177,17 @@ static struct device_node * __init omap_get_timer_dt(const struct of_device_id *
445 of_get_property(np, "ti,timer-secure", NULL)))
446 continue;
447
448 - if (!of_device_is_compatible(np, "ti,omap-counter32k"))
449 - of_add_property(np, &device_disabled);
450 + if (!of_device_is_compatible(np, "ti,omap-counter32k")) {
451 + struct property *prop;
452 +
453 + prop = kzalloc(sizeof(*prop), GFP_KERNEL);
454 + if (!prop)
455 + return NULL;
456 + prop->name = "status";
457 + prop->value = "disabled";
458 + prop->length = strlen(prop->value);
459 + of_add_property(np, prop);
460 + }
461 return np;
462 }
463
464 diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig
465 index 89bb0fc796bd..72905a442106 100644
466 --- a/arch/arm/mach-orion5x/Kconfig
467 +++ b/arch/arm/mach-orion5x/Kconfig
468 @@ -57,7 +57,6 @@ config MACH_KUROBOX_PRO
469
470 config MACH_DNS323
471 bool "D-Link DNS-323"
472 - select GENERIC_NET_UTILS
473 select I2C_BOARDINFO if I2C
474 help
475 Say 'Y' here if you want your kernel to support the
476 @@ -65,7 +64,6 @@ config MACH_DNS323
477
478 config MACH_TS209
479 bool "QNAP TS-109/TS-209"
480 - select GENERIC_NET_UTILS
481 help
482 Say 'Y' here if you want your kernel to support the
483 QNAP TS-109/TS-209 platform.
484 @@ -107,7 +105,6 @@ config MACH_LINKSTATION_LS_HGL
485
486 config MACH_TS409
487 bool "QNAP TS-409"
488 - select GENERIC_NET_UTILS
489 help
490 Say 'Y' here if you want your kernel to support the
491 QNAP TS-409 platform.
492 diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
493 index cd483bfb5ca8..d13344b2ddcd 100644
494 --- a/arch/arm/mach-orion5x/dns323-setup.c
495 +++ b/arch/arm/mach-orion5x/dns323-setup.c
496 @@ -173,10 +173,42 @@ static struct mv643xx_eth_platform_data dns323_eth_data = {
497 .phy_addr = MV643XX_ETH_PHY_ADDR(8),
498 };
499
500 +/* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these
501 + * functions be kept somewhere?
502 + */
503 +static int __init dns323_parse_hex_nibble(char n)
504 +{
505 + if (n >= '0' && n <= '9')
506 + return n - '0';
507 +
508 + if (n >= 'A' && n <= 'F')
509 + return n - 'A' + 10;
510 +
511 + if (n >= 'a' && n <= 'f')
512 + return n - 'a' + 10;
513 +
514 + return -1;
515 +}
516 +
517 +static int __init dns323_parse_hex_byte(const char *b)
518 +{
519 + int hi;
520 + int lo;
521 +
522 + hi = dns323_parse_hex_nibble(b[0]);
523 + lo = dns323_parse_hex_nibble(b[1]);
524 +
525 + if (hi < 0 || lo < 0)
526 + return -1;
527 +
528 + return (hi << 4) | lo;
529 +}
530 +
531 static int __init dns323_read_mac_addr(void)
532 {
533 u_int8_t addr[6];
534 - void __iomem *mac_page;
535 + int i;
536 + char *mac_page;
537
538 /* MAC address is stored as a regular ol' string in /dev/mtdblock4
539 * (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80).
540 @@ -185,8 +217,23 @@ static int __init dns323_read_mac_addr(void)
541 if (!mac_page)
542 return -ENOMEM;
543
544 - if (!mac_pton((__force const char *) mac_page, addr))
545 - goto error_fail;
546 + /* Sanity check the string we're looking at */
547 + for (i = 0; i < 5; i++) {
548 + if (*(mac_page + (i * 3) + 2) != ':') {
549 + goto error_fail;
550 + }
551 + }
552 +
553 + for (i = 0; i < 6; i++) {
554 + int byte;
555 +
556 + byte = dns323_parse_hex_byte(mac_page + (i * 3));
557 + if (byte < 0) {
558 + goto error_fail;
559 + }
560 +
561 + addr[i] = byte;
562 + }
563
564 iounmap(mac_page);
565 printk("DNS-323: Found ethernet MAC address: %pM\n", addr);
566 diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c
567 index 89774985d380..905d4f2dd0b8 100644
568 --- a/arch/arm/mach-orion5x/tsx09-common.c
569 +++ b/arch/arm/mach-orion5x/tsx09-common.c
570 @@ -53,12 +53,53 @@ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = {
571 .phy_addr = MV643XX_ETH_PHY_ADDR(8),
572 };
573
574 +static int __init qnap_tsx09_parse_hex_nibble(char n)
575 +{
576 + if (n >= '0' && n <= '9')
577 + return n - '0';
578 +
579 + if (n >= 'A' && n <= 'F')
580 + return n - 'A' + 10;
581 +
582 + if (n >= 'a' && n <= 'f')
583 + return n - 'a' + 10;
584 +
585 + return -1;
586 +}
587 +
588 +static int __init qnap_tsx09_parse_hex_byte(const char *b)
589 +{
590 + int hi;
591 + int lo;
592 +
593 + hi = qnap_tsx09_parse_hex_nibble(b[0]);
594 + lo = qnap_tsx09_parse_hex_nibble(b[1]);
595 +
596 + if (hi < 0 || lo < 0)
597 + return -1;
598 +
599 + return (hi << 4) | lo;
600 +}
601 +
602 static int __init qnap_tsx09_check_mac_addr(const char *addr_str)
603 {
604 u_int8_t addr[6];
605 + int i;
606
607 - if (!mac_pton(addr_str, addr))
608 - return -1;
609 + for (i = 0; i < 6; i++) {
610 + int byte;
611 +
612 + /*
613 + * Enforce "xx:xx:xx:xx:xx:xx\n" format.
614 + */
615 + if (addr_str[(i * 3) + 2] != ((i < 5) ? ':' : '\n'))
616 + return -1;
617 +
618 + byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3));
619 + if (byte < 0)
620 + return -1;
621 + addr[i] = byte;
622 + }
623
624 printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr);
625
626 @@ -77,12 +118,12 @@ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size)
627 unsigned long addr;
628
629 for (addr = mem_base; addr < (mem_base + size); addr += 1024) {
630 - void __iomem *nor_page;
631 + char *nor_page;
632 int ret = 0;
633
634 nor_page = ioremap(addr, 1024);
635 if (nor_page != NULL) {
636 - ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page);
637 + ret = qnap_tsx09_check_mac_addr(nor_page);
638 iounmap(nor_page);
639 }
640
641 diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
642 index 7a327bd32521..ebef8aacea83 100644
643 --- a/arch/arm/plat-omap/dmtimer.c
644 +++ b/arch/arm/plat-omap/dmtimer.c
645 @@ -890,11 +890,8 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
646 timer->irq = irq->start;
647 timer->pdev = pdev;
648
649 - /* Skip pm_runtime_enable for OMAP1 */
650 - if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
651 - pm_runtime_enable(dev);
652 - pm_runtime_irq_safe(dev);
653 - }
654 + pm_runtime_enable(dev);
655 + pm_runtime_irq_safe(dev);
656
657 if (!timer->reserved) {
658 ret = pm_runtime_get_sync(dev);
659 diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
660 index 338f82a7fdc7..2c93de7fffe5 100644
661 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
662 +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
663 @@ -326,8 +326,8 @@
664 blsp2_spi5: spi@075ba000{
665 compatible = "qcom,spi-qup-v2.2.1";
666 reg = <0x075ba000 0x600>;
667 - interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
668 - clocks = <&gcc GCC_BLSP2_QUP5_SPI_APPS_CLK>,
669 + interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
670 + clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>,
671 <&gcc GCC_BLSP2_AHB_CLK>;
672 clock-names = "core", "iface";
673 pinctrl-names = "default", "sleep";
674 diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
675 index cae331d553f8..a9d2dd03c977 100644
676 --- a/arch/arm64/include/asm/spinlock.h
677 +++ b/arch/arm64/include/asm/spinlock.h
678 @@ -141,8 +141,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
679 " cbnz %w1, 1f\n"
680 " add %w1, %w0, %3\n"
681 " casa %w0, %w1, %2\n"
682 - " and %w1, %w1, #0xffff\n"
683 - " eor %w1, %w1, %w0, lsr #16\n"
684 + " sub %w1, %w1, %3\n"
685 + " eor %w1, %w1, %w0\n"
686 "1:")
687 : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
688 : "I" (1 << TICKET_SHIFT)
689 diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
690 index 801a16dbbdf6..7d2a15a0f625 100644
691 --- a/arch/arm64/include/asm/stacktrace.h
692 +++ b/arch/arm64/include/asm/stacktrace.h
693 @@ -23,7 +23,7 @@ struct stackframe {
694 unsigned long sp;
695 unsigned long pc;
696 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
697 - unsigned int graph;
698 + int graph;
699 #endif
700 };
701
702 diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
703 index 74107134cc30..2de62aa91303 100644
704 --- a/arch/arm64/kernel/cpu_errata.c
705 +++ b/arch/arm64/kernel/cpu_errata.c
706 @@ -160,7 +160,7 @@ static int enable_smccc_arch_workaround_1(void *data)
707 case PSCI_CONDUIT_HVC:
708 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
709 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
710 - if (res.a0)
711 + if ((int)res.a0 < 0)
712 return 0;
713 cb = call_hvc_arch_workaround_1;
714 smccc_start = __smccc_workaround_1_hvc_start;
715 @@ -170,7 +170,7 @@ static int enable_smccc_arch_workaround_1(void *data)
716 case PSCI_CONDUIT_SMC:
717 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
718 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
719 - if (res.a0)
720 + if ((int)res.a0 < 0)
721 return 0;
722 cb = call_smc_arch_workaround_1;
723 smccc_start = __smccc_workaround_1_smc_start;
724 diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
725 index c2efddfca18c..0cc01e0d38eb 100644
726 --- a/arch/arm64/kernel/stacktrace.c
727 +++ b/arch/arm64/kernel/stacktrace.c
728 @@ -72,6 +72,11 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
729 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
730 if (tsk->ret_stack &&
731 (frame->pc == (unsigned long)return_to_handler)) {
732 + if (WARN_ON_ONCE(frame->graph == -1))
733 + return -EINVAL;
734 + if (frame->graph < -1)
735 + frame->graph += FTRACE_NOTRACE_DEPTH;
736 +
737 /*
738 * This is a case where function graph tracer has
739 * modified a return address (LR) in a stack frame
740 diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
741 index 59779699a1a4..5d9076e86200 100644
742 --- a/arch/arm64/kernel/time.c
743 +++ b/arch/arm64/kernel/time.c
744 @@ -53,7 +53,7 @@ unsigned long profile_pc(struct pt_regs *regs)
745 frame.sp = regs->sp;
746 frame.pc = regs->pc;
747 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
748 - frame.graph = -1; /* no task info */
749 + frame.graph = current->curr_ret_stack;
750 #endif
751 do {
752 int ret = unwind_frame(NULL, &frame);
753 diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
754 index 5ed0ea92c5bf..f851c9d651f0 100644
755 --- a/arch/ia64/kernel/err_inject.c
756 +++ b/arch/ia64/kernel/err_inject.c
757 @@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
758 u64 virt_addr=simple_strtoull(buf, NULL, 16);
759 int ret;
760
761 - ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
762 + ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
763 if (ret<=0) {
764 #ifdef ERR_INJ_DEBUG
765 printk("Virtual address %lx is not existing.\n",virt_addr);
766 diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
767 index a0fc0c192427..3e8be0f54a44 100644
768 --- a/arch/m68k/coldfire/device.c
769 +++ b/arch/m68k/coldfire/device.c
770 @@ -135,7 +135,11 @@ static struct platform_device mcf_fec0 = {
771 .id = 0,
772 .num_resources = ARRAY_SIZE(mcf_fec0_resources),
773 .resource = mcf_fec0_resources,
774 - .dev.platform_data = FEC_PDATA,
775 + .dev = {
776 + .dma_mask = &mcf_fec0.dev.coherent_dma_mask,
777 + .coherent_dma_mask = DMA_BIT_MASK(32),
778 + .platform_data = FEC_PDATA,
779 + }
780 };
781
782 #ifdef MCFFEC_BASE1
783 @@ -167,7 +171,11 @@ static struct platform_device mcf_fec1 = {
784 .id = 1,
785 .num_resources = ARRAY_SIZE(mcf_fec1_resources),
786 .resource = mcf_fec1_resources,
787 - .dev.platform_data = FEC_PDATA,
788 + .dev = {
789 + .dma_mask = &mcf_fec1.dev.coherent_dma_mask,
790 + .coherent_dma_mask = DMA_BIT_MASK(32),
791 + .platform_data = FEC_PDATA,
792 + }
793 };
794 #endif /* MCFFEC_BASE1 */
795 #endif /* CONFIG_FEC */
796 diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
797 index 6ed1ded87b8f..6420c83c29d1 100644
798 --- a/arch/mips/cavium-octeon/octeon-irq.c
799 +++ b/arch/mips/cavium-octeon/octeon-irq.c
800 @@ -2271,7 +2271,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
801
802 parent_irq = irq_of_parse_and_map(ciu_node, 0);
803 if (!parent_irq) {
804 - pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
805 + pr_err("ERROR: Couldn't acquire parent_irq for %s\n",
806 ciu_node->name);
807 return -EINVAL;
808 }
809 @@ -2283,7 +2283,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
810
811 addr = of_get_address(ciu_node, 0, NULL, NULL);
812 if (!addr) {
813 - pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
814 + pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name);
815 return -EINVAL;
816 }
817 host_data->raw_reg = (u64)phys_to_virt(
818 @@ -2291,7 +2291,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
819
820 addr = of_get_address(ciu_node, 1, NULL, NULL);
821 if (!addr) {
822 - pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
823 + pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name);
824 return -EINVAL;
825 }
826 host_data->en_reg = (u64)phys_to_virt(
827 @@ -2299,7 +2299,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
828
829 r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
830 if (r) {
831 - pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
832 + pr_err("ERROR: Couldn't read cavium,max-bits from %s\n",
833 ciu_node->name);
834 return r;
835 }
836 @@ -2309,7 +2309,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
837 &octeon_irq_domain_cib_ops,
838 host_data);
839 if (!cib_domain) {
840 - pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
841 + pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
842 return -ENOMEM;
843 }
844
845 diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
846 index aa3800c82332..d99ca862dae3 100644
847 --- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
848 +++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
849 @@ -167,7 +167,7 @@
850 #define AR71XX_AHB_DIV_MASK 0x7
851
852 #define AR724X_PLL_REG_CPU_CONFIG 0x00
853 -#define AR724X_PLL_REG_PCIE_CONFIG 0x18
854 +#define AR724X_PLL_REG_PCIE_CONFIG 0x10
855
856 #define AR724X_PLL_FB_SHIFT 0
857 #define AR724X_PLL_FB_MASK 0x3ff
858 diff --git a/arch/mips/include/asm/machine.h b/arch/mips/include/asm/machine.h
859 index 6b444cd9526f..db930cdc715f 100644
860 --- a/arch/mips/include/asm/machine.h
861 +++ b/arch/mips/include/asm/machine.h
862 @@ -52,7 +52,7 @@ mips_machine_is_compatible(const struct mips_machine *mach, const void *fdt)
863 if (!mach->matches)
864 return NULL;
865
866 - for (match = mach->matches; match->compatible; match++) {
867 + for (match = mach->matches; match->compatible[0]; match++) {
868 if (fdt_node_check_compatible(fdt, 0, match->compatible) == 0)
869 return match;
870 }
871 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
872 index 0c8ae2cc6380..8f7bf74d1c0b 100644
873 --- a/arch/mips/kernel/ptrace.c
874 +++ b/arch/mips/kernel/ptrace.c
875 @@ -483,7 +483,7 @@ static int fpr_get_msa(struct task_struct *target,
876 /*
877 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
878 * Choose the appropriate helper for general registers, and then copy
879 - * the FCSR register separately.
880 + * the FCSR and FIR registers separately.
881 */
882 static int fpr_get(struct task_struct *target,
883 const struct user_regset *regset,
884 @@ -491,6 +491,7 @@ static int fpr_get(struct task_struct *target,
885 void *kbuf, void __user *ubuf)
886 {
887 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
888 + const int fir_pos = fcr31_pos + sizeof(u32);
889 int err;
890
891 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
892 @@ -503,6 +504,12 @@ static int fpr_get(struct task_struct *target,
893 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
894 &target->thread.fpu.fcr31,
895 fcr31_pos, fcr31_pos + sizeof(u32));
896 + if (err)
897 + return err;
898 +
899 + err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
900 + &boot_cpu_data.fpu_id,
901 + fir_pos, fir_pos + sizeof(u32));
902
903 return err;
904 }
905 @@ -551,7 +558,8 @@ static int fpr_set_msa(struct task_struct *target,
906 /*
907 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
908 * Choose the appropriate helper for general registers, and then copy
909 - * the FCSR register separately.
910 + * the FCSR register separately. Ignore the incoming FIR register
911 + * contents though, as the register is read-only.
912 *
913 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
914 * which is supposed to have been guaranteed by the kernel before
915 @@ -565,6 +573,7 @@ static int fpr_set(struct task_struct *target,
916 const void *kbuf, const void __user *ubuf)
917 {
918 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
919 + const int fir_pos = fcr31_pos + sizeof(u32);
920 u32 fcr31;
921 int err;
922
923 @@ -592,6 +601,11 @@ static int fpr_set(struct task_struct *target,
924 ptrace_setfcr31(target, fcr31);
925 }
926
927 + if (count > 0)
928 + err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
929 + fir_pos,
930 + fir_pos + sizeof(u32));
931 +
932 return err;
933 }
934
935 @@ -813,7 +827,7 @@ long arch_ptrace(struct task_struct *child, long request,
936 fregs = get_fpu_regs(child);
937
938 #ifdef CONFIG_32BIT
939 - if (test_thread_flag(TIF_32BIT_FPREGS)) {
940 + if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
941 /*
942 * The odd registers are actually the high
943 * order bits of the values stored in the even
944 @@ -902,7 +916,7 @@ long arch_ptrace(struct task_struct *child, long request,
945
946 init_fp_ctx(child);
947 #ifdef CONFIG_32BIT
948 - if (test_thread_flag(TIF_32BIT_FPREGS)) {
949 + if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
950 /*
951 * The odd registers are actually the high
952 * order bits of the values stored in the even
953 diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
954 index 5fcbdcd7abd0..bc9afbabbe14 100644
955 --- a/arch/mips/kernel/ptrace32.c
956 +++ b/arch/mips/kernel/ptrace32.c
957 @@ -97,7 +97,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
958 break;
959 }
960 fregs = get_fpu_regs(child);
961 - if (test_thread_flag(TIF_32BIT_FPREGS)) {
962 + if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
963 /*
964 * The odd registers are actually the high
965 * order bits of the values stored in the even
966 @@ -204,7 +204,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
967 sizeof(child->thread.fpu));
968 child->thread.fpu.fcr31 = 0;
969 }
970 - if (test_thread_flag(TIF_32BIT_FPREGS)) {
971 + if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
972 /*
973 * The odd registers are actually the high
974 * order bits of the values stored in the even
975 diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
976 index 29ec9ab3fd55..a2c46f539e3e 100644
977 --- a/arch/mips/kvm/mips.c
978 +++ b/arch/mips/kvm/mips.c
979 @@ -42,7 +42,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
980 { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
981 { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
982 { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
983 - { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
984 + { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
985 { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
986 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
987 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
988 diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
989 index 9d0107fbb169..43fa682e55da 100644
990 --- a/arch/mips/mm/c-r4k.c
991 +++ b/arch/mips/mm/c-r4k.c
992 @@ -851,9 +851,12 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
993 /*
994 * Either no secondary cache or the available caches don't have the
995 * subset property so we have to flush the primary caches
996 - * explicitly
997 + * explicitly.
998 + * If we would need IPI to perform an INDEX-type operation, then
999 + * we have to use the HIT-type alternative as IPI cannot be used
1000 + * here due to interrupts possibly being disabled.
1001 */
1002 - if (size >= dcache_size) {
1003 + if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
1004 r4k_blast_dcache();
1005 } else {
1006 R4600_HIT_CACHEOP_WAR_IMPL;
1007 @@ -890,7 +893,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
1008 return;
1009 }
1010
1011 - if (size >= dcache_size) {
1012 + if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
1013 r4k_blast_dcache();
1014 } else {
1015 R4600_HIT_CACHEOP_WAR_IMPL;
1016 diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
1017 index 8b937300fb7f..fd26fadc8617 100644
1018 --- a/arch/mips/txx9/rbtx4939/setup.c
1019 +++ b/arch/mips/txx9/rbtx4939/setup.c
1020 @@ -186,7 +186,7 @@ static void __init rbtx4939_update_ioc_pen(void)
1021
1022 #define RBTX4939_MAX_7SEGLEDS 8
1023
1024 -#if IS_ENABLED(CONFIG_LEDS_CLASS)
1025 +#if IS_BUILTIN(CONFIG_LEDS_CLASS)
1026 static u8 led_val[RBTX4939_MAX_7SEGLEDS];
1027 struct rbtx4939_led_data {
1028 struct led_classdev cdev;
1029 @@ -261,7 +261,7 @@ static inline void rbtx4939_led_setup(void)
1030
1031 static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val)
1032 {
1033 -#if IS_ENABLED(CONFIG_LEDS_CLASS)
1034 +#if IS_BUILTIN(CONFIG_LEDS_CLASS)
1035 unsigned long flags;
1036 local_irq_save(flags);
1037 /* bit7: reserved for LED class */
1038 diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
1039 index 9d47f2efa830..bb69f3955b59 100644
1040 --- a/arch/powerpc/boot/Makefile
1041 +++ b/arch/powerpc/boot/Makefile
1042 @@ -92,7 +92,8 @@ $(addprefix $(obj)/,$(zlib-y)): \
1043 libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
1044 libfdtheader := fdt.h libfdt.h libfdt_internal.h
1045
1046 -$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \
1047 +$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \
1048 + treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \
1049 $(addprefix $(obj)/,$(libfdtheader))
1050
1051 src-wlib-y := string.S crt0.S crtsavres.S stdio.c decompress.c main.c \
1052 diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
1053 index 744fd54de374..1bcc84903930 100644
1054 --- a/arch/powerpc/include/asm/irq_work.h
1055 +++ b/arch/powerpc/include/asm/irq_work.h
1056 @@ -5,5 +5,6 @@ static inline bool arch_irq_work_has_interrupt(void)
1057 {
1058 return true;
1059 }
1060 +extern void arch_irq_work_raise(void);
1061
1062 #endif /* _ASM_POWERPC_IRQ_WORK_H */
1063 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
1064 index 218cba2f5699..0a2b247dbc6b 100644
1065 --- a/arch/powerpc/kvm/book3s_hv.c
1066 +++ b/arch/powerpc/kvm/book3s_hv.c
1067 @@ -3107,15 +3107,17 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
1068 goto up_out;
1069
1070 psize = vma_kernel_pagesize(vma);
1071 - porder = __ilog2(psize);
1072
1073 up_read(&current->mm->mmap_sem);
1074
1075 /* We can handle 4k, 64k or 16M pages in the VRMA */
1076 - err = -EINVAL;
1077 - if (!(psize == 0x1000 || psize == 0x10000 ||
1078 - psize == 0x1000000))
1079 - goto out_srcu;
1080 + if (psize >= 0x1000000)
1081 + psize = 0x1000000;
1082 + else if (psize >= 0x10000)
1083 + psize = 0x10000;
1084 + else
1085 + psize = 0x1000;
1086 + porder = __ilog2(psize);
1087
1088 /* Update VRMASD field in the LPCR */
1089 senc = slb_pgsize_encoding(psize);
1090 diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
1091 index a51c188b81f3..6cff96e0d77b 100644
1092 --- a/arch/powerpc/mm/numa.c
1093 +++ b/arch/powerpc/mm/numa.c
1094 @@ -551,7 +551,7 @@ static int numa_setup_cpu(unsigned long lcpu)
1095 nid = of_node_to_nid_single(cpu);
1096
1097 out_present:
1098 - if (nid < 0 || !node_online(nid))
1099 + if (nid < 0 || !node_possible(nid))
1100 nid = first_online_node;
1101
1102 map_cpu_to_node(lcpu, nid);
1103 @@ -904,6 +904,32 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
1104 NODE_DATA(nid)->node_spanned_pages = spanned_pages;
1105 }
1106
1107 +static void __init find_possible_nodes(void)
1108 +{
1109 + struct device_node *rtas;
1110 + u32 numnodes, i;
1111 +
1112 + if (min_common_depth <= 0)
1113 + return;
1114 +
1115 + rtas = of_find_node_by_path("/rtas");
1116 + if (!rtas)
1117 + return;
1118 +
1119 + if (of_property_read_u32_index(rtas,
1120 + "ibm,max-associativity-domains",
1121 + min_common_depth, &numnodes))
1122 + goto out;
1123 +
1124 + for (i = 0; i < numnodes; i++) {
1125 + if (!node_possible(i))
1126 + node_set(i, node_possible_map);
1127 + }
1128 +
1129 +out:
1130 + of_node_put(rtas);
1131 +}
1132 +
1133 void __init initmem_init(void)
1134 {
1135 int nid, cpu;
1136 @@ -917,12 +943,15 @@ void __init initmem_init(void)
1137 memblock_dump_all();
1138
1139 /*
1140 - * Reduce the possible NUMA nodes to the online NUMA nodes,
1141 - * since we do not support node hotplug. This ensures that we
1142 - * lower the maximum NUMA node ID to what is actually present.
1143 + * Modify the set of possible NUMA nodes to reflect information
1144 + * available about the set of online nodes, and the set of nodes
1145 + * that we expect to make use of for this platform's affinity
1146 + * calculations.
1147 */
1148 nodes_and(node_possible_map, node_possible_map, node_online_map);
1149
1150 + find_possible_nodes();
1151 +
1152 for_each_online_node(nid) {
1153 unsigned long start_pfn, end_pfn;
1154
1155 @@ -1274,6 +1303,40 @@ static long vphn_get_associativity(unsigned long cpu,
1156 return rc;
1157 }
1158
1159 +static inline int find_and_online_cpu_nid(int cpu)
1160 +{
1161 + __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1162 + int new_nid;
1163 +
1164 + /* Use associativity from first thread for all siblings */
1165 + vphn_get_associativity(cpu, associativity);
1166 + new_nid = associativity_to_nid(associativity);
1167 + if (new_nid < 0 || !node_possible(new_nid))
1168 + new_nid = first_online_node;
1169 +
1170 + if (NODE_DATA(new_nid) == NULL) {
1171 +#ifdef CONFIG_MEMORY_HOTPLUG
1172 + /*
1173 + * Need to ensure that NODE_DATA is initialized for a node from
1174 + * available memory (see memblock_alloc_try_nid). If unable to
1175 + * init the node, then default to nearest node that has memory
1176 + * installed.
1177 + */
1178 + if (try_online_node(new_nid))
1179 + new_nid = first_online_node;
1180 +#else
1181 + /*
1182 + * Default to using the nearest node that has memory installed.
1183 + * Otherwise, it would be necessary to patch the kernel MM code
1184 + * to deal with more memoryless-node error conditions.
1185 + */
1186 + new_nid = first_online_node;
1187 +#endif
1188 + }
1189 +
1190 + return new_nid;
1191 +}
1192 +
1193 /*
1194 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
1195 * characteristics change. This function doesn't perform any locking and is
1196 @@ -1339,7 +1402,6 @@ int arch_update_cpu_topology(void)
1197 {
1198 unsigned int cpu, sibling, changed = 0;
1199 struct topology_update_data *updates, *ud;
1200 - __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1201 cpumask_t updated_cpus;
1202 struct device *dev;
1203 int weight, new_nid, i = 0;
1204 @@ -1374,11 +1436,7 @@ int arch_update_cpu_topology(void)
1205 continue;
1206 }
1207
1208 - /* Use associativity from first thread for all siblings */
1209 - vphn_get_associativity(cpu, associativity);
1210 - new_nid = associativity_to_nid(associativity);
1211 - if (new_nid < 0 || !node_online(new_nid))
1212 - new_nid = first_online_node;
1213 + new_nid = find_and_online_cpu_nid(cpu);
1214
1215 if (new_nid == numa_cpu_lookup_table[cpu]) {
1216 cpumask_andnot(&cpu_associativity_changes_mask,
1217 diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
1218 index 7e706f36e364..9c58194c7ea5 100644
1219 --- a/arch/powerpc/net/bpf_jit_comp.c
1220 +++ b/arch/powerpc/net/bpf_jit_comp.c
1221 @@ -329,6 +329,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
1222 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
1223 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
1224 break;
1225 + case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
1226 + PPC_LWZ_OFFS(r_A, r_skb, K);
1227 + break;
1228 case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
1229 PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
1230 break;
1231 diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
1232 index bf949623de90..771edffa2d40 100644
1233 --- a/arch/powerpc/perf/core-book3s.c
1234 +++ b/arch/powerpc/perf/core-book3s.c
1235 @@ -448,6 +448,16 @@ static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
1236 /* invalid entry */
1237 continue;
1238
1239 + /*
1240 + * BHRB rolling buffer could very much contain the kernel
1241 + * addresses at this point. Check the privileges before
1242 + * exporting it to userspace (avoid exposure of regions
1243 + * where we could have speculative execution)
1244 + */
1245 + if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
1246 + is_kernel_addr(addr))
1247 + continue;
1248 +
1249 /* Branches are read most recent first (ie. mfbhrb 0 is
1250 * the most recent branch).
1251 * There are two types of valid entries:
1252 @@ -1188,6 +1198,7 @@ static void power_pmu_disable(struct pmu *pmu)
1253 */
1254 write_mmcr0(cpuhw, val);
1255 mb();
1256 + isync();
1257
1258 /*
1259 * Disable instruction sampling if it was enabled
1260 @@ -1196,12 +1207,26 @@ static void power_pmu_disable(struct pmu *pmu)
1261 mtspr(SPRN_MMCRA,
1262 cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
1263 mb();
1264 + isync();
1265 }
1266
1267 cpuhw->disabled = 1;
1268 cpuhw->n_added = 0;
1269
1270 ebb_switch_out(mmcr0);
1271 +
1272 +#ifdef CONFIG_PPC64
1273 + /*
1274 + * These are readable by userspace, may contain kernel
1275 + * addresses and are not switched by context switch, so clear
1276 + * them now to avoid leaking anything to userspace in general
1277 + * including to another process.
1278 + */
1279 + if (ppmu->flags & PPMU_ARCH_207S) {
1280 + mtspr(SPRN_SDAR, 0);
1281 + mtspr(SPRN_SIAR, 0);
1282 + }
1283 +#endif
1284 }
1285
1286 local_irq_restore(flags);
1287 diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
1288 index b9aac951a90f..f37567ed640c 100644
1289 --- a/arch/powerpc/sysdev/mpic.c
1290 +++ b/arch/powerpc/sysdev/mpic.c
1291 @@ -626,7 +626,7 @@ static inline u32 mpic_physmask(u32 cpumask)
1292 int i;
1293 u32 mask = 0;
1294
1295 - for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
1296 + for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1)
1297 mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
1298 return mask;
1299 }
1300 diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
1301 index ced6c9b8f04d..51f842c0a175 100644
1302 --- a/arch/s390/kvm/vsie.c
1303 +++ b/arch/s390/kvm/vsie.c
1304 @@ -549,7 +549,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1305
1306 gpa = scb_o->itdba & ~0xffUL;
1307 if (gpa && (scb_s->ecb & 0x10U)) {
1308 - if (!(gpa & ~0x1fffU)) {
1309 + if (!(gpa & ~0x1fffUL)) {
1310 rc = set_validity_icpt(scb_s, 0x0080U);
1311 goto unpin;
1312 }
1313 diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
1314 index c001f782c5f1..28cc61216b64 100644
1315 --- a/arch/sh/kernel/entry-common.S
1316 +++ b/arch/sh/kernel/entry-common.S
1317 @@ -255,7 +255,7 @@ debug_trap:
1318 mov.l @r8, r8
1319 jsr @r8
1320 nop
1321 - bra __restore_all
1322 + bra ret_from_exception
1323 nop
1324 CFI_ENDPROC
1325
1326 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
1327 index 24827a3f733a..89d299ccdfa6 100644
1328 --- a/arch/sparc/include/asm/atomic_64.h
1329 +++ b/arch/sparc/include/asm/atomic_64.h
1330 @@ -82,7 +82,11 @@ ATOMIC_OPS(xor)
1331 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
1332
1333 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
1334 -#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1335 +
1336 +static inline int atomic_xchg(atomic_t *v, int new)
1337 +{
1338 + return xchg(&v->counter, new);
1339 +}
1340
1341 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1342 {
1343 diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
1344 index b6802b978140..81ad06a1672f 100644
1345 --- a/arch/sparc/include/asm/pgtable_64.h
1346 +++ b/arch/sparc/include/asm/pgtable_64.h
1347 @@ -952,7 +952,7 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
1348 pmd_t *pmd);
1349
1350 #define __HAVE_ARCH_PMDP_INVALIDATE
1351 -extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
1352 +extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
1353 pmd_t *pmdp);
1354
1355 #define __HAVE_ARCH_PGTABLE_DEPOSIT
1356 diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
1357 index c56a195c9071..b2722ed31053 100644
1358 --- a/arch/sparc/mm/tlb.c
1359 +++ b/arch/sparc/mm/tlb.c
1360 @@ -219,17 +219,28 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1361 }
1362 }
1363
1364 +static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
1365 + unsigned long address, pmd_t *pmdp, pmd_t pmd)
1366 +{
1367 + pmd_t old;
1368 +
1369 + do {
1370 + old = *pmdp;
1371 + } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
1372 +
1373 + return old;
1374 +}
1375 +
1376 /*
1377 * This routine is only called when splitting a THP
1378 */
1379 -void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
1380 +pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
1381 pmd_t *pmdp)
1382 {
1383 - pmd_t entry = *pmdp;
1384 -
1385 - pmd_val(entry) &= ~_PAGE_VALID;
1386 + pmd_t old, entry;
1387
1388 - set_pmd_at(vma->vm_mm, address, pmdp, entry);
1389 + entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
1390 + old = pmdp_establish(vma, address, pmdp, entry);
1391 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
1392
1393 /*
1394 @@ -240,6 +251,8 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
1395 if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
1396 !is_huge_zero_page(pmd_page(entry)))
1397 (vma->vm_mm)->context.thp_pte_count--;
1398 +
1399 + return old;
1400 }
1401
1402 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1403 diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
1404 index 02e547f9ca3f..655a65eaf105 100644
1405 --- a/arch/x86/events/core.c
1406 +++ b/arch/x86/events/core.c
1407 @@ -1155,16 +1155,13 @@ int x86_perf_event_set_period(struct perf_event *event)
1408
1409 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1410
1411 - if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
1412 - local64_read(&hwc->prev_count) != (u64)-left) {
1413 - /*
1414 - * The hw event starts counting from this event offset,
1415 - * mark it to be able to extra future deltas:
1416 - */
1417 - local64_set(&hwc->prev_count, (u64)-left);
1418 + /*
1419 + * The hw event starts counting from this event offset,
1420 + * mark it to be able to extra future deltas:
1421 + */
1422 + local64_set(&hwc->prev_count, (u64)-left);
1423
1424 - wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
1425 - }
1426 + wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
1427
1428 /*
1429 * Due to erratum on certan cpu we need
1430 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
1431 index 6f353a874178..815039327932 100644
1432 --- a/arch/x86/events/intel/core.c
1433 +++ b/arch/x86/events/intel/core.c
1434 @@ -2066,9 +2066,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
1435 int bit, loops;
1436 u64 status;
1437 int handled;
1438 + int pmu_enabled;
1439
1440 cpuc = this_cpu_ptr(&cpu_hw_events);
1441
1442 + /*
1443 + * Save the PMU state.
1444 + * It needs to be restored when leaving the handler.
1445 + */
1446 + pmu_enabled = cpuc->enabled;
1447 /*
1448 * No known reason to not always do late ACK,
1449 * but just in case do it opt-in.
1450 @@ -2076,6 +2082,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
1451 if (!x86_pmu.late_ack)
1452 apic_write(APIC_LVTPC, APIC_DM_NMI);
1453 intel_bts_disable_local();
1454 + cpuc->enabled = 0;
1455 __intel_pmu_disable_all();
1456 handled = intel_pmu_drain_bts_buffer();
1457 handled += intel_bts_interrupt();
1458 @@ -2173,7 +2180,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
1459
1460 done:
1461 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
1462 - if (cpuc->enabled)
1463 + cpuc->enabled = pmu_enabled;
1464 + if (pmu_enabled)
1465 __intel_pmu_enable_all(0, true);
1466 intel_bts_enable_local();
1467
1468 @@ -3019,7 +3027,7 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
1469 * Therefore the effective (average) period matches the requested period,
1470 * despite coarser hardware granularity.
1471 */
1472 -static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
1473 +static u64 bdw_limit_period(struct perf_event *event, u64 left)
1474 {
1475 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
1476 X86_CONFIG(.event=0xc0, .umask=0x01)) {
1477 diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
1478 index 8e7a3f1df3a5..f26e26e4d84f 100644
1479 --- a/arch/x86/events/intel/ds.c
1480 +++ b/arch/x86/events/intel/ds.c
1481 @@ -1110,6 +1110,7 @@ static void setup_pebs_sample_data(struct perf_event *event,
1482 if (pebs == NULL)
1483 return;
1484
1485 + regs->flags &= ~PERF_EFLAGS_EXACT;
1486 sample_type = event->attr.sample_type;
1487 dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
1488
1489 @@ -1154,7 +1155,6 @@ static void setup_pebs_sample_data(struct perf_event *event,
1490 */
1491 *regs = *iregs;
1492 regs->flags = pebs->flags;
1493 - set_linear_ip(regs, pebs->ip);
1494
1495 if (sample_type & PERF_SAMPLE_REGS_INTR) {
1496 regs->ax = pebs->ax;
1497 @@ -1190,13 +1190,22 @@ static void setup_pebs_sample_data(struct perf_event *event,
1498 #endif
1499 }
1500
1501 - if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
1502 - regs->ip = pebs->real_ip;
1503 - regs->flags |= PERF_EFLAGS_EXACT;
1504 - } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs))
1505 - regs->flags |= PERF_EFLAGS_EXACT;
1506 - else
1507 - regs->flags &= ~PERF_EFLAGS_EXACT;
1508 + if (event->attr.precise_ip > 1) {
1509 + /* Haswell and later have the eventing IP, so use it: */
1510 + if (x86_pmu.intel_cap.pebs_format >= 2) {
1511 + set_linear_ip(regs, pebs->real_ip);
1512 + regs->flags |= PERF_EFLAGS_EXACT;
1513 + } else {
1514 + /* Otherwise use PEBS off-by-1 IP: */
1515 + set_linear_ip(regs, pebs->ip);
1516 +
1517 + /* ... and try to fix it up using the LBR entries: */
1518 + if (intel_pmu_pebs_fixup_ip(regs))
1519 + regs->flags |= PERF_EFLAGS_EXACT;
1520 + }
1521 + } else
1522 + set_linear_ip(regs, pebs->ip);
1523 +
1524
1525 if ((sample_type & PERF_SAMPLE_ADDR) &&
1526 x86_pmu.intel_cap.pebs_format >= 1)
1527 @@ -1263,17 +1272,84 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
1528 return NULL;
1529 }
1530
1531 +/*
1532 + * Special variant of intel_pmu_save_and_restart() for auto-reload.
1533 + */
1534 +static int
1535 +intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
1536 +{
1537 + struct hw_perf_event *hwc = &event->hw;
1538 + int shift = 64 - x86_pmu.cntval_bits;
1539 + u64 period = hwc->sample_period;
1540 + u64 prev_raw_count, new_raw_count;
1541 + s64 new, old;
1542 +
1543 + WARN_ON(!period);
1544 +
1545 + /*
1546 + * drain_pebs() only happens when the PMU is disabled.
1547 + */
1548 + WARN_ON(this_cpu_read(cpu_hw_events.enabled));
1549 +
1550 + prev_raw_count = local64_read(&hwc->prev_count);
1551 + rdpmcl(hwc->event_base_rdpmc, new_raw_count);
1552 + local64_set(&hwc->prev_count, new_raw_count);
1553 +
1554 + /*
1555 + * Since the counter increments a negative counter value and
1556 + * overflows on the sign switch, giving the interval:
1557 + *
1558 + * [-period, 0]
1559 + *
1560 + * the difference between two consequtive reads is:
1561 + *
1562 + * A) value2 - value1;
1563 + * when no overflows have happened in between,
1564 + *
1565 + * B) (0 - value1) + (value2 - (-period));
1566 + * when one overflow happened in between,
1567 + *
1568 + * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
1569 + * when @n overflows happened in between.
1570 + *
1571 + * Here A) is the obvious difference, B) is the extension to the
1572 + * discrete interval, where the first term is to the top of the
1573 + * interval and the second term is from the bottom of the next
1574 + * interval and C) the extension to multiple intervals, where the
1575 + * middle term is the whole intervals covered.
1576 + *
1577 + * An equivalent of C, by reduction, is:
1578 + *
1579 + * value2 - value1 + n * period
1580 + */
1581 + new = ((s64)(new_raw_count << shift) >> shift);
1582 + old = ((s64)(prev_raw_count << shift) >> shift);
1583 + local64_add(new - old + count * period, &event->count);
1584 +
1585 + perf_event_update_userpage(event);
1586 +
1587 + return 0;
1588 +}
1589 +
1590 static void __intel_pmu_pebs_event(struct perf_event *event,
1591 struct pt_regs *iregs,
1592 void *base, void *top,
1593 int bit, int count)
1594 {
1595 + struct hw_perf_event *hwc = &event->hw;
1596 struct perf_sample_data data;
1597 struct pt_regs regs;
1598 void *at = get_next_pebs_record_by_bit(base, top, bit);
1599
1600 - if (!intel_pmu_save_and_restart(event) &&
1601 - !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD))
1602 + if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
1603 + /*
1604 + * For now, auto-reload is only enabled in fixed period mode.
1605 + * The reload value is always hwc->sample_period.
1606 + * This may need to change if auto-reload is later enabled
1607 + * in freq mode.
1608 + */
1609 + intel_pmu_save_and_restart_reload(event, count);
1610 + } else if (!intel_pmu_save_and_restart(event))
1611 return;
1612
1613 while (count > 1) {
1614 @@ -1325,8 +1401,11 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
1615 return;
1616
1617 n = top - at;
1618 - if (n <= 0)
1619 + if (n <= 0) {
1620 + if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
1621 + intel_pmu_save_and_restart_reload(event, 0);
1622 return;
1623 + }
1624
1625 __intel_pmu_pebs_event(event, iregs, at, top, 0, n);
1626 }
1627 @@ -1349,8 +1428,22 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
1628
1629 ds->pebs_index = ds->pebs_buffer_base;
1630
1631 - if (unlikely(base >= top))
1632 + if (unlikely(base >= top)) {
1633 + /*
1634 + * drain_pebs() can be called twice in a short period for an
1635 + * auto-reload event in pmu::read(), with no overflows having
1636 + * happened in between. In that case,
1637 + * intel_pmu_save_and_restart_reload() still needs to be called
1638 + * to update event->count.
1639 + */
1640 + for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
1641 + x86_pmu.max_pebs_events) {
1642 + event = cpuc->events[bit];
1643 + if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
1644 + intel_pmu_save_and_restart_reload(event, 0);
1645 + }
1646 return;
1647 + }
1648
1649 for (at = base; at < top; at += x86_pmu.pebs_record_size) {
1650 struct pebs_record_nhm *p = at;
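
The delta arithmetic in intel_pmu_save_and_restart_reload() above reduces to value2 - value1 + n * period once the raw counter reads are sign-extended from cntval_bits. The following stand-alone userspace sketch (illustrative only, not from the patch; the 48-bit counter width and the sample values are made-up numbers) walks through case C with two intermediate overflows:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

int main(void)
{
	const int cntval_bits = 48;              /* assumed counter width */
	const int shift = 64 - cntval_bits;
	const int64_t period = 1000000;          /* reload value; -period is programmed */

	/* Raw counter reads, truncated to cntval_bits as the hardware reports them. */
	uint64_t prev_raw = (uint64_t)(-period + 123) & ((1ULL << cntval_bits) - 1);
	uint64_t new_raw  = (uint64_t)(-period + 456) & ((1ULL << cntval_bits) - 1);
	int n = 2;                               /* PEBS records drained, i.e. overflows seen */

	/* Sign-extend both values from cntval_bits to 64 bits, as the kernel does. */
	int64_t old = (int64_t)(prev_raw << shift) >> shift;   /* -period + 123 */
	int64_t new = (int64_t)(new_raw << shift) >> shift;    /* -period + 456 */

	/* Case C from the comment: (0 - old) + (n - 1) * period + (new - (-period)),
	 * which reduces to new - old + n * period. */
	int64_t delta = new - old + (int64_t)n * period;

	assert(delta == (456 - 123) + n * period);
	printf("events counted since last update: %lld\n", (long long)delta);
	return 0;
}
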
1651 diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
1652 index bcbb1d2ae10b..f3563179290b 100644
1653 --- a/arch/x86/events/perf_event.h
1654 +++ b/arch/x86/events/perf_event.h
1655 @@ -548,7 +548,7 @@ struct x86_pmu {
1656 struct x86_pmu_quirk *quirks;
1657 int perfctr_second_write;
1658 bool late_ack;
1659 - unsigned (*limit_period)(struct perf_event *event, unsigned l);
1660 + u64 (*limit_period)(struct perf_event *event, u64 l);
1661
1662 /*
1663 * sysfs attrs
1664 diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
1665 index 39bcefc20de7..bb078786a323 100644
1666 --- a/arch/x86/include/asm/i8259.h
1667 +++ b/arch/x86/include/asm/i8259.h
1668 @@ -68,6 +68,11 @@ struct legacy_pic {
1669 extern struct legacy_pic *legacy_pic;
1670 extern struct legacy_pic null_legacy_pic;
1671
1672 +static inline bool has_legacy_pic(void)
1673 +{
1674 + return legacy_pic != &null_legacy_pic;
1675 +}
1676 +
1677 static inline int nr_legacy_irqs(void)
1678 {
1679 return legacy_pic->nr_legacy_irqs;
1680 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
1681 index c6583efdbdaf..76cf21f887bd 100644
1682 --- a/arch/x86/kernel/apic/apic.c
1683 +++ b/arch/x86/kernel/apic/apic.c
1684 @@ -1403,7 +1403,7 @@ void setup_local_APIC(void)
1685 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
1686 */
1687 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
1688 - if (!cpu && (pic_mode || !value)) {
1689 + if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
1690 value = APIC_DM_EXTINT;
1691 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
1692 } else {
1693 diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
1694 index 3fe45f84ced4..7a07b15b451c 100644
1695 --- a/arch/x86/kernel/devicetree.c
1696 +++ b/arch/x86/kernel/devicetree.c
1697 @@ -11,6 +11,7 @@
1698 #include <linux/of_address.h>
1699 #include <linux/of_platform.h>
1700 #include <linux/of_irq.h>
1701 +#include <linux/libfdt.h>
1702 #include <linux/slab.h>
1703 #include <linux/pci.h>
1704 #include <linux/of_pci.h>
1705 @@ -199,19 +200,22 @@ static struct of_ioapic_type of_ioapic_type[] =
1706 static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
1707 unsigned int nr_irqs, void *arg)
1708 {
1709 - struct of_phandle_args *irq_data = (void *)arg;
1710 + struct irq_fwspec *fwspec = (struct irq_fwspec *)arg;
1711 struct of_ioapic_type *it;
1712 struct irq_alloc_info tmp;
1713 + int type_index;
1714
1715 - if (WARN_ON(irq_data->args_count < 2))
1716 + if (WARN_ON(fwspec->param_count < 2))
1717 return -EINVAL;
1718 - if (irq_data->args[1] >= ARRAY_SIZE(of_ioapic_type))
1719 +
1720 + type_index = fwspec->param[1];
1721 + if (type_index >= ARRAY_SIZE(of_ioapic_type))
1722 return -EINVAL;
1723
1724 - it = &of_ioapic_type[irq_data->args[1]];
1725 + it = &of_ioapic_type[type_index];
1726 ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity);
1727 tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain));
1728 - tmp.ioapic_pin = irq_data->args[0];
1729 + tmp.ioapic_pin = fwspec->param[0];
1730
1731 return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp);
1732 }
1733 @@ -276,14 +280,15 @@ static void __init x86_flattree_get_config(void)
1734
1735 map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128);
1736
1737 - initial_boot_params = dt = early_memremap(initial_dtb, map_len);
1738 - size = of_get_flat_dt_size();
1739 + dt = early_memremap(initial_dtb, map_len);
1740 + size = fdt_totalsize(dt);
1741 if (map_len < size) {
1742 early_memunmap(dt, map_len);
1743 - initial_boot_params = dt = early_memremap(initial_dtb, size);
1744 + dt = early_memremap(initial_dtb, size);
1745 map_len = size;
1746 }
1747
1748 + early_init_dt_verify(dt);
1749 unflatten_and_copy_device_tree();
1750 early_memunmap(dt, map_len);
1751 }
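
The x86_flattree_get_config() change above maps roughly the first 128 bytes of the blob, reads the total size with fdt_totalsize(), and only then remaps the whole device tree. A rough userspace sketch of where that size comes from, assuming only the standard FDT header layout (big-endian magic at offset 0, big-endian totalsize at offset 4); the helper names here are local to the sketch, not libfdt's API:

#include <stdint.h>
#include <stdio.h>

#define FDT_MAGIC 0xd00dfeedU

/* Read a big-endian 32-bit value from an FDT blob. */
static uint32_t fdt_read_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* Returns the total size of the blob, or 0 if the header looks bogus. */
static uint32_t fdt_total_size(const void *blob)
{
	const uint8_t *p = blob;

	if (fdt_read_be32(p) != FDT_MAGIC)
		return 0;
	return fdt_read_be32(p + 4);	/* totalsize field */
}

int main(void)
{
	/* A fake 8-byte "header": magic followed by totalsize = 0x48 bytes. */
	static const uint8_t hdr[8] = {
		0xd0, 0x0d, 0xfe, 0xed, 0x00, 0x00, 0x00, 0x48,
	};

	printf("fdt totalsize: %u bytes\n", fdt_total_size(hdr));
	return 0;
}
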
1752 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
1753 index cb945146b7c8..10b22fc6ef5a 100644
1754 --- a/arch/x86/kernel/smpboot.c
1755 +++ b/arch/x86/kernel/smpboot.c
1756 @@ -1497,6 +1497,7 @@ static void remove_siblinginfo(int cpu)
1757 cpumask_clear(topology_core_cpumask(cpu));
1758 c->phys_proc_id = 0;
1759 c->cpu_core_id = 0;
1760 + c->booted_cores = 0;
1761 cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
1762 recompute_smt_state();
1763 }
1764 diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
1765 index da6a287a11e4..769c370011d6 100644
1766 --- a/arch/x86/kernel/tsc.c
1767 +++ b/arch/x86/kernel/tsc.c
1768 @@ -24,6 +24,7 @@
1769 #include <asm/geode.h>
1770 #include <asm/apic.h>
1771 #include <asm/intel-family.h>
1772 +#include <asm/i8259.h>
1773
1774 unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
1775 EXPORT_SYMBOL(cpu_khz);
1776 @@ -456,6 +457,20 @@ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
1777 unsigned long tscmin, tscmax;
1778 int pitcnt;
1779
1780 + if (!has_legacy_pic()) {
1781 + /*
1782 + * Relies on tsc_early_delay_calibrate() to have given us a
1783 + * semi-usable udelay(); wait for the same 50ms we would have
1784 + * spent in the PIT loop below.
1785 + */
1786 + udelay(10 * USEC_PER_MSEC);
1787 + udelay(10 * USEC_PER_MSEC);
1788 + udelay(10 * USEC_PER_MSEC);
1789 + udelay(10 * USEC_PER_MSEC);
1790 + udelay(10 * USEC_PER_MSEC);
1791 + return ULONG_MAX;
1792 + }
1793 +
1794 /* Set the Gate high, disable speaker */
1795 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
1796
1797 @@ -580,6 +595,9 @@ static unsigned long quick_pit_calibrate(void)
1798 u64 tsc, delta;
1799 unsigned long d1, d2;
1800
1801 + if (!has_legacy_pic())
1802 + return 0;
1803 +
1804 /* Set the Gate high, disable speaker */
1805 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
1806
1807 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
1808 index a69f18d4676c..7e5119c1d15c 100644
1809 --- a/arch/x86/kvm/cpuid.c
1810 +++ b/arch/x86/kvm/cpuid.c
1811 @@ -382,7 +382,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1812
1813 /* cpuid 7.0.edx*/
1814 const u32 kvm_cpuid_7_0_edx_x86_features =
1815 - F(SPEC_CTRL) | F(SSBD) | F(ARCH_CAPABILITIES);
1816 + F(SPEC_CTRL) | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
1817
1818 /* all calls to cpuid_count() should be made on the same cpu */
1819 get_cpu();
1820 @@ -468,6 +468,11 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1821 entry->ecx &= ~F(PKU);
1822 entry->edx &= kvm_cpuid_7_0_edx_x86_features;
1823 cpuid_mask(&entry->edx, CPUID_7_EDX);
1824 + /*
1825 + * We emulate ARCH_CAPABILITIES in software even
1826 + * if the host doesn't support it.
1827 + */
1828 + entry->edx |= F(ARCH_CAPABILITIES);
1829 } else {
1830 entry->ebx = 0;
1831 entry->ecx = 0;
1832 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
1833 index 5c3d416fff17..a8a86be8cf15 100644
1834 --- a/arch/x86/kvm/lapic.c
1835 +++ b/arch/x86/kvm/lapic.c
1836 @@ -299,8 +299,16 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
1837 if (!lapic_in_kernel(vcpu))
1838 return;
1839
1840 + /*
1841 + * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
1842 + * implementation), which doesn't have an EOI register. Some buggy
1843 + * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast
1844 + * in the LAPIC without checking the IOAPIC version first, so
1845 + * level-triggered interrupts never get EOIed in the IOAPIC.
1846 + */
1847 feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
1848 - if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
1849 + if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
1850 + !ioapic_in_kernel(vcpu->kvm))
1851 v |= APIC_LVR_DIRECTED_EOI;
1852 kvm_lapic_set_reg(apic, APIC_LVR, v);
1853 }
1854 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1855 index d92523afb425..2827a9622d97 100644
1856 --- a/arch/x86/kvm/vmx.c
1857 +++ b/arch/x86/kvm/vmx.c
1858 @@ -2558,6 +2558,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
1859 return;
1860 }
1861
1862 + WARN_ON_ONCE(vmx->emulation_required);
1863 +
1864 if (kvm_exception_is_soft(nr)) {
1865 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
1866 vmx->vcpu.arch.event_exit_inst_len);
1867 @@ -6430,12 +6432,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
1868 goto out;
1869 }
1870
1871 - if (err != EMULATE_DONE) {
1872 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1873 - vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1874 - vcpu->run->internal.ndata = 0;
1875 - return 0;
1876 - }
1877 + if (err != EMULATE_DONE)
1878 + goto emulation_error;
1879 +
1880 + if (vmx->emulation_required && !vmx->rmode.vm86_active &&
1881 + vcpu->arch.exception.pending)
1882 + goto emulation_error;
1883
1884 if (vcpu->arch.halt_request) {
1885 vcpu->arch.halt_request = 0;
1886 @@ -6451,6 +6453,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
1887
1888 out:
1889 return ret;
1890 +
1891 +emulation_error:
1892 + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1893 + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1894 + vcpu->run->internal.ndata = 0;
1895 + return 0;
1896 }
1897
1898 static int __grow_ple_window(int val)
1899 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1900 index a0cb85f30c94..4aa265ae8cf7 100644
1901 --- a/arch/x86/kvm/x86.c
1902 +++ b/arch/x86/kvm/x86.c
1903 @@ -4131,13 +4131,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
1904 mutex_unlock(&kvm->lock);
1905 break;
1906 case KVM_XEN_HVM_CONFIG: {
1907 + struct kvm_xen_hvm_config xhc;
1908 r = -EFAULT;
1909 - if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
1910 - sizeof(struct kvm_xen_hvm_config)))
1911 + if (copy_from_user(&xhc, argp, sizeof(xhc)))
1912 goto out;
1913 r = -EINVAL;
1914 - if (kvm->arch.xen_hvm_config.flags)
1915 + if (xhc.flags)
1916 goto out;
1917 + memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
1918 r = 0;
1919 break;
1920 }
1921 @@ -7258,6 +7259,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1922 {
1923 struct msr_data apic_base_msr;
1924 int mmu_reset_needed = 0;
1925 + int cpuid_update_needed = 0;
1926 int pending_vec, max_bits, idx;
1927 struct desc_ptr dt;
1928
1929 @@ -7289,8 +7291,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1930 vcpu->arch.cr0 = sregs->cr0;
1931
1932 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
1933 + cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
1934 + (X86_CR4_OSXSAVE | X86_CR4_PKE));
1935 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
1936 - if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE))
1937 + if (cpuid_update_needed)
1938 kvm_update_cpuid(vcpu);
1939
1940 idx = srcu_read_lock(&vcpu->kvm->srcu);
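
The KVM_XEN_HVM_CONFIG hunk above switches to the usual "copy into a local struct, validate, then commit" pattern, so a rejected ioctl can no longer leave kvm->arch.xen_hvm_config partially overwritten. A stand-alone sketch of that same pattern (a made-up config struct and a stubbed copy helper, purely illustrative):

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>

/* Illustrative stand-in for the real config structure. */
struct hvm_config_model {
	uint32_t flags;		/* must be 0 */
	uint64_t blob_addr;
};

static struct hvm_config_model live_config;	/* state the ioctl must not corrupt */

/* Stub standing in for copy_from_user(); returns 0 on success. */
static int copy_in(struct hvm_config_model *dst, const void *user_ptr)
{
	memcpy(dst, user_ptr, sizeof(*dst));
	return 0;
}

static int set_config(const void *user_ptr)
{
	struct hvm_config_model tmp;

	if (copy_in(&tmp, user_ptr))
		return -EFAULT;
	if (tmp.flags)		/* validate the local copy first */
		return -EINVAL;

	memcpy(&live_config, &tmp, sizeof(tmp));	/* commit only after validation */
	return 0;
}

int main(void)
{
	struct hvm_config_model bad = { .flags = 1, .blob_addr = 0x1000 };
	struct hvm_config_model good = { .flags = 0, .blob_addr = 0x2000 };

	printf("bad:  %d, live flags still %u\n", set_config(&bad),
	       (unsigned)live_config.flags);
	printf("good: %d, live blob_addr now %#llx\n", set_config(&good),
	       (unsigned long long)live_config.blob_addr);
	return 0;
}
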
1941 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
1942 index 7df8e3a79dc0..d35d0e4bbf99 100644
1943 --- a/arch/x86/mm/init_64.c
1944 +++ b/arch/x86/mm/init_64.c
1945 @@ -1014,8 +1014,7 @@ void __init mem_init(void)
1946 after_bootmem = 1;
1947
1948 /* Register memory areas for /proc/kcore */
1949 - kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
1950 - PAGE_SIZE, KCORE_OTHER);
1951 + kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
1952
1953 mem_init_print_info(NULL);
1954 }
1955 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
1956 index 73dcb0e18c1b..dcd671467154 100644
1957 --- a/arch/x86/mm/pageattr.c
1958 +++ b/arch/x86/mm/pageattr.c
1959 @@ -279,9 +279,11 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
1960
1961 /*
1962 * The .rodata section needs to be read-only. Using the pfn
1963 - * catches all aliases.
1964 + * catches all aliases. This also includes __ro_after_init,
1965 + * so do not enforce until kernel_set_to_readonly is true.
1966 */
1967 - if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
1968 + if (kernel_set_to_readonly &&
1969 + within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
1970 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
1971 pgprot_val(forbidden) |= _PAGE_RW;
1972
1973 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
1974 index b97ef29c940f..a3b63e5a527c 100644
1975 --- a/arch/x86/mm/pgtable.c
1976 +++ b/arch/x86/mm/pgtable.c
1977 @@ -1,5 +1,6 @@
1978 #include <linux/mm.h>
1979 #include <linux/gfp.h>
1980 +#include <linux/hugetlb.h>
1981 #include <asm/pgalloc.h>
1982 #include <asm/pgtable.h>
1983 #include <asm/tlb.h>
1984 @@ -577,6 +578,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1985 (mtrr != MTRR_TYPE_WRBACK))
1986 return 0;
1987
1988 + /* Bail out if we are on a populated non-leaf entry: */
1989 + if (pud_present(*pud) && !pud_huge(*pud))
1990 + return 0;
1991 +
1992 prot = pgprot_4k_2_large(prot);
1993
1994 set_pte((pte_t *)pud, pfn_pte(
1995 @@ -605,6 +610,10 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
1996 return 0;
1997 }
1998
1999 + /* Bail out if we are on a populated non-leaf entry: */
2000 + if (pmd_present(*pmd) && !pmd_huge(*pmd))
2001 + return 0;
2002 +
2003 prot = pgprot_4k_2_large(prot);
2004
2005 set_pte((pte_t *)pmd, pfn_pte(
2006 diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
2007 index 9f14bd34581d..74b516cb39df 100644
2008 --- a/arch/x86/power/hibernate_32.c
2009 +++ b/arch/x86/power/hibernate_32.c
2010 @@ -142,7 +142,7 @@ static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
2011 #endif
2012 }
2013
2014 -int swsusp_arch_resume(void)
2015 +asmlinkage int swsusp_arch_resume(void)
2016 {
2017 int error;
2018
2019 diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
2020 index 9634557a5444..0cb1dd461529 100644
2021 --- a/arch/x86/power/hibernate_64.c
2022 +++ b/arch/x86/power/hibernate_64.c
2023 @@ -149,7 +149,7 @@ static int relocate_restore_code(void)
2024 return 0;
2025 }
2026
2027 -int swsusp_arch_resume(void)
2028 +asmlinkage int swsusp_arch_resume(void)
2029 {
2030 int error;
2031
2032 diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
2033 index f6a009d88a33..52e5ea3b8e40 100644
2034 --- a/crypto/asymmetric_keys/pkcs7_trust.c
2035 +++ b/crypto/asymmetric_keys/pkcs7_trust.c
2036 @@ -106,6 +106,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
2037 pr_devel("sinfo %u: Direct signer is key %x\n",
2038 sinfo->index, key_serial(key));
2039 x509 = NULL;
2040 + sig = sinfo->sig;
2041 goto matched;
2042 }
2043 if (PTR_ERR(key) != -ENOKEY)
2044 diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
2045 index eb76a4c10dbf..8ce203f84ec4 100644
2046 --- a/drivers/acpi/acpi_pad.c
2047 +++ b/drivers/acpi/acpi_pad.c
2048 @@ -109,6 +109,7 @@ static void round_robin_cpu(unsigned int tsk_index)
2049 cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
2050 if (cpumask_empty(tmp)) {
2051 mutex_unlock(&round_robin_lock);
2052 + free_cpumask_var(tmp);
2053 return;
2054 }
2055 for_each_cpu(cpu, tmp) {
2056 @@ -126,6 +127,8 @@ static void round_robin_cpu(unsigned int tsk_index)
2057 mutex_unlock(&round_robin_lock);
2058
2059 set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
2060 +
2061 + free_cpumask_var(tmp);
2062 }
2063
2064 static void exit_round_robin(unsigned int tsk_index)
2065 diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
2066 index 80fc0b9b11e5..f362841881e6 100644
2067 --- a/drivers/acpi/acpica/evevent.c
2068 +++ b/drivers/acpi/acpica/evevent.c
2069 @@ -204,6 +204,7 @@ u32 acpi_ev_fixed_event_detect(void)
2070 u32 fixed_status;
2071 u32 fixed_enable;
2072 u32 i;
2073 + acpi_status status;
2074
2075 ACPI_FUNCTION_NAME(ev_fixed_event_detect);
2076
2077 @@ -211,8 +212,12 @@ u32 acpi_ev_fixed_event_detect(void)
2078 * Read the fixed feature status and enable registers, as all the cases
2079 * depend on their values. Ignore errors here.
2080 */
2081 - (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
2082 - (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
2083 + status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
2084 + status |=
2085 + acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
2086 + if (ACPI_FAILURE(status)) {
2087 + return (int_status);
2088 + }
2089
2090 ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
2091 "Fixed Event Block: Enable %08X Status %08X\n",
2092 diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
2093 index 5d59cfcef6f4..c5d6701a5ad2 100644
2094 --- a/drivers/acpi/acpica/nseval.c
2095 +++ b/drivers/acpi/acpica/nseval.c
2096 @@ -308,6 +308,14 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
2097 /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
2098
2099 status = AE_OK;
2100 + } else if (ACPI_FAILURE(status)) {
2101 +
2102 + /* If return_object exists, delete it */
2103 +
2104 + if (info->return_object) {
2105 + acpi_ut_remove_reference(info->return_object);
2106 + info->return_object = NULL;
2107 + }
2108 }
2109
2110 ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
2111 diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
2112 index bb01dea39fdc..9825780a1cd2 100644
2113 --- a/drivers/acpi/processor_perflib.c
2114 +++ b/drivers/acpi/processor_perflib.c
2115 @@ -161,7 +161,7 @@ int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
2116 {
2117 int ret;
2118
2119 - if (ignore_ppc) {
2120 + if (ignore_ppc || !pr->performance) {
2121 /*
2122 * Only when it is notification event, the _OST object
2123 * will be evaluated. Otherwise it is skipped.
2124 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
2125 index cf725d581cae..145dcf293c6f 100644
2126 --- a/drivers/acpi/scan.c
2127 +++ b/drivers/acpi/scan.c
2128 @@ -1422,6 +1422,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
2129 device_initialize(&device->dev);
2130 dev_set_uevent_suppress(&device->dev, true);
2131 acpi_init_coherency(device);
2132 + /* Assume there are unmet deps until acpi_device_dep_initialize() runs */
2133 + device->dep_unmet = 1;
2134 }
2135
2136 void acpi_device_add_finalize(struct acpi_device *device)
2137 @@ -1445,6 +1447,14 @@ static int acpi_add_single_object(struct acpi_device **child,
2138 }
2139
2140 acpi_init_device_object(device, handle, type, sta);
2141 + /*
2142 + * For ACPI_BUS_TYPE_DEVICE getting the status is delayed till here so
2143 + * that we can call acpi_bus_get_status() and use its quirk handling.
2144 + * Note this must be done before the get power-/wakeup_dev-flags calls.
2145 + */
2146 + if (type == ACPI_BUS_TYPE_DEVICE)
2147 + acpi_bus_get_status(device);
2148 +
2149 acpi_bus_get_power_flags(device);
2150 acpi_bus_get_wakeup_device_flags(device);
2151
2152 @@ -1517,9 +1527,11 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
2153 return -ENODEV;
2154
2155 *type = ACPI_BUS_TYPE_DEVICE;
2156 - status = acpi_bus_get_status_handle(handle, sta);
2157 - if (ACPI_FAILURE(status))
2158 - *sta = 0;
2159 + /*
2160 + * acpi_add_single_object updates this once we have an acpi_device
2161 + * so that acpi_bus_get_status' quirk handling can be used.
2162 + */
2163 + *sta = 0;
2164 break;
2165 case ACPI_TYPE_PROCESSOR:
2166 *type = ACPI_BUS_TYPE_PROCESSOR;
2167 @@ -1621,6 +1633,8 @@ static void acpi_device_dep_initialize(struct acpi_device *adev)
2168 acpi_status status;
2169 int i;
2170
2171 + adev->dep_unmet = 0;
2172 +
2173 if (!acpi_has_method(adev->handle, "_DEP"))
2174 return;
2175
2176 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
2177 index 4fe3ec122bf0..0e2c0ac5792d 100644
2178 --- a/drivers/ata/libata-core.c
2179 +++ b/drivers/ata/libata-core.c
2180 @@ -4366,6 +4366,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
2181 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
2182 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
2183
2184 + /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
2185 + SD7SN6S256G and SD8SN8U256G */
2186 + { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
2187 +
2188 /* devices which puke on READ_NATIVE_MAX */
2189 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
2190 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
2191 @@ -4426,6 +4430,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
2192 { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
2193
2194 /* devices that don't properly handle queued TRIM commands */
2195 + { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
2196 + ATA_HORKAGE_ZERO_AFTER_TRIM, },
2197 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
2198 ATA_HORKAGE_ZERO_AFTER_TRIM, },
2199 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
2200 diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
2201 index 9babbc845750..fb2c00fce8f9 100644
2202 --- a/drivers/ata/libata-scsi.c
2203 +++ b/drivers/ata/libata-scsi.c
2204 @@ -4156,7 +4156,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
2205 #ifdef ATA_DEBUG
2206 struct scsi_device *scsidev = cmd->device;
2207
2208 - DPRINTK("CDB (%u:%d,%d,%d) %9ph\n",
2209 + DPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
2210 ap->print_id,
2211 scsidev->channel, scsidev->id, scsidev->lun,
2212 cmd->cmnd);
2213 diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
2214 index a7b0fc7cb468..69c84fddfe8a 100644
2215 --- a/drivers/base/regmap/regmap.c
2216 +++ b/drivers/base/regmap/regmap.c
2217 @@ -98,7 +98,7 @@ bool regmap_cached(struct regmap *map, unsigned int reg)
2218 int ret;
2219 unsigned int val;
2220
2221 - if (map->cache == REGCACHE_NONE)
2222 + if (map->cache_type == REGCACHE_NONE)
2223 return false;
2224
2225 if (!map->cache_ops)
2226 diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
2227 index 93362362aa55..8474a1b0740f 100644
2228 --- a/drivers/block/paride/pcd.c
2229 +++ b/drivers/block/paride/pcd.c
2230 @@ -230,6 +230,8 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode)
2231 struct pcd_unit *cd = bdev->bd_disk->private_data;
2232 int ret;
2233
2234 + check_disk_change(bdev);
2235 +
2236 mutex_lock(&pcd_mutex);
2237 ret = cdrom_open(&cd->info, bdev, mode);
2238 mutex_unlock(&pcd_mutex);
2239 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
2240 index 128ebd439221..07b77fb102a1 100644
2241 --- a/drivers/cdrom/cdrom.c
2242 +++ b/drivers/cdrom/cdrom.c
2243 @@ -1154,9 +1154,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
2244
2245 cd_dbg(CD_OPEN, "entering cdrom_open\n");
2246
2247 - /* open is event synchronization point, check events first */
2248 - check_disk_change(bdev);
2249 -
2250 /* if this was a O_NONBLOCK open and we should honor the flags,
2251 * do a quick open without drive/disc integrity checks. */
2252 cdi->use_count++;
2253 diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
2254 index 584bc3126403..e2808fefbb78 100644
2255 --- a/drivers/cdrom/gdrom.c
2256 +++ b/drivers/cdrom/gdrom.c
2257 @@ -497,6 +497,9 @@ static struct cdrom_device_ops gdrom_ops = {
2258 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
2259 {
2260 int ret;
2261 +
2262 + check_disk_change(bdev);
2263 +
2264 mutex_lock(&gdrom_mutex);
2265 ret = cdrom_open(gd.cd_info, bdev, mode);
2266 mutex_unlock(&gdrom_mutex);
2267 diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
2268 index 63d84e6f1891..83c695938a2d 100644
2269 --- a/drivers/char/hw_random/stm32-rng.c
2270 +++ b/drivers/char/hw_random/stm32-rng.c
2271 @@ -21,6 +21,7 @@
2272 #include <linux/of_address.h>
2273 #include <linux/of_platform.h>
2274 #include <linux/pm_runtime.h>
2275 +#include <linux/reset.h>
2276 #include <linux/slab.h>
2277
2278 #define RNG_CR 0x00
2279 @@ -46,6 +47,7 @@ struct stm32_rng_private {
2280 struct hwrng rng;
2281 void __iomem *base;
2282 struct clk *clk;
2283 + struct reset_control *rst;
2284 };
2285
2286 static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
2287 @@ -140,6 +142,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
2288 if (IS_ERR(priv->clk))
2289 return PTR_ERR(priv->clk);
2290
2291 + priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
2292 + if (!IS_ERR(priv->rst)) {
2293 + reset_control_assert(priv->rst);
2294 + udelay(2);
2295 + reset_control_deassert(priv->rst);
2296 + }
2297 +
2298 dev_set_drvdata(dev, priv);
2299
2300 priv->rng.name = dev_driver_string(dev),
2301 diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
2302 index 6e658aa114f1..a70518a4fcec 100644
2303 --- a/drivers/char/ipmi/ipmi_powernv.c
2304 +++ b/drivers/char/ipmi/ipmi_powernv.c
2305 @@ -251,8 +251,9 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
2306 ipmi->irq = opal_event_request(prop);
2307 }
2308
2309 - if (request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
2310 - "opal-ipmi", ipmi)) {
2311 + rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
2312 + "opal-ipmi", ipmi);
2313 + if (rc) {
2314 dev_warn(dev, "Unable to request irq\n");
2315 goto err_dispose;
2316 }
2317 diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
2318 index f11c1c7e84c6..121319198478 100644
2319 --- a/drivers/char/ipmi/ipmi_ssif.c
2320 +++ b/drivers/char/ipmi/ipmi_ssif.c
2321 @@ -761,7 +761,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
2322 ssif_info->ssif_state = SSIF_NORMAL;
2323 ipmi_ssif_unlock_cond(ssif_info, flags);
2324 pr_warn(PFX "Error getting flags: %d %d, %x\n",
2325 - result, len, data[2]);
2326 + result, len, (len >= 3) ? data[2] : 0);
2327 } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
2328 || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
2329 /*
2330 @@ -783,7 +783,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
2331 if ((result < 0) || (len < 3) || (data[2] != 0)) {
2332 /* Error clearing flags */
2333 pr_warn(PFX "Error clearing flags: %d %d, %x\n",
2334 - result, len, data[2]);
2335 + result, len, (len >= 3) ? data[2] : 0);
2336 } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
2337 || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
2338 pr_warn(PFX "Invalid response clearing flags: %x %x\n",
2339 diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
2340 index 738515b89073..a22c1d704901 100644
2341 --- a/drivers/clocksource/fsl_ftm_timer.c
2342 +++ b/drivers/clocksource/fsl_ftm_timer.c
2343 @@ -281,7 +281,7 @@ static int __init __ftm_clk_init(struct device_node *np, char *cnt_name,
2344
2345 static unsigned long __init ftm_clk_init(struct device_node *np)
2346 {
2347 - unsigned long freq;
2348 + long freq;
2349
2350 freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt");
2351 if (freq <= 0)
2352 diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
2353 index 4852d9efe74e..9f09752169ea 100644
2354 --- a/drivers/cpufreq/cppc_cpufreq.c
2355 +++ b/drivers/cpufreq/cppc_cpufreq.c
2356 @@ -151,9 +151,19 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
2357 policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
2358 policy->shared_type = cpu->shared_type;
2359
2360 - if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
2361 + if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
2362 + int i;
2363 +
2364 cpumask_copy(policy->cpus, cpu->shared_cpu_map);
2365 - else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
2366 +
2367 + for_each_cpu(i, policy->cpus) {
2368 + if (unlikely(i == policy->cpu))
2369 + continue;
2370 +
2371 + memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
2372 + sizeof(cpu->perf_caps));
2373 + }
2374 + } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
2375 /* Support only SW_ANY for now. */
2376 pr_debug("Unsupported CPU co-ord type\n");
2377 return -EFAULT;
2378 @@ -218,8 +228,13 @@ static int __init cppc_cpufreq_init(void)
2379 return ret;
2380
2381 out:
2382 - for_each_possible_cpu(i)
2383 - kfree(all_cpu_data[i]);
2384 + for_each_possible_cpu(i) {
2385 + cpu = all_cpu_data[i];
2386 + if (!cpu)
2387 + break;
2388 + free_cpumask_var(cpu->shared_cpu_map);
2389 + kfree(cpu);
2390 + }
2391
2392 kfree(all_cpu_data);
2393 return -ENODEV;
2394 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
2395 index 35e34c0e0429..7523929becdc 100644
2396 --- a/drivers/cpufreq/cpufreq.c
2397 +++ b/drivers/cpufreq/cpufreq.c
2398 @@ -1288,14 +1288,14 @@ static int cpufreq_online(unsigned int cpu)
2399 return 0;
2400
2401 out_exit_policy:
2402 + for_each_cpu(j, policy->real_cpus)
2403 + remove_cpu_dev_symlink(policy, get_cpu_device(j));
2404 +
2405 up_write(&policy->rwsem);
2406
2407 if (cpufreq_driver->exit)
2408 cpufreq_driver->exit(policy);
2409
2410 - for_each_cpu(j, policy->real_cpus)
2411 - remove_cpu_dev_symlink(policy, get_cpu_device(j));
2412 -
2413 out_free_policy:
2414 cpufreq_policy_free(policy, !new_policy);
2415 return ret;
2416 diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
2417 index f3e211f8f6c5..71866646ffef 100644
2418 --- a/drivers/dma/mv_xor_v2.c
2419 +++ b/drivers/dma/mv_xor_v2.c
2420 @@ -152,6 +152,7 @@ struct mv_xor_v2_device {
2421 void __iomem *dma_base;
2422 void __iomem *glob_base;
2423 struct clk *clk;
2424 + struct clk *reg_clk;
2425 struct tasklet_struct irq_tasklet;
2426 struct list_head free_sw_desc;
2427 struct dma_device dmadev;
2428 @@ -697,13 +698,26 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
2429 if (ret)
2430 return ret;
2431
2432 + xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
2433 + if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
2434 + if (!IS_ERR(xor_dev->reg_clk)) {
2435 + ret = clk_prepare_enable(xor_dev->reg_clk);
2436 + if (ret)
2437 + return ret;
2438 + } else {
2439 + return PTR_ERR(xor_dev->reg_clk);
2440 + }
2441 + }
2442 +
2443 xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
2444 - if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
2445 - return -EPROBE_DEFER;
2446 + if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
2447 + ret = EPROBE_DEFER;
2448 + goto disable_reg_clk;
2449 + }
2450 if (!IS_ERR(xor_dev->clk)) {
2451 ret = clk_prepare_enable(xor_dev->clk);
2452 if (ret)
2453 - return ret;
2454 + goto disable_reg_clk;
2455 }
2456
2457 ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
2458 @@ -812,8 +826,9 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
2459 free_msi_irqs:
2460 platform_msi_domain_free_irqs(&pdev->dev);
2461 disable_clk:
2462 - if (!IS_ERR(xor_dev->clk))
2463 - clk_disable_unprepare(xor_dev->clk);
2464 + clk_disable_unprepare(xor_dev->clk);
2465 +disable_reg_clk:
2466 + clk_disable_unprepare(xor_dev->reg_clk);
2467 return ret;
2468 }
2469
2470 diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
2471 index fb2e7476d96b..2c449bdacb91 100644
2472 --- a/drivers/dma/pl330.c
2473 +++ b/drivers/dma/pl330.c
2474 @@ -1570,7 +1570,7 @@ static void pl330_dotask(unsigned long data)
2475 /* Returns 1 if state was updated, 0 otherwise */
2476 static int pl330_update(struct pl330_dmac *pl330)
2477 {
2478 - struct dma_pl330_desc *descdone, *tmp;
2479 + struct dma_pl330_desc *descdone;
2480 unsigned long flags;
2481 void __iomem *regs;
2482 u32 val;
2483 @@ -1648,7 +1648,9 @@ static int pl330_update(struct pl330_dmac *pl330)
2484 }
2485
2486 /* Now that we are in no hurry, do the callbacks */
2487 - list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
2488 + while (!list_empty(&pl330->req_done)) {
2489 + descdone = list_first_entry(&pl330->req_done,
2490 + struct dma_pl330_desc, rqd);
2491 list_del(&descdone->rqd);
2492 spin_unlock_irqrestore(&pl330->lock, flags);
2493 dma_pl330_rqcb(descdone, PL330_ERR_NONE);
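
The pl330_update() hunk above drops the driver lock around each completion callback, so even the next pointer pre-fetched by list_for_each_entry_safe() can go stale while the lock is released. The pattern the patch switches to, popping exactly one entry per iteration and re-reading the list head after the lock is retaken, is sketched below in stand-alone form (a toy singly linked list and stubbed lock, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Minimal intrusive list, standing in for the kernel's list_head. */
struct node {
	struct node *next;
	int id;
};

static struct node *done_list;		/* protected by the (stubbed) lock */

static void lock(void)   { /* spin_lock_irqsave() in the driver */ }
static void unlock(void) { /* spin_unlock_irqrestore() in the driver */ }

static void callback(struct node *n)
{
	/* While the lock is dropped around this call, other contexts may add
	 * or remove list entries, which is why a cached "next" pointer from a
	 * list_for_each_entry_safe()-style walk could be invalid by now. */
	printf("completed descriptor %d\n", n->id);
	free(n);
}

static void drain(void)
{
	lock();
	while (done_list) {
		/* Always re-read the head: pop exactly one entry per iteration. */
		struct node *n = done_list;

		done_list = n->next;
		unlock();
		callback(n);
		lock();
	}
	unlock();
}

int main(void)
{
	for (int id = 3; id >= 1; id--) {
		struct node *n = malloc(sizeof(*n));
		n->id = id;
		n->next = done_list;
		done_list = n;
	}
	drain();
	return 0;
}
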
2494 diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
2495 index 03c4eb3fd314..6497f5283e3b 100644
2496 --- a/drivers/dma/qcom/bam_dma.c
2497 +++ b/drivers/dma/qcom/bam_dma.c
2498 @@ -387,6 +387,7 @@ struct bam_device {
2499 struct device_dma_parameters dma_parms;
2500 struct bam_chan *channels;
2501 u32 num_channels;
2502 + u32 num_ees;
2503
2504 /* execution environment ID, from DT */
2505 u32 ee;
2506 @@ -1076,15 +1077,19 @@ static int bam_init(struct bam_device *bdev)
2507 u32 val;
2508
2509 /* read revision and configuration information */
2510 - val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
2511 - val &= NUM_EES_MASK;
2512 + if (!bdev->num_ees) {
2513 + val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
2514 + bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
2515 + }
2516
2517 /* check that configured EE is within range */
2518 - if (bdev->ee >= val)
2519 + if (bdev->ee >= bdev->num_ees)
2520 return -EINVAL;
2521
2522 - val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
2523 - bdev->num_channels = val & BAM_NUM_PIPES_MASK;
2524 + if (!bdev->num_channels) {
2525 + val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
2526 + bdev->num_channels = val & BAM_NUM_PIPES_MASK;
2527 + }
2528
2529 if (bdev->controlled_remotely)
2530 return 0;
2531 @@ -1179,6 +1184,18 @@ static int bam_dma_probe(struct platform_device *pdev)
2532 bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
2533 "qcom,controlled-remotely");
2534
2535 + if (bdev->controlled_remotely) {
2536 + ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
2537 + &bdev->num_channels);
2538 + if (ret)
2539 + dev_err(bdev->dev, "num-channels unspecified in dt\n");
2540 +
2541 + ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
2542 + &bdev->num_ees);
2543 + if (ret)
2544 + dev_err(bdev->dev, "num-ees unspecified in dt\n");
2545 + }
2546 +
2547 bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
2548 if (IS_ERR(bdev->bamclk))
2549 return PTR_ERR(bdev->bamclk);
2550 diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
2551 index 4c357d475465..d032032337e7 100644
2552 --- a/drivers/dma/sh/rcar-dmac.c
2553 +++ b/drivers/dma/sh/rcar-dmac.c
2554 @@ -870,7 +870,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
2555
2556 rcar_dmac_chan_configure_desc(chan, desc);
2557
2558 - max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
2559 + max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
2560
2561 /*
2562 * Allocate and fill the transfer chunk descriptors. We own the only
2563 @@ -1246,8 +1246,17 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
2564 * If the cookie doesn't correspond to the currently running transfer
2565 * then the descriptor hasn't been processed yet, and the residue is
2566 * equal to the full descriptor size.
2567 + * Also, a client driver may call this function before
2568 + * rcar_dmac_isr_channel_thread() runs. In that case "desc.running"
2569 + * already points to the next descriptor and the completed one is on
2570 + * the done list. So, if the argument cookie matches a descriptor on
2571 + * the done list, we can assume the residue is zero.
2572 */
2573 if (cookie != desc->async_tx.cookie) {
2574 + list_for_each_entry(desc, &chan->desc.done, node) {
2575 + if (cookie == desc->async_tx.cookie)
2576 + return 0;
2577 + }
2578 list_for_each_entry(desc, &chan->desc.pending, node) {
2579 if (cookie == desc->async_tx.cookie)
2580 return desc->size;
2581 diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
2582 index 8bf89267dc25..d731b413cb2c 100644
2583 --- a/drivers/firewire/ohci.c
2584 +++ b/drivers/firewire/ohci.c
2585 @@ -1130,7 +1130,13 @@ static int context_add_buffer(struct context *ctx)
2586 return -ENOMEM;
2587
2588 offset = (void *)&desc->buffer - (void *)desc;
2589 - desc->buffer_size = PAGE_SIZE - offset;
2590 + /*
2591 + * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
2592 + * for descriptors, even 0x10-byte ones. This can cause page faults when
2593 + * an IOMMU is in use and the oversized read crosses a page boundary.
2594 + * Work around this by always leaving at least 0x10 bytes of padding.
2595 + */
2596 + desc->buffer_size = PAGE_SIZE - offset - 0x10;
2597 desc->buffer_bus = bus_addr + offset;
2598 desc->used = 0;
2599
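
The buffer_size change above is easiest to see with concrete numbers: if a controller reads 0x20 bytes for the last 0x10-byte descriptor in the page, the read overshoots the page end unless 0x10 bytes are held back. A small arithmetic check (the header offset value below is only illustrative; the driver derives the real one from its struct layout):

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int offset = 16;	/* assumed descriptor-buffer header size */
	unsigned int read_size = 0x20;	/* oversized DMA read issued by some controllers */

	unsigned int old_buffer_size = PAGE_SIZE - offset;		/* before the patch */
	unsigned int new_buffer_size = PAGE_SIZE - offset - 0x10;	/* after the patch */

	/* Start of the last 0x10-byte descriptor slot, relative to the page start. */
	unsigned int old_last = offset + old_buffer_size - 0x10;
	unsigned int new_last = offset + new_buffer_size - 0x10;

	printf("old: read [%u, %u) %s the page\n", old_last, old_last + read_size,
	       old_last + read_size > PAGE_SIZE ? "crosses" : "stays within");
	printf("new: read [%u, %u) %s the page\n", new_last, new_last + read_size,
	       new_last + read_size > PAGE_SIZE ? "crosses" : "stays within");
	return 0;
}
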
2600 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
2601 index 88bebe1968b7..42844c318445 100644
2602 --- a/drivers/firmware/dmi_scan.c
2603 +++ b/drivers/firmware/dmi_scan.c
2604 @@ -18,7 +18,7 @@ EXPORT_SYMBOL_GPL(dmi_kobj);
2605 * of and an antecedent to, SMBIOS, which stands for System
2606 * Management BIOS. See further: http://www.dmtf.org/standards
2607 */
2608 -static const char dmi_empty_string[] = " ";
2609 +static const char dmi_empty_string[] = "";
2610
2611 static u32 dmi_ver __initdata;
2612 static u32 dmi_len;
2613 @@ -44,25 +44,21 @@ static int dmi_memdev_nr;
2614 static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
2615 {
2616 const u8 *bp = ((u8 *) dm) + dm->length;
2617 + const u8 *nsp;
2618
2619 if (s) {
2620 - s--;
2621 - while (s > 0 && *bp) {
2622 + while (--s > 0 && *bp)
2623 bp += strlen(bp) + 1;
2624 - s--;
2625 - }
2626 -
2627 - if (*bp != 0) {
2628 - size_t len = strlen(bp)+1;
2629 - size_t cmp_len = len > 8 ? 8 : len;
2630
2631 - if (!memcmp(bp, dmi_empty_string, cmp_len))
2632 - return dmi_empty_string;
2633 + /* Strings containing only spaces are considered empty */
2634 + nsp = bp;
2635 + while (*nsp == ' ')
2636 + nsp++;
2637 + if (*nsp != '\0')
2638 return bp;
2639 - }
2640 }
2641
2642 - return "";
2643 + return dmi_empty_string;
2644 }
2645
2646 static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
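
The dmi_string_nosave() rework above changes both the 1-based index walk and the notion of an "empty" string. A stand-alone userspace copy of the new logic, run against a made-up string set, shows the effect (the sketch takes the string area directly instead of a struct dmi_header; index 1 exists, index 2 contains only spaces and is reported as empty):

#include <stdio.h>
#include <string.h>

/* Walk a DMI-style string set: NUL-terminated strings packed back to back,
 * indexed from 1; index 0 means "no string". Strings made up only of spaces
 * are reported as empty, as in the patched dmi_string_nosave(). */
static const char *dmi_string_lookup(const char *strings, unsigned char s)
{
	const char *bp = strings;
	const char *nsp;

	if (s) {
		while (--s > 0 && *bp)
			bp += strlen(bp) + 1;

		nsp = bp;
		while (*nsp == ' ')
			nsp++;
		if (*nsp != '\0')
			return bp;
	}
	return "";
}

int main(void)
{
	/* Two strings followed by the terminating empty string:
	 * index 1 = "To Be Filled By O.E.M.", index 2 = "   " (spaces only). */
	static const char table[] = "To Be Filled By O.E.M.\0   \0";

	printf("1: '%s'\n", dmi_string_lookup(table, 1));
	printf("2: '%s'\n", dmi_string_lookup(table, 2));	/* treated as empty */
	printf("0: '%s'\n", dmi_string_lookup(table, 0));	/* no string */
	return 0;
}
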
2647 diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
2648 index 603d8425cca6..699db138c5de 100644
2649 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
2650 +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
2651 @@ -926,7 +926,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
2652 struct drm_device *drm_dev = g2d->subdrv.drm_dev;
2653 struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
2654 struct drm_exynos_pending_g2d_event *e;
2655 - struct timeval now;
2656 + struct timespec64 now;
2657
2658 if (list_empty(&runqueue_node->event_list))
2659 return;
2660 @@ -934,9 +934,9 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
2661 e = list_first_entry(&runqueue_node->event_list,
2662 struct drm_exynos_pending_g2d_event, base.link);
2663
2664 - do_gettimeofday(&now);
2665 + ktime_get_ts64(&now);
2666 e->event.tv_sec = now.tv_sec;
2667 - e->event.tv_usec = now.tv_usec;
2668 + e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
2669 e->event.cmdlist_no = cmdlist_no;
2670
2671 drm_send_event(drm_dev, &e->base);
2672 diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
2673 index 30496134a3d0..d7cbe53c4c01 100644
2674 --- a/drivers/gpu/drm/exynos/regs-fimc.h
2675 +++ b/drivers/gpu/drm/exynos/regs-fimc.h
2676 @@ -569,7 +569,7 @@
2677 #define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
2678 #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
2679 #define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
2680 -#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0))
2681 +#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
2682
2683 /* Real input DMA size register */
2684 #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
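
The regs-fimc.h change above fixes a '<' vs '<<' typo in the Cb/Cr pattern mask. A two-line check makes the consequence plain: both comparisons in the old expression are false, so the old "mask" evaluated to 0 and, presumably, any AND with it selected nothing:

#include <stdio.h>

/* The typo'd mask used '<' (less-than) instead of '<<' (shift). */
#define MASK_OLD ((0xff < 13) | (0xff < 0))
#define MASK_NEW ((0xff << 13) | (0xff << 0))

int main(void)
{
	printf("old mask: %#x\n", MASK_OLD);	/* 0 */
	printf("new mask: %#x\n", MASK_NEW);	/* 0x1fe0ff */
	return 0;
}
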
2685 diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
2686 index 6be515a9fb69..8dbba61a2708 100644
2687 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c
2688 +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
2689 @@ -189,7 +189,11 @@ static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
2690 struct drm_crtc_state *old_crtc_state)
2691 {
2692 drm_crtc_vblank_on(crtc);
2693 +}
2694
2695 +static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
2696 + struct drm_crtc_state *old_crtc_state)
2697 +{
2698 spin_lock_irq(&crtc->dev->event_lock);
2699 if (crtc->state->event) {
2700 WARN_ON(drm_crtc_vblank_get(crtc));
2701 @@ -257,6 +261,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
2702 .mode_set_nofb = ipu_crtc_mode_set_nofb,
2703 .atomic_check = ipu_crtc_atomic_check,
2704 .atomic_begin = ipu_crtc_atomic_begin,
2705 + .atomic_flush = ipu_crtc_atomic_flush,
2706 .atomic_disable = ipu_crtc_atomic_disable,
2707 .enable = ipu_crtc_enable,
2708 };
2709 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
2710 index e2faccffee6f..d66e0e76faf4 100644
2711 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
2712 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
2713 @@ -46,8 +46,8 @@ uint32_t gf100_pmu_data[] = {
2714 0x00000000,
2715 0x00000000,
2716 0x584d454d,
2717 - 0x00000756,
2718 - 0x00000748,
2719 + 0x00000754,
2720 + 0x00000746,
2721 0x00000000,
2722 0x00000000,
2723 0x00000000,
2724 @@ -68,8 +68,8 @@ uint32_t gf100_pmu_data[] = {
2725 0x00000000,
2726 0x00000000,
2727 0x46524550,
2728 - 0x0000075a,
2729 0x00000758,
2730 + 0x00000756,
2731 0x00000000,
2732 0x00000000,
2733 0x00000000,
2734 @@ -90,8 +90,8 @@ uint32_t gf100_pmu_data[] = {
2735 0x00000000,
2736 0x00000000,
2737 0x5f433249,
2738 - 0x00000b8a,
2739 - 0x00000a2d,
2740 + 0x00000b88,
2741 + 0x00000a2b,
2742 0x00000000,
2743 0x00000000,
2744 0x00000000,
2745 @@ -112,8 +112,8 @@ uint32_t gf100_pmu_data[] = {
2746 0x00000000,
2747 0x00000000,
2748 0x54534554,
2749 - 0x00000bb3,
2750 - 0x00000b8c,
2751 + 0x00000bb1,
2752 + 0x00000b8a,
2753 0x00000000,
2754 0x00000000,
2755 0x00000000,
2756 @@ -134,8 +134,8 @@ uint32_t gf100_pmu_data[] = {
2757 0x00000000,
2758 0x00000000,
2759 0x454c4449,
2760 - 0x00000bbf,
2761 0x00000bbd,
2762 + 0x00000bbb,
2763 0x00000000,
2764 0x00000000,
2765 0x00000000,
2766 @@ -236,19 +236,19 @@ uint32_t gf100_pmu_data[] = {
2767 0x000005d3,
2768 0x00000003,
2769 0x00000002,
2770 - 0x0000069d,
2771 + 0x0000069b,
2772 0x00040004,
2773 0x00000000,
2774 - 0x000006b9,
2775 + 0x000006b7,
2776 0x00010005,
2777 0x00000000,
2778 - 0x000006d6,
2779 + 0x000006d4,
2780 0x00010006,
2781 0x00000000,
2782 0x0000065b,
2783 0x00000007,
2784 0x00000000,
2785 - 0x000006e1,
2786 + 0x000006df,
2787 /* 0x03c4: memx_func_tail */
2788 /* 0x03c4: memx_ts_start */
2789 0x00000000,
2790 @@ -1372,432 +1372,432 @@ uint32_t gf100_pmu_code[] = {
2791 /* 0x065b: memx_func_wait_vblank */
2792 0x9800f840,
2793 0x66b00016,
2794 - 0x130bf400,
2795 + 0x120bf400,
2796 0xf40166b0,
2797 0x0ef4060b,
2798 /* 0x066d: memx_func_wait_vblank_head1 */
2799 - 0x2077f12e,
2800 - 0x070ef400,
2801 -/* 0x0674: memx_func_wait_vblank_head0 */
2802 - 0x000877f1,
2803 -/* 0x0678: memx_func_wait_vblank_0 */
2804 - 0x07c467f1,
2805 - 0xcf0664b6,
2806 - 0x67fd0066,
2807 - 0xf31bf404,
2808 -/* 0x0688: memx_func_wait_vblank_1 */
2809 - 0x07c467f1,
2810 - 0xcf0664b6,
2811 - 0x67fd0066,
2812 - 0xf30bf404,
2813 -/* 0x0698: memx_func_wait_vblank_fini */
2814 - 0xf80410b6,
2815 -/* 0x069d: memx_func_wr32 */
2816 - 0x00169800,
2817 - 0xb6011598,
2818 - 0x60f90810,
2819 - 0xd0fc50f9,
2820 - 0x21f4e0fc,
2821 - 0x0242b640,
2822 - 0xf8e91bf4,
2823 -/* 0x06b9: memx_func_wait */
2824 - 0x2c87f000,
2825 - 0xcf0684b6,
2826 - 0x1e980088,
2827 - 0x011d9800,
2828 - 0x98021c98,
2829 - 0x10b6031b,
2830 - 0xa321f410,
2831 -/* 0x06d6: memx_func_delay */
2832 - 0x1e9800f8,
2833 - 0x0410b600,
2834 - 0xf87e21f4,
2835 -/* 0x06e1: memx_func_train */
2836 -/* 0x06e3: memx_exec */
2837 - 0xf900f800,
2838 - 0xb9d0f9e0,
2839 - 0xb2b902c1,
2840 -/* 0x06ed: memx_exec_next */
2841 - 0x00139802,
2842 - 0xe70410b6,
2843 - 0xe701f034,
2844 - 0xb601e033,
2845 - 0x30f00132,
2846 - 0xde35980c,
2847 - 0x12b855f9,
2848 - 0xe41ef406,
2849 - 0x98f10b98,
2850 - 0xcbbbf20c,
2851 - 0xc4b7f102,
2852 - 0x06b4b607,
2853 - 0xfc00bbcf,
2854 - 0xf5e0fcd0,
2855 - 0xf8033621,
2856 -/* 0x0729: memx_info */
2857 - 0x01c67000,
2858 -/* 0x072f: memx_info_data */
2859 - 0xf10e0bf4,
2860 - 0xf103ccc7,
2861 - 0xf40800b7,
2862 -/* 0x073a: memx_info_train */
2863 - 0xc7f10b0e,
2864 - 0xb7f10bcc,
2865 -/* 0x0742: memx_info_send */
2866 - 0x21f50100,
2867 - 0x00f80336,
2868 -/* 0x0748: memx_recv */
2869 - 0xf401d6b0,
2870 - 0xd6b0980b,
2871 - 0xd80bf400,
2872 -/* 0x0756: memx_init */
2873 - 0x00f800f8,
2874 -/* 0x0758: perf_recv */
2875 -/* 0x075a: perf_init */
2876 + 0x2077f02c,
2877 +/* 0x0673: memx_func_wait_vblank_head0 */
2878 + 0xf0060ef4,
2879 +/* 0x0676: memx_func_wait_vblank_0 */
2880 + 0x67f10877,
2881 + 0x64b607c4,
2882 + 0x0066cf06,
2883 + 0xf40467fd,
2884 +/* 0x0686: memx_func_wait_vblank_1 */
2885 + 0x67f1f31b,
2886 + 0x64b607c4,
2887 + 0x0066cf06,
2888 + 0xf40467fd,
2889 +/* 0x0696: memx_func_wait_vblank_fini */
2890 + 0x10b6f30b,
2891 +/* 0x069b: memx_func_wr32 */
2892 + 0x9800f804,
2893 + 0x15980016,
2894 + 0x0810b601,
2895 + 0x50f960f9,
2896 + 0xe0fcd0fc,
2897 + 0xb64021f4,
2898 + 0x1bf40242,
2899 +/* 0x06b7: memx_func_wait */
2900 + 0xf000f8e9,
2901 + 0x84b62c87,
2902 + 0x0088cf06,
2903 + 0x98001e98,
2904 + 0x1c98011d,
2905 + 0x031b9802,
2906 + 0xf41010b6,
2907 + 0x00f8a321,
2908 +/* 0x06d4: memx_func_delay */
2909 + 0xb6001e98,
2910 + 0x21f40410,
2911 +/* 0x06df: memx_func_train */
2912 + 0xf800f87e,
2913 +/* 0x06e1: memx_exec */
2914 + 0xf9e0f900,
2915 + 0x02c1b9d0,
2916 +/* 0x06eb: memx_exec_next */
2917 + 0x9802b2b9,
2918 + 0x10b60013,
2919 + 0xf034e704,
2920 + 0xe033e701,
2921 + 0x0132b601,
2922 + 0x980c30f0,
2923 + 0x55f9de35,
2924 + 0xf40612b8,
2925 + 0x0b98e41e,
2926 + 0xf20c98f1,
2927 + 0xf102cbbb,
2928 + 0xb607c4b7,
2929 + 0xbbcf06b4,
2930 + 0xfcd0fc00,
2931 + 0x3621f5e0,
2932 +/* 0x0727: memx_info */
2933 + 0x7000f803,
2934 + 0x0bf401c6,
2935 +/* 0x072d: memx_info_data */
2936 + 0xccc7f10e,
2937 + 0x00b7f103,
2938 + 0x0b0ef408,
2939 +/* 0x0738: memx_info_train */
2940 + 0x0bccc7f1,
2941 + 0x0100b7f1,
2942 +/* 0x0740: memx_info_send */
2943 + 0x033621f5,
2944 +/* 0x0746: memx_recv */
2945 + 0xd6b000f8,
2946 + 0x980bf401,
2947 + 0xf400d6b0,
2948 + 0x00f8d80b,
2949 +/* 0x0754: memx_init */
2950 +/* 0x0756: perf_recv */
2951 0x00f800f8,
2952 -/* 0x075c: i2c_drive_scl */
2953 - 0xf40036b0,
2954 - 0x07f1110b,
2955 - 0x04b607e0,
2956 - 0x0001d006,
2957 - 0x00f804bd,
2958 -/* 0x0770: i2c_drive_scl_lo */
2959 - 0x07e407f1,
2960 - 0xd00604b6,
2961 - 0x04bd0001,
2962 -/* 0x077e: i2c_drive_sda */
2963 +/* 0x0758: perf_init */
2964 +/* 0x075a: i2c_drive_scl */
2965 0x36b000f8,
2966 0x110bf400,
2967 0x07e007f1,
2968 0xd00604b6,
2969 - 0x04bd0002,
2970 -/* 0x0792: i2c_drive_sda_lo */
2971 + 0x04bd0001,
2972 +/* 0x076e: i2c_drive_scl_lo */
2973 0x07f100f8,
2974 0x04b607e4,
2975 + 0x0001d006,
2976 + 0x00f804bd,
2977 +/* 0x077c: i2c_drive_sda */
2978 + 0xf40036b0,
2979 + 0x07f1110b,
2980 + 0x04b607e0,
2981 0x0002d006,
2982 0x00f804bd,
2983 -/* 0x07a0: i2c_sense_scl */
2984 - 0xf10132f4,
2985 - 0xb607c437,
2986 - 0x33cf0634,
2987 - 0x0431fd00,
2988 - 0xf4060bf4,
2989 -/* 0x07b6: i2c_sense_scl_done */
2990 - 0x00f80131,
2991 -/* 0x07b8: i2c_sense_sda */
2992 - 0xf10132f4,
2993 - 0xb607c437,
2994 - 0x33cf0634,
2995 - 0x0432fd00,
2996 - 0xf4060bf4,
2997 -/* 0x07ce: i2c_sense_sda_done */
2998 - 0x00f80131,
2999 -/* 0x07d0: i2c_raise_scl */
3000 - 0x47f140f9,
3001 - 0x37f00898,
3002 - 0x5c21f501,
3003 -/* 0x07dd: i2c_raise_scl_wait */
3004 - 0xe8e7f107,
3005 - 0x7e21f403,
3006 - 0x07a021f5,
3007 - 0xb60901f4,
3008 - 0x1bf40142,
3009 -/* 0x07f1: i2c_raise_scl_done */
3010 - 0xf840fcef,
3011 -/* 0x07f5: i2c_start */
3012 - 0xa021f500,
3013 - 0x0d11f407,
3014 - 0x07b821f5,
3015 - 0xf40611f4,
3016 -/* 0x0806: i2c_start_rep */
3017 - 0x37f0300e,
3018 - 0x5c21f500,
3019 - 0x0137f007,
3020 - 0x077e21f5,
3021 - 0xb60076bb,
3022 - 0x50f90465,
3023 - 0xbb046594,
3024 - 0x50bd0256,
3025 - 0xfc0475fd,
3026 - 0xd021f550,
3027 - 0x0464b607,
3028 -/* 0x0833: i2c_start_send */
3029 - 0xf01f11f4,
3030 +/* 0x0790: i2c_drive_sda_lo */
3031 + 0x07e407f1,
3032 + 0xd00604b6,
3033 + 0x04bd0002,
3034 +/* 0x079e: i2c_sense_scl */
3035 + 0x32f400f8,
3036 + 0xc437f101,
3037 + 0x0634b607,
3038 + 0xfd0033cf,
3039 + 0x0bf40431,
3040 + 0x0131f406,
3041 +/* 0x07b4: i2c_sense_scl_done */
3042 +/* 0x07b6: i2c_sense_sda */
3043 + 0x32f400f8,
3044 + 0xc437f101,
3045 + 0x0634b607,
3046 + 0xfd0033cf,
3047 + 0x0bf40432,
3048 + 0x0131f406,
3049 +/* 0x07cc: i2c_sense_sda_done */
3050 +/* 0x07ce: i2c_raise_scl */
3051 + 0x40f900f8,
3052 + 0x089847f1,
3053 + 0xf50137f0,
3054 +/* 0x07db: i2c_raise_scl_wait */
3055 + 0xf1075a21,
3056 + 0xf403e8e7,
3057 + 0x21f57e21,
3058 + 0x01f4079e,
3059 + 0x0142b609,
3060 +/* 0x07ef: i2c_raise_scl_done */
3061 + 0xfcef1bf4,
3062 +/* 0x07f3: i2c_start */
3063 + 0xf500f840,
3064 + 0xf4079e21,
3065 + 0x21f50d11,
3066 + 0x11f407b6,
3067 + 0x300ef406,
3068 +/* 0x0804: i2c_start_rep */
3069 + 0xf50037f0,
3070 + 0xf0075a21,
3071 + 0x21f50137,
3072 + 0x76bb077c,
3073 + 0x0465b600,
3074 + 0x659450f9,
3075 + 0x0256bb04,
3076 + 0x75fd50bd,
3077 + 0xf550fc04,
3078 + 0xb607ce21,
3079 + 0x11f40464,
3080 +/* 0x0831: i2c_start_send */
3081 + 0x0037f01f,
3082 + 0x077c21f5,
3083 + 0x1388e7f1,
3084 + 0xf07e21f4,
3085 0x21f50037,
3086 - 0xe7f1077e,
3087 + 0xe7f1075a,
3088 0x21f41388,
3089 - 0x0037f07e,
3090 - 0x075c21f5,
3091 - 0x1388e7f1,
3092 -/* 0x084f: i2c_start_out */
3093 - 0xf87e21f4,
3094 -/* 0x0851: i2c_stop */
3095 - 0x0037f000,
3096 - 0x075c21f5,
3097 - 0xf50037f0,
3098 - 0xf1077e21,
3099 - 0xf403e8e7,
3100 - 0x37f07e21,
3101 - 0x5c21f501,
3102 - 0x88e7f107,
3103 - 0x7e21f413,
3104 +/* 0x084d: i2c_start_out */
3105 +/* 0x084f: i2c_stop */
3106 + 0xf000f87e,
3107 + 0x21f50037,
3108 + 0x37f0075a,
3109 + 0x7c21f500,
3110 + 0xe8e7f107,
3111 + 0x7e21f403,
3112 0xf50137f0,
3113 - 0xf1077e21,
3114 + 0xf1075a21,
3115 0xf41388e7,
3116 - 0x00f87e21,
3117 -/* 0x0884: i2c_bitw */
3118 - 0x077e21f5,
3119 - 0x03e8e7f1,
3120 - 0xbb7e21f4,
3121 - 0x65b60076,
3122 - 0x9450f904,
3123 - 0x56bb0465,
3124 - 0xfd50bd02,
3125 - 0x50fc0475,
3126 - 0x07d021f5,
3127 - 0xf40464b6,
3128 - 0xe7f11811,
3129 - 0x21f41388,
3130 - 0x0037f07e,
3131 - 0x075c21f5,
3132 - 0x1388e7f1,
3133 -/* 0x08c3: i2c_bitw_out */
3134 - 0xf87e21f4,
3135 -/* 0x08c5: i2c_bitr */
3136 - 0x0137f000,
3137 - 0x077e21f5,
3138 - 0x03e8e7f1,
3139 - 0xbb7e21f4,
3140 - 0x65b60076,
3141 - 0x9450f904,
3142 - 0x56bb0465,
3143 - 0xfd50bd02,
3144 - 0x50fc0475,
3145 - 0x07d021f5,
3146 - 0xf40464b6,
3147 - 0x21f51b11,
3148 - 0x37f007b8,
3149 - 0x5c21f500,
3150 + 0x37f07e21,
3151 + 0x7c21f501,
3152 0x88e7f107,
3153 0x7e21f413,
3154 - 0xf4013cf0,
3155 -/* 0x090a: i2c_bitr_done */
3156 - 0x00f80131,
3157 -/* 0x090c: i2c_get_byte */
3158 - 0xf00057f0,
3159 -/* 0x0912: i2c_get_byte_next */
3160 - 0x54b60847,
3161 - 0x0076bb01,
3162 +/* 0x0882: i2c_bitw */
3163 + 0x21f500f8,
3164 + 0xe7f1077c,
3165 + 0x21f403e8,
3166 + 0x0076bb7e,
3167 0xf90465b6,
3168 0x04659450,
3169 0xbd0256bb,
3170 0x0475fd50,
3171 0x21f550fc,
3172 - 0x64b608c5,
3173 - 0x2b11f404,
3174 - 0xb60553fd,
3175 - 0x1bf40142,
3176 - 0x0137f0d8,
3177 - 0xb60076bb,
3178 - 0x50f90465,
3179 - 0xbb046594,
3180 - 0x50bd0256,
3181 - 0xfc0475fd,
3182 - 0x8421f550,
3183 - 0x0464b608,
3184 -/* 0x095c: i2c_get_byte_done */
3185 -/* 0x095e: i2c_put_byte */
3186 - 0x47f000f8,
3187 -/* 0x0961: i2c_put_byte_next */
3188 - 0x0142b608,
3189 - 0xbb3854ff,
3190 + 0x64b607ce,
3191 + 0x1811f404,
3192 + 0x1388e7f1,
3193 + 0xf07e21f4,
3194 + 0x21f50037,
3195 + 0xe7f1075a,
3196 + 0x21f41388,
3197 +/* 0x08c1: i2c_bitw_out */
3198 +/* 0x08c3: i2c_bitr */
3199 + 0xf000f87e,
3200 + 0x21f50137,
3201 + 0xe7f1077c,
3202 + 0x21f403e8,
3203 + 0x0076bb7e,
3204 + 0xf90465b6,
3205 + 0x04659450,
3206 + 0xbd0256bb,
3207 + 0x0475fd50,
3208 + 0x21f550fc,
3209 + 0x64b607ce,
3210 + 0x1b11f404,
3211 + 0x07b621f5,
3212 + 0xf50037f0,
3213 + 0xf1075a21,
3214 + 0xf41388e7,
3215 + 0x3cf07e21,
3216 + 0x0131f401,
3217 +/* 0x0908: i2c_bitr_done */
3218 +/* 0x090a: i2c_get_byte */
3219 + 0x57f000f8,
3220 + 0x0847f000,
3221 +/* 0x0910: i2c_get_byte_next */
3222 + 0xbb0154b6,
3223 0x65b60076,
3224 0x9450f904,
3225 0x56bb0465,
3226 0xfd50bd02,
3227 0x50fc0475,
3228 - 0x088421f5,
3229 + 0x08c321f5,
3230 0xf40464b6,
3231 - 0x46b03411,
3232 - 0xd81bf400,
3233 - 0xb60076bb,
3234 - 0x50f90465,
3235 - 0xbb046594,
3236 - 0x50bd0256,
3237 - 0xfc0475fd,
3238 - 0xc521f550,
3239 - 0x0464b608,
3240 - 0xbb0f11f4,
3241 - 0x36b00076,
3242 - 0x061bf401,
3243 -/* 0x09b7: i2c_put_byte_done */
3244 - 0xf80132f4,
3245 -/* 0x09b9: i2c_addr */
3246 - 0x0076bb00,
3247 + 0x53fd2b11,
3248 + 0x0142b605,
3249 + 0xf0d81bf4,
3250 + 0x76bb0137,
3251 + 0x0465b600,
3252 + 0x659450f9,
3253 + 0x0256bb04,
3254 + 0x75fd50bd,
3255 + 0xf550fc04,
3256 + 0xb6088221,
3257 +/* 0x095a: i2c_get_byte_done */
3258 + 0x00f80464,
3259 +/* 0x095c: i2c_put_byte */
3260 +/* 0x095f: i2c_put_byte_next */
3261 + 0xb60847f0,
3262 + 0x54ff0142,
3263 + 0x0076bb38,
3264 0xf90465b6,
3265 0x04659450,
3266 0xbd0256bb,
3267 0x0475fd50,
3268 0x21f550fc,
3269 - 0x64b607f5,
3270 - 0x2911f404,
3271 - 0x012ec3e7,
3272 - 0xfd0134b6,
3273 - 0x76bb0553,
3274 + 0x64b60882,
3275 + 0x3411f404,
3276 + 0xf40046b0,
3277 + 0x76bbd81b,
3278 0x0465b600,
3279 0x659450f9,
3280 0x0256bb04,
3281 0x75fd50bd,
3282 0xf550fc04,
3283 - 0xb6095e21,
3284 -/* 0x09fe: i2c_addr_done */
3285 - 0x00f80464,
3286 -/* 0x0a00: i2c_acquire_addr */
3287 - 0xb6f8cec7,
3288 - 0xe0b702e4,
3289 - 0xee980d1c,
3290 -/* 0x0a0f: i2c_acquire */
3291 - 0xf500f800,
3292 - 0xf40a0021,
3293 - 0xd9f00421,
3294 - 0x4021f403,
3295 -/* 0x0a1e: i2c_release */
3296 - 0x21f500f8,
3297 - 0x21f40a00,
3298 - 0x03daf004,
3299 - 0xf84021f4,
3300 -/* 0x0a2d: i2c_recv */
3301 - 0x0132f400,
3302 - 0xb6f8c1c7,
3303 - 0x16b00214,
3304 - 0x3a1ff528,
3305 - 0xf413a001,
3306 - 0x0032980c,
3307 - 0x0ccc13a0,
3308 - 0xf4003198,
3309 - 0xd0f90231,
3310 - 0xd0f9e0f9,
3311 - 0x000067f1,
3312 - 0x100063f1,
3313 - 0xbb016792,
3314 + 0xb608c321,
3315 + 0x11f40464,
3316 + 0x0076bb0f,
3317 + 0xf40136b0,
3318 + 0x32f4061b,
3319 +/* 0x09b5: i2c_put_byte_done */
3320 +/* 0x09b7: i2c_addr */
3321 + 0xbb00f801,
3322 0x65b60076,
3323 0x9450f904,
3324 0x56bb0465,
3325 0xfd50bd02,
3326 0x50fc0475,
3327 - 0x0a0f21f5,
3328 - 0xfc0464b6,
3329 - 0x00d6b0d0,
3330 - 0x00b31bf5,
3331 - 0xbb0057f0,
3332 - 0x65b60076,
3333 - 0x9450f904,
3334 - 0x56bb0465,
3335 - 0xfd50bd02,
3336 - 0x50fc0475,
3337 - 0x09b921f5,
3338 - 0xf50464b6,
3339 - 0xc700d011,
3340 - 0x76bbe0c5,
3341 - 0x0465b600,
3342 - 0x659450f9,
3343 - 0x0256bb04,
3344 - 0x75fd50bd,
3345 - 0xf550fc04,
3346 - 0xb6095e21,
3347 - 0x11f50464,
3348 - 0x57f000ad,
3349 + 0x07f321f5,
3350 + 0xf40464b6,
3351 + 0xc3e72911,
3352 + 0x34b6012e,
3353 + 0x0553fd01,
3354 + 0xb60076bb,
3355 + 0x50f90465,
3356 + 0xbb046594,
3357 + 0x50bd0256,
3358 + 0xfc0475fd,
3359 + 0x5c21f550,
3360 + 0x0464b609,
3361 +/* 0x09fc: i2c_addr_done */
3362 +/* 0x09fe: i2c_acquire_addr */
3363 + 0xcec700f8,
3364 + 0x02e4b6f8,
3365 + 0x0d1ce0b7,
3366 + 0xf800ee98,
3367 +/* 0x0a0d: i2c_acquire */
3368 + 0xfe21f500,
3369 + 0x0421f409,
3370 + 0xf403d9f0,
3371 + 0x00f84021,
3372 +/* 0x0a1c: i2c_release */
3373 + 0x09fe21f5,
3374 + 0xf00421f4,
3375 + 0x21f403da,
3376 +/* 0x0a2b: i2c_recv */
3377 + 0xf400f840,
3378 + 0xc1c70132,
3379 + 0x0214b6f8,
3380 + 0xf52816b0,
3381 + 0xa0013a1f,
3382 + 0x980cf413,
3383 + 0x13a00032,
3384 + 0x31980ccc,
3385 + 0x0231f400,
3386 + 0xe0f9d0f9,
3387 + 0x67f1d0f9,
3388 + 0x63f10000,
3389 + 0x67921000,
3390 0x0076bb01,
3391 0xf90465b6,
3392 0x04659450,
3393 0xbd0256bb,
3394 0x0475fd50,
3395 0x21f550fc,
3396 - 0x64b609b9,
3397 - 0x8a11f504,
3398 + 0x64b60a0d,
3399 + 0xb0d0fc04,
3400 + 0x1bf500d6,
3401 + 0x57f000b3,
3402 0x0076bb00,
3403 0xf90465b6,
3404 0x04659450,
3405 0xbd0256bb,
3406 0x0475fd50,
3407 0x21f550fc,
3408 - 0x64b6090c,
3409 - 0x6a11f404,
3410 - 0xbbe05bcb,
3411 + 0x64b609b7,
3412 + 0xd011f504,
3413 + 0xe0c5c700,
3414 + 0xb60076bb,
3415 + 0x50f90465,
3416 + 0xbb046594,
3417 + 0x50bd0256,
3418 + 0xfc0475fd,
3419 + 0x5c21f550,
3420 + 0x0464b609,
3421 + 0x00ad11f5,
3422 + 0xbb0157f0,
3423 0x65b60076,
3424 0x9450f904,
3425 0x56bb0465,
3426 0xfd50bd02,
3427 0x50fc0475,
3428 - 0x085121f5,
3429 - 0xb90464b6,
3430 - 0x74bd025b,
3431 -/* 0x0b33: i2c_recv_not_rd08 */
3432 - 0xb0430ef4,
3433 - 0x1bf401d6,
3434 - 0x0057f03d,
3435 - 0x09b921f5,
3436 - 0xc73311f4,
3437 - 0x21f5e0c5,
3438 - 0x11f4095e,
3439 - 0x0057f029,
3440 - 0x09b921f5,
3441 - 0xc71f11f4,
3442 - 0x21f5e0b5,
3443 - 0x11f4095e,
3444 - 0x5121f515,
3445 - 0xc774bd08,
3446 - 0x1bf408c5,
3447 - 0x0232f409,
3448 -/* 0x0b73: i2c_recv_not_wr08 */
3449 -/* 0x0b73: i2c_recv_done */
3450 - 0xc7030ef4,
3451 - 0x21f5f8ce,
3452 - 0xe0fc0a1e,
3453 - 0x12f4d0fc,
3454 - 0x027cb90a,
3455 - 0x033621f5,
3456 -/* 0x0b88: i2c_recv_exit */
3457 -/* 0x0b8a: i2c_init */
3458 - 0x00f800f8,
3459 -/* 0x0b8c: test_recv */
3460 - 0x05d817f1,
3461 + 0x09b721f5,
3462 + 0xf50464b6,
3463 + 0xbb008a11,
3464 + 0x65b60076,
3465 + 0x9450f904,
3466 + 0x56bb0465,
3467 + 0xfd50bd02,
3468 + 0x50fc0475,
3469 + 0x090a21f5,
3470 + 0xf40464b6,
3471 + 0x5bcb6a11,
3472 + 0x0076bbe0,
3473 + 0xf90465b6,
3474 + 0x04659450,
3475 + 0xbd0256bb,
3476 + 0x0475fd50,
3477 + 0x21f550fc,
3478 + 0x64b6084f,
3479 + 0x025bb904,
3480 + 0x0ef474bd,
3481 +/* 0x0b31: i2c_recv_not_rd08 */
3482 + 0x01d6b043,
3483 + 0xf03d1bf4,
3484 + 0x21f50057,
3485 + 0x11f409b7,
3486 + 0xe0c5c733,
3487 + 0x095c21f5,
3488 + 0xf02911f4,
3489 + 0x21f50057,
3490 + 0x11f409b7,
3491 + 0xe0b5c71f,
3492 + 0x095c21f5,
3493 + 0xf51511f4,
3494 + 0xbd084f21,
3495 + 0x08c5c774,
3496 + 0xf4091bf4,
3497 + 0x0ef40232,
3498 +/* 0x0b71: i2c_recv_not_wr08 */
3499 +/* 0x0b71: i2c_recv_done */
3500 + 0xf8cec703,
3501 + 0x0a1c21f5,
3502 + 0xd0fce0fc,
3503 + 0xb90a12f4,
3504 + 0x21f5027c,
3505 +/* 0x0b86: i2c_recv_exit */
3506 + 0x00f80336,
3507 +/* 0x0b88: i2c_init */
3508 +/* 0x0b8a: test_recv */
3509 + 0x17f100f8,
3510 + 0x14b605d8,
3511 + 0x0011cf06,
3512 + 0xf10110b6,
3513 + 0xb605d807,
3514 + 0x01d00604,
3515 + 0xf104bd00,
3516 + 0xf1d900e7,
3517 + 0xf5134fe3,
3518 + 0xf8025621,
3519 +/* 0x0bb1: test_init */
3520 + 0x00e7f100,
3521 + 0x5621f508,
3522 +/* 0x0bbb: idle_recv */
3523 + 0xf800f802,
3524 +/* 0x0bbd: idle */
3525 + 0x0031f400,
3526 + 0x05d417f1,
3527 0xcf0614b6,
3528 0x10b60011,
3529 - 0xd807f101,
3530 + 0xd407f101,
3531 0x0604b605,
3532 0xbd0001d0,
3533 - 0x00e7f104,
3534 - 0x4fe3f1d9,
3535 - 0x5621f513,
3536 -/* 0x0bb3: test_init */
3537 - 0xf100f802,
3538 - 0xf50800e7,
3539 - 0xf8025621,
3540 -/* 0x0bbd: idle_recv */
3541 -/* 0x0bbf: idle */
3542 - 0xf400f800,
3543 - 0x17f10031,
3544 - 0x14b605d4,
3545 - 0x0011cf06,
3546 - 0xf10110b6,
3547 - 0xb605d407,
3548 - 0x01d00604,
3549 -/* 0x0bdb: idle_loop */
3550 - 0xf004bd00,
3551 - 0x32f45817,
3552 -/* 0x0be1: idle_proc */
3553 -/* 0x0be1: idle_proc_exec */
3554 - 0xb910f902,
3555 - 0x21f5021e,
3556 - 0x10fc033f,
3557 - 0xf40911f4,
3558 - 0x0ef40231,
3559 -/* 0x0bf5: idle_proc_next */
3560 - 0x5810b6ef,
3561 - 0xf4061fb8,
3562 - 0x02f4e61b,
3563 - 0x0028f4dd,
3564 - 0x00bb0ef4,
3565 +/* 0x0bd9: idle_loop */
3566 + 0x5817f004,
3567 +/* 0x0bdf: idle_proc */
3568 +/* 0x0bdf: idle_proc_exec */
3569 + 0xf90232f4,
3570 + 0x021eb910,
3571 + 0x033f21f5,
3572 + 0x11f410fc,
3573 + 0x0231f409,
3574 +/* 0x0bf3: idle_proc_next */
3575 + 0xb6ef0ef4,
3576 + 0x1fb85810,
3577 + 0xe61bf406,
3578 + 0xf4dd02f4,
3579 + 0x0ef40028,
3580 + 0x000000bb,
3581 0x00000000,
3582 0x00000000,
3583 0x00000000,
3584 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
3585 index 3c731ff12871..958222415a34 100644
3586 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
3587 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
3588 @@ -46,8 +46,8 @@ uint32_t gk208_pmu_data[] = {
3589 0x00000000,
3590 0x00000000,
3591 0x584d454d,
3592 - 0x000005f3,
3593 - 0x000005e5,
3594 + 0x000005ee,
3595 + 0x000005e0,
3596 0x00000000,
3597 0x00000000,
3598 0x00000000,
3599 @@ -68,8 +68,8 @@ uint32_t gk208_pmu_data[] = {
3600 0x00000000,
3601 0x00000000,
3602 0x46524550,
3603 - 0x000005f7,
3604 - 0x000005f5,
3605 + 0x000005f2,
3606 + 0x000005f0,
3607 0x00000000,
3608 0x00000000,
3609 0x00000000,
3610 @@ -90,8 +90,8 @@ uint32_t gk208_pmu_data[] = {
3611 0x00000000,
3612 0x00000000,
3613 0x5f433249,
3614 - 0x000009f8,
3615 - 0x000008a2,
3616 + 0x000009f3,
3617 + 0x0000089d,
3618 0x00000000,
3619 0x00000000,
3620 0x00000000,
3621 @@ -112,8 +112,8 @@ uint32_t gk208_pmu_data[] = {
3622 0x00000000,
3623 0x00000000,
3624 0x54534554,
3625 - 0x00000a16,
3626 - 0x000009fa,
3627 + 0x00000a11,
3628 + 0x000009f5,
3629 0x00000000,
3630 0x00000000,
3631 0x00000000,
3632 @@ -134,8 +134,8 @@ uint32_t gk208_pmu_data[] = {
3633 0x00000000,
3634 0x00000000,
3635 0x454c4449,
3636 - 0x00000a21,
3637 - 0x00000a1f,
3638 + 0x00000a1c,
3639 + 0x00000a1a,
3640 0x00000000,
3641 0x00000000,
3642 0x00000000,
3643 @@ -233,22 +233,22 @@ uint32_t gk208_pmu_data[] = {
3644 /* 0x037c: memx_func_next */
3645 0x00000002,
3646 0x00000000,
3647 - 0x000004cf,
3648 + 0x000004cc,
3649 0x00000003,
3650 0x00000002,
3651 - 0x00000546,
3652 + 0x00000541,
3653 0x00040004,
3654 0x00000000,
3655 - 0x00000563,
3656 + 0x0000055e,
3657 0x00010005,
3658 0x00000000,
3659 - 0x0000057d,
3660 + 0x00000578,
3661 0x00010006,
3662 0x00000000,
3663 - 0x00000541,
3664 + 0x0000053c,
3665 0x00000007,
3666 0x00000000,
3667 - 0x00000589,
3668 + 0x00000584,
3669 /* 0x03c4: memx_func_tail */
3670 /* 0x03c4: memx_ts_start */
3671 0x00000000,
3672 @@ -1238,454 +1238,454 @@ uint32_t gk208_pmu_code[] = {
3673 0x0001f604,
3674 0x00f804bd,
3675 /* 0x045c: memx_func_enter */
3676 - 0x162067f1,
3677 - 0xf55d77f1,
3678 - 0x047e6eb2,
3679 - 0xd8b20000,
3680 - 0xf90487fd,
3681 - 0xfc80f960,
3682 - 0x7ee0fcd0,
3683 - 0x0700002d,
3684 - 0x7e6eb2fe,
3685 + 0x47162046,
3686 + 0x6eb2f55d,
3687 + 0x0000047e,
3688 + 0x87fdd8b2,
3689 + 0xf960f904,
3690 + 0xfcd0fc80,
3691 + 0x002d7ee0,
3692 + 0xb2fe0700,
3693 + 0x00047e6e,
3694 + 0xfdd8b200,
3695 + 0x60f90487,
3696 + 0xd0fc80f9,
3697 + 0x2d7ee0fc,
3698 + 0xf0460000,
3699 + 0x7e6eb226,
3700 0xb2000004,
3701 0x0487fdd8,
3702 0x80f960f9,
3703 0xe0fcd0fc,
3704 0x00002d7e,
3705 - 0x26f067f1,
3706 - 0x047e6eb2,
3707 - 0xd8b20000,
3708 - 0xf90487fd,
3709 - 0xfc80f960,
3710 - 0x7ee0fcd0,
3711 - 0x0600002d,
3712 - 0x07e04004,
3713 - 0xbd0006f6,
3714 -/* 0x04b9: memx_func_enter_wait */
3715 - 0x07c04604,
3716 - 0xf00066cf,
3717 - 0x0bf40464,
3718 - 0xcf2c06f7,
3719 - 0x06b50066,
3720 -/* 0x04cf: memx_func_leave */
3721 - 0x0600f8f1,
3722 - 0x0066cf2c,
3723 - 0x06f206b5,
3724 - 0x07e44004,
3725 - 0xbd0006f6,
3726 -/* 0x04e1: memx_func_leave_wait */
3727 - 0x07c04604,
3728 - 0xf00066cf,
3729 - 0x1bf40464,
3730 - 0xf067f1f7,
3731 + 0xe0400406,
3732 + 0x0006f607,
3733 +/* 0x04b6: memx_func_enter_wait */
3734 + 0xc04604bd,
3735 + 0x0066cf07,
3736 + 0xf40464f0,
3737 + 0x2c06f70b,
3738 + 0xb50066cf,
3739 + 0x00f8f106,
3740 +/* 0x04cc: memx_func_leave */
3741 + 0x66cf2c06,
3742 + 0xf206b500,
3743 + 0xe4400406,
3744 + 0x0006f607,
3745 +/* 0x04de: memx_func_leave_wait */
3746 + 0xc04604bd,
3747 + 0x0066cf07,
3748 + 0xf40464f0,
3749 + 0xf046f71b,
3750 0xb2010726,
3751 0x00047e6e,
3752 0xfdd8b200,
3753 0x60f90587,
3754 0xd0fc80f9,
3755 0x2d7ee0fc,
3756 - 0x67f10000,
3757 - 0x6eb21620,
3758 - 0x0000047e,
3759 - 0x87fdd8b2,
3760 - 0xf960f905,
3761 - 0xfcd0fc80,
3762 - 0x002d7ee0,
3763 - 0x0aa24700,
3764 - 0x047e6eb2,
3765 - 0xd8b20000,
3766 - 0xf90587fd,
3767 - 0xfc80f960,
3768 - 0x7ee0fcd0,
3769 - 0xf800002d,
3770 -/* 0x0541: memx_func_wait_vblank */
3771 + 0x20460000,
3772 + 0x7e6eb216,
3773 + 0xb2000004,
3774 + 0x0587fdd8,
3775 + 0x80f960f9,
3776 + 0xe0fcd0fc,
3777 + 0x00002d7e,
3778 + 0xb20aa247,
3779 + 0x00047e6e,
3780 + 0xfdd8b200,
3781 + 0x60f90587,
3782 + 0xd0fc80f9,
3783 + 0x2d7ee0fc,
3784 + 0x00f80000,
3785 +/* 0x053c: memx_func_wait_vblank */
3786 + 0xf80410b6,
3787 +/* 0x0541: memx_func_wr32 */
3788 + 0x00169800,
3789 + 0xb6011598,
3790 + 0x60f90810,
3791 + 0xd0fc50f9,
3792 + 0x2d7ee0fc,
3793 + 0x42b60000,
3794 + 0xe81bf402,
3795 +/* 0x055e: memx_func_wait */
3796 + 0x2c0800f8,
3797 + 0x980088cf,
3798 + 0x1d98001e,
3799 + 0x021c9801,
3800 + 0xb6031b98,
3801 + 0x747e1010,
3802 + 0x00f80000,
3803 +/* 0x0578: memx_func_delay */
3804 + 0xb6001e98,
3805 + 0x587e0410,
3806 + 0x00f80000,
3807 +/* 0x0584: memx_func_train */
3808 +/* 0x0586: memx_exec */
3809 + 0xe0f900f8,
3810 + 0xc1b2d0f9,
3811 +/* 0x058e: memx_exec_next */
3812 + 0x1398b2b2,
3813 0x0410b600,
3814 -/* 0x0546: memx_func_wr32 */
3815 - 0x169800f8,
3816 - 0x01159800,
3817 - 0xf90810b6,
3818 - 0xfc50f960,
3819 + 0x01f034e7,
3820 + 0x01e033e7,
3821 + 0xf00132b6,
3822 + 0x35980c30,
3823 + 0xa655f9de,
3824 + 0xe51ef412,
3825 + 0x98f10b98,
3826 + 0xcbbbf20c,
3827 + 0x07c44b02,
3828 + 0xfc00bbcf,
3829 0x7ee0fcd0,
3830 - 0xb600002d,
3831 - 0x1bf40242,
3832 -/* 0x0563: memx_func_wait */
3833 - 0x0800f8e8,
3834 - 0x0088cf2c,
3835 - 0x98001e98,
3836 - 0x1c98011d,
3837 - 0x031b9802,
3838 - 0x7e1010b6,
3839 - 0xf8000074,
3840 -/* 0x057d: memx_func_delay */
3841 - 0x001e9800,
3842 - 0x7e0410b6,
3843 - 0xf8000058,
3844 -/* 0x0589: memx_func_train */
3845 -/* 0x058b: memx_exec */
3846 - 0xf900f800,
3847 - 0xb2d0f9e0,
3848 -/* 0x0593: memx_exec_next */
3849 - 0x98b2b2c1,
3850 - 0x10b60013,
3851 - 0xf034e704,
3852 - 0xe033e701,
3853 - 0x0132b601,
3854 - 0x980c30f0,
3855 - 0x55f9de35,
3856 - 0x1ef412a6,
3857 - 0xf10b98e5,
3858 - 0xbbf20c98,
3859 - 0xc44b02cb,
3860 - 0x00bbcf07,
3861 - 0xe0fcd0fc,
3862 - 0x00029f7e,
3863 -/* 0x05ca: memx_info */
3864 - 0xc67000f8,
3865 - 0x0c0bf401,
3866 -/* 0x05d0: memx_info_data */
3867 - 0x4b03cc4c,
3868 - 0x0ef40800,
3869 -/* 0x05d9: memx_info_train */
3870 - 0x0bcc4c09,
3871 -/* 0x05df: memx_info_send */
3872 - 0x7e01004b,
3873 0xf800029f,
3874 -/* 0x05e5: memx_recv */
3875 - 0x01d6b000,
3876 - 0xb0a30bf4,
3877 - 0x0bf400d6,
3878 -/* 0x05f3: memx_init */
3879 - 0xf800f8dc,
3880 -/* 0x05f5: perf_recv */
3881 -/* 0x05f7: perf_init */
3882 - 0xf800f800,
3883 -/* 0x05f9: i2c_drive_scl */
3884 - 0x0036b000,
3885 - 0x400d0bf4,
3886 - 0x01f607e0,
3887 - 0xf804bd00,
3888 -/* 0x0609: i2c_drive_scl_lo */
3889 - 0x07e44000,
3890 - 0xbd0001f6,
3891 -/* 0x0613: i2c_drive_sda */
3892 - 0xb000f804,
3893 - 0x0bf40036,
3894 - 0x07e0400d,
3895 - 0xbd0002f6,
3896 -/* 0x0623: i2c_drive_sda_lo */
3897 - 0x4000f804,
3898 - 0x02f607e4,
3899 - 0xf804bd00,
3900 -/* 0x062d: i2c_sense_scl */
3901 - 0x0132f400,
3902 - 0xcf07c443,
3903 - 0x31fd0033,
3904 - 0x060bf404,
3905 -/* 0x063f: i2c_sense_scl_done */
3906 - 0xf80131f4,
3907 -/* 0x0641: i2c_sense_sda */
3908 - 0x0132f400,
3909 - 0xcf07c443,
3910 - 0x32fd0033,
3911 - 0x060bf404,
3912 -/* 0x0653: i2c_sense_sda_done */
3913 - 0xf80131f4,
3914 -/* 0x0655: i2c_raise_scl */
3915 - 0x4440f900,
3916 - 0x01030898,
3917 - 0x0005f97e,
3918 -/* 0x0660: i2c_raise_scl_wait */
3919 - 0x7e03e84e,
3920 - 0x7e000058,
3921 - 0xf400062d,
3922 - 0x42b60901,
3923 - 0xef1bf401,
3924 -/* 0x0674: i2c_raise_scl_done */
3925 - 0x00f840fc,
3926 -/* 0x0678: i2c_start */
3927 - 0x00062d7e,
3928 - 0x7e0d11f4,
3929 - 0xf4000641,
3930 - 0x0ef40611,
3931 -/* 0x0689: i2c_start_rep */
3932 - 0x7e00032e,
3933 - 0x030005f9,
3934 - 0x06137e01,
3935 +/* 0x05c5: memx_info */
3936 + 0x01c67000,
3937 +/* 0x05cb: memx_info_data */
3938 + 0x4c0c0bf4,
3939 + 0x004b03cc,
3940 + 0x090ef408,
3941 +/* 0x05d4: memx_info_train */
3942 + 0x4b0bcc4c,
3943 +/* 0x05da: memx_info_send */
3944 + 0x9f7e0100,
3945 + 0x00f80002,
3946 +/* 0x05e0: memx_recv */
3947 + 0xf401d6b0,
3948 + 0xd6b0a30b,
3949 + 0xdc0bf400,
3950 +/* 0x05ee: memx_init */
3951 + 0x00f800f8,
3952 +/* 0x05f0: perf_recv */
3953 +/* 0x05f2: perf_init */
3954 + 0x00f800f8,
3955 +/* 0x05f4: i2c_drive_scl */
3956 + 0xf40036b0,
3957 + 0xe0400d0b,
3958 + 0x0001f607,
3959 + 0x00f804bd,
3960 +/* 0x0604: i2c_drive_scl_lo */
3961 + 0xf607e440,
3962 + 0x04bd0001,
3963 +/* 0x060e: i2c_drive_sda */
3964 + 0x36b000f8,
3965 + 0x0d0bf400,
3966 + 0xf607e040,
3967 + 0x04bd0002,
3968 +/* 0x061e: i2c_drive_sda_lo */
3969 + 0xe44000f8,
3970 + 0x0002f607,
3971 + 0x00f804bd,
3972 +/* 0x0628: i2c_sense_scl */
3973 + 0x430132f4,
3974 + 0x33cf07c4,
3975 + 0x0431fd00,
3976 + 0xf4060bf4,
3977 +/* 0x063a: i2c_sense_scl_done */
3978 + 0x00f80131,
3979 +/* 0x063c: i2c_sense_sda */
3980 + 0x430132f4,
3981 + 0x33cf07c4,
3982 + 0x0432fd00,
3983 + 0xf4060bf4,
3984 +/* 0x064e: i2c_sense_sda_done */
3985 + 0x00f80131,
3986 +/* 0x0650: i2c_raise_scl */
3987 + 0x984440f9,
3988 + 0x7e010308,
3989 +/* 0x065b: i2c_raise_scl_wait */
3990 + 0x4e0005f4,
3991 + 0x587e03e8,
3992 + 0x287e0000,
3993 + 0x01f40006,
3994 + 0x0142b609,
3995 +/* 0x066f: i2c_raise_scl_done */
3996 + 0xfcef1bf4,
3997 +/* 0x0673: i2c_start */
3998 + 0x7e00f840,
3999 + 0xf4000628,
4000 + 0x3c7e0d11,
4001 + 0x11f40006,
4002 + 0x2e0ef406,
4003 +/* 0x0684: i2c_start_rep */
4004 + 0xf47e0003,
4005 + 0x01030005,
4006 + 0x00060e7e,
4007 + 0xb60076bb,
4008 + 0x50f90465,
4009 + 0xbb046594,
4010 + 0x50bd0256,
4011 + 0xfc0475fd,
4012 + 0x06507e50,
4013 + 0x0464b600,
4014 +/* 0x06af: i2c_start_send */
4015 + 0x031d11f4,
4016 + 0x060e7e00,
4017 + 0x13884e00,
4018 + 0x0000587e,
4019 + 0xf47e0003,
4020 + 0x884e0005,
4021 + 0x00587e13,
4022 +/* 0x06c9: i2c_start_out */
4023 +/* 0x06cb: i2c_stop */
4024 + 0x0300f800,
4025 + 0x05f47e00,
4026 + 0x7e000300,
4027 + 0x4e00060e,
4028 + 0x587e03e8,
4029 + 0x01030000,
4030 + 0x0005f47e,
4031 + 0x7e13884e,
4032 + 0x03000058,
4033 + 0x060e7e01,
4034 + 0x13884e00,
4035 + 0x0000587e,
4036 +/* 0x06fa: i2c_bitw */
4037 + 0x0e7e00f8,
4038 + 0xe84e0006,
4039 + 0x00587e03,
4040 0x0076bb00,
4041 0xf90465b6,
4042 0x04659450,
4043 0xbd0256bb,
4044 0x0475fd50,
4045 - 0x557e50fc,
4046 + 0x507e50fc,
4047 0x64b60006,
4048 - 0x1d11f404,
4049 -/* 0x06b4: i2c_start_send */
4050 - 0x137e0003,
4051 - 0x884e0006,
4052 - 0x00587e13,
4053 - 0x7e000300,
4054 - 0x4e0005f9,
4055 - 0x587e1388,
4056 -/* 0x06ce: i2c_start_out */
4057 - 0x00f80000,
4058 -/* 0x06d0: i2c_stop */
4059 - 0xf97e0003,
4060 - 0x00030005,
4061 - 0x0006137e,
4062 - 0x7e03e84e,
4063 + 0x1711f404,
4064 + 0x7e13884e,
4065 0x03000058,
4066 - 0x05f97e01,
4067 + 0x05f47e00,
4068 0x13884e00,
4069 0x0000587e,
4070 - 0x137e0103,
4071 - 0x884e0006,
4072 - 0x00587e13,
4073 -/* 0x06ff: i2c_bitw */
4074 - 0x7e00f800,
4075 - 0x4e000613,
4076 - 0x587e03e8,
4077 - 0x76bb0000,
4078 +/* 0x0738: i2c_bitw_out */
4079 +/* 0x073a: i2c_bitr */
4080 + 0x010300f8,
4081 + 0x00060e7e,
4082 + 0x7e03e84e,
4083 + 0xbb000058,
4084 + 0x65b60076,
4085 + 0x9450f904,
4086 + 0x56bb0465,
4087 + 0xfd50bd02,
4088 + 0x50fc0475,
4089 + 0x0006507e,
4090 + 0xf40464b6,
4091 + 0x3c7e1a11,
4092 + 0x00030006,
4093 + 0x0005f47e,
4094 + 0x7e13884e,
4095 + 0xf0000058,
4096 + 0x31f4013c,
4097 +/* 0x077d: i2c_bitr_done */
4098 +/* 0x077f: i2c_get_byte */
4099 + 0x0500f801,
4100 +/* 0x0783: i2c_get_byte_next */
4101 + 0xb6080400,
4102 + 0x76bb0154,
4103 0x0465b600,
4104 0x659450f9,
4105 0x0256bb04,
4106 0x75fd50bd,
4107 0x7e50fc04,
4108 - 0xb6000655,
4109 + 0xb600073a,
4110 0x11f40464,
4111 - 0x13884e17,
4112 - 0x0000587e,
4113 - 0xf97e0003,
4114 - 0x884e0005,
4115 - 0x00587e13,
4116 -/* 0x073d: i2c_bitw_out */
4117 -/* 0x073f: i2c_bitr */
4118 - 0x0300f800,
4119 - 0x06137e01,
4120 - 0x03e84e00,
4121 - 0x0000587e,
4122 + 0x0553fd2a,
4123 + 0xf40142b6,
4124 + 0x0103d81b,
4125 0xb60076bb,
4126 0x50f90465,
4127 0xbb046594,
4128 0x50bd0256,
4129 0xfc0475fd,
4130 - 0x06557e50,
4131 + 0x06fa7e50,
4132 0x0464b600,
4133 - 0x7e1a11f4,
4134 - 0x03000641,
4135 - 0x05f97e00,
4136 - 0x13884e00,
4137 - 0x0000587e,
4138 - 0xf4013cf0,
4139 -/* 0x0782: i2c_bitr_done */
4140 - 0x00f80131,
4141 -/* 0x0784: i2c_get_byte */
4142 - 0x08040005,
4143 -/* 0x0788: i2c_get_byte_next */
4144 - 0xbb0154b6,
4145 - 0x65b60076,
4146 - 0x9450f904,
4147 - 0x56bb0465,
4148 - 0xfd50bd02,
4149 - 0x50fc0475,
4150 - 0x00073f7e,
4151 - 0xf40464b6,
4152 - 0x53fd2a11,
4153 - 0x0142b605,
4154 - 0x03d81bf4,
4155 - 0x0076bb01,
4156 - 0xf90465b6,
4157 - 0x04659450,
4158 - 0xbd0256bb,
4159 - 0x0475fd50,
4160 - 0xff7e50fc,
4161 - 0x64b60006,
4162 -/* 0x07d1: i2c_get_byte_done */
4163 -/* 0x07d3: i2c_put_byte */
4164 - 0x0400f804,
4165 -/* 0x07d5: i2c_put_byte_next */
4166 - 0x0142b608,
4167 - 0xbb3854ff,
4168 +/* 0x07cc: i2c_get_byte_done */
4169 +/* 0x07ce: i2c_put_byte */
4170 + 0x080400f8,
4171 +/* 0x07d0: i2c_put_byte_next */
4172 + 0xff0142b6,
4173 + 0x76bb3854,
4174 + 0x0465b600,
4175 + 0x659450f9,
4176 + 0x0256bb04,
4177 + 0x75fd50bd,
4178 + 0x7e50fc04,
4179 + 0xb60006fa,
4180 + 0x11f40464,
4181 + 0x0046b034,
4182 + 0xbbd81bf4,
4183 0x65b60076,
4184 0x9450f904,
4185 0x56bb0465,
4186 0xfd50bd02,
4187 0x50fc0475,
4188 - 0x0006ff7e,
4189 + 0x00073a7e,
4190 0xf40464b6,
4191 - 0x46b03411,
4192 - 0xd81bf400,
4193 + 0x76bb0f11,
4194 + 0x0136b000,
4195 + 0xf4061bf4,
4196 +/* 0x0826: i2c_put_byte_done */
4197 + 0x00f80132,
4198 +/* 0x0828: i2c_addr */
4199 0xb60076bb,
4200 0x50f90465,
4201 0xbb046594,
4202 0x50bd0256,
4203 0xfc0475fd,
4204 - 0x073f7e50,
4205 + 0x06737e50,
4206 0x0464b600,
4207 - 0xbb0f11f4,
4208 - 0x36b00076,
4209 - 0x061bf401,
4210 -/* 0x082b: i2c_put_byte_done */
4211 - 0xf80132f4,
4212 -/* 0x082d: i2c_addr */
4213 - 0x0076bb00,
4214 + 0xe72911f4,
4215 + 0xb6012ec3,
4216 + 0x53fd0134,
4217 + 0x0076bb05,
4218 0xf90465b6,
4219 0x04659450,
4220 0xbd0256bb,
4221 0x0475fd50,
4222 - 0x787e50fc,
4223 - 0x64b60006,
4224 - 0x2911f404,
4225 - 0x012ec3e7,
4226 - 0xfd0134b6,
4227 - 0x76bb0553,
4228 - 0x0465b600,
4229 - 0x659450f9,
4230 - 0x0256bb04,
4231 - 0x75fd50bd,
4232 - 0x7e50fc04,
4233 - 0xb60007d3,
4234 -/* 0x0872: i2c_addr_done */
4235 - 0x00f80464,
4236 -/* 0x0874: i2c_acquire_addr */
4237 - 0xb6f8cec7,
4238 - 0xe0b705e4,
4239 - 0x00f8d014,
4240 -/* 0x0880: i2c_acquire */
4241 - 0x0008747e,
4242 + 0xce7e50fc,
4243 + 0x64b60007,
4244 +/* 0x086d: i2c_addr_done */
4245 +/* 0x086f: i2c_acquire_addr */
4246 + 0xc700f804,
4247 + 0xe4b6f8ce,
4248 + 0x14e0b705,
4249 +/* 0x087b: i2c_acquire */
4250 + 0x7e00f8d0,
4251 + 0x7e00086f,
4252 + 0xf0000004,
4253 + 0x2d7e03d9,
4254 + 0x00f80000,
4255 +/* 0x088c: i2c_release */
4256 + 0x00086f7e,
4257 0x0000047e,
4258 - 0x7e03d9f0,
4259 + 0x7e03daf0,
4260 0xf800002d,
4261 -/* 0x0891: i2c_release */
4262 - 0x08747e00,
4263 - 0x00047e00,
4264 - 0x03daf000,
4265 - 0x00002d7e,
4266 -/* 0x08a2: i2c_recv */
4267 - 0x32f400f8,
4268 - 0xf8c1c701,
4269 - 0xb00214b6,
4270 - 0x1ff52816,
4271 - 0x13b80134,
4272 - 0x98000cf4,
4273 - 0x13b80032,
4274 - 0x98000ccc,
4275 - 0x31f40031,
4276 - 0xf9d0f902,
4277 - 0xd6d0f9e0,
4278 - 0x10000000,
4279 - 0xbb016792,
4280 - 0x65b60076,
4281 - 0x9450f904,
4282 - 0x56bb0465,
4283 - 0xfd50bd02,
4284 - 0x50fc0475,
4285 - 0x0008807e,
4286 - 0xfc0464b6,
4287 - 0x00d6b0d0,
4288 - 0x00b01bf5,
4289 - 0x76bb0005,
4290 +/* 0x089d: i2c_recv */
4291 + 0x0132f400,
4292 + 0xb6f8c1c7,
4293 + 0x16b00214,
4294 + 0x341ff528,
4295 + 0xf413b801,
4296 + 0x3298000c,
4297 + 0xcc13b800,
4298 + 0x3198000c,
4299 + 0x0231f400,
4300 + 0xe0f9d0f9,
4301 + 0x00d6d0f9,
4302 + 0x92100000,
4303 + 0x76bb0167,
4304 0x0465b600,
4305 0x659450f9,
4306 0x0256bb04,
4307 0x75fd50bd,
4308 0x7e50fc04,
4309 - 0xb600082d,
4310 - 0x11f50464,
4311 - 0xc5c700cc,
4312 - 0x0076bbe0,
4313 - 0xf90465b6,
4314 - 0x04659450,
4315 - 0xbd0256bb,
4316 - 0x0475fd50,
4317 - 0xd37e50fc,
4318 - 0x64b60007,
4319 - 0xa911f504,
4320 - 0xbb010500,
4321 - 0x65b60076,
4322 - 0x9450f904,
4323 - 0x56bb0465,
4324 - 0xfd50bd02,
4325 - 0x50fc0475,
4326 - 0x00082d7e,
4327 - 0xf50464b6,
4328 - 0xbb008711,
4329 - 0x65b60076,
4330 - 0x9450f904,
4331 - 0x56bb0465,
4332 - 0xfd50bd02,
4333 - 0x50fc0475,
4334 - 0x0007847e,
4335 - 0xf40464b6,
4336 - 0x5bcb6711,
4337 - 0x0076bbe0,
4338 + 0xb600087b,
4339 + 0xd0fc0464,
4340 + 0xf500d6b0,
4341 + 0x0500b01b,
4342 + 0x0076bb00,
4343 0xf90465b6,
4344 0x04659450,
4345 0xbd0256bb,
4346 0x0475fd50,
4347 - 0xd07e50fc,
4348 - 0x64b60006,
4349 - 0xbd5bb204,
4350 - 0x410ef474,
4351 -/* 0x09a4: i2c_recv_not_rd08 */
4352 - 0xf401d6b0,
4353 - 0x00053b1b,
4354 - 0x00082d7e,
4355 - 0xc73211f4,
4356 - 0xd37ee0c5,
4357 - 0x11f40007,
4358 - 0x7e000528,
4359 - 0xf400082d,
4360 - 0xb5c71f11,
4361 - 0x07d37ee0,
4362 - 0x1511f400,
4363 - 0x0006d07e,
4364 - 0xc5c774bd,
4365 - 0x091bf408,
4366 - 0xf40232f4,
4367 -/* 0x09e2: i2c_recv_not_wr08 */
4368 -/* 0x09e2: i2c_recv_done */
4369 - 0xcec7030e,
4370 - 0x08917ef8,
4371 - 0xfce0fc00,
4372 - 0x0912f4d0,
4373 - 0x9f7e7cb2,
4374 -/* 0x09f6: i2c_recv_exit */
4375 - 0x00f80002,
4376 -/* 0x09f8: i2c_init */
4377 -/* 0x09fa: test_recv */
4378 - 0x584100f8,
4379 - 0x0011cf04,
4380 - 0x400110b6,
4381 - 0x01f60458,
4382 - 0xde04bd00,
4383 - 0x134fd900,
4384 - 0x0001de7e,
4385 -/* 0x0a16: test_init */
4386 - 0x004e00f8,
4387 - 0x01de7e08,
4388 -/* 0x0a1f: idle_recv */
4389 + 0x287e50fc,
4390 + 0x64b60008,
4391 + 0xcc11f504,
4392 + 0xe0c5c700,
4393 + 0xb60076bb,
4394 + 0x50f90465,
4395 + 0xbb046594,
4396 + 0x50bd0256,
4397 + 0xfc0475fd,
4398 + 0x07ce7e50,
4399 + 0x0464b600,
4400 + 0x00a911f5,
4401 + 0x76bb0105,
4402 + 0x0465b600,
4403 + 0x659450f9,
4404 + 0x0256bb04,
4405 + 0x75fd50bd,
4406 + 0x7e50fc04,
4407 + 0xb6000828,
4408 + 0x11f50464,
4409 + 0x76bb0087,
4410 + 0x0465b600,
4411 + 0x659450f9,
4412 + 0x0256bb04,
4413 + 0x75fd50bd,
4414 + 0x7e50fc04,
4415 + 0xb600077f,
4416 + 0x11f40464,
4417 + 0xe05bcb67,
4418 + 0xb60076bb,
4419 + 0x50f90465,
4420 + 0xbb046594,
4421 + 0x50bd0256,
4422 + 0xfc0475fd,
4423 + 0x06cb7e50,
4424 + 0x0464b600,
4425 + 0x74bd5bb2,
4426 +/* 0x099f: i2c_recv_not_rd08 */
4427 + 0xb0410ef4,
4428 + 0x1bf401d6,
4429 + 0x7e00053b,
4430 + 0xf4000828,
4431 + 0xc5c73211,
4432 + 0x07ce7ee0,
4433 + 0x2811f400,
4434 + 0x287e0005,
4435 + 0x11f40008,
4436 + 0xe0b5c71f,
4437 + 0x0007ce7e,
4438 + 0x7e1511f4,
4439 + 0xbd0006cb,
4440 + 0x08c5c774,
4441 + 0xf4091bf4,
4442 + 0x0ef40232,
4443 +/* 0x09dd: i2c_recv_not_wr08 */
4444 +/* 0x09dd: i2c_recv_done */
4445 + 0xf8cec703,
4446 + 0x00088c7e,
4447 + 0xd0fce0fc,
4448 + 0xb20912f4,
4449 + 0x029f7e7c,
4450 +/* 0x09f1: i2c_recv_exit */
4451 +/* 0x09f3: i2c_init */
4452 0xf800f800,
4453 -/* 0x0a21: idle */
4454 - 0x0031f400,
4455 - 0xcf045441,
4456 - 0x10b60011,
4457 - 0x04544001,
4458 - 0xbd0001f6,
4459 -/* 0x0a35: idle_loop */
4460 - 0xf4580104,
4461 -/* 0x0a3a: idle_proc */
4462 -/* 0x0a3a: idle_proc_exec */
4463 - 0x10f90232,
4464 - 0xa87e1eb2,
4465 - 0x10fc0002,
4466 - 0xf40911f4,
4467 - 0x0ef40231,
4468 -/* 0x0a4d: idle_proc_next */
4469 - 0x5810b6f0,
4470 - 0x1bf41fa6,
4471 - 0xe002f4e8,
4472 - 0xf40028f4,
4473 - 0x0000c60e,
4474 +/* 0x09f5: test_recv */
4475 + 0x04584100,
4476 + 0xb60011cf,
4477 + 0x58400110,
4478 + 0x0001f604,
4479 + 0x00de04bd,
4480 + 0x7e134fd9,
4481 + 0xf80001de,
4482 +/* 0x0a11: test_init */
4483 + 0x08004e00,
4484 + 0x0001de7e,
4485 +/* 0x0a1a: idle_recv */
4486 + 0x00f800f8,
4487 +/* 0x0a1c: idle */
4488 + 0x410031f4,
4489 + 0x11cf0454,
4490 + 0x0110b600,
4491 + 0xf6045440,
4492 + 0x04bd0001,
4493 +/* 0x0a30: idle_loop */
4494 + 0x32f45801,
4495 +/* 0x0a35: idle_proc */
4496 +/* 0x0a35: idle_proc_exec */
4497 + 0xb210f902,
4498 + 0x02a87e1e,
4499 + 0xf410fc00,
4500 + 0x31f40911,
4501 + 0xf00ef402,
4502 +/* 0x0a48: idle_proc_next */
4503 + 0xa65810b6,
4504 + 0xe81bf41f,
4505 + 0xf4e002f4,
4506 + 0x0ef40028,
4507 + 0x000000c6,
4508 + 0x00000000,
4509 0x00000000,
4510 0x00000000,
4511 0x00000000,
4512 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
4513 index e83341815ec6..e29b785d9f22 100644
4514 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
4515 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
4516 @@ -46,8 +46,8 @@ uint32_t gt215_pmu_data[] = {
4517 0x00000000,
4518 0x00000000,
4519 0x584d454d,
4520 - 0x0000083a,
4521 - 0x0000082c,
4522 + 0x00000833,
4523 + 0x00000825,
4524 0x00000000,
4525 0x00000000,
4526 0x00000000,
4527 @@ -68,8 +68,8 @@ uint32_t gt215_pmu_data[] = {
4528 0x00000000,
4529 0x00000000,
4530 0x46524550,
4531 - 0x0000083e,
4532 - 0x0000083c,
4533 + 0x00000837,
4534 + 0x00000835,
4535 0x00000000,
4536 0x00000000,
4537 0x00000000,
4538 @@ -90,8 +90,8 @@ uint32_t gt215_pmu_data[] = {
4539 0x00000000,
4540 0x00000000,
4541 0x5f433249,
4542 - 0x00000c6e,
4543 - 0x00000b11,
4544 + 0x00000c67,
4545 + 0x00000b0a,
4546 0x00000000,
4547 0x00000000,
4548 0x00000000,
4549 @@ -112,8 +112,8 @@ uint32_t gt215_pmu_data[] = {
4550 0x00000000,
4551 0x00000000,
4552 0x54534554,
4553 - 0x00000c97,
4554 - 0x00000c70,
4555 + 0x00000c90,
4556 + 0x00000c69,
4557 0x00000000,
4558 0x00000000,
4559 0x00000000,
4560 @@ -134,8 +134,8 @@ uint32_t gt215_pmu_data[] = {
4561 0x00000000,
4562 0x00000000,
4563 0x454c4449,
4564 - 0x00000ca3,
4565 - 0x00000ca1,
4566 + 0x00000c9c,
4567 + 0x00000c9a,
4568 0x00000000,
4569 0x00000000,
4570 0x00000000,
4571 @@ -233,22 +233,22 @@ uint32_t gt215_pmu_data[] = {
4572 /* 0x037c: memx_func_next */
4573 0x00000002,
4574 0x00000000,
4575 - 0x000005a0,
4576 + 0x0000059f,
4577 0x00000003,
4578 0x00000002,
4579 - 0x00000632,
4580 + 0x0000062f,
4581 0x00040004,
4582 0x00000000,
4583 - 0x0000064e,
4584 + 0x0000064b,
4585 0x00010005,
4586 0x00000000,
4587 - 0x0000066b,
4588 + 0x00000668,
4589 0x00010006,
4590 0x00000000,
4591 - 0x000005f0,
4592 + 0x000005ef,
4593 0x00000007,
4594 0x00000000,
4595 - 0x00000676,
4596 + 0x00000673,
4597 /* 0x03c4: memx_func_tail */
4598 /* 0x03c4: memx_ts_start */
4599 0x00000000,
4600 @@ -1304,560 +1304,560 @@ uint32_t gt215_pmu_code[] = {
4601 0x67f102d7,
4602 0x63f1fffc,
4603 0x76fdffff,
4604 - 0x0267f104,
4605 - 0x0576fd00,
4606 - 0x70f980f9,
4607 - 0xe0fcd0fc,
4608 - 0xf04021f4,
4609 + 0x0267f004,
4610 + 0xf90576fd,
4611 + 0xfc70f980,
4612 + 0xf4e0fcd0,
4613 + 0x67f04021,
4614 + 0xe007f104,
4615 + 0x0604b607,
4616 + 0xbd0006d0,
4617 +/* 0x0581: memx_func_enter_wait */
4618 + 0xc067f104,
4619 + 0x0664b607,
4620 + 0xf00066cf,
4621 + 0x0bf40464,
4622 + 0x2c67f0f3,
4623 + 0xcf0664b6,
4624 + 0x06800066,
4625 +/* 0x059f: memx_func_leave */
4626 + 0xf000f8f1,
4627 + 0x64b62c67,
4628 + 0x0066cf06,
4629 + 0xf0f20680,
4630 0x07f10467,
4631 - 0x04b607e0,
4632 + 0x04b607e4,
4633 0x0006d006,
4634 -/* 0x0582: memx_func_enter_wait */
4635 +/* 0x05ba: memx_func_leave_wait */
4636 0x67f104bd,
4637 0x64b607c0,
4638 0x0066cf06,
4639 0xf40464f0,
4640 - 0x67f0f30b,
4641 - 0x0664b62c,
4642 - 0x800066cf,
4643 - 0x00f8f106,
4644 -/* 0x05a0: memx_func_leave */
4645 - 0xb62c67f0,
4646 - 0x66cf0664,
4647 - 0xf2068000,
4648 - 0xf10467f0,
4649 - 0xb607e407,
4650 - 0x06d00604,
4651 -/* 0x05bb: memx_func_leave_wait */
4652 - 0xf104bd00,
4653 - 0xb607c067,
4654 - 0x66cf0664,
4655 - 0x0464f000,
4656 - 0xf1f31bf4,
4657 - 0xb9161087,
4658 - 0x21f4028e,
4659 - 0x02d7b904,
4660 - 0xffcc67f1,
4661 - 0xffff63f1,
4662 - 0xf90476fd,
4663 - 0xfc70f980,
4664 - 0xf4e0fcd0,
4665 - 0x00f84021,
4666 -/* 0x05f0: memx_func_wait_vblank */
4667 - 0xb0001698,
4668 - 0x0bf40066,
4669 - 0x0166b013,
4670 - 0xf4060bf4,
4671 -/* 0x0602: memx_func_wait_vblank_head1 */
4672 - 0x77f12e0e,
4673 - 0x0ef40020,
4674 -/* 0x0609: memx_func_wait_vblank_head0 */
4675 - 0x0877f107,
4676 -/* 0x060d: memx_func_wait_vblank_0 */
4677 - 0xc467f100,
4678 - 0x0664b607,
4679 - 0xfd0066cf,
4680 - 0x1bf40467,
4681 -/* 0x061d: memx_func_wait_vblank_1 */
4682 - 0xc467f1f3,
4683 - 0x0664b607,
4684 - 0xfd0066cf,
4685 - 0x0bf40467,
4686 -/* 0x062d: memx_func_wait_vblank_fini */
4687 - 0x0410b6f3,
4688 -/* 0x0632: memx_func_wr32 */
4689 - 0x169800f8,
4690 - 0x01159800,
4691 - 0xf90810b6,
4692 - 0xfc50f960,
4693 - 0xf4e0fcd0,
4694 - 0x42b64021,
4695 - 0xe91bf402,
4696 -/* 0x064e: memx_func_wait */
4697 - 0x87f000f8,
4698 - 0x0684b62c,
4699 - 0x980088cf,
4700 - 0x1d98001e,
4701 - 0x021c9801,
4702 - 0xb6031b98,
4703 - 0x21f41010,
4704 -/* 0x066b: memx_func_delay */
4705 - 0x9800f8a3,
4706 - 0x10b6001e,
4707 - 0x7e21f404,
4708 -/* 0x0676: memx_func_train */
4709 - 0x57f100f8,
4710 - 0x77f10003,
4711 - 0x97f10000,
4712 - 0x93f00000,
4713 - 0x029eb970,
4714 - 0xb90421f4,
4715 - 0xe7f102d8,
4716 - 0x21f42710,
4717 -/* 0x0695: memx_func_train_loop_outer */
4718 - 0x0158e07e,
4719 - 0x0083f101,
4720 - 0xe097f102,
4721 - 0x1193f011,
4722 - 0x80f990f9,
4723 + 0x87f1f31b,
4724 + 0x8eb91610,
4725 + 0x0421f402,
4726 + 0xf102d7b9,
4727 + 0xf1ffcc67,
4728 + 0xfdffff63,
4729 + 0x80f90476,
4730 + 0xd0fc70f9,
4731 + 0x21f4e0fc,
4732 +/* 0x05ef: memx_func_wait_vblank */
4733 + 0x9800f840,
4734 + 0x66b00016,
4735 + 0x120bf400,
4736 + 0xf40166b0,
4737 + 0x0ef4060b,
4738 +/* 0x0601: memx_func_wait_vblank_head1 */
4739 + 0x2077f02c,
4740 +/* 0x0607: memx_func_wait_vblank_head0 */
4741 + 0xf0060ef4,
4742 +/* 0x060a: memx_func_wait_vblank_0 */
4743 + 0x67f10877,
4744 + 0x64b607c4,
4745 + 0x0066cf06,
4746 + 0xf40467fd,
4747 +/* 0x061a: memx_func_wait_vblank_1 */
4748 + 0x67f1f31b,
4749 + 0x64b607c4,
4750 + 0x0066cf06,
4751 + 0xf40467fd,
4752 +/* 0x062a: memx_func_wait_vblank_fini */
4753 + 0x10b6f30b,
4754 +/* 0x062f: memx_func_wr32 */
4755 + 0x9800f804,
4756 + 0x15980016,
4757 + 0x0810b601,
4758 + 0x50f960f9,
4759 0xe0fcd0fc,
4760 - 0xf94021f4,
4761 - 0x0067f150,
4762 -/* 0x06b5: memx_func_train_loop_inner */
4763 - 0x1187f100,
4764 - 0x9068ff11,
4765 - 0xfd109894,
4766 - 0x97f10589,
4767 - 0x93f00720,
4768 - 0xf990f910,
4769 - 0xfcd0fc80,
4770 - 0x4021f4e0,
4771 - 0x008097f1,
4772 - 0xb91093f0,
4773 - 0x21f4029e,
4774 - 0x02d8b904,
4775 - 0xf92088c5,
4776 + 0xb64021f4,
4777 + 0x1bf40242,
4778 +/* 0x064b: memx_func_wait */
4779 + 0xf000f8e9,
4780 + 0x84b62c87,
4781 + 0x0088cf06,
4782 + 0x98001e98,
4783 + 0x1c98011d,
4784 + 0x031b9802,
4785 + 0xf41010b6,
4786 + 0x00f8a321,
4787 +/* 0x0668: memx_func_delay */
4788 + 0xb6001e98,
4789 + 0x21f40410,
4790 +/* 0x0673: memx_func_train */
4791 + 0xf000f87e,
4792 + 0x77f00357,
4793 + 0x0097f100,
4794 + 0x7093f000,
4795 + 0xf4029eb9,
4796 + 0xd8b90421,
4797 + 0x10e7f102,
4798 + 0x7e21f427,
4799 +/* 0x0690: memx_func_train_loop_outer */
4800 + 0x010158e0,
4801 + 0x020083f1,
4802 + 0x11e097f1,
4803 + 0xf91193f0,
4804 + 0xfc80f990,
4805 + 0xf4e0fcd0,
4806 + 0x50f94021,
4807 +/* 0x06af: memx_func_train_loop_inner */
4808 + 0xf10067f0,
4809 + 0xff111187,
4810 + 0x98949068,
4811 + 0x0589fd10,
4812 + 0x072097f1,
4813 + 0xf91093f0,
4814 0xfc80f990,
4815 0xf4e0fcd0,
4816 0x97f14021,
4817 - 0x93f0053c,
4818 - 0x0287f110,
4819 - 0x0083f130,
4820 - 0xf990f980,
4821 + 0x93f00080,
4822 + 0x029eb910,
4823 + 0xb90421f4,
4824 + 0x88c502d8,
4825 + 0xf990f920,
4826 0xfcd0fc80,
4827 0x4021f4e0,
4828 - 0x0560e7f1,
4829 - 0xf110e3f0,
4830 - 0xf10000d7,
4831 - 0x908000d3,
4832 - 0xb7f100dc,
4833 - 0xb3f08480,
4834 - 0xa321f41e,
4835 - 0x000057f1,
4836 - 0xffff97f1,
4837 - 0x830093f1,
4838 -/* 0x0734: memx_func_train_loop_4x */
4839 - 0x0080a7f1,
4840 - 0xb910a3f0,
4841 - 0x21f402ae,
4842 - 0x02d8b904,
4843 - 0xffdfb7f1,
4844 - 0xffffb3f1,
4845 - 0xf9048bfd,
4846 - 0xfc80f9a0,
4847 + 0x053c97f1,
4848 + 0xf11093f0,
4849 + 0xf1300287,
4850 + 0xf9800083,
4851 + 0xfc80f990,
4852 0xf4e0fcd0,
4853 - 0xa7f14021,
4854 - 0xa3f0053c,
4855 - 0x0287f110,
4856 - 0x0083f130,
4857 - 0xf9a0f980,
4858 - 0xfcd0fc80,
4859 - 0x4021f4e0,
4860 - 0x0560e7f1,
4861 - 0xf110e3f0,
4862 - 0xf10000d7,
4863 - 0xb98000d3,
4864 - 0xb7f102dc,
4865 - 0xb3f02710,
4866 - 0xa321f400,
4867 - 0xf402eeb9,
4868 - 0xddb90421,
4869 - 0x949dff02,
4870 + 0xe7f14021,
4871 + 0xe3f00560,
4872 + 0x00d7f110,
4873 + 0x00d3f100,
4874 + 0x00dc9080,
4875 + 0x8480b7f1,
4876 + 0xf41eb3f0,
4877 + 0x57f0a321,
4878 + 0xff97f100,
4879 + 0x0093f1ff,
4880 +/* 0x072d: memx_func_train_loop_4x */
4881 + 0x80a7f183,
4882 + 0x10a3f000,
4883 + 0xf402aeb9,
4884 + 0xd8b90421,
4885 + 0xdfb7f102,
4886 + 0xffb3f1ff,
4887 + 0x048bfdff,
4888 + 0x80f9a0f9,
4889 + 0xe0fcd0fc,
4890 + 0xf14021f4,
4891 + 0xf0053ca7,
4892 + 0x87f110a3,
4893 + 0x83f13002,
4894 + 0xa0f98000,
4895 + 0xd0fc80f9,
4896 + 0x21f4e0fc,
4897 + 0x60e7f140,
4898 + 0x10e3f005,
4899 + 0x0000d7f1,
4900 + 0x8000d3f1,
4901 + 0xf102dcb9,
4902 + 0xf02710b7,
4903 + 0x21f400b3,
4904 + 0x02eeb9a3,
4905 + 0xb90421f4,
4906 + 0x9dff02dd,
4907 + 0x0150b694,
4908 + 0xf4045670,
4909 + 0x7aa0921e,
4910 + 0xa9800bcc,
4911 + 0x0160b600,
4912 + 0x700470b6,
4913 + 0x1ef51066,
4914 + 0x50fcff01,
4915 0x700150b6,
4916 - 0x1ef40456,
4917 - 0xcc7aa092,
4918 - 0x00a9800b,
4919 - 0xb60160b6,
4920 - 0x66700470,
4921 - 0x001ef510,
4922 - 0xb650fcff,
4923 - 0x56700150,
4924 - 0xd41ef507,
4925 -/* 0x07c7: memx_exec */
4926 - 0xf900f8fe,
4927 - 0xb9d0f9e0,
4928 - 0xb2b902c1,
4929 -/* 0x07d1: memx_exec_next */
4930 - 0x00139802,
4931 - 0xe70410b6,
4932 - 0xe701f034,
4933 - 0xb601e033,
4934 - 0x30f00132,
4935 - 0xde35980c,
4936 - 0x12b855f9,
4937 - 0xe41ef406,
4938 - 0x98f10b98,
4939 - 0xcbbbf20c,
4940 - 0xc4b7f102,
4941 - 0x06b4b607,
4942 - 0xfc00bbcf,
4943 - 0xf5e0fcd0,
4944 + 0x1ef50756,
4945 + 0x00f8fed6,
4946 +/* 0x07c0: memx_exec */
4947 + 0xd0f9e0f9,
4948 + 0xb902c1b9,
4949 +/* 0x07ca: memx_exec_next */
4950 + 0x139802b2,
4951 + 0x0410b600,
4952 + 0x01f034e7,
4953 + 0x01e033e7,
4954 + 0xf00132b6,
4955 + 0x35980c30,
4956 + 0xb855f9de,
4957 + 0x1ef40612,
4958 + 0xf10b98e4,
4959 + 0xbbf20c98,
4960 + 0xb7f102cb,
4961 + 0xb4b607c4,
4962 + 0x00bbcf06,
4963 + 0xe0fcd0fc,
4964 + 0x033621f5,
4965 +/* 0x0806: memx_info */
4966 + 0xc67000f8,
4967 + 0x0e0bf401,
4968 +/* 0x080c: memx_info_data */
4969 + 0x03ccc7f1,
4970 + 0x0800b7f1,
4971 +/* 0x0817: memx_info_train */
4972 + 0xf10b0ef4,
4973 + 0xf10bccc7,
4974 +/* 0x081f: memx_info_send */
4975 + 0xf50100b7,
4976 0xf8033621,
4977 -/* 0x080d: memx_info */
4978 - 0x01c67000,
4979 -/* 0x0813: memx_info_data */
4980 - 0xf10e0bf4,
4981 - 0xf103ccc7,
4982 - 0xf40800b7,
4983 -/* 0x081e: memx_info_train */
4984 - 0xc7f10b0e,
4985 - 0xb7f10bcc,
4986 -/* 0x0826: memx_info_send */
4987 - 0x21f50100,
4988 - 0x00f80336,
4989 -/* 0x082c: memx_recv */
4990 - 0xf401d6b0,
4991 - 0xd6b0980b,
4992 - 0xd80bf400,
4993 -/* 0x083a: memx_init */
4994 - 0x00f800f8,
4995 -/* 0x083c: perf_recv */
4996 -/* 0x083e: perf_init */
4997 - 0x00f800f8,
4998 -/* 0x0840: i2c_drive_scl */
4999 - 0xf40036b0,
5000 - 0x07f1110b,
5001 - 0x04b607e0,
5002 - 0x0001d006,
5003 - 0x00f804bd,
5004 -/* 0x0854: i2c_drive_scl_lo */
5005 - 0x07e407f1,
5006 - 0xd00604b6,
5007 - 0x04bd0001,
5008 -/* 0x0862: i2c_drive_sda */
5009 - 0x36b000f8,
5010 - 0x110bf400,
5011 - 0x07e007f1,
5012 - 0xd00604b6,
5013 - 0x04bd0002,
5014 -/* 0x0876: i2c_drive_sda_lo */
5015 - 0x07f100f8,
5016 - 0x04b607e4,
5017 - 0x0002d006,
5018 - 0x00f804bd,
5019 -/* 0x0884: i2c_sense_scl */
5020 - 0xf10132f4,
5021 - 0xb607c437,
5022 - 0x33cf0634,
5023 - 0x0431fd00,
5024 - 0xf4060bf4,
5025 -/* 0x089a: i2c_sense_scl_done */
5026 - 0x00f80131,
5027 -/* 0x089c: i2c_sense_sda */
5028 - 0xf10132f4,
5029 - 0xb607c437,
5030 - 0x33cf0634,
5031 - 0x0432fd00,
5032 - 0xf4060bf4,
5033 -/* 0x08b2: i2c_sense_sda_done */
5034 - 0x00f80131,
5035 -/* 0x08b4: i2c_raise_scl */
5036 - 0x47f140f9,
5037 - 0x37f00898,
5038 - 0x4021f501,
5039 -/* 0x08c1: i2c_raise_scl_wait */
5040 +/* 0x0825: memx_recv */
5041 + 0x01d6b000,
5042 + 0xb0980bf4,
5043 + 0x0bf400d6,
5044 +/* 0x0833: memx_init */
5045 + 0xf800f8d8,
5046 +/* 0x0835: perf_recv */
5047 +/* 0x0837: perf_init */
5048 + 0xf800f800,
5049 +/* 0x0839: i2c_drive_scl */
5050 + 0x0036b000,
5051 + 0xf1110bf4,
5052 + 0xb607e007,
5053 + 0x01d00604,
5054 + 0xf804bd00,
5055 +/* 0x084d: i2c_drive_scl_lo */
5056 + 0xe407f100,
5057 + 0x0604b607,
5058 + 0xbd0001d0,
5059 +/* 0x085b: i2c_drive_sda */
5060 + 0xb000f804,
5061 + 0x0bf40036,
5062 + 0xe007f111,
5063 + 0x0604b607,
5064 + 0xbd0002d0,
5065 +/* 0x086f: i2c_drive_sda_lo */
5066 + 0xf100f804,
5067 + 0xb607e407,
5068 + 0x02d00604,
5069 + 0xf804bd00,
5070 +/* 0x087d: i2c_sense_scl */
5071 + 0x0132f400,
5072 + 0x07c437f1,
5073 + 0xcf0634b6,
5074 + 0x31fd0033,
5075 + 0x060bf404,
5076 +/* 0x0893: i2c_sense_scl_done */
5077 + 0xf80131f4,
5078 +/* 0x0895: i2c_sense_sda */
5079 + 0x0132f400,
5080 + 0x07c437f1,
5081 + 0xcf0634b6,
5082 + 0x32fd0033,
5083 + 0x060bf404,
5084 +/* 0x08ab: i2c_sense_sda_done */
5085 + 0xf80131f4,
5086 +/* 0x08ad: i2c_raise_scl */
5087 + 0xf140f900,
5088 + 0xf0089847,
5089 + 0x21f50137,
5090 +/* 0x08ba: i2c_raise_scl_wait */
5091 + 0xe7f10839,
5092 + 0x21f403e8,
5093 + 0x7d21f57e,
5094 + 0x0901f408,
5095 + 0xf40142b6,
5096 +/* 0x08ce: i2c_raise_scl_done */
5097 + 0x40fcef1b,
5098 +/* 0x08d2: i2c_start */
5099 + 0x21f500f8,
5100 + 0x11f4087d,
5101 + 0x9521f50d,
5102 + 0x0611f408,
5103 +/* 0x08e3: i2c_start_rep */
5104 + 0xf0300ef4,
5105 + 0x21f50037,
5106 + 0x37f00839,
5107 + 0x5b21f501,
5108 + 0x0076bb08,
5109 + 0xf90465b6,
5110 + 0x04659450,
5111 + 0xbd0256bb,
5112 + 0x0475fd50,
5113 + 0x21f550fc,
5114 + 0x64b608ad,
5115 + 0x1f11f404,
5116 +/* 0x0910: i2c_start_send */
5117 + 0xf50037f0,
5118 + 0xf1085b21,
5119 + 0xf41388e7,
5120 + 0x37f07e21,
5121 + 0x3921f500,
5122 + 0x88e7f108,
5123 + 0x7e21f413,
5124 +/* 0x092c: i2c_start_out */
5125 +/* 0x092e: i2c_stop */
5126 + 0x37f000f8,
5127 + 0x3921f500,
5128 + 0x0037f008,
5129 + 0x085b21f5,
5130 + 0x03e8e7f1,
5131 + 0xf07e21f4,
5132 + 0x21f50137,
5133 + 0xe7f10839,
5134 + 0x21f41388,
5135 + 0x0137f07e,
5136 + 0x085b21f5,
5137 + 0x1388e7f1,
5138 + 0xf87e21f4,
5139 +/* 0x0961: i2c_bitw */
5140 + 0x5b21f500,
5141 0xe8e7f108,
5142 0x7e21f403,
5143 - 0x088421f5,
5144 - 0xb60901f4,
5145 - 0x1bf40142,
5146 -/* 0x08d5: i2c_raise_scl_done */
5147 - 0xf840fcef,
5148 -/* 0x08d9: i2c_start */
5149 - 0x8421f500,
5150 - 0x0d11f408,
5151 - 0x089c21f5,
5152 - 0xf40611f4,
5153 -/* 0x08ea: i2c_start_rep */
5154 - 0x37f0300e,
5155 - 0x4021f500,
5156 - 0x0137f008,
5157 - 0x086221f5,
5158 0xb60076bb,
5159 0x50f90465,
5160 0xbb046594,
5161 0x50bd0256,
5162 0xfc0475fd,
5163 - 0xb421f550,
5164 + 0xad21f550,
5165 0x0464b608,
5166 -/* 0x0917: i2c_start_send */
5167 - 0xf01f11f4,
5168 - 0x21f50037,
5169 - 0xe7f10862,
5170 - 0x21f41388,
5171 - 0x0037f07e,
5172 - 0x084021f5,
5173 - 0x1388e7f1,
5174 -/* 0x0933: i2c_start_out */
5175 - 0xf87e21f4,
5176 -/* 0x0935: i2c_stop */
5177 - 0x0037f000,
5178 - 0x084021f5,
5179 - 0xf50037f0,
5180 - 0xf1086221,
5181 - 0xf403e8e7,
5182 + 0xf11811f4,
5183 + 0xf41388e7,
5184 0x37f07e21,
5185 - 0x4021f501,
5186 + 0x3921f500,
5187 0x88e7f108,
5188 0x7e21f413,
5189 - 0xf50137f0,
5190 - 0xf1086221,
5191 - 0xf41388e7,
5192 - 0x00f87e21,
5193 -/* 0x0968: i2c_bitw */
5194 - 0x086221f5,
5195 - 0x03e8e7f1,
5196 - 0xbb7e21f4,
5197 - 0x65b60076,
5198 - 0x9450f904,
5199 - 0x56bb0465,
5200 - 0xfd50bd02,
5201 - 0x50fc0475,
5202 - 0x08b421f5,
5203 - 0xf40464b6,
5204 - 0xe7f11811,
5205 +/* 0x09a0: i2c_bitw_out */
5206 +/* 0x09a2: i2c_bitr */
5207 + 0x37f000f8,
5208 + 0x5b21f501,
5209 + 0xe8e7f108,
5210 + 0x7e21f403,
5211 + 0xb60076bb,
5212 + 0x50f90465,
5213 + 0xbb046594,
5214 + 0x50bd0256,
5215 + 0xfc0475fd,
5216 + 0xad21f550,
5217 + 0x0464b608,
5218 + 0xf51b11f4,
5219 + 0xf0089521,
5220 + 0x21f50037,
5221 + 0xe7f10839,
5222 0x21f41388,
5223 - 0x0037f07e,
5224 - 0x084021f5,
5225 - 0x1388e7f1,
5226 -/* 0x09a7: i2c_bitw_out */
5227 - 0xf87e21f4,
5228 -/* 0x09a9: i2c_bitr */
5229 - 0x0137f000,
5230 - 0x086221f5,
5231 - 0x03e8e7f1,
5232 - 0xbb7e21f4,
5233 - 0x65b60076,
5234 - 0x9450f904,
5235 - 0x56bb0465,
5236 - 0xfd50bd02,
5237 - 0x50fc0475,
5238 - 0x08b421f5,
5239 - 0xf40464b6,
5240 - 0x21f51b11,
5241 - 0x37f0089c,
5242 - 0x4021f500,
5243 - 0x88e7f108,
5244 - 0x7e21f413,
5245 - 0xf4013cf0,
5246 -/* 0x09ee: i2c_bitr_done */
5247 - 0x00f80131,
5248 -/* 0x09f0: i2c_get_byte */
5249 - 0xf00057f0,
5250 -/* 0x09f6: i2c_get_byte_next */
5251 - 0x54b60847,
5252 + 0x013cf07e,
5253 +/* 0x09e7: i2c_bitr_done */
5254 + 0xf80131f4,
5255 +/* 0x09e9: i2c_get_byte */
5256 + 0x0057f000,
5257 +/* 0x09ef: i2c_get_byte_next */
5258 + 0xb60847f0,
5259 + 0x76bb0154,
5260 + 0x0465b600,
5261 + 0x659450f9,
5262 + 0x0256bb04,
5263 + 0x75fd50bd,
5264 + 0xf550fc04,
5265 + 0xb609a221,
5266 + 0x11f40464,
5267 + 0x0553fd2b,
5268 + 0xf40142b6,
5269 + 0x37f0d81b,
5270 0x0076bb01,
5271 0xf90465b6,
5272 0x04659450,
5273 0xbd0256bb,
5274 0x0475fd50,
5275 0x21f550fc,
5276 - 0x64b609a9,
5277 - 0x2b11f404,
5278 - 0xb60553fd,
5279 - 0x1bf40142,
5280 - 0x0137f0d8,
5281 - 0xb60076bb,
5282 - 0x50f90465,
5283 - 0xbb046594,
5284 - 0x50bd0256,
5285 - 0xfc0475fd,
5286 - 0x6821f550,
5287 - 0x0464b609,
5288 -/* 0x0a40: i2c_get_byte_done */
5289 -/* 0x0a42: i2c_put_byte */
5290 - 0x47f000f8,
5291 -/* 0x0a45: i2c_put_byte_next */
5292 - 0x0142b608,
5293 - 0xbb3854ff,
5294 - 0x65b60076,
5295 - 0x9450f904,
5296 - 0x56bb0465,
5297 - 0xfd50bd02,
5298 - 0x50fc0475,
5299 - 0x096821f5,
5300 - 0xf40464b6,
5301 - 0x46b03411,
5302 - 0xd81bf400,
5303 + 0x64b60961,
5304 +/* 0x0a39: i2c_get_byte_done */
5305 +/* 0x0a3b: i2c_put_byte */
5306 + 0xf000f804,
5307 +/* 0x0a3e: i2c_put_byte_next */
5308 + 0x42b60847,
5309 + 0x3854ff01,
5310 0xb60076bb,
5311 0x50f90465,
5312 0xbb046594,
5313 0x50bd0256,
5314 0xfc0475fd,
5315 - 0xa921f550,
5316 + 0x6121f550,
5317 0x0464b609,
5318 - 0xbb0f11f4,
5319 - 0x36b00076,
5320 - 0x061bf401,
5321 -/* 0x0a9b: i2c_put_byte_done */
5322 - 0xf80132f4,
5323 -/* 0x0a9d: i2c_addr */
5324 - 0x0076bb00,
5325 + 0xb03411f4,
5326 + 0x1bf40046,
5327 + 0x0076bbd8,
5328 0xf90465b6,
5329 0x04659450,
5330 0xbd0256bb,
5331 0x0475fd50,
5332 0x21f550fc,
5333 - 0x64b608d9,
5334 - 0x2911f404,
5335 - 0x012ec3e7,
5336 - 0xfd0134b6,
5337 - 0x76bb0553,
5338 + 0x64b609a2,
5339 + 0x0f11f404,
5340 + 0xb00076bb,
5341 + 0x1bf40136,
5342 + 0x0132f406,
5343 +/* 0x0a94: i2c_put_byte_done */
5344 +/* 0x0a96: i2c_addr */
5345 + 0x76bb00f8,
5346 0x0465b600,
5347 0x659450f9,
5348 0x0256bb04,
5349 0x75fd50bd,
5350 0xf550fc04,
5351 - 0xb60a4221,
5352 -/* 0x0ae2: i2c_addr_done */
5353 - 0x00f80464,
5354 -/* 0x0ae4: i2c_acquire_addr */
5355 - 0xb6f8cec7,
5356 - 0xe0b702e4,
5357 - 0xee980d1c,
5358 -/* 0x0af3: i2c_acquire */
5359 - 0xf500f800,
5360 - 0xf40ae421,
5361 - 0xd9f00421,
5362 - 0x4021f403,
5363 -/* 0x0b02: i2c_release */
5364 - 0x21f500f8,
5365 - 0x21f40ae4,
5366 - 0x03daf004,
5367 - 0xf84021f4,
5368 -/* 0x0b11: i2c_recv */
5369 - 0x0132f400,
5370 - 0xb6f8c1c7,
5371 - 0x16b00214,
5372 - 0x3a1ff528,
5373 - 0xf413a001,
5374 - 0x0032980c,
5375 - 0x0ccc13a0,
5376 - 0xf4003198,
5377 - 0xd0f90231,
5378 - 0xd0f9e0f9,
5379 - 0x000067f1,
5380 - 0x100063f1,
5381 - 0xbb016792,
5382 + 0xb608d221,
5383 + 0x11f40464,
5384 + 0x2ec3e729,
5385 + 0x0134b601,
5386 + 0xbb0553fd,
5387 0x65b60076,
5388 0x9450f904,
5389 0x56bb0465,
5390 0xfd50bd02,
5391 0x50fc0475,
5392 - 0x0af321f5,
5393 - 0xfc0464b6,
5394 - 0x00d6b0d0,
5395 - 0x00b31bf5,
5396 - 0xbb0057f0,
5397 + 0x0a3b21f5,
5398 +/* 0x0adb: i2c_addr_done */
5399 + 0xf80464b6,
5400 +/* 0x0add: i2c_acquire_addr */
5401 + 0xf8cec700,
5402 + 0xb702e4b6,
5403 + 0x980d1ce0,
5404 + 0x00f800ee,
5405 +/* 0x0aec: i2c_acquire */
5406 + 0x0add21f5,
5407 + 0xf00421f4,
5408 + 0x21f403d9,
5409 +/* 0x0afb: i2c_release */
5410 + 0xf500f840,
5411 + 0xf40add21,
5412 + 0xdaf00421,
5413 + 0x4021f403,
5414 +/* 0x0b0a: i2c_recv */
5415 + 0x32f400f8,
5416 + 0xf8c1c701,
5417 + 0xb00214b6,
5418 + 0x1ff52816,
5419 + 0x13a0013a,
5420 + 0x32980cf4,
5421 + 0xcc13a000,
5422 + 0x0031980c,
5423 + 0xf90231f4,
5424 + 0xf9e0f9d0,
5425 + 0x0067f1d0,
5426 + 0x0063f100,
5427 + 0x01679210,
5428 + 0xb60076bb,
5429 + 0x50f90465,
5430 + 0xbb046594,
5431 + 0x50bd0256,
5432 + 0xfc0475fd,
5433 + 0xec21f550,
5434 + 0x0464b60a,
5435 + 0xd6b0d0fc,
5436 + 0xb31bf500,
5437 + 0x0057f000,
5438 + 0xb60076bb,
5439 + 0x50f90465,
5440 + 0xbb046594,
5441 + 0x50bd0256,
5442 + 0xfc0475fd,
5443 + 0x9621f550,
5444 + 0x0464b60a,
5445 + 0x00d011f5,
5446 + 0xbbe0c5c7,
5447 0x65b60076,
5448 0x9450f904,
5449 0x56bb0465,
5450 0xfd50bd02,
5451 0x50fc0475,
5452 - 0x0a9d21f5,
5453 + 0x0a3b21f5,
5454 0xf50464b6,
5455 - 0xc700d011,
5456 - 0x76bbe0c5,
5457 + 0xf000ad11,
5458 + 0x76bb0157,
5459 0x0465b600,
5460 0x659450f9,
5461 0x0256bb04,
5462 0x75fd50bd,
5463 0xf550fc04,
5464 - 0xb60a4221,
5465 + 0xb60a9621,
5466 0x11f50464,
5467 - 0x57f000ad,
5468 - 0x0076bb01,
5469 - 0xf90465b6,
5470 - 0x04659450,
5471 - 0xbd0256bb,
5472 - 0x0475fd50,
5473 - 0x21f550fc,
5474 - 0x64b60a9d,
5475 - 0x8a11f504,
5476 - 0x0076bb00,
5477 - 0xf90465b6,
5478 - 0x04659450,
5479 - 0xbd0256bb,
5480 - 0x0475fd50,
5481 - 0x21f550fc,
5482 - 0x64b609f0,
5483 - 0x6a11f404,
5484 - 0xbbe05bcb,
5485 - 0x65b60076,
5486 - 0x9450f904,
5487 - 0x56bb0465,
5488 - 0xfd50bd02,
5489 - 0x50fc0475,
5490 - 0x093521f5,
5491 - 0xb90464b6,
5492 - 0x74bd025b,
5493 -/* 0x0c17: i2c_recv_not_rd08 */
5494 - 0xb0430ef4,
5495 - 0x1bf401d6,
5496 - 0x0057f03d,
5497 - 0x0a9d21f5,
5498 - 0xc73311f4,
5499 - 0x21f5e0c5,
5500 - 0x11f40a42,
5501 - 0x0057f029,
5502 - 0x0a9d21f5,
5503 - 0xc71f11f4,
5504 - 0x21f5e0b5,
5505 - 0x11f40a42,
5506 - 0x3521f515,
5507 - 0xc774bd09,
5508 - 0x1bf408c5,
5509 - 0x0232f409,
5510 -/* 0x0c57: i2c_recv_not_wr08 */
5511 -/* 0x0c57: i2c_recv_done */
5512 - 0xc7030ef4,
5513 - 0x21f5f8ce,
5514 - 0xe0fc0b02,
5515 - 0x12f4d0fc,
5516 - 0x027cb90a,
5517 - 0x033621f5,
5518 -/* 0x0c6c: i2c_recv_exit */
5519 -/* 0x0c6e: i2c_init */
5520 + 0x76bb008a,
5521 + 0x0465b600,
5522 + 0x659450f9,
5523 + 0x0256bb04,
5524 + 0x75fd50bd,
5525 + 0xf550fc04,
5526 + 0xb609e921,
5527 + 0x11f40464,
5528 + 0xe05bcb6a,
5529 + 0xb60076bb,
5530 + 0x50f90465,
5531 + 0xbb046594,
5532 + 0x50bd0256,
5533 + 0xfc0475fd,
5534 + 0x2e21f550,
5535 + 0x0464b609,
5536 + 0xbd025bb9,
5537 + 0x430ef474,
5538 +/* 0x0c10: i2c_recv_not_rd08 */
5539 + 0xf401d6b0,
5540 + 0x57f03d1b,
5541 + 0x9621f500,
5542 + 0x3311f40a,
5543 + 0xf5e0c5c7,
5544 + 0xf40a3b21,
5545 + 0x57f02911,
5546 + 0x9621f500,
5547 + 0x1f11f40a,
5548 + 0xf5e0b5c7,
5549 + 0xf40a3b21,
5550 + 0x21f51511,
5551 + 0x74bd092e,
5552 + 0xf408c5c7,
5553 + 0x32f4091b,
5554 + 0x030ef402,
5555 +/* 0x0c50: i2c_recv_not_wr08 */
5556 +/* 0x0c50: i2c_recv_done */
5557 + 0xf5f8cec7,
5558 + 0xfc0afb21,
5559 + 0xf4d0fce0,
5560 + 0x7cb90a12,
5561 + 0x3621f502,
5562 +/* 0x0c65: i2c_recv_exit */
5563 +/* 0x0c67: i2c_init */
5564 + 0xf800f803,
5565 +/* 0x0c69: test_recv */
5566 + 0xd817f100,
5567 + 0x0614b605,
5568 + 0xb60011cf,
5569 + 0x07f10110,
5570 + 0x04b605d8,
5571 + 0x0001d006,
5572 + 0xe7f104bd,
5573 + 0xe3f1d900,
5574 + 0x21f5134f,
5575 + 0x00f80256,
5576 +/* 0x0c90: test_init */
5577 + 0x0800e7f1,
5578 + 0x025621f5,
5579 +/* 0x0c9a: idle_recv */
5580 0x00f800f8,
5581 -/* 0x0c70: test_recv */
5582 - 0x05d817f1,
5583 - 0xcf0614b6,
5584 - 0x10b60011,
5585 - 0xd807f101,
5586 - 0x0604b605,
5587 - 0xbd0001d0,
5588 - 0x00e7f104,
5589 - 0x4fe3f1d9,
5590 - 0x5621f513,
5591 -/* 0x0c97: test_init */
5592 - 0xf100f802,
5593 - 0xf50800e7,
5594 - 0xf8025621,
5595 -/* 0x0ca1: idle_recv */
5596 -/* 0x0ca3: idle */
5597 - 0xf400f800,
5598 - 0x17f10031,
5599 - 0x14b605d4,
5600 - 0x0011cf06,
5601 - 0xf10110b6,
5602 - 0xb605d407,
5603 - 0x01d00604,
5604 -/* 0x0cbf: idle_loop */
5605 - 0xf004bd00,
5606 - 0x32f45817,
5607 -/* 0x0cc5: idle_proc */
5608 -/* 0x0cc5: idle_proc_exec */
5609 - 0xb910f902,
5610 - 0x21f5021e,
5611 - 0x10fc033f,
5612 - 0xf40911f4,
5613 - 0x0ef40231,
5614 -/* 0x0cd9: idle_proc_next */
5615 - 0x5810b6ef,
5616 - 0xf4061fb8,
5617 - 0x02f4e61b,
5618 - 0x0028f4dd,
5619 - 0x00bb0ef4,
5620 +/* 0x0c9c: idle */
5621 + 0xf10031f4,
5622 + 0xb605d417,
5623 + 0x11cf0614,
5624 + 0x0110b600,
5625 + 0x05d407f1,
5626 + 0xd00604b6,
5627 + 0x04bd0001,
5628 +/* 0x0cb8: idle_loop */
5629 + 0xf45817f0,
5630 +/* 0x0cbe: idle_proc */
5631 +/* 0x0cbe: idle_proc_exec */
5632 + 0x10f90232,
5633 + 0xf5021eb9,
5634 + 0xfc033f21,
5635 + 0x0911f410,
5636 + 0xf40231f4,
5637 +/* 0x0cd2: idle_proc_next */
5638 + 0x10b6ef0e,
5639 + 0x061fb858,
5640 + 0xf4e61bf4,
5641 + 0x28f4dd02,
5642 + 0xbb0ef400,
5643 + 0x00000000,
5644 + 0x00000000,
5645 0x00000000,
5646 0x00000000,
5647 0x00000000,
5648 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
5649 index ec03f9a4290b..1663bf943d77 100644
5650 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
5651 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
5652 @@ -82,15 +82,15 @@ memx_train_tail:
5653 // $r0 - zero
5654 memx_func_enter:
5655 #if NVKM_PPWR_CHIPSET == GT215
5656 - movw $r8 0x1610
5657 + mov $r8 0x1610
5658 nv_rd32($r7, $r8)
5659 imm32($r6, 0xfffffffc)
5660 and $r7 $r6
5661 - movw $r6 0x2
5662 + mov $r6 0x2
5663 or $r7 $r6
5664 nv_wr32($r8, $r7)
5665 #else
5666 - movw $r6 0x001620
5667 + mov $r6 0x001620
5668 imm32($r7, ~0x00000aa2);
5669 nv_rd32($r8, $r6)
5670 and $r8 $r7
5671 @@ -101,7 +101,7 @@ memx_func_enter:
5672 and $r8 $r7
5673 nv_wr32($r6, $r8)
5674
5675 - movw $r6 0x0026f0
5676 + mov $r6 0x0026f0
5677 nv_rd32($r8, $r6)
5678 and $r8 $r7
5679 nv_wr32($r6, $r8)
5680 @@ -136,19 +136,19 @@ memx_func_leave:
5681 bra nz #memx_func_leave_wait
5682
5683 #if NVKM_PPWR_CHIPSET == GT215
5684 - movw $r8 0x1610
5685 + mov $r8 0x1610
5686 nv_rd32($r7, $r8)
5687 imm32($r6, 0xffffffcc)
5688 and $r7 $r6
5689 nv_wr32($r8, $r7)
5690 #else
5691 - movw $r6 0x0026f0
5692 + mov $r6 0x0026f0
5693 imm32($r7, 0x00000001)
5694 nv_rd32($r8, $r6)
5695 or $r8 $r7
5696 nv_wr32($r6, $r8)
5697
5698 - movw $r6 0x001620
5699 + mov $r6 0x001620
5700 nv_rd32($r8, $r6)
5701 or $r8 $r7
5702 nv_wr32($r6, $r8)
5703 @@ -177,11 +177,11 @@ memx_func_wait_vblank:
5704 bra #memx_func_wait_vblank_fini
5705
5706 memx_func_wait_vblank_head1:
5707 - movw $r7 0x20
5708 + mov $r7 0x20
5709 bra #memx_func_wait_vblank_0
5710
5711 memx_func_wait_vblank_head0:
5712 - movw $r7 0x8
5713 + mov $r7 0x8
5714
5715 memx_func_wait_vblank_0:
5716 nv_iord($r6, NV_PPWR_INPUT)
5717 @@ -273,13 +273,13 @@ memx_func_train:
5718 // $r5 - outer loop counter
5719 // $r6 - inner loop counter
5720 // $r7 - entry counter (#memx_train_head + $r7)
5721 - movw $r5 0x3
5722 - movw $r7 0x0
5723 + mov $r5 0x3
5724 + mov $r7 0x0
5725
5726 // Read random memory to wake up... things
5727 imm32($r9, 0x700000)
5728 nv_rd32($r8,$r9)
5729 - movw $r14 0x2710
5730 + mov $r14 0x2710
5731 call(nsec)
5732
5733 memx_func_train_loop_outer:
5734 @@ -289,9 +289,9 @@ memx_func_train:
5735 nv_wr32($r9, $r8)
5736 push $r5
5737
5738 - movw $r6 0x0
5739 + mov $r6 0x0
5740 memx_func_train_loop_inner:
5741 - movw $r8 0x1111
5742 + mov $r8 0x1111
5743 mulu $r9 $r6 $r8
5744 shl b32 $r8 $r9 0x10
5745 or $r8 $r9
5746 @@ -315,7 +315,7 @@ memx_func_train:
5747
5748 // $r5 - inner inner loop counter
5749 // $r9 - result
5750 - movw $r5 0
5751 + mov $r5 0
5752 imm32($r9, 0x8300ffff)
5753 memx_func_train_loop_4x:
5754 imm32($r10, 0x100080)
5755 diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
5756 index 6f65846b1783..5b2a9f97ff04 100644
5757 --- a/drivers/gpu/drm/panel/panel-simple.c
5758 +++ b/drivers/gpu/drm/panel/panel-simple.c
5759 @@ -1250,7 +1250,7 @@ static const struct panel_desc ontat_yx700wv03 = {
5760 .width = 154,
5761 .height = 83,
5762 },
5763 - .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
5764 + .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
5765 };
5766
5767 static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
5768 diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
5769 index b70f9423379c..cab4d60060a0 100644
5770 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
5771 +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
5772 @@ -64,7 +64,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
5773 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
5774 */
5775 vma->vm_flags &= ~VM_PFNMAP;
5776 - vma->vm_pgoff = 0;
5777
5778 ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
5779 obj->size, rk_obj->dma_attrs);
5780 @@ -96,6 +95,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
5781 if (ret)
5782 return ret;
5783
5784 + /*
5785 + * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
5786 + * whole buffer from the start.
5787 + */
5788 + vma->vm_pgoff = 0;
5789 +
5790 obj = vma->vm_private_data;
5791
5792 return rockchip_drm_gem_object_mmap(obj, vma);
5793 diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
5794 index d401156490f3..4460ca46a350 100644
5795 --- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
5796 +++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
5797 @@ -129,10 +129,13 @@ static int sun4i_dclk_get_phase(struct clk_hw *hw)
5798 static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)
5799 {
5800 struct sun4i_dclk *dclk = hw_to_dclk(hw);
5801 + u32 val = degrees / 120;
5802 +
5803 + val <<= 28;
5804
5805 regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,
5806 GENMASK(29, 28),
5807 - degrees / 120);
5808 + val);
5809
5810 return 0;
5811 }
5812 diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
5813 index 818478b4c4f0..54639395aba0 100644
5814 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
5815 +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
5816 @@ -194,6 +194,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
5817 case VIRTGPU_PARAM_3D_FEATURES:
5818 value = vgdev->has_virgl_3d == true ? 1 : 0;
5819 break;
5820 + case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
5821 + value = 1;
5822 + break;
5823 default:
5824 return -EINVAL;
5825 }
5826 @@ -469,7 +472,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
5827 {
5828 struct virtio_gpu_device *vgdev = dev->dev_private;
5829 struct drm_virtgpu_get_caps *args = data;
5830 - int size;
5831 + unsigned size, host_caps_size;
5832 int i;
5833 int found_valid = -1;
5834 int ret;
5835 @@ -478,6 +481,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
5836 if (vgdev->num_capsets == 0)
5837 return -ENOSYS;
5838
5839 + /* don't allow userspace to pass 0 */
5840 + if (args->size == 0)
5841 + return -EINVAL;
5842 +
5843 spin_lock(&vgdev->display_info_lock);
5844 for (i = 0; i < vgdev->num_capsets; i++) {
5845 if (vgdev->capsets[i].id == args->cap_set_id) {
5846 @@ -493,11 +500,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
5847 return -EINVAL;
5848 }
5849
5850 - size = vgdev->capsets[found_valid].max_size;
5851 - if (args->size > size) {
5852 - spin_unlock(&vgdev->display_info_lock);
5853 - return -EINVAL;
5854 - }
5855 + host_caps_size = vgdev->capsets[found_valid].max_size;
5856 + /* only copy to user the minimum of the host caps size or the guest caps size */
5857 + size = min(args->size, host_caps_size);
5858
5859 list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
5860 if (cache_ent->id == args->cap_set_id &&
5861 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
5862 index 557a033fb610..8545488aa0cf 100644
5863 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
5864 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
5865 @@ -135,17 +135,24 @@
5866
5867 #else
5868
5869 -/* In the 32-bit version of this macro, we use "m" because there is no
5870 - * more register left for bp
5871 +/*
5872 + * In the 32-bit version of this macro, we store bp in a memory location
5873 + * because we've run out of registers.
5874 + * Now we can't reference that memory location while we've modified
5875 + * %esp or %ebp, so we first push it on the stack, just before we push
5876 + * %ebp, and then when we need it we read it from the stack where we
5877 + * just pushed it.
5878 */
5879 #define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
5880 port_num, magic, bp, \
5881 eax, ebx, ecx, edx, si, di) \
5882 ({ \
5883 - asm volatile ("push %%ebp;" \
5884 - "mov %12, %%ebp;" \
5885 + asm volatile ("push %12;" \
5886 + "push %%ebp;" \
5887 + "mov 0x04(%%esp), %%ebp;" \
5888 "rep outsb;" \
5889 - "pop %%ebp;" : \
5890 + "pop %%ebp;" \
5891 + "add $0x04, %%esp;" : \
5892 "=a"(eax), \
5893 "=b"(ebx), \
5894 "=c"(ecx), \
5895 @@ -167,10 +174,12 @@
5896 port_num, magic, bp, \
5897 eax, ebx, ecx, edx, si, di) \
5898 ({ \
5899 - asm volatile ("push %%ebp;" \
5900 - "mov %12, %%ebp;" \
5901 + asm volatile ("push %12;" \
5902 + "push %%ebp;" \
5903 + "mov 0x04(%%esp), %%ebp;" \
5904 "rep insb;" \
5905 - "pop %%ebp" : \
5906 + "pop %%ebp;" \
5907 + "add $0x04, %%esp;" : \
5908 "=a"(eax), \
5909 "=b"(ebx), \
5910 "=c"(ecx), \
5911 diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
5912 index 43617fb28b87..317c9c2c0a7c 100644
5913 --- a/drivers/hid/hid-roccat-kovaplus.c
5914 +++ b/drivers/hid/hid-roccat-kovaplus.c
5915 @@ -37,6 +37,8 @@ static uint kovaplus_convert_event_cpi(uint value)
5916 static void kovaplus_profile_activated(struct kovaplus_device *kovaplus,
5917 uint new_profile_index)
5918 {
5919 + if (new_profile_index >= ARRAY_SIZE(kovaplus->profile_settings))
5920 + return;
5921 kovaplus->actual_profile = new_profile_index;
5922 kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level;
5923 kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;
5924 diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
5925 index ce75dd4db7eb..2b31b84d0a5b 100644
5926 --- a/drivers/hwmon/nct6775.c
5927 +++ b/drivers/hwmon/nct6775.c
5928 @@ -1393,7 +1393,7 @@ static void nct6775_update_pwm(struct device *dev)
5929 duty_is_dc = data->REG_PWM_MODE[i] &&
5930 (nct6775_read_value(data, data->REG_PWM_MODE[i])
5931 & data->PWM_MODE_MASK[i]);
5932 - data->pwm_mode[i] = duty_is_dc;
5933 + data->pwm_mode[i] = !duty_is_dc;
5934
5935 fanmodecfg = nct6775_read_value(data, data->REG_FAN_MODE[i]);
5936 for (j = 0; j < ARRAY_SIZE(data->REG_PWM); j++) {
5937 @@ -2270,7 +2270,7 @@ show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf)
5938 struct nct6775_data *data = nct6775_update_device(dev);
5939 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
5940
5941 - return sprintf(buf, "%d\n", !data->pwm_mode[sattr->index]);
5942 + return sprintf(buf, "%d\n", data->pwm_mode[sattr->index]);
5943 }
5944
5945 static ssize_t
5946 @@ -2291,9 +2291,9 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
5947 if (val > 1)
5948 return -EINVAL;
5949
5950 - /* Setting DC mode is not supported for all chips/channels */
5951 + /* Setting DC mode (0) is not supported for all chips/channels */
5952 if (data->REG_PWM_MODE[nr] == 0) {
5953 - if (val)
5954 + if (!val)
5955 return -EINVAL;
5956 return count;
5957 }
5958 @@ -2302,7 +2302,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
5959 data->pwm_mode[nr] = val;
5960 reg = nct6775_read_value(data, data->REG_PWM_MODE[nr]);
5961 reg &= ~data->PWM_MODE_MASK[nr];
5962 - if (val)
5963 + if (!val)
5964 reg |= data->PWM_MODE_MASK[nr];
5965 nct6775_write_value(data, data->REG_PWM_MODE[nr], reg);
5966 mutex_unlock(&data->update_lock);
5967 diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
5968 index d659a02647d4..c3a8f682f834 100644
5969 --- a/drivers/hwmon/pmbus/adm1275.c
5970 +++ b/drivers/hwmon/pmbus/adm1275.c
5971 @@ -154,7 +154,7 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
5972 const struct adm1275_data *data = to_adm1275_data(info);
5973 int ret = 0;
5974
5975 - if (page)
5976 + if (page > 0)
5977 return -ENXIO;
5978
5979 switch (reg) {
5980 @@ -240,7 +240,7 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
5981 const struct adm1275_data *data = to_adm1275_data(info);
5982 int ret;
5983
5984 - if (page)
5985 + if (page > 0)
5986 return -ENXIO;
5987
5988 switch (reg) {
5989 diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
5990 index dd4883a19045..e951f9b87abb 100644
5991 --- a/drivers/hwmon/pmbus/max8688.c
5992 +++ b/drivers/hwmon/pmbus/max8688.c
5993 @@ -45,7 +45,7 @@ static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
5994 {
5995 int ret;
5996
5997 - if (page)
5998 + if (page > 0)
5999 return -ENXIO;
6000
6001 switch (reg) {
6002 diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
6003 index b4dec0841bc2..5c9dea7a40bc 100644
6004 --- a/drivers/i2c/busses/i2c-mv64xxx.c
6005 +++ b/drivers/i2c/busses/i2c-mv64xxx.c
6006 @@ -848,12 +848,16 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
6007 */
6008 if (of_device_is_compatible(np, "marvell,mv78230-i2c")) {
6009 drv_data->offload_enabled = true;
6010 - drv_data->errata_delay = true;
6011 + /* The delay is only needed in standard mode (100kHz) */
6012 + if (bus_freq <= 100000)
6013 + drv_data->errata_delay = true;
6014 }
6015
6016 if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) {
6017 drv_data->offload_enabled = false;
6018 - drv_data->errata_delay = true;
6019 + /* The delay is only needed in standard mode (100kHz) */
6020 + if (bus_freq <= 100000)
6021 + drv_data->errata_delay = true;
6022 }
6023
6024 if (of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
6025 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
6026 index bf9a2ad296ed..883fe2cdd42c 100644
6027 --- a/drivers/ide/ide-cd.c
6028 +++ b/drivers/ide/ide-cd.c
6029 @@ -1593,6 +1593,8 @@ static int idecd_open(struct block_device *bdev, fmode_t mode)
6030 struct cdrom_info *info;
6031 int rc = -ENXIO;
6032
6033 + check_disk_change(bdev);
6034 +
6035 mutex_lock(&ide_cd_mutex);
6036 info = ide_cd_get(bdev->bd_disk);
6037 if (!info)
6038 diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
6039 index 322cb67b07a9..28d18453c950 100644
6040 --- a/drivers/infiniband/core/multicast.c
6041 +++ b/drivers/infiniband/core/multicast.c
6042 @@ -724,21 +724,19 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
6043 {
6044 int ret;
6045 u16 gid_index;
6046 - u8 p;
6047 -
6048 - if (rdma_protocol_roce(device, port_num)) {
6049 - ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
6050 - gid_type, port_num,
6051 - ndev,
6052 - &gid_index);
6053 - } else if (rdma_protocol_ib(device, port_num)) {
6054 - ret = ib_find_cached_gid(device, &rec->port_gid,
6055 - IB_GID_TYPE_IB, NULL, &p,
6056 - &gid_index);
6057 - } else {
6058 - ret = -EINVAL;
6059 - }
6060
6061 + /* GID table is not based on the netdevice for IB link layer,
6062 + * so ignore ndev during search.
6063 + */
6064 + if (rdma_protocol_ib(device, port_num))
6065 + ndev = NULL;
6066 + else if (!rdma_protocol_roce(device, port_num))
6067 + return -EINVAL;
6068 +
6069 + ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
6070 + gid_type, port_num,
6071 + ndev,
6072 + &gid_index);
6073 if (ret)
6074 return ret;
6075
6076 diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
6077 index 81b742ca1639..4baf3b864a57 100644
6078 --- a/drivers/infiniband/core/sa_query.c
6079 +++ b/drivers/infiniband/core/sa_query.c
6080 @@ -1137,10 +1137,9 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
6081
6082 resolved_dev = dev_get_by_index(dev_addr.net,
6083 dev_addr.bound_dev_if);
6084 - if (resolved_dev->flags & IFF_LOOPBACK) {
6085 - dev_put(resolved_dev);
6086 - resolved_dev = idev;
6087 - dev_hold(resolved_dev);
6088 + if (!resolved_dev) {
6089 + dev_put(idev);
6090 + return -ENODEV;
6091 }
6092 ndev = ib_get_ndev_from_path(rec);
6093 rcu_read_lock();
6094 diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
6095 index f2f1c9fec0b1..a036d7087ddf 100644
6096 --- a/drivers/infiniband/core/ucma.c
6097 +++ b/drivers/infiniband/core/ucma.c
6098 @@ -1296,7 +1296,7 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
6099 if (IS_ERR(ctx))
6100 return PTR_ERR(ctx);
6101
6102 - if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
6103 + if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
6104 return -EINVAL;
6105
6106 optval = memdup_user((void __user *) (unsigned long) cmd.optval,
6107 diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
6108 index 7853b0caad32..148b313c6471 100644
6109 --- a/drivers/infiniband/hw/hfi1/chip.c
6110 +++ b/drivers/infiniband/hw/hfi1/chip.c
6111 @@ -5860,6 +5860,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
6112 u64 status;
6113 u32 sw_index;
6114 int i = 0;
6115 + unsigned long irq_flags;
6116
6117 sw_index = dd->hw_to_sw[hw_context];
6118 if (sw_index >= dd->num_send_contexts) {
6119 @@ -5869,10 +5870,12 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
6120 return;
6121 }
6122 sci = &dd->send_contexts[sw_index];
6123 + spin_lock_irqsave(&dd->sc_lock, irq_flags);
6124 sc = sci->sc;
6125 if (!sc) {
6126 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
6127 sw_index, hw_context);
6128 + spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
6129 return;
6130 }
6131
6132 @@ -5894,6 +5897,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
6133 */
6134 if (sc->type != SC_USER)
6135 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
6136 + spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
6137
6138 /*
6139 * Update the counters for the corresponding status bits.
6140 diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
6141 index 4b892ca2b13a..095912fb3201 100644
6142 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
6143 +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
6144 @@ -1515,6 +1515,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
6145 err_code = -EOVERFLOW;
6146 goto err;
6147 }
6148 + stag &= ~I40IW_CQPSQ_STAG_KEY_MASK;
6149 iwmr->stag = stag;
6150 iwmr->ibmr.rkey = stag;
6151 iwmr->ibmr.lkey = stag;
6152 diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
6153 index 19bc1c2186ff..8d59a5905ee8 100644
6154 --- a/drivers/infiniband/hw/mlx4/main.c
6155 +++ b/drivers/infiniband/hw/mlx4/main.c
6156 @@ -216,8 +216,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
6157 gid_tbl[i].version = 2;
6158 if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
6159 gid_tbl[i].type = 1;
6160 - else
6161 - memset(&gid_tbl[i].gid, 0, 12);
6162 }
6163 }
6164
6165 @@ -363,8 +361,13 @@ static int mlx4_ib_del_gid(struct ib_device *device,
6166 if (!gids) {
6167 ret = -ENOMEM;
6168 } else {
6169 - for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
6170 - memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
6171 + for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
6172 + memcpy(&gids[i].gid,
6173 + &port_gid_table->gids[i].gid,
6174 + sizeof(union ib_gid));
6175 + gids[i].gid_type =
6176 + port_gid_table->gids[i].gid_type;
6177 + }
6178 }
6179 }
6180 spin_unlock_bh(&iboe->lock);
6181 diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
6182 index 3cdcbfbd6a79..abb47e780070 100644
6183 --- a/drivers/infiniband/hw/mlx5/qp.c
6184 +++ b/drivers/infiniband/hw/mlx5/qp.c
6185 @@ -2809,8 +2809,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
6186 mlx5_ib_qp_disable_pagefaults(qp);
6187
6188 if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
6189 - !optab[mlx5_cur][mlx5_new])
6190 + !optab[mlx5_cur][mlx5_new]) {
6191 + err = -EINVAL;
6192 goto out;
6193 + }
6194
6195 op = optab[mlx5_cur][mlx5_new];
6196 optpar = ib_mask_to_mlx5_opt(attr_mask);
6197 @@ -4610,13 +4612,10 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
6198 int err;
6199
6200 err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
6201 - if (err) {
6202 + if (err)
6203 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
6204 - return err;
6205 - }
6206
6207 kfree(xrcd);
6208 -
6209 return 0;
6210 }
6211
6212 diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
6213 index 58e92bce6825..f937873e93df 100644
6214 --- a/drivers/infiniband/hw/qedr/main.c
6215 +++ b/drivers/infiniband/hw/qedr/main.c
6216 @@ -762,7 +762,8 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
6217
6218 dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
6219 if (!dev->num_cnq) {
6220 - DP_ERR(dev, "not enough CNQ resources.\n");
6221 + DP_ERR(dev, "Failed. At least one CNQ is required.\n");
6222 + rc = -ENOMEM;
6223 goto init_err;
6224 }
6225
6226 diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
6227 index 35d5b89decb4..cd0408c2b376 100644
6228 --- a/drivers/infiniband/hw/qedr/verbs.c
6229 +++ b/drivers/infiniband/hw/qedr/verbs.c
6230 @@ -1888,18 +1888,23 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
6231 SET_FIELD(qp_params.modify_flags,
6232 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
6233
6234 - qp_params.ack_timeout = attr->timeout;
6235 - if (attr->timeout) {
6236 - u32 temp;
6237 -
6238 - temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
6239 - /* FW requires [msec] */
6240 - qp_params.ack_timeout = temp;
6241 - } else {
6242 - /* Infinite */
6243 + /* The received timeout value is an exponent used like this:
6244 + * "12.7.34 LOCAL ACK TIMEOUT
6245 + * Value representing the transport (ACK) timeout for use by
6246 + * the remote, expressed as: 4.096 * 2^timeout [usec]"
6247 + * The FW expects timeout in msec so we need to divide the usec
6248 + * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
6249 + * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
6250 + * The value of zero means infinite so we use a 'max_t' to make
6251 + * sure that sub 1 msec values will be configured as 1 msec.
6252 + */
6253 + if (attr->timeout)
6254 + qp_params.ack_timeout =
6255 + 1 << max_t(int, attr->timeout - 8, 0);
6256 + else
6257 qp_params.ack_timeout = 0;
6258 - }
6259 }
6260 +
6261 if (attr_mask & IB_QP_RETRY_CNT) {
6262 SET_FIELD(qp_params.modify_flags,
6263 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
6264 @@ -2807,6 +2812,11 @@ int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
6265
6266 switch (wr->opcode) {
6267 case IB_WR_SEND_WITH_IMM:
6268 + if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
6269 + rc = -EINVAL;
6270 + *bad_wr = wr;
6271 + break;
6272 + }
6273 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
6274 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
6275 swqe->wqe_size = 2;
6276 @@ -2848,6 +2858,11 @@ int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
6277 break;
6278
6279 case IB_WR_RDMA_WRITE_WITH_IMM:
6280 + if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
6281 + rc = -EINVAL;
6282 + *bad_wr = wr;
6283 + break;
6284 + }
6285 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
6286 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
6287
6288 @@ -3467,7 +3482,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
6289 {
6290 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
6291 struct qedr_cq *cq = get_qedr_cq(ibcq);
6292 - union rdma_cqe *cqe = cq->latest_cqe;
6293 + union rdma_cqe *cqe;
6294 u32 old_cons, new_cons;
6295 unsigned long flags;
6296 int update = 0;
6297 @@ -3477,6 +3492,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
6298 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
6299
6300 spin_lock_irqsave(&cq->cq_lock, flags);
6301 + cqe = cq->latest_cqe;
6302 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
6303 while (num_entries && is_valid_cqe(cq, cqe)) {
6304 struct qedr_qp *qp;
6305 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
6306 index 0df7d4504c06..17c5bc7e8957 100644
6307 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
6308 +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
6309 @@ -2119,6 +2119,9 @@ static struct net_device *ipoib_add_port(const char *format,
6310 goto event_failed;
6311 }
6312
6313 + /* call event handler to ensure pkey in sync */
6314 + queue_work(ipoib_workqueue, &priv->flush_heavy);
6315 +
6316 result = register_netdev(priv->dev);
6317 if (result) {
6318 printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
6319 diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
6320 index bee267424972..5cbf17aa8443 100644
6321 --- a/drivers/input/mouse/psmouse-base.c
6322 +++ b/drivers/input/mouse/psmouse-base.c
6323 @@ -937,6 +937,21 @@ static void psmouse_apply_defaults(struct psmouse *psmouse)
6324 psmouse->pt_deactivate = NULL;
6325 }
6326
6327 +static bool psmouse_do_detect(int (*detect)(struct psmouse *, bool),
6328 + struct psmouse *psmouse, bool allow_passthrough,
6329 + bool set_properties)
6330 +{
6331 + if (psmouse->ps2dev.serio->id.type == SERIO_PS_PSTHRU &&
6332 + !allow_passthrough) {
6333 + return false;
6334 + }
6335 +
6336 + if (set_properties)
6337 + psmouse_apply_defaults(psmouse);
6338 +
6339 + return detect(psmouse, set_properties) == 0;
6340 +}
6341 +
6342 static bool psmouse_try_protocol(struct psmouse *psmouse,
6343 enum psmouse_type type,
6344 unsigned int *max_proto,
6345 @@ -948,15 +963,8 @@ static bool psmouse_try_protocol(struct psmouse *psmouse,
6346 if (!proto)
6347 return false;
6348
6349 - if (psmouse->ps2dev.serio->id.type == SERIO_PS_PSTHRU &&
6350 - !proto->try_passthru) {
6351 - return false;
6352 - }
6353 -
6354 - if (set_properties)
6355 - psmouse_apply_defaults(psmouse);
6356 -
6357 - if (proto->detect(psmouse, set_properties) != 0)
6358 + if (!psmouse_do_detect(proto->detect, psmouse, proto->try_passthru,
6359 + set_properties))
6360 return false;
6361
6362 if (set_properties && proto->init && init_allowed) {
6363 @@ -988,8 +996,8 @@ static int psmouse_extensions(struct psmouse *psmouse,
6364 * Always check for focaltech, this is safe as it uses pnp-id
6365 * matching.
6366 */
6367 - if (psmouse_try_protocol(psmouse, PSMOUSE_FOCALTECH,
6368 - &max_proto, set_properties, false)) {
6369 + if (psmouse_do_detect(focaltech_detect,
6370 + psmouse, false, set_properties)) {
6371 if (max_proto > PSMOUSE_IMEX &&
6372 IS_ENABLED(CONFIG_MOUSE_PS2_FOCALTECH) &&
6373 (!set_properties || focaltech_init(psmouse) == 0)) {
6374 @@ -1035,8 +1043,8 @@ static int psmouse_extensions(struct psmouse *psmouse,
6375 * probing for IntelliMouse.
6376 */
6377 if (max_proto > PSMOUSE_PS2 &&
6378 - psmouse_try_protocol(psmouse, PSMOUSE_SYNAPTICS, &max_proto,
6379 - set_properties, false)) {
6380 + psmouse_do_detect(synaptics_detect,
6381 + psmouse, false, set_properties)) {
6382 synaptics_hardware = true;
6383
6384 if (max_proto > PSMOUSE_IMEX) {
6385 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
6386 index 88bbc8ccc5e3..1612d3a22d42 100644
6387 --- a/drivers/iommu/intel-iommu.c
6388 +++ b/drivers/iommu/intel-iommu.c
6389 @@ -1612,8 +1612,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
6390 * flush. However, device IOTLB doesn't need to be flushed in this case.
6391 */
6392 if (!cap_caching_mode(iommu->cap) || !map)
6393 - iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
6394 - addr, mask);
6395 + iommu_flush_dev_iotlb(domain, addr, mask);
6396 }
6397
6398 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
6399 diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
6400 index aee1c60d7ab5..cc58b1b272c0 100644
6401 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
6402 +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
6403 @@ -133,6 +133,8 @@ static int __init its_pci_of_msi_init(void)
6404
6405 for (np = of_find_matching_node(NULL, its_device_id); np;
6406 np = of_find_matching_node(np, its_device_id)) {
6407 + if (!of_device_is_available(np))
6408 + continue;
6409 if (!of_property_read_bool(np, "msi-controller"))
6410 continue;
6411
6412 diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
6413 index 470b4aa7d62c..e4768fcdc672 100644
6414 --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
6415 +++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
6416 @@ -80,6 +80,8 @@ static int __init its_pmsi_init(void)
6417
6418 for (np = of_find_matching_node(NULL, its_device_id); np;
6419 np = of_find_matching_node(np, its_device_id)) {
6420 + if (!of_device_is_available(np))
6421 + continue;
6422 if (!of_property_read_bool(np, "msi-controller"))
6423 continue;
6424
6425 diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
6426 index ac15e5d5d9b2..558c7589c329 100644
6427 --- a/drivers/irqchip/irq-gic-v3-its.c
6428 +++ b/drivers/irqchip/irq-gic-v3-its.c
6429 @@ -1807,6 +1807,8 @@ static int __init its_of_probe(struct device_node *node)
6430
6431 for (np = of_find_matching_node(node, its_device_id); np;
6432 np = of_find_matching_node(np, its_device_id)) {
6433 + if (!of_device_is_available(np))
6434 + continue;
6435 if (!of_property_read_bool(np, "msi-controller")) {
6436 pr_warn("%s: no msi-controller property, ITS ignored\n",
6437 np->full_name);
6438 diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
6439 index 100c80e48190..0b1d5bdd0862 100644
6440 --- a/drivers/irqchip/irq-gic-v3.c
6441 +++ b/drivers/irqchip/irq-gic-v3.c
6442 @@ -601,7 +601,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
6443 MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
6444 tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
6445
6446 - pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
6447 + pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
6448 gic_write_sgi1r(val);
6449 }
6450
6451 diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
6452 index 775527135b93..25852e399ab2 100644
6453 --- a/drivers/macintosh/rack-meter.c
6454 +++ b/drivers/macintosh/rack-meter.c
6455 @@ -154,8 +154,8 @@ static void rackmeter_do_pause(struct rackmeter *rm, int pause)
6456 DBDMA_DO_STOP(rm->dma_regs);
6457 return;
6458 }
6459 - memset(rdma->buf1, 0, ARRAY_SIZE(rdma->buf1));
6460 - memset(rdma->buf2, 0, ARRAY_SIZE(rdma->buf2));
6461 + memset(rdma->buf1, 0, sizeof(rdma->buf1));
6462 + memset(rdma->buf2, 0, sizeof(rdma->buf2));
6463
6464 rm->dma_buf_v->mark = 0;
6465
6466 diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
6467 index d23337e8c4ee..dd344ee9e62b 100644
6468 --- a/drivers/md/bcache/alloc.c
6469 +++ b/drivers/md/bcache/alloc.c
6470 @@ -284,8 +284,10 @@ do { \
6471 break; \
6472 \
6473 mutex_unlock(&(ca)->set->bucket_lock); \
6474 - if (kthread_should_stop()) \
6475 + if (kthread_should_stop()) { \
6476 + set_current_state(TASK_RUNNING); \
6477 return 0; \
6478 + } \
6479 \
6480 schedule(); \
6481 mutex_lock(&(ca)->set->bucket_lock); \
6482 diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
6483 index 02619cabda8b..7fe7df56fa33 100644
6484 --- a/drivers/md/bcache/bcache.h
6485 +++ b/drivers/md/bcache/bcache.h
6486 @@ -904,7 +904,7 @@ void bcache_write_super(struct cache_set *);
6487
6488 int bch_flash_dev_create(struct cache_set *c, uint64_t size);
6489
6490 -int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
6491 +int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *);
6492 void bch_cached_dev_detach(struct cached_dev *);
6493 void bch_cached_dev_run(struct cached_dev *);
6494 void bcache_device_stop(struct bcache_device *);
6495 diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
6496 index cac297f8170e..cf7c68920b33 100644
6497 --- a/drivers/md/bcache/btree.c
6498 +++ b/drivers/md/bcache/btree.c
6499 @@ -1864,14 +1864,17 @@ void bch_initial_gc_finish(struct cache_set *c)
6500 */
6501 for_each_cache(ca, c, i) {
6502 for_each_bucket(b, ca) {
6503 - if (fifo_full(&ca->free[RESERVE_PRIO]))
6504 + if (fifo_full(&ca->free[RESERVE_PRIO]) &&
6505 + fifo_full(&ca->free[RESERVE_BTREE]))
6506 break;
6507
6508 if (bch_can_invalidate_bucket(ca, b) &&
6509 !GC_MARK(b)) {
6510 __bch_invalidate_one_bucket(ca, b);
6511 - fifo_push(&ca->free[RESERVE_PRIO],
6512 - b - ca->buckets);
6513 + if (!fifo_push(&ca->free[RESERVE_PRIO],
6514 + b - ca->buckets))
6515 + fifo_push(&ca->free[RESERVE_BTREE],
6516 + b - ca->buckets);
6517 }
6518 }
6519 }
6520 diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
6521 index edb8d1a1a69f..bd6f6f4b4256 100644
6522 --- a/drivers/md/bcache/request.c
6523 +++ b/drivers/md/bcache/request.c
6524 @@ -633,11 +633,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
6525 static void search_free(struct closure *cl)
6526 {
6527 struct search *s = container_of(cl, struct search, cl);
6528 - bio_complete(s);
6529
6530 if (s->iop.bio)
6531 bio_put(s->iop.bio);
6532
6533 + bio_complete(s);
6534 closure_debug_destroy(cl);
6535 mempool_free(s, s->d->c->search);
6536 }
6537 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
6538 index 4af7cd423c71..894992ae9be0 100644
6539 --- a/drivers/md/bcache/super.c
6540 +++ b/drivers/md/bcache/super.c
6541 @@ -938,7 +938,8 @@ void bch_cached_dev_detach(struct cached_dev *dc)
6542 cached_dev_put(dc);
6543 }
6544
6545 -int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
6546 +int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
6547 + uint8_t *set_uuid)
6548 {
6549 uint32_t rtime = cpu_to_le32(get_seconds());
6550 struct uuid_entry *u;
6551 @@ -947,7 +948,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
6552
6553 bdevname(dc->bdev, buf);
6554
6555 - if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
6556 + if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
6557 + (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
6558 return -ENOENT;
6559
6560 if (dc->disk.c) {
6561 @@ -1191,7 +1193,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
6562
6563 list_add(&dc->list, &uncached_devices);
6564 list_for_each_entry(c, &bch_cache_sets, list)
6565 - bch_cached_dev_attach(dc, c);
6566 + bch_cached_dev_attach(dc, c, NULL);
6567
6568 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
6569 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
6570 @@ -1714,7 +1716,7 @@ static void run_cache_set(struct cache_set *c)
6571 bcache_write_super(c);
6572
6573 list_for_each_entry_safe(dc, t, &uncached_devices, list)
6574 - bch_cached_dev_attach(dc, c);
6575 + bch_cached_dev_attach(dc, c, NULL);
6576
6577 flash_devs_run(c);
6578
6579 @@ -1831,6 +1833,7 @@ void bch_cache_release(struct kobject *kobj)
6580 static int cache_alloc(struct cache *ca)
6581 {
6582 size_t free;
6583 + size_t btree_buckets;
6584 struct bucket *b;
6585
6586 __module_get(THIS_MODULE);
6587 @@ -1840,9 +1843,19 @@ static int cache_alloc(struct cache *ca)
6588 ca->journal.bio.bi_max_vecs = 8;
6589 ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
6590
6591 + /*
6592 + * When ca->sb.njournal_buckets is not zero, a journal exists,
6593 + * and tree nodes may split during bch_journal_replay(),
6594 + * so buckets of the RESERVE_BTREE type are needed.
6595 + * The worst case is that all journal buckets contain valid
6596 + * journal entries and all the keys need to be replayed,
6597 + * so the number of RESERVE_BTREE buckets should be as large
6598 + * as the number of journal buckets
6599 + */
6600 + btree_buckets = ca->sb.njournal_buckets ?: 8;
6601 free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
6602
6603 - if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
6604 + if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) ||
6605 !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
6606 !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
6607 !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
6608 diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
6609 index 4fbb5532f24c..5a5c1f1bd8a5 100644
6610 --- a/drivers/md/bcache/sysfs.c
6611 +++ b/drivers/md/bcache/sysfs.c
6612 @@ -191,7 +191,7 @@ STORE(__cached_dev)
6613 {
6614 struct cached_dev *dc = container_of(kobj, struct cached_dev,
6615 disk.kobj);
6616 - ssize_t v = size;
6617 + ssize_t v;
6618 struct cache_set *c;
6619 struct kobj_uevent_env *env;
6620
6621 @@ -263,17 +263,20 @@ STORE(__cached_dev)
6622 }
6623
6624 if (attr == &sysfs_attach) {
6625 - if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
6626 + uint8_t set_uuid[16];
6627 +
6628 + if (bch_parse_uuid(buf, set_uuid) < 16)
6629 return -EINVAL;
6630
6631 + v = -ENOENT;
6632 list_for_each_entry(c, &bch_cache_sets, list) {
6633 - v = bch_cached_dev_attach(dc, c);
6634 + v = bch_cached_dev_attach(dc, c, set_uuid);
6635 if (!v)
6636 return size;
6637 }
6638
6639 pr_err("Can't attach %s: cache set not found", buf);
6640 - size = v;
6641 + return v;
6642 }
6643
6644 if (attr == &sysfs_detach && dc->disk.c)
6645 diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
6646 index 4ce2b19fe120..bb7aa31c2a08 100644
6647 --- a/drivers/md/bcache/writeback.c
6648 +++ b/drivers/md/bcache/writeback.c
6649 @@ -420,18 +420,27 @@ static int bch_writeback_thread(void *arg)
6650
6651 while (!kthread_should_stop()) {
6652 down_write(&dc->writeback_lock);
6653 - if (!atomic_read(&dc->has_dirty) ||
6654 - (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
6655 - !dc->writeback_running)) {
6656 + set_current_state(TASK_INTERRUPTIBLE);
6657 + /*
6658 + * If the bcache device is detaching, skip here and continue
6659 + * to perform writeback. Otherwise, if no dirty data on cache,
6660 + * or there is dirty data on cache but writeback is disabled,
6661 + * the writeback thread should sleep here and wait for others
6662 + * to wake it up.
6663 + */
6664 + if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
6665 + (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
6666 up_write(&dc->writeback_lock);
6667 - set_current_state(TASK_INTERRUPTIBLE);
6668
6669 - if (kthread_should_stop())
6670 + if (kthread_should_stop()) {
6671 + set_current_state(TASK_RUNNING);
6672 return 0;
6673 + }
6674
6675 schedule();
6676 continue;
6677 }
6678 + set_current_state(TASK_RUNNING);
6679
6680 searched_full_index = refill_dirty(dc);
6681
6682 @@ -441,6 +450,14 @@ static int bch_writeback_thread(void *arg)
6683 cached_dev_put(dc);
6684 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
6685 bch_write_bdev_super(dc, NULL);
6686 + /*
6687 + * If bcache device is detaching via sysfs interface,
6688 + * writeback thread should stop after there is no dirty
6689 + * data on cache. BCACHE_DEV_DETACHING flag is set in
6690 + * bch_cached_dev_detach().
6691 + */
6692 + if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
6693 + break;
6694 }
6695
6696 up_write(&dc->writeback_lock);
6697 diff --git a/drivers/md/md.c b/drivers/md/md.c
6698 index a7bc70334f0e..cae8f3c12e32 100644
6699 --- a/drivers/md/md.c
6700 +++ b/drivers/md/md.c
6701 @@ -8200,6 +8200,19 @@ void md_do_sync(struct md_thread *thread)
6702 set_mask_bits(&mddev->flags, 0,
6703 BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
6704
6705 + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
6706 + !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
6707 + mddev->delta_disks > 0 &&
6708 + mddev->pers->finish_reshape &&
6709 + mddev->pers->size &&
6710 + mddev->queue) {
6711 + mddev_lock_nointr(mddev);
6712 + md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
6713 + mddev_unlock(mddev);
6714 + set_capacity(mddev->gendisk, mddev->array_sectors);
6715 + revalidate_disk(mddev->gendisk);
6716 + }
6717 +
6718 spin_lock(&mddev->lock);
6719 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6720 /* We completed so min/max setting can be forgotten if used. */
6721 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
6722 index 81a78757bc78..998102697619 100644
6723 --- a/drivers/md/raid1.c
6724 +++ b/drivers/md/raid1.c
6725 @@ -1673,6 +1673,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
6726 struct md_rdev *repl =
6727 conf->mirrors[conf->raid_disks + number].rdev;
6728 freeze_array(conf, 0);
6729 + if (atomic_read(&repl->nr_pending)) {
6730 + /* Some queued IO on retry_list still holds
6731 + * repl, so we cannot set the replacement to
6732 + * NULL; that would risk an rdev NULL pointer
6733 + * dereference in sync_request_write and
6734 + * handle_write_finished.
6735 + */
6736 + err = -EBUSY;
6737 + unfreeze_array(conf);
6738 + goto abort;
6739 + }
6740 clear_bit(Replacement, &repl->flags);
6741 p->rdev = repl;
6742 conf->mirrors[conf->raid_disks + number].rdev = NULL;
6743 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
6744 index 6a7b9b1dcfe3..b138b5cba286 100644
6745 --- a/drivers/md/raid10.c
6746 +++ b/drivers/md/raid10.c
6747 @@ -2636,7 +2636,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
6748 for (m = 0; m < conf->copies; m++) {
6749 int dev = r10_bio->devs[m].devnum;
6750 rdev = conf->mirrors[dev].rdev;
6751 - if (r10_bio->devs[m].bio == NULL)
6752 + if (r10_bio->devs[m].bio == NULL ||
6753 + r10_bio->devs[m].bio->bi_end_io == NULL)
6754 continue;
6755 if (!r10_bio->devs[m].bio->bi_error) {
6756 rdev_clear_badblocks(
6757 @@ -2651,7 +2652,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
6758 md_error(conf->mddev, rdev);
6759 }
6760 rdev = conf->mirrors[dev].replacement;
6761 - if (r10_bio->devs[m].repl_bio == NULL)
6762 + if (r10_bio->devs[m].repl_bio == NULL ||
6763 + r10_bio->devs[m].repl_bio->bi_end_io == NULL)
6764 continue;
6765
6766 if (!r10_bio->devs[m].repl_bio->bi_error) {
6767 @@ -4682,17 +4684,11 @@ static void raid10_finish_reshape(struct mddev *mddev)
6768 return;
6769
6770 if (mddev->delta_disks > 0) {
6771 - sector_t size = raid10_size(mddev, 0, 0);
6772 - md_set_array_sectors(mddev, size);
6773 if (mddev->recovery_cp > mddev->resync_max_sectors) {
6774 mddev->recovery_cp = mddev->resync_max_sectors;
6775 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6776 }
6777 - mddev->resync_max_sectors = size;
6778 - if (mddev->queue) {
6779 - set_capacity(mddev->gendisk, mddev->array_sectors);
6780 - revalidate_disk(mddev->gendisk);
6781 - }
6782 + mddev->resync_max_sectors = mddev->array_sectors;
6783 } else {
6784 int d;
6785 rcu_read_lock();
6786 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
6787 index 86ba7851e881..e43b9f80bb1d 100644
6788 --- a/drivers/md/raid5.c
6789 +++ b/drivers/md/raid5.c
6790 @@ -2049,15 +2049,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
6791 static int grow_stripes(struct r5conf *conf, int num)
6792 {
6793 struct kmem_cache *sc;
6794 + size_t namelen = sizeof(conf->cache_name[0]);
6795 int devs = max(conf->raid_disks, conf->previous_raid_disks);
6796
6797 if (conf->mddev->gendisk)
6798 - sprintf(conf->cache_name[0],
6799 + snprintf(conf->cache_name[0], namelen,
6800 "raid%d-%s", conf->level, mdname(conf->mddev));
6801 else
6802 - sprintf(conf->cache_name[0],
6803 + snprintf(conf->cache_name[0], namelen,
6804 "raid%d-%p", conf->level, conf->mddev);
6805 - sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
6806 + snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
6807
6808 conf->active_name = 0;
6809 sc = kmem_cache_create(conf->cache_name[conf->active_name],
6810 @@ -7614,13 +7615,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
6811
6812 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6813
6814 - if (mddev->delta_disks > 0) {
6815 - md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
6816 - if (mddev->queue) {
6817 - set_capacity(mddev->gendisk, mddev->array_sectors);
6818 - revalidate_disk(mddev->gendisk);
6819 - }
6820 - } else {
6821 + if (mddev->delta_disks <= 0) {
6822 int d;
6823 spin_lock_irq(&conf->device_lock);
6824 mddev->degraded = calc_degraded(conf);
6825 diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
6826 index 50dd6bd02951..524c8e0b72fd 100644
6827 --- a/drivers/mmc/host/sdhci-iproc.c
6828 +++ b/drivers/mmc/host/sdhci-iproc.c
6829 @@ -33,6 +33,8 @@ struct sdhci_iproc_host {
6830 const struct sdhci_iproc_data *data;
6831 u32 shadow_cmd;
6832 u32 shadow_blk;
6833 + bool is_cmd_shadowed;
6834 + bool is_blk_shadowed;
6835 };
6836
6837 #define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
6838 @@ -48,8 +50,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)
6839
6840 static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
6841 {
6842 - u32 val = sdhci_iproc_readl(host, (reg & ~3));
6843 - u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
6844 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
6845 + struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
6846 + u32 val;
6847 + u16 word;
6848 +
6849 + if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
6850 + /* Get the saved transfer mode */
6851 + val = iproc_host->shadow_cmd;
6852 + } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
6853 + iproc_host->is_blk_shadowed) {
6854 + /* Get the saved block info */
6855 + val = iproc_host->shadow_blk;
6856 + } else {
6857 + val = sdhci_iproc_readl(host, (reg & ~3));
6858 + }
6859 + word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
6860 return word;
6861 }
6862
6863 @@ -105,13 +121,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
6864
6865 if (reg == SDHCI_COMMAND) {
6866 /* Write the block now as we are issuing a command */
6867 - if (iproc_host->shadow_blk != 0) {
6868 + if (iproc_host->is_blk_shadowed) {
6869 sdhci_iproc_writel(host, iproc_host->shadow_blk,
6870 SDHCI_BLOCK_SIZE);
6871 - iproc_host->shadow_blk = 0;
6872 + iproc_host->is_blk_shadowed = false;
6873 }
6874 oldval = iproc_host->shadow_cmd;
6875 - } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
6876 + iproc_host->is_cmd_shadowed = false;
6877 + } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
6878 + iproc_host->is_blk_shadowed) {
6879 /* Block size and count are stored in shadow reg */
6880 oldval = iproc_host->shadow_blk;
6881 } else {
6882 @@ -123,9 +141,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
6883 if (reg == SDHCI_TRANSFER_MODE) {
6884 /* Save the transfer mode until the command is issued */
6885 iproc_host->shadow_cmd = newval;
6886 + iproc_host->is_cmd_shadowed = true;
6887 } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
6888 /* Save the block info until the command is issued */
6889 iproc_host->shadow_blk = newval;
6890 + iproc_host->is_blk_shadowed = true;
6891 } else {
6892 /* Command or other regular 32-bit write */
6893 sdhci_iproc_writel(host, newval, reg & ~3);
6894 @@ -176,7 +196,6 @@ static const struct sdhci_iproc_data iproc_data = {
6895 .caps1 = SDHCI_DRIVER_TYPE_C |
6896 SDHCI_DRIVER_TYPE_D |
6897 SDHCI_SUPPORT_DDR50,
6898 - .mmc_caps = MMC_CAP_1_8V_DDR,
6899 };
6900
6901 static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
6902 diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
6903 index 49f4cafe5438..86a32fe58468 100644
6904 --- a/drivers/net/ethernet/broadcom/bgmac.c
6905 +++ b/drivers/net/ethernet/broadcom/bgmac.c
6906 @@ -529,7 +529,8 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
6907 int i;
6908
6909 for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
6910 - int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
6911 + u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1);
6912 + unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN;
6913
6914 slot = &ring->slots[i];
6915 dev_kfree_skb(slot->skb);
6916 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
6917 index 3aa993bbafd9..ca57eb56c717 100644
6918 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
6919 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
6920 @@ -3401,6 +3401,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
6921 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6922 struct hwrm_vnic_tpa_cfg_input req = {0};
6923
6924 + if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6925 + return 0;
6926 +
6927 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
6928
6929 if (tpa_flags) {
6930 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
6931 index 3ec32d7c5866..c395b21cb57b 100644
6932 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
6933 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
6934 @@ -836,8 +836,6 @@ static int setup_fw_sge_queues(struct adapter *adap)
6935
6936 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
6937 adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
6938 - if (err)
6939 - t4_free_sge_resources(adap);
6940 return err;
6941 }
6942
6943 @@ -4940,6 +4938,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6944 if (err)
6945 goto out_free_dev;
6946
6947 + err = setup_fw_sge_queues(adapter);
6948 + if (err) {
6949 + dev_err(adapter->pdev_dev,
6950 + "FW sge queue allocation failed, err %d", err);
6951 + goto out_free_dev;
6952 + }
6953 +
6954 /*
6955 * The card is now ready to go. If any errors occur during device
6956 * registration we do not fail the whole card but rather proceed only
6957 @@ -4983,7 +4988,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6958 }
6959
6960 print_adapter_info(adapter);
6961 - setup_fw_sge_queues(adapter);
6962 return 0;
6963
6964 sriov:
6965 @@ -5035,6 +5039,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6966 #endif
6967
6968 out_free_dev:
6969 + t4_free_sge_resources(adapter);
6970 free_some_resources(adapter);
6971 if (adapter->flags & USING_MSIX)
6972 free_msix_info(adapter);
6973 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
6974 index 2471ff465d5c..23d6c44dc459 100644
6975 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
6976 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
6977 @@ -342,6 +342,7 @@ static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
6978 {
6979 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
6980
6981 + adap->sge.uld_rxq_info[uld_type] = NULL;
6982 kfree(rxq_info->rspq_id);
6983 kfree(rxq_info->uldrxq);
6984 kfree(rxq_info);
6985 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
6986 index 48f82ab6c25b..dda63b26e370 100644
6987 --- a/drivers/net/ethernet/cisco/enic/enic_main.c
6988 +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
6989 @@ -1726,6 +1726,8 @@ static int enic_open(struct net_device *netdev)
6990 }
6991
6992 for (i = 0; i < enic->rq_count; i++) {
6993 + /* enable rq before updating rq desc */
6994 + vnic_rq_enable(&enic->rq[i]);
6995 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
6996 /* Need at least one buffer on ring to get going */
6997 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
6998 @@ -1737,8 +1739,6 @@ static int enic_open(struct net_device *netdev)
6999
7000 for (i = 0; i < enic->wq_count; i++)
7001 vnic_wq_enable(&enic->wq[i]);
7002 - for (i = 0; i < enic->rq_count; i++)
7003 - vnic_rq_enable(&enic->rq[i]);
7004
7005 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
7006 enic_dev_add_station_addr(enic);
7007 @@ -1765,8 +1765,12 @@ static int enic_open(struct net_device *netdev)
7008 return 0;
7009
7010 err_out_free_rq:
7011 - for (i = 0; i < enic->rq_count; i++)
7012 + for (i = 0; i < enic->rq_count; i++) {
7013 + err = vnic_rq_disable(&enic->rq[i]);
7014 + if (err)
7015 + return err;
7016 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
7017 + }
7018 enic_dev_notify_unset(enic);
7019 err_out_free_intr:
7020 enic_unset_affinity_hint(enic);
7021 diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
7022 index c88918c4c5f3..641b916f122b 100644
7023 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
7024 +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
7025 @@ -1036,7 +1036,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
7026 set_bucket(dtsec->regs, bucket, true);
7027
7028 /* Create element to be added to the driver hash table */
7029 - hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
7030 + hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
7031 if (!hash_entry)
7032 return -ENOMEM;
7033 hash_entry->addr = addr;
7034 diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
7035 index e3b41ba95168..60bd1b36df60 100644
7036 --- a/drivers/net/ethernet/freescale/gianfar.c
7037 +++ b/drivers/net/ethernet/freescale/gianfar.c
7038 @@ -2935,7 +2935,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
7039 static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
7040 struct sk_buff *skb, bool first)
7041 {
7042 - unsigned int size = lstatus & BD_LENGTH_MASK;
7043 + int size = lstatus & BD_LENGTH_MASK;
7044 struct page *page = rxb->page;
7045 bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
7046
7047 @@ -2950,11 +2950,16 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
7048 if (last)
7049 size -= skb->len;
7050
7051 - /* in case the last fragment consisted only of the FCS */
7052 + /* Add the last fragment if it contains something other than
7053 + * the FCS, otherwise drop it and trim off any part of the FCS
7054 + * that was already received.
7055 + */
7056 if (size > 0)
7057 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
7058 rxb->page_offset + RXBUF_ALIGNMENT,
7059 size, GFAR_RXB_TRUESIZE);
7060 + else if (size < 0)
7061 + pskb_trim(skb, skb->len + size);
7062 }
7063
7064 /* try reuse page */
7065 @@ -3070,9 +3075,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
7066 if (ndev->features & NETIF_F_RXCSUM)
7067 gfar_rx_checksum(skb, fcb);
7068
7069 - /* Tell the skb what kind of packet this is */
7070 - skb->protocol = eth_type_trans(skb, ndev);
7071 -
7072 /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
7073 * Even if vlan rx accel is disabled, on some chips
7074 * RXFCB_VLN is pseudo randomly set.
7075 @@ -3143,13 +3145,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
7076 continue;
7077 }
7078
7079 + gfar_process_frame(ndev, skb);
7080 +
7081 /* Increment the number of packets */
7082 total_pkts++;
7083 total_bytes += skb->len;
7084
7085 skb_record_rx_queue(skb, rx_queue->qindex);
7086
7087 - gfar_process_frame(ndev, skb);
7088 + skb->protocol = eth_type_trans(skb, ndev);
7089
7090 /* Send the packet up the stack */
7091 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
7092 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
7093 index 49094c965697..897a87ae8655 100644
7094 --- a/drivers/net/ethernet/ibm/ibmvnic.c
7095 +++ b/drivers/net/ethernet/ibm/ibmvnic.c
7096 @@ -994,6 +994,7 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget)
7097 netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
7098 /* free the entry */
7099 next->rx_comp.first = 0;
7100 + dev_kfree_skb_any(rx_buff->skb);
7101 remove_buff_from_pool(adapter, rx_buff);
7102 break;
7103 }
7104 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
7105 index 8a48656a376b..7ddac956ffb5 100644
7106 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
7107 +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
7108 @@ -1600,7 +1600,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
7109 * we have already determined whether we have link or not.
7110 */
7111 if (!mac->autoneg)
7112 - return -E1000_ERR_CONFIG;
7113 + return 1;
7114
7115 /* Auto-Neg is enabled. Auto Speed Detection takes care
7116 * of MAC speed/duplex configuration. So we only need to
7117 diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
7118 index f457c5703d0c..db735644b312 100644
7119 --- a/drivers/net/ethernet/intel/e1000e/mac.c
7120 +++ b/drivers/net/ethernet/intel/e1000e/mac.c
7121 @@ -450,7 +450,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
7122 * we have already determined whether we have link or not.
7123 */
7124 if (!mac->autoneg)
7125 - return -E1000_ERR_CONFIG;
7126 + return 1;
7127
7128 /* Auto-Neg is enabled. Auto Speed Detection takes care
7129 * of MAC speed/duplex configuration. So we only need to
7130 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
7131 index 825ec8f710e7..9c95222e6536 100644
7132 --- a/drivers/net/ethernet/intel/e1000e/netdev.c
7133 +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
7134 @@ -2331,8 +2331,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
7135 {
7136 struct pci_dev *pdev = adapter->pdev;
7137
7138 - ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
7139 - GFP_KERNEL);
7140 + ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
7141 + GFP_KERNEL);
7142 if (!ring->desc)
7143 return -ENOMEM;
7144
7145 diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
7146 index 05629381be6b..ea5ea653e1db 100644
7147 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
7148 +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
7149 @@ -803,8 +803,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
7150 if (vid >= VLAN_N_VID)
7151 return -EINVAL;
7152
7153 - /* Verify we have permission to add VLANs */
7154 - if (hw->mac.vlan_override)
7155 + /* Verify that we have permission to add VLANs. If this is a request
7156 + * to remove a VLAN, we still want to allow the user to remove the
7157 + * VLAN device. In that case, we need to clear the bit in the
7158 + * active_vlans bitmask.
7159 + */
7160 + if (set && hw->mac.vlan_override)
7161 return -EACCES;
7162
7163 /* update active_vlans bitmask */
7164 @@ -823,6 +827,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
7165 rx_ring->vid &= ~FM10K_VLAN_CLEAR;
7166 }
7167
7168 + /* If our VLAN has been overridden, there is no reason to send VLAN
7169 + * removal requests as they will be silently ignored.
7170 + */
7171 + if (hw->mac.vlan_override)
7172 + return 0;
7173 +
7174 /* Do not remove default VLAN ID related entries from VLAN and MAC
7175 * tables
7176 */
7177 diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
7178 index fa463268d019..17b81780d12f 100644
7179 --- a/drivers/net/ethernet/marvell/mvneta.c
7180 +++ b/drivers/net/ethernet/marvell/mvneta.c
7181 @@ -1080,6 +1080,7 @@ static void mvneta_port_up(struct mvneta_port *pp)
7182 }
7183 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
7184
7185 + q_map = 0;
7186 /* Enable all initialized RXQs. */
7187 for (queue = 0; queue < rxq_number; queue++) {
7188 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
7189 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
7190 index 4c3f1cb7e2c9..6631fb0782d7 100644
7191 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
7192 +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
7193 @@ -1765,7 +1765,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
7194
7195 cmd->checksum_disabled = 1;
7196 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
7197 - cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
7198 + cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
7199
7200 cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
7201 if (cmd->cmdif_rev > CMD_IF_REV) {
7202 diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
7203 index f683bfbd9986..9d223ff65071 100644
7204 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
7205 +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
7206 @@ -1250,9 +1250,9 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q)
7207 while (tx_q->tpd.consume_idx != hw_consume_idx) {
7208 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
7209 if (tpbuf->dma_addr) {
7210 - dma_unmap_single(adpt->netdev->dev.parent,
7211 - tpbuf->dma_addr, tpbuf->length,
7212 - DMA_TO_DEVICE);
7213 + dma_unmap_page(adpt->netdev->dev.parent,
7214 + tpbuf->dma_addr, tpbuf->length,
7215 + DMA_TO_DEVICE);
7216 tpbuf->dma_addr = 0;
7217 }
7218
7219 @@ -1409,9 +1409,11 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
7220
7221 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
7222 tpbuf->length = mapped_len;
7223 - tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
7224 - skb->data, tpbuf->length,
7225 - DMA_TO_DEVICE);
7226 + tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
7227 + virt_to_page(skb->data),
7228 + offset_in_page(skb->data),
7229 + tpbuf->length,
7230 + DMA_TO_DEVICE);
7231 ret = dma_mapping_error(adpt->netdev->dev.parent,
7232 tpbuf->dma_addr);
7233 if (ret)
7234 @@ -1427,9 +1429,12 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
7235 if (mapped_len < len) {
7236 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
7237 tpbuf->length = len - mapped_len;
7238 - tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
7239 - skb->data + mapped_len,
7240 - tpbuf->length, DMA_TO_DEVICE);
7241 + tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
7242 + virt_to_page(skb->data +
7243 + mapped_len),
7244 + offset_in_page(skb->data +
7245 + mapped_len),
7246 + tpbuf->length, DMA_TO_DEVICE);
7247 ret = dma_mapping_error(adpt->netdev->dev.parent,
7248 tpbuf->dma_addr);
7249 if (ret)
7250 diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
7251 index 8b0016a785c0..734caa7a557b 100644
7252 --- a/drivers/net/ethernet/smsc/smsc911x.c
7253 +++ b/drivers/net/ethernet/smsc/smsc911x.c
7254 @@ -2330,14 +2330,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
7255 pdata = netdev_priv(dev);
7256 BUG_ON(!pdata);
7257 BUG_ON(!pdata->ioaddr);
7258 - WARN_ON(dev->phydev);
7259
7260 SMSC_TRACE(pdata, ifdown, "Stopping driver");
7261
7262 + unregister_netdev(dev);
7263 +
7264 mdiobus_unregister(pdata->mii_bus);
7265 mdiobus_free(pdata->mii_bus);
7266
7267 - unregister_netdev(dev);
7268 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
7269 "smsc911x-memory");
7270 if (!res)
7271 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
7272 index ffaed1f35efe..f356a44bcb81 100644
7273 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
7274 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
7275 @@ -118,7 +118,7 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
7276 snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev));
7277 init.name = clk_name;
7278 init.ops = &clk_mux_ops;
7279 - init.flags = 0;
7280 + init.flags = CLK_SET_RATE_PARENT;
7281 init.parent_names = mux_parent_names;
7282 init.num_parents = MUX_CLK_NUM_PARENTS;
7283
7284 @@ -146,7 +146,9 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
7285 dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
7286 dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
7287 dwmac->m250_div.hw.init = &init;
7288 - dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
7289 + dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED |
7290 + CLK_DIVIDER_ALLOW_ZERO |
7291 + CLK_DIVIDER_ROUND_CLOSEST;
7292
7293 dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw);
7294 if (WARN_ON(IS_ERR(dwmac->m250_div_clk)))
7295 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7296 index c212d1dd8bfd..b3bc1287b2a7 100644
7297 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7298 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7299 @@ -1343,6 +1343,11 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
7300 if (unlikely(status & tx_dma_own))
7301 break;
7302
7303 + /* Make sure descriptor fields are read after reading
7304 + * the own bit.
7305 + */
7306 + dma_rmb();
7307 +
7308 /* Just consider the last segment and ...*/
7309 if (likely(!(status & tx_not_ls))) {
7310 /* ... verify the status error condition */
7311 @@ -2136,8 +2141,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
7312 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
7313
7314 /* If context desc is used to change MSS */
7315 - if (mss_desc)
7316 + if (mss_desc) {
7317 + /* Make sure that first descriptor has been completely
7318 + * written, including its own bit. This is because MSS is
7319 + * actually before first descriptor, so we need to make
7320 + * sure that MSS's own bit is the last thing written.
7321 + */
7322 + dma_wmb();
7323 priv->hw->desc->set_tx_owner(mss_desc);
7324 + }
7325
7326 /* The own bit must be the latest setting done when prepare the
7327 * descriptor and then barrier is needed to make sure that
7328 diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
7329 index a2f9b47de187..e36c700c78d4 100644
7330 --- a/drivers/net/ethernet/sun/sunvnet.c
7331 +++ b/drivers/net/ethernet/sun/sunvnet.c
7332 @@ -198,7 +198,7 @@ static struct vnet *vnet_new(const u64 *local_mac,
7333 dev->ethtool_ops = &vnet_ethtool_ops;
7334 dev->watchdog_timeo = VNET_TX_TIMEOUT;
7335
7336 - dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
7337 + dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO |
7338 NETIF_F_HW_CSUM | NETIF_F_SG;
7339 dev->features = dev->hw_features;
7340
7341 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
7342 index e8ad4d060da7..6237236b7c4c 100644
7343 --- a/drivers/net/macvlan.c
7344 +++ b/drivers/net/macvlan.c
7345 @@ -1384,7 +1384,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
7346 /* the macvlan port may be freed by macvlan_uninit when fail to register.
7347 * so we destroy the macvlan port only when it's valid.
7348 */
7349 - if (create && macvlan_port_get_rtnl(dev))
7350 + if (create && macvlan_port_get_rtnl(lowerdev))
7351 macvlan_port_destroy(port->dev);
7352 return err;
7353 }
7354 diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
7355 index b88f7d65953d..482ea404a2d4 100644
7356 --- a/drivers/net/phy/dp83640.c
7357 +++ b/drivers/net/phy/dp83640.c
7358 @@ -1205,6 +1205,23 @@ static void dp83640_remove(struct phy_device *phydev)
7359 kfree(dp83640);
7360 }
7361
7362 +static int dp83640_soft_reset(struct phy_device *phydev)
7363 +{
7364 + int ret;
7365 +
7366 + ret = genphy_soft_reset(phydev);
7367 + if (ret < 0)
7368 + return ret;
7369 +
7370 + /* From DP83640 datasheet: "Software driver code must wait 3 us
7371 + * following a software reset before allowing further serial MII
7372 + * operations with the DP83640."
7373 + */
7374 + udelay(10); /* Taking udelay inaccuracy into account */
7375 +
7376 + return 0;
7377 +}
7378 +
7379 static int dp83640_config_init(struct phy_device *phydev)
7380 {
7381 struct dp83640_private *dp83640 = phydev->priv;
7382 @@ -1498,6 +1515,7 @@ static struct phy_driver dp83640_driver = {
7383 .flags = PHY_HAS_INTERRUPT,
7384 .probe = dp83640_probe,
7385 .remove = dp83640_remove,
7386 + .soft_reset = dp83640_soft_reset,
7387 .config_init = dp83640_config_init,
7388 .config_aneg = genphy_config_aneg,
7389 .read_status = genphy_read_status,
7390 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
7391 index 8daf5db3d922..1d56c73574e8 100644
7392 --- a/drivers/net/usb/qmi_wwan.c
7393 +++ b/drivers/net/usb/qmi_wwan.c
7394 @@ -889,6 +889,7 @@ static const struct usb_device_id products[] = {
7395 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
7396 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
7397 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
7398 + {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
7399 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
7400 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
7401 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
7402 diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
7403 index 3cdfa2465e3f..d3d89b05f66e 100644
7404 --- a/drivers/net/usb/r8152.c
7405 +++ b/drivers/net/usb/r8152.c
7406 @@ -1693,7 +1693,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
7407
7408 tx_data += len;
7409 agg->skb_len += len;
7410 - agg->skb_num++;
7411 + agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1;
7412
7413 dev_kfree_skb_any(skb);
7414
7415 diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
7416 index 4cb9b11a545a..2cc0f28f4fd2 100644
7417 --- a/drivers/net/usb/smsc75xx.c
7418 +++ b/drivers/net/usb/smsc75xx.c
7419 @@ -957,10 +957,11 @@ static int smsc75xx_set_features(struct net_device *netdev,
7420 /* it's racing here! */
7421
7422 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
7423 - if (ret < 0)
7424 + if (ret < 0) {
7425 netdev_warn(dev->net, "Error writing RFE_CTL\n");
7426 -
7427 - return ret;
7428 + return ret;
7429 + }
7430 + return 0;
7431 }
7432
7433 static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
7434 diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
7435 index 472ed6df2221..7118b8263760 100644
7436 --- a/drivers/net/virtio_net.c
7437 +++ b/drivers/net/virtio_net.c
7438 @@ -1949,8 +1949,8 @@ static int virtnet_probe(struct virtio_device *vdev)
7439
7440 /* Assume link up if device can't report link status,
7441 otherwise get link status from config. */
7442 + netif_carrier_off(dev);
7443 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
7444 - netif_carrier_off(dev);
7445 schedule_work(&vi->config_work);
7446 } else {
7447 vi->status = VIRTIO_NET_S_LINK_UP;
7448 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
7449 index 5aa5df24f4dc..d68f4f2965e0 100644
7450 --- a/drivers/net/wireless/ath/ath10k/mac.c
7451 +++ b/drivers/net/wireless/ath/ath10k/mac.c
7452 @@ -6928,10 +6928,20 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
7453 {
7454 struct ath10k *ar = hw->priv;
7455 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
7456 + struct ath10k_vif *arvif = (void *)vif->drv_priv;
7457 + struct ath10k_peer *peer;
7458 u32 bw, smps;
7459
7460 spin_lock_bh(&ar->data_lock);
7461
7462 + peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
7463 + if (!peer) {
7464 + spin_unlock_bh(&ar->data_lock);
7465 + ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n",
7466 + sta->addr, arvif->vdev_id);
7467 + return;
7468 + }
7469 +
7470 ath10k_dbg(ar, ATH10K_DBG_MAC,
7471 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
7472 sta->addr, changed, sta->bandwidth, sta->rx_nss,
7473 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
7474 index f507d821aba8..c221597e2519 100644
7475 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
7476 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
7477 @@ -6789,7 +6789,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
7478 int i;
7479
7480 /* ignore non-ISO3166 country codes */
7481 - for (i = 0; i < sizeof(req->alpha2); i++)
7482 + for (i = 0; i < 2; i++)
7483 if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
7484 brcmf_err("not a ISO3166 code (0x%02x 0x%02x)\n",
7485 req->alpha2[0], req->alpha2[1]);
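
The brcmfmac loop above is now bounded to the two characters that actually form the country code. The test itself is simply "two uppercase ASCII letters"; a self-contained sketch of that check, independent of the cfg80211 structures:

#include <stdbool.h>

/* An ISO 3166-1 alpha-2 code is exactly two uppercase ASCII letters. */
static bool is_iso3166_alpha2(const char *code)
{
	int i;

	for (i = 0; i < 2; i++)
		if (code[i] < 'A' || code[i] > 'Z')
			return false;
	return true;
}
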
7486 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
7487 index f1231c0ea336..0bffade1ea5b 100644
7488 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
7489 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
7490 @@ -2585,6 +2585,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
7491
7492 /* enable beacon filtering */
7493 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
7494 +
7495 + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
7496 + false);
7497 +
7498 ret = 0;
7499 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
7500 new_state == IEEE80211_STA_ASSOC) {
7501 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
7502 index 0aea476ebf50..f251c2afebfc 100644
7503 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
7504 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
7505 @@ -2709,7 +2709,8 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
7506 struct ieee80211_sta *sta,
7507 struct iwl_lq_sta *lq_sta,
7508 enum nl80211_band band,
7509 - struct rs_rate *rate)
7510 + struct rs_rate *rate,
7511 + bool init)
7512 {
7513 int i, nentries;
7514 unsigned long active_rate;
7515 @@ -2763,14 +2764,25 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
7516 */
7517 if (sta->vht_cap.vht_supported &&
7518 best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
7519 - switch (sta->bandwidth) {
7520 - case IEEE80211_STA_RX_BW_160:
7521 - case IEEE80211_STA_RX_BW_80:
7522 - case IEEE80211_STA_RX_BW_40:
7523 + /*
7524 + * In AP mode, when a new station associates, rs is initialized
7525 + * immediately upon association completion, before the phy
7526 + * context is updated with the association parameters, so the
7527 + * sta bandwidth might be wider than the phy context allows.
7528 + * To avoid this issue, always initialize rs with 20mhz
7529 + * bandwidth rate, and after authorization, when the phy context
7530 + * is already up-to-date, re-init rs with the correct bw.
7531 + */
7532 + u32 bw = init ? RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta);
7533 +
7534 + switch (bw) {
7535 + case RATE_MCS_CHAN_WIDTH_40:
7536 + case RATE_MCS_CHAN_WIDTH_80:
7537 + case RATE_MCS_CHAN_WIDTH_160:
7538 initial_rates = rs_optimal_rates_vht;
7539 nentries = ARRAY_SIZE(rs_optimal_rates_vht);
7540 break;
7541 - case IEEE80211_STA_RX_BW_20:
7542 + case RATE_MCS_CHAN_WIDTH_20:
7543 initial_rates = rs_optimal_rates_vht_20mhz;
7544 nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
7545 break;
7546 @@ -2781,7 +2793,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
7547
7548 active_rate = lq_sta->active_siso_rate;
7549 rate->type = LQ_VHT_SISO;
7550 - rate->bw = rs_bw_from_sta_bw(sta);
7551 + rate->bw = bw;
7552 } else if (sta->ht_cap.ht_supported &&
7553 best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
7554 initial_rates = rs_optimal_rates_ht;
7555 @@ -2863,7 +2875,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
7556 tbl = &(lq_sta->lq_info[active_tbl]);
7557 rate = &tbl->rate;
7558
7559 - rs_get_initial_rate(mvm, sta, lq_sta, band, rate);
7560 + rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init);
7561 rs_init_optimal_rate(mvm, sta, lq_sta);
7562
7563 WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
7564 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
7565 index a481eb41f693..c2bbc8c17beb 100644
7566 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
7567 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
7568 @@ -72,6 +72,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
7569 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
7570 struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
7571 struct iwl_mvm_key_pn *ptk_pn;
7572 + int res;
7573 u8 tid, keyidx;
7574 u8 pn[IEEE80211_CCMP_PN_LEN];
7575 u8 *extiv;
7576 @@ -128,12 +129,13 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
7577 pn[4] = extiv[1];
7578 pn[5] = extiv[0];
7579
7580 - if (memcmp(pn, ptk_pn->q[queue].pn[tid],
7581 - IEEE80211_CCMP_PN_LEN) <= 0)
7582 + res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
7583 + if (res < 0)
7584 + return -1;
7585 + if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
7586 return -1;
7587
7588 - if (!(stats->flag & RX_FLAG_AMSDU_MORE))
7589 - memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
7590 + memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
7591 stats->flag |= RX_FLAG_PN_VALIDATED;
7592
7593 return 0;
7594 @@ -295,28 +297,21 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
7595 }
7596
7597 /*
7598 - * returns true if a packet outside BA session is a duplicate and
7599 - * should be dropped
7600 + * returns true if a packet is a duplicate and should be dropped.
7601 + * Updates AMSDU PN tracking info
7602 */
7603 -static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
7604 - struct ieee80211_rx_status *rx_status,
7605 - struct ieee80211_hdr *hdr,
7606 - struct iwl_rx_mpdu_desc *desc)
7607 +static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
7608 + struct ieee80211_rx_status *rx_status,
7609 + struct ieee80211_hdr *hdr,
7610 + struct iwl_rx_mpdu_desc *desc)
7611 {
7612 struct iwl_mvm_sta *mvm_sta;
7613 struct iwl_mvm_rxq_dup_data *dup_data;
7614 - u8 baid, tid, sub_frame_idx;
7615 + u8 tid, sub_frame_idx;
7616
7617 if (WARN_ON(IS_ERR_OR_NULL(sta)))
7618 return false;
7619
7620 - baid = (le32_to_cpu(desc->reorder_data) &
7621 - IWL_RX_MPDU_REORDER_BAID_MASK) >>
7622 - IWL_RX_MPDU_REORDER_BAID_SHIFT;
7623 -
7624 - if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
7625 - return false;
7626 -
7627 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
7628 dup_data = &mvm_sta->dup_data[queue];
7629
7630 @@ -346,6 +341,12 @@ static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
7631 dup_data->last_sub_frame[tid] >= sub_frame_idx))
7632 return true;
7633
7634 + /* Allow same PN as the first subframe for following sub frames */
7635 + if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
7636 + sub_frame_idx > dup_data->last_sub_frame[tid] &&
7637 + desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
7638 + rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
7639 +
7640 dup_data->last_seq[tid] = hdr->seq_ctrl;
7641 dup_data->last_sub_frame[tid] = sub_frame_idx;
7642
7643 @@ -882,7 +883,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
7644 if (ieee80211_is_data(hdr->frame_control))
7645 iwl_mvm_rx_csum(sta, skb, desc);
7646
7647 - if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
7648 + if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
7649 kfree_skb(skb);
7650 rcu_read_unlock();
7651 return;
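
The rxmq.c hunks split the old memcmp(...) <= 0 test so that an equal packet number is only tolerated when the caller explicitly allows it (here: later subframes of one A-MSDU). Reduced to its core, the replay rule looks like the following sketch, written with plain C types rather than the driver's structures:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* The received PN must be strictly greater than the last accepted PN,
 * unless an equal PN is explicitly allowed; on acceptance the stored PN
 * is updated so later frames are compared against it. */
static bool pn_is_replay(const uint8_t *pn, uint8_t *last_pn, size_t len,
			 bool allow_same_pn)
{
	int res = memcmp(pn, last_pn, len);

	if (res < 0)
		return true;		/* older PN: replay */
	if (res == 0 && !allow_same_pn)
		return true;		/* repeated PN: replay */
	memcpy(last_pn, pn, len);	/* accept and remember */
	return false;
}
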
7652 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
7653 index 7465d4db136f..bd7ff562d82d 100644
7654 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
7655 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
7656 @@ -406,11 +406,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
7657 {
7658 struct ieee80211_key_conf *keyconf = info->control.hw_key;
7659 u8 *crypto_hdr = skb_frag->data + hdrlen;
7660 + enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
7661 u64 pn;
7662
7663 switch (keyconf->cipher) {
7664 case WLAN_CIPHER_SUITE_CCMP:
7665 - case WLAN_CIPHER_SUITE_CCMP_256:
7666 iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
7667 iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
7668 break;
7669 @@ -434,13 +434,16 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
7670 break;
7671 case WLAN_CIPHER_SUITE_GCMP:
7672 case WLAN_CIPHER_SUITE_GCMP_256:
7673 + type = TX_CMD_SEC_GCMP;
7674 + /* Fall through */
7675 + case WLAN_CIPHER_SUITE_CCMP_256:
7676 /* TODO: Taking the key from the table might introduce a race
7677 * when PTK rekeying is done, having an old packets with a PN
7678 * based on the old key but the message encrypted with a new
7679 * one.
7680 * Need to handle this.
7681 */
7682 - tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE;
7683 + tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
7684 tx_cmd->key[0] = keyconf->hw_key_idx;
7685 iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
7686 break;
7687 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
7688 index 2681b5339810..95e96419b4cf 100644
7689 --- a/drivers/net/wireless/mac80211_hwsim.c
7690 +++ b/drivers/net/wireless/mac80211_hwsim.c
7691 @@ -3084,8 +3084,10 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
7692 if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
7693 u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
7694
7695 - if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
7696 + if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) {
7697 + kfree(hwname);
7698 return -EINVAL;
7699 + }
7700 param.regd = hwsim_world_regdom_custom[idx];
7701 }
7702
7703 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
7704 index 1b287861e34f..520050eae836 100644
7705 --- a/drivers/net/xen-netfront.c
7706 +++ b/drivers/net/xen-netfront.c
7707 @@ -350,6 +350,9 @@ static int xennet_open(struct net_device *dev)
7708 unsigned int i = 0;
7709 struct netfront_queue *queue = NULL;
7710
7711 + if (!np->queues)
7712 + return -ENODEV;
7713 +
7714 for (i = 0; i < num_queues; ++i) {
7715 queue = &np->queues[i];
7716 napi_enable(&queue->napi);
7717 @@ -1377,18 +1380,8 @@ static int netfront_probe(struct xenbus_device *dev,
7718 #ifdef CONFIG_SYSFS
7719 info->netdev->sysfs_groups[0] = &xennet_dev_group;
7720 #endif
7721 - err = register_netdev(info->netdev);
7722 - if (err) {
7723 - pr_warn("%s: register_netdev err=%d\n", __func__, err);
7724 - goto fail;
7725 - }
7726
7727 return 0;
7728 -
7729 - fail:
7730 - xennet_free_netdev(netdev);
7731 - dev_set_drvdata(&dev->dev, NULL);
7732 - return err;
7733 }
7734
7735 static void xennet_end_access(int ref, void *page)
7736 @@ -1757,8 +1750,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
7737 {
7738 unsigned int i;
7739
7740 - rtnl_lock();
7741 -
7742 for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
7743 struct netfront_queue *queue = &info->queues[i];
7744
7745 @@ -1767,8 +1758,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
7746 netif_napi_del(&queue->napi);
7747 }
7748
7749 - rtnl_unlock();
7750 -
7751 kfree(info->queues);
7752 info->queues = NULL;
7753 }
7754 @@ -1784,8 +1773,6 @@ static int xennet_create_queues(struct netfront_info *info,
7755 if (!info->queues)
7756 return -ENOMEM;
7757
7758 - rtnl_lock();
7759 -
7760 for (i = 0; i < *num_queues; i++) {
7761 struct netfront_queue *queue = &info->queues[i];
7762
7763 @@ -1794,7 +1781,7 @@ static int xennet_create_queues(struct netfront_info *info,
7764
7765 ret = xennet_init_queue(queue);
7766 if (ret < 0) {
7767 - dev_warn(&info->netdev->dev,
7768 + dev_warn(&info->xbdev->dev,
7769 "only created %d queues\n", i);
7770 *num_queues = i;
7771 break;
7772 @@ -1808,10 +1795,8 @@ static int xennet_create_queues(struct netfront_info *info,
7773
7774 netif_set_real_num_tx_queues(info->netdev, *num_queues);
7775
7776 - rtnl_unlock();
7777 -
7778 if (*num_queues == 0) {
7779 - dev_err(&info->netdev->dev, "no queues\n");
7780 + dev_err(&info->xbdev->dev, "no queues\n");
7781 return -EINVAL;
7782 }
7783 return 0;
7784 @@ -1853,6 +1838,7 @@ static int talk_to_netback(struct xenbus_device *dev,
7785 goto out;
7786 }
7787
7788 + rtnl_lock();
7789 if (info->queues)
7790 xennet_destroy_queues(info);
7791
7792 @@ -1863,6 +1849,7 @@ static int talk_to_netback(struct xenbus_device *dev,
7793 info->queues = NULL;
7794 goto out;
7795 }
7796 + rtnl_unlock();
7797
7798 /* Create shared ring, alloc event channel -- for each queue */
7799 for (i = 0; i < num_queues; ++i) {
7800 @@ -1959,8 +1946,10 @@ static int talk_to_netback(struct xenbus_device *dev,
7801 xenbus_transaction_end(xbt, 1);
7802 destroy_ring:
7803 xennet_disconnect_backend(info);
7804 + rtnl_lock();
7805 xennet_destroy_queues(info);
7806 out:
7807 + rtnl_unlock();
7808 device_unregister(&dev->dev);
7809 return err;
7810 }
7811 @@ -1996,6 +1985,15 @@ static int xennet_connect(struct net_device *dev)
7812 netdev_update_features(dev);
7813 rtnl_unlock();
7814
7815 + if (dev->reg_state == NETREG_UNINITIALIZED) {
7816 + err = register_netdev(dev);
7817 + if (err) {
7818 + pr_warn("%s: register_netdev err=%d\n", __func__, err);
7819 + device_unregister(&np->xbdev->dev);
7820 + return err;
7821 + }
7822 + }
7823 +
7824 /*
7825 * All public and private state should now be sane. Get
7826 * ready to start sending and receiving packets and give the driver
7827 @@ -2186,10 +2184,14 @@ static int xennet_remove(struct xenbus_device *dev)
7828
7829 xennet_disconnect_backend(info);
7830
7831 - unregister_netdev(info->netdev);
7832 + if (info->netdev->reg_state == NETREG_REGISTERED)
7833 + unregister_netdev(info->netdev);
7834
7835 - if (info->queues)
7836 + if (info->queues) {
7837 + rtnl_lock();
7838 xennet_destroy_queues(info);
7839 + rtnl_unlock();
7840 + }
7841 xennet_free_netdev(info->netdev);
7842
7843 return 0;
7844 diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
7845 index 24222a5d8df2..da95bd8f0f72 100644
7846 --- a/drivers/ntb/ntb_transport.c
7847 +++ b/drivers/ntb/ntb_transport.c
7848 @@ -996,6 +996,9 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
7849 mw_base = nt->mw_vec[mw_num].phys_addr;
7850 mw_size = nt->mw_vec[mw_num].phys_size;
7851
7852 + if (max_mw_size && mw_size > max_mw_size)
7853 + mw_size = max_mw_size;
7854 +
7855 tx_size = (unsigned int)mw_size / num_qps_mw;
7856 qp_offset = tx_size * (qp_num / mw_count);
7857
7858 diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
7859 index eef1a68e5d95..b634b89b4540 100644
7860 --- a/drivers/nvme/host/fabrics.c
7861 +++ b/drivers/nvme/host/fabrics.c
7862 @@ -583,8 +583,10 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
7863 opts->discovery_nqn =
7864 !(strcmp(opts->subsysnqn,
7865 NVME_DISC_SUBSYS_NAME));
7866 - if (opts->discovery_nqn)
7867 + if (opts->discovery_nqn) {
7868 + opts->kato = 0;
7869 opts->nr_io_queues = 0;
7870 + }
7871 break;
7872 case NVMF_OPT_TRADDR:
7873 p = match_strdup(args);
7874 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
7875 index 8cc856ecec95..642ee00e9143 100644
7876 --- a/drivers/nvme/host/pci.c
7877 +++ b/drivers/nvme/host/pci.c
7878 @@ -1120,7 +1120,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
7879 nvmeq->cq_vector = qid - 1;
7880 result = adapter_alloc_cq(dev, qid, nvmeq);
7881 if (result < 0)
7882 - return result;
7883 + goto release_vector;
7884
7885 result = adapter_alloc_sq(dev, qid, nvmeq);
7886 if (result < 0)
7887 @@ -1134,9 +1134,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
7888 return result;
7889
7890 release_sq:
7891 + dev->online_queues--;
7892 adapter_delete_sq(dev, qid);
7893 release_cq:
7894 adapter_delete_cq(dev, qid);
7895 + release_vector:
7896 + nvmeq->cq_vector = -1;
7897 return result;
7898 }
7899
7900 diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
7901 index c89d68a76f3d..3a044922b048 100644
7902 --- a/drivers/nvme/target/core.c
7903 +++ b/drivers/nvme/target/core.c
7904 @@ -491,9 +491,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
7905 goto fail;
7906 }
7907
7908 - /* either variant of SGLs is fine, as we don't support metadata */
7909 - if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
7910 - (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
7911 + /*
7912 + * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
7913 + * contains an address of a single contiguous physical buffer that is
7914 + * byte aligned.
7915 + */
7916 + if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
7917 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
7918 goto fail;
7919 }
7920 diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
7921 index 1cced1d039d7..7e9385812bda 100644
7922 --- a/drivers/parisc/lba_pci.c
7923 +++ b/drivers/parisc/lba_pci.c
7924 @@ -1367,9 +1367,27 @@ lba_hw_init(struct lba_device *d)
7925 WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
7926 }
7927
7928 - /* Set HF mode as the default (vs. -1 mode). */
7929 +
7930 + /*
7931 + * Hard Fail vs. Soft Fail on PCI "Master Abort".
7932 + *
7933 + * "Master Abort" means the MMIO transaction timed out - usually due to
7934 + * the device not responding to an MMIO read. We would like HF to be
7935 + * enabled to find driver problems, though it means the system will
7936 + * crash with a HPMC.
7937 + *
7938 + * In SoftFail mode "~0L" is returned as a result of a timeout on the
7939 + * pci bus. This is like how PCI busses on x86 and most other
7940 + * architectures behave. In order to increase compatibility with
7941 + * existing (x86) PCI hardware and existing Linux drivers we enable
7942 + * Soft Faul mode on PA-RISC now too.
7943 + * Soft Fail mode on PA-RISC now too.
7944 stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
7945 +#if defined(ENABLE_HARDFAIL)
7946 WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
7947 +#else
7948 + WRITE_REG32(stat & ~HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
7949 +#endif
7950
7951 /*
7952 ** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal
7953 diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
7954 index d81ad841dc0c..f11c38244088 100644
7955 --- a/drivers/pci/pci-driver.c
7956 +++ b/drivers/pci/pci-driver.c
7957 @@ -1147,11 +1147,14 @@ static int pci_pm_runtime_suspend(struct device *dev)
7958 int error;
7959
7960 /*
7961 - * If pci_dev->driver is not set (unbound), the device should
7962 - * always remain in D0 regardless of the runtime PM status
7963 + * If pci_dev->driver is not set (unbound), we leave the device in D0,
7964 + * but it may go to D3cold when the bridge above it runtime suspends.
7965 + * Save its config space in case that happens.
7966 */
7967 - if (!pci_dev->driver)
7968 + if (!pci_dev->driver) {
7969 + pci_save_state(pci_dev);
7970 return 0;
7971 + }
7972
7973 if (!pm || !pm->runtime_suspend)
7974 return -ENOSYS;
7975 @@ -1199,16 +1202,18 @@ static int pci_pm_runtime_resume(struct device *dev)
7976 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
7977
7978 /*
7979 - * If pci_dev->driver is not set (unbound), the device should
7980 - * always remain in D0 regardless of the runtime PM status
7981 + * Restoring config space is necessary even if the device is not bound
7982 + * to a driver because although we left it in D0, it may have gone to
7983 + * D3cold when the bridge above it runtime suspended.
7984 */
7985 + pci_restore_standard_config(pci_dev);
7986 +
7987 if (!pci_dev->driver)
7988 return 0;
7989
7990 if (!pm || !pm->runtime_resume)
7991 return -ENOSYS;
7992
7993 - pci_restore_standard_config(pci_dev);
7994 pci_fixup_device(pci_fixup_resume_early, pci_dev);
7995 __pci_enable_wake(pci_dev, PCI_D0, true, false);
7996 pci_fixup_device(pci_fixup_resume, pci_dev);
7997 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
7998 index fb177dc576d6..b55f9179c94e 100644
7999 --- a/drivers/pci/quirks.c
8000 +++ b/drivers/pci/quirks.c
8001 @@ -3857,6 +3857,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
8002 quirk_dma_func1_alias);
8003 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
8004 quirk_dma_func1_alias);
8005 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
8006 + quirk_dma_func1_alias);
8007 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
8008 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
8009 quirk_dma_func1_alias);
8010 @@ -3872,6 +3874,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
8011 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
8012 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
8013 quirk_dma_func1_alias);
8014 +/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
8015 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
8016 + quirk_dma_func1_alias);
8017 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
8018 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
8019 quirk_dma_func1_alias);
8020 diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
8021 index bedce3453dd3..056845bdf67b 100644
8022 --- a/drivers/pinctrl/qcom/pinctrl-msm.c
8023 +++ b/drivers/pinctrl/qcom/pinctrl-msm.c
8024 @@ -790,7 +790,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
8025 return -EINVAL;
8026
8027 chip = &pctrl->chip;
8028 - chip->base = 0;
8029 + chip->base = -1;
8030 chip->ngpio = ngpio;
8031 chip->label = dev_name(pctrl->dev);
8032 chip->parent = pctrl->dev;
8033 diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
8034 index dc9b671ccf2e..29718886989a 100644
8035 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
8036 +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
8037 @@ -1,7 +1,7 @@
8038 /*
8039 * R8A7796 processor support - PFC hardware block.
8040 *
8041 - * Copyright (C) 2016 Renesas Electronics Corp.
8042 + * Copyright (C) 2016-2017 Renesas Electronics Corp.
8043 *
8044 * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7795.c
8045 *
8046 @@ -476,7 +476,7 @@ FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28
8047 #define MOD_SEL1_26 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1)
8048 #define MOD_SEL1_25_24 FM(SEL_SSP1_1_0) FM(SEL_SSP1_1_1) FM(SEL_SSP1_1_2) FM(SEL_SSP1_1_3)
8049 #define MOD_SEL1_23_22_21 FM(SEL_SSP1_0_0) FM(SEL_SSP1_0_1) FM(SEL_SSP1_0_2) FM(SEL_SSP1_0_3) FM(SEL_SSP1_0_4) F_(0, 0) F_(0, 0) F_(0, 0)
8050 -#define MOD_SEL1_20 FM(SEL_SSI_0) FM(SEL_SSI_1)
8051 +#define MOD_SEL1_20 FM(SEL_SSI1_0) FM(SEL_SSI1_1)
8052 #define MOD_SEL1_19 FM(SEL_SPEED_PULSE_0) FM(SEL_SPEED_PULSE_1)
8053 #define MOD_SEL1_18_17 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1) FM(SEL_SIMCARD_2) FM(SEL_SIMCARD_3)
8054 #define MOD_SEL1_16 FM(SEL_SDHI2_0) FM(SEL_SDHI2_1)
8055 @@ -1208,7 +1208,7 @@ static const u16 pinmux_data[] = {
8056 PINMUX_IPSR_GPSR(IP13_11_8, HSCK0),
8057 PINMUX_IPSR_MSEL(IP13_11_8, MSIOF1_SCK_D, SEL_MSIOF1_3),
8058 PINMUX_IPSR_MSEL(IP13_11_8, AUDIO_CLKB_A, SEL_ADG_B_0),
8059 - PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI_1),
8060 + PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI1_1),
8061 PINMUX_IPSR_MSEL(IP13_11_8, TS_SCK0_D, SEL_TSIF0_3),
8062 PINMUX_IPSR_MSEL(IP13_11_8, STP_ISCLK_0_D, SEL_SSP1_0_3),
8063 PINMUX_IPSR_MSEL(IP13_11_8, RIF0_CLK_C, SEL_DRIF0_2),
8064 @@ -1216,14 +1216,14 @@ static const u16 pinmux_data[] = {
8065
8066 PINMUX_IPSR_GPSR(IP13_15_12, HRX0),
8067 PINMUX_IPSR_MSEL(IP13_15_12, MSIOF1_RXD_D, SEL_MSIOF1_3),
8068 - PINMUX_IPSR_MSEL(IP13_15_12, SSI_SDATA2_B, SEL_SSI_1),
8069 + PINMUX_IPSR_MSEL(IP13_15_12, SSI_SDATA2_B, SEL_SSI2_1),
8070 PINMUX_IPSR_MSEL(IP13_15_12, TS_SDEN0_D, SEL_TSIF0_3),
8071 PINMUX_IPSR_MSEL(IP13_15_12, STP_ISEN_0_D, SEL_SSP1_0_3),
8072 PINMUX_IPSR_MSEL(IP13_15_12, RIF0_D0_C, SEL_DRIF0_2),
8073
8074 PINMUX_IPSR_GPSR(IP13_19_16, HTX0),
8075 PINMUX_IPSR_MSEL(IP13_19_16, MSIOF1_TXD_D, SEL_MSIOF1_3),
8076 - PINMUX_IPSR_MSEL(IP13_19_16, SSI_SDATA9_B, SEL_SSI_1),
8077 + PINMUX_IPSR_MSEL(IP13_19_16, SSI_SDATA9_B, SEL_SSI9_1),
8078 PINMUX_IPSR_MSEL(IP13_19_16, TS_SDAT0_D, SEL_TSIF0_3),
8079 PINMUX_IPSR_MSEL(IP13_19_16, STP_ISD_0_D, SEL_SSP1_0_3),
8080 PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_C, SEL_DRIF0_2),
8081 @@ -1231,7 +1231,7 @@ static const u16 pinmux_data[] = {
8082 PINMUX_IPSR_GPSR(IP13_23_20, HCTS0_N),
8083 PINMUX_IPSR_MSEL(IP13_23_20, RX2_B, SEL_SCIF2_1),
8084 PINMUX_IPSR_MSEL(IP13_23_20, MSIOF1_SYNC_D, SEL_MSIOF1_3),
8085 - PINMUX_IPSR_MSEL(IP13_23_20, SSI_SCK9_A, SEL_SSI_0),
8086 + PINMUX_IPSR_MSEL(IP13_23_20, SSI_SCK9_A, SEL_SSI9_0),
8087 PINMUX_IPSR_MSEL(IP13_23_20, TS_SPSYNC0_D, SEL_TSIF0_3),
8088 PINMUX_IPSR_MSEL(IP13_23_20, STP_ISSYNC_0_D, SEL_SSP1_0_3),
8089 PINMUX_IPSR_MSEL(IP13_23_20, RIF0_SYNC_C, SEL_DRIF0_2),
8090 @@ -1240,7 +1240,7 @@ static const u16 pinmux_data[] = {
8091 PINMUX_IPSR_GPSR(IP13_27_24, HRTS0_N),
8092 PINMUX_IPSR_MSEL(IP13_27_24, TX2_B, SEL_SCIF2_1),
8093 PINMUX_IPSR_MSEL(IP13_27_24, MSIOF1_SS1_D, SEL_MSIOF1_3),
8094 - PINMUX_IPSR_MSEL(IP13_27_24, SSI_WS9_A, SEL_SSI_0),
8095 + PINMUX_IPSR_MSEL(IP13_27_24, SSI_WS9_A, SEL_SSI9_0),
8096 PINMUX_IPSR_MSEL(IP13_27_24, STP_IVCXO27_0_D, SEL_SSP1_0_3),
8097 PINMUX_IPSR_MSEL(IP13_27_24, BPFCLK_A, SEL_FM_0),
8098 PINMUX_IPSR_GPSR(IP13_27_24, AUDIO_CLKOUT2_A),
8099 @@ -1255,7 +1255,7 @@ static const u16 pinmux_data[] = {
8100 PINMUX_IPSR_MSEL(IP14_3_0, RX5_A, SEL_SCIF5_0),
8101 PINMUX_IPSR_MSEL(IP14_3_0, NFWP_N_A, SEL_NDF_0),
8102 PINMUX_IPSR_MSEL(IP14_3_0, AUDIO_CLKA_C, SEL_ADG_A_2),
8103 - PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI_0),
8104 + PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI2_0),
8105 PINMUX_IPSR_MSEL(IP14_3_0, STP_IVCXO27_0_C, SEL_SSP1_0_2),
8106 PINMUX_IPSR_GPSR(IP14_3_0, AUDIO_CLKOUT3_A),
8107 PINMUX_IPSR_MSEL(IP14_3_0, TCLK1_B, SEL_TIMER_TMU_1),
8108 @@ -1264,7 +1264,7 @@ static const u16 pinmux_data[] = {
8109 PINMUX_IPSR_MSEL(IP14_7_4, TX5_A, SEL_SCIF5_0),
8110 PINMUX_IPSR_MSEL(IP14_7_4, MSIOF1_SS2_D, SEL_MSIOF1_3),
8111 PINMUX_IPSR_MSEL(IP14_7_4, AUDIO_CLKC_A, SEL_ADG_C_0),
8112 - PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI_0),
8113 + PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI2_0),
8114 PINMUX_IPSR_MSEL(IP14_7_4, STP_OPWM_0_D, SEL_SSP1_0_3),
8115 PINMUX_IPSR_GPSR(IP14_7_4, AUDIO_CLKOUT_D),
8116 PINMUX_IPSR_MSEL(IP14_7_4, SPEEDIN_B, SEL_SPEED_PULSE_1),
8117 @@ -1292,10 +1292,10 @@ static const u16 pinmux_data[] = {
8118 PINMUX_IPSR_MSEL(IP14_31_28, MSIOF1_SS2_F, SEL_MSIOF1_5),
8119
8120 /* IPSR15 */
8121 - PINMUX_IPSR_MSEL(IP15_3_0, SSI_SDATA1_A, SEL_SSI_0),
8122 + PINMUX_IPSR_MSEL(IP15_3_0, SSI_SDATA1_A, SEL_SSI1_0),
8123
8124 - PINMUX_IPSR_MSEL(IP15_7_4, SSI_SDATA2_A, SEL_SSI_0),
8125 - PINMUX_IPSR_MSEL(IP15_7_4, SSI_SCK1_B, SEL_SSI_1),
8126 + PINMUX_IPSR_MSEL(IP15_7_4, SSI_SDATA2_A, SEL_SSI2_0),
8127 + PINMUX_IPSR_MSEL(IP15_7_4, SSI_SCK1_B, SEL_SSI1_1),
8128
8129 PINMUX_IPSR_GPSR(IP15_11_8, SSI_SCK34),
8130 PINMUX_IPSR_MSEL(IP15_11_8, MSIOF1_SS1_A, SEL_MSIOF1_0),
8131 @@ -1381,11 +1381,11 @@ static const u16 pinmux_data[] = {
8132 PINMUX_IPSR_MSEL(IP16_27_24, RIF1_D1_A, SEL_DRIF1_0),
8133 PINMUX_IPSR_MSEL(IP16_27_24, RIF3_D1_A, SEL_DRIF3_0),
8134
8135 - PINMUX_IPSR_MSEL(IP16_31_28, SSI_SDATA9_A, SEL_SSI_0),
8136 + PINMUX_IPSR_MSEL(IP16_31_28, SSI_SDATA9_A, SEL_SSI9_0),
8137 PINMUX_IPSR_MSEL(IP16_31_28, HSCK2_B, SEL_HSCIF2_1),
8138 PINMUX_IPSR_MSEL(IP16_31_28, MSIOF1_SS1_C, SEL_MSIOF1_2),
8139 PINMUX_IPSR_MSEL(IP16_31_28, HSCK1_A, SEL_HSCIF1_0),
8140 - PINMUX_IPSR_MSEL(IP16_31_28, SSI_WS1_B, SEL_SSI_1),
8141 + PINMUX_IPSR_MSEL(IP16_31_28, SSI_WS1_B, SEL_SSI1_1),
8142 PINMUX_IPSR_GPSR(IP16_31_28, SCK1),
8143 PINMUX_IPSR_MSEL(IP16_31_28, STP_IVCXO27_1_A, SEL_SSP1_1_0),
8144 PINMUX_IPSR_GPSR(IP16_31_28, SCK5_A),
8145 @@ -1417,7 +1417,7 @@ static const u16 pinmux_data[] = {
8146
8147 PINMUX_IPSR_GPSR(IP17_19_16, USB1_PWEN),
8148 PINMUX_IPSR_MSEL(IP17_19_16, SIM0_CLK_C, SEL_SIMCARD_2),
8149 - PINMUX_IPSR_MSEL(IP17_19_16, SSI_SCK1_A, SEL_SSI_0),
8150 + PINMUX_IPSR_MSEL(IP17_19_16, SSI_SCK1_A, SEL_SSI1_0),
8151 PINMUX_IPSR_MSEL(IP17_19_16, TS_SCK0_E, SEL_TSIF0_4),
8152 PINMUX_IPSR_MSEL(IP17_19_16, STP_ISCLK_0_E, SEL_SSP1_0_4),
8153 PINMUX_IPSR_MSEL(IP17_19_16, FMCLK_B, SEL_FM_1),
8154 @@ -1427,7 +1427,7 @@ static const u16 pinmux_data[] = {
8155
8156 PINMUX_IPSR_GPSR(IP17_23_20, USB1_OVC),
8157 PINMUX_IPSR_MSEL(IP17_23_20, MSIOF1_SS2_C, SEL_MSIOF1_2),
8158 - PINMUX_IPSR_MSEL(IP17_23_20, SSI_WS1_A, SEL_SSI_0),
8159 + PINMUX_IPSR_MSEL(IP17_23_20, SSI_WS1_A, SEL_SSI1_0),
8160 PINMUX_IPSR_MSEL(IP17_23_20, TS_SDAT0_E, SEL_TSIF0_4),
8161 PINMUX_IPSR_MSEL(IP17_23_20, STP_ISD_0_E, SEL_SSP1_0_4),
8162 PINMUX_IPSR_MSEL(IP17_23_20, FMIN_B, SEL_FM_1),
8163 @@ -1437,7 +1437,7 @@ static const u16 pinmux_data[] = {
8164
8165 PINMUX_IPSR_GPSR(IP17_27_24, USB30_PWEN),
8166 PINMUX_IPSR_GPSR(IP17_27_24, AUDIO_CLKOUT_B),
8167 - PINMUX_IPSR_MSEL(IP17_27_24, SSI_SCK2_B, SEL_SSI_1),
8168 + PINMUX_IPSR_MSEL(IP17_27_24, SSI_SCK2_B, SEL_SSI2_1),
8169 PINMUX_IPSR_MSEL(IP17_27_24, TS_SDEN1_D, SEL_TSIF1_3),
8170 PINMUX_IPSR_MSEL(IP17_27_24, STP_ISEN_1_D, SEL_SSP1_1_2),
8171 PINMUX_IPSR_MSEL(IP17_27_24, STP_OPWM_0_E, SEL_SSP1_0_4),
8172 @@ -1449,7 +1449,7 @@ static const u16 pinmux_data[] = {
8173
8174 PINMUX_IPSR_GPSR(IP17_31_28, USB30_OVC),
8175 PINMUX_IPSR_GPSR(IP17_31_28, AUDIO_CLKOUT1_B),
8176 - PINMUX_IPSR_MSEL(IP17_31_28, SSI_WS2_B, SEL_SSI_1),
8177 + PINMUX_IPSR_MSEL(IP17_31_28, SSI_WS2_B, SEL_SSI2_1),
8178 PINMUX_IPSR_MSEL(IP17_31_28, TS_SPSYNC1_D, SEL_TSIF1_3),
8179 PINMUX_IPSR_MSEL(IP17_31_28, STP_ISSYNC_1_D, SEL_SSP1_1_3),
8180 PINMUX_IPSR_MSEL(IP17_31_28, STP_IVCXO27_0_E, SEL_SSP1_0_4),
8181 @@ -1460,7 +1460,7 @@ static const u16 pinmux_data[] = {
8182 /* IPSR18 */
8183 PINMUX_IPSR_GPSR(IP18_3_0, GP6_30),
8184 PINMUX_IPSR_GPSR(IP18_3_0, AUDIO_CLKOUT2_B),
8185 - PINMUX_IPSR_MSEL(IP18_3_0, SSI_SCK9_B, SEL_SSI_1),
8186 + PINMUX_IPSR_MSEL(IP18_3_0, SSI_SCK9_B, SEL_SSI9_1),
8187 PINMUX_IPSR_MSEL(IP18_3_0, TS_SDEN0_E, SEL_TSIF0_4),
8188 PINMUX_IPSR_MSEL(IP18_3_0, STP_ISEN_0_E, SEL_SSP1_0_4),
8189 PINMUX_IPSR_MSEL(IP18_3_0, RIF2_D0_B, SEL_DRIF2_1),
8190 @@ -1471,7 +1471,7 @@ static const u16 pinmux_data[] = {
8191
8192 PINMUX_IPSR_GPSR(IP18_7_4, GP6_31),
8193 PINMUX_IPSR_GPSR(IP18_7_4, AUDIO_CLKOUT3_B),
8194 - PINMUX_IPSR_MSEL(IP18_7_4, SSI_WS9_B, SEL_SSI_1),
8195 + PINMUX_IPSR_MSEL(IP18_7_4, SSI_WS9_B, SEL_SSI9_1),
8196 PINMUX_IPSR_MSEL(IP18_7_4, TS_SPSYNC0_E, SEL_TSIF0_4),
8197 PINMUX_IPSR_MSEL(IP18_7_4, STP_ISSYNC_0_E, SEL_SSP1_0_4),
8198 PINMUX_IPSR_MSEL(IP18_7_4, RIF2_D1_B, SEL_DRIF2_1),
8199 diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
8200 index 83e89e5d4752..b73a2376d913 100644
8201 --- a/drivers/regulator/gpio-regulator.c
8202 +++ b/drivers/regulator/gpio-regulator.c
8203 @@ -268,8 +268,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
8204 drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
8205 if (drvdata->desc.name == NULL) {
8206 dev_err(&pdev->dev, "Failed to allocate supply name\n");
8207 - ret = -ENOMEM;
8208 - goto err;
8209 + return -ENOMEM;
8210 }
8211
8212 if (config->nr_gpios != 0) {
8213 @@ -289,7 +288,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
8214 dev_err(&pdev->dev,
8215 "Could not obtain regulator setting GPIOs: %d\n",
8216 ret);
8217 - goto err_memstate;
8218 + goto err_memgpio;
8219 }
8220 }
8221
8222 @@ -300,7 +299,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
8223 if (drvdata->states == NULL) {
8224 dev_err(&pdev->dev, "Failed to allocate state data\n");
8225 ret = -ENOMEM;
8226 - goto err_memgpio;
8227 + goto err_stategpio;
8228 }
8229 drvdata->nr_states = config->nr_states;
8230
8231 @@ -321,7 +320,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
8232 default:
8233 dev_err(&pdev->dev, "No regulator type set\n");
8234 ret = -EINVAL;
8235 - goto err_memgpio;
8236 + goto err_memstate;
8237 }
8238
8239 /* build initial state from gpio init data. */
8240 @@ -358,22 +357,21 @@ static int gpio_regulator_probe(struct platform_device *pdev)
8241 if (IS_ERR(drvdata->dev)) {
8242 ret = PTR_ERR(drvdata->dev);
8243 dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
8244 - goto err_stategpio;
8245 + goto err_memstate;
8246 }
8247
8248 platform_set_drvdata(pdev, drvdata);
8249
8250 return 0;
8251
8252 -err_stategpio:
8253 - gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
8254 err_memstate:
8255 kfree(drvdata->states);
8256 +err_stategpio:
8257 + gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
8258 err_memgpio:
8259 kfree(drvdata->gpios);
8260 err_name:
8261 kfree(drvdata->desc.name);
8262 -err:
8263 return ret;
8264 }
8265
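
The relabelled goto targets in the gpio-regulator hunks restore the usual rule for probe error paths: undo the allocations in reverse order, and make each failure jump to a label that cleans up only what was already set up. A generic sketch of the pattern with hypothetical names (not the gpio-regulator code itself):

#include <linux/slab.h>
#include <linux/string.h>

struct demo_data {
	char *name;
	char *gpios;
	char *states;
};

static int demo_probe(struct demo_data *d)
{
	int ret;

	d->name = kstrdup("demo", GFP_KERNEL);
	if (!d->name)
		return -ENOMEM;

	d->gpios = kzalloc(16, GFP_KERNEL);
	if (!d->gpios) {
		ret = -ENOMEM;
		goto err_name;
	}

	d->states = kzalloc(32, GFP_KERNEL);
	if (!d->states) {
		ret = -ENOMEM;
		goto err_gpios;
	}

	return 0;	/* success: buffers stay allocated for the device */

err_gpios:
	kfree(d->gpios);
err_name:
	kfree(d->name);
	return ret;
}
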
8266 diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
8267 index 4f613ec99500..037675bb36b6 100644
8268 --- a/drivers/regulator/of_regulator.c
8269 +++ b/drivers/regulator/of_regulator.c
8270 @@ -282,6 +282,7 @@ int of_regulator_match(struct device *dev, struct device_node *node,
8271 dev_err(dev,
8272 "failed to parse DT for regulator %s\n",
8273 child->name);
8274 + of_node_put(child);
8275 return -EINVAL;
8276 }
8277 match->of_node = of_node_get(child);
8278 diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
8279 index 8327d47e08b6..c46e31e0a6d9 100644
8280 --- a/drivers/s390/cio/device_fsm.c
8281 +++ b/drivers/s390/cio/device_fsm.c
8282 @@ -822,6 +822,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
8283
8284 ccw_device_set_timeout(cdev, 0);
8285 cdev->private->iretry = 255;
8286 + cdev->private->async_kill_io_rc = -ETIMEDOUT;
8287 ret = ccw_device_cancel_halt_clear(cdev);
8288 if (ret == -EBUSY) {
8289 ccw_device_set_timeout(cdev, 3*HZ);
8290 @@ -898,7 +899,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
8291 /* OK, i/o is dead now. Call interrupt handler. */
8292 if (cdev->handler)
8293 cdev->handler(cdev, cdev->private->intparm,
8294 - ERR_PTR(-EIO));
8295 + ERR_PTR(cdev->private->async_kill_io_rc));
8296 }
8297
8298 static void
8299 @@ -915,14 +916,16 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
8300 ccw_device_online_verify(cdev, 0);
8301 if (cdev->handler)
8302 cdev->handler(cdev, cdev->private->intparm,
8303 - ERR_PTR(-EIO));
8304 + ERR_PTR(cdev->private->async_kill_io_rc));
8305 }
8306
8307 void ccw_device_kill_io(struct ccw_device *cdev)
8308 {
8309 int ret;
8310
8311 + ccw_device_set_timeout(cdev, 0);
8312 cdev->private->iretry = 255;
8313 + cdev->private->async_kill_io_rc = -EIO;
8314 ret = ccw_device_cancel_halt_clear(cdev);
8315 if (ret == -EBUSY) {
8316 ccw_device_set_timeout(cdev, 3*HZ);
8317 diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
8318 index 877d9f601e63..85b289638133 100644
8319 --- a/drivers/s390/cio/device_ops.c
8320 +++ b/drivers/s390/cio/device_ops.c
8321 @@ -158,7 +158,7 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
8322 }
8323
8324 /**
8325 - * ccw_device_start_key() - start a s390 channel program with key
8326 + * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
8327 * @cdev: target ccw device
8328 * @cpa: logical start address of channel program
8329 * @intparm: user specific interruption parameter; will be presented back to
8330 @@ -169,10 +169,15 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
8331 * @key: storage key to be used for the I/O
8332 * @flags: additional flags; defines the action to be performed for I/O
8333 * processing.
8334 + * @expires: timeout value in jiffies
8335 *
8336 * Start a S/390 channel program. When the interrupt arrives, the
8337 * IRQ handler is called, either immediately, delayed (dev-end missing,
8338 * or sense required) or never (no IRQ handler registered).
8339 + * This function notifies the device driver if the channel program has not
8340 + * completed during the time specified by @expires. If a timeout occurs, the
8341 + * channel program is terminated via xsch, hsch or csch, and the device's
8342 + * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
8343 * Returns:
8344 * %0, if the operation was successful;
8345 * -%EBUSY, if the device is busy, or status pending;
8346 @@ -181,9 +186,9 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
8347 * Context:
8348 * Interrupts disabled, ccw device lock held
8349 */
8350 -int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
8351 - unsigned long intparm, __u8 lpm, __u8 key,
8352 - unsigned long flags)
8353 +int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
8354 + unsigned long intparm, __u8 lpm, __u8 key,
8355 + unsigned long flags, int expires)
8356 {
8357 struct subchannel *sch;
8358 int ret;
8359 @@ -223,6 +228,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
8360 switch (ret) {
8361 case 0:
8362 cdev->private->intparm = intparm;
8363 + if (expires)
8364 + ccw_device_set_timeout(cdev, expires);
8365 break;
8366 case -EACCES:
8367 case -ENODEV:
8368 @@ -233,7 +240,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
8369 }
8370
8371 /**
8372 - * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
8373 + * ccw_device_start_key() - start a s390 channel program with key
8374 * @cdev: target ccw device
8375 * @cpa: logical start address of channel program
8376 * @intparm: user specific interruption parameter; will be presented back to
8377 @@ -244,15 +251,10 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
8378 * @key: storage key to be used for the I/O
8379 * @flags: additional flags; defines the action to be performed for I/O
8380 * processing.
8381 - * @expires: timeout value in jiffies
8382 *
8383 * Start a S/390 channel program. When the interrupt arrives, the
8384 * IRQ handler is called, either immediately, delayed (dev-end missing,
8385 * or sense required) or never (no IRQ handler registered).
8386 - * This function notifies the device driver if the channel program has not
8387 - * completed during the time specified by @expires. If a timeout occurs, the
8388 - * channel program is terminated via xsch, hsch or csch, and the device's
8389 - * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
8390 * Returns:
8391 * %0, if the operation was successful;
8392 * -%EBUSY, if the device is busy, or status pending;
8393 @@ -261,19 +263,12 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
8394 * Context:
8395 * Interrupts disabled, ccw device lock held
8396 */
8397 -int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
8398 - unsigned long intparm, __u8 lpm, __u8 key,
8399 - unsigned long flags, int expires)
8400 +int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
8401 + unsigned long intparm, __u8 lpm, __u8 key,
8402 + unsigned long flags)
8403 {
8404 - int ret;
8405 -
8406 - if (!cdev)
8407 - return -ENODEV;
8408 - ccw_device_set_timeout(cdev, expires);
8409 - ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
8410 - if (ret != 0)
8411 - ccw_device_set_timeout(cdev, 0);
8412 - return ret;
8413 + return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key,
8414 + flags, 0);
8415 }
8416
8417 /**
8418 @@ -488,18 +483,20 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
8419 EXPORT_SYMBOL(ccw_device_get_id);
8420
8421 /**
8422 - * ccw_device_tm_start_key() - perform start function
8423 + * ccw_device_tm_start_timeout_key() - perform start function
8424 * @cdev: ccw device on which to perform the start function
8425 * @tcw: transport-command word to be started
8426 * @intparm: user defined parameter to be passed to the interrupt handler
8427 * @lpm: mask of paths to use
8428 * @key: storage key to use for storage access
8429 + * @expires: time span in jiffies after which to abort request
8430 *
8431 * Start the tcw on the given ccw device. Return zero on success, non-zero
8432 * otherwise.
8433 */
8434 -int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
8435 - unsigned long intparm, u8 lpm, u8 key)
8436 +int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
8437 + unsigned long intparm, u8 lpm, u8 key,
8438 + int expires)
8439 {
8440 struct subchannel *sch;
8441 int rc;
8442 @@ -526,37 +523,32 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
8443 return -EACCES;
8444 }
8445 rc = cio_tm_start_key(sch, tcw, lpm, key);
8446 - if (rc == 0)
8447 + if (rc == 0) {
8448 cdev->private->intparm = intparm;
8449 + if (expires)
8450 + ccw_device_set_timeout(cdev, expires);
8451 + }
8452 return rc;
8453 }
8454 -EXPORT_SYMBOL(ccw_device_tm_start_key);
8455 +EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
8456
8457 /**
8458 - * ccw_device_tm_start_timeout_key() - perform start function
8459 + * ccw_device_tm_start_key() - perform start function
8460 * @cdev: ccw device on which to perform the start function
8461 * @tcw: transport-command word to be started
8462 * @intparm: user defined parameter to be passed to the interrupt handler
8463 * @lpm: mask of paths to use
8464 * @key: storage key to use for storage access
8465 - * @expires: time span in jiffies after which to abort request
8466 *
8467 * Start the tcw on the given ccw device. Return zero on success, non-zero
8468 * otherwise.
8469 */
8470 -int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
8471 - unsigned long intparm, u8 lpm, u8 key,
8472 - int expires)
8473 +int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
8474 + unsigned long intparm, u8 lpm, u8 key)
8475 {
8476 - int ret;
8477 -
8478 - ccw_device_set_timeout(cdev, expires);
8479 - ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
8480 - if (ret != 0)
8481 - ccw_device_set_timeout(cdev, 0);
8482 - return ret;
8483 + return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0);
8484 }
8485 -EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
8486 +EXPORT_SYMBOL(ccw_device_tm_start_key);
8487
8488 /**
8489 * ccw_device_tm_start() - perform start function
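
The device_ops.c rework inverts the old wrapper relationship: the timeout-capable variant becomes the real implementation and arms the timer only after the start succeeded, while the plain variant just delegates with a zero timeout. The shape of that refactoring, sketched with hypothetical names:

struct demo_dev;
int demo_hw_start(struct demo_dev *dev, unsigned long intparm);
void demo_arm_timer(struct demo_dev *dev, int expires);

int demo_start_timeout(struct demo_dev *dev, unsigned long intparm, int expires)
{
	int ret = demo_hw_start(dev, intparm);

	if (ret == 0 && expires)
		demo_arm_timer(dev, expires);	/* arm only on success */
	return ret;
}

int demo_start(struct demo_dev *dev, unsigned long intparm)
{
	return demo_start_timeout(dev, intparm, 0);	/* 0 == no timeout */
}
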
8490 diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
8491 index 220f49145b2f..1d984342eb53 100644
8492 --- a/drivers/s390/cio/io_sch.h
8493 +++ b/drivers/s390/cio/io_sch.h
8494 @@ -154,6 +154,7 @@ struct ccw_device_private {
8495 unsigned long intparm; /* user interruption parameter */
8496 struct qdio_irq *qdio_data;
8497 struct irb irb; /* device status */
8498 + int async_kill_io_rc;
8499 struct senseid senseid; /* SenseID info */
8500 struct pgid pgid[8]; /* path group IDs per chpid*/
8501 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
8502 diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
8503 index e63597342c96..01699845c42c 100644
8504 --- a/drivers/scsi/sr.c
8505 +++ b/drivers/scsi/sr.c
8506 @@ -522,6 +522,8 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode)
8507 struct scsi_cd *cd;
8508 int ret = -ENXIO;
8509
8510 + check_disk_change(bdev);
8511 +
8512 mutex_lock(&sr_mutex);
8513 cd = scsi_cd_get(bdev->bd_disk);
8514 if (cd) {
8515 @@ -582,18 +584,28 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
8516 static unsigned int sr_block_check_events(struct gendisk *disk,
8517 unsigned int clearing)
8518 {
8519 - struct scsi_cd *cd = scsi_cd(disk);
8520 + unsigned int ret = 0;
8521 + struct scsi_cd *cd;
8522
8523 - if (atomic_read(&cd->device->disk_events_disable_depth))
8524 + cd = scsi_cd_get(disk);
8525 + if (!cd)
8526 return 0;
8527
8528 - return cdrom_check_events(&cd->cdi, clearing);
8529 + if (!atomic_read(&cd->device->disk_events_disable_depth))
8530 + ret = cdrom_check_events(&cd->cdi, clearing);
8531 +
8532 + scsi_cd_put(cd);
8533 + return ret;
8534 }
8535
8536 static int sr_block_revalidate_disk(struct gendisk *disk)
8537 {
8538 - struct scsi_cd *cd = scsi_cd(disk);
8539 struct scsi_sense_hdr sshdr;
8540 + struct scsi_cd *cd;
8541 +
8542 + cd = scsi_cd_get(disk);
8543 + if (!cd)
8544 + return -ENXIO;
8545
8546 /* if the unit is not ready, nothing more to do */
8547 if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
8548 @@ -602,6 +614,7 @@ static int sr_block_revalidate_disk(struct gendisk *disk)
8549 sr_cd_check(&cd->cdi);
8550 get_sectorsize(cd);
8551 out:
8552 + scsi_cd_put(cd);
8553 return 0;
8554 }
8555
8556 diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
8557 index 520aedd29965..78d3dbac872a 100644
8558 --- a/drivers/soc/qcom/wcnss_ctrl.c
8559 +++ b/drivers/soc/qcom/wcnss_ctrl.c
8560 @@ -247,7 +247,7 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
8561 /* Increment for next fragment */
8562 req->seq++;
8563
8564 - data += req->hdr.len;
8565 + data += NV_FRAGMENT_SIZE;
8566 left -= NV_FRAGMENT_SIZE;
8567 } while (left > 0);
8568
8569 diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
8570 index adc3f56d4773..63231760facc 100644
8571 --- a/drivers/spi/spi-bcm-qspi.c
8572 +++ b/drivers/spi/spi-bcm-qspi.c
8573 @@ -1220,7 +1220,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
8574 qspi->base[MSPI] = devm_ioremap_resource(dev, res);
8575 if (IS_ERR(qspi->base[MSPI])) {
8576 ret = PTR_ERR(qspi->base[MSPI]);
8577 - goto qspi_probe_err;
8578 + goto qspi_resource_err;
8579 }
8580 } else {
8581 goto qspi_resource_err;
8582 @@ -1231,7 +1231,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
8583 qspi->base[BSPI] = devm_ioremap_resource(dev, res);
8584 if (IS_ERR(qspi->base[BSPI])) {
8585 ret = PTR_ERR(qspi->base[BSPI]);
8586 - goto qspi_probe_err;
8587 + goto qspi_resource_err;
8588 }
8589 qspi->bspi_mode = true;
8590 } else {
8591 diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
8592 index eaeb3c51e14b..cb95c3e940f1 100644
8593 --- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
8594 +++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
8595 @@ -75,6 +75,8 @@ int __init its_fsl_mc_msi_init(void)
8596
8597 for (np = of_find_matching_node(NULL, its_device_id); np;
8598 np = of_find_matching_node(np, its_device_id)) {
8599 + if (!of_device_is_available(np))
8600 + continue;
8601 if (!of_property_read_bool(np, "msi-controller"))
8602 continue;
8603
8604 diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
8605 index a350209ffbd3..31c301d6be62 100644
8606 --- a/drivers/video/fbdev/sbuslib.c
8607 +++ b/drivers/video/fbdev/sbuslib.c
8608 @@ -121,7 +121,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
8609 unsigned char __user *ured;
8610 unsigned char __user *ugreen;
8611 unsigned char __user *ublue;
8612 - int index, count, i;
8613 + unsigned int index, count, i;
8614
8615 if (get_user(index, &c->index) ||
8616 __get_user(count, &c->count) ||
8617 @@ -160,7 +160,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
8618 unsigned char __user *ugreen;
8619 unsigned char __user *ublue;
8620 struct fb_cmap *cmap = &info->cmap;
8621 - int index, count, i;
8622 + unsigned int index, count, i;
8623 u8 red, green, blue;
8624
8625 if (get_user(index, &c->index) ||
8626 diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
8627 index e682bf046e50..88cd2a52d8d3 100644
8628 --- a/drivers/watchdog/f71808e_wdt.c
8629 +++ b/drivers/watchdog/f71808e_wdt.c
8630 @@ -566,7 +566,8 @@ static ssize_t watchdog_write(struct file *file, const char __user *buf,
8631 char c;
8632 if (get_user(c, buf + i))
8633 return -EFAULT;
8634 - expect_close = (c == 'V');
8635 + if (c == 'V')
8636 + expect_close = true;
8637 }
8638
8639 /* Properly order writes across fork()ed processes */
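
The f71808e change makes the magic-close detection sticky: a 'V' anywhere in the written buffer is enough, rather than only in the byte examined last. A standalone sketch of that scan:

#include <stdbool.h>
#include <stddef.h>

/* Returns true if the magic close character appears anywhere in buf. */
static bool buffer_has_magic_close(const char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (buf[i] == 'V')
			return true;
	return false;
}
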
8640 diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
8641 index ce0c38bd0f00..37523f139ccd 100644
8642 --- a/drivers/watchdog/sbsa_gwdt.c
8643 +++ b/drivers/watchdog/sbsa_gwdt.c
8644 @@ -50,6 +50,7 @@
8645 */
8646
8647 #include <linux/io.h>
8648 +#include <linux/io-64-nonatomic-lo-hi.h>
8649 #include <linux/interrupt.h>
8650 #include <linux/module.h>
8651 #include <linux/moduleparam.h>
8652 @@ -159,7 +160,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)
8653 !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0))
8654 timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);
8655
8656 - timeleft += readq(gwdt->control_base + SBSA_GWDT_WCV) -
8657 + timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -
8658 arch_counter_get_cntvct();
8659
8660 do_div(timeleft, gwdt->clk);
8661 diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
8662 index 2b28c00da0df..dfe20b81ced5 100644
8663 --- a/drivers/watchdog/sp5100_tco.h
8664 +++ b/drivers/watchdog/sp5100_tco.h
8665 @@ -54,7 +54,7 @@
8666 #define SB800_PM_WATCHDOG_CONFIG 0x4C
8667
8668 #define SB800_PCI_WATCHDOG_DECODE_EN (1 << 0)
8669 -#define SB800_PM_WATCHDOG_DISABLE (1 << 2)
8670 +#define SB800_PM_WATCHDOG_DISABLE (1 << 1)
8671 #define SB800_PM_WATCHDOG_SECOND_RES (3 << 0)
8672 #define SB800_ACPI_MMIO_DECODE_EN (1 << 0)
8673 #define SB800_ACPI_MMIO_SEL (1 << 1)
8674 diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
8675 index d5dbdb9d24d8..6d3b32ccc2c4 100644
8676 --- a/drivers/xen/events/events_base.c
8677 +++ b/drivers/xen/events/events_base.c
8678 @@ -764,8 +764,8 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
8679 mutex_unlock(&irq_mapping_update_lock);
8680 return irq;
8681 error_irq:
8682 - for (; i >= 0; i--)
8683 - __unbind_from_irq(irq + i);
8684 + while (nvec--)
8685 + __unbind_from_irq(irq + nvec);
8686 mutex_unlock(&irq_mapping_update_lock);
8687 return ret;
8688 }
8689 diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
8690 index bb36b1e1dbcc..775d4195966c 100644
8691 --- a/drivers/xen/grant-table.c
8692 +++ b/drivers/xen/grant-table.c
8693 @@ -327,7 +327,7 @@ static void gnttab_handle_deferred(unsigned long unused)
8694 if (entry->page) {
8695 pr_debug("freeing g.e. %#x (pfn %#lx)\n",
8696 entry->ref, page_to_pfn(entry->page));
8697 - __free_page(entry->page);
8698 + put_page(entry->page);
8699 } else
8700 pr_info("freeing g.e. %#x\n", entry->ref);
8701 kfree(entry);
8702 @@ -383,7 +383,7 @@ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
8703 if (gnttab_end_foreign_access_ref(ref, readonly)) {
8704 put_free_entry(ref);
8705 if (page != 0)
8706 - free_page(page);
8707 + put_page(virt_to_page(page));
8708 } else
8709 gnttab_add_deferred(ref, readonly,
8710 page ? virt_to_page(page) : NULL);
8711 diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
8712 index b68ced5a6331..2fe7353ab720 100644
8713 --- a/drivers/xen/swiotlb-xen.c
8714 +++ b/drivers/xen/swiotlb-xen.c
8715 @@ -359,7 +359,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
8716 * physical address */
8717 phys = xen_bus_to_phys(dev_addr);
8718
8719 - if (((dev_addr + size - 1 > dma_mask)) ||
8720 + if (((dev_addr + size - 1 <= dma_mask)) ||
8721 range_straddles_page_boundary(phys, size))
8722 xen_destroy_contiguous_region(phys, order);
8723
8724 diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
8725 index 4b857463a2b4..7ff9d25f714e 100644
8726 --- a/drivers/xen/xen-acpi-processor.c
8727 +++ b/drivers/xen/xen-acpi-processor.c
8728 @@ -362,9 +362,9 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
8729 }
8730 /* There are more ACPI Processor objects than in x2APIC or MADT.
8731 * This can happen with incorrect ACPI SSDT declerations. */
8732 - if (acpi_id > nr_acpi_bits) {
8733 - pr_debug("We only have %u, trying to set %u\n",
8734 - nr_acpi_bits, acpi_id);
8735 + if (acpi_id >= nr_acpi_bits) {
8736 + pr_debug("max acpi id %u, trying to set %u\n",
8737 + nr_acpi_bits - 1, acpi_id);
8738 return AE_OK;
8739 }
8740 /* OK, There is a ACPI Processor object */
8741 diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
8742 index 33a31cfef55d..c2d447687e33 100644
8743 --- a/drivers/xen/xenbus/xenbus_probe.c
8744 +++ b/drivers/xen/xenbus/xenbus_probe.c
8745 @@ -470,8 +470,11 @@ int xenbus_probe_node(struct xen_bus_type *bus,
8746
8747 /* Register with generic device framework. */
8748 err = device_register(&xendev->dev);
8749 - if (err)
8750 + if (err) {
8751 + put_device(&xendev->dev);
8752 + xendev = NULL;
8753 goto fail;
8754 + }
8755
8756 return 0;
8757 fail:
8758 diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
8759 index d295d9878dff..8ec79385d3cc 100644
8760 --- a/drivers/zorro/zorro.c
8761 +++ b/drivers/zorro/zorro.c
8762 @@ -16,6 +16,7 @@
8763 #include <linux/bitops.h>
8764 #include <linux/string.h>
8765 #include <linux/platform_device.h>
8766 +#include <linux/dma-mapping.h>
8767 #include <linux/slab.h>
8768
8769 #include <asm/byteorder.h>
8770 @@ -185,6 +186,17 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
8771 z->dev.parent = &bus->dev;
8772 z->dev.bus = &zorro_bus_type;
8773 z->dev.id = i;
8774 + switch (z->rom.er_Type & ERT_TYPEMASK) {
8775 + case ERT_ZORROIII:
8776 + z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
8777 + break;
8778 +
8779 + case ERT_ZORROII:
8780 + default:
8781 + z->dev.coherent_dma_mask = DMA_BIT_MASK(24);
8782 + break;
8783 + }
8784 + z->dev.dma_mask = &z->dev.coherent_dma_mask;
8785 }
8786
8787 /* ... then register them */
8788 diff --git a/fs/affs/namei.c b/fs/affs/namei.c
8789 index 29186d29a3b6..2d4d4952e951 100644
8790 --- a/fs/affs/namei.c
8791 +++ b/fs/affs/namei.c
8792 @@ -224,9 +224,10 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
8793
8794 affs_lock_dir(dir);
8795 bh = affs_find_entry(dir, dentry);
8796 - affs_unlock_dir(dir);
8797 - if (IS_ERR(bh))
8798 + if (IS_ERR(bh)) {
8799 + affs_unlock_dir(dir);
8800 return ERR_CAST(bh);
8801 + }
8802 if (bh) {
8803 u32 ino = bh->b_blocknr;
8804
8805 @@ -240,10 +241,13 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
8806 }
8807 affs_brelse(bh);
8808 inode = affs_iget(sb, ino);
8809 - if (IS_ERR(inode))
8810 + if (IS_ERR(inode)) {
8811 + affs_unlock_dir(dir);
8812 return ERR_CAST(inode);
8813 + }
8814 }
8815 d_add(dentry, inode);
8816 + affs_unlock_dir(dir);
8817 return NULL;
8818 }
8819
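
The affs_lookup() fix keeps the directory locked until after d_add(), releasing the lock on each error return instead of dropping it right after affs_find_entry(). The general shape, sketched with POSIX threads rather than the affs locking helpers:

/*
 * Generic sketch, not the affs code: hold the lock across the whole lookup
 * and release it on every exit path, including the error returns.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;

static int lookup(int key, int *result)
{
	pthread_mutex_lock(&dir_lock);

	if (key < 0) {				/* error path: unlock before returning */
		pthread_mutex_unlock(&dir_lock);
		return -1;
	}

	*result = key * 2;			/* stand-in for the real directory work */

	pthread_mutex_unlock(&dir_lock);	/* common success exit */
	return 0;
}

int main(void)
{
	int v;

	printf("%d\n", lookup(21, &v) == 0 ? v : -1);	/* prints 42 */
	return 0;
}
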
8820 diff --git a/fs/aio.c b/fs/aio.c
8821 index 0606f033cd9b..42d8c09311d1 100644
8822 --- a/fs/aio.c
8823 +++ b/fs/aio.c
8824 @@ -1074,8 +1074,8 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
8825
8826 ctx = rcu_dereference(table->table[id]);
8827 if (ctx && ctx->user_id == ctx_id) {
8828 - percpu_ref_get(&ctx->users);
8829 - ret = ctx;
8830 + if (percpu_ref_tryget_live(&ctx->users))
8831 + ret = ctx;
8832 }
8833 out:
8834 rcu_read_unlock();
8835 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
8836 index 409b12392474..c94d3390cbfc 100644
8837 --- a/fs/btrfs/ctree.c
8838 +++ b/fs/btrfs/ctree.c
8839 @@ -2760,6 +2760,8 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
8840 * contention with the cow code
8841 */
8842 if (cow) {
8843 + bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
8844 +
8845 /*
8846 * if we don't really need to cow this block
8847 * then we don't want to set the path blocking,
8848 @@ -2784,9 +2786,13 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
8849 }
8850
8851 btrfs_set_path_blocking(p);
8852 - err = btrfs_cow_block(trans, root, b,
8853 - p->nodes[level + 1],
8854 - p->slots[level + 1], &b);
8855 + if (last_level)
8856 + err = btrfs_cow_block(trans, root, b, NULL, 0,
8857 + &b);
8858 + else
8859 + err = btrfs_cow_block(trans, root, b,
8860 + p->nodes[level + 1],
8861 + p->slots[level + 1], &b);
8862 if (err) {
8863 ret = err;
8864 goto done;
8865 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
8866 index 1cd325765aaa..c5eafcdb3664 100644
8867 --- a/fs/btrfs/disk-io.c
8868 +++ b/fs/btrfs/disk-io.c
8869 @@ -1281,7 +1281,7 @@ static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
8870 if (!writers)
8871 return ERR_PTR(-ENOMEM);
8872
8873 - ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
8874 + ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
8875 if (ret < 0) {
8876 kfree(writers);
8877 return ERR_PTR(ret);
8878 @@ -4142,9 +4142,11 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
8879 btrfs_err(fs_info, "no valid FS found");
8880 ret = -EINVAL;
8881 }
8882 - if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
8883 - btrfs_warn(fs_info, "unrecognized super flag: %llu",
8884 + if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
8885 + btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
8886 btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
8887 + ret = -EINVAL;
8888 + }
8889 if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
8890 btrfs_err(fs_info, "tree_root level too big: %d >= %d",
8891 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
8892 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
8893 index a29730c44850..44a43851404a 100644
8894 --- a/fs/btrfs/extent-tree.c
8895 +++ b/fs/btrfs/extent-tree.c
8896 @@ -4527,6 +4527,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
8897 if (wait_for_alloc) {
8898 mutex_unlock(&fs_info->chunk_mutex);
8899 wait_for_alloc = 0;
8900 + cond_resched();
8901 goto again;
8902 }
8903
8904 diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
8905 index c95ff096cd24..437544846e4e 100644
8906 --- a/fs/btrfs/file.c
8907 +++ b/fs/btrfs/file.c
8908 @@ -1912,10 +1912,19 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
8909 static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
8910 {
8911 int ret;
8912 + struct blk_plug plug;
8913
8914 + /*
8915 + * This is only called in fsync, which would do synchronous writes, so
8916 + * a plug can merge adjacent IOs as much as possible. Esp. in case of
8917 + * multiple disks using raid profile, a large IO can be split to
8918 + * several segments of stripe length (currently 64K).
8919 + */
8920 + blk_start_plug(&plug);
8921 atomic_inc(&BTRFS_I(inode)->sync_writers);
8922 ret = btrfs_fdatawrite_range(inode, start, end);
8923 atomic_dec(&BTRFS_I(inode)->sync_writers);
8924 + blk_finish_plug(&plug);
8925
8926 return ret;
8927 }
8928 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
8929 index ffd5831ca15c..f073de65e818 100644
8930 --- a/fs/btrfs/inode.c
8931 +++ b/fs/btrfs/inode.c
8932 @@ -6491,8 +6491,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
8933 goto out_unlock_inode;
8934 } else {
8935 btrfs_update_inode(trans, root, inode);
8936 - unlock_new_inode(inode);
8937 - d_instantiate(dentry, inode);
8938 + d_instantiate_new(dentry, inode);
8939 }
8940
8941 out_unlock:
8942 @@ -6567,8 +6566,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
8943 goto out_unlock_inode;
8944
8945 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
8946 - unlock_new_inode(inode);
8947 - d_instantiate(dentry, inode);
8948 + d_instantiate_new(dentry, inode);
8949
8950 out_unlock:
8951 btrfs_end_transaction(trans, root);
8952 @@ -6711,12 +6709,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
8953 if (err)
8954 goto out_fail_inode;
8955
8956 - d_instantiate(dentry, inode);
8957 - /*
8958 - * mkdir is special. We're unlocking after we call d_instantiate
8959 - * to avoid a race with nfsd calling d_instantiate.
8960 - */
8961 - unlock_new_inode(inode);
8962 + d_instantiate_new(dentry, inode);
8963 drop_on_err = 0;
8964
8965 out_fail:
8966 @@ -10354,8 +10347,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
8967 goto out_unlock_inode;
8968 }
8969
8970 - unlock_new_inode(inode);
8971 - d_instantiate(dentry, inode);
8972 + d_instantiate_new(dentry, inode);
8973
8974 out_unlock:
8975 btrfs_end_transaction(trans, root);
8976 diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
8977 index d016d4a79864..af6a776fa18c 100644
8978 --- a/fs/btrfs/raid56.c
8979 +++ b/fs/btrfs/raid56.c
8980 @@ -2161,11 +2161,21 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
8981 }
8982
8983 /*
8984 - * reconstruct from the q stripe if they are
8985 - * asking for mirror 3
8986 + * Loop retry:
8987 + * for 'mirror == 2', reconstruct from all other stripes.
8988 + * for 'mirror_num > 2', select a stripe to fail on every retry.
8989 */
8990 - if (mirror_num == 3)
8991 - rbio->failb = rbio->real_stripes - 2;
8992 + if (mirror_num > 2) {
8993 + /*
8994 + * 'mirror == 3' is to fail the p stripe and
8995 + * reconstruct from the q stripe. 'mirror > 3' is to
8996 + * fail a data stripe and reconstruct from p+q stripe.
8997 + */
8998 + rbio->failb = rbio->real_stripes - (mirror_num - 1);
8999 + ASSERT(rbio->failb > 0);
9000 + if (rbio->failb <= rbio->faila)
9001 + rbio->failb--;
9002 + }
9003
9004 ret = lock_stripe_add(rbio);
9005
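
Illustration only, assuming a RAID6 chunk with four data stripes plus P and Q (real_stripes = 6, data at indices 0-3, P at 4, Q at 5): mirror_num = 3 gives failb = 6 - 2 = 4, so the P stripe is marked failed and the block is rebuilt from the remaining data plus Q; mirror_num = 4 gives failb = 6 - 3 = 3, failing the last data stripe so it is rebuilt from P and Q. If the computed failb lands on or below the stripe already recorded in faila, it is shifted down by one so two distinct stripes are failed. The companion btrfs_num_copies() change later in this patch raises the RAID6 copy count to num_stripes so the retry loop can walk mirror_num across every stripe.
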
9006 diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
9007 index d040afc966fe..c8d2eec6596b 100644
9008 --- a/fs/btrfs/send.c
9009 +++ b/fs/btrfs/send.c
9010 @@ -4822,6 +4822,9 @@ static int send_hole(struct send_ctx *sctx, u64 end)
9011 u64 len;
9012 int ret = 0;
9013
9014 + if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
9015 + return send_update_extent(sctx, offset, end - offset);
9016 +
9017 p = fs_path_alloc();
9018 if (!p)
9019 return -ENOMEM;
9020 diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
9021 index ca7cb5e6d385..9c6666692341 100644
9022 --- a/fs/btrfs/tests/qgroup-tests.c
9023 +++ b/fs/btrfs/tests/qgroup-tests.c
9024 @@ -63,7 +63,7 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
9025 btrfs_set_extent_generation(leaf, item, 1);
9026 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK);
9027 block_info = (struct btrfs_tree_block_info *)(item + 1);
9028 - btrfs_set_tree_block_level(leaf, block_info, 1);
9029 + btrfs_set_tree_block_level(leaf, block_info, 0);
9030 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
9031 if (parent > 0) {
9032 btrfs_set_extent_inline_ref_type(leaf, iref,
9033 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
9034 index c65350e5119c..44d34923de9c 100644
9035 --- a/fs/btrfs/tree-log.c
9036 +++ b/fs/btrfs/tree-log.c
9037 @@ -2241,8 +2241,10 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
9038 nritems = btrfs_header_nritems(path->nodes[0]);
9039 if (path->slots[0] >= nritems) {
9040 ret = btrfs_next_leaf(root, path);
9041 - if (ret)
9042 + if (ret == 1)
9043 break;
9044 + else if (ret < 0)
9045 + goto out;
9046 }
9047 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
9048 path->slots[0]);
9049 @@ -3397,8 +3399,11 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
9050 * from this directory and from this transaction
9051 */
9052 ret = btrfs_next_leaf(root, path);
9053 - if (ret == 1) {
9054 - last_offset = (u64)-1;
9055 + if (ret) {
9056 + if (ret == 1)
9057 + last_offset = (u64)-1;
9058 + else
9059 + err = ret;
9060 goto done;
9061 }
9062 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
9063 @@ -3849,6 +3854,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
9064 ASSERT(ret == 0);
9065 src = src_path->nodes[0];
9066 i = 0;
9067 + need_find_last_extent = true;
9068 }
9069
9070 btrfs_item_key_to_cpu(src, &key, i);
9071 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
9072 index c2495cde26f6..76017e1b3c0f 100644
9073 --- a/fs/btrfs/volumes.c
9074 +++ b/fs/btrfs/volumes.c
9075 @@ -5186,7 +5186,14 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
9076 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
9077 ret = 2;
9078 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
9079 - ret = 3;
9080 + /*
9081 + * There could be two corrupted data stripes, we need
9082 + * to loop retry in order to rebuild the correct data.
9083 + *
9084 + * Fail a stripe at a time on every retry except the
9085 + * stripe under reconstruction.
9086 + */
9087 + ret = map->num_stripes;
9088 else
9089 ret = 1;
9090 free_extent_map(em);
9091 diff --git a/fs/ceph/super.c b/fs/ceph/super.c
9092 index b382e5910eea..2a8903025853 100644
9093 --- a/fs/ceph/super.c
9094 +++ b/fs/ceph/super.c
9095 @@ -816,7 +816,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
9096 int err;
9097 unsigned long started = jiffies; /* note the start time */
9098 struct dentry *root;
9099 - int first = 0; /* first vfsmount for this super_block */
9100
9101 dout("mount start %p\n", fsc);
9102 mutex_lock(&fsc->client->mount_mutex);
9103 @@ -834,17 +833,17 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
9104 path = fsc->mount_options->server_path + 1;
9105 dout("mount opening path %s\n", path);
9106 }
9107 +
9108 + err = ceph_fs_debugfs_init(fsc);
9109 + if (err < 0)
9110 + goto out;
9111 +
9112 root = open_root_dentry(fsc, path, started);
9113 if (IS_ERR(root)) {
9114 err = PTR_ERR(root);
9115 goto out;
9116 }
9117 fsc->sb->s_root = dget(root);
9118 - first = 1;
9119 -
9120 - err = ceph_fs_debugfs_init(fsc);
9121 - if (err < 0)
9122 - goto fail;
9123 } else {
9124 root = dget(fsc->sb->s_root);
9125 }
9126 @@ -854,11 +853,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
9127 mutex_unlock(&fsc->client->mount_mutex);
9128 return root;
9129
9130 -fail:
9131 - if (first) {
9132 - dput(fsc->sb->s_root);
9133 - fsc->sb->s_root = NULL;
9134 - }
9135 out:
9136 mutex_unlock(&fsc->client->mount_mutex);
9137 return ERR_PTR(err);
9138 diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
9139 index cc420d6b71f7..d57222894892 100644
9140 --- a/fs/cifs/cifssmb.c
9141 +++ b/fs/cifs/cifssmb.c
9142 @@ -6413,9 +6413,7 @@ CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
9143 pSMB->InformationLevel =
9144 cpu_to_le16(SMB_SET_FILE_EA);
9145
9146 - parm_data =
9147 - (struct fealist *) (((char *) &pSMB->hdr.Protocol) +
9148 - offset);
9149 + parm_data = (void *)pSMB + offsetof(struct smb_hdr, Protocol) + offset;
9150 pSMB->ParameterOffset = cpu_to_le16(param_offset);
9151 pSMB->DataOffset = cpu_to_le16(offset);
9152 pSMB->SetupCount = 1;
9153 diff --git a/fs/dcache.c b/fs/dcache.c
9154 index 2225b9855c5f..7a5e6f9717f5 100644
9155 --- a/fs/dcache.c
9156 +++ b/fs/dcache.c
9157 @@ -1859,6 +1859,28 @@ void d_instantiate(struct dentry *entry, struct inode * inode)
9158 }
9159 EXPORT_SYMBOL(d_instantiate);
9160
9161 +/*
9162 + * This should be equivalent to d_instantiate() + unlock_new_inode(),
9163 + * with lockdep-related part of unlock_new_inode() done before
9164 + * anything else. Use that instead of open-coding d_instantiate()/
9165 + * unlock_new_inode() combinations.
9166 + */
9167 +void d_instantiate_new(struct dentry *entry, struct inode *inode)
9168 +{
9169 + BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
9170 + BUG_ON(!inode);
9171 + lockdep_annotate_inode_mutex_key(inode);
9172 + security_d_instantiate(entry, inode);
9173 + spin_lock(&inode->i_lock);
9174 + __d_instantiate(entry, inode);
9175 + WARN_ON(!(inode->i_state & I_NEW));
9176 + inode->i_state &= ~I_NEW;
9177 + smp_mb();
9178 + wake_up_bit(&inode->i_state, __I_NEW);
9179 + spin_unlock(&inode->i_lock);
9180 +}
9181 +EXPORT_SYMBOL(d_instantiate_new);
9182 +
9183 /**
9184 * d_instantiate_no_diralias - instantiate a non-aliased dentry
9185 * @entry: dentry to complete
9186 @@ -2452,7 +2474,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
9187
9188 retry:
9189 rcu_read_lock();
9190 - seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
9191 + seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
9192 r_seq = read_seqbegin(&rename_lock);
9193 dentry = __d_lookup_rcu(parent, name, &d_seq);
9194 if (unlikely(dentry)) {
9195 @@ -2473,8 +2495,14 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
9196 rcu_read_unlock();
9197 goto retry;
9198 }
9199 +
9200 + if (unlikely(seq & 1)) {
9201 + rcu_read_unlock();
9202 + goto retry;
9203 + }
9204 +
9205 hlist_bl_lock(b);
9206 - if (unlikely(parent->d_inode->i_dir_seq != seq)) {
9207 + if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
9208 hlist_bl_unlock(b);
9209 rcu_read_unlock();
9210 goto retry;
9211 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
9212 index cf390dceddd2..5c5ff9f6fe07 100644
9213 --- a/fs/ecryptfs/inode.c
9214 +++ b/fs/ecryptfs/inode.c
9215 @@ -284,8 +284,7 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
9216 iget_failed(ecryptfs_inode);
9217 goto out;
9218 }
9219 - unlock_new_inode(ecryptfs_inode);
9220 - d_instantiate(ecryptfs_dentry, ecryptfs_inode);
9221 + d_instantiate_new(ecryptfs_dentry, ecryptfs_inode);
9222 out:
9223 return rc;
9224 }
9225 diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
9226 index 814e405a2da6..c8efc5ea1b9f 100644
9227 --- a/fs/ext2/namei.c
9228 +++ b/fs/ext2/namei.c
9229 @@ -40,8 +40,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
9230 {
9231 int err = ext2_add_link(dentry, inode);
9232 if (!err) {
9233 - unlock_new_inode(inode);
9234 - d_instantiate(dentry, inode);
9235 + d_instantiate_new(dentry, inode);
9236 return 0;
9237 }
9238 inode_dec_link_count(inode);
9239 @@ -268,8 +267,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
9240 if (err)
9241 goto out_fail;
9242
9243 - unlock_new_inode(inode);
9244 - d_instantiate(dentry, inode);
9245 + d_instantiate_new(dentry, inode);
9246 out:
9247 return err;
9248
9249 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
9250 index b1766a67d2eb..248c43b63f13 100644
9251 --- a/fs/ext4/namei.c
9252 +++ b/fs/ext4/namei.c
9253 @@ -2442,8 +2442,7 @@ static int ext4_add_nondir(handle_t *handle,
9254 int err = ext4_add_entry(handle, dentry, inode);
9255 if (!err) {
9256 ext4_mark_inode_dirty(handle, inode);
9257 - unlock_new_inode(inode);
9258 - d_instantiate(dentry, inode);
9259 + d_instantiate_new(dentry, inode);
9260 return 0;
9261 }
9262 drop_nlink(inode);
9263 @@ -2682,8 +2681,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
9264 err = ext4_mark_inode_dirty(handle, dir);
9265 if (err)
9266 goto out_clear_inode;
9267 - unlock_new_inode(inode);
9268 - d_instantiate(dentry, inode);
9269 + d_instantiate_new(dentry, inode);
9270 if (IS_DIRSYNC(dir))
9271 ext4_handle_sync(handle);
9272
9273 diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
9274 index 63e519658d73..d7b8c8b5fc39 100644
9275 --- a/fs/f2fs/extent_cache.c
9276 +++ b/fs/f2fs/extent_cache.c
9277 @@ -647,6 +647,9 @@ void f2fs_drop_extent_tree(struct inode *inode)
9278 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
9279 struct extent_tree *et = F2FS_I(inode)->extent_tree;
9280
9281 + if (!f2fs_may_extent_tree(inode))
9282 + return;
9283 +
9284 set_inode_flag(inode, FI_NO_EXTENT);
9285
9286 write_lock(&et->lock);
9287 diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
9288 index 8556fe1ccb8a..ccb99d5cfd8b 100644
9289 --- a/fs/f2fs/namei.c
9290 +++ b/fs/f2fs/namei.c
9291 @@ -158,8 +158,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
9292
9293 alloc_nid_done(sbi, ino);
9294
9295 - d_instantiate(dentry, inode);
9296 - unlock_new_inode(inode);
9297 + d_instantiate_new(dentry, inode);
9298
9299 if (IS_DIRSYNC(dir))
9300 f2fs_sync_fs(sbi->sb, 1);
9301 @@ -464,8 +463,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
9302 err = page_symlink(inode, disk_link.name, disk_link.len);
9303
9304 err_out:
9305 - d_instantiate(dentry, inode);
9306 - unlock_new_inode(inode);
9307 + d_instantiate_new(dentry, inode);
9308
9309 /*
9310 * Let's flush symlink data in order to avoid broken symlink as much as
9311 @@ -519,8 +517,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
9312
9313 alloc_nid_done(sbi, inode->i_ino);
9314
9315 - d_instantiate(dentry, inode);
9316 - unlock_new_inode(inode);
9317 + d_instantiate_new(dentry, inode);
9318
9319 if (IS_DIRSYNC(dir))
9320 f2fs_sync_fs(sbi->sb, 1);
9321 @@ -564,8 +561,7 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
9322
9323 alloc_nid_done(sbi, inode->i_ino);
9324
9325 - d_instantiate(dentry, inode);
9326 - unlock_new_inode(inode);
9327 + d_instantiate_new(dentry, inode);
9328
9329 if (IS_DIRSYNC(dir))
9330 f2fs_sync_fs(sbi->sb, 1);
9331 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
9332 index c8c4f79c7ce1..8a7923a4f93c 100644
9333 --- a/fs/fscache/page.c
9334 +++ b/fs/fscache/page.c
9335 @@ -776,6 +776,7 @@ static void fscache_write_op(struct fscache_operation *_op)
9336
9337 _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
9338
9339 +again:
9340 spin_lock(&object->lock);
9341 cookie = object->cookie;
9342
9343 @@ -816,10 +817,6 @@ static void fscache_write_op(struct fscache_operation *_op)
9344 goto superseded;
9345 page = results[0];
9346 _debug("gang %d [%lx]", n, page->index);
9347 - if (page->index >= op->store_limit) {
9348 - fscache_stat(&fscache_n_store_pages_over_limit);
9349 - goto superseded;
9350 - }
9351
9352 radix_tree_tag_set(&cookie->stores, page->index,
9353 FSCACHE_COOKIE_STORING_TAG);
9354 @@ -829,6 +826,9 @@ static void fscache_write_op(struct fscache_operation *_op)
9355 spin_unlock(&cookie->stores_lock);
9356 spin_unlock(&object->lock);
9357
9358 + if (page->index >= op->store_limit)
9359 + goto discard_page;
9360 +
9361 fscache_stat(&fscache_n_store_pages);
9362 fscache_stat(&fscache_n_cop_write_page);
9363 ret = object->cache->ops->write_page(op, page);
9364 @@ -844,6 +844,11 @@ static void fscache_write_op(struct fscache_operation *_op)
9365 _leave("");
9366 return;
9367
9368 +discard_page:
9369 + fscache_stat(&fscache_n_store_pages_over_limit);
9370 + fscache_end_page_write(object, page);
9371 + goto again;
9372 +
9373 superseded:
9374 /* this writer is going away and there aren't any more things to
9375 * write */
9376 diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
9377 index 39c382f16272..ff93e96099d8 100644
9378 --- a/fs/gfs2/file.c
9379 +++ b/fs/gfs2/file.c
9380 @@ -801,7 +801,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
9381 struct gfs2_inode *ip = GFS2_I(inode);
9382 struct gfs2_alloc_parms ap = { .aflags = 0, };
9383 unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
9384 - loff_t bytes, max_bytes, max_blks = UINT_MAX;
9385 + loff_t bytes, max_bytes, max_blks;
9386 int error;
9387 const loff_t pos = offset;
9388 const loff_t count = len;
9389 @@ -853,7 +853,8 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
9390 return error;
9391 /* ap.allowed tells us how many blocks quota will allow
9392 * us to write. Check if this reduces max_blks */
9393 - if (ap.allowed && ap.allowed < max_blks)
9394 + max_blks = UINT_MAX;
9395 + if (ap.allowed)
9396 max_blks = ap.allowed;
9397
9398 error = gfs2_inplace_reserve(ip, &ap);
9399 diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
9400 index 5e47c935a515..836f29480be6 100644
9401 --- a/fs/gfs2/quota.h
9402 +++ b/fs/gfs2/quota.h
9403 @@ -45,6 +45,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
9404 {
9405 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
9406 int ret;
9407 +
9408 + ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
9409 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
9410 return 0;
9411 ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
9412 diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
9413 index 0a754f38462e..e5a6deb38e1e 100644
9414 --- a/fs/jffs2/dir.c
9415 +++ b/fs/jffs2/dir.c
9416 @@ -209,8 +209,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
9417 __func__, inode->i_ino, inode->i_mode, inode->i_nlink,
9418 f->inocache->pino_nlink, inode->i_mapping->nrpages);
9419
9420 - unlock_new_inode(inode);
9421 - d_instantiate(dentry, inode);
9422 + d_instantiate_new(dentry, inode);
9423 return 0;
9424
9425 fail:
9426 @@ -430,8 +429,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
9427 mutex_unlock(&dir_f->sem);
9428 jffs2_complete_reservation(c);
9429
9430 - unlock_new_inode(inode);
9431 - d_instantiate(dentry, inode);
9432 + d_instantiate_new(dentry, inode);
9433 return 0;
9434
9435 fail:
9436 @@ -575,8 +573,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode
9437 mutex_unlock(&dir_f->sem);
9438 jffs2_complete_reservation(c);
9439
9440 - unlock_new_inode(inode);
9441 - d_instantiate(dentry, inode);
9442 + d_instantiate_new(dentry, inode);
9443 return 0;
9444
9445 fail:
9446 @@ -747,8 +744,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode
9447 mutex_unlock(&dir_f->sem);
9448 jffs2_complete_reservation(c);
9449
9450 - unlock_new_inode(inode);
9451 - d_instantiate(dentry, inode);
9452 + d_instantiate_new(dentry, inode);
9453 return 0;
9454
9455 fail:
9456 diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
9457 index 567653f7c0ce..c9c47d03a690 100644
9458 --- a/fs/jffs2/fs.c
9459 +++ b/fs/jffs2/fs.c
9460 @@ -361,7 +361,6 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
9461 ret = -EIO;
9462 error:
9463 mutex_unlock(&f->sem);
9464 - jffs2_do_clear_inode(c, f);
9465 iget_failed(inode);
9466 return ERR_PTR(ret);
9467 }
9468 diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
9469 index b41596d71858..56c3fcbfe80e 100644
9470 --- a/fs/jfs/namei.c
9471 +++ b/fs/jfs/namei.c
9472 @@ -178,8 +178,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
9473 unlock_new_inode(ip);
9474 iput(ip);
9475 } else {
9476 - unlock_new_inode(ip);
9477 - d_instantiate(dentry, ip);
9478 + d_instantiate_new(dentry, ip);
9479 }
9480
9481 out2:
9482 @@ -313,8 +312,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
9483 unlock_new_inode(ip);
9484 iput(ip);
9485 } else {
9486 - unlock_new_inode(ip);
9487 - d_instantiate(dentry, ip);
9488 + d_instantiate_new(dentry, ip);
9489 }
9490
9491 out2:
9492 @@ -1059,8 +1057,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
9493 unlock_new_inode(ip);
9494 iput(ip);
9495 } else {
9496 - unlock_new_inode(ip);
9497 - d_instantiate(dentry, ip);
9498 + d_instantiate_new(dentry, ip);
9499 }
9500
9501 out2:
9502 @@ -1447,8 +1444,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
9503 unlock_new_inode(ip);
9504 iput(ip);
9505 } else {
9506 - unlock_new_inode(ip);
9507 - d_instantiate(dentry, ip);
9508 + d_instantiate_new(dentry, ip);
9509 }
9510
9511 out1:
9512 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
9513 index 1b1b616a6171..91e017ca7072 100644
9514 --- a/fs/nfs/nfs4proc.c
9515 +++ b/fs/nfs/nfs4proc.c
9516 @@ -1934,7 +1934,7 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta
9517 return ret;
9518 }
9519
9520 -static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
9521 +static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
9522 {
9523 switch (err) {
9524 default:
9525 @@ -1981,7 +1981,11 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
9526 return -EAGAIN;
9527 case -ENOMEM:
9528 case -NFS4ERR_DENIED:
9529 - /* kill_proc(fl->fl_pid, SIGLOST, 1); */
9530 + if (fl) {
9531 + struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
9532 + if (lsp)
9533 + set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
9534 + }
9535 return 0;
9536 }
9537 return err;
9538 @@ -2017,7 +2021,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
9539 err = nfs4_open_recover_helper(opendata, FMODE_READ);
9540 }
9541 nfs4_opendata_put(opendata);
9542 - return nfs4_handle_delegation_recall_error(server, state, stateid, err);
9543 + return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
9544 }
9545
9546 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
9547 @@ -6499,7 +6503,7 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state,
9548 if (err != 0)
9549 return err;
9550 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
9551 - return nfs4_handle_delegation_recall_error(server, state, stateid, err);
9552 + return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
9553 }
9554
9555 struct nfs_release_lockowner_data {
9556 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
9557 index 0bb0e620cf42..353691366fca 100644
9558 --- a/fs/nfs/nfs4state.c
9559 +++ b/fs/nfs/nfs4state.c
9560 @@ -1429,6 +1429,7 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
9561 struct inode *inode = state->inode;
9562 struct nfs_inode *nfsi = NFS_I(inode);
9563 struct file_lock *fl;
9564 + struct nfs4_lock_state *lsp;
9565 int status = 0;
9566 struct file_lock_context *flctx = inode->i_flctx;
9567 struct list_head *list;
9568 @@ -1469,7 +1470,9 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
9569 case -NFS4ERR_DENIED:
9570 case -NFS4ERR_RECLAIM_BAD:
9571 case -NFS4ERR_RECLAIM_CONFLICT:
9572 - /* kill_proc(fl->fl_pid, SIGLOST, 1); */
9573 + lsp = fl->fl_u.nfs4_fl.owner;
9574 + if (lsp)
9575 + set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
9576 status = 0;
9577 }
9578 spin_lock(&flctx->flc_lock);
9579 diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c
9580 index 8693d77c45ea..76241aa8d853 100644
9581 --- a/fs/nfs/nfs4sysctl.c
9582 +++ b/fs/nfs/nfs4sysctl.c
9583 @@ -31,7 +31,7 @@ static struct ctl_table nfs4_cb_sysctls[] = {
9584 .data = &nfs_idmap_cache_timeout,
9585 .maxlen = sizeof(int),
9586 .mode = 0644,
9587 - .proc_handler = proc_dointvec_jiffies,
9588 + .proc_handler = proc_dointvec,
9589 },
9590 { }
9591 };
9592 diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
9593 index 2b71c60fe982..163131809e36 100644
9594 --- a/fs/nilfs2/namei.c
9595 +++ b/fs/nilfs2/namei.c
9596 @@ -46,8 +46,7 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode)
9597 int err = nilfs_add_link(dentry, inode);
9598
9599 if (!err) {
9600 - d_instantiate(dentry, inode);
9601 - unlock_new_inode(inode);
9602 + d_instantiate_new(dentry, inode);
9603 return 0;
9604 }
9605 inode_dec_link_count(inode);
9606 @@ -243,8 +242,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
9607 goto out_fail;
9608
9609 nilfs_mark_inode_dirty(inode);
9610 - d_instantiate(dentry, inode);
9611 - unlock_new_inode(inode);
9612 + d_instantiate_new(dentry, inode);
9613 out:
9614 if (!err)
9615 err = nilfs_transaction_commit(dir->i_sb);
9616 diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
9617 index bed1fcb63088..ee8dbbae78b6 100644
9618 --- a/fs/ocfs2/acl.c
9619 +++ b/fs/ocfs2/acl.c
9620 @@ -314,7 +314,9 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
9621 return ERR_PTR(ret);
9622 }
9623
9624 + down_read(&OCFS2_I(inode)->ip_xattr_sem);
9625 acl = ocfs2_get_acl_nolock(inode, type, di_bh);
9626 + up_read(&OCFS2_I(inode)->ip_xattr_sem);
9627
9628 ocfs2_inode_unlock(inode, 0);
9629 brelse(di_bh);
9630 @@ -333,7 +335,9 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
9631 if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
9632 return 0;
9633
9634 + down_read(&OCFS2_I(inode)->ip_xattr_sem);
9635 acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
9636 + up_read(&OCFS2_I(inode)->ip_xattr_sem);
9637 if (IS_ERR(acl) || !acl)
9638 return PTR_ERR(acl);
9639 ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
9640 @@ -364,8 +368,10 @@ int ocfs2_init_acl(handle_t *handle,
9641
9642 if (!S_ISLNK(inode->i_mode)) {
9643 if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
9644 + down_read(&OCFS2_I(dir)->ip_xattr_sem);
9645 acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
9646 dir_bh);
9647 + up_read(&OCFS2_I(dir)->ip_xattr_sem);
9648 if (IS_ERR(acl))
9649 return PTR_ERR(acl);
9650 }
9651 diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
9652 index 733e4e79c8e2..73be0c6dba5d 100644
9653 --- a/fs/ocfs2/dlm/dlmdomain.c
9654 +++ b/fs/ocfs2/dlm/dlmdomain.c
9655 @@ -675,20 +675,6 @@ static void dlm_leave_domain(struct dlm_ctxt *dlm)
9656 spin_unlock(&dlm->spinlock);
9657 }
9658
9659 -int dlm_shutting_down(struct dlm_ctxt *dlm)
9660 -{
9661 - int ret = 0;
9662 -
9663 - spin_lock(&dlm_domain_lock);
9664 -
9665 - if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
9666 - ret = 1;
9667 -
9668 - spin_unlock(&dlm_domain_lock);
9669 -
9670 - return ret;
9671 -}
9672 -
9673 void dlm_unregister_domain(struct dlm_ctxt *dlm)
9674 {
9675 int leave = 0;
9676 diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h
9677 index fd6122a38dbd..8a9281411c18 100644
9678 --- a/fs/ocfs2/dlm/dlmdomain.h
9679 +++ b/fs/ocfs2/dlm/dlmdomain.h
9680 @@ -28,7 +28,30 @@
9681 extern spinlock_t dlm_domain_lock;
9682 extern struct list_head dlm_domains;
9683
9684 -int dlm_shutting_down(struct dlm_ctxt *dlm);
9685 +static inline int dlm_joined(struct dlm_ctxt *dlm)
9686 +{
9687 + int ret = 0;
9688 +
9689 + spin_lock(&dlm_domain_lock);
9690 + if (dlm->dlm_state == DLM_CTXT_JOINED)
9691 + ret = 1;
9692 + spin_unlock(&dlm_domain_lock);
9693 +
9694 + return ret;
9695 +}
9696 +
9697 +static inline int dlm_shutting_down(struct dlm_ctxt *dlm)
9698 +{
9699 + int ret = 0;
9700 +
9701 + spin_lock(&dlm_domain_lock);
9702 + if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
9703 + ret = 1;
9704 + spin_unlock(&dlm_domain_lock);
9705 +
9706 + return ret;
9707 +}
9708 +
9709 void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
9710 int node_num);
9711
9712 diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
9713 index eef324823311..844dc8da53fb 100644
9714 --- a/fs/ocfs2/dlm/dlmrecovery.c
9715 +++ b/fs/ocfs2/dlm/dlmrecovery.c
9716 @@ -1378,6 +1378,15 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
9717 if (!dlm_grab(dlm))
9718 return -EINVAL;
9719
9720 + if (!dlm_joined(dlm)) {
9721 + mlog(ML_ERROR, "Domain %s not joined! "
9722 + "lockres %.*s, master %u\n",
9723 + dlm->name, mres->lockname_len,
9724 + mres->lockname, mres->master);
9725 + dlm_put(dlm);
9726 + return -EINVAL;
9727 + }
9728 +
9729 BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
9730
9731 real_master = mres->master;
9732 diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
9733 index a244f14c6b87..fa947d36ae1d 100644
9734 --- a/fs/ocfs2/journal.c
9735 +++ b/fs/ocfs2/journal.c
9736 @@ -666,23 +666,24 @@ static int __ocfs2_journal_access(handle_t *handle,
9737 /* we can safely remove this assertion after testing. */
9738 if (!buffer_uptodate(bh)) {
9739 mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
9740 - mlog(ML_ERROR, "b_blocknr=%llu\n",
9741 - (unsigned long long)bh->b_blocknr);
9742 + mlog(ML_ERROR, "b_blocknr=%llu, b_state=0x%lx\n",
9743 + (unsigned long long)bh->b_blocknr, bh->b_state);
9744
9745 lock_buffer(bh);
9746 /*
9747 - * A previous attempt to write this buffer head failed.
9748 - * Nothing we can do but to retry the write and hope for
9749 - * the best.
9750 + * A previous transaction with a couple of buffer heads fail
9751 + * to checkpoint, so all the bhs are marked as BH_Write_EIO.
9752 + * For current transaction, the bh is just among those error
9753 + * bhs which previous transaction handle. We can't just clear
9754 + * its BH_Write_EIO and reuse directly, since other bhs are
9755 + * not written to disk yet and that will cause metadata
9756 + * inconsistency. So we should set fs read-only to avoid
9757 + * further damage.
9758 */
9759 if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) {
9760 - clear_buffer_write_io_error(bh);
9761 - set_buffer_uptodate(bh);
9762 - }
9763 -
9764 - if (!buffer_uptodate(bh)) {
9765 unlock_buffer(bh);
9766 - return -EIO;
9767 + return ocfs2_error(osb->sb, "A previous attempt to "
9768 + "write this buffer head failed\n");
9769 }
9770 unlock_buffer(bh);
9771 }
9772 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
9773 index f56fe39fab04..64dfbe5755da 100644
9774 --- a/fs/ocfs2/super.c
9775 +++ b/fs/ocfs2/super.c
9776 @@ -473,9 +473,8 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb)
9777 new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
9778 if (!new) {
9779 ocfs2_release_system_inodes(osb);
9780 - status = -EINVAL;
9781 + status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
9782 mlog_errno(status);
9783 - /* FIXME: Should ERROR_RO_FS */
9784 mlog(ML_ERROR, "Unable to load system inode %d, "
9785 "possibly corrupt fs?", i);
9786 goto bail;
9787 @@ -504,7 +503,7 @@ static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb)
9788 new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
9789 if (!new) {
9790 ocfs2_release_system_inodes(osb);
9791 - status = -EINVAL;
9792 + status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
9793 mlog(ML_ERROR, "status=%d, sysfile=%d, slot=%d\n",
9794 status, i, osb->slot_num);
9795 goto bail;
9796 diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
9797 index cb157a34a656..03f6ff249edb 100644
9798 --- a/fs/ocfs2/xattr.c
9799 +++ b/fs/ocfs2/xattr.c
9800 @@ -638,9 +638,11 @@ int ocfs2_calc_xattr_init(struct inode *dir,
9801 si->value_len);
9802
9803 if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
9804 + down_read(&OCFS2_I(dir)->ip_xattr_sem);
9805 acl_len = ocfs2_xattr_get_nolock(dir, dir_bh,
9806 OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT,
9807 "", NULL, 0);
9808 + up_read(&OCFS2_I(dir)->ip_xattr_sem);
9809 if (acl_len > 0) {
9810 a_size = ocfs2_xattr_entry_real_size(0, acl_len);
9811 if (S_ISDIR(mode))
9812 diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
9813 index 7c315938e9c2..561497a7a247 100644
9814 --- a/fs/orangefs/namei.c
9815 +++ b/fs/orangefs/namei.c
9816 @@ -70,8 +70,7 @@ static int orangefs_create(struct inode *dir,
9817 get_khandle_from_ino(inode),
9818 dentry);
9819
9820 - d_instantiate(dentry, inode);
9821 - unlock_new_inode(inode);
9822 + d_instantiate_new(dentry, inode);
9823 orangefs_set_timeout(dentry);
9824 ORANGEFS_I(inode)->getattr_time = jiffies - 1;
9825
9826 @@ -318,8 +317,7 @@ static int orangefs_symlink(struct inode *dir,
9827 "Assigned symlink inode new number of %pU\n",
9828 get_khandle_from_ino(inode));
9829
9830 - d_instantiate(dentry, inode);
9831 - unlock_new_inode(inode);
9832 + d_instantiate_new(dentry, inode);
9833 orangefs_set_timeout(dentry);
9834 ORANGEFS_I(inode)->getattr_time = jiffies - 1;
9835
9836 @@ -382,8 +380,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
9837 "Assigned dir inode new number of %pU\n",
9838 get_khandle_from_ino(inode));
9839
9840 - d_instantiate(dentry, inode);
9841 - unlock_new_inode(inode);
9842 + d_instantiate_new(dentry, inode);
9843 orangefs_set_timeout(dentry);
9844 ORANGEFS_I(inode)->getattr_time = jiffies - 1;
9845
9846 diff --git a/fs/proc/base.c b/fs/proc/base.c
9847 index 3fec83ba75fa..591bf2b1ab66 100644
9848 --- a/fs/proc/base.c
9849 +++ b/fs/proc/base.c
9850 @@ -94,6 +94,8 @@
9851 #include "internal.h"
9852 #include "fd.h"
9853
9854 +#include "../../lib/kstrtox.h"
9855 +
9856 /* NOTE:
9857 * Implementing inode permission operations in /proc is almost
9858 * certainly an error. Permission checks need to happen during
9859 @@ -1864,8 +1866,33 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,
9860 static int dname_to_vma_addr(struct dentry *dentry,
9861 unsigned long *start, unsigned long *end)
9862 {
9863 - if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2)
9864 + const char *str = dentry->d_name.name;
9865 + unsigned long long sval, eval;
9866 + unsigned int len;
9867 +
9868 + len = _parse_integer(str, 16, &sval);
9869 + if (len & KSTRTOX_OVERFLOW)
9870 + return -EINVAL;
9871 + if (sval != (unsigned long)sval)
9872 return -EINVAL;
9873 + str += len;
9874 +
9875 + if (*str != '-')
9876 + return -EINVAL;
9877 + str++;
9878 +
9879 + len = _parse_integer(str, 16, &eval);
9880 + if (len & KSTRTOX_OVERFLOW)
9881 + return -EINVAL;
9882 + if (eval != (unsigned long)eval)
9883 + return -EINVAL;
9884 + str += len;
9885 +
9886 + if (*str != '\0')
9887 + return -EINVAL;
9888 +
9889 + *start = sval;
9890 + *end = eval;
9891
9892 return 0;
9893 }
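
dname_to_vma_addr() now parses the "start-end" name with _parse_integer() and insists on exactly one '-' separator, no overflow, and nothing trailing, where sscanf("%lx-%lx") was far more forgiving. A rough userspace analogue using strtoull() (only an approximation: strtoull() still skips whitespace and accepts '+', '-' and a "0x" prefix, hence the extra isxdigit() guards):

/*
 * Userspace sketch, not the kernel code: strict "start-end" hex parsing in
 * the spirit of the hunk above.
 */
#include <ctype.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_range(const char *str, unsigned long *start, unsigned long *end)
{
	unsigned long long sval, eval;
	char *p;

	if (!isxdigit((unsigned char)str[0]))
		return -1;			/* no leading space, sign, ... */
	errno = 0;
	sval = strtoull(str, &p, 16);
	if (errno || *p != '-' || sval > (unsigned long)-1)
		return -1;			/* overflow or bad separator */
	p++;
	if (!isxdigit((unsigned char)p[0]))
		return -1;
	errno = 0;
	eval = strtoull(p, &p, 16);
	if (errno || *p != '\0' || eval > (unsigned long)-1)
		return -1;			/* overflow or trailing junk */
	*start = (unsigned long)sval;
	*end = (unsigned long)eval;
	return 0;
}

int main(void)
{
	unsigned long s, e;

	printf("%d\n", parse_range("10000-2a000", &s, &e));		/* 0  */
	printf("%d\n", parse_range("10000-2a000 junk", &s, &e));	/* -1 */
	return 0;
}
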
9894 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
9895 index df7e07986ead..7ed961c0124f 100644
9896 --- a/fs/proc/kcore.c
9897 +++ b/fs/proc/kcore.c
9898 @@ -505,6 +505,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
9899 /* we have to zero-fill user buffer even if no read */
9900 if (copy_to_user(buffer, buf, tsz))
9901 return -EFAULT;
9902 + } else if (m->type == KCORE_USER) {
9903 + /* User page is handled prior to normal kernel page: */
9904 + if (copy_to_user(buffer, (char *)start, tsz))
9905 + return -EFAULT;
9906 } else {
9907 if (kern_addr_valid(start)) {
9908 /*
9909 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
9910 index d4e37acd4821..847f23420b40 100644
9911 --- a/fs/proc/proc_sysctl.c
9912 +++ b/fs/proc/proc_sysctl.c
9913 @@ -660,7 +660,10 @@ static bool proc_sys_link_fill_cache(struct file *file,
9914 struct ctl_table *table)
9915 {
9916 bool ret = true;
9917 +
9918 head = sysctl_head_grab(head);
9919 + if (IS_ERR(head))
9920 + return false;
9921
9922 if (S_ISLNK(table->mode)) {
9923 /* It is not an error if we can not follow the link ignore it */
9924 diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
9925 index e6a2b406af36..1ec728cf82d1 100644
9926 --- a/fs/reiserfs/namei.c
9927 +++ b/fs/reiserfs/namei.c
9928 @@ -687,8 +687,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod
9929 reiserfs_update_inode_transaction(inode);
9930 reiserfs_update_inode_transaction(dir);
9931
9932 - unlock_new_inode(inode);
9933 - d_instantiate(dentry, inode);
9934 + d_instantiate_new(dentry, inode);
9935 retval = journal_end(&th);
9936
9937 out_failed:
9938 @@ -771,8 +770,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode
9939 goto out_failed;
9940 }
9941
9942 - unlock_new_inode(inode);
9943 - d_instantiate(dentry, inode);
9944 + d_instantiate_new(dentry, inode);
9945 retval = journal_end(&th);
9946
9947 out_failed:
9948 @@ -871,8 +869,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
9949 /* the above add_entry did not update dir's stat data */
9950 reiserfs_update_sd(&th, dir);
9951
9952 - unlock_new_inode(inode);
9953 - d_instantiate(dentry, inode);
9954 + d_instantiate_new(dentry, inode);
9955 retval = journal_end(&th);
9956 out_failed:
9957 reiserfs_write_unlock(dir->i_sb);
9958 @@ -1187,8 +1184,7 @@ static int reiserfs_symlink(struct inode *parent_dir,
9959 goto out_failed;
9960 }
9961
9962 - unlock_new_inode(inode);
9963 - d_instantiate(dentry, inode);
9964 + d_instantiate_new(dentry, inode);
9965 retval = journal_end(&th);
9966 out_failed:
9967 reiserfs_write_unlock(parent_dir->i_sb);
9968 diff --git a/fs/udf/namei.c b/fs/udf/namei.c
9969 index 2d65e280748b..348b922d1b6a 100644
9970 --- a/fs/udf/namei.c
9971 +++ b/fs/udf/namei.c
9972 @@ -621,8 +621,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
9973 if (fibh.sbh != fibh.ebh)
9974 brelse(fibh.ebh);
9975 brelse(fibh.sbh);
9976 - unlock_new_inode(inode);
9977 - d_instantiate(dentry, inode);
9978 + d_instantiate_new(dentry, inode);
9979
9980 return 0;
9981 }
9982 @@ -732,8 +731,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
9983 inc_nlink(dir);
9984 dir->i_ctime = dir->i_mtime = current_time(dir);
9985 mark_inode_dirty(dir);
9986 - unlock_new_inode(inode);
9987 - d_instantiate(dentry, inode);
9988 + d_instantiate_new(dentry, inode);
9989 if (fibh.sbh != fibh.ebh)
9990 brelse(fibh.ebh);
9991 brelse(fibh.sbh);
9992 diff --git a/fs/udf/super.c b/fs/udf/super.c
9993 index 4b1f6d5372c3..12467ad608cd 100644
9994 --- a/fs/udf/super.c
9995 +++ b/fs/udf/super.c
9996 @@ -2094,8 +2094,9 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
9997 bool lvid_open = false;
9998
9999 uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
10000 - uopt.uid = INVALID_UID;
10001 - uopt.gid = INVALID_GID;
10002 + /* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
10003 + uopt.uid = make_kuid(current_user_ns(), overflowuid);
10004 + uopt.gid = make_kgid(current_user_ns(), overflowgid);
10005 uopt.umask = 0;
10006 uopt.fmode = UDF_INVALID_MODE;
10007 uopt.dmode = UDF_INVALID_MODE;
10008 diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
10009 index 8eca4eda8450..2109c071718b 100644
10010 --- a/fs/ufs/namei.c
10011 +++ b/fs/ufs/namei.c
10012 @@ -38,8 +38,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
10013 {
10014 int err = ufs_add_link(dentry, inode);
10015 if (!err) {
10016 - unlock_new_inode(inode);
10017 - d_instantiate(dentry, inode);
10018 + d_instantiate_new(dentry, inode);
10019 return 0;
10020 }
10021 inode_dec_link_count(inode);
10022 @@ -192,8 +191,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
10023 if (err)
10024 goto out_fail;
10025
10026 - unlock_new_inode(inode);
10027 - d_instantiate(dentry, inode);
10028 + d_instantiate_new(dentry, inode);
10029 return 0;
10030
10031 out_fail:
10032 diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
10033 index 4ff499aa7338..b2ab123e561d 100644
10034 --- a/fs/xfs/xfs_discard.c
10035 +++ b/fs/xfs/xfs_discard.c
10036 @@ -50,19 +50,19 @@ xfs_trim_extents(
10037
10038 pag = xfs_perag_get(mp, agno);
10039
10040 - error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
10041 - if (error || !agbp)
10042 - goto out_put_perag;
10043 -
10044 - cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
10045 -
10046 /*
10047 * Force out the log. This means any transactions that might have freed
10048 - * space before we took the AGF buffer lock are now on disk, and the
10049 + * space before we take the AGF buffer lock are now on disk, and the
10050 * volatile disk cache is flushed.
10051 */
10052 xfs_log_force(mp, XFS_LOG_SYNC);
10053
10054 + error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
10055 + if (error || !agbp)
10056 + goto out_put_perag;
10057 +
10058 + cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
10059 +
10060 /*
10061 * Look up the longest btree in the AGF and start with it.
10062 */
10063 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
10064 index f6ea0f3c03f8..4e8551c8ef18 100644
10065 --- a/include/asm-generic/pgtable.h
10066 +++ b/include/asm-generic/pgtable.h
10067 @@ -234,6 +234,21 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
10068 extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
10069 #endif
10070
10071 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
10072 +/*
10073 + * This is an implementation of pmdp_establish() that is only suitable for an
10074 + * architecture that doesn't have hardware dirty/accessed bits. In this case we
10075 + * can't race with CPU which sets these bits and non-atomic aproach is fine.
10076 + */
10077 +static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
10078 + unsigned long address, pmd_t *pmdp, pmd_t pmd)
10079 +{
10080 + pmd_t old_pmd = *pmdp;
10081 + set_pmd_at(vma->vm_mm, address, pmdp, pmd);
10082 + return old_pmd;
10083 +}
10084 +#endif
10085 +
10086 #ifndef __HAVE_ARCH_PMDP_INVALIDATE
10087 extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
10088 pmd_t *pmdp);
10089 diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
10090 index 18ba29ff1449..203ad564dc60 100644
10091 --- a/include/linux/cpumask.h
10092 +++ b/include/linux/cpumask.h
10093 @@ -164,6 +164,8 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
10094 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
10095 #define for_each_cpu_not(cpu, mask) \
10096 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
10097 +#define for_each_cpu_wrap(cpu, mask, start) \
10098 + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
10099 #define for_each_cpu_and(cpu, mask, and) \
10100 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
10101 #else
10102 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
10103 index ff295e166b2c..b757ee42bc63 100644
10104 --- a/include/linux/dcache.h
10105 +++ b/include/linux/dcache.h
10106 @@ -219,6 +219,7 @@ extern seqlock_t rename_lock;
10107 * These are the low-level FS interfaces to the dcache..
10108 */
10109 extern void d_instantiate(struct dentry *, struct inode *);
10110 +extern void d_instantiate_new(struct dentry *, struct inode *);
10111 extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
10112 extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
10113 extern void __d_drop(struct dentry *dentry);
10114 diff --git a/include/linux/kcore.h b/include/linux/kcore.h
10115 index d92762286645..3ffade4f2798 100644
10116 --- a/include/linux/kcore.h
10117 +++ b/include/linux/kcore.h
10118 @@ -9,6 +9,7 @@ enum kcore_type {
10119 KCORE_VMALLOC,
10120 KCORE_RAM,
10121 KCORE_VMEMMAP,
10122 + KCORE_USER,
10123 KCORE_OTHER,
10124 };
10125
10126 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
10127 index 8c58db2c09c6..eb55374b73f3 100644
10128 --- a/include/linux/kvm_host.h
10129 +++ b/include/linux/kvm_host.h
10130 @@ -1070,7 +1070,6 @@ static inline void kvm_irq_routing_update(struct kvm *kvm)
10131 {
10132 }
10133 #endif
10134 -void kvm_arch_irq_routing_update(struct kvm *kvm);
10135
10136 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
10137 {
10138 @@ -1079,6 +1078,8 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
10139
10140 #endif /* CONFIG_HAVE_KVM_EVENTFD */
10141
10142 +void kvm_arch_irq_routing_update(struct kvm *kvm);
10143 +
10144 static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
10145 {
10146 /*
10147 diff --git a/include/linux/property.h b/include/linux/property.h
10148 index 338f9b76914b..459337fb44d0 100644
10149 --- a/include/linux/property.h
10150 +++ b/include/linux/property.h
10151 @@ -187,7 +187,7 @@ struct property_entry {
10152 */
10153
10154 #define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \
10155 -{ \
10156 +(struct property_entry) { \
10157 .name = _name_, \
10158 .length = ARRAY_SIZE(_val_) * sizeof(_type_), \
10159 .is_array = true, \
10160 @@ -205,7 +205,7 @@ struct property_entry {
10161 PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_)
10162
10163 #define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
10164 -{ \
10165 +(struct property_entry) { \
10166 .name = _name_, \
10167 .length = ARRAY_SIZE(_val_) * sizeof(const char *), \
10168 .is_array = true, \
10169 @@ -214,7 +214,7 @@ struct property_entry {
10170 }
10171
10172 #define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \
10173 -{ \
10174 +(struct property_entry) { \
10175 .name = _name_, \
10176 .length = sizeof(_type_), \
10177 .is_string = false, \
10178 @@ -231,7 +231,7 @@ struct property_entry {
10179 PROPERTY_ENTRY_INTEGER(_name_, u64, _val_)
10180
10181 #define PROPERTY_ENTRY_STRING(_name_, _val_) \
10182 -{ \
10183 +(struct property_entry) { \
10184 .name = _name_, \
10185 .length = sizeof(_val_), \
10186 .is_string = true, \
10187 @@ -239,7 +239,7 @@ struct property_entry {
10188 }
10189
10190 #define PROPERTY_ENTRY_BOOL(_name_) \
10191 -{ \
10192 +(struct property_entry) { \
10193 .name = _name_, \
10194 }
10195
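
The PROPERTY_ENTRY_* macros above now expand to compound literals, (struct property_entry){ ... }, rather than bare braced lists. The C mechanics of the difference, shown as a standalone sketch with a made-up struct: a bare braced list is only valid inside an initializer, while a compound literal is a full expression and can also be assigned or passed around.

/*
 * Standalone illustration of the macro change above, using an invented type.
 */
#include <stdio.h>

struct prop {
	const char *name;
	int value;
};

#define PROP_BRACED(_n_, _v_)   { .name = _n_, .value = _v_ }
#define PROP_LITERAL(_n_, _v_)  (struct prop) { .name = _n_, .value = _v_ }

int main(void)
{
	struct prop a = PROP_BRACED("a", 1);	/* fine: initializer context */
	struct prop b;

	b = PROP_LITERAL("b", 2);		/* fine: compound literal is an expression */
	/* b = PROP_BRACED("b", 2); would not compile */

	printf("%s=%d %s=%d\n", a.name, a.value, b.name, b.value);
	return 0;
}
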
10196 diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
10197 index 05c6d20c2a7a..ac377a23265f 100644
10198 --- a/include/linux/ptr_ring.h
10199 +++ b/include/linux/ptr_ring.h
10200 @@ -351,7 +351,7 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
10201
10202 static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
10203 {
10204 - if (size * sizeof(void *) > KMALLOC_MAX_SIZE)
10205 + if (size > KMALLOC_MAX_SIZE / sizeof(void *))
10206 return NULL;
10207 return kcalloc(size, sizeof(void *), gfp);
10208 }
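
The new __ptr_ring_init_queue_alloc() check compares size against KMALLOC_MAX_SIZE / sizeof(void *), so the bound is enforced without a multiplication that could wrap on a 32-bit build. A standalone demonstration of the wrap (the 4 MiB limit and 4-byte pointer size are stand-ins, not the kernel's configuration-dependent values):

/*
 * Standalone illustration, not kernel code: with 32-bit arithmetic the old
 * size * sizeof(void *) test can wrap, letting an absurd element count slip
 * past the limit; dividing the limit instead cannot overflow.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_ALLOC ((uint32_t)1 << 22)		/* stand-in for KMALLOC_MAX_SIZE, 4 MiB */

int main(void)
{
	uint32_t size = UINT32_C(0x40000001);	/* bogus, huge element count */
	uint32_t ptr_size = 4;			/* sizeof(void *) on a 32-bit build */
	uint32_t product = size * ptr_size;	/* wraps around to 4 */

	printf("product after wrap: %u\n", (unsigned)product);
	printf("old check lets it through: %d\n", product <= MAX_ALLOC);
	printf("new check rejects it:      %d\n", !(size <= MAX_ALLOC / ptr_size));
	return 0;
}
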
10209 diff --git a/include/linux/suspend.h b/include/linux/suspend.h
10210 index d9718378a8be..249dafce2788 100644
10211 --- a/include/linux/suspend.h
10212 +++ b/include/linux/suspend.h
10213 @@ -378,6 +378,8 @@ extern int swsusp_page_is_forbidden(struct page *);
10214 extern void swsusp_set_page_free(struct page *);
10215 extern void swsusp_unset_page_free(struct page *);
10216 extern unsigned long get_safe_page(gfp_t gfp_mask);
10217 +extern asmlinkage int swsusp_arch_suspend(void);
10218 +extern asmlinkage int swsusp_arch_resume(void);
10219
10220 extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
10221 extern int hibernate(void);
10222 diff --git a/include/net/ip.h b/include/net/ip.h
10223 index 0e3dcd5a134d..bc9b4deeb60e 100644
10224 --- a/include/net/ip.h
10225 +++ b/include/net/ip.h
10226 @@ -304,6 +304,13 @@ int ip_decrease_ttl(struct iphdr *iph)
10227 return --iph->ttl;
10228 }
10229
10230 +static inline int ip_mtu_locked(const struct dst_entry *dst)
10231 +{
10232 + const struct rtable *rt = (const struct rtable *)dst;
10233 +
10234 + return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
10235 +}
10236 +
10237 static inline
10238 int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
10239 {
10240 @@ -311,7 +318,7 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
10241
10242 return pmtudisc == IP_PMTUDISC_DO ||
10243 (pmtudisc == IP_PMTUDISC_WANT &&
10244 - !(dst_metric_locked(dst, RTAX_MTU)));
10245 + !ip_mtu_locked(dst));
10246 }
10247
10248 static inline bool ip_sk_accept_pmtu(const struct sock *sk)
10249 @@ -337,7 +344,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
10250 struct net *net = dev_net(dst->dev);
10251
10252 if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
10253 - dst_metric_locked(dst, RTAX_MTU) ||
10254 + ip_mtu_locked(dst) ||
10255 !forwarding)
10256 return dst_mtu(dst);
10257
10258 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
10259 index aa758280d8a8..978387d6c3e6 100644
10260 --- a/include/net/ip_fib.h
10261 +++ b/include/net/ip_fib.h
10262 @@ -57,6 +57,7 @@ struct fib_nh_exception {
10263 int fnhe_genid;
10264 __be32 fnhe_daddr;
10265 u32 fnhe_pmtu;
10266 + bool fnhe_mtu_locked;
10267 __be32 fnhe_gw;
10268 unsigned long fnhe_expires;
10269 struct rtable __rcu *fnhe_rth_input;
10270 diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
10271 index ea985aa7a6c5..df528a623548 100644
10272 --- a/include/net/llc_conn.h
10273 +++ b/include/net/llc_conn.h
10274 @@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk);
10275
10276 /* Access to a connection */
10277 int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
10278 -void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
10279 +int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
10280 void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
10281 void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
10282 void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
10283 diff --git a/include/net/mac80211.h b/include/net/mac80211.h
10284 index 8fd61bc50383..920a771c710f 100644
10285 --- a/include/net/mac80211.h
10286 +++ b/include/net/mac80211.h
10287 @@ -4091,7 +4091,7 @@ void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid);
10288 * The TX headroom reserved by mac80211 for its own tx_status functions.
10289 * This is enough for the radiotap header.
10290 */
10291 -#define IEEE80211_TX_STATUS_HEADROOM 14
10292 +#define IEEE80211_TX_STATUS_HEADROOM ALIGN(14, 4)
10293
10294 /**
10295 * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames
10296 diff --git a/include/net/regulatory.h b/include/net/regulatory.h
10297 index ebc5a2ed8631..f83cacce3308 100644
10298 --- a/include/net/regulatory.h
10299 +++ b/include/net/regulatory.h
10300 @@ -78,7 +78,7 @@ struct regulatory_request {
10301 int wiphy_idx;
10302 enum nl80211_reg_initiator initiator;
10303 enum nl80211_user_reg_hint_type user_reg_hint_type;
10304 - char alpha2[2];
10305 + char alpha2[3];
10306 enum nl80211_dfs_regions dfs_region;
10307 bool intersect;
10308 bool processed;
10309 diff --git a/include/net/route.h b/include/net/route.h
10310 index 0429d47cad25..b8488efef920 100644
10311 --- a/include/net/route.h
10312 +++ b/include/net/route.h
10313 @@ -63,7 +63,8 @@ struct rtable {
10314 __be32 rt_gateway;
10315
10316 /* Miscellaneous cached information */
10317 - u32 rt_pmtu;
10318 + u32 rt_mtu_locked:1,
10319 + rt_pmtu:31;
10320
10321 u32 rt_table_id;
10322
10323 diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
10324 index 28c5da6fdfac..3411da79407d 100644
10325 --- a/include/trace/events/timer.h
10326 +++ b/include/trace/events/timer.h
10327 @@ -125,6 +125,20 @@ DEFINE_EVENT(timer_class, timer_cancel,
10328 TP_ARGS(timer)
10329 );
10330
10331 +#define decode_clockid(type) \
10332 + __print_symbolic(type, \
10333 + { CLOCK_REALTIME, "CLOCK_REALTIME" }, \
10334 + { CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \
10335 + { CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \
10336 + { CLOCK_TAI, "CLOCK_TAI" })
10337 +
10338 +#define decode_hrtimer_mode(mode) \
10339 + __print_symbolic(mode, \
10340 + { HRTIMER_MODE_ABS, "ABS" }, \
10341 + { HRTIMER_MODE_REL, "REL" }, \
10342 + { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \
10343 + { HRTIMER_MODE_REL_PINNED, "REL|PINNED" })
10344 +
10345 /**
10346 * hrtimer_init - called when the hrtimer is initialized
10347 * @hrtimer: pointer to struct hrtimer
10348 @@ -151,10 +165,8 @@ TRACE_EVENT(hrtimer_init,
10349 ),
10350
10351 TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
10352 - __entry->clockid == CLOCK_REALTIME ?
10353 - "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
10354 - __entry->mode == HRTIMER_MODE_ABS ?
10355 - "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
10356 + decode_clockid(__entry->clockid),
10357 + decode_hrtimer_mode(__entry->mode))
10358 );
10359
10360 /**
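
decode_clockid() and decode_hrtimer_mode() are tracepoint-side value-to-string tables built with __print_symbolic(). Roughly the same mapping, restated as plain C for illustration; the fallback numeric values for CLOCK_BOOTTIME/CLOCK_TAI are the Linux uapi constants, everything else is demo-only.

#include <stdio.h>
#include <time.h>

/* Linux uapi clock ids, in case the libc headers do not expose them */
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME	7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI	11
#endif

static const char *demo_decode_clockid(int id)
{
	switch (id) {
	case CLOCK_REALTIME:	return "CLOCK_REALTIME";
	case CLOCK_MONOTONIC:	return "CLOCK_MONOTONIC";
	case CLOCK_BOOTTIME:	return "CLOCK_BOOTTIME";
	case CLOCK_TAI:		return "CLOCK_TAI";
	default:		return "UNKNOWN";
	}
}

int main(void)
{
	printf("%d -> %s\n", CLOCK_MONOTONIC, demo_decode_clockid(CLOCK_MONOTONIC));
	printf("%d -> %s\n", CLOCK_BOOTTIME, demo_decode_clockid(CLOCK_BOOTTIME));
	return 0;
}
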
10361 diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
10362 index 91a31ffed828..9a781f0611df 100644
10363 --- a/include/uapi/drm/virtgpu_drm.h
10364 +++ b/include/uapi/drm/virtgpu_drm.h
10365 @@ -63,6 +63,7 @@ struct drm_virtgpu_execbuffer {
10366 };
10367
10368 #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
10369 +#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
10370
10371 struct drm_virtgpu_getparam {
10372 __u64 param;
10373 diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
10374 index 117d02e0fc31..659b1634de61 100644
10375 --- a/include/uapi/linux/if_ether.h
10376 +++ b/include/uapi/linux/if_ether.h
10377 @@ -29,6 +29,7 @@
10378 */
10379
10380 #define ETH_ALEN 6 /* Octets in one ethernet addr */
10381 +#define ETH_TLEN 2 /* Octets in ethernet type field */
10382 #define ETH_HLEN 14 /* Total octets in header. */
10383 #define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
10384 #define ETH_DATA_LEN 1500 /* Max. octets in payload */
10385 diff --git a/ipc/shm.c b/ipc/shm.c
10386 index b626745e771c..9c687cda9b0a 100644
10387 --- a/ipc/shm.c
10388 +++ b/ipc/shm.c
10389 @@ -1127,14 +1127,17 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
10390 goto out;
10391 else if ((addr = (ulong)shmaddr)) {
10392 if (addr & (shmlba - 1)) {
10393 - /*
10394 - * Round down to the nearest multiple of shmlba.
10395 - * For sane do_mmap_pgoff() parameters, avoid
10396 - * round downs that trigger nil-page and MAP_FIXED.
10397 - */
10398 - if ((shmflg & SHM_RND) && addr >= shmlba)
10399 - addr &= ~(shmlba - 1);
10400 - else
10401 + if (shmflg & SHM_RND) {
10402 + addr &= ~(shmlba - 1); /* round down */
10403 +
10404 + /*
10405 + * Ensure that the round-down is non-nil
10406 + * when remapping. This can happen for
10407 + * cases when addr < shmlba.
10408 + */
10409 + if (!addr && (shmflg & SHM_REMAP))
10410 + goto out;
10411 + } else
10412 #ifndef __ARCH_FORCE_SHMLBA
10413 if (addr & ~PAGE_MASK)
10414 #endif
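
The shmat() hunk rounds the user-supplied address down to a multiple of SHMLBA and now rejects the case where that round-down produces address 0 under SHM_REMAP. A small sketch of the arithmetic, assuming a power-of-two SHMLBA; the 4096 value and the helper name are demo-only.

#include <stdio.h>

/* round an attach address down to a multiple of shmlba (a power of two) */
static unsigned long demo_shm_round_down(unsigned long addr, unsigned long shmlba)
{
	return addr & ~(shmlba - 1);
}

int main(void)
{
	unsigned long shmlba = 4096;	/* demo value */

	/* normal case: 0x5123 rounds down to 0x5000 */
	printf("0x5123 -> %#lx\n", demo_shm_round_down(0x5123, shmlba));

	/* addr < shmlba rounds down to 0 - the case now rejected for SHM_REMAP */
	printf("0x0123 -> %#lx\n", demo_shm_round_down(0x0123, shmlba));
	return 0;
}
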
10415 diff --git a/kernel/audit.c b/kernel/audit.c
10416 index da4e7c0e36f7..3461a3d874fe 100644
10417 --- a/kernel/audit.c
10418 +++ b/kernel/audit.c
10419 @@ -742,6 +742,8 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
10420 return;
10421
10422 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
10423 + if (!ab)
10424 + return;
10425 audit_log_task_info(ab, current);
10426 audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
10427 audit_feature_names[which], !!old_feature, !!new_feature,
10428 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
10429 index 2a20c0dfdafc..5a58421d7e2d 100644
10430 --- a/kernel/debug/kdb/kdb_main.c
10431 +++ b/kernel/debug/kdb/kdb_main.c
10432 @@ -1564,6 +1564,7 @@ static int kdb_md(int argc, const char **argv)
10433 int symbolic = 0;
10434 int valid = 0;
10435 int phys = 0;
10436 + int raw = 0;
10437
10438 kdbgetintenv("MDCOUNT", &mdcount);
10439 kdbgetintenv("RADIX", &radix);
10440 @@ -1573,9 +1574,10 @@ static int kdb_md(int argc, const char **argv)
10441 repeat = mdcount * 16 / bytesperword;
10442
10443 if (strcmp(argv[0], "mdr") == 0) {
10444 - if (argc != 2)
10445 + if (argc == 2 || (argc == 0 && last_addr != 0))
10446 + valid = raw = 1;
10447 + else
10448 return KDB_ARGCOUNT;
10449 - valid = 1;
10450 } else if (isdigit(argv[0][2])) {
10451 bytesperword = (int)(argv[0][2] - '0');
10452 if (bytesperword == 0) {
10453 @@ -1611,7 +1613,10 @@ static int kdb_md(int argc, const char **argv)
10454 radix = last_radix;
10455 bytesperword = last_bytesperword;
10456 repeat = last_repeat;
10457 - mdcount = ((repeat * bytesperword) + 15) / 16;
10458 + if (raw)
10459 + mdcount = repeat;
10460 + else
10461 + mdcount = ((repeat * bytesperword) + 15) / 16;
10462 }
10463
10464 if (argc) {
10465 @@ -1628,7 +1633,10 @@ static int kdb_md(int argc, const char **argv)
10466 diag = kdbgetularg(argv[nextarg], &val);
10467 if (!diag) {
10468 mdcount = (int) val;
10469 - repeat = mdcount * 16 / bytesperword;
10470 + if (raw)
10471 + repeat = mdcount;
10472 + else
10473 + repeat = mdcount * 16 / bytesperword;
10474 }
10475 }
10476 if (argc >= nextarg+1) {
10477 @@ -1638,8 +1646,15 @@ static int kdb_md(int argc, const char **argv)
10478 }
10479 }
10480
10481 - if (strcmp(argv[0], "mdr") == 0)
10482 - return kdb_mdr(addr, mdcount);
10483 + if (strcmp(argv[0], "mdr") == 0) {
10484 + int ret;
10485 + last_addr = addr;
10486 + ret = kdb_mdr(addr, mdcount);
10487 + last_addr += mdcount;
10488 + last_repeat = mdcount;
10489 + last_bytesperword = bytesperword; // to make REPEAT happy
10490 + return ret;
10491 + }
10492
10493 switch (radix) {
10494 case 10:
10495 diff --git a/kernel/events/core.c b/kernel/events/core.c
10496 index cbc51826cb94..6e6ec229c780 100644
10497 --- a/kernel/events/core.c
10498 +++ b/kernel/events/core.c
10499 @@ -634,9 +634,15 @@ static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
10500
10501 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
10502 {
10503 - struct perf_cgroup *cgrp_out = cpuctx->cgrp;
10504 - if (cgrp_out)
10505 - __update_cgrp_time(cgrp_out);
10506 + struct perf_cgroup *cgrp = cpuctx->cgrp;
10507 + struct cgroup_subsys_state *css;
10508 +
10509 + if (cgrp) {
10510 + for (css = &cgrp->css; css; css = css->parent) {
10511 + cgrp = container_of(css, struct perf_cgroup, css);
10512 + __update_cgrp_time(cgrp);
10513 + }
10514 + }
10515 }
10516
10517 static inline void update_cgrp_time_from_event(struct perf_event *event)
10518 @@ -664,6 +670,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
10519 {
10520 struct perf_cgroup *cgrp;
10521 struct perf_cgroup_info *info;
10522 + struct cgroup_subsys_state *css;
10523
10524 /*
10525 * ctx->lock held by caller
10526 @@ -674,8 +681,12 @@ perf_cgroup_set_timestamp(struct task_struct *task,
10527 return;
10528
10529 cgrp = perf_cgroup_from_task(task, ctx);
10530 - info = this_cpu_ptr(cgrp->info);
10531 - info->timestamp = ctx->timestamp;
10532 +
10533 + for (css = &cgrp->css; css; css = css->parent) {
10534 + cgrp = container_of(css, struct perf_cgroup, css);
10535 + info = this_cpu_ptr(cgrp->info);
10536 + info->timestamp = ctx->timestamp;
10537 + }
10538 }
10539
10540 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
10541 @@ -5689,7 +5700,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
10542 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
10543 values[n++] = running;
10544
10545 - if (leader != event)
10546 + if ((leader != event) &&
10547 + (leader->state == PERF_EVENT_STATE_ACTIVE))
10548 leader->pmu->read(leader);
10549
10550 values[n++] = perf_event_count(leader);
10551 diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
10552 index b2caec7315af..a72f5df643f8 100644
10553 --- a/kernel/locking/qspinlock.c
10554 +++ b/kernel/locking/qspinlock.c
10555 @@ -495,6 +495,14 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
10556 tail = encode_tail(smp_processor_id(), idx);
10557
10558 node += idx;
10559 +
10560 + /*
10561 + * Ensure that we increment the head node->count before initialising
10562 + * the actual node. If the compiler is kind enough to reorder these
10563 + * stores, then an IRQ could overwrite our assignments.
10564 + */
10565 + barrier();
10566 +
10567 node->locked = 0;
10568 node->next = NULL;
10569 pv_init_node(node);
10570 diff --git a/kernel/power/power.h b/kernel/power/power.h
10571 index 56d1d0dedf76..ccba4d820078 100644
10572 --- a/kernel/power/power.h
10573 +++ b/kernel/power/power.h
10574 @@ -103,9 +103,6 @@ extern int in_suspend;
10575 extern dev_t swsusp_resume_device;
10576 extern sector_t swsusp_resume_block;
10577
10578 -extern asmlinkage int swsusp_arch_suspend(void);
10579 -extern asmlinkage int swsusp_arch_resume(void);
10580 -
10581 extern int create_basic_memory_bitmaps(void);
10582 extern void free_basic_memory_bitmaps(void);
10583 extern int hibernate_preallocate_memory(void);
10584 diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
10585 index e3944c4b072d..554ea54e8d61 100644
10586 --- a/kernel/rcu/tree_plugin.h
10587 +++ b/kernel/rcu/tree_plugin.h
10588 @@ -521,8 +521,14 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
10589 }
10590 t = list_entry(rnp->gp_tasks->prev,
10591 struct task_struct, rcu_node_entry);
10592 - list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
10593 + list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
10594 + /*
10595 + * We could be printing a lot while holding a spinlock.
10596 + * Avoid triggering hard lockup.
10597 + */
10598 + touch_nmi_watchdog();
10599 sched_show_task(t);
10600 + }
10601 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
10602 }
10603
10604 @@ -1629,6 +1635,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
10605 char *ticks_title;
10606 unsigned long ticks_value;
10607
10608 + /*
10609 + * We could be printing a lot while holding a spinlock. Avoid
10610 + * triggering hard lockup.
10611 + */
10612 + touch_nmi_watchdog();
10613 +
10614 if (rsp->gpnum == rdp->gpnum) {
10615 ticks_title = "ticks this GP";
10616 ticks_value = rdp->ticks_this_gp;
10617 diff --git a/kernel/relay.c b/kernel/relay.c
10618 index 2603e04f55f9..91e8fbf8aff3 100644
10619 --- a/kernel/relay.c
10620 +++ b/kernel/relay.c
10621 @@ -163,7 +163,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
10622 {
10623 struct rchan_buf *buf;
10624
10625 - if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
10626 + if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
10627 return NULL;
10628
10629 buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
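
The relay fix bounds the subbuffer count by KMALLOC_MAX_SIZE / sizeof(...) before any multiplication takes place. A sketch of that overflow-safe sizing pattern; MAX_ALLOC and demo_alloc_ptr_array() are demo stand-ins, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

#define MAX_ALLOC	(4UL << 20)	/* demo allocator limit: 4 MiB */

/* allocate an array of n pointers, refusing sizes the allocator cannot serve */
static void *demo_alloc_ptr_array(size_t n)
{
	if (n > MAX_ALLOC / sizeof(void *))
		return NULL;	/* checked before multiplying, so no wrap-around */
	return calloc(n, sizeof(void *));
}

int main(void)
{
	void *p = demo_alloc_ptr_array(1024);

	printf("n=1024     -> %s\n", p ? "ok" : "rejected");
	free(p);
	printf("n=SIZE_MAX -> %s\n", demo_alloc_ptr_array((size_t)-1) ? "ok" : "rejected");
	return 0;
}
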
10630 diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
10631 index c7b0d2e7a9aa..9ab4d73e9cc9 100644
10632 --- a/kernel/sched/rt.c
10633 +++ b/kernel/sched/rt.c
10634 @@ -830,6 +830,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
10635 struct rq *rq = rq_of_rt_rq(rt_rq);
10636
10637 raw_spin_lock(&rq->lock);
10638 + update_rq_clock(rq);
10639 +
10640 if (rt_rq->rt_time) {
10641 u64 runtime;
10642
10643 diff --git a/kernel/signal.c b/kernel/signal.c
10644 index 17428fec19b0..4364e57e6038 100644
10645 --- a/kernel/signal.c
10646 +++ b/kernel/signal.c
10647 @@ -1392,6 +1392,10 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
10648 return ret;
10649 }
10650
10651 + /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
10652 + if (pid == INT_MIN)
10653 + return -ESRCH;
10654 +
10655 read_lock(&tasklist_lock);
10656 if (pid != -1) {
10657 ret = __kill_pgrp_info(sig, info,
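
kill_something_info() now refuses pid == INT_MIN because a negative pid is later negated to obtain a process-group id, and -INT_MIN does not fit in an int. A sketch showing the magnitude problem, with the negation done in a wider type so the demo itself stays well defined.

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* negate in a wider type: -INT_MIN itself would overflow an int */
	long long neg = -(long long)INT_MIN;

	printf("INT_MIN  = %d\n", INT_MIN);
	printf("-INT_MIN = %lld, but INT_MAX is only %d\n", neg, INT_MAX);
	return 0;
}
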
10658 diff --git a/kernel/sys.c b/kernel/sys.c
10659 index 143cd63f1d47..b13b530b5e0f 100644
10660 --- a/kernel/sys.c
10661 +++ b/kernel/sys.c
10662 @@ -1313,6 +1313,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
10663 if (resource >= RLIM_NLIMITS)
10664 return -EINVAL;
10665
10666 + resource = array_index_nospec(resource, RLIM_NLIMITS);
10667 task_lock(current->group_leader);
10668 x = current->signal->rlim[resource];
10669 task_unlock(current->group_leader);
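
The old_getrlimit() fix clamps the already bounds-checked resource index with array_index_nospec() before it is used for the array load, so a mispredicted branch cannot be used to read out of bounds. Below is a rough userspace analogue of the clamp idea only; demo_clamp_index() is a simplification and the real kernel helper derives the mask without a conditional branch.

#include <stdio.h>

/*
 * Simplified stand-in for the clamp: return index when it is below size,
 * otherwise 0.  The real array_index_nospec() computes the mask without a
 * conditional branch so the clamp also holds under speculation.
 */
static unsigned long demo_clamp_index(unsigned long index, unsigned long size)
{
	unsigned long mask = (index < size) ? ~0UL : 0UL;

	return index & mask;
}

int main(void)
{
	int rlim[16] = { 0 };
	unsigned long idx = 20;		/* out of range, e.g. attacker-chosen */

	idx = demo_clamp_index(idx, 16);
	printf("clamped index = %lu, value = %d\n", idx, rlim[idx]);
	return 0;
}
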
10670 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
10671 index 664aebc50fe3..1961dd408bc5 100644
10672 --- a/kernel/workqueue.c
10673 +++ b/kernel/workqueue.c
10674 @@ -5272,7 +5272,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
10675
10676 ret = device_register(&wq_dev->dev);
10677 if (ret) {
10678 - kfree(wq_dev);
10679 + put_device(&wq_dev->dev);
10680 wq->wq_dev = NULL;
10681 return ret;
10682 }
10683 diff --git a/lib/test_bpf.c b/lib/test_bpf.c
10684 index 98da7520a6aa..1586dfdea809 100644
10685 --- a/lib/test_bpf.c
10686 +++ b/lib/test_bpf.c
10687 @@ -83,6 +83,7 @@ struct bpf_test {
10688 __u32 result;
10689 } test[MAX_SUBTESTS];
10690 int (*fill_helper)(struct bpf_test *self);
10691 + int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
10692 __u8 frag_data[MAX_DATA];
10693 };
10694
10695 @@ -1900,7 +1901,9 @@ static struct bpf_test tests[] = {
10696 },
10697 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
10698 { },
10699 - { }
10700 + { },
10701 + .fill_helper = NULL,
10702 + .expected_errcode = -EINVAL,
10703 },
10704 {
10705 "check: div_k_0",
10706 @@ -1910,7 +1913,9 @@ static struct bpf_test tests[] = {
10707 },
10708 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
10709 { },
10710 - { }
10711 + { },
10712 + .fill_helper = NULL,
10713 + .expected_errcode = -EINVAL,
10714 },
10715 {
10716 "check: unknown insn",
10717 @@ -1921,7 +1926,9 @@ static struct bpf_test tests[] = {
10718 },
10719 CLASSIC | FLAG_EXPECTED_FAIL,
10720 { },
10721 - { }
10722 + { },
10723 + .fill_helper = NULL,
10724 + .expected_errcode = -EINVAL,
10725 },
10726 {
10727 "check: out of range spill/fill",
10728 @@ -1931,7 +1938,9 @@ static struct bpf_test tests[] = {
10729 },
10730 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
10731 { },
10732 - { }
10733 + { },
10734 + .fill_helper = NULL,
10735 + .expected_errcode = -EINVAL,
10736 },
10737 {
10738 "JUMPS + HOLES",
10739 @@ -2023,6 +2032,8 @@ static struct bpf_test tests[] = {
10740 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
10741 { },
10742 { },
10743 + .fill_helper = NULL,
10744 + .expected_errcode = -EINVAL,
10745 },
10746 {
10747 "check: LDX + RET X",
10748 @@ -2033,6 +2044,8 @@ static struct bpf_test tests[] = {
10749 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
10750 { },
10751 { },
10752 + .fill_helper = NULL,
10753 + .expected_errcode = -EINVAL,
10754 },
10755 { /* Mainly checking JIT here. */
10756 "M[]: alt STX + LDX",
10757 @@ -2207,6 +2220,8 @@ static struct bpf_test tests[] = {
10758 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
10759 { },
10760 { },
10761 + .fill_helper = NULL,
10762 + .expected_errcode = -EINVAL,
10763 },
10764 { /* Passes checker but fails during runtime. */
10765 "LD [SKF_AD_OFF-1]",
10766 @@ -4803,6 +4818,7 @@ static struct bpf_test tests[] = {
10767 { },
10768 { },
10769 .fill_helper = bpf_fill_maxinsns4,
10770 + .expected_errcode = -EINVAL,
10771 },
10772 { /* Mainly checking JIT here. */
10773 "BPF_MAXINSNS: Very long jump",
10774 @@ -4858,10 +4874,15 @@ static struct bpf_test tests[] = {
10775 {
10776 "BPF_MAXINSNS: Jump, gap, jump, ...",
10777 { },
10778 +#ifdef CONFIG_BPF_JIT_ALWAYS_ON
10779 + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
10780 +#else
10781 CLASSIC | FLAG_NO_DATA,
10782 +#endif
10783 { },
10784 { { 0, 0xababcbac } },
10785 .fill_helper = bpf_fill_maxinsns11,
10786 + .expected_errcode = -ENOTSUPP,
10787 },
10788 {
10789 "BPF_MAXINSNS: ld_abs+get_processor_id",
10790 @@ -5632,7 +5653,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
10791
10792 *err = bpf_prog_create(&fp, &fprog);
10793 if (tests[which].aux & FLAG_EXPECTED_FAIL) {
10794 - if (*err == -EINVAL) {
10795 + if (*err == tests[which].expected_errcode) {
10796 pr_cont("PASS\n");
10797 /* Verifier rejected filter as expected. */
10798 *err = 0;
10799 diff --git a/mm/fadvise.c b/mm/fadvise.c
10800 index 6c707bfe02fd..27fc9ad267ac 100644
10801 --- a/mm/fadvise.c
10802 +++ b/mm/fadvise.c
10803 @@ -126,7 +126,15 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
10804 */
10805 start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
10806 end_index = (endbyte >> PAGE_SHIFT);
10807 - if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
10808 + /*
10809 + * The page at end_index will be inclusively discarded according
10810 + * by invalidate_mapping_pages(), so subtracting 1 from
10811 + * end_index means we will skip the last page. But if endbyte
10812 + * is page aligned or is at the end of file, we should not skip
10813 + * that page - discarding the last page is safe enough.
10814 + */
10815 + if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK &&
10816 + endbyte != inode->i_size - 1) {
10817 /* First page is tricky as 0 - 1 = -1, but pgoff_t
10818 * is unsigned, so the end_index >= start_index
10819 * check below would be true and we'll discard the whole
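
The fadvise hunk adjusts when the partial last page is skipped. A sketch of the underlying page-index arithmetic, assuming 4 KiB pages; the offset/endbyte values are made up and the end-of-file special case added by the patch is omitted here.

#include <stdio.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)

int main(void)
{
	unsigned long offset = 100, endbyte = 10000;	/* demo values */
	unsigned long start_index = (offset + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT;
	unsigned long end_index = endbyte >> DEMO_PAGE_SHIFT;

	/* endbyte is not the last byte of a page: the final page is partial,
	 * so it is kept in cache by stepping end_index back once.
	 */
	if ((endbyte & (DEMO_PAGE_SIZE - 1)) != DEMO_PAGE_SIZE - 1 && end_index > 0)
		end_index--;

	printf("start_index=%lu end_index=%lu\n", start_index, end_index);
	return 0;
}
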
10820 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
10821 index e2982ea26090..724372866e67 100644
10822 --- a/mm/huge_memory.c
10823 +++ b/mm/huge_memory.c
10824 @@ -542,7 +542,8 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
10825
10826 VM_BUG_ON_PAGE(!PageCompound(page), page);
10827
10828 - if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) {
10829 + if (mem_cgroup_try_charge(page, vma->vm_mm, gfp | __GFP_NORETRY, &memcg,
10830 + true)) {
10831 put_page(page);
10832 count_vm_event(THP_FAULT_FALLBACK);
10833 return VM_FAULT_FALLBACK;
10834 @@ -1060,7 +1061,7 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
10835 }
10836
10837 if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
10838 - huge_gfp, &memcg, true))) {
10839 + huge_gfp | __GFP_NORETRY, &memcg, true))) {
10840 put_page(new_page);
10841 split_huge_pmd(vma, fe->pmd, fe->address);
10842 if (page)
10843 diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
10844 index 0e9505f66ec1..73c258129257 100644
10845 --- a/mm/kasan/kasan.c
10846 +++ b/mm/kasan/kasan.c
10847 @@ -800,5 +800,5 @@ static int __init kasan_memhotplug_init(void)
10848 return 0;
10849 }
10850
10851 -module_init(kasan_memhotplug_init);
10852 +core_initcall(kasan_memhotplug_init);
10853 #endif
10854 diff --git a/mm/khugepaged.c b/mm/khugepaged.c
10855 index 898eb26f5dc8..1df37ee996d5 100644
10856 --- a/mm/khugepaged.c
10857 +++ b/mm/khugepaged.c
10858 @@ -963,7 +963,9 @@ static void collapse_huge_page(struct mm_struct *mm,
10859 goto out_nolock;
10860 }
10861
10862 - if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
10863 + /* Do not oom kill for khugepaged charges */
10864 + if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
10865 + &memcg, true))) {
10866 result = SCAN_CGROUP_CHARGE_FAIL;
10867 goto out_nolock;
10868 }
10869 @@ -1323,7 +1325,9 @@ static void collapse_shmem(struct mm_struct *mm,
10870 goto out;
10871 }
10872
10873 - if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
10874 + /* Do not oom kill for khugepaged charges */
10875 + if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
10876 + &memcg, true))) {
10877 result = SCAN_CGROUP_CHARGE_FAIL;
10878 goto out;
10879 }
10880 @@ -1678,10 +1682,14 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
10881 spin_unlock(&khugepaged_mm_lock);
10882
10883 mm = mm_slot->mm;
10884 - down_read(&mm->mmap_sem);
10885 - if (unlikely(khugepaged_test_exit(mm)))
10886 - vma = NULL;
10887 - else
10888 + /*
10889 + * Don't wait for semaphore (to avoid long wait times). Just move to
10890 + * the next mm on the list.
10891 + */
10892 + vma = NULL;
10893 + if (unlikely(!down_read_trylock(&mm->mmap_sem)))
10894 + goto breakouterloop_mmap_sem;
10895 + if (likely(!khugepaged_test_exit(mm)))
10896 vma = find_vma(mm, khugepaged_scan.address);
10897
10898 progress++;
10899 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
10900 index 20cf3be9a5e8..9e66449ed91f 100644
10901 --- a/mm/kmemleak.c
10902 +++ b/mm/kmemleak.c
10903 @@ -1577,8 +1577,7 @@ static void start_scan_thread(void)
10904 }
10905
10906 /*
10907 - * Stop the automatic memory scanning thread. This function must be called
10908 - * with the scan_mutex held.
10909 + * Stop the automatic memory scanning thread.
10910 */
10911 static void stop_scan_thread(void)
10912 {
10913 @@ -1841,12 +1840,15 @@ static void kmemleak_do_cleanup(struct work_struct *work)
10914 {
10915 stop_scan_thread();
10916
10917 + mutex_lock(&scan_mutex);
10918 /*
10919 - * Once the scan thread has stopped, it is safe to no longer track
10920 - * object freeing. Ordering of the scan thread stopping and the memory
10921 - * accesses below is guaranteed by the kthread_stop() function.
10922 + * Once it is made sure that kmemleak_scan has stopped, it is safe to no
10923 + * longer track object freeing. Ordering of the scan thread stopping and
10924 + * the memory accesses below is guaranteed by the kthread_stop()
10925 + * function.
10926 */
10927 kmemleak_free_enabled = 0;
10928 + mutex_unlock(&scan_mutex);
10929
10930 if (!kmemleak_found_leaks)
10931 __kmemleak_do_cleanup();
10932 diff --git a/mm/ksm.c b/mm/ksm.c
10933 index caa54a55a357..614b2cce9ad7 100644
10934 --- a/mm/ksm.c
10935 +++ b/mm/ksm.c
10936 @@ -1469,8 +1469,22 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
10937 tree_rmap_item =
10938 unstable_tree_search_insert(rmap_item, page, &tree_page);
10939 if (tree_rmap_item) {
10940 + bool split;
10941 +
10942 kpage = try_to_merge_two_pages(rmap_item, page,
10943 tree_rmap_item, tree_page);
10944 + /*
10945 + * If both pages we tried to merge belong to the same compound
10946 + * page, then we actually ended up increasing the reference
10947 + * count of the same compound page twice, and split_huge_page
10948 + * failed.
10949 + * Here we set a flag if that happened, and we use it later to
10950 + * try split_huge_page again. Since we call put_page right
10951 + * afterwards, the reference count will be correct and
10952 + * split_huge_page should succeed.
10953 + */
10954 + split = PageTransCompound(page)
10955 + && compound_head(page) == compound_head(tree_page);
10956 put_page(tree_page);
10957 if (kpage) {
10958 /*
10959 @@ -1495,6 +1509,20 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
10960 break_cow(tree_rmap_item);
10961 break_cow(rmap_item);
10962 }
10963 + } else if (split) {
10964 + /*
10965 + * We are here if we tried to merge two pages and
10966 + * failed because they both belonged to the same
10967 + * compound page. We will split the page now, but no
10968 + * merging will take place.
10969 + * We do not want to add the cost of a full lock; if
10970 + * the page is locked, it is better to skip it and
10971 + * perhaps try again later.
10972 + */
10973 + if (!trylock_page(page))
10974 + return;
10975 + split_huge_page(page);
10976 + unlock_page(page);
10977 }
10978 }
10979 }
10980 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
10981 index a8ab5e73dc61..69c4a0c92ebb 100644
10982 --- a/mm/mempolicy.c
10983 +++ b/mm/mempolicy.c
10984 @@ -1264,6 +1264,7 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
10985 unsigned long maxnode)
10986 {
10987 unsigned long k;
10988 + unsigned long t;
10989 unsigned long nlongs;
10990 unsigned long endmask;
10991
10992 @@ -1280,13 +1281,19 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
10993 else
10994 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
10995
10996 - /* When the user specified more nodes than supported just check
10997 - if the non supported part is all zero. */
10998 + /*
10999 + * When the user specified more nodes than supported just check
11000 + * if the non supported part is all zero.
11001 + *
11002 + * If maxnode have more longs than MAX_NUMNODES, check
11003 + * the bits in that area first. And then go through to
11004 + * check the rest bits which equal or bigger than MAX_NUMNODES.
11005 + * Otherwise, just check bits [MAX_NUMNODES, maxnode).
11006 + */
11007 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
11008 if (nlongs > PAGE_SIZE/sizeof(long))
11009 return -EINVAL;
11010 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
11011 - unsigned long t;
11012 if (get_user(t, nmask + k))
11013 return -EFAULT;
11014 if (k == nlongs - 1) {
11015 @@ -1299,6 +1306,16 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
11016 endmask = ~0UL;
11017 }
11018
11019 + if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
11020 + unsigned long valid_mask = endmask;
11021 +
11022 + valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
11023 + if (get_user(t, nmask + nlongs - 1))
11024 + return -EFAULT;
11025 + if (t & valid_mask)
11026 + return -EINVAL;
11027 + }
11028 +
11029 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
11030 return -EFAULT;
11031 nodes_addr(*nodes)[nlongs-1] &= endmask;
11032 @@ -1425,10 +1442,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
11033 goto out_put;
11034 }
11035
11036 - if (!nodes_subset(*new, node_states[N_MEMORY])) {
11037 - err = -EINVAL;
11038 + task_nodes = cpuset_mems_allowed(current);
11039 + nodes_and(*new, *new, task_nodes);
11040 + if (nodes_empty(*new))
11041 + goto out_put;
11042 +
11043 + nodes_and(*new, *new, node_states[N_MEMORY]);
11044 + if (nodes_empty(*new))
11045 goto out_put;
11046 - }
11047
11048 err = security_task_movememory(task);
11049 if (err)
11050 @@ -2138,6 +2159,9 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
11051 case MPOL_INTERLEAVE:
11052 return !!nodes_equal(a->v.nodes, b->v.nodes);
11053 case MPOL_PREFERRED:
11054 + /* a's ->flags is the same as b's */
11055 + if (a->flags & MPOL_F_LOCAL)
11056 + return true;
11057 return a->v.preferred_node == b->v.preferred_node;
11058 default:
11059 BUG();
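
The get_nodes() change validates the bits of the last user-supplied long that lie at or above MAX_NUMNODES. A sketch of the two masks involved; MAX_NUMNODES = 36 and maxnode = 64 are made-up demo values.

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

int main(void)
{
	unsigned long max_numnodes = 36;	/* demo value */
	unsigned long maxnode = 64;		/* demo value */
	unsigned long endmask, valid_mask;

	if (maxnode % BITS_PER_LONG == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* bits [max_numnodes, maxnode) of the last user long must all be clear */
	valid_mask = endmask & ~((1UL << (max_numnodes % BITS_PER_LONG)) - 1);

	printf("endmask    = %#018lx\n", endmask);
	printf("valid_mask = %#018lx\n", valid_mask);
	return 0;
}
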
11060 diff --git a/mm/swapfile.c b/mm/swapfile.c
11061 index d76b2a18f044..79c03ecd31c8 100644
11062 --- a/mm/swapfile.c
11063 +++ b/mm/swapfile.c
11064 @@ -2271,6 +2271,10 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
11065 maxpages = swp_offset(pte_to_swp_entry(
11066 swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
11067 last_page = swap_header->info.last_page;
11068 + if (!last_page) {
11069 + pr_warn("Empty swap-file\n");
11070 + return 0;
11071 + }
11072 if (last_page > maxpages) {
11073 pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
11074 maxpages << (PAGE_SHIFT - 10),
11075 diff --git a/mm/vmscan.c b/mm/vmscan.c
11076 index 557ad1367595..2d4b6478237b 100644
11077 --- a/mm/vmscan.c
11078 +++ b/mm/vmscan.c
11079 @@ -1374,6 +1374,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
11080
11081 if (PageDirty(page)) {
11082 struct address_space *mapping;
11083 + bool migrate_dirty;
11084
11085 /* ISOLATE_CLEAN means only clean pages */
11086 if (mode & ISOLATE_CLEAN)
11087 @@ -1382,10 +1383,19 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
11088 /*
11089 * Only pages without mappings or that have a
11090 * ->migratepage callback are possible to migrate
11091 - * without blocking
11092 + * without blocking. However, we can be racing with
11093 + * truncation so it's necessary to lock the page
11094 + * to stabilise the mapping as truncation holds
11095 + * the page lock until after the page is removed
11096 + * from the page cache.
11097 */
11098 + if (!trylock_page(page))
11099 + return ret;
11100 +
11101 mapping = page_mapping(page);
11102 - if (mapping && !mapping->a_ops->migratepage)
11103 + migrate_dirty = mapping && mapping->a_ops->migratepage;
11104 + unlock_page(page);
11105 + if (!migrate_dirty)
11106 return ret;
11107 }
11108 }
11109 @@ -3847,7 +3857,13 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
11110 */
11111 int page_evictable(struct page *page)
11112 {
11113 - return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
11114 + int ret;
11115 +
11116 + /* Prevent address_space of inode and swap cache from being freed */
11117 + rcu_read_lock();
11118 + ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
11119 + rcu_read_unlock();
11120 + return ret;
11121 }
11122
11123 #ifdef CONFIG_SHMEM
11124 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
11125 index e2d18d0b1f06..946f1c269b1f 100644
11126 --- a/net/batman-adv/bat_iv_ogm.c
11127 +++ b/net/batman-adv/bat_iv_ogm.c
11128 @@ -2705,7 +2705,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
11129 struct batadv_neigh_ifinfo *router_ifinfo = NULL;
11130 struct batadv_neigh_node *router;
11131 struct batadv_gw_node *curr_gw;
11132 - int ret = -EINVAL;
11133 + int ret = 0;
11134 void *hdr;
11135
11136 router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
11137 diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
11138 index e79f6f01182e..ed4ddf2059a6 100644
11139 --- a/net/batman-adv/bat_v.c
11140 +++ b/net/batman-adv/bat_v.c
11141 @@ -920,7 +920,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
11142 struct batadv_neigh_ifinfo *router_ifinfo = NULL;
11143 struct batadv_neigh_node *router;
11144 struct batadv_gw_node *curr_gw;
11145 - int ret = -EINVAL;
11146 + int ret = 0;
11147 void *hdr;
11148
11149 router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
11150 diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
11151 index 5419b1214abd..582e27698bf0 100644
11152 --- a/net/batman-adv/bridge_loop_avoidance.c
11153 +++ b/net/batman-adv/bridge_loop_avoidance.c
11154 @@ -2149,22 +2149,25 @@ batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
11155 {
11156 struct batadv_bla_claim *claim;
11157 int idx = 0;
11158 + int ret = 0;
11159
11160 rcu_read_lock();
11161 hlist_for_each_entry_rcu(claim, head, hash_entry) {
11162 if (idx++ < *idx_skip)
11163 continue;
11164 - if (batadv_bla_claim_dump_entry(msg, portid, seq,
11165 - primary_if, claim)) {
11166 +
11167 + ret = batadv_bla_claim_dump_entry(msg, portid, seq,
11168 + primary_if, claim);
11169 + if (ret) {
11170 *idx_skip = idx - 1;
11171 goto unlock;
11172 }
11173 }
11174
11175 - *idx_skip = idx;
11176 + *idx_skip = 0;
11177 unlock:
11178 rcu_read_unlock();
11179 - return 0;
11180 + return ret;
11181 }
11182
11183 /**
11184 @@ -2379,22 +2382,25 @@ batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
11185 {
11186 struct batadv_bla_backbone_gw *backbone_gw;
11187 int idx = 0;
11188 + int ret = 0;
11189
11190 rcu_read_lock();
11191 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
11192 if (idx++ < *idx_skip)
11193 continue;
11194 - if (batadv_bla_backbone_dump_entry(msg, portid, seq,
11195 - primary_if, backbone_gw)) {
11196 +
11197 + ret = batadv_bla_backbone_dump_entry(msg, portid, seq,
11198 + primary_if, backbone_gw);
11199 + if (ret) {
11200 *idx_skip = idx - 1;
11201 goto unlock;
11202 }
11203 }
11204
11205 - *idx_skip = idx;
11206 + *idx_skip = 0;
11207 unlock:
11208 rcu_read_unlock();
11209 - return 0;
11210 + return ret;
11211 }
11212
11213 /**
11214 diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
11215 index e257efdc5d03..df7c6a080188 100644
11216 --- a/net/batman-adv/distributed-arp-table.c
11217 +++ b/net/batman-adv/distributed-arp-table.c
11218 @@ -391,7 +391,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
11219 batadv_arp_hw_src(skb, hdr_size), &ip_src,
11220 batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
11221
11222 - if (hdr_size == 0)
11223 + if (hdr_size < sizeof(struct batadv_unicast_packet))
11224 return;
11225
11226 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
11227 diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
11228 index 0934730fb7ff..57215e3fd1a0 100644
11229 --- a/net/batman-adv/fragmentation.c
11230 +++ b/net/batman-adv/fragmentation.c
11231 @@ -276,7 +276,8 @@ batadv_frag_merge_packets(struct hlist_head *chain)
11232 /* Move the existing MAC header to just before the payload. (Override
11233 * the fragment header.)
11234 */
11235 - skb_pull_rcsum(skb_out, hdr_size);
11236 + skb_pull(skb_out, hdr_size);
11237 + skb_out->ip_summed = CHECKSUM_NONE;
11238 memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
11239 skb_set_mac_header(skb_out, -ETH_HLEN);
11240 skb_reset_network_header(skb_out);
11241 diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
11242 index de055d64debe..ed9aaf30fbcf 100644
11243 --- a/net/batman-adv/gateway_client.c
11244 +++ b/net/batman-adv/gateway_client.c
11245 @@ -715,6 +715,9 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
11246
11247 vid = batadv_get_vid(skb, 0);
11248
11249 + if (is_multicast_ether_addr(ethhdr->h_dest))
11250 + goto out;
11251 +
11252 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
11253 ethhdr->h_dest, vid);
11254 if (!orig_dst_node)
11255 diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
11256 index 13661f43386f..5a2aac17805b 100644
11257 --- a/net/batman-adv/multicast.c
11258 +++ b/net/batman-adv/multicast.c
11259 @@ -527,8 +527,8 @@ static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv)
11260 bat_priv->mcast.enabled = true;
11261 }
11262
11263 - return !(mcast_data.flags &
11264 - (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6));
11265 + return !(mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV4 &&
11266 + mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV6);
11267 }
11268
11269 /**
11270 @@ -769,8 +769,8 @@ static struct batadv_orig_node *
11271 batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
11272 struct ethhdr *ethhdr)
11273 {
11274 - return batadv_transtable_search(bat_priv, ethhdr->h_source,
11275 - ethhdr->h_dest, BATADV_NO_FLAGS);
11276 + return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
11277 + BATADV_NO_FLAGS);
11278 }
11279
11280 /**
11281 diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
11282 index 7e8dc648b95a..8b98609ebc1e 100644
11283 --- a/net/batman-adv/routing.c
11284 +++ b/net/batman-adv/routing.c
11285 @@ -724,6 +724,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
11286 /**
11287 * batadv_reroute_unicast_packet - update the unicast header for re-routing
11288 * @bat_priv: the bat priv with all the soft interface information
11289 + * @skb: unicast packet to process
11290 * @unicast_packet: the unicast header to be updated
11291 * @dst_addr: the payload destination
11292 * @vid: VLAN identifier
11293 @@ -735,7 +736,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
11294 * Return: true if the packet header has been updated, false otherwise
11295 */
11296 static bool
11297 -batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
11298 +batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
11299 struct batadv_unicast_packet *unicast_packet,
11300 u8 *dst_addr, unsigned short vid)
11301 {
11302 @@ -764,8 +765,10 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
11303 }
11304
11305 /* update the packet header */
11306 + skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
11307 ether_addr_copy(unicast_packet->dest, orig_addr);
11308 unicast_packet->ttvn = orig_ttvn;
11309 + skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
11310
11311 ret = true;
11312 out:
11313 @@ -806,7 +809,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
11314 * the packet to
11315 */
11316 if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
11317 - if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
11318 + if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
11319 ethhdr->h_dest, vid))
11320 batadv_dbg_ratelimited(BATADV_DBG_TT,
11321 bat_priv,
11322 @@ -852,7 +855,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
11323 * destination can possibly be updated and forwarded towards the new
11324 * target host
11325 */
11326 - if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
11327 + if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
11328 ethhdr->h_dest, vid)) {
11329 batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv,
11330 "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
11331 @@ -875,12 +878,14 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
11332 if (!primary_if)
11333 return false;
11334
11335 + /* update the packet header */
11336 + skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
11337 ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
11338 + unicast_packet->ttvn = curr_ttvn;
11339 + skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
11340
11341 batadv_hardif_put(primary_if);
11342
11343 - unicast_packet->ttvn = curr_ttvn;
11344 -
11345 return true;
11346 }
11347
11348 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
11349 index 49e16b6e0ba3..84c1b388d9ed 100644
11350 --- a/net/batman-adv/soft-interface.c
11351 +++ b/net/batman-adv/soft-interface.c
11352 @@ -448,13 +448,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
11353
11354 /* skb->dev & skb->pkt_type are set here */
11355 skb->protocol = eth_type_trans(skb, soft_iface);
11356 -
11357 - /* should not be necessary anymore as we use skb_pull_rcsum()
11358 - * TODO: please verify this and remove this TODO
11359 - * -- Dec 21st 2009, Simon Wunderlich
11360 - */
11361 -
11362 - /* skb->ip_summed = CHECKSUM_UNNECESSARY; */
11363 + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
11364
11365 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
11366 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
11367 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
11368 index 5a89a4ac86ef..0a9222ef904c 100644
11369 --- a/net/bridge/netfilter/ebtables.c
11370 +++ b/net/bridge/netfilter/ebtables.c
11371 @@ -1625,7 +1625,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
11372 int off = ebt_compat_match_offset(match, m->match_size);
11373 compat_uint_t msize = m->match_size - off;
11374
11375 - BUG_ON(off >= m->match_size);
11376 + if (WARN_ON(off >= m->match_size))
11377 + return -EINVAL;
11378
11379 if (copy_to_user(cm->u.name, match->name,
11380 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
11381 @@ -1652,7 +1653,8 @@ static int compat_target_to_user(struct ebt_entry_target *t,
11382 int off = xt_compat_target_offset(target);
11383 compat_uint_t tsize = t->target_size - off;
11384
11385 - BUG_ON(off >= t->target_size);
11386 + if (WARN_ON(off >= t->target_size))
11387 + return -EINVAL;
11388
11389 if (copy_to_user(cm->u.name, target->name,
11390 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
11391 @@ -1880,7 +1882,8 @@ static int ebt_buf_add(struct ebt_entries_buf_state *state,
11392 if (state->buf_kern_start == NULL)
11393 goto count_only;
11394
11395 - BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
11396 + if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
11397 + return -EINVAL;
11398
11399 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
11400
11401 @@ -1893,7 +1896,8 @@ static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
11402 {
11403 char *b = state->buf_kern_start;
11404
11405 - BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
11406 + if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
11407 + return -EINVAL;
11408
11409 if (b != NULL && sz > 0)
11410 memset(b + state->buf_kern_offset, 0, sz);
11411 @@ -1970,8 +1974,10 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
11412 pad = XT_ALIGN(size_kern) - size_kern;
11413
11414 if (pad > 0 && dst) {
11415 - BUG_ON(state->buf_kern_len <= pad);
11416 - BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
11417 + if (WARN_ON(state->buf_kern_len <= pad))
11418 + return -EINVAL;
11419 + if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
11420 + return -EINVAL;
11421 memset(dst + size_kern, 0, pad);
11422 }
11423 return off + match_size;
11424 @@ -2021,7 +2027,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
11425 if (ret < 0)
11426 return ret;
11427
11428 - BUG_ON(ret < match32->match_size);
11429 + if (WARN_ON(ret < match32->match_size))
11430 + return -EINVAL;
11431 growth += ret - match32->match_size;
11432 growth += ebt_compat_entry_padsize();
11433
11434 @@ -2090,8 +2097,12 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
11435 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
11436 */
11437 for (i = 0; i < 4 ; ++i) {
11438 - if (offsets[i] >= *total)
11439 + if (offsets[i] > *total)
11440 + return -EINVAL;
11441 +
11442 + if (i < 3 && offsets[i] == *total)
11443 return -EINVAL;
11444 +
11445 if (i == 0)
11446 continue;
11447 if (offsets[i-1] > offsets[i])
11448 @@ -2130,7 +2141,8 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
11449
11450 startoff = state->buf_user_offset - startoff;
11451
11452 - BUG_ON(*total < startoff);
11453 + if (WARN_ON(*total < startoff))
11454 + return -EINVAL;
11455 *total -= startoff;
11456 return 0;
11457 }
11458 @@ -2257,7 +2269,8 @@ static int compat_do_replace(struct net *net, void __user *user,
11459 state.buf_kern_len = size64;
11460
11461 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
11462 - BUG_ON(ret < 0); /* parses same data again */
11463 + if (WARN_ON(ret < 0))
11464 + goto out_unlock;
11465
11466 vfree(entries_tmp);
11467 tmp.entries_size = size64;
11468 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
11469 index a40ccc184b83..9f697b00158d 100644
11470 --- a/net/core/skbuff.c
11471 +++ b/net/core/skbuff.c
11472 @@ -4475,13 +4475,18 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mtu);
11473
11474 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
11475 {
11476 + int mac_len;
11477 +
11478 if (skb_cow(skb, skb_headroom(skb)) < 0) {
11479 kfree_skb(skb);
11480 return NULL;
11481 }
11482
11483 - memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
11484 - 2 * ETH_ALEN);
11485 + mac_len = skb->data - skb_mac_header(skb);
11486 + if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
11487 + memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
11488 + mac_len - VLAN_HLEN - ETH_TLEN);
11489 + }
11490 skb->mac_header += VLAN_HLEN;
11491 return skb;
11492 }
11493 diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
11494 index b120b9b11402..1ac55b116d5a 100644
11495 --- a/net/ipv4/ip_vti.c
11496 +++ b/net/ipv4/ip_vti.c
11497 @@ -396,8 +396,6 @@ static int vti_tunnel_init(struct net_device *dev)
11498 memcpy(dev->dev_addr, &iph->saddr, 4);
11499 memcpy(dev->broadcast, &iph->daddr, 4);
11500
11501 - dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
11502 - dev->mtu = ETH_DATA_LEN;
11503 dev->flags = IFF_NOARP;
11504 dev->addr_len = 4;
11505 dev->features |= NETIF_F_LLTX;
11506 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
11507 index 4c9fbf4f5905..890141d32ab9 100644
11508 --- a/net/ipv4/route.c
11509 +++ b/net/ipv4/route.c
11510 @@ -618,6 +618,7 @@ static inline u32 fnhe_hashfun(__be32 daddr)
11511 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
11512 {
11513 rt->rt_pmtu = fnhe->fnhe_pmtu;
11514 + rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
11515 rt->dst.expires = fnhe->fnhe_expires;
11516
11517 if (fnhe->fnhe_gw) {
11518 @@ -628,7 +629,7 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh
11519 }
11520
11521 static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
11522 - u32 pmtu, unsigned long expires)
11523 + u32 pmtu, bool lock, unsigned long expires)
11524 {
11525 struct fnhe_hash_bucket *hash;
11526 struct fib_nh_exception *fnhe;
11527 @@ -665,8 +666,10 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
11528 fnhe->fnhe_genid = genid;
11529 if (gw)
11530 fnhe->fnhe_gw = gw;
11531 - if (pmtu)
11532 + if (pmtu) {
11533 fnhe->fnhe_pmtu = pmtu;
11534 + fnhe->fnhe_mtu_locked = lock;
11535 + }
11536 fnhe->fnhe_expires = max(1UL, expires);
11537 /* Update all cached dsts too */
11538 rt = rcu_dereference(fnhe->fnhe_rth_input);
11539 @@ -690,6 +693,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
11540 fnhe->fnhe_daddr = daddr;
11541 fnhe->fnhe_gw = gw;
11542 fnhe->fnhe_pmtu = pmtu;
11543 + fnhe->fnhe_mtu_locked = lock;
11544 fnhe->fnhe_expires = expires;
11545
11546 /* Exception created; mark the cached routes for the nexthop
11547 @@ -771,7 +775,8 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
11548 struct fib_nh *nh = &FIB_RES_NH(res);
11549
11550 update_or_create_fnhe(nh, fl4->daddr, new_gw,
11551 - 0, jiffies + ip_rt_gc_timeout);
11552 + 0, false,
11553 + jiffies + ip_rt_gc_timeout);
11554 }
11555 if (kill_route)
11556 rt->dst.obsolete = DST_OBSOLETE_KILL;
11557 @@ -983,15 +988,18 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
11558 {
11559 struct dst_entry *dst = &rt->dst;
11560 struct fib_result res;
11561 + bool lock = false;
11562
11563 - if (dst_metric_locked(dst, RTAX_MTU))
11564 + if (ip_mtu_locked(dst))
11565 return;
11566
11567 if (ipv4_mtu(dst) < mtu)
11568 return;
11569
11570 - if (mtu < ip_rt_min_pmtu)
11571 + if (mtu < ip_rt_min_pmtu) {
11572 + lock = true;
11573 mtu = ip_rt_min_pmtu;
11574 + }
11575
11576 if (rt->rt_pmtu == mtu &&
11577 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
11578 @@ -1001,7 +1009,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
11579 if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
11580 struct fib_nh *nh = &FIB_RES_NH(res);
11581
11582 - update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
11583 + update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
11584 jiffies + ip_rt_mtu_expires);
11585 }
11586 rcu_read_unlock();
11587 @@ -1256,7 +1264,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
11588
11589 mtu = READ_ONCE(dst->dev->mtu);
11590
11591 - if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
11592 + if (unlikely(ip_mtu_locked(dst))) {
11593 if (rt->rt_uses_gateway && mtu > 576)
11594 mtu = 576;
11595 }
11596 @@ -1481,6 +1489,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
11597 rt->rt_is_input = 0;
11598 rt->rt_iif = 0;
11599 rt->rt_pmtu = 0;
11600 + rt->rt_mtu_locked = 0;
11601 rt->rt_gateway = 0;
11602 rt->rt_uses_gateway = 0;
11603 rt->rt_table_id = 0;
11604 @@ -2403,6 +2412,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
11605 rt->rt_is_input = ort->rt_is_input;
11606 rt->rt_iif = ort->rt_iif;
11607 rt->rt_pmtu = ort->rt_pmtu;
11608 + rt->rt_mtu_locked = ort->rt_mtu_locked;
11609
11610 rt->rt_genid = rt_genid_ipv4(net);
11611 rt->rt_flags = ort->rt_flags;
11612 @@ -2505,6 +2515,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
11613 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
11614 if (rt->rt_pmtu && expires)
11615 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
11616 + if (rt->rt_mtu_locked && expires)
11617 + metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
11618 if (rtnetlink_put_metrics(skb, metrics) < 0)
11619 goto nla_put_failure;
11620
11621 diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
11622 index c8e6d86be114..95ca88731ff5 100644
11623 --- a/net/ipv4/tcp_illinois.c
11624 +++ b/net/ipv4/tcp_illinois.c
11625 @@ -6,7 +6,7 @@
11626 * The algorithm is described in:
11627 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
11628 * for High-Speed Networks"
11629 - * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
11630 + * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf
11631 *
11632 * Implemented from description in paper and ns-2 simulation.
11633 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
11634 diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c
11635 index e45e2c41c7bd..37a3cb999859 100644
11636 --- a/net/ipv4/tcp_nv.c
11637 +++ b/net/ipv4/tcp_nv.c
11638 @@ -338,7 +338,7 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
11639 */
11640 cwnd_by_slope = (u32)
11641 div64_u64(((u64)ca->nv_rtt_max_rate) * ca->nv_min_rtt,
11642 - (u64)(80000 * tp->mss_cache));
11643 + 80000ULL * tp->mss_cache);
11644 max_win = cwnd_by_slope + nv_pad;
11645
11646 /* If cwnd > max_win, decrease cwnd
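
Writing the constant as 80000ULL forces the multiplication with tp->mss_cache to be carried out in 64 bits instead of wrapping in 32 bits first. A short sketch of the difference; the mss value is made up.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t mss = 65000;				/* demo value */
	uint64_t wrapped = (uint64_t)(80000u * mss);	/* 32-bit multiply wraps first */
	uint64_t exact = 80000ULL * mss;		/* 64-bit multiply */

	printf("wrapped: %" PRIu64 "\n", wrapped);
	printf("exact:   %" PRIu64 "\n", exact);
	return 0;
}
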
11647 diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
11648 index 6a7ff6957535..622e158a6fc4 100644
11649 --- a/net/ipv4/xfrm4_policy.c
11650 +++ b/net/ipv4/xfrm4_policy.c
11651 @@ -97,6 +97,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
11652 xdst->u.rt.rt_gateway = rt->rt_gateway;
11653 xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
11654 xdst->u.rt.rt_pmtu = rt->rt_pmtu;
11655 + xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
11656 xdst->u.rt.rt_table_id = rt->rt_table_id;
11657 INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
11658
11659 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
11660 index 417af5ea2509..c7b202c1720d 100644
11661 --- a/net/ipv6/ip6_tunnel.c
11662 +++ b/net/ipv6/ip6_tunnel.c
11663 @@ -1972,14 +1972,14 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
11664 {
11665 struct net *net = dev_net(dev);
11666 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
11667 - struct ip6_tnl *nt, *t;
11668 struct ip_tunnel_encap ipencap;
11669 + struct ip6_tnl *nt, *t;
11670 + int err;
11671
11672 nt = netdev_priv(dev);
11673
11674 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
11675 - int err = ip6_tnl_encap_setup(nt, &ipencap);
11676 -
11677 + err = ip6_tnl_encap_setup(nt, &ipencap);
11678 if (err < 0)
11679 return err;
11680 }
11681 @@ -1995,7 +1995,11 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
11682 return -EEXIST;
11683 }
11684
11685 - return ip6_tnl_create2(dev);
11686 + err = ip6_tnl_create2(dev);
11687 + if (!err && tb[IFLA_MTU])
11688 + ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
11689 +
11690 + return err;
11691 }
11692
11693 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
11694 diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
11695 index b263bf3a19f7..64ec23388450 100644
11696 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
11697 +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
11698 @@ -230,7 +230,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
11699
11700 if ((unsigned int)end > IPV6_MAXPLEN) {
11701 pr_debug("offset is too large.\n");
11702 - return -1;
11703 + return -EINVAL;
11704 }
11705
11706 ecn = ip6_frag_ecn(ipv6_hdr(skb));
11707 @@ -263,7 +263,8 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
11708 * this case. -DaveM
11709 */
11710 pr_debug("end of fragment not rounded to 8 bytes.\n");
11711 - return -1;
11712 + inet_frag_kill(&fq->q, &nf_frags);
11713 + return -EPROTO;
11714 }
11715 if (end > fq->q.len) {
11716 /* Some bits beyond end -> corruption. */
11717 @@ -357,7 +358,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
11718 discard_fq:
11719 inet_frag_kill(&fq->q, &nf_frags);
11720 err:
11721 - return -1;
11722 + return -EINVAL;
11723 }
11724
11725 /*
11726 @@ -566,6 +567,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
11727
11728 int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
11729 {
11730 + u16 savethdr = skb->transport_header;
11731 struct net_device *dev = skb->dev;
11732 int fhoff, nhoff, ret;
11733 struct frag_hdr *fhdr;
11734 @@ -599,8 +601,12 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
11735
11736 spin_lock_bh(&fq->q.lock);
11737
11738 - if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) {
11739 - ret = -EINVAL;
11740 + ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
11741 + if (ret < 0) {
11742 + if (ret == -EPROTO) {
11743 + skb->transport_header = savethdr;
11744 + ret = 0;
11745 + }
11746 goto out_unlock;
11747 }
11748
11749 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
11750 index dcb292134c21..ae0485d776f4 100644
11751 --- a/net/ipv6/sit.c
11752 +++ b/net/ipv6/sit.c
11753 @@ -1572,6 +1572,13 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
11754 if (err < 0)
11755 return err;
11756
11757 + if (tb[IFLA_MTU]) {
11758 + u32 mtu = nla_get_u32(tb[IFLA_MTU]);
11759 +
11760 + if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
11761 + dev->mtu = mtu;
11762 + }
11763 +
11764 #ifdef CONFIG_IPV6_SIT_6RD
11765 if (ipip6_netlink_6rd_parms(data, &ip6rd))
11766 err = ipip6_tunnel_update_6rd(nt, &ip6rd);
11767 diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
11768 index f8d4ab8ca1a5..4b60f68cb492 100644
11769 --- a/net/llc/llc_c_ac.c
11770 +++ b/net/llc/llc_c_ac.c
11771 @@ -389,7 +389,7 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb)
11772 llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
11773 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
11774 if (likely(!rc)) {
11775 - llc_conn_send_pdu(sk, skb);
11776 + rc = llc_conn_send_pdu(sk, skb);
11777 llc_conn_ac_inc_vs_by_1(sk, skb);
11778 }
11779 return rc;
11780 @@ -916,7 +916,7 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
11781 llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
11782 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
11783 if (likely(!rc)) {
11784 - llc_conn_send_pdu(sk, skb);
11785 + rc = llc_conn_send_pdu(sk, skb);
11786 llc_conn_ac_inc_vs_by_1(sk, skb);
11787 }
11788 return rc;
11789 @@ -935,14 +935,17 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
11790 int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb)
11791 {
11792 struct llc_sock *llc = llc_sk(sk);
11793 + int ret;
11794
11795 if (llc->ack_must_be_send) {
11796 - llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
11797 + ret = llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
11798 llc->ack_must_be_send = 0 ;
11799 llc->ack_pf = 0;
11800 - } else
11801 - llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
11802 - return 0;
11803 + } else {
11804 + ret = llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
11805 + }
11806 +
11807 + return ret;
11808 }
11809
11810 /**
11811 diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
11812 index d861b74ad068..79c346fd859b 100644
11813 --- a/net/llc/llc_conn.c
11814 +++ b/net/llc/llc_conn.c
11815 @@ -30,7 +30,7 @@
11816 #endif
11817
11818 static int llc_find_offset(int state, int ev_type);
11819 -static void llc_conn_send_pdus(struct sock *sk);
11820 +static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb);
11821 static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
11822 static int llc_exec_conn_trans_actions(struct sock *sk,
11823 struct llc_conn_state_trans *trans,
11824 @@ -193,11 +193,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
11825 return rc;
11826 }
11827
11828 -void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
11829 +int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
11830 {
11831 /* queue PDU to send to MAC layer */
11832 skb_queue_tail(&sk->sk_write_queue, skb);
11833 - llc_conn_send_pdus(sk);
11834 + return llc_conn_send_pdus(sk, skb);
11835 }
11836
11837 /**
11838 @@ -255,7 +255,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
11839 if (howmany_resend > 0)
11840 llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
11841 /* any PDUs to re-send are queued up; start sending to MAC */
11842 - llc_conn_send_pdus(sk);
11843 + llc_conn_send_pdus(sk, NULL);
11844 out:;
11845 }
11846
11847 @@ -296,7 +296,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
11848 if (howmany_resend > 0)
11849 llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
11850 /* any PDUs to re-send are queued up; start sending to MAC */
11851 - llc_conn_send_pdus(sk);
11852 + llc_conn_send_pdus(sk, NULL);
11853 out:;
11854 }
11855
11856 @@ -340,12 +340,16 @@ int llc_conn_remove_acked_pdus(struct sock *sk, u8 nr, u16 *how_many_unacked)
11857 /**
11858 * llc_conn_send_pdus - Sends queued PDUs
11859 * @sk: active connection
11860 + * @hold_skb: the skb held by the caller, or NULL if the caller does not care
11861 *
11862 - * Sends queued pdus to MAC layer for transmission.
11863 + * Sends queued pdus to MAC layer for transmission. When @hold_skb is
11864 + * NULL, always return 0. Otherwise, return 0 if @hold_skb is sent
11865 + * successfully, or 1 for failure.
11866 */
11867 -static void llc_conn_send_pdus(struct sock *sk)
11868 +static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
11869 {
11870 struct sk_buff *skb;
11871 + int ret = 0;
11872
11873 while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
11874 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
11875 @@ -357,10 +361,20 @@ static void llc_conn_send_pdus(struct sock *sk)
11876 skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
11877 if (!skb2)
11878 break;
11879 - skb = skb2;
11880 + dev_queue_xmit(skb2);
11881 + } else {
11882 + bool is_target = skb == hold_skb;
11883 + int rc;
11884 +
11885 + if (is_target)
11886 + skb_get(skb);
11887 + rc = dev_queue_xmit(skb);
11888 + if (is_target)
11889 + ret = rc;
11890 }
11891 - dev_queue_xmit(skb);
11892 }
11893 +
11894 + return ret;
11895 }
11896
11897 /**
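The llc_c_ac.c and llc_conn.c hunks above make llc_conn_send_pdu() report how the caller's own PDU fared at dev_queue_xmit() while the whole write queue is still flushed. A rough user-space sketch of that "flush everything, remember the status of one held item" pattern; the item type, queue and fake_xmit() below are illustrative, not kernel API:

#include <stdio.h>
#include <stdlib.h>

struct item {
	int id;
	struct item *next;
};

/* Pretend transmit: fails for odd ids so the example exercises both paths. */
static int fake_xmit(struct item *it)
{
	return (it->id & 1) ? -1 : 0;
}

/* Send every queued item; if hold points at one of them, return that
 * item's transmit status, otherwise return 0 (the contract of the
 * patched llc_conn_send_pdus()). */
static int send_all(struct item **queue, struct item *hold)
{
	int ret = 0;

	while (*queue) {
		struct item *it = *queue;
		int rc;

		*queue = it->next;
		rc = fake_xmit(it);
		if (it == hold)
			ret = rc;       /* only the held item's result is reported */
		free(it);
	}
	return ret;
}

int main(void)
{
	struct item *queue = NULL, *hold = NULL;

	for (int id = 3; id >= 1; id--) {
		struct item *it = malloc(sizeof(*it));

		it->id = id;
		it->next = queue;
		queue = it;
		if (id == 2)
			hold = it;      /* the "caller's" item */
	}
	printf("status of held item: %d\n", send_all(&queue, hold));
	return 0;
}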
11898 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
11899 index 404284a14d75..474655a2aeae 100644
11900 --- a/net/mac80211/rx.c
11901 +++ b/net/mac80211/rx.c
11902 @@ -3907,7 +3907,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
11903 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
11904 IEEE80211_FCTL_TODS)) !=
11905 fast_rx->expected_ds_bits)
11906 - goto drop;
11907 + return false;
11908
11909 /* assign the key to drop unencrypted frames (later)
11910 * and strip the IV/MIC if necessary
11911 diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
11912 index 97f4c9d6b54c..9249712765d7 100644
11913 --- a/net/mac80211/spectmgmt.c
11914 +++ b/net/mac80211/spectmgmt.c
11915 @@ -8,6 +8,7 @@
11916 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
11917 * Copyright 2007-2008, Intel Corporation
11918 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
11919 + * Copyright (C) 2018 Intel Corporation
11920 *
11921 * This program is free software; you can redistribute it and/or modify
11922 * it under the terms of the GNU General Public License version 2 as
11923 @@ -27,7 +28,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
11924 u32 sta_flags, u8 *bssid,
11925 struct ieee80211_csa_ie *csa_ie)
11926 {
11927 - enum nl80211_band new_band;
11928 + enum nl80211_band new_band = current_band;
11929 int new_freq;
11930 u8 new_chan_no;
11931 struct ieee80211_channel *new_chan;
11932 @@ -53,15 +54,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
11933 elems->ext_chansw_ie->new_operating_class,
11934 &new_band)) {
11935 sdata_info(sdata,
11936 - "cannot understand ECSA IE operating class %d, disconnecting\n",
11937 + "cannot understand ECSA IE operating class, %d, ignoring\n",
11938 elems->ext_chansw_ie->new_operating_class);
11939 - return -EINVAL;
11940 }
11941 new_chan_no = elems->ext_chansw_ie->new_ch_num;
11942 csa_ie->count = elems->ext_chansw_ie->count;
11943 csa_ie->mode = elems->ext_chansw_ie->mode;
11944 } else if (elems->ch_switch_ie) {
11945 - new_band = current_band;
11946 new_chan_no = elems->ch_switch_ie->new_ch_num;
11947 csa_ie->count = elems->ch_switch_ie->count;
11948 csa_ie->mode = elems->ch_switch_ie->mode;
11949 diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
11950 index 1ecf3d07d1f5..892c392ff8fc 100644
11951 --- a/net/mac80211/sta_info.c
11952 +++ b/net/mac80211/sta_info.c
11953 @@ -313,7 +313,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
11954
11955 if (ieee80211_hw_check(hw, USES_RSS)) {
11956 sta->pcpu_rx_stats =
11957 - alloc_percpu(struct ieee80211_sta_rx_stats);
11958 + alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
11959 if (!sta->pcpu_rx_stats)
11960 goto free;
11961 }
11962 @@ -433,6 +433,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
11963 if (sta->sta.txq[0])
11964 kfree(to_txq_info(sta->sta.txq[0]));
11965 free:
11966 + free_percpu(sta->pcpu_rx_stats);
11967 #ifdef CONFIG_MAC80211_MESH
11968 kfree(sta->mesh);
11969 #endif
11970 diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
11971 index 4528cff9138b..a123d0dc1ef9 100644
11972 --- a/net/netlabel/netlabel_unlabeled.c
11973 +++ b/net/netlabel/netlabel_unlabeled.c
11974 @@ -1469,6 +1469,16 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
11975 iface = rcu_dereference(netlbl_unlhsh_def);
11976 if (iface == NULL || !iface->valid)
11977 goto unlabel_getattr_nolabel;
11978 +
11979 +#if IS_ENABLED(CONFIG_IPV6)
11980 + /* When resolving a fallback label, check the sk_buff version as
11981 + * it is possible (e.g. SCTP) to have family = PF_INET6 while
11982 + * receiving ip_hdr(skb)->version = 4.
11983 + */
11984 + if (family == PF_INET6 && ip_hdr(skb)->version == 4)
11985 + family = PF_INET;
11986 +#endif /* IPv6 */
11987 +
11988 switch (family) {
11989 case PF_INET: {
11990 struct iphdr *hdr4;
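The netlabel hunk above trusts the packet rather than the socket: if the address family says PF_INET6 but the IP header's version nibble says 4 (possible with SCTP, per the comment), the lookup falls back to the IPv4 path. A small user-space sketch of reading that version nibble; the sample header bytes are illustrative:

#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>

/* The IP version lives in the top four bits of the first header byte,
 * for both IPv4 and IPv6. */
static unsigned int ip_version(const uint8_t *hdr)
{
	return hdr[0] >> 4;
}

int main(void)
{
	uint8_t v4_hdr[1] = { 0x45 };   /* version 4, IHL 5 */
	uint8_t v6_hdr[1] = { 0x60 };   /* version 6, start of traffic class */
	int family = AF_INET6;          /* what the socket claims */

	/* Mirror the patch: believe the packet, not the socket family. */
	if (family == AF_INET6 && ip_version(v4_hdr) == 4)
		family = AF_INET;

	printf("effective family: %d (v6 byte reports version %u)\n",
	       family, ip_version(v6_hdr));
	return 0;
}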
11991 diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
11992 index c5959ce503e6..3f266115294f 100644
11993 --- a/net/nfc/llcp_commands.c
11994 +++ b/net/nfc/llcp_commands.c
11995 @@ -149,6 +149,10 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
11996
11997 pr_debug("uri: %s, len: %zu\n", uri, uri_len);
11998
11999 + /* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */
12000 + if (WARN_ON_ONCE(uri_len > U8_MAX - 4))
12001 + return NULL;
12002 +
12003 sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
12004 if (sdreq == NULL)
12005 return NULL;
12006 diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
12007 index 102c681c48b5..dbf74afe82fb 100644
12008 --- a/net/nfc/netlink.c
12009 +++ b/net/nfc/netlink.c
12010 @@ -68,7 +68,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
12011 };
12012
12013 static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
12014 - [NFC_SDP_ATTR_URI] = { .type = NLA_STRING },
12015 + [NFC_SDP_ATTR_URI] = { .type = NLA_STRING,
12016 + .len = U8_MAX - 4 },
12017 [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 },
12018 };
12019
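The two NFC hunks above enforce, both when building the SDREQ TLV and in the netlink policy, that uri_len plus 3 header bytes plus 1 NUL still fits in the u8 tlv_len field. A hedged sketch of that "length plus fixed overhead must fit in 8 bits" guard; TLV_OVERHEAD mirrors the patch, the URIs are made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TLV_OVERHEAD 4u   /* 3 header bytes + 1 trailing NUL, as in the patch */

/* Return true only if uri_len plus the fixed overhead still fits in an
 * 8-bit length field; otherwise the caller must refuse to build the TLV. */
static bool tlv_len_ok(size_t uri_len)
{
	return uri_len <= UINT8_MAX - TLV_OVERHEAD;
}

int main(void)
{
	const char *short_uri = "urn:nfc:sn:snep";
	char long_uri[300];

	memset(long_uri, 'a', sizeof(long_uri) - 1);
	long_uri[sizeof(long_uri) - 1] = '\0';

	printf("%-4zu bytes: %s\n", strlen(short_uri),
	       tlv_len_ok(strlen(short_uri)) ? "accepted" : "rejected");
	printf("%-4zu bytes: %s\n", strlen(long_uri),
	       tlv_len_ok(strlen(long_uri)) ? "accepted" : "rejected");
	return 0;
}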
12020 diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
12021 index 466393936db9..f135814c34ad 100644
12022 --- a/net/openvswitch/conntrack.c
12023 +++ b/net/openvswitch/conntrack.c
12024 @@ -906,6 +906,36 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
12025 return 0;
12026 }
12027
12028 +/* Trim the skb to the length specified by the IP/IPv6 header,
12029 + * removing any trailing lower-layer padding. This prepares the skb
12030 + * for higher-layer processing that assumes skb->len excludes padding
12031 + * (such as nf_ip_checksum). The caller needs to pull the skb to the
12032 + * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
12033 + */
12034 +static int ovs_skb_network_trim(struct sk_buff *skb)
12035 +{
12036 + unsigned int len;
12037 + int err;
12038 +
12039 + switch (skb->protocol) {
12040 + case htons(ETH_P_IP):
12041 + len = ntohs(ip_hdr(skb)->tot_len);
12042 + break;
12043 + case htons(ETH_P_IPV6):
12044 + len = sizeof(struct ipv6hdr)
12045 + + ntohs(ipv6_hdr(skb)->payload_len);
12046 + break;
12047 + default:
12048 + len = skb->len;
12049 + }
12050 +
12051 + err = pskb_trim_rcsum(skb, len);
12052 + if (err)
12053 + kfree_skb(skb);
12054 +
12055 + return err;
12056 +}
12057 +
12058 /* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
12059 * value if 'skb' is freed.
12060 */
12061 @@ -920,6 +950,10 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
12062 nh_ofs = skb_network_offset(skb);
12063 skb_pull_rcsum(skb, nh_ofs);
12064
12065 + err = ovs_skb_network_trim(skb);
12066 + if (err)
12067 + return err;
12068 +
12069 if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
12070 err = handle_fragments(net, key, info->zone.id, skb);
12071 if (err)
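ovs_skb_network_trim() above shrinks the skb to the length the IP header declares, so later code (e.g. checksum helpers) never sees trailing link-layer padding. A user-space sketch of computing that target length from raw IPv4/IPv6 headers, assuming the standard libc ip/ip6 header structs and an invented sample frame:

#include <arpa/inet.h>     /* ntohs, htons */
#include <netinet/ip.h>    /* struct iphdr */
#include <netinet/ip6.h>   /* struct ip6_hdr */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Given a buffer that starts at the network header, return how many
 * bytes the IP header says the packet occupies; anything beyond that is
 * padding to trim (the kernel does this with pskb_trim_rcsum). */
static size_t ip_declared_len(const uint8_t *pkt, size_t buf_len, uint16_t ethertype)
{
	if (ethertype == 0x0800 && buf_len >= sizeof(struct iphdr)) {
		const struct iphdr *ip4 = (const struct iphdr *)pkt;

		return ntohs(ip4->tot_len);
	}
	if (ethertype == 0x86DD && buf_len >= sizeof(struct ip6_hdr)) {
		const struct ip6_hdr *ip6 = (const struct ip6_hdr *)pkt;

		return sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen);
	}
	return buf_len;          /* unknown protocol: keep everything */
}

int main(void)
{
	uint8_t frame[64] = { 0 };
	struct iphdr ip4 = { .version = 4, .ihl = 5, .tot_len = htons(40) };

	memcpy(frame, &ip4, sizeof(ip4));
	/* 64-byte buffer, but the header only claims 40 bytes of packet. */
	printf("trim 64 -> %zu bytes\n",
	       ip_declared_len(frame, sizeof(frame), 0x0800));
	return 0;
}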
12072 diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c
12073 index 0d11132b3370..ff0112bc247f 100644
12074 --- a/net/qrtr/smd.c
12075 +++ b/net/qrtr/smd.c
12076 @@ -116,5 +116,6 @@ static struct qcom_smd_driver qcom_smd_qrtr_driver = {
12077
12078 module_qcom_smd_driver(qcom_smd_qrtr_driver);
12079
12080 +MODULE_ALIAS("rpmsg:IPCRTR");
12081 MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver");
12082 MODULE_LICENSE("GPL v2");
12083 diff --git a/net/rds/ib.c b/net/rds/ib.c
12084 index 5680d90b0b77..0efb3d2b338d 100644
12085 --- a/net/rds/ib.c
12086 +++ b/net/rds/ib.c
12087 @@ -336,7 +336,8 @@ static int rds_ib_laddr_check(struct net *net, __be32 addr)
12088 /* Create a CMA ID and try to bind it. This catches both
12089 * IB and iWARP capable NICs.
12090 */
12091 - cm_id = rdma_create_id(&init_net, NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
12092 + cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler,
12093 + NULL, RDMA_PS_TCP, IB_QPT_RC);
12094 if (IS_ERR(cm_id))
12095 return PTR_ERR(cm_id);
12096
12097 diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
12098 index 1060d14d4e6a..f3ac85a285a2 100644
12099 --- a/net/rxrpc/input.c
12100 +++ b/net/rxrpc/input.c
12101 @@ -1166,16 +1166,19 @@ void rxrpc_data_ready(struct sock *udp_sk)
12102 goto discard_unlock;
12103
12104 if (sp->hdr.callNumber == chan->last_call) {
12105 - /* For the previous service call, if completed successfully, we
12106 - * discard all further packets.
12107 + if (chan->call ||
12108 + sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
12109 + goto discard_unlock;
12110 +
12111 + /* For the previous service call, if completed
12112 + * successfully, we discard all further packets.
12113 */
12114 if (rxrpc_conn_is_service(conn) &&
12115 - (chan->last_type == RXRPC_PACKET_TYPE_ACK ||
12116 - sp->hdr.type == RXRPC_PACKET_TYPE_ABORT))
12117 + chan->last_type == RXRPC_PACKET_TYPE_ACK)
12118 goto discard_unlock;
12119
12120 - /* But otherwise we need to retransmit the final packet from
12121 - * data cached in the connection record.
12122 + /* But otherwise we need to retransmit the final packet
12123 + * from data cached in the connection record.
12124 */
12125 rxrpc_post_packet_to_conn(conn, skb);
12126 goto out_unlock;
12127 diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
12128 index c29362d50a92..3e52b7fdc35d 100644
12129 --- a/net/rxrpc/recvmsg.c
12130 +++ b/net/rxrpc/recvmsg.c
12131 @@ -493,9 +493,10 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
12132 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
12133 sizeof(unsigned int), &id32);
12134 } else {
12135 + unsigned long idl = call->user_call_ID;
12136 +
12137 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
12138 - sizeof(unsigned long),
12139 - &call->user_call_ID);
12140 + sizeof(unsigned long), &idl);
12141 }
12142 if (ret < 0)
12143 goto error;
12144 diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
12145 index b214a4d4a641..1de27c39564b 100644
12146 --- a/net/rxrpc/sendmsg.c
12147 +++ b/net/rxrpc/sendmsg.c
12148 @@ -78,7 +78,9 @@ static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
12149 spin_lock_bh(&call->lock);
12150
12151 if (call->state < RXRPC_CALL_COMPLETE) {
12152 - call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS;
12153 + call->rxtx_annotations[ix] =
12154 + (call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) |
12155 + RXRPC_TX_ANNO_RETRANS;
12156 if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
12157 rxrpc_queue_call(call);
12158 }
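The rxrpc sendmsg hunk above stops the retransmit marking from clobbering the RXRPC_TX_ANNO_LAST bit: that bit is kept and only the remaining annotation bits are rewritten. A tiny sketch of the same mask-and-set idiom; the flag values below are invented, only the pattern matches:

#include <stdio.h>
#include <stdint.h>

#define ANNO_LAST     0x80u   /* must survive any state change */
#define ANNO_RETRANS  0x03u   /* new state we want to record   */

/* Replace the state bits of an annotation while preserving the LAST
 * marker, mirroring (anno & ANNO_LAST) | ANNO_RETRANS in the patch. */
static uint8_t mark_retrans(uint8_t anno)
{
	return (uint8_t)((anno & ANNO_LAST) | ANNO_RETRANS);
}

int main(void)
{
	uint8_t last_packet = 0x81;   /* LAST bit set, some old state */
	uint8_t middle_packet = 0x01; /* LAST bit clear               */

	printf("0x%02x -> 0x%02x\n", last_packet, mark_retrans(last_packet));
	printf("0x%02x -> 0x%02x\n", middle_packet, mark_retrans(middle_packet));
	return 0;
}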
12159 diff --git a/scripts/adjust_autoksyms.sh b/scripts/adjust_autoksyms.sh
12160 index 8dc1918b6783..564db3542ec2 100755
12161 --- a/scripts/adjust_autoksyms.sh
12162 +++ b/scripts/adjust_autoksyms.sh
12163 @@ -83,6 +83,13 @@ while read sympath; do
12164 depfile="include/config/ksym/${sympath}.h"
12165 mkdir -p "$(dirname "$depfile")"
12166 touch "$depfile"
12167 + # Filesystems with coarse time precision may create timestamps
12168 + # equal to the one from a file that was very recently built and that
12169 + # needs to be rebuilt. Let's guard against that by making sure our
12170 + # dep files are always newer than the first file we created here.
12171 + while [ ! "$depfile" -nt "$new_ksyms_file" ]; do
12172 + touch "$depfile"
12173 + done
12174 echo $((count += 1))
12175 done | tail -1 )
12176 changed=${changed:-0}
12177 diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
12178 index cbf4996dd9c1..ed29bad1f03a 100644
12179 --- a/scripts/kconfig/expr.c
12180 +++ b/scripts/kconfig/expr.c
12181 @@ -113,7 +113,7 @@ void expr_free(struct expr *e)
12182 break;
12183 case E_NOT:
12184 expr_free(e->left.expr);
12185 - return;
12186 + break;
12187 case E_EQUAL:
12188 case E_GEQ:
12189 case E_GTH:
12190 diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
12191 index aed678e8a777..4a61636158dd 100644
12192 --- a/scripts/kconfig/menu.c
12193 +++ b/scripts/kconfig/menu.c
12194 @@ -364,6 +364,7 @@ void menu_finalize(struct menu *parent)
12195 menu->parent = parent;
12196 last_menu = menu;
12197 }
12198 + expr_free(basedep);
12199 if (last_menu) {
12200 parent->list = parent->next;
12201 parent->next = last_menu->next;
12202 diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
12203 index 71bf8bff696a..5122ed2d839a 100644
12204 --- a/scripts/kconfig/zconf.y
12205 +++ b/scripts/kconfig/zconf.y
12206 @@ -107,7 +107,27 @@ static struct menu *current_menu, *current_entry;
12207 %%
12208 input: nl start | start;
12209
12210 -start: mainmenu_stmt stmt_list | stmt_list;
12211 +start: mainmenu_stmt stmt_list | no_mainmenu_stmt stmt_list;
12212 +
12213 +/* mainmenu entry */
12214 +
12215 +mainmenu_stmt: T_MAINMENU prompt nl
12216 +{
12217 + menu_add_prompt(P_MENU, $2, NULL);
12218 +};
12219 +
12220 +/* Default main menu, if there's no mainmenu entry */
12221 +
12222 +no_mainmenu_stmt: /* empty */
12223 +{
12224 + /*
12225 + * Hack: Keep the main menu title on the heap so we can safely free it
12226 + * later regardless of whether it comes from the 'prompt' in
12227 + * mainmenu_stmt or here
12228 + */
12229 + menu_add_prompt(P_MENU, strdup("Linux Kernel Configuration"), NULL);
12230 +};
12231 +
12232
12233 stmt_list:
12234 /* empty */
12235 @@ -344,13 +364,6 @@ if_block:
12236 | if_block choice_stmt
12237 ;
12238
12239 -/* mainmenu entry */
12240 -
12241 -mainmenu_stmt: T_MAINMENU prompt nl
12242 -{
12243 - menu_add_prompt(P_MENU, $2, NULL);
12244 -};
12245 -
12246 /* menu entry */
12247
12248 menu: T_MENU prompt T_EOL
12249 @@ -495,6 +508,7 @@ word_opt: /* empty */ { $$ = NULL; }
12250
12251 void conf_parse(const char *name)
12252 {
12253 + const char *tmp;
12254 struct symbol *sym;
12255 int i;
12256
12257 @@ -502,7 +516,6 @@ void conf_parse(const char *name)
12258
12259 sym_init();
12260 _menu_init();
12261 - rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
12262
12263 if (getenv("ZCONF_DEBUG"))
12264 zconfdebug = 1;
12265 @@ -512,8 +525,10 @@ void conf_parse(const char *name)
12266 if (!modules_sym)
12267 modules_sym = sym_find( "n" );
12268
12269 + tmp = rootmenu.prompt->text;
12270 rootmenu.prompt->text = _(rootmenu.prompt->text);
12271 rootmenu.prompt->text = sym_expand_string_value(rootmenu.prompt->text);
12272 + free((char*)tmp);
12273
12274 menu_finalize(&rootmenu);
12275 for_all_symbols(i, sym) {
12276 diff --git a/scripts/package/builddeb b/scripts/package/builddeb
12277 index 3c575cd07888..0a2a7372538c 100755
12278 --- a/scripts/package/builddeb
12279 +++ b/scripts/package/builddeb
12280 @@ -325,7 +325,7 @@ fi
12281
12282 # Build kernel header package
12283 (cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles"
12284 -(cd $srctree; find arch/*/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles"
12285 +(cd $srctree; find arch/*/include include scripts -type f -o -type l) >> "$objtree/debian/hdrsrcfiles"
12286 (cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles"
12287 (cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles"
12288 if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
12289 diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
12290 index 4304372b323f..95433acde1c1 100644
12291 --- a/security/integrity/digsig.c
12292 +++ b/security/integrity/digsig.c
12293 @@ -18,6 +18,7 @@
12294 #include <linux/cred.h>
12295 #include <linux/key-type.h>
12296 #include <linux/digsig.h>
12297 +#include <linux/vmalloc.h>
12298 #include <crypto/public_key.h>
12299 #include <keys/system_keyring.h>
12300
12301 diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
12302 index 38f2ed830dd6..93f09173cc49 100644
12303 --- a/security/integrity/ima/ima_crypto.c
12304 +++ b/security/integrity/ima/ima_crypto.c
12305 @@ -78,6 +78,8 @@ int __init ima_init_crypto(void)
12306 hash_algo_name[ima_hash_algo], rc);
12307 return rc;
12308 }
12309 + pr_info("Allocated hash algorithm: %s\n",
12310 + hash_algo_name[ima_hash_algo]);
12311 return 0;
12312 }
12313
12314 diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
12315 index 2b3def14b4fb..a71f906b4f7a 100644
12316 --- a/security/integrity/ima/ima_main.c
12317 +++ b/security/integrity/ima/ima_main.c
12318 @@ -16,6 +16,9 @@
12319 * implements the IMA hooks: ima_bprm_check, ima_file_mmap,
12320 * and ima_file_check.
12321 */
12322 +
12323 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12324 +
12325 #include <linux/module.h>
12326 #include <linux/file.h>
12327 #include <linux/binfmts.h>
12328 @@ -426,6 +429,16 @@ static int __init init_ima(void)
12329
12330 hash_setup(CONFIG_IMA_DEFAULT_HASH);
12331 error = ima_init();
12332 +
12333 + if (error && strcmp(hash_algo_name[ima_hash_algo],
12334 + CONFIG_IMA_DEFAULT_HASH) != 0) {
12335 + pr_info("Allocating %s failed, going to use default hash algorithm %s\n",
12336 + hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH);
12337 + hash_setup_done = 0;
12338 + hash_setup(CONFIG_IMA_DEFAULT_HASH);
12339 + error = ima_init();
12340 + }
12341 +
12342 if (!error) {
12343 ima_initialized = 1;
12344 ima_update_policy_flag();
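The ima_main hunk above retries IMA initialization with the compiled-in default hash when the configured algorithm cannot be allocated. A small sketch of that "try the requested algorithm, fall back to the default" pattern, assuming an invented try_init() backend:

#include <stdio.h>
#include <string.h>

#define DEFAULT_ALGO "sha1"

/* Pretend allocator: only the default algorithm is available, so any
 * other request fails, like a kernel missing the configured hash driver. */
static int try_init(const char *algo)
{
	return strcmp(algo, DEFAULT_ALGO) == 0 ? 0 : -1;
}

static int init_with_fallback(const char *requested)
{
	int err = try_init(requested);

	if (err && strcmp(requested, DEFAULT_ALGO) != 0) {
		fprintf(stderr, "Allocating %s failed, using default %s\n",
			requested, DEFAULT_ALGO);
		err = try_init(DEFAULT_ALGO);
	}
	return err;
}

int main(void)
{
	printf("sha512 -> %d\n", init_with_fallback("sha512"));
	printf("sha1   -> %d\n", init_with_fallback("sha1"));
	return 0;
}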
12345 diff --git a/sound/core/timer.c b/sound/core/timer.c
12346 index e5ddc475dca4..152254193c69 100644
12347 --- a/sound/core/timer.c
12348 +++ b/sound/core/timer.c
12349 @@ -547,7 +547,7 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
12350 else
12351 timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
12352 snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
12353 - SNDRV_TIMER_EVENT_CONTINUE);
12354 + SNDRV_TIMER_EVENT_PAUSE);
12355 unlock:
12356 spin_unlock_irqrestore(&timer->lock, flags);
12357 return result;
12358 @@ -569,7 +569,7 @@ static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
12359 list_del_init(&timeri->ack_list);
12360 list_del_init(&timeri->active_list);
12361 snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
12362 - SNDRV_TIMER_EVENT_CONTINUE);
12363 + SNDRV_TIMER_EVENT_PAUSE);
12364 spin_unlock(&timeri->timer->lock);
12365 }
12366 spin_unlock_irqrestore(&slave_active_lock, flags);
12367 diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
12368 index 6c58e6f73a01..7c6ef879c520 100644
12369 --- a/sound/core/vmaster.c
12370 +++ b/sound/core/vmaster.c
12371 @@ -68,10 +68,13 @@ static int slave_update(struct link_slave *slave)
12372 return -ENOMEM;
12373 uctl->id = slave->slave.id;
12374 err = slave->slave.get(&slave->slave, uctl);
12375 + if (err < 0)
12376 + goto error;
12377 for (ch = 0; ch < slave->info.count; ch++)
12378 slave->vals[ch] = uctl->value.integer.value[ch];
12379 + error:
12380 kfree(uctl);
12381 - return 0;
12382 + return err < 0 ? err : 0;
12383 }
12384
12385 /* get the slave ctl info and save the initial values */
12386 diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
12387 index 7f3b5ed81995..f7a492c382d9 100644
12388 --- a/sound/pci/hda/Kconfig
12389 +++ b/sound/pci/hda/Kconfig
12390 @@ -88,7 +88,6 @@ config SND_HDA_PATCH_LOADER
12391 config SND_HDA_CODEC_REALTEK
12392 tristate "Build Realtek HD-audio codec support"
12393 select SND_HDA_GENERIC
12394 - select INPUT
12395 help
12396 Say Y or M here to include Realtek HD-audio codec support in
12397 snd-hda-intel driver, such as ALC880.
12398 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
12399 index 7ece1ab57eef..39cd35f6a6df 100644
12400 --- a/sound/pci/hda/patch_realtek.c
12401 +++ b/sound/pci/hda/patch_realtek.c
12402 @@ -3495,6 +3495,7 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
12403 }
12404 }
12405
12406 +#if IS_REACHABLE(INPUT)
12407 static void gpio2_mic_hotkey_event(struct hda_codec *codec,
12408 struct hda_jack_callback *event)
12409 {
12410 @@ -3627,6 +3628,10 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
12411 spec->kb_dev = NULL;
12412 }
12413 }
12414 +#else /* INPUT */
12415 +#define alc280_fixup_hp_gpio2_mic_hotkey NULL
12416 +#define alc233_fixup_lenovo_line2_mic_hotkey NULL
12417 +#endif /* INPUT */
12418
12419 static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
12420 const struct hda_fixup *fix, int action)
12421 diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
12422 index b699aea9a025..7788cfb7cd7e 100644
12423 --- a/tools/lib/bpf/libbpf.c
12424 +++ b/tools/lib/bpf/libbpf.c
12425 @@ -590,6 +590,24 @@ bpf_object__init_maps_name(struct bpf_object *obj)
12426 return 0;
12427 }
12428
12429 +static bool section_have_execinstr(struct bpf_object *obj, int idx)
12430 +{
12431 + Elf_Scn *scn;
12432 + GElf_Shdr sh;
12433 +
12434 + scn = elf_getscn(obj->efile.elf, idx);
12435 + if (!scn)
12436 + return false;
12437 +
12438 + if (gelf_getshdr(scn, &sh) != &sh)
12439 + return false;
12440 +
12441 + if (sh.sh_flags & SHF_EXECINSTR)
12442 + return true;
12443 +
12444 + return false;
12445 +}
12446 +
12447 static int bpf_object__elf_collect(struct bpf_object *obj)
12448 {
12449 Elf *elf = obj->efile.elf;
12450 @@ -673,6 +691,14 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
12451 } else if (sh.sh_type == SHT_REL) {
12452 void *reloc = obj->efile.reloc;
12453 int nr_reloc = obj->efile.nr_reloc + 1;
12454 + int sec = sh.sh_info; /* points to other section */
12455 +
12456 + /* Only do relo for section with exec instructions */
12457 + if (!section_have_execinstr(obj, sec)) {
12458 + pr_debug("skip relo %s(%d) for section(%d)\n",
12459 + name, idx, sec);
12460 + continue;
12461 + }
12462
12463 reloc = realloc(reloc,
12464 sizeof(*obj->efile.reloc) * nr_reloc);
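section_have_execinstr() above lets libbpf skip relocation sections whose target section carries no executable instructions (SHF_EXECINSTR clear). A hedged libelf sketch of the same test against an ELF object on disk; error handling is trimmed and the command-line interface is invented (build with -lelf):

#include <fcntl.h>
#include <gelf.h>
#include <libelf.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Return true if section idx of the given ELF has SHF_EXECINSTR set,
 * i.e. it actually contains code worth relocating. */
static bool section_is_executable(Elf *elf, size_t idx)
{
	Elf_Scn *scn = elf_getscn(elf, idx);
	GElf_Shdr sh;

	if (!scn || gelf_getshdr(scn, &sh) != &sh)
		return false;
	return (sh.sh_flags & SHF_EXECINSTR) != 0;
}

int main(int argc, char **argv)
{
	if (argc < 3) {
		fprintf(stderr, "usage: %s <elf-file> <section-index>\n", argv[0]);
		return 1;
	}
	if (elf_version(EV_CURRENT) == EV_NONE)
		return 1;

	int fd = open(argv[1], O_RDONLY);
	Elf *elf = (fd < 0) ? NULL : elf_begin(fd, ELF_C_READ, NULL);
	size_t idx = (size_t)atoi(argv[2]);

	if (!elf) {
		fprintf(stderr, "cannot open %s\n", argv[1]);
		return 1;
	}
	printf("section %zu: %s\n", idx,
	       section_is_executable(elf, idx) ? "executable" : "no instructions");
	elf_end(elf);
	close(fd);
	return 0;
}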
12465 diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
12466 index 664c90c8e22b..669475300ba8 100644
12467 --- a/tools/lib/traceevent/event-parse.c
12468 +++ b/tools/lib/traceevent/event-parse.c
12469 @@ -4927,21 +4927,22 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
12470 else
12471 ls = 2;
12472
12473 - if (*(ptr+1) == 'F' || *(ptr+1) == 'f' ||
12474 - *(ptr+1) == 'S' || *(ptr+1) == 's') {
12475 + if (isalnum(ptr[1]))
12476 ptr++;
12477 +
12478 + if (*ptr == 'F' || *ptr == 'f' ||
12479 + *ptr == 'S' || *ptr == 's') {
12480 show_func = *ptr;
12481 - } else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') {
12482 - print_mac_arg(s, *(ptr+1), data, size, event, arg);
12483 - ptr++;
12484 + } else if (*ptr == 'M' || *ptr == 'm') {
12485 + print_mac_arg(s, *ptr, data, size, event, arg);
12486 arg = arg->next;
12487 break;
12488 - } else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') {
12489 + } else if (*ptr == 'I' || *ptr == 'i') {
12490 int n;
12491
12492 - n = print_ip_arg(s, ptr+1, data, size, event, arg);
12493 + n = print_ip_arg(s, ptr, data, size, event, arg);
12494 if (n > 0) {
12495 - ptr += n;
12496 + ptr += n - 1;
12497 arg = arg->next;
12498 break;
12499 }
12500 diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
12501 index 7c214ceb9386..5e10ba796a6f 100644
12502 --- a/tools/lib/traceevent/parse-filter.c
12503 +++ b/tools/lib/traceevent/parse-filter.c
12504 @@ -1879,17 +1879,25 @@ static const char *get_field_str(struct filter_arg *arg, struct pevent_record *r
12505 struct pevent *pevent;
12506 unsigned long long addr;
12507 const char *val = NULL;
12508 + unsigned int size;
12509 char hex[64];
12510
12511 /* If the field is not a string convert it */
12512 if (arg->str.field->flags & FIELD_IS_STRING) {
12513 val = record->data + arg->str.field->offset;
12514 + size = arg->str.field->size;
12515 +
12516 + if (arg->str.field->flags & FIELD_IS_DYNAMIC) {
12517 + addr = *(unsigned int *)val;
12518 + val = record->data + (addr & 0xffff);
12519 + size = addr >> 16;
12520 + }
12521
12522 /*
12523 * We need to copy the data since we can't be sure the field
12524 * is null terminated.
12525 */
12526 - if (*(val + arg->str.field->size - 1)) {
12527 + if (*(val + size - 1)) {
12528 /* copy it */
12529 memcpy(arg->str.buffer, val, arg->str.field->size);
12530 /* the buffer is already NULL terminated */
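The parse-filter hunk above handles FIELD_IS_DYNAMIC strings, where the 32-bit word stored in the record is a packed descriptor: the low 16 bits are the offset into the record, the high 16 bits the size. A short sketch of unpacking that encoding; the fake record layout below is invented for the example:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Unpack an ftrace-style "dynamic array" descriptor: the low half is an
 * offset from the start of the record, the high half is the data size. */
static void decode_dynamic(uint32_t desc, uint32_t *offset, uint32_t *size)
{
	*offset = desc & 0xffff;
	*size = desc >> 16;
}

int main(void)
{
	char record[64] = { 0 };
	const char payload[] = "comm=bash";
	uint32_t desc, off, len;

	/* Build a fake record: the string lives at offset 16, and the
	 * descriptor stored at offset 8 points at it. */
	memcpy(record + 16, payload, sizeof(payload));
	desc = ((uint32_t)sizeof(payload) << 16) | 16u;
	memcpy(record + 8, &desc, sizeof(desc));

	memcpy(&desc, record + 8, sizeof(desc));
	decode_dynamic(desc, &off, &len);
	printf("offset=%u size=%u value=\"%s\"\n", off, len, record + off);
	return 0;
}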
12531 diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c
12532 index a74a48db26f5..2eb11543e2e9 100644
12533 --- a/tools/perf/arch/x86/util/header.c
12534 +++ b/tools/perf/arch/x86/util/header.c
12535 @@ -69,7 +69,7 @@ get_cpuid_str(void)
12536 {
12537 char *buf = malloc(128);
12538
12539 - if (__get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
12540 + if (buf && __get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
12541 free(buf);
12542 return NULL;
12543 }
12544 diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
12545 index 68861e81f06c..43d5f35e9074 100644
12546 --- a/tools/perf/builtin-stat.c
12547 +++ b/tools/perf/builtin-stat.c
12548 @@ -2042,11 +2042,16 @@ static int add_default_attributes(void)
12549 return 0;
12550
12551 if (transaction_run) {
12552 + struct parse_events_error errinfo;
12553 +
12554 if (pmu_have_event("cpu", "cycles-ct") &&
12555 pmu_have_event("cpu", "el-start"))
12556 - err = parse_events(evsel_list, transaction_attrs, NULL);
12557 + err = parse_events(evsel_list, transaction_attrs,
12558 + &errinfo);
12559 else
12560 - err = parse_events(evsel_list, transaction_limited_attrs, NULL);
12561 + err = parse_events(evsel_list,
12562 + transaction_limited_attrs,
12563 + &errinfo);
12564 if (err) {
12565 fprintf(stderr, "Cannot set up transaction events\n");
12566 return -1;
12567 diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
12568 index c61e012e9771..e68c866ae798 100644
12569 --- a/tools/perf/builtin-top.c
12570 +++ b/tools/perf/builtin-top.c
12571 @@ -1061,8 +1061,10 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
12572
12573 static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
12574 {
12575 - if (!strcmp(var, "top.call-graph"))
12576 - var = "call-graph.record-mode"; /* fall-through */
12577 + if (!strcmp(var, "top.call-graph")) {
12578 + var = "call-graph.record-mode";
12579 + return perf_default_config(var, value, cb);
12580 + }
12581 if (!strcmp(var, "top.children")) {
12582 symbol_conf.cumulate_callchain = perf_config_bool(var, value);
12583 return 0;
12584 diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
12585 index a5082331f246..2aabf0ae7c0d 100644
12586 --- a/tools/perf/tests/vmlinux-kallsyms.c
12587 +++ b/tools/perf/tests/vmlinux-kallsyms.c
12588 @@ -123,7 +123,7 @@ int test__vmlinux_matches_kallsyms(int subtest __maybe_unused)
12589
12590 if (pair && UM(pair->start) == mem_start) {
12591 next_pair:
12592 - if (strcmp(sym->name, pair->name) == 0) {
12593 + if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
12594 /*
12595 * kallsyms don't have the symbol end, so we
12596 * set that by using the next symbol start - 1,
12597 diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
12598 index bce80f866dd0..f55d10854565 100644
12599 --- a/tools/perf/util/evsel.c
12600 +++ b/tools/perf/util/evsel.c
12601 @@ -681,14 +681,14 @@ static void apply_config_terms(struct perf_evsel *evsel,
12602 struct perf_evsel_config_term *term;
12603 struct list_head *config_terms = &evsel->config_terms;
12604 struct perf_event_attr *attr = &evsel->attr;
12605 - struct callchain_param param;
12606 + /* callgraph default */
12607 + struct callchain_param param = {
12608 + .record_mode = callchain_param.record_mode,
12609 + };
12610 u32 dump_size = 0;
12611 int max_stack = 0;
12612 const char *callgraph_buf = NULL;
12613
12614 - /* callgraph default */
12615 - param.record_mode = callchain_param.record_mode;
12616 -
12617 list_for_each_entry(term, config_terms, list) {
12618 switch (term->type) {
12619 case PERF_EVSEL__CONFIG_TERM_PERIOD:
12620 diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
12621 index 10849a079026..ad613ea51434 100644
12622 --- a/tools/perf/util/hist.c
12623 +++ b/tools/perf/util/hist.c
12624 @@ -865,7 +865,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
12625 * cumulated only one time to prevent entries more than 100%
12626 * overhead.
12627 */
12628 - he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
12629 + he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
12630 if (he_cache == NULL)
12631 return -ENOMEM;
12632
12633 @@ -1030,8 +1030,6 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
12634 if (err)
12635 return err;
12636
12637 - iter->max_stack = max_stack_depth;
12638 -
12639 err = iter->ops->prepare_entry(iter, al);
12640 if (err)
12641 goto out;
12642 diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
12643 index a440a04a29ff..159d616e170b 100644
12644 --- a/tools/perf/util/hist.h
12645 +++ b/tools/perf/util/hist.h
12646 @@ -102,7 +102,6 @@ struct hist_entry_iter {
12647 int curr;
12648
12649 bool hide_unresolved;
12650 - int max_stack;
12651
12652 struct perf_evsel *evsel;
12653 struct perf_sample *sample;
12654 diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
12655 index a899ef81c705..76faf5bf0b32 100644
12656 --- a/tools/testing/selftests/Makefile
12657 +++ b/tools/testing/selftests/Makefile
12658 @@ -94,6 +94,7 @@ ifdef INSTALL_PATH
12659 for TARGET in $(TARGETS); do \
12660 echo "echo ; echo Running tests in $$TARGET" >> $(ALL_SCRIPT); \
12661 echo "echo ========================================" >> $(ALL_SCRIPT); \
12662 + echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
12663 echo "cd $$TARGET" >> $(ALL_SCRIPT); \
12664 make -s --no-print-directory -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
12665 echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
12666 diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
12667 new file mode 100644
12668 index 000000000000..5ba73035e1d9
12669 --- /dev/null
12670 +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
12671 @@ -0,0 +1,46 @@
12672 +#!/bin/sh
12673 +# SPDX-License-Identifier: GPL-2.0
12674 +# description: Kprobe event string type argument
12675 +
12676 +[ -f kprobe_events ] || exit_unsupported # this is configurable
12677 +
12678 +echo 0 > events/enable
12679 +echo > kprobe_events
12680 +
12681 +case `uname -m` in
12682 +x86_64)
12683 + ARG2=%si
12684 + OFFS=8
12685 +;;
12686 +i[3456]86)
12687 + ARG2=%cx
12688 + OFFS=4
12689 +;;
12690 +aarch64)
12691 + ARG2=%x1
12692 + OFFS=8
12693 +;;
12694 +arm*)
12695 + ARG2=%r1
12696 + OFFS=4
12697 +;;
12698 +*)
12699 + echo "Please implement other architecture here"
12700 + exit_untested
12701 +esac
12702 +
12703 +: "Test get argument (1)"
12704 +echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string" > kprobe_events
12705 +echo 1 > events/kprobes/testprobe/enable
12706 +! echo test >> kprobe_events
12707 +tail -n 1 trace | grep -qe "testprobe.* arg1=\"test\""
12708 +
12709 +echo 0 > events/kprobes/testprobe/enable
12710 +: "Test get argument (2)"
12711 +echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string arg2=+0(+${OFFS}(${ARG2})):string" > kprobe_events
12712 +echo 1 > events/kprobes/testprobe/enable
12713 +! echo test1 test2 >> kprobe_events
12714 +tail -n 1 trace | grep -qe "testprobe.* arg1=\"test1\" arg2=\"test2\""
12715 +
12716 +echo 0 > events/enable
12717 +echo > kprobe_events
12718 diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
12719 new file mode 100644
12720 index 000000000000..231bcd2c4eb5
12721 --- /dev/null
12722 +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
12723 @@ -0,0 +1,97 @@
12724 +#!/bin/sh
12725 +# SPDX-License-Identifier: GPL-2.0
12726 +# description: Kprobe event argument syntax
12727 +
12728 +[ -f kprobe_events ] || exit_unsupported # this is configurable
12729 +
12730 +grep "x8/16/32/64" README > /dev/null || exit_unsupported # version issue
12731 +
12732 +echo 0 > events/enable
12733 +echo > kprobe_events
12734 +
12735 +PROBEFUNC="vfs_read"
12736 +GOODREG=
12737 +BADREG=
12738 +GOODSYM="_sdata"
12739 +if ! grep -qw ${GOODSYM} /proc/kallsyms ; then
12740 + GOODSYM=$PROBEFUNC
12741 +fi
12742 +BADSYM="deaqswdefr"
12743 +SYMADDR=0x`grep -w ${GOODSYM} /proc/kallsyms | cut -f 1 -d " "`
12744 +GOODTYPE="x16"
12745 +BADTYPE="y16"
12746 +
12747 +case `uname -m` in
12748 +x86_64|i[3456]86)
12749 + GOODREG=%ax
12750 + BADREG=%ex
12751 +;;
12752 +aarch64)
12753 + GOODREG=%x0
12754 + BADREG=%ax
12755 +;;
12756 +arm*)
12757 + GOODREG=%r0
12758 + BADREG=%ax
12759 +;;
12760 +esac
12761 +
12762 +test_goodarg() # Good-args
12763 +{
12764 + while [ "$1" ]; do
12765 + echo "p ${PROBEFUNC} $1" > kprobe_events
12766 + shift 1
12767 + done;
12768 +}
12769 +
12770 +test_badarg() # Bad-args
12771 +{
12772 + while [ "$1" ]; do
12773 + ! echo "p ${PROBEFUNC} $1" > kprobe_events
12774 + shift 1
12775 + done;
12776 +}
12777 +
12778 +echo > kprobe_events
12779 +
12780 +: "Register access"
12781 +test_goodarg ${GOODREG}
12782 +test_badarg ${BADREG}
12783 +
12784 +: "Symbol access"
12785 +test_goodarg "@${GOODSYM}" "@${SYMADDR}" "@${GOODSYM}+10" "@${GOODSYM}-10"
12786 +test_badarg "@" "@${BADSYM}" "@${GOODSYM}*10" "@${GOODSYM}/10" \
12787 + "@${GOODSYM}%10" "@${GOODSYM}&10" "@${GOODSYM}|10"
12788 +
12789 +: "Stack access"
12790 +test_goodarg "\$stack" "\$stack0" "\$stack1"
12791 +test_badarg "\$stackp" "\$stack0+10" "\$stack1-10"
12792 +
12793 +: "Retval access"
12794 +echo "r ${PROBEFUNC} \$retval" > kprobe_events
12795 +! echo "p ${PROBEFUNC} \$retval" > kprobe_events
12796 +
12797 +: "Comm access"
12798 +test_goodarg "\$comm"
12799 +
12800 +: "Indirect memory access"
12801 +test_goodarg "+0(${GOODREG})" "-0(${GOODREG})" "+10(\$stack)" \
12802 + "+0(\$stack1)" "+10(@${GOODSYM}-10)" "+0(+10(+20(\$stack)))"
12803 +test_badarg "+(${GOODREG})" "(${GOODREG}+10)" "-(${GOODREG})" "(${GOODREG})" \
12804 + "+10(\$comm)" "+0(${GOODREG})+10"
12805 +
12806 +: "Name assignment"
12807 +test_goodarg "varname=${GOODREG}"
12808 +test_badarg "varname=varname2=${GOODREG}"
12809 +
12810 +: "Type syntax"
12811 +test_goodarg "${GOODREG}:${GOODTYPE}"
12812 +test_badarg "${GOODREG}::${GOODTYPE}" "${GOODREG}:${BADTYPE}" \
12813 + "${GOODTYPE}:${GOODREG}"
12814 +
12815 +: "Combination check"
12816 +
12817 +test_goodarg "\$comm:string" "+0(\$stack):string"
12818 +test_badarg "\$comm:x64" "\$stack:string" "${GOODREG}:string"
12819 +
12820 +echo > kprobe_events
12821 diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
12822 new file mode 100644
12823 index 000000000000..4fda01a08da4
12824 --- /dev/null
12825 +++ b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
12826 @@ -0,0 +1,43 @@
12827 +#!/bin/sh
12828 +# SPDX-License-Identifier: GPL-2.0
12829 +# description: Kprobe events - probe points
12830 +
12831 +[ -f kprobe_events ] || exit_unsupported # this is configurable
12832 +
12833 +TARGET_FUNC=create_trace_kprobe
12834 +
12835 +dec_addr() { # hexaddr
12836 + printf "%d" "0x"`echo $1 | tail -c 8`
12837 +}
12838 +
12839 +set_offs() { # prev target next
12840 + A1=`dec_addr $1`
12841 + A2=`dec_addr $2`
12842 + A3=`dec_addr $3`
12843 + TARGET="0x$2" # an address
12844 + PREV=`expr $A1 - $A2` # offset to previous symbol
12845 + NEXT=+`expr $A3 - $A2` # offset to next symbol
12846 + OVERFLOW=+`printf "0x%x" ${PREV}` # overflow offset to previous symbol
12847 +}
12848 +
12849 +# We have to decode symbol addresses to get correct offsets.
12850 +# If the offset is not an instruction boundary, it causes -EILSEQ.
12851 +set_offs `grep -A1 -B1 ${TARGET_FUNC} /proc/kallsyms | cut -f 1 -d " " | xargs`
12852 +
12853 +UINT_TEST=no
12854 +# printf "%x" -1 returns (unsigned long)-1.
12855 +if [ `printf "%x" -1 | wc -c` != 9 ]; then
12856 + UINT_TEST=yes
12857 +fi
12858 +
12859 +echo 0 > events/enable
12860 +echo > kprobe_events
12861 +echo "p:testprobe ${TARGET_FUNC}" > kprobe_events
12862 +echo "p:testprobe ${TARGET}" > kprobe_events
12863 +echo "p:testprobe ${TARGET_FUNC}${NEXT}" > kprobe_events
12864 +! echo "p:testprobe ${TARGET_FUNC}${PREV}" > kprobe_events
12865 +if [ "${UINT_TEST}" = yes ]; then
12866 +! echo "p:testprobe ${TARGET_FUNC}${OVERFLOW}" > kprobe_events
12867 +fi
12868 +echo > kprobe_events
12869 +clear_trace
12870 diff --git a/tools/testing/selftests/memfd/config b/tools/testing/selftests/memfd/config
12871 new file mode 100644
12872 index 000000000000..835c7f4dadcd
12873 --- /dev/null
12874 +++ b/tools/testing/selftests/memfd/config
12875 @@ -0,0 +1 @@
12876 +CONFIG_FUSE_FS=m
12877 diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
12878 index 412459369686..9b654a070e7d 100644
12879 --- a/tools/testing/selftests/net/psock_fanout.c
12880 +++ b/tools/testing/selftests/net/psock_fanout.c
12881 @@ -97,6 +97,8 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
12882
12883 static void sock_fanout_set_ebpf(int fd)
12884 {
12885 + static char log_buf[65536];
12886 +
12887 const int len_off = __builtin_offsetof(struct __sk_buff, len);
12888 struct bpf_insn prog[] = {
12889 { BPF_ALU64 | BPF_MOV | BPF_X, 6, 1, 0, 0 },
12890 @@ -109,7 +111,6 @@ static void sock_fanout_set_ebpf(int fd)
12891 { BPF_ALU | BPF_MOV | BPF_K, 0, 0, 0, 0 },
12892 { BPF_JMP | BPF_EXIT, 0, 0, 0, 0 }
12893 };
12894 - char log_buf[512];
12895 union bpf_attr attr;
12896 int pfd;
12897
12898 diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
12899 index 4a8217448f20..cad14cd0ea92 100644
12900 --- a/tools/testing/selftests/net/reuseport_bpf.c
12901 +++ b/tools/testing/selftests/net/reuseport_bpf.c
12902 @@ -21,6 +21,7 @@
12903 #include <sys/epoll.h>
12904 #include <sys/types.h>
12905 #include <sys/socket.h>
12906 +#include <sys/resource.h>
12907 #include <unistd.h>
12908
12909 #ifndef ARRAY_SIZE
12910 @@ -190,11 +191,14 @@ static void send_from(struct test_params p, uint16_t sport, char *buf,
12911 struct sockaddr * const saddr = new_any_sockaddr(p.send_family, sport);
12912 struct sockaddr * const daddr =
12913 new_loopback_sockaddr(p.send_family, p.recv_port);
12914 - const int fd = socket(p.send_family, p.protocol, 0);
12915 + const int fd = socket(p.send_family, p.protocol, 0), one = 1;
12916
12917 if (fd < 0)
12918 error(1, errno, "failed to create send socket");
12919
12920 + if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)))
12921 + error(1, errno, "failed to set reuseaddr");
12922 +
12923 if (bind(fd, saddr, sockaddr_size()))
12924 error(1, errno, "failed to bind send socket");
12925
12926 @@ -433,6 +437,21 @@ void enable_fastopen(void)
12927 }
12928 }
12929
12930 +static struct rlimit rlim_old, rlim_new;
12931 +
12932 +static __attribute__((constructor)) void main_ctor(void)
12933 +{
12934 + getrlimit(RLIMIT_MEMLOCK, &rlim_old);
12935 + rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
12936 + rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
12937 + setrlimit(RLIMIT_MEMLOCK, &rlim_new);
12938 +}
12939 +
12940 +static __attribute__((destructor)) void main_dtor(void)
12941 +{
12942 + setrlimit(RLIMIT_MEMLOCK, &rlim_old);
12943 +}
12944 +
12945 int main(void)
12946 {
12947 fprintf(stderr, "---- IPv4 UDP ----\n");
12948 diff --git a/tools/testing/selftests/powerpc/mm/subpage_prot.c b/tools/testing/selftests/powerpc/mm/subpage_prot.c
12949 index 35ade7406dcd..3ae77ba93208 100644
12950 --- a/tools/testing/selftests/powerpc/mm/subpage_prot.c
12951 +++ b/tools/testing/selftests/powerpc/mm/subpage_prot.c
12952 @@ -135,6 +135,16 @@ static int run_test(void *addr, unsigned long size)
12953 return 0;
12954 }
12955
12956 +static int syscall_available(void)
12957 +{
12958 + int rc;
12959 +
12960 + errno = 0;
12961 + rc = syscall(__NR_subpage_prot, 0, 0, 0);
12962 +
12963 + return rc == 0 || (errno != ENOENT && errno != ENOSYS);
12964 +}
12965 +
12966 int test_anon(void)
12967 {
12968 unsigned long align;
12969 @@ -145,6 +155,8 @@ int test_anon(void)
12970 void *mallocblock;
12971 unsigned long mallocsize;
12972
12973 + SKIP_IF(!syscall_available());
12974 +
12975 if (getpagesize() != 0x10000) {
12976 fprintf(stderr, "Kernel page size must be 64K!\n");
12977 return 1;
12978 @@ -180,6 +192,8 @@ int test_file(void)
12979 off_t filesize;
12980 int fd;
12981
12982 + SKIP_IF(!syscall_available());
12983 +
12984 fd = open(file_name, O_RDWR);
12985 if (fd == -1) {
12986 perror("failed to open file");
12987 diff --git a/tools/testing/selftests/pstore/config b/tools/testing/selftests/pstore/config
12988 index 6a8e5a9bfc10..d148f9f89fb6 100644
12989 --- a/tools/testing/selftests/pstore/config
12990 +++ b/tools/testing/selftests/pstore/config
12991 @@ -2,3 +2,4 @@ CONFIG_MISC_FILESYSTEMS=y
12992 CONFIG_PSTORE=y
12993 CONFIG_PSTORE_PMSG=y
12994 CONFIG_PSTORE_CONSOLE=y
12995 +CONFIG_PSTORE_RAM=m
12996 diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c
12997 index 1c12536f2081..18f523557983 100644
12998 --- a/tools/thermal/tmon/sysfs.c
12999 +++ b/tools/thermal/tmon/sysfs.c
13000 @@ -486,6 +486,7 @@ int zone_instance_to_index(int zone_inst)
13001 int update_thermal_data()
13002 {
13003 int i;
13004 + int next_thermal_record = cur_thermal_record + 1;
13005 char tz_name[256];
13006 static unsigned long samples;
13007
13008 @@ -495,9 +496,9 @@ int update_thermal_data()
13009 }
13010
13011 /* circular buffer for keeping historic data */
13012 - if (cur_thermal_record >= NR_THERMAL_RECORDS)
13013 - cur_thermal_record = 0;
13014 - gettimeofday(&trec[cur_thermal_record].tv, NULL);
13015 + if (next_thermal_record >= NR_THERMAL_RECORDS)
13016 + next_thermal_record = 0;
13017 + gettimeofday(&trec[next_thermal_record].tv, NULL);
13018 if (tmon_log) {
13019 fprintf(tmon_log, "%lu ", ++samples);
13020 fprintf(tmon_log, "%3.1f ", p_param.t_target);
13021 @@ -507,11 +508,12 @@ int update_thermal_data()
13022 snprintf(tz_name, 256, "%s/%s%d", THERMAL_SYSFS, TZONE,
13023 ptdata.tzi[i].instance);
13024 sysfs_get_ulong(tz_name, "temp",
13025 - &trec[cur_thermal_record].temp[i]);
13026 + &trec[next_thermal_record].temp[i]);
13027 if (tmon_log)
13028 fprintf(tmon_log, "%lu ",
13029 - trec[cur_thermal_record].temp[i]/1000);
13030 + trec[next_thermal_record].temp[i] / 1000);
13031 }
13032 + cur_thermal_record = next_thermal_record;
13033 for (i = 0; i < ptdata.nr_cooling_dev; i++) {
13034 char cdev_name[256];
13035 unsigned long val;
13036 diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
13037 index 9aa19652e8e8..b43138f8b862 100644
13038 --- a/tools/thermal/tmon/tmon.c
13039 +++ b/tools/thermal/tmon/tmon.c
13040 @@ -336,7 +336,6 @@ int main(int argc, char **argv)
13041 show_data_w();
13042 show_cooling_device();
13043 }
13044 - cur_thermal_record++;
13045 time_elapsed += ticktime;
13046 controller_handler(trec[0].temp[target_tz_index] / 1000,
13047 &yk);
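The two tmon hunks above fix a fill-before-publish ordering problem in the history ring: the sampler now writes the whole next slot and only then advances cur_thermal_record, instead of main() bumping the index before the data exists. A compact sketch of that pattern on a plain ring buffer; sizes and the sample source are illustrative:

#include <stdio.h>

#define NR_RECORDS 4

struct record {
	int sample;
};

static struct record ring[NR_RECORDS];
static int cur_record;     /* index of the newest fully written slot */

/* Write into the slot after the current one, then publish the index.
 * Readers that only ever look at ring[cur_record] never see a
 * half-initialized entry. */
static void push_sample(int value)
{
	int next = cur_record + 1;

	if (next >= NR_RECORDS)
		next = 0;
	ring[next].sample = value;  /* fill first... */
	cur_record = next;          /* ...publish second */
}

int main(void)
{
	for (int i = 0; i < 6; i++) {
		push_sample(100 + i);
		printf("latest slot %d holds %d\n",
		       cur_record, ring[cur_record].sample);
	}
	return 0;
}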
13048 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
13049 index eaae7252f60c..4f2a2df85b1f 100644
13050 --- a/virt/kvm/kvm_main.c
13051 +++ b/virt/kvm/kvm_main.c
13052 @@ -1466,7 +1466,8 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
13053
13054 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
13055 unsigned long addr, bool *async,
13056 - bool write_fault, kvm_pfn_t *p_pfn)
13057 + bool write_fault, bool *writable,
13058 + kvm_pfn_t *p_pfn)
13059 {
13060 unsigned long pfn;
13061 int r;
13062 @@ -1492,6 +1493,8 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
13063
13064 }
13065
13066 + if (writable)
13067 + *writable = true;
13068
13069 /*
13070 * Get a reference here because callers of *hva_to_pfn* and
13071 @@ -1557,7 +1560,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
13072 if (vma == NULL)
13073 pfn = KVM_PFN_ERR_FAULT;
13074 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
13075 - r = hva_to_pfn_remapped(vma, addr, async, write_fault, &pfn);
13076 + r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
13077 if (r == -EAGAIN)
13078 goto retry;
13079 if (r < 0)