Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0208-5.4.109-all-fixes.patch



Revision 3635 - Mon Oct 24 12:34:12 2022 UTC by niro
File size: 111120 bytes
Log message: -sync kernel patches
1 diff --git a/Makefile b/Makefile
2 index b0abe257221a7..e037662c369ba 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 108
10 +SUBLEVEL = 109
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
15 index 7788d5db65c25..ae6d07dc02832 100644
16 --- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
17 +++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
18 @@ -44,8 +44,8 @@
19 pinctrl-0 = <&pinctrl_macb0_default>;
20 phy-mode = "rmii";
21
22 - ethernet-phy@0 {
23 - reg = <0x0>;
24 + ethernet-phy@7 {
25 + reg = <0x7>;
26 interrupt-parent = <&pioA>;
27 interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
28 pinctrl-names = "default";
29 diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
30 index 337919366dc85..ec141c9852893 100644
31 --- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
32 +++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
33 @@ -177,6 +177,7 @@
34 ranges = <0x0 0x00 0x1700000 0x100000>;
35 reg = <0x00 0x1700000 0x0 0x100000>;
36 interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
37 + dma-coherent;
38
39 sec_jr0: jr@10000 {
40 compatible = "fsl,sec-v5.4-job-ring",
41 diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
42 index c084c7a4b6a6f..b611d835dc25a 100644
43 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
44 +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
45 @@ -241,6 +241,7 @@
46 ranges = <0x0 0x00 0x1700000 0x100000>;
47 reg = <0x00 0x1700000 0x0 0x100000>;
48 interrupts = <0 75 0x4>;
49 + dma-coherent;
50
51 sec_jr0: jr@10000 {
52 compatible = "fsl,sec-v5.4-job-ring",
53 diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
54 index 04d4b1b11a00a..ca087918c250a 100644
55 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
56 +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
57 @@ -244,6 +244,7 @@
58 ranges = <0x0 0x00 0x1700000 0x100000>;
59 reg = <0x00 0x1700000 0x0 0x100000>;
60 interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
61 + dma-coherent;
62
63 sec_jr0: jr@10000 {
64 compatible = "fsl,sec-v5.4-job-ring",
65 diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
66 index e6e284265f19d..58303a9ec32c4 100644
67 --- a/arch/arm64/kernel/crash_dump.c
68 +++ b/arch/arm64/kernel/crash_dump.c
69 @@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
70 ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
71 {
72 memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
73 + *ppos += count;
74 +
75 return count;
76 }
77 diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
78 index 6c6f16e409a87..0d23c00493018 100644
79 --- a/arch/ia64/include/asm/syscall.h
80 +++ b/arch/ia64/include/asm/syscall.h
81 @@ -32,7 +32,7 @@ static inline void syscall_rollback(struct task_struct *task,
82 static inline long syscall_get_error(struct task_struct *task,
83 struct pt_regs *regs)
84 {
85 - return regs->r10 == -1 ? regs->r8:0;
86 + return regs->r10 == -1 ? -regs->r8:0;
87 }
88
89 static inline long syscall_get_return_value(struct task_struct *task,
90 diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
91 index bf9c24d9ce84e..54e12b0ecebdf 100644
92 --- a/arch/ia64/kernel/ptrace.c
93 +++ b/arch/ia64/kernel/ptrace.c
94 @@ -2147,27 +2147,39 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
95 {
96 struct syscall_get_set_args *args = data;
97 struct pt_regs *pt = args->regs;
98 - unsigned long *krbs, cfm, ndirty;
99 + unsigned long *krbs, cfm, ndirty, nlocals, nouts;
100 int i, count;
101
102 if (unw_unwind_to_user(info) < 0)
103 return;
104
105 + /*
106 + * We get here via a few paths:
107 + * - break instruction: cfm is shared with caller.
108 + * syscall args are in out= regs, locals are non-empty.
109 + * - epsinstruction: cfm is set by br.call
110 + * locals don't exist.
111 + *
112 + * For both cases argguments are reachable in cfm.sof - cfm.sol.
113 + * CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ]
114 + */
115 cfm = pt->cr_ifs;
116 + nlocals = (cfm >> 7) & 0x7f; /* aka sol */
117 + nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */
118 krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
119 ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
120
121 count = 0;
122 if (in_syscall(pt))
123 - count = min_t(int, args->n, cfm & 0x7f);
124 + count = min_t(int, args->n, nouts);
125
126 + /* Iterate over outs. */
127 for (i = 0; i < count; i++) {
128 + int j = ndirty + nlocals + i + args->i;
129 if (args->rw)
130 - *ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
131 - args->args[i];
132 + *ia64_rse_skip_regs(krbs, j) = args->args[i];
133 else
134 - args->args[i] = *ia64_rse_skip_regs(krbs,
135 - ndirty + i + args->i);
136 + args->args[i] = *ia64_rse_skip_regs(krbs, j);
137 }
138
139 if (!args->rw) {
140 diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
141 index 7141ccea8c94e..a92059964579b 100644
142 --- a/arch/powerpc/include/asm/dcr-native.h
143 +++ b/arch/powerpc/include/asm/dcr-native.h
144 @@ -53,8 +53,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
145 #define mfdcr(rn) \
146 ({unsigned int rval; \
147 if (__builtin_constant_p(rn) && rn < 1024) \
148 - asm volatile("mfdcr %0," __stringify(rn) \
149 - : "=r" (rval)); \
150 + asm volatile("mfdcr %0, %1" : "=r" (rval) \
151 + : "n" (rn)); \
152 else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
153 rval = mfdcrx(rn); \
154 else \
155 @@ -64,8 +64,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
156 #define mtdcr(rn, v) \
157 do { \
158 if (__builtin_constant_p(rn) && rn < 1024) \
159 - asm volatile("mtdcr " __stringify(rn) ",%0" \
160 - : : "r" (v)); \
161 + asm volatile("mtdcr %0, %1" \
162 + : : "n" (rn), "r" (v)); \
163 else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
164 mtdcrx(rn, v); \
165 else \
166 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
167 index 27778b65a965e..f2b22c496fb97 100644
168 --- a/arch/sparc/kernel/traps_64.c
169 +++ b/arch/sparc/kernel/traps_64.c
170 @@ -275,14 +275,13 @@ bool is_no_fault_exception(struct pt_regs *regs)
171 asi = (regs->tstate >> 24); /* saved %asi */
172 else
173 asi = (insn >> 5); /* immediate asi */
174 - if ((asi & 0xf2) == ASI_PNF) {
175 - if (insn & 0x1000000) { /* op3[5:4]=3 */
176 - handle_ldf_stq(insn, regs);
177 - return true;
178 - } else if (insn & 0x200000) { /* op3[2], stores */
179 + if ((asi & 0xf6) == ASI_PNF) {
180 + if (insn & 0x200000) /* op3[2], stores */
181 return false;
182 - }
183 - handle_ld_nf(insn, regs);
184 + if (insn & 0x1000000) /* op3[5:4]=3 (fp) */
185 + handle_ldf_stq(insn, regs);
186 + else
187 + handle_ld_nf(insn, regs);
188 return true;
189 }
190 }
191 diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
192 index dfa01bcdc3694..7b558939b89c1 100644
193 --- a/arch/x86/mm/mem_encrypt.c
194 +++ b/arch/x86/mm/mem_encrypt.c
195 @@ -229,7 +229,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
196 if (pgprot_val(old_prot) == pgprot_val(new_prot))
197 return;
198
199 - pa = pfn << page_level_shift(level);
200 + pa = pfn << PAGE_SHIFT;
201 size = page_level_size(level);
202
203 /*
204 diff --git a/block/blk-merge.c b/block/blk-merge.c
205 index 86c4c1ef87429..03959bfe961cf 100644
206 --- a/block/blk-merge.c
207 +++ b/block/blk-merge.c
208 @@ -370,6 +370,14 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
209 switch (bio_op(rq->bio)) {
210 case REQ_OP_DISCARD:
211 case REQ_OP_SECURE_ERASE:
212 + if (queue_max_discard_segments(rq->q) > 1) {
213 + struct bio *bio = rq->bio;
214 +
215 + for_each_bio(bio)
216 + nr_phys_segs++;
217 + return nr_phys_segs;
218 + }
219 + return 1;
220 case REQ_OP_WRITE_ZEROES:
221 return 0;
222 case REQ_OP_WRITE_SAME:
223 diff --git a/block/genhd.c b/block/genhd.c
224 index 604f0a2cbc9a0..2f6f341a8fbb7 100644
225 --- a/block/genhd.c
226 +++ b/block/genhd.c
227 @@ -637,10 +637,8 @@ static void register_disk(struct device *parent, struct gendisk *disk,
228 disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
229 disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
230
231 - if (disk->flags & GENHD_FL_HIDDEN) {
232 - dev_set_uevent_suppress(ddev, 0);
233 + if (disk->flags & GENHD_FL_HIDDEN)
234 return;
235 - }
236
237 /* No minors to use for partitions */
238 if (!disk_part_scan_enabled(disk))
239 diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
240 index 1db2e1bb72ba6..159c422601bc4 100644
241 --- a/drivers/acpi/internal.h
242 +++ b/drivers/acpi/internal.h
243 @@ -9,6 +9,8 @@
244 #ifndef _ACPI_INTERNAL_H_
245 #define _ACPI_INTERNAL_H_
246
247 +#include <linux/idr.h>
248 +
249 #define PREFIX "ACPI: "
250
251 int early_acpi_osi_init(void);
252 @@ -96,9 +98,11 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
253
254 extern struct list_head acpi_bus_id_list;
255
256 +#define ACPI_MAX_DEVICE_INSTANCES 4096
257 +
258 struct acpi_device_bus_id {
259 const char *bus_id;
260 - unsigned int instance_no;
261 + struct ida instance_ida;
262 struct list_head node;
263 };
264
265 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
266 index 8887a72712d4b..dbb5919f23e2d 100644
267 --- a/drivers/acpi/scan.c
268 +++ b/drivers/acpi/scan.c
269 @@ -483,9 +483,8 @@ static void acpi_device_del(struct acpi_device *device)
270 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
271 if (!strcmp(acpi_device_bus_id->bus_id,
272 acpi_device_hid(device))) {
273 - if (acpi_device_bus_id->instance_no > 0)
274 - acpi_device_bus_id->instance_no--;
275 - else {
276 + ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no);
277 + if (ida_is_empty(&acpi_device_bus_id->instance_ida)) {
278 list_del(&acpi_device_bus_id->node);
279 kfree_const(acpi_device_bus_id->bus_id);
280 kfree(acpi_device_bus_id);
281 @@ -624,12 +623,38 @@ void acpi_bus_put_acpi_device(struct acpi_device *adev)
282 put_device(&adev->dev);
283 }
284
285 +static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
286 +{
287 + struct acpi_device_bus_id *acpi_device_bus_id;
288 +
289 + /* Find suitable bus_id and instance number in acpi_bus_id_list. */
290 + list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
291 + if (!strcmp(acpi_device_bus_id->bus_id, dev_id))
292 + return acpi_device_bus_id;
293 + }
294 + return NULL;
295 +}
296 +
297 +static int acpi_device_set_name(struct acpi_device *device,
298 + struct acpi_device_bus_id *acpi_device_bus_id)
299 +{
300 + struct ida *instance_ida = &acpi_device_bus_id->instance_ida;
301 + int result;
302 +
303 + result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL);
304 + if (result < 0)
305 + return result;
306 +
307 + device->pnp.instance_no = result;
308 + dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result);
309 + return 0;
310 +}
311 +
312 int acpi_device_add(struct acpi_device *device,
313 void (*release)(struct device *))
314 {
315 + struct acpi_device_bus_id *acpi_device_bus_id;
316 int result;
317 - struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
318 - int found = 0;
319
320 if (device->handle) {
321 acpi_status status;
322 @@ -655,41 +680,38 @@ int acpi_device_add(struct acpi_device *device,
323 INIT_LIST_HEAD(&device->del_list);
324 mutex_init(&device->physical_node_lock);
325
326 - new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
327 - if (!new_bus_id) {
328 - pr_err(PREFIX "Memory allocation error\n");
329 - result = -ENOMEM;
330 - goto err_detach;
331 - }
332 -
333 mutex_lock(&acpi_device_lock);
334 - /*
335 - * Find suitable bus_id and instance number in acpi_bus_id_list
336 - * If failed, create one and link it into acpi_bus_id_list
337 - */
338 - list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
339 - if (!strcmp(acpi_device_bus_id->bus_id,
340 - acpi_device_hid(device))) {
341 - acpi_device_bus_id->instance_no++;
342 - found = 1;
343 - kfree(new_bus_id);
344 - break;
345 +
346 + acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device));
347 + if (acpi_device_bus_id) {
348 + result = acpi_device_set_name(device, acpi_device_bus_id);
349 + if (result)
350 + goto err_unlock;
351 + } else {
352 + acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id),
353 + GFP_KERNEL);
354 + if (!acpi_device_bus_id) {
355 + result = -ENOMEM;
356 + goto err_unlock;
357 }
358 - }
359 - if (!found) {
360 - acpi_device_bus_id = new_bus_id;
361 acpi_device_bus_id->bus_id =
362 kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
363 if (!acpi_device_bus_id->bus_id) {
364 - pr_err(PREFIX "Memory allocation error for bus id\n");
365 + kfree(acpi_device_bus_id);
366 result = -ENOMEM;
367 - goto err_free_new_bus_id;
368 + goto err_unlock;
369 + }
370 +
371 + ida_init(&acpi_device_bus_id->instance_ida);
372 +
373 + result = acpi_device_set_name(device, acpi_device_bus_id);
374 + if (result) {
375 + kfree(acpi_device_bus_id);
376 + goto err_unlock;
377 }
378
379 - acpi_device_bus_id->instance_no = 0;
380 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
381 }
382 - dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
383
384 if (device->parent)
385 list_add_tail(&device->node, &device->parent->children);
386 @@ -721,13 +743,9 @@ int acpi_device_add(struct acpi_device *device,
387 list_del(&device->node);
388 list_del(&device->wakeup_list);
389
390 - err_free_new_bus_id:
391 - if (!found)
392 - kfree(new_bus_id);
393 -
394 + err_unlock:
395 mutex_unlock(&acpi_device_lock);
396
397 - err_detach:
398 acpi_detach_data(device->handle, acpi_scan_drop_device);
399 return result;
400 }
401 diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
402 index 301ffe5b8feb0..e7978d983b263 100644
403 --- a/drivers/acpi/video_detect.c
404 +++ b/drivers/acpi/video_detect.c
405 @@ -150,6 +150,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
406 },
407 },
408 {
409 + .callback = video_detect_force_vendor,
410 .ident = "Sony VPCEH3U1E",
411 .matches = {
412 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
413 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
414 index bedaebd5a4956..de52428b8833d 100644
415 --- a/drivers/atm/eni.c
416 +++ b/drivers/atm/eni.c
417 @@ -2281,7 +2281,8 @@ out:
418 return rc;
419
420 err_eni_release:
421 - eni_do_release(dev);
422 + dev->phy = NULL;
423 + iounmap(ENI_DEV(dev)->ioaddr);
424 err_unregister:
425 atm_dev_deregister(dev);
426 err_free_consistent:
427 diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
428 index 63871859e6e8e..52c2878b755db 100644
429 --- a/drivers/atm/idt77105.c
430 +++ b/drivers/atm/idt77105.c
431 @@ -262,7 +262,7 @@ static int idt77105_start(struct atm_dev *dev)
432 {
433 unsigned long flags;
434
435 - if (!(dev->dev_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL)))
436 + if (!(dev->phy_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL)))
437 return -ENOMEM;
438 PRIV(dev)->dev = dev;
439 spin_lock_irqsave(&idt77105_priv_lock, flags);
440 @@ -337,7 +337,7 @@ static int idt77105_stop(struct atm_dev *dev)
441 else
442 idt77105_all = walk->next;
443 dev->phy = NULL;
444 - dev->dev_data = NULL;
445 + dev->phy_data = NULL;
446 kfree(walk);
447 break;
448 }
449 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
450 index 645a6bc1df888..c6b38112bcf4f 100644
451 --- a/drivers/atm/lanai.c
452 +++ b/drivers/atm/lanai.c
453 @@ -2234,6 +2234,7 @@ static int lanai_dev_open(struct atm_dev *atmdev)
454 conf1_write(lanai);
455 #endif
456 iounmap(lanai->base);
457 + lanai->base = NULL;
458 error_pci:
459 pci_disable_device(lanai->pci);
460 error:
461 @@ -2246,6 +2247,8 @@ static int lanai_dev_open(struct atm_dev *atmdev)
462 static void lanai_dev_close(struct atm_dev *atmdev)
463 {
464 struct lanai_dev *lanai = (struct lanai_dev *) atmdev->dev_data;
465 + if (lanai->base==NULL)
466 + return;
467 printk(KERN_INFO DEV_LABEL "(itf %d): shutting down interface\n",
468 lanai->number);
469 lanai_timed_poll_stop(lanai);
470 @@ -2555,7 +2558,7 @@ static int lanai_init_one(struct pci_dev *pci,
471 struct atm_dev *atmdev;
472 int result;
473
474 - lanai = kmalloc(sizeof(*lanai), GFP_KERNEL);
475 + lanai = kzalloc(sizeof(*lanai), GFP_KERNEL);
476 if (lanai == NULL) {
477 printk(KERN_ERR DEV_LABEL
478 ": couldn't allocate dev_data structure!\n");
479 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
480 index 7850758b5bb82..239852d855589 100644
481 --- a/drivers/atm/uPD98402.c
482 +++ b/drivers/atm/uPD98402.c
483 @@ -211,7 +211,7 @@ static void uPD98402_int(struct atm_dev *dev)
484 static int uPD98402_start(struct atm_dev *dev)
485 {
486 DPRINTK("phy_start\n");
487 - if (!(dev->dev_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
488 + if (!(dev->phy_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
489 return -ENOMEM;
490 spin_lock_init(&PRIV(dev)->lock);
491 memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats));
492 diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
493 index 137a7ba053d78..e0c4ef06ca917 100644
494 --- a/drivers/base/power/runtime.c
495 +++ b/drivers/base/power/runtime.c
496 @@ -305,7 +305,7 @@ static int rpm_get_suppliers(struct device *dev)
497 return 0;
498 }
499
500 -static void rpm_put_suppliers(struct device *dev)
501 +static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
502 {
503 struct device_link *link;
504
505 @@ -313,10 +313,30 @@ static void rpm_put_suppliers(struct device *dev)
506 device_links_read_lock_held()) {
507
508 while (refcount_dec_not_one(&link->rpm_active))
509 - pm_runtime_put(link->supplier);
510 + pm_runtime_put_noidle(link->supplier);
511 +
512 + if (try_to_suspend)
513 + pm_request_idle(link->supplier);
514 }
515 }
516
517 +static void rpm_put_suppliers(struct device *dev)
518 +{
519 + __rpm_put_suppliers(dev, true);
520 +}
521 +
522 +static void rpm_suspend_suppliers(struct device *dev)
523 +{
524 + struct device_link *link;
525 + int idx = device_links_read_lock();
526 +
527 + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
528 + device_links_read_lock_held())
529 + pm_request_idle(link->supplier);
530 +
531 + device_links_read_unlock(idx);
532 +}
533 +
534 /**
535 * __rpm_callback - Run a given runtime PM callback for a given device.
536 * @cb: Runtime PM callback to run.
537 @@ -344,8 +364,10 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
538 idx = device_links_read_lock();
539
540 retval = rpm_get_suppliers(dev);
541 - if (retval)
542 + if (retval) {
543 + rpm_put_suppliers(dev);
544 goto fail;
545 + }
546
547 device_links_read_unlock(idx);
548 }
549 @@ -368,9 +390,9 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
550 || (dev->power.runtime_status == RPM_RESUMING && retval))) {
551 idx = device_links_read_lock();
552
553 - fail:
554 - rpm_put_suppliers(dev);
555 + __rpm_put_suppliers(dev, false);
556
557 +fail:
558 device_links_read_unlock(idx);
559 }
560
561 @@ -644,8 +666,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
562 goto out;
563 }
564
565 + if (dev->power.irq_safe)
566 + goto out;
567 +
568 /* Maybe the parent is now able to suspend. */
569 - if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
570 + if (parent && !parent->power.ignore_children) {
571 spin_unlock(&dev->power.lock);
572
573 spin_lock(&parent->power.lock);
574 @@ -654,6 +679,14 @@ static int rpm_suspend(struct device *dev, int rpmflags)
575
576 spin_lock(&dev->power.lock);
577 }
578 + /* Maybe the suppliers are now able to suspend. */
579 + if (dev->power.links_count > 0) {
580 + spin_unlock_irq(&dev->power.lock);
581 +
582 + rpm_suspend_suppliers(dev);
583 +
584 + spin_lock_irq(&dev->power.lock);
585 + }
586
587 out:
588 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
589 diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
590 index 208f3eea3641f..d98cfd3b64ff0 100644
591 --- a/drivers/block/xen-blkback/blkback.c
592 +++ b/drivers/block/xen-blkback/blkback.c
593 @@ -944,7 +944,7 @@ next:
594 out:
595 for (i = last_map; i < num; i++) {
596 /* Don't zap current batch's valid persistent grants. */
597 - if(i >= last_map + segs_to_map)
598 + if(i >= map_until)
599 pages[i]->persistent_gnt = NULL;
600 pages[i]->handle = BLKBACK_INVALID_HANDLE;
601 }
602 diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
603 index b040447575adc..dcfb32ee5cb60 100644
604 --- a/drivers/bus/omap_l3_noc.c
605 +++ b/drivers/bus/omap_l3_noc.c
606 @@ -285,7 +285,7 @@ static int omap_l3_probe(struct platform_device *pdev)
607 */
608 l3->debug_irq = platform_get_irq(pdev, 0);
609 ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
610 - 0x0, "l3-dbg-irq", l3);
611 + IRQF_NO_THREAD, "l3-dbg-irq", l3);
612 if (ret) {
613 dev_err(l3->dev, "request_irq failed for %d\n",
614 l3->debug_irq);
615 @@ -294,7 +294,7 @@ static int omap_l3_probe(struct platform_device *pdev)
616
617 l3->app_irq = platform_get_irq(pdev, 1);
618 ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
619 - 0x0, "l3-app-irq", l3);
620 + IRQF_NO_THREAD, "l3-app-irq", l3);
621 if (ret)
622 dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
623
624 diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
625 index bca8d1f47fd2c..1200842c3da42 100644
626 --- a/drivers/cpufreq/cpufreq-dt-platdev.c
627 +++ b/drivers/cpufreq/cpufreq-dt-platdev.c
628 @@ -103,6 +103,8 @@ static const struct of_device_id whitelist[] __initconst = {
629 static const struct of_device_id blacklist[] __initconst = {
630 { .compatible = "allwinner,sun50i-h6", },
631
632 + { .compatible = "arm,vexpress", },
633 +
634 { .compatible = "calxeda,highbank", },
635 { .compatible = "calxeda,ecx-2000", },
636
637 diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
638 index b2e186047014f..66dcab6ab26dd 100644
639 --- a/drivers/gpio/gpiolib-acpi.c
640 +++ b/drivers/gpio/gpiolib-acpi.c
641 @@ -174,7 +174,7 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
642 int ret, value;
643
644 ret = request_threaded_irq(event->irq, NULL, event->handler,
645 - event->irqflags, "ACPI:Event", event);
646 + event->irqflags | IRQF_ONESHOT, "ACPI:Event", event);
647 if (ret) {
648 dev_err(acpi_gpio->chip->parent,
649 "Failed to setup interrupt handler for %d\n",
650 diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
651 index e67c194c2acad..649f17dfcf459 100644
652 --- a/drivers/gpu/drm/Kconfig
653 +++ b/drivers/gpu/drm/Kconfig
654 @@ -206,6 +206,7 @@ source "drivers/gpu/drm/arm/Kconfig"
655 config DRM_RADEON
656 tristate "ATI Radeon"
657 depends on DRM && PCI && MMU
658 + depends on AGP || !AGP
659 select FW_LOADER
660 select DRM_KMS_HELPER
661 select DRM_TTM
662 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
663 index eaa5e7b7c19d6..fd94a17fb2c6d 100644
664 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
665 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
666 @@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
667 size = mode_cmd->pitches[0] * height;
668 aligned_size = ALIGN(size, PAGE_SIZE);
669 ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags,
670 - ttm_bo_type_kernel, NULL, &gobj);
671 + ttm_bo_type_device, NULL, &gobj);
672 if (ret) {
673 pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
674 return -ENOMEM;
675 diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
676 index f63cbbee7b337..11a4c4029a902 100644
677 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
678 +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
679 @@ -257,7 +257,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
680 .num_banks = 8,
681 .num_chans = 4,
682 .vmm_page_size_bytes = 4096,
683 - .dram_clock_change_latency_us = 11.72,
684 + .dram_clock_change_latency_us = 23.84,
685 .return_bus_width_bytes = 64,
686 .dispclk_dppclk_vco_speed_mhz = 3600,
687 .xfc_bus_transport_time_us = 4,
688 diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
689 index 8d9d86c76a4e9..896d6f95a9604 100644
690 --- a/drivers/gpu/drm/msm/msm_drv.c
691 +++ b/drivers/gpu/drm/msm/msm_drv.c
692 @@ -1326,6 +1326,10 @@ static int msm_pdev_remove(struct platform_device *pdev)
693 static void msm_pdev_shutdown(struct platform_device *pdev)
694 {
695 struct drm_device *drm = platform_get_drvdata(pdev);
696 + struct msm_drm_private *priv = drm ? drm->dev_private : NULL;
697 +
698 + if (!priv || !priv->kms)
699 + return;
700
701 drm_atomic_helper_shutdown(drm);
702 }
703 diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
704 index 30e08bcc9afb5..3c78f8c32d12b 100644
705 --- a/drivers/infiniband/hw/cxgb4/cm.c
706 +++ b/drivers/infiniband/hw/cxgb4/cm.c
707 @@ -3610,13 +3610,13 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
708 ep->com.local_addr.ss_family == AF_INET) {
709 err = cxgb4_remove_server_filter(
710 ep->com.dev->rdev.lldi.ports[0], ep->stid,
711 - ep->com.dev->rdev.lldi.rxq_ids[0], 0);
712 + ep->com.dev->rdev.lldi.rxq_ids[0], false);
713 } else {
714 struct sockaddr_in6 *sin6;
715 c4iw_init_wr_wait(ep->com.wr_waitp);
716 err = cxgb4_remove_server(
717 ep->com.dev->rdev.lldi.ports[0], ep->stid,
718 - ep->com.dev->rdev.lldi.rxq_ids[0], 0);
719 + ep->com.dev->rdev.lldi.rxq_ids[0], true);
720 if (err)
721 goto done;
722 err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
723 diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c
724 index 6d05cefe9d795..02a82723a57ab 100644
725 --- a/drivers/irqchip/irq-ingenic-tcu.c
726 +++ b/drivers/irqchip/irq-ingenic-tcu.c
727 @@ -179,4 +179,5 @@ err_free_tcu:
728 }
729 IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init);
730 IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init);
731 +IRQCHIP_DECLARE(jz4760_tcu_irq, "ingenic,jz4760-tcu", ingenic_tcu_irq_init);
732 IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init);
733 diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
734 index dda512dfe2c17..31bc11f15bfa4 100644
735 --- a/drivers/irqchip/irq-ingenic.c
736 +++ b/drivers/irqchip/irq-ingenic.c
737 @@ -168,6 +168,7 @@ static int __init intc_2chip_of_init(struct device_node *node,
738 {
739 return ingenic_intc_of_init(node, 2);
740 }
741 +IRQCHIP_DECLARE(jz4760_intc, "ingenic,jz4760-intc", intc_2chip_of_init);
742 IRQCHIP_DECLARE(jz4770_intc, "ingenic,jz4770-intc", intc_2chip_of_init);
743 IRQCHIP_DECLARE(jz4775_intc, "ingenic,jz4775-intc", intc_2chip_of_init);
744 IRQCHIP_DECLARE(jz4780_intc, "ingenic,jz4780-intc", intc_2chip_of_init);
745 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
746 index 1c5133f71af39..3f15d8dc2b71f 100644
747 --- a/drivers/md/dm-ioctl.c
748 +++ b/drivers/md/dm-ioctl.c
749 @@ -529,7 +529,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
750 * Grab our output buffer.
751 */
752 nl = orig_nl = get_result_buffer(param, param_size, &len);
753 - if (len < needed) {
754 + if (len < needed || len < sizeof(nl->dev)) {
755 param->flags |= DM_BUFFER_FULL_FLAG;
756 goto out;
757 }
758 diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
759 index 2aeb922e2365c..711f101447e3e 100644
760 --- a/drivers/md/dm-verity-target.c
761 +++ b/drivers/md/dm-verity-target.c
762 @@ -33,7 +33,7 @@
763 #define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
764 #define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
765
766 -#define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC + \
767 +#define DM_VERITY_OPTS_MAX (3 + DM_VERITY_OPTS_FEC + \
768 DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
769
770 static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
771 diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
772 index 3486bf33474d9..e3d943c65419b 100644
773 --- a/drivers/misc/habanalabs/device.c
774 +++ b/drivers/misc/habanalabs/device.c
775 @@ -108,6 +108,8 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
776 list_del(&hpriv->dev_node);
777 mutex_unlock(&hdev->fpriv_list_lock);
778
779 + put_pid(hpriv->taskpid);
780 +
781 kfree(hpriv);
782
783 return 0;
784 diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
785 index 8e9f5620c9a21..f14e739ba3f45 100644
786 --- a/drivers/net/can/c_can/c_can.c
787 +++ b/drivers/net/can/c_can/c_can.c
788 @@ -212,18 +212,6 @@ static const struct can_bittiming_const c_can_bittiming_const = {
789 .brp_inc = 1,
790 };
791
792 -static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
793 -{
794 - if (priv->device)
795 - pm_runtime_enable(priv->device);
796 -}
797 -
798 -static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
799 -{
800 - if (priv->device)
801 - pm_runtime_disable(priv->device);
802 -}
803 -
804 static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
805 {
806 if (priv->device)
807 @@ -1334,7 +1322,6 @@ static const struct net_device_ops c_can_netdev_ops = {
808
809 int register_c_can_dev(struct net_device *dev)
810 {
811 - struct c_can_priv *priv = netdev_priv(dev);
812 int err;
813
814 /* Deactivate pins to prevent DRA7 DCAN IP from being
815 @@ -1344,28 +1331,19 @@ int register_c_can_dev(struct net_device *dev)
816 */
817 pinctrl_pm_select_sleep_state(dev->dev.parent);
818
819 - c_can_pm_runtime_enable(priv);
820 -
821 dev->flags |= IFF_ECHO; /* we support local echo */
822 dev->netdev_ops = &c_can_netdev_ops;
823
824 err = register_candev(dev);
825 - if (err)
826 - c_can_pm_runtime_disable(priv);
827 - else
828 + if (!err)
829 devm_can_led_init(dev);
830 -
831 return err;
832 }
833 EXPORT_SYMBOL_GPL(register_c_can_dev);
834
835 void unregister_c_can_dev(struct net_device *dev)
836 {
837 - struct c_can_priv *priv = netdev_priv(dev);
838 -
839 unregister_candev(dev);
840 -
841 - c_can_pm_runtime_disable(priv);
842 }
843 EXPORT_SYMBOL_GPL(unregister_c_can_dev);
844
845 diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
846 index 406b4847e5dc3..7efb60b508762 100644
847 --- a/drivers/net/can/c_can/c_can_pci.c
848 +++ b/drivers/net/can/c_can/c_can_pci.c
849 @@ -239,12 +239,13 @@ static void c_can_pci_remove(struct pci_dev *pdev)
850 {
851 struct net_device *dev = pci_get_drvdata(pdev);
852 struct c_can_priv *priv = netdev_priv(dev);
853 + void __iomem *addr = priv->base;
854
855 unregister_c_can_dev(dev);
856
857 free_c_can_dev(dev);
858
859 - pci_iounmap(pdev, priv->base);
860 + pci_iounmap(pdev, addr);
861 pci_disable_msi(pdev);
862 pci_clear_master(pdev);
863 pci_release_regions(pdev);
864 diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
865 index b5145a7f874c2..f2b0408ce87d1 100644
866 --- a/drivers/net/can/c_can/c_can_platform.c
867 +++ b/drivers/net/can/c_can/c_can_platform.c
868 @@ -29,6 +29,7 @@
869 #include <linux/list.h>
870 #include <linux/io.h>
871 #include <linux/platform_device.h>
872 +#include <linux/pm_runtime.h>
873 #include <linux/clk.h>
874 #include <linux/of.h>
875 #include <linux/of_device.h>
876 @@ -385,6 +386,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
877 platform_set_drvdata(pdev, dev);
878 SET_NETDEV_DEV(dev, &pdev->dev);
879
880 + pm_runtime_enable(priv->device);
881 ret = register_c_can_dev(dev);
882 if (ret) {
883 dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
884 @@ -397,6 +399,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
885 return 0;
886
887 exit_free_device:
888 + pm_runtime_disable(priv->device);
889 free_c_can_dev(dev);
890 exit:
891 dev_err(&pdev->dev, "probe failed\n");
892 @@ -407,9 +410,10 @@ exit:
893 static int c_can_plat_remove(struct platform_device *pdev)
894 {
895 struct net_device *dev = platform_get_drvdata(pdev);
896 + struct c_can_priv *priv = netdev_priv(dev);
897
898 unregister_c_can_dev(dev);
899 -
900 + pm_runtime_disable(priv->device);
901 free_c_can_dev(dev);
902
903 return 0;
904 diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
905 index 2ae9feb99a07d..1e0c1a05df82d 100644
906 --- a/drivers/net/can/dev.c
907 +++ b/drivers/net/can/dev.c
908 @@ -1226,6 +1226,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head)
909
910 static struct rtnl_link_ops can_link_ops __read_mostly = {
911 .kind = "can",
912 + .netns_refund = true,
913 .maxtype = IFLA_CAN_MAX,
914 .policy = can_policy,
915 .setup = can_setup,
916 diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
917 index b6d00dfa8b8f6..7ec15cb356c01 100644
918 --- a/drivers/net/can/flexcan.c
919 +++ b/drivers/net/can/flexcan.c
920 @@ -544,9 +544,15 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
921 static int flexcan_chip_freeze(struct flexcan_priv *priv)
922 {
923 struct flexcan_regs __iomem *regs = priv->regs;
924 - unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
925 + unsigned int timeout;
926 + u32 bitrate = priv->can.bittiming.bitrate;
927 u32 reg;
928
929 + if (bitrate)
930 + timeout = 1000 * 1000 * 10 / bitrate;
931 + else
932 + timeout = FLEXCAN_TIMEOUT_US / 10;
933 +
934 reg = priv->read(&regs->mcr);
935 reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT;
936 priv->write(reg, &regs->mcr);
937 diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
938 index 72acd1ba162d2..e7a26ec9bdc11 100644
939 --- a/drivers/net/can/kvaser_pciefd.c
940 +++ b/drivers/net/can/kvaser_pciefd.c
941 @@ -57,6 +57,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
942 #define KVASER_PCIEFD_KCAN_STAT_REG 0x418
943 #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
944 #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
945 +#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
946 #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
947 #define KVASER_PCIEFD_KCAN_PWM_REG 0x430
948 /* Loopback control register */
949 @@ -947,6 +948,9 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
950 timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
951 0);
952
953 + /* Disable Bus load reporting */
954 + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
955 +
956 tx_npackets = ioread32(can->reg_base +
957 KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
958 if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
959 diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
960 index 8a842545e3f69..b2224113987c7 100644
961 --- a/drivers/net/can/m_can/m_can.c
962 +++ b/drivers/net/can/m_can/m_can.c
963 @@ -501,9 +501,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
964 }
965
966 while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
967 - if (rxfs & RXFS_RFL)
968 - netdev_warn(dev, "Rx FIFO 0 Message Lost\n");
969 -
970 m_can_read_fifo(dev, rxfs);
971
972 quota--;
973 @@ -842,7 +839,7 @@ static int m_can_rx_peripheral(struct net_device *dev)
974 {
975 struct m_can_classdev *cdev = netdev_priv(dev);
976
977 - m_can_rx_handler(dev, 1);
978 + m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT);
979
980 m_can_enable_all_interrupts(cdev);
981
982 diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
983 index f35757b63ea78..e78b683f73052 100644
984 --- a/drivers/net/dsa/b53/b53_common.c
985 +++ b/drivers/net/dsa/b53/b53_common.c
986 @@ -996,13 +996,6 @@ static int b53_setup(struct dsa_switch *ds)
987 b53_disable_port(ds, port);
988 }
989
990 - /* Let DSA handle the case were multiple bridges span the same switch
991 - * device and different VLAN awareness settings are requested, which
992 - * would be breaking filtering semantics for any of the other bridge
993 - * devices. (not hardware supported)
994 - */
995 - ds->vlan_filtering_is_global = true;
996 -
997 return ret;
998 }
999
1000 @@ -2418,6 +2411,13 @@ struct b53_device *b53_switch_alloc(struct device *base,
1001 dev->priv = priv;
1002 dev->ops = ops;
1003 ds->ops = &b53_switch_ops;
1004 + /* Let DSA handle the case were multiple bridges span the same switch
1005 + * device and different VLAN awareness settings are requested, which
1006 + * would be breaking filtering semantics for any of the other bridge
1007 + * devices. (not hardware supported)
1008 + */
1009 + ds->vlan_filtering_is_global = true;
1010 +
1011 mutex_init(&dev->reg_mutex);
1012 mutex_init(&dev->stats_mutex);
1013
1014 diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
1015 index ca425c15953b1..0ee1c0a7b165b 100644
1016 --- a/drivers/net/dsa/bcm_sf2.c
1017 +++ b/drivers/net/dsa/bcm_sf2.c
1018 @@ -479,8 +479,10 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
1019 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
1020 * the REG_PHY_REVISION register layout is.
1021 */
1022 -
1023 - return priv->hw_params.gphy_rev;
1024 + if (priv->int_phy_mask & BIT(port))
1025 + return priv->hw_params.gphy_rev;
1026 + else
1027 + return 0;
1028 }
1029
1030 static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
1031 diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
1032 index 0928bec79fe4b..4b958681d66e7 100644
1033 --- a/drivers/net/ethernet/davicom/dm9000.c
1034 +++ b/drivers/net/ethernet/davicom/dm9000.c
1035 @@ -1512,7 +1512,7 @@ dm9000_probe(struct platform_device *pdev)
1036 goto out;
1037 }
1038
1039 - db->irq_wake = platform_get_irq(pdev, 1);
1040 + db->irq_wake = platform_get_irq_optional(pdev, 1);
1041 if (db->irq_wake >= 0) {
1042 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1043
1044 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
1045 index 1aea22d2540fb..4050f81f788c3 100644
1046 --- a/drivers/net/ethernet/faraday/ftgmac100.c
1047 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
1048 @@ -1307,6 +1307,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
1049 */
1050 if (unlikely(priv->need_mac_restart)) {
1051 ftgmac100_start_hw(priv);
1052 + priv->need_mac_restart = false;
1053
1054 /* Re-enable "bad" interrupts */
1055 iowrite32(FTGMAC100_INT_BAD,
1056 diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
1057 index 945643c026155..49fad118988bc 100644
1058 --- a/drivers/net/ethernet/freescale/fec_ptp.c
1059 +++ b/drivers/net/ethernet/freescale/fec_ptp.c
1060 @@ -382,9 +382,16 @@ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
1061 u64 ns;
1062 unsigned long flags;
1063
1064 + mutex_lock(&adapter->ptp_clk_mutex);
1065 + /* Check the ptp clock */
1066 + if (!adapter->ptp_clk_on) {
1067 + mutex_unlock(&adapter->ptp_clk_mutex);
1068 + return -EINVAL;
1069 + }
1070 spin_lock_irqsave(&adapter->tmreg_lock, flags);
1071 ns = timecounter_read(&adapter->tc);
1072 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
1073 + mutex_unlock(&adapter->ptp_clk_mutex);
1074
1075 *ts = ns_to_timespec64(ns);
1076
1077 diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
1078 index 5cb58ab1eec97..a8959a092344f 100644
1079 --- a/drivers/net/ethernet/freescale/gianfar.c
1080 +++ b/drivers/net/ethernet/freescale/gianfar.c
1081 @@ -2388,6 +2388,10 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
1082 if (lstatus & BD_LFLAG(RXBD_LAST))
1083 size -= skb->len;
1084
1085 + WARN(size < 0, "gianfar: rx fragment size underflow");
1086 + if (size < 0)
1087 + return false;
1088 +
1089 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1090 rxb->page_offset + RXBUF_ALIGNMENT,
1091 size, GFAR_RXB_TRUESIZE);
1092 @@ -2550,6 +2554,17 @@ static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
1093 if (lstatus & BD_LFLAG(RXBD_EMPTY))
1094 break;
1095
1096 + /* lost RXBD_LAST descriptor due to overrun */
1097 + if (skb &&
1098 + (lstatus & BD_LFLAG(RXBD_FIRST))) {
1099 + /* discard faulty buffer */
1100 + dev_kfree_skb(skb);
1101 + skb = NULL;
1102 + rx_queue->stats.rx_dropped++;
1103 +
1104 + /* can continue normally */
1105 + }
1106 +
1107 /* order rx buffer descriptor reads */
1108 rmb();
1109
1110 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1111 index 6d5d53cfc7ab4..7516f68230900 100644
1112 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1113 +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1114 @@ -1677,8 +1677,10 @@ static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
1115 for (j = 0; j < fetch_num; j++) {
1116 /* alloc one skb and init */
1117 skb = hns_assemble_skb(ndev);
1118 - if (!skb)
1119 + if (!skb) {
1120 + ret = -ENOMEM;
1121 goto out;
1122 + }
1123 rd = &tx_ring_data(priv, skb->queue_mapping);
1124 hns_nic_net_xmit_hw(ndev, skb, rd);
1125
1126 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
1127 index 2c1bab377b2a5..1fd4406173a87 100644
1128 --- a/drivers/net/ethernet/intel/e1000e/82571.c
1129 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
1130 @@ -899,6 +899,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
1131 } else {
1132 data &= ~IGP02E1000_PM_D0_LPLU;
1133 ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
1134 + if (ret_val)
1135 + return ret_val;
1136 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
1137 * during Dx states where the power conservation is most
1138 * important. During driver activity we should enable
1139 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
1140 index 4cb05a31e66df..c2feedfd321dc 100644
1141 --- a/drivers/net/ethernet/intel/e1000e/netdev.c
1142 +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
1143 @@ -5953,15 +5953,19 @@ static void e1000_reset_task(struct work_struct *work)
1144 struct e1000_adapter *adapter;
1145 adapter = container_of(work, struct e1000_adapter, reset_task);
1146
1147 + rtnl_lock();
1148 /* don't run the task if already down */
1149 - if (test_bit(__E1000_DOWN, &adapter->state))
1150 + if (test_bit(__E1000_DOWN, &adapter->state)) {
1151 + rtnl_unlock();
1152 return;
1153 + }
1154
1155 if (!(adapter->flags & FLAG_RESTART_NOW)) {
1156 e1000e_dump(adapter);
1157 e_err("Reset adapter unexpectedly\n");
1158 }
1159 e1000e_reinit_locked(adapter);
1160 + rtnl_unlock();
1161 }
1162
1163 /**
1164 diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
1165 index 56e6bec9af797..cffc8c1044f20 100644
1166 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c
1167 +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
1168 @@ -1786,7 +1786,8 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
1169 goto err_alloc;
1170 }
1171
1172 - if (iavf_process_config(adapter))
1173 + err = iavf_process_config(adapter);
1174 + if (err)
1175 goto err_alloc;
1176 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1177
1178 diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
1179 index 0365bf2b480e3..cbcb8611ab50d 100644
1180 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
1181 +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
1182 @@ -1690,6 +1690,9 @@ static int igc_get_link_ksettings(struct net_device *netdev,
1183 Autoneg);
1184 }
1185
1186 + /* Set pause flow control settings */
1187 + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
1188 +
1189 switch (hw->fc.requested_mode) {
1190 case igc_fc_full:
1191 ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
1192 @@ -1704,9 +1707,7 @@ static int igc_get_link_ksettings(struct net_device *netdev,
1193 Asym_Pause);
1194 break;
1195 default:
1196 - ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
1197 - ethtool_link_ksettings_add_link_mode(cmd, advertising,
1198 - Asym_Pause);
1199 + break;
1200 }
1201
1202 status = pm_runtime_suspended(&adapter->pdev->dev) ?
1203 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1204 index f605540644035..1b8e70585c44a 100644
1205 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1206 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1207 @@ -9595,8 +9595,10 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
1208 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
1209 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
1210 input->sw_idx, queue);
1211 - if (!err)
1212 - ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
1213 + if (err)
1214 + goto err_out_w_lock;
1215 +
1216 + ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
1217 spin_unlock(&adapter->fdir_perfect_lock);
1218
1219 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
1220 diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
1221 index e581091c09c4e..02b4620f7368a 100644
1222 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
1223 +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
1224 @@ -1980,8 +1980,10 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
1225 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
1226
1227 for (irq = 0; irq < rvu->num_vec; irq++) {
1228 - if (rvu->irq_allocated[irq])
1229 + if (rvu->irq_allocated[irq]) {
1230 free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
1231 + rvu->irq_allocated[irq] = false;
1232 + }
1233 }
1234
1235 pci_free_irq_vectors(rvu->pdev);
1236 diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
1237 index 15f70273e29c7..d82a519a0cd9a 100644
1238 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
1239 +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
1240 @@ -1967,10 +1967,10 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
1241 index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
1242 if (index >= mcam->bmap_entries)
1243 break;
1244 + entry = index + 1;
1245 if (mcam->entry2cntr_map[index] != req->cntr)
1246 continue;
1247
1248 - entry = index + 1;
1249 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
1250 index, req->cntr);
1251 }
1252 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
1253 index 951ea26d96bc3..6d27f69cc7fc9 100644
1254 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
1255 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
1256 @@ -227,6 +227,10 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
1257 option_key = (struct geneve_opt *)&enc_opts.key->data[0];
1258 option_mask = (struct geneve_opt *)&enc_opts.mask->data[0];
1259
1260 + if (option_mask->opt_class == 0 && option_mask->type == 0 &&
1261 + !memchr_inv(option_mask->opt_data, 0, option_mask->length * 4))
1262 + return 0;
1263 +
1264 if (option_key->length > max_tlv_option_data_len) {
1265 NL_SET_ERR_MSG_MOD(extack,
1266 "Matching on GENEVE options: unsupported option len");
1267 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1268 index 01089c2283d7f..e09b4a96a1d5f 100644
1269 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1270 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1271 @@ -1811,6 +1811,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
1272 {
1273 struct mlx5e_priv *priv = netdev_priv(netdev);
1274 struct mlx5_core_dev *mdev = priv->mdev;
1275 + int err;
1276
1277 if (!MLX5_CAP_GEN(mdev, cqe_compression))
1278 return -EOPNOTSUPP;
1279 @@ -1820,7 +1821,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
1280 return -EINVAL;
1281 }
1282
1283 - mlx5e_modify_rx_cqe_compression_locked(priv, enable);
1284 + err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
1285 + if (err)
1286 + return err;
1287 +
1288 priv->channels.params.rx_cqe_compress_def = enable;
1289
1290 return 0;
1291 diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
1292 index 5defd31d481c2..aa06fcb38f8b9 100644
1293 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
1294 +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
1295 @@ -327,8 +327,14 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
1296 goto err_free_ctx_entry;
1297 }
1298
1299 + /* Do net allocate a mask-id for pre_tun_rules. These flows are used to
1300 + * configure the pre_tun table and are never actually send to the
1301 + * firmware as an add-flow message. This causes the mask-id allocation
1302 + * on the firmware to get out of sync if allocated here.
1303 + */
1304 new_mask_id = 0;
1305 - if (!nfp_check_mask_add(app, nfp_flow->mask_data,
1306 + if (!nfp_flow->pre_tun_rule.dev &&
1307 + !nfp_check_mask_add(app, nfp_flow->mask_data,
1308 nfp_flow->meta.mask_len,
1309 &nfp_flow->meta.flags, &new_mask_id)) {
1310 NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
1311 @@ -359,7 +365,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
1312 goto err_remove_mask;
1313 }
1314
1315 - if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
1316 + if (!nfp_flow->pre_tun_rule.dev &&
1317 + !nfp_check_mask_remove(app, nfp_flow->mask_data,
1318 nfp_flow->meta.mask_len,
1319 NULL, &new_mask_id)) {
1320 NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
1321 @@ -374,8 +381,10 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
1322 return 0;
1323
1324 err_remove_mask:
1325 - nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len,
1326 - NULL, &new_mask_id);
1327 + if (!nfp_flow->pre_tun_rule.dev)
1328 + nfp_check_mask_remove(app, nfp_flow->mask_data,
1329 + nfp_flow->meta.mask_len,
1330 + NULL, &new_mask_id);
1331 err_remove_rhash:
1332 WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
1333 &ctx_entry->ht_node,
1334 @@ -406,9 +415,10 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
1335
1336 __nfp_modify_flow_metadata(priv, nfp_flow);
1337
1338 - nfp_check_mask_remove(app, nfp_flow->mask_data,
1339 - nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
1340 - &new_mask_id);
1341 + if (!nfp_flow->pre_tun_rule.dev)
1342 + nfp_check_mask_remove(app, nfp_flow->mask_data,
1343 + nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
1344 + &new_mask_id);
1345
1346 /* Update flow payload with mask ids. */
1347 nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
1348 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
1349 index f34ae8c75bc5e..61a39d167c8bc 100644
1350 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
1351 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
1352 @@ -1426,6 +1426,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
1353
1354 if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
1355 vfree(fw_dump->tmpl_hdr);
1356 + fw_dump->tmpl_hdr = NULL;
1357
1358 if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
1359 extended = !qlcnic_83xx_extend_md_capab(adapter);
1360 @@ -1444,6 +1445,8 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
1361 struct qlcnic_83xx_dump_template_hdr *hdr;
1362
1363 hdr = fw_dump->tmpl_hdr;
1364 + if (!hdr)
1365 + return;
1366 hdr->drv_cap_mask = 0x1f;
1367 fw_dump->cap_mask = 0x1f;
1368 dev_info(&pdev->dev,
1369 diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
1370 index 8bd2912bf713c..33d7c2940ba9d 100644
1371 --- a/drivers/net/ethernet/socionext/netsec.c
1372 +++ b/drivers/net/ethernet/socionext/netsec.c
1373 @@ -1693,14 +1693,17 @@ static int netsec_netdev_init(struct net_device *ndev)
1374 goto err1;
1375
1376 /* set phy power down */
1377 - data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
1378 - BMCR_PDOWN;
1379 - netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
1380 + data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR);
1381 + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR,
1382 + data | BMCR_PDOWN);
1383
1384 ret = netsec_reset_hardware(priv, true);
1385 if (ret)
1386 goto err2;
1387
1388 + /* Restore phy power state */
1389 + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
1390 +
1391 spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
1392 spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);
1393
1394 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
1395 index c4c9cbdeb601e..2f6258ca95155 100644
1396 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
1397 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
1398 @@ -1206,6 +1206,8 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
1399 plat_dat->init = sun8i_dwmac_init;
1400 plat_dat->exit = sun8i_dwmac_exit;
1401 plat_dat->setup = sun8i_dwmac_setup;
1402 + plat_dat->tx_fifo_size = 4096;
1403 + plat_dat->rx_fifo_size = 16384;
1404
1405 ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
1406 if (ret)
1407 diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
1408 index f5fd1f3c07cc5..2911740af7061 100644
1409 --- a/drivers/net/ethernet/sun/niu.c
1410 +++ b/drivers/net/ethernet/sun/niu.c
1411 @@ -3931,8 +3931,6 @@ static void niu_xmac_interrupt(struct niu *np)
1412 mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
1413 if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
1414 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
1415 - if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
1416 - mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
1417 if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
1418 mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
1419 if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
1420 diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
1421 index 0f8a924fc60c3..c6c1bb15557f4 100644
1422 --- a/drivers/net/ethernet/tehuti/tehuti.c
1423 +++ b/drivers/net/ethernet/tehuti/tehuti.c
1424 @@ -2052,6 +2052,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1425 /*bdx_hw_reset(priv); */
1426 if (bdx_read_mac(priv)) {
1427 pr_err("load MAC address failed\n");
1428 + err = -EFAULT;
1429 goto err_out_iomap;
1430 }
1431 SET_NETDEV_DEV(ndev, &pdev->dev);
1432 diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
1433 index bcabd39d136ae..f778172356e68 100644
1434 --- a/drivers/net/usb/cdc-phonet.c
1435 +++ b/drivers/net/usb/cdc-phonet.c
1436 @@ -387,6 +387,8 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i
1437
1438 err = register_netdev(dev);
1439 if (err) {
1440 + /* Set disconnected flag so that disconnect() returns early. */
1441 + pnd->disconnected = 1;
1442 usb_driver_release_interface(&usbpn_driver, data_intf);
1443 goto out;
1444 }
1445 diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
1446 index 22f093797f417..f6d643ecaf39b 100644
1447 --- a/drivers/net/usb/r8152.c
1448 +++ b/drivers/net/usb/r8152.c
1449 @@ -2836,29 +2836,6 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
1450 device_set_wakeup_enable(&tp->udev->dev, false);
1451 }
1452
1453 -static void r8153_mac_clk_spd(struct r8152 *tp, bool enable)
1454 -{
1455 - /* MAC clock speed down */
1456 - if (enable) {
1457 - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL,
1458 - ALDPS_SPDWN_RATIO);
1459 - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2,
1460 - EEE_SPDWN_RATIO);
1461 - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
1462 - PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
1463 - U1U2_SPDWN_EN | L1_SPDWN_EN);
1464 - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
1465 - PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
1466 - TP100_SPDWN_EN | TP500_SPDWN_EN | EEE_SPDWN_EN |
1467 - TP1000_SPDWN_EN);
1468 - } else {
1469 - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
1470 - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
1471 - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
1472 - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
1473 - }
1474 -}
1475 -
1476 static void r8153_u1u2en(struct r8152 *tp, bool enable)
1477 {
1478 u8 u1u2[8];
1479 @@ -3158,11 +3135,9 @@ static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
1480 if (enable) {
1481 r8153_u1u2en(tp, false);
1482 r8153_u2p3en(tp, false);
1483 - r8153_mac_clk_spd(tp, true);
1484 rtl_runtime_suspend_enable(tp, true);
1485 } else {
1486 rtl_runtime_suspend_enable(tp, false);
1487 - r8153_mac_clk_spd(tp, false);
1488
1489 switch (tp->version) {
1490 case RTL_VER_03:
1491 @@ -3727,7 +3702,6 @@ static void r8153_first_init(struct r8152 *tp)
1492 u32 ocp_data;
1493 int i;
1494
1495 - r8153_mac_clk_spd(tp, false);
1496 rxdy_gated_en(tp, true);
1497 r8153_teredo_off(tp);
1498
1499 @@ -3789,8 +3763,6 @@ static void r8153_enter_oob(struct r8152 *tp)
1500 u32 ocp_data;
1501 int i;
1502
1503 - r8153_mac_clk_spd(tp, true);
1504 -
1505 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
1506 ocp_data &= ~NOW_IS_OOB;
1507 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
1508 @@ -4498,9 +4470,14 @@ static void r8153_init(struct r8152 *tp)
1509
1510 ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001);
1511
1512 + /* MAC clock speed down */
1513 + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
1514 + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
1515 + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
1516 + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
1517 +
1518 r8153_power_cut_en(tp, false);
1519 r8153_u1u2en(tp, true);
1520 - r8153_mac_clk_spd(tp, false);
1521 usb_enable_lpm(tp->udev);
1522
1523 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
1524 @@ -5552,7 +5529,10 @@ static int rtl_ops_init(struct r8152 *tp)
1525 ops->in_nway = rtl8153_in_nway;
1526 ops->hw_phy_cfg = r8153_hw_phy_cfg;
1527 ops->autosuspend_en = rtl8153_runtime_enable;
1528 - tp->rx_buf_sz = 32 * 1024;
1529 + if (tp->udev->speed < USB_SPEED_SUPER)
1530 + tp->rx_buf_sz = 16 * 1024;
1531 + else
1532 + tp->rx_buf_sz = 32 * 1024;
1533 tp->eee_en = true;
1534 tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
1535 break;
1536 diff --git a/drivers/net/veth.c b/drivers/net/veth.c
1537 index 88cfd63f08a6a..44ad412f9a06f 100644
1538 --- a/drivers/net/veth.c
1539 +++ b/drivers/net/veth.c
1540 @@ -254,8 +254,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
1541 if (rxq < rcv->real_num_rx_queues) {
1542 rq = &rcv_priv->rq[rxq];
1543 rcv_xdp = rcu_access_pointer(rq->xdp_prog);
1544 - if (rcv_xdp)
1545 - skb_record_rx_queue(skb, rxq);
1546 + skb_record_rx_queue(skb, rxq);
1547 }
1548
1549 skb_tx_timestamp(skb);
1550 diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
1551 index 4ad0a0c33d853..034eb6535ab7d 100644
1552 --- a/drivers/net/wan/fsl_ucc_hdlc.c
1553 +++ b/drivers/net/wan/fsl_ucc_hdlc.c
1554 @@ -204,14 +204,18 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
1555 priv->rx_skbuff = kcalloc(priv->rx_ring_size,
1556 sizeof(*priv->rx_skbuff),
1557 GFP_KERNEL);
1558 - if (!priv->rx_skbuff)
1559 + if (!priv->rx_skbuff) {
1560 + ret = -ENOMEM;
1561 goto free_ucc_pram;
1562 + }
1563
1564 priv->tx_skbuff = kcalloc(priv->tx_ring_size,
1565 sizeof(*priv->tx_skbuff),
1566 GFP_KERNEL);
1567 - if (!priv->tx_skbuff)
1568 + if (!priv->tx_skbuff) {
1569 + ret = -ENOMEM;
1570 goto free_rx_skbuff;
1571 + }
1572
1573 priv->skb_curtx = 0;
1574 priv->skb_dirtytx = 0;
1575 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
1576 index 308f3a28e12a2..67ea531e8b34b 100644
1577 --- a/drivers/nvme/host/core.c
1578 +++ b/drivers/nvme/host/core.c
1579 @@ -312,6 +312,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
1580 return true;
1581
1582 nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
1583 + nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1584 blk_mq_complete_request(req);
1585 return true;
1586 }
1587 diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
1588 index 65b3dc9cd693b..0d2c22cf12a08 100644
1589 --- a/drivers/nvme/host/fc.c
1590 +++ b/drivers/nvme/host/fc.c
1591 @@ -1608,7 +1608,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1592 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1593
1594 if (opstate == FCPOP_STATE_ABORTED)
1595 - status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1596 + status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1);
1597 else if (freq->status) {
1598 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1599 dev_info(ctrl->ctrl.device,
1600 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
1601 index fc18738dcf8ff..3bee3724e9fa7 100644
1602 --- a/drivers/nvme/host/pci.c
1603 +++ b/drivers/nvme/host/pci.c
1604 @@ -3176,6 +3176,7 @@ static const struct pci_device_id nvme_id_table[] = {
1605 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
1606 { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
1607 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
1608 + NVME_QUIRK_DISABLE_WRITE_ZEROES|
1609 NVME_QUIRK_IGNORE_DEV_SUBNQN, },
1610 { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
1611 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
1612 diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
1613 index d4fc2cbf78703..6aaceef3326c7 100644
1614 --- a/drivers/platform/x86/intel-vbtn.c
1615 +++ b/drivers/platform/x86/intel-vbtn.c
1616 @@ -46,8 +46,16 @@ static const struct key_entry intel_vbtn_keymap[] = {
1617 };
1618
1619 static const struct key_entry intel_vbtn_switchmap[] = {
1620 - { KE_SW, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */
1621 - { KE_SW, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */
1622 + /*
1623 + * SW_DOCK should only be reported for docking stations, but DSDTs using the
1624 + * intel-vbtn code, always seem to use this for 2-in-1s / convertibles and set
1625 + * SW_DOCK=1 when in laptop-mode (in tandem with setting SW_TABLET_MODE=0).
1626 + * This causes userspace to think the laptop is docked to a port-replicator
1627 + * and to disable suspend-on-lid-close, which is undesirable.
1628 + * Map the dock events to KEY_IGNORE to avoid this broken SW_DOCK reporting.
1629 + */
1630 + { KE_IGNORE, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */
1631 + { KE_IGNORE, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */
1632 { KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */
1633 { KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */
1634 };
1635 diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
1636 index 68d22acdb037a..2de7af13288e3 100644
1637 --- a/drivers/regulator/qcom-rpmh-regulator.c
1638 +++ b/drivers/regulator/qcom-rpmh-regulator.c
1639 @@ -726,8 +726,8 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
1640 static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
1641 .regulator_type = VRM,
1642 .ops = &rpmh_regulator_vrm_ops,
1643 - .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
1644 - .n_voltages = 5,
1645 + .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 235, 16000),
1646 + .n_voltages = 236,
1647 .pmic_mode_map = pmic_mode_map_pmic5_smps,
1648 .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
1649 };
1650 diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
1651 index 7532603aafb15..b6d42b2ce6fe4 100644
1652 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c
1653 +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
1654 @@ -7102,14 +7102,18 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
1655 ioc->pend_os_device_add_sz++;
1656 ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
1657 GFP_KERNEL);
1658 - if (!ioc->pend_os_device_add)
1659 + if (!ioc->pend_os_device_add) {
1660 + r = -ENOMEM;
1661 goto out_free_resources;
1662 + }
1663
1664 ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
1665 ioc->device_remove_in_progress =
1666 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
1667 - if (!ioc->device_remove_in_progress)
1668 + if (!ioc->device_remove_in_progress) {
1669 + r = -ENOMEM;
1670 goto out_free_resources;
1671 + }
1672
1673 ioc->fwfault_debug = mpt3sas_fwfault_debug;
1674
1675 diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
1676 index fdd966fea7f6a..4498add3d4d66 100644
1677 --- a/drivers/scsi/qedi/qedi_main.c
1678 +++ b/drivers/scsi/qedi/qedi_main.c
1679 @@ -1605,6 +1605,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
1680 if (!qedi->global_queues[i]) {
1681 QEDI_ERR(&qedi->dbg_ctx,
1682 "Unable to allocation global queue %d.\n", i);
1683 + status = -ENOMEM;
1684 goto mem_alloc_failure;
1685 }
1686
1687 diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
1688 index 412009e2b9488..8fd0a568303b5 100644
1689 --- a/drivers/scsi/qla2xxx/qla_target.c
1690 +++ b/drivers/scsi/qla2xxx/qla_target.c
1691 @@ -3216,8 +3216,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
1692 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
1693 (cmd->sess && cmd->sess->deleted)) {
1694 cmd->state = QLA_TGT_STATE_PROCESSED;
1695 - res = 0;
1696 - goto free;
1697 + return 0;
1698 }
1699
1700 ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
1701 @@ -3228,8 +3227,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
1702
1703 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
1704 &full_req_cnt);
1705 - if (unlikely(res != 0))
1706 - goto free;
1707 + if (unlikely(res != 0)) {
1708 + return res;
1709 + }
1710
1711 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
1712
1713 @@ -3249,8 +3249,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
1714 vha->flags.online, qla2x00_reset_active(vha),
1715 cmd->reset_count, qpair->chip_reset);
1716 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1717 - res = 0;
1718 - goto free;
1719 + return 0;
1720 }
1721
1722 /* Does F/W have an IOCBs for this request */
1723 @@ -3353,8 +3352,6 @@ out_unmap_unlock:
1724 qlt_unmap_sg(vha, cmd);
1725 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1726
1727 -free:
1728 - vha->hw->tgt.tgt_ops->free_cmd(cmd);
1729 return res;
1730 }
1731 EXPORT_SYMBOL(qlt_xmit_response);
1732 diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
1733 index 744cd93189da3..df8644da2c323 100644
1734 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
1735 +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
1736 @@ -623,7 +623,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
1737 {
1738 struct qla_tgt_cmd *cmd = container_of(se_cmd,
1739 struct qla_tgt_cmd, se_cmd);
1740 - struct scsi_qla_host *vha = cmd->vha;
1741
1742 if (cmd->aborted) {
1743 /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
1744 @@ -636,7 +635,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
1745 cmd->se_cmd.transport_state,
1746 cmd->se_cmd.t_state,
1747 cmd->se_cmd.se_cmd_flags);
1748 - vha->hw->tgt.tgt_ops->free_cmd(cmd);
1749 return 0;
1750 }
1751
1752 @@ -664,7 +662,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
1753 {
1754 struct qla_tgt_cmd *cmd = container_of(se_cmd,
1755 struct qla_tgt_cmd, se_cmd);
1756 - struct scsi_qla_host *vha = cmd->vha;
1757 int xmit_type = QLA_TGT_XMIT_STATUS;
1758
1759 if (cmd->aborted) {
1760 @@ -678,7 +675,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
1761 cmd, kref_read(&cmd->se_cmd.cmd_kref),
1762 cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
1763 cmd->se_cmd.se_cmd_flags);
1764 - vha->hw->tgt.tgt_ops->free_cmd(cmd);
1765 return 0;
1766 }
1767 cmd->bufflen = se_cmd->data_length;
1768 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
1769 index 7adecfd0c1e99..81d9c4ea0e8f3 100644
1770 --- a/fs/cifs/smb2pdu.c
1771 +++ b/fs/cifs/smb2pdu.c
1772 @@ -3743,8 +3743,7 @@ smb2_async_readv(struct cifs_readdata *rdata)
1773 if (rdata->credits.value > 0) {
1774 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
1775 SMB2_MAX_BUFFER_SIZE));
1776 - shdr->CreditRequest =
1777 - cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
1778 + shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
1779
1780 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
1781 if (rc)
1782 @@ -4038,8 +4037,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
1783 if (wdata->credits.value > 0) {
1784 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
1785 SMB2_MAX_BUFFER_SIZE));
1786 - shdr->CreditRequest =
1787 - cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
1788 + shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
1789
1790 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
1791 if (rc)
1792 diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
1793 index e99ecfafffac3..61e7df4d9cb11 100644
1794 --- a/fs/cifs/transport.c
1795 +++ b/fs/cifs/transport.c
1796 @@ -1148,7 +1148,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
1797 }
1798 if (rc != 0) {
1799 for (; i < num_rqst; i++) {
1800 - cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
1801 + cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
1802 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
1803 send_cancel(server, &rqst[i], midQ[i]);
1804 spin_lock(&GlobalMid_Lock);
1805 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
1806 index 5451f10800065..20e40cac819e4 100644
1807 --- a/fs/ext4/xattr.c
1808 +++ b/fs/ext4/xattr.c
1809 @@ -1476,6 +1476,9 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
1810 if (!ce)
1811 return NULL;
1812
1813 + WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) &&
1814 + !(current->flags & PF_MEMALLOC_NOFS));
1815 +
1816 ea_data = ext4_kvmalloc(value_len, GFP_NOFS);
1817 if (!ea_data) {
1818 mb_cache_entry_put(ea_inode_cache, ce);
1819 @@ -2342,6 +2345,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
1820 error = -ENOSPC;
1821 goto cleanup;
1822 }
1823 + WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
1824 }
1825
1826 error = ext4_reserve_inode_write(handle, inode, &is.iloc);
1827 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
1828 index cf3af2140c3d8..a2e9354b9d534 100644
1829 --- a/fs/hugetlbfs/inode.c
1830 +++ b/fs/hugetlbfs/inode.c
1831 @@ -440,7 +440,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
1832 u32 hash;
1833
1834 index = page->index;
1835 - hash = hugetlb_fault_mutex_hash(h, mapping, index, 0);
1836 + hash = hugetlb_fault_mutex_hash(h, mapping, index);
1837 mutex_lock(&hugetlb_fault_mutex_table[hash]);
1838
1839 /*
1840 @@ -644,7 +644,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
1841 addr = index * hpage_size;
1842
1843 /* mutex taken here, fault path and hole punch */
1844 - hash = hugetlb_fault_mutex_hash(h, mapping, index, addr);
1845 + hash = hugetlb_fault_mutex_hash(h, mapping, index);
1846 mutex_lock(&hugetlb_fault_mutex_table[hash]);
1847
1848 /* See if already present in mapping to avoid alloc/free */
1849 diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
1850 index e7dd07f478259..e84c187d942e8 100644
1851 --- a/fs/nfs/Kconfig
1852 +++ b/fs/nfs/Kconfig
1853 @@ -127,7 +127,7 @@ config PNFS_BLOCK
1854 config PNFS_FLEXFILE_LAYOUT
1855 tristate
1856 depends on NFS_V4_1 && NFS_V3
1857 - default m
1858 + default NFS_V4
1859
1860 config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
1861 string "NFSv4.1 Implementation ID Domain"
1862 diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
1863 index 1f60ab2535eed..23d75cddbb2ee 100644
1864 --- a/fs/nfs/nfs3xdr.c
1865 +++ b/fs/nfs/nfs3xdr.c
1866 @@ -35,6 +35,7 @@
1867 */
1868 #define NFS3_fhandle_sz (1+16)
1869 #define NFS3_fh_sz (NFS3_fhandle_sz) /* shorthand */
1870 +#define NFS3_post_op_fh_sz (1+NFS3_fh_sz)
1871 #define NFS3_sattr_sz (15)
1872 #define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2))
1873 #define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2))
1874 @@ -72,7 +73,7 @@
1875 #define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1+1)
1876 #define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3+1)
1877 #define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4)
1878 -#define NFS3_createres_sz (1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
1879 +#define NFS3_createres_sz (1+NFS3_post_op_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
1880 #define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz))
1881 #define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
1882 #define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2+1)
1883 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
1884 index b2119159dead2..304ab4cdaa8c1 100644
1885 --- a/fs/nfs/nfs4proc.c
1886 +++ b/fs/nfs/nfs4proc.c
1887 @@ -5754,6 +5754,9 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
1888 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
1889 int ret, i;
1890
1891 + /* You can't remove system.nfs4_acl: */
1892 + if (buflen == 0)
1893 + return -EINVAL;
1894 if (!nfs4_server_supports_acls(server))
1895 return -EOPNOTSUPP;
1896 if (npages > ARRAY_SIZE(pages))
1897 diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
1898 index eb02072d28dd6..723763746238d 100644
1899 --- a/fs/squashfs/export.c
1900 +++ b/fs/squashfs/export.c
1901 @@ -152,14 +152,18 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
1902 start = le64_to_cpu(table[n]);
1903 end = le64_to_cpu(table[n + 1]);
1904
1905 - if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
1906 + if (start >= end
1907 + || (end - start) >
1908 + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
1909 kfree(table);
1910 return ERR_PTR(-EINVAL);
1911 }
1912 }
1913
1914 start = le64_to_cpu(table[indexes - 1]);
1915 - if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
1916 + if (start >= lookup_table_start ||
1917 + (lookup_table_start - start) >
1918 + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
1919 kfree(table);
1920 return ERR_PTR(-EINVAL);
1921 }
1922 diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
1923 index 11581bf31af41..ea5387679723f 100644
1924 --- a/fs/squashfs/id.c
1925 +++ b/fs/squashfs/id.c
1926 @@ -97,14 +97,16 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
1927 start = le64_to_cpu(table[n]);
1928 end = le64_to_cpu(table[n + 1]);
1929
1930 - if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
1931 + if (start >= end || (end - start) >
1932 + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
1933 kfree(table);
1934 return ERR_PTR(-EINVAL);
1935 }
1936 }
1937
1938 start = le64_to_cpu(table[indexes - 1]);
1939 - if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
1940 + if (start >= id_table_start || (id_table_start - start) >
1941 + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
1942 kfree(table);
1943 return ERR_PTR(-EINVAL);
1944 }
1945 diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
1946 index 7187bd1a30ea5..236664d691419 100644
1947 --- a/fs/squashfs/squashfs_fs.h
1948 +++ b/fs/squashfs/squashfs_fs.h
1949 @@ -17,6 +17,7 @@
1950
1951 /* size of metadata (inode and directory) blocks */
1952 #define SQUASHFS_METADATA_SIZE 8192
1953 +#define SQUASHFS_BLOCK_OFFSET 2
1954
1955 /* default size of block device I/O */
1956 #ifdef CONFIG_SQUASHFS_4K_DEVBLK_SIZE
1957 diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
1958 index ead66670b41a5..087cab8c78f4e 100644
1959 --- a/fs/squashfs/xattr_id.c
1960 +++ b/fs/squashfs/xattr_id.c
1961 @@ -109,14 +109,16 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
1962 start = le64_to_cpu(table[n]);
1963 end = le64_to_cpu(table[n + 1]);
1964
1965 - if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
1966 + if (start >= end || (end - start) >
1967 + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
1968 kfree(table);
1969 return ERR_PTR(-EINVAL);
1970 }
1971 }
1972
1973 start = le64_to_cpu(table[indexes - 1]);
1974 - if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
1975 + if (start >= table_start || (table_start - start) >
1976 + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
1977 kfree(table);
1978 return ERR_PTR(-EINVAL);
1979 }
1980 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
1981 index defed629073bf..4d67a67964fa3 100644
1982 --- a/include/acpi/acpi_bus.h
1983 +++ b/include/acpi/acpi_bus.h
1984 @@ -232,6 +232,7 @@ struct acpi_pnp_type {
1985
1986 struct acpi_device_pnp {
1987 acpi_bus_id bus_id; /* Object name */
1988 + int instance_no; /* Instance number of this object */
1989 struct acpi_pnp_type type; /* ID type */
1990 acpi_bus_address bus_address; /* _ADR */
1991 char *unique_id; /* _UID */
1992 diff --git a/include/linux/bpf.h b/include/linux/bpf.h
1993 index 007147f643908..66590ae89c97c 100644
1994 --- a/include/linux/bpf.h
1995 +++ b/include/linux/bpf.h
1996 @@ -535,7 +535,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
1997 struct bpf_prog *include_prog,
1998 struct bpf_prog_array **new_array);
1999
2000 -#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
2001 +#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \
2002 ({ \
2003 struct bpf_prog_array_item *_item; \
2004 struct bpf_prog *_prog; \
2005 @@ -548,7 +548,8 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2006 goto _out; \
2007 _item = &_array->items[0]; \
2008 while ((_prog = READ_ONCE(_item->prog))) { \
2009 - bpf_cgroup_storage_set(_item->cgroup_storage); \
2010 + if (set_cg_storage) \
2011 + bpf_cgroup_storage_set(_item->cgroup_storage); \
2012 _ret &= func(_prog, ctx); \
2013 _item++; \
2014 } \
2015 @@ -609,10 +610,10 @@ _out: \
2016 })
2017
2018 #define BPF_PROG_RUN_ARRAY(array, ctx, func) \
2019 - __BPF_PROG_RUN_ARRAY(array, ctx, func, false)
2020 + __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)
2021
2022 #define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \
2023 - __BPF_PROG_RUN_ARRAY(array, ctx, func, true)
2024 + __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)
2025
2026 #ifdef CONFIG_BPF_SYSCALL
2027 DECLARE_PER_CPU(int, bpf_prog_active);
2028 diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
2029 index 0e080ba5efbcc..fc717aeb2b3de 100644
2030 --- a/include/linux/hugetlb.h
2031 +++ b/include/linux/hugetlb.h
2032 @@ -106,7 +106,7 @@ void free_huge_page(struct page *page);
2033 void hugetlb_fix_reserve_counts(struct inode *inode);
2034 extern struct mutex *hugetlb_fault_mutex_table;
2035 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
2036 - pgoff_t idx, unsigned long address);
2037 + pgoff_t idx);
2038
2039 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
2040
2041 diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
2042 index a367ead4bf4bb..e11555989090c 100644
2043 --- a/include/linux/if_macvlan.h
2044 +++ b/include/linux/if_macvlan.h
2045 @@ -42,13 +42,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
2046 if (likely(success)) {
2047 struct vlan_pcpu_stats *pcpu_stats;
2048
2049 - pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
2050 + pcpu_stats = get_cpu_ptr(vlan->pcpu_stats);
2051 u64_stats_update_begin(&pcpu_stats->syncp);
2052 pcpu_stats->rx_packets++;
2053 pcpu_stats->rx_bytes += len;
2054 if (multicast)
2055 pcpu_stats->rx_multicast++;
2056 u64_stats_update_end(&pcpu_stats->syncp);
2057 + put_cpu_ptr(vlan->pcpu_stats);
2058 } else {
2059 this_cpu_inc(vlan->pcpu_stats->rx_errors);
2060 }
2061 diff --git a/include/linux/mm.h b/include/linux/mm.h
2062 index c63e4b38b7fe0..703e0d72a05c7 100644
2063 --- a/include/linux/mm.h
2064 +++ b/include/linux/mm.h
2065 @@ -1226,13 +1226,26 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
2066 #endif /* CONFIG_NUMA_BALANCING */
2067
2068 #ifdef CONFIG_KASAN_SW_TAGS
2069 +
2070 +/*
2071 + * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid
2072 + * setting tags for all pages to native kernel tag value 0xff, as the default
2073 + * value 0x00 maps to 0xff.
2074 + */
2075 +
2076 static inline u8 page_kasan_tag(const struct page *page)
2077 {
2078 - return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
2079 + u8 tag;
2080 +
2081 + tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
2082 + tag ^= 0xff;
2083 +
2084 + return tag;
2085 }
2086
2087 static inline void page_kasan_tag_set(struct page *page, u8 tag)
2088 {
2089 + tag ^= 0xff;
2090 page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
2091 page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
2092 }
2093 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
2094 index aca8f36dfac9a..479bc96c3e63a 100644
2095 --- a/include/linux/mutex.h
2096 +++ b/include/linux/mutex.h
2097 @@ -171,7 +171,7 @@ extern void mutex_lock_io(struct mutex *lock);
2098 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
2099 # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
2100 # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
2101 -# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
2102 +# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
2103 #endif
2104
2105 /*
2106 diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
2107 index f5c21b7d29748..04e7f5630509c 100644
2108 --- a/include/linux/netfilter/x_tables.h
2109 +++ b/include/linux/netfilter/x_tables.h
2110 @@ -227,7 +227,7 @@ struct xt_table {
2111 unsigned int valid_hooks;
2112
2113 /* Man behind the curtain... */
2114 - struct xt_table_info __rcu *private;
2115 + struct xt_table_info *private;
2116
2117 /* Set this to THIS_MODULE if you are a module, otherwise NULL */
2118 struct module *me;
2119 @@ -376,7 +376,7 @@ static inline unsigned int xt_write_recseq_begin(void)
2120 * since addend is most likely 1
2121 */
2122 __this_cpu_add(xt_recseq.sequence, addend);
2123 - smp_wmb();
2124 + smp_mb();
2125
2126 return addend;
2127 }
2128 @@ -448,9 +448,6 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
2129
2130 struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
2131
2132 -struct xt_table_info
2133 -*xt_table_get_private_protected(const struct xt_table *table);
2134 -
2135 #ifdef CONFIG_COMPAT
2136 #include <net/compat.h>
2137
2138 diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
2139 index a27604f99ed04..11096b561dab6 100644
2140 --- a/include/linux/u64_stats_sync.h
2141 +++ b/include/linux/u64_stats_sync.h
2142 @@ -69,12 +69,13 @@ struct u64_stats_sync {
2143 };
2144
2145
2146 +#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
2147 +#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq)
2148 +#else
2149 static inline void u64_stats_init(struct u64_stats_sync *syncp)
2150 {
2151 -#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
2152 - seqcount_init(&syncp->seq);
2153 -#endif
2154 }
2155 +#endif
2156
2157 static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
2158 {
2159 diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
2160 index 6c8f8e5e33c3d..13792c0ef46e3 100644
2161 --- a/include/net/inet_connection_sock.h
2162 +++ b/include/net/inet_connection_sock.h
2163 @@ -287,7 +287,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
2164 return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
2165 }
2166
2167 -void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
2168 +bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
2169 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
2170
2171 void inet_csk_destroy_sock(struct sock *sk);
2172 diff --git a/include/net/nexthop.h b/include/net/nexthop.h
2173 index 3bb618e5ecf72..18a5aca264767 100644
2174 --- a/include/net/nexthop.h
2175 +++ b/include/net/nexthop.h
2176 @@ -291,6 +291,7 @@ static inline struct fib_nh *fib_info_nh(struct fib_info *fi, int nhsel)
2177 int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
2178 struct netlink_ext_ack *extack);
2179
2180 +/* Caller should either hold rcu_read_lock(), or RTNL. */
2181 static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
2182 {
2183 struct nh_info *nhi;
2184 @@ -311,6 +312,29 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
2185 return NULL;
2186 }
2187
2188 +/* Variant of nexthop_fib6_nh().
2189 + * Caller should either hold rcu_read_lock_bh(), or RTNL.
2190 + */
2191 +static inline struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh)
2192 +{
2193 + struct nh_info *nhi;
2194 +
2195 + if (nh->is_group) {
2196 + struct nh_group *nh_grp;
2197 +
2198 + nh_grp = rcu_dereference_bh_rtnl(nh->nh_grp);
2199 + nh = nexthop_mpath_select(nh_grp, 0);
2200 + if (!nh)
2201 + return NULL;
2202 + }
2203 +
2204 + nhi = rcu_dereference_bh_rtnl(nh->nh_info);
2205 + if (nhi->family == AF_INET6)
2206 + return &nhi->fib6_nh;
2207 +
2208 + return NULL;
2209 +}
2210 +
2211 static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i)
2212 {
2213 struct fib6_nh *fib6_nh;
2214 diff --git a/include/net/red.h b/include/net/red.h
2215 index e21e7fd4fe077..8fe55b8b2fb81 100644
2216 --- a/include/net/red.h
2217 +++ b/include/net/red.h
2218 @@ -168,7 +168,8 @@ static inline void red_set_vars(struct red_vars *v)
2219 v->qcount = -1;
2220 }
2221
2222 -static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log)
2223 +static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
2224 + u8 Scell_log, u8 *stab)
2225 {
2226 if (fls(qth_min) + Wlog > 32)
2227 return false;
2228 @@ -178,6 +179,13 @@ static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_
2229 return false;
2230 if (qth_max < qth_min)
2231 return false;
2232 + if (stab) {
2233 + int i;
2234 +
2235 + for (i = 0; i < RED_STAB_SIZE; i++)
2236 + if (stab[i] >= 32)
2237 + return false;
2238 + }
2239 return true;
2240 }
2241
2242 diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
2243 index e2091bb2b3a8e..4da61c950e931 100644
2244 --- a/include/net/rtnetlink.h
2245 +++ b/include/net/rtnetlink.h
2246 @@ -33,6 +33,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
2247 *
2248 * @list: Used internally
2249 * @kind: Identifier
2250 + * @netns_refund: Physical device, move to init_net on netns exit
2251 * @maxtype: Highest device specific netlink attribute number
2252 * @policy: Netlink policy for device specific attribute validation
2253 * @validate: Optional validation function for netlink/changelink parameters
2254 @@ -64,6 +65,7 @@ struct rtnl_link_ops {
2255 size_t priv_size;
2256 void (*setup)(struct net_device *dev);
2257
2258 + bool netns_refund;
2259 unsigned int maxtype;
2260 const struct nla_policy *policy;
2261 int (*validate)(struct nlattr *tb[],
2262 diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c
2263 index c94b820a1b62c..8743150db2acc 100644
2264 --- a/kernel/gcov/clang.c
2265 +++ b/kernel/gcov/clang.c
2266 @@ -75,7 +75,9 @@ struct gcov_fn_info {
2267
2268 u32 num_counters;
2269 u64 *counters;
2270 +#if CONFIG_CLANG_VERSION < 110000
2271 const char *function_name;
2272 +#endif
2273 };
2274
2275 static struct gcov_info *current_info;
2276 @@ -105,6 +107,7 @@ void llvm_gcov_init(llvm_gcov_callback writeout, llvm_gcov_callback flush)
2277 }
2278 EXPORT_SYMBOL(llvm_gcov_init);
2279
2280 +#if CONFIG_CLANG_VERSION < 110000
2281 void llvm_gcda_start_file(const char *orig_filename, const char version[4],
2282 u32 checksum)
2283 {
2284 @@ -113,7 +116,17 @@ void llvm_gcda_start_file(const char *orig_filename, const char version[4],
2285 current_info->checksum = checksum;
2286 }
2287 EXPORT_SYMBOL(llvm_gcda_start_file);
2288 +#else
2289 +void llvm_gcda_start_file(const char *orig_filename, u32 version, u32 checksum)
2290 +{
2291 + current_info->filename = orig_filename;
2292 + current_info->version = version;
2293 + current_info->checksum = checksum;
2294 +}
2295 +EXPORT_SYMBOL(llvm_gcda_start_file);
2296 +#endif
2297
2298 +#if CONFIG_CLANG_VERSION < 110000
2299 void llvm_gcda_emit_function(u32 ident, const char *function_name,
2300 u32 func_checksum, u8 use_extra_checksum, u32 cfg_checksum)
2301 {
2302 @@ -133,6 +146,24 @@ void llvm_gcda_emit_function(u32 ident, const char *function_name,
2303 list_add_tail(&info->head, &current_info->functions);
2304 }
2305 EXPORT_SYMBOL(llvm_gcda_emit_function);
2306 +#else
2307 +void llvm_gcda_emit_function(u32 ident, u32 func_checksum,
2308 + u8 use_extra_checksum, u32 cfg_checksum)
2309 +{
2310 + struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
2311 +
2312 + if (!info)
2313 + return;
2314 +
2315 + INIT_LIST_HEAD(&info->head);
2316 + info->ident = ident;
2317 + info->checksum = func_checksum;
2318 + info->use_extra_checksum = use_extra_checksum;
2319 + info->cfg_checksum = cfg_checksum;
2320 + list_add_tail(&info->head, &current_info->functions);
2321 +}
2322 +EXPORT_SYMBOL(llvm_gcda_emit_function);
2323 +#endif
2324
2325 void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters)
2326 {
2327 @@ -295,6 +326,7 @@ void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
2328 }
2329 }
2330
2331 +#if CONFIG_CLANG_VERSION < 110000
2332 static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
2333 {
2334 size_t cv_size; /* counter values size */
2335 @@ -322,6 +354,28 @@ err_name:
2336 kfree(fn_dup);
2337 return NULL;
2338 }
2339 +#else
2340 +static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
2341 +{
2342 + size_t cv_size; /* counter values size */
2343 + struct gcov_fn_info *fn_dup = kmemdup(fn, sizeof(*fn),
2344 + GFP_KERNEL);
2345 + if (!fn_dup)
2346 + return NULL;
2347 + INIT_LIST_HEAD(&fn_dup->head);
2348 +
2349 + cv_size = fn->num_counters * sizeof(fn->counters[0]);
2350 + fn_dup->counters = vmalloc(cv_size);
2351 + if (!fn_dup->counters) {
2352 + kfree(fn_dup);
2353 + return NULL;
2354 + }
2355 +
2356 + memcpy(fn_dup->counters, fn->counters, cv_size);
2357 +
2358 + return fn_dup;
2359 +}
2360 +#endif
2361
2362 /**
2363 * gcov_info_dup - duplicate profiling data set
2364 @@ -362,6 +416,7 @@ err:
2365 * gcov_info_free - release memory for profiling data set duplicate
2366 * @info: profiling data set duplicate to free
2367 */
2368 +#if CONFIG_CLANG_VERSION < 110000
2369 void gcov_info_free(struct gcov_info *info)
2370 {
2371 struct gcov_fn_info *fn, *tmp;
2372 @@ -375,6 +430,20 @@ void gcov_info_free(struct gcov_info *info)
2373 kfree(info->filename);
2374 kfree(info);
2375 }
2376 +#else
2377 +void gcov_info_free(struct gcov_info *info)
2378 +{
2379 + struct gcov_fn_info *fn, *tmp;
2380 +
2381 + list_for_each_entry_safe(fn, tmp, &info->functions, head) {
2382 + vfree(fn->counters);
2383 + list_del(&fn->head);
2384 + kfree(fn);
2385 + }
2386 + kfree(info->filename);
2387 + kfree(info);
2388 +}
2389 +#endif
2390
2391 #define ITER_STRIDE PAGE_SIZE
2392
2393 diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
2394 index 0a9326f5f4218..8dac32bd90894 100644
2395 --- a/kernel/power/energy_model.c
2396 +++ b/kernel/power/energy_model.c
2397 @@ -74,7 +74,7 @@ static int __init em_debug_init(void)
2398
2399 return 0;
2400 }
2401 -core_initcall(em_debug_init);
2402 +fs_initcall(em_debug_init);
2403 #else /* CONFIG_DEBUG_FS */
2404 static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {}
2405 #endif
2406 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2407 index 4033b6ce01c40..5253c67acb1df 100644
2408 --- a/mm/hugetlb.c
2409 +++ b/mm/hugetlb.c
2410 @@ -4020,7 +4020,7 @@ retry:
2411 * handling userfault. Reacquire after handling
2412 * fault to make calling code simpler.
2413 */
2414 - hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
2415 + hash = hugetlb_fault_mutex_hash(h, mapping, idx);
2416 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
2417 ret = handle_userfault(&vmf, VM_UFFD_MISSING);
2418 mutex_lock(&hugetlb_fault_mutex_table[hash]);
2419 @@ -4148,7 +4148,7 @@ backout_unlocked:
2420
2421 #ifdef CONFIG_SMP
2422 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
2423 - pgoff_t idx, unsigned long address)
2424 + pgoff_t idx)
2425 {
2426 unsigned long key[2];
2427 u32 hash;
2428 @@ -4156,7 +4156,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
2429 key[0] = (unsigned long) mapping;
2430 key[1] = idx;
2431
2432 - hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
2433 + hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
2434
2435 return hash & (num_fault_mutexes - 1);
2436 }
2437 @@ -4166,7 +4166,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
2438 * return 0 and avoid the hashing overhead.
2439 */
2440 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
2441 - pgoff_t idx, unsigned long address)
2442 + pgoff_t idx)
2443 {
2444 return 0;
2445 }
2446 @@ -4210,7 +4210,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2447 * get spurious allocation failures if two CPUs race to instantiate
2448 * the same page in the page cache.
2449 */
2450 - hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
2451 + hash = hugetlb_fault_mutex_hash(h, mapping, idx);
2452 mutex_lock(&hugetlb_fault_mutex_table[hash]);
2453
2454 entry = huge_ptep_get(ptep);
2455 diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
2456 index c7ae74ce5ff3e..640ff2bd9a693 100644
2457 --- a/mm/userfaultfd.c
2458 +++ b/mm/userfaultfd.c
2459 @@ -269,7 +269,7 @@ retry:
2460 */
2461 idx = linear_page_index(dst_vma, dst_addr);
2462 mapping = dst_vma->vm_file->f_mapping;
2463 - hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr);
2464 + hash = hugetlb_fault_mutex_hash(h, mapping, idx);
2465 mutex_lock(&hugetlb_fault_mutex_table[hash]);
2466
2467 err = -ENOMEM;
2468 diff --git a/net/core/dev.c b/net/core/dev.c
2469 index e732faade5dca..2ec21380f86d9 100644
2470 --- a/net/core/dev.c
2471 +++ b/net/core/dev.c
2472 @@ -10121,7 +10121,7 @@ static void __net_exit default_device_exit(struct net *net)
2473 continue;
2474
2475 /* Leave virtual devices for the generic cleanup */
2476 - if (dev->rtnl_link_ops)
2477 + if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
2478 continue;
2479
2480 /* Push remaining network devices to init_net */
2481 diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
2482 index ac5c4f6cdefee..85a88425edc48 100644
2483 --- a/net/ipv4/inet_connection_sock.c
2484 +++ b/net/ipv4/inet_connection_sock.c
2485 @@ -700,12 +700,15 @@ static bool reqsk_queue_unlink(struct request_sock *req)
2486 return found;
2487 }
2488
2489 -void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
2490 +bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
2491 {
2492 - if (reqsk_queue_unlink(req)) {
2493 + bool unlinked = reqsk_queue_unlink(req);
2494 +
2495 + if (unlinked) {
2496 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
2497 reqsk_put(req);
2498 }
2499 + return unlinked;
2500 }
2501 EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
2502
2503 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
2504 index 12d242fedffdc..f1f78a742b36a 100644
2505 --- a/net/ipv4/netfilter/arp_tables.c
2506 +++ b/net/ipv4/netfilter/arp_tables.c
2507 @@ -203,7 +203,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
2508
2509 local_bh_disable();
2510 addend = xt_write_recseq_begin();
2511 - private = rcu_access_pointer(table->private);
2512 + private = READ_ONCE(table->private); /* Address dependency. */
2513 cpu = smp_processor_id();
2514 table_base = private->entries;
2515 jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
2516 @@ -649,7 +649,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
2517 {
2518 unsigned int countersize;
2519 struct xt_counters *counters;
2520 - const struct xt_table_info *private = xt_table_get_private_protected(table);
2521 + const struct xt_table_info *private = table->private;
2522
2523 /* We need atomic snapshot of counters: rest doesn't change
2524 * (other than comefrom, which userspace doesn't care
2525 @@ -673,7 +673,7 @@ static int copy_entries_to_user(unsigned int total_size,
2526 unsigned int off, num;
2527 const struct arpt_entry *e;
2528 struct xt_counters *counters;
2529 - struct xt_table_info *private = xt_table_get_private_protected(table);
2530 + struct xt_table_info *private = table->private;
2531 int ret = 0;
2532 void *loc_cpu_entry;
2533
2534 @@ -808,7 +808,7 @@ static int get_info(struct net *net, void __user *user,
2535 t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
2536 if (!IS_ERR(t)) {
2537 struct arpt_getinfo info;
2538 - const struct xt_table_info *private = xt_table_get_private_protected(t);
2539 + const struct xt_table_info *private = t->private;
2540 #ifdef CONFIG_COMPAT
2541 struct xt_table_info tmp;
2542
2543 @@ -861,7 +861,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
2544
2545 t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
2546 if (!IS_ERR(t)) {
2547 - const struct xt_table_info *private = xt_table_get_private_protected(t);
2548 + const struct xt_table_info *private = t->private;
2549
2550 if (get.size == private->size)
2551 ret = copy_entries_to_user(private->size,
2552 @@ -1020,7 +1020,7 @@ static int do_add_counters(struct net *net, const void __user *user,
2553 }
2554
2555 local_bh_disable();
2556 - private = xt_table_get_private_protected(t);
2557 + private = t->private;
2558 if (private->number != tmp.num_counters) {
2559 ret = -EINVAL;
2560 goto unlock_up_free;
2561 @@ -1357,7 +1357,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
2562 void __user *userptr)
2563 {
2564 struct xt_counters *counters;
2565 - const struct xt_table_info *private = xt_table_get_private_protected(table);
2566 + const struct xt_table_info *private = table->private;
2567 void __user *pos;
2568 unsigned int size;
2569 int ret = 0;
2570 @@ -1406,7 +1406,7 @@ static int compat_get_entries(struct net *net,
2571 xt_compat_lock(NFPROTO_ARP);
2572 t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
2573 if (!IS_ERR(t)) {
2574 - const struct xt_table_info *private = xt_table_get_private_protected(t);
2575 + const struct xt_table_info *private = t->private;
2576 struct xt_table_info info;
2577
2578 ret = compat_table_info(private, &info);
2579 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
2580 index cbbc8a7b82788..10b91ebdf2131 100644
2581 --- a/net/ipv4/netfilter/ip_tables.c
2582 +++ b/net/ipv4/netfilter/ip_tables.c
2583 @@ -258,7 +258,7 @@ ipt_do_table(struct sk_buff *skb,
2584 WARN_ON(!(table->valid_hooks & (1 << hook)));
2585 local_bh_disable();
2586 addend = xt_write_recseq_begin();
2587 - private = rcu_access_pointer(table->private);
2588 + private = READ_ONCE(table->private); /* Address dependency. */
2589 cpu = smp_processor_id();
2590 table_base = private->entries;
2591 jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
2592 @@ -791,7 +791,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
2593 {
2594 unsigned int countersize;
2595 struct xt_counters *counters;
2596 - const struct xt_table_info *private = xt_table_get_private_protected(table);
2597 + const struct xt_table_info *private = table->private;
2598
2599 /* We need atomic snapshot of counters: rest doesn't change
2600 (other than comefrom, which userspace doesn't care
2601 @@ -815,7 +815,7 @@ copy_entries_to_user(unsigned int total_size,
2602 unsigned int off, num;
2603 const struct ipt_entry *e;
2604 struct xt_counters *counters;
2605 - const struct xt_table_info *private = xt_table_get_private_protected(table);
2606 + const struct xt_table_info *private = table->private;
2607 int ret = 0;
2608 const void *loc_cpu_entry;
2609
2610 @@ -965,7 +965,7 @@ static int get_info(struct net *net, void __user *user,
2611 t = xt_request_find_table_lock(net, AF_INET, name);
2612 if (!IS_ERR(t)) {
2613 struct ipt_getinfo info;
2614 - const struct xt_table_info *private = xt_table_get_private_protected(t);
2615 + const struct xt_table_info *private = t->private;
2616 #ifdef CONFIG_COMPAT
2617 struct xt_table_info tmp;
2618
2619 @@ -1019,7 +1019,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
2620
2621 t = xt_find_table_lock(net, AF_INET, get.name);
2622 if (!IS_ERR(t)) {
2623 - const struct xt_table_info *private = xt_table_get_private_protected(t);
2624 + const struct xt_table_info *private = t->private;
2625 if (get.size == private->size)
2626 ret = copy_entries_to_user(private->size,
2627 t, uptr->entrytable);
2628 @@ -1175,7 +1175,7 @@ do_add_counters(struct net *net, const void __user *user,
2629 }
2630
2631 local_bh_disable();
2632 - private = xt_table_get_private_protected(t);
2633 + private = t->private;
2634 if (private->number != tmp.num_counters) {
2635 ret = -EINVAL;
2636 goto unlock_up_free;
2637 @@ -1570,7 +1570,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
2638 void __user *userptr)
2639 {
2640 struct xt_counters *counters;
2641 - const struct xt_table_info *private = xt_table_get_private_protected(table);
2642 + const struct xt_table_info *private = table->private;
2643 void __user *pos;
2644 unsigned int size;
2645 int ret = 0;
2646 @@ -1616,7 +1616,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
2647 xt_compat_lock(AF_INET);
2648 t = xt_find_table_lock(net, AF_INET, get.name);
2649 if (!IS_ERR(t)) {
2650 - const struct xt_table_info *private = xt_table_get_private_protected(t);
2651 + const struct xt_table_info *private = t->private;
2652 struct xt_table_info info;
2653 ret = compat_table_info(private, &info);
2654 if (!ret && get.size == info.size)
2655 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
2656 index c802bc80c4006..194743bd3fc10 100644
2657 --- a/net/ipv4/tcp_minisocks.c
2658 +++ b/net/ipv4/tcp_minisocks.c
2659 @@ -796,8 +796,11 @@ embryonic_reset:
2660 tcp_reset(sk);
2661 }
2662 if (!fastopen) {
2663 - inet_csk_reqsk_queue_drop(sk, req);
2664 - __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
2665 + bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
2666 +
2667 + if (unlinked)
2668 + __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
2669 + *req_stolen = !unlinked;
2670 }
2671 return NULL;
2672 }
2673 diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2674 index 906ac5e6d96cd..bb68290ad68d8 100644
2675 --- a/net/ipv6/ip6_fib.c
2676 +++ b/net/ipv6/ip6_fib.c
2677 @@ -2382,7 +2382,7 @@ static int ipv6_route_seq_show(struct seq_file *seq, void *v)
2678 const struct net_device *dev;
2679
2680 if (rt->nh)
2681 - fib6_nh = nexthop_fib6_nh(rt->nh);
2682 + fib6_nh = nexthop_fib6_nh_bh(rt->nh);
2683
2684 seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen);
2685
2686 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
2687 index 01cdde25eb16d..c973ace208c51 100644
2688 --- a/net/ipv6/netfilter/ip6_tables.c
2689 +++ b/net/ipv6/netfilter/ip6_tables.c
2690 @@ -280,7 +280,7 @@ ip6t_do_table(struct sk_buff *skb,
2691
2692 local_bh_disable();
2693 addend = xt_write_recseq_begin();
2694 - private = rcu_access_pointer(table->private);
2695 + private = READ_ONCE(table->private); /* Address dependency. */
2696 cpu = smp_processor_id();
2697 table_base = private->entries;
2698 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
2699 @@ -807,7 +807,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
2700 {
2701 unsigned int countersize;
2702 struct xt_counters *counters;
2703 - const struct xt_table_info *private = xt_table_get_private_protected(table);
2704 + const struct xt_table_info *private = table->private;
2705
2706 /* We need atomic snapshot of counters: rest doesn't change
2707 (other than comefrom, which userspace doesn't care
2708 @@ -831,7 +831,7 @@ copy_entries_to_user(unsigned int total_size,
2709 unsigned int off, num;
2710 const struct ip6t_entry *e;
2711 struct xt_counters *counters;
2712 - const struct xt_table_info *private = xt_table_get_private_protected(table);
2713 + const struct xt_table_info *private = table->private;
2714 int ret = 0;
2715 const void *loc_cpu_entry;
2716
2717 @@ -981,7 +981,7 @@ static int get_info(struct net *net, void __user *user,
2718 t = xt_request_find_table_lock(net, AF_INET6, name);
2719 if (!IS_ERR(t)) {
2720 struct ip6t_getinfo info;
2721 - const struct xt_table_info *private = xt_table_get_private_protected(t);
2722 + const struct xt_table_info *private = t->private;
2723 #ifdef CONFIG_COMPAT
2724 struct xt_table_info tmp;
2725
2726 @@ -1036,7 +1036,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
2727
2728 t = xt_find_table_lock(net, AF_INET6, get.name);
2729 if (!IS_ERR(t)) {
2730 - struct xt_table_info *private = xt_table_get_private_protected(t);
2731 + struct xt_table_info *private = t->private;
2732 if (get.size == private->size)
2733 ret = copy_entries_to_user(private->size,
2734 t, uptr->entrytable);
2735 @@ -1191,7 +1191,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
2736 }
2737
2738 local_bh_disable();
2739 - private = xt_table_get_private_protected(t);
2740 + private = t->private;
2741 if (private->number != tmp.num_counters) {
2742 ret = -EINVAL;
2743 goto unlock_up_free;
2744 @@ -1579,7 +1579,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
2745 void __user *userptr)
2746 {
2747 struct xt_counters *counters;
2748 - const struct xt_table_info *private = xt_table_get_private_protected(table);
2749 + const struct xt_table_info *private = table->private;
2750 void __user *pos;
2751 unsigned int size;
2752 int ret = 0;
2753 @@ -1625,7 +1625,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
2754 xt_compat_lock(AF_INET6);
2755 t = xt_find_table_lock(net, AF_INET6, get.name);
2756 if (!IS_ERR(t)) {
2757 - const struct xt_table_info *private = xt_table_get_private_protected(t);
2758 + const struct xt_table_info *private = t->private;
2759 struct xt_table_info info;
2760 ret = compat_table_info(private, &info);
2761 if (!ret && get.size == info.size)
2762 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
2763 index fa293feef935d..677928bf13d13 100644
2764 --- a/net/mac80211/cfg.c
2765 +++ b/net/mac80211/cfg.c
2766 @@ -2906,14 +2906,14 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
2767 continue;
2768
2769 for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
2770 - if (~sdata->rc_rateidx_mcs_mask[i][j]) {
2771 + if (sdata->rc_rateidx_mcs_mask[i][j] != 0xff) {
2772 sdata->rc_has_mcs_mask[i] = true;
2773 break;
2774 }
2775 }
2776
2777 for (j = 0; j < NL80211_VHT_NSS_MAX; j++) {
2778 - if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) {
2779 + if (sdata->rc_rateidx_vht_mcs_mask[i][j] != 0xffff) {
2780 sdata->rc_has_vht_mcs_mask[i] = true;
2781 break;
2782 }
2783 diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
2784 index 0a6ff01c68a96..0e26c83b6b412 100644
2785 --- a/net/mac80211/ibss.c
2786 +++ b/net/mac80211/ibss.c
2787 @@ -1868,6 +1868,8 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
2788
2789 /* remove beacon */
2790 kfree(sdata->u.ibss.ie);
2791 + sdata->u.ibss.ie = NULL;
2792 + sdata->u.ibss.ie_len = 0;
2793
2794 /* on the next join, re-program HT parameters */
2795 memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
2796 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
2797 index b7f0d52e5f1b6..783af451a8325 100644
2798 --- a/net/netfilter/nf_conntrack_netlink.c
2799 +++ b/net/netfilter/nf_conntrack_netlink.c
2800 @@ -2680,6 +2680,7 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
2801 memset(&m, 0xFF, sizeof(m));
2802 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
2803 m.src.u.all = mask->src.u.all;
2804 + m.src.l3num = tuple->src.l3num;
2805 m.dst.protonum = tuple->dst.protonum;
2806
2807 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK);
2808 diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
2809 index 8b60fc04c67c2..5c35d64d1f342 100644
2810 --- a/net/netfilter/x_tables.c
2811 +++ b/net/netfilter/x_tables.c
2812 @@ -1351,14 +1351,6 @@ struct xt_counters *xt_counters_alloc(unsigned int counters)
2813 }
2814 EXPORT_SYMBOL(xt_counters_alloc);
2815
2816 -struct xt_table_info
2817 -*xt_table_get_private_protected(const struct xt_table *table)
2818 -{
2819 - return rcu_dereference_protected(table->private,
2820 - mutex_is_locked(&xt[table->af].mutex));
2821 -}
2822 -EXPORT_SYMBOL(xt_table_get_private_protected);
2823 -
2824 struct xt_table_info *
2825 xt_replace_table(struct xt_table *table,
2826 unsigned int num_counters,
2827 @@ -1366,6 +1358,7 @@ xt_replace_table(struct xt_table *table,
2828 int *error)
2829 {
2830 struct xt_table_info *private;
2831 + unsigned int cpu;
2832 int ret;
2833
2834 ret = xt_jumpstack_alloc(newinfo);
2835 @@ -1375,20 +1368,47 @@ xt_replace_table(struct xt_table *table,
2836 }
2837
2838 /* Do the substitution. */
2839 - private = xt_table_get_private_protected(table);
2840 + local_bh_disable();
2841 + private = table->private;
2842
2843 /* Check inside lock: is the old number correct? */
2844 if (num_counters != private->number) {
2845 pr_debug("num_counters != table->private->number (%u/%u)\n",
2846 num_counters, private->number);
2847 + local_bh_enable();
2848 *error = -EAGAIN;
2849 return NULL;
2850 }
2851
2852 newinfo->initial_entries = private->initial_entries;
2853 + /*
2854 + * Ensure contents of newinfo are visible before assigning to
2855 + * private.
2856 + */
2857 + smp_wmb();
2858 + table->private = newinfo;
2859 +
2860 + /* make sure all cpus see new ->private value */
2861 + smp_mb();
2862
2863 - rcu_assign_pointer(table->private, newinfo);
2864 - synchronize_rcu();
2865 + /*
2866 + * Even though table entries have now been swapped, other CPU's
2867 + * may still be using the old entries...
2868 + */
2869 + local_bh_enable();
2870 +
2871 + /* ... so wait for even xt_recseq on all cpus */
2872 + for_each_possible_cpu(cpu) {
2873 + seqcount_t *s = &per_cpu(xt_recseq, cpu);
2874 + u32 seq = raw_read_seqcount(s);
2875 +
2876 + if (seq & 1) {
2877 + do {
2878 + cond_resched();
2879 + cpu_relax();
2880 + } while (seq == raw_read_seqcount(s));
2881 + }
2882 + }
2883
2884 #ifdef CONFIG_AUDIT
2885 if (audit_enabled) {
2886 @@ -1429,12 +1449,12 @@ struct xt_table *xt_register_table(struct net *net,
2887 }
2888
2889 /* Simplifies replace_table code. */
2890 - rcu_assign_pointer(table->private, bootstrap);
2891 + table->private = bootstrap;
2892
2893 if (!xt_replace_table(table, 0, newinfo, &ret))
2894 goto unlock;
2895
2896 - private = xt_table_get_private_protected(table);
2897 + private = table->private;
2898 pr_debug("table->private->number = %u\n", private->number);
2899
2900 /* save number of initial entries */
2901 @@ -1457,8 +1477,7 @@ void *xt_unregister_table(struct xt_table *table)
2902 struct xt_table_info *private;
2903
2904 mutex_lock(&xt[table->af].mutex);
2905 - private = xt_table_get_private_protected(table);
2906 - RCU_INIT_POINTER(table->private, NULL);
2907 + private = table->private;
2908 list_del(&table->list);
2909 mutex_unlock(&xt[table->af].mutex);
2910 kfree(table);
2911 diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
2912 index 0d4f12dbd2443..46273a8383615 100644
2913 --- a/net/qrtr/qrtr.c
2914 +++ b/net/qrtr/qrtr.c
2915 @@ -862,6 +862,11 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
2916 rc = copied;
2917
2918 if (addr) {
2919 + /* There is an anonymous 2-byte hole after sq_family,
2920 + * make sure to clear it.
2921 + */
2922 + memset(addr, 0, sizeof(*addr));
2923 +
2924 cb = (struct qrtr_cb *)skb->cb;
2925 addr->sq_family = AF_QIPCRTR;
2926 addr->sq_node = cb->src_node;
2927 diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
2928 index d856b395ee8eb..e54f6eabfa0c0 100644
2929 --- a/net/sched/sch_choke.c
2930 +++ b/net/sched/sch_choke.c
2931 @@ -351,6 +351,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
2932 struct sk_buff **old = NULL;
2933 unsigned int mask;
2934 u32 max_P;
2935 + u8 *stab;
2936
2937 if (opt == NULL)
2938 return -EINVAL;
2939 @@ -367,8 +368,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
2940 max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
2941
2942 ctl = nla_data(tb[TCA_CHOKE_PARMS]);
2943 -
2944 - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
2945 + stab = nla_data(tb[TCA_CHOKE_STAB]);
2946 + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab))
2947 return -EINVAL;
2948
2949 if (ctl->limit > CHOKE_MAX_QUEUE)
2950 @@ -418,7 +419,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
2951
2952 red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
2953 ctl->Plog, ctl->Scell_log,
2954 - nla_data(tb[TCA_CHOKE_STAB]),
2955 + stab,
2956 max_P);
2957 red_set_vars(&q->vars);
2958
2959 diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
2960 index e0bc77533acc3..f4132dc25ac05 100644
2961 --- a/net/sched/sch_gred.c
2962 +++ b/net/sched/sch_gred.c
2963 @@ -480,7 +480,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
2964 struct gred_sched *table = qdisc_priv(sch);
2965 struct gred_sched_data *q = table->tab[dp];
2966
2967 - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) {
2968 + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
2969 NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
2970 return -EINVAL;
2971 }
2972 diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
2973 index 71e167e91a48d..7741f102be4a0 100644
2974 --- a/net/sched/sch_red.c
2975 +++ b/net/sched/sch_red.c
2976 @@ -197,6 +197,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
2977 struct tc_red_qopt *ctl;
2978 int err;
2979 u32 max_P;
2980 + u8 *stab;
2981
2982 if (opt == NULL)
2983 return -EINVAL;
2984 @@ -213,7 +214,9 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
2985 max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
2986
2987 ctl = nla_data(tb[TCA_RED_PARMS]);
2988 - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
2989 + stab = nla_data(tb[TCA_RED_STAB]);
2990 + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
2991 + ctl->Scell_log, stab))
2992 return -EINVAL;
2993
2994 if (ctl->limit > 0) {
2995 @@ -238,7 +241,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
2996 red_set_parms(&q->parms,
2997 ctl->qth_min, ctl->qth_max, ctl->Wlog,
2998 ctl->Plog, ctl->Scell_log,
2999 - nla_data(tb[TCA_RED_STAB]),
3000 + stab,
3001 max_P);
3002 red_set_vars(&q->vars);
3003
3004 diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
3005 index 6e13e137883c3..b92bafaf83f36 100644
3006 --- a/net/sched/sch_sfq.c
3007 +++ b/net/sched/sch_sfq.c
3008 @@ -647,7 +647,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
3009 }
3010
3011 if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
3012 - ctl_v1->Wlog, ctl_v1->Scell_log))
3013 + ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
3014 return -EINVAL;
3015 if (ctl_v1 && ctl_v1->qth_min) {
3016 p = kmalloc(sizeof(*p), GFP_KERNEL);
3017 diff --git a/security/integrity/iint.c b/security/integrity/iint.c
3018 index e12c4900510f6..0b9cb639a0ed0 100644
3019 --- a/security/integrity/iint.c
3020 +++ b/security/integrity/iint.c
3021 @@ -98,6 +98,14 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
3022 struct rb_node *node, *parent = NULL;
3023 struct integrity_iint_cache *iint, *test_iint;
3024
3025 + /*
3026 + * The integrity's "iint_cache" is initialized at security_init(),
3027 + * unless it is not included in the ordered list of LSMs enabled
3028 + * on the boot command line.
3029 + */
3030 + if (!iint_cache)
3031 + panic("%s: lsm=integrity required.\n", __func__);
3032 +
3033 iint = integrity_iint_find(inode);
3034 if (iint)
3035 return iint;
3036 diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
3037 index baeda6c9716a1..6ed80a4cba01a 100644
3038 --- a/sound/hda/intel-nhlt.c
3039 +++ b/sound/hda/intel-nhlt.c
3040 @@ -72,6 +72,11 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
3041 if (!nhlt)
3042 return 0;
3043
3044 + if (nhlt->header.length <= sizeof(struct acpi_table_header)) {
3045 + dev_warn(dev, "Invalid DMIC description table\n");
3046 + return 0;
3047 + }
3048 +
3049 for (j = 0, epnt = nhlt->desc; j < nhlt->endpoint_count; j++,
3050 epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length)) {
3051
3052 diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
3053 index 283caeaaffc30..9758bfa592321 100644
3054 --- a/tools/lib/bpf/Makefile
3055 +++ b/tools/lib/bpf/Makefile
3056 @@ -241,7 +241,7 @@ define do_install
3057 if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
3058 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
3059 fi; \
3060 - $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
3061 + $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
3062 endef
3063
3064 install_lib: all_cmd
3065 diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
3066 index d9e386b8f47ed..b2fc452504501 100644
3067 --- a/tools/lib/bpf/btf_dump.c
3068 +++ b/tools/lib/bpf/btf_dump.c
3069 @@ -443,7 +443,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
3070 return err;
3071
3072 case BTF_KIND_ARRAY:
3073 - return btf_dump_order_type(d, btf_array(t)->type, through_ptr);
3074 + return btf_dump_order_type(d, btf_array(t)->type, false);
3075
3076 case BTF_KIND_STRUCT:
3077 case BTF_KIND_UNION: {
3078 diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
3079 index 88416be2bf994..5ec8043c71bca 100644
3080 --- a/tools/lib/bpf/netlink.c
3081 +++ b/tools/lib/bpf/netlink.c
3082 @@ -37,7 +37,7 @@ int libbpf_netlink_open(__u32 *nl_pid)
3083 memset(&sa, 0, sizeof(sa));
3084 sa.nl_family = AF_NETLINK;
3085
3086 - sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
3087 + sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
3088 if (sock < 0)
3089 return -errno;
3090
3091 diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
3092 index 8470dfe9fe97b..61b8dc45428ff 100644
3093 --- a/tools/perf/util/auxtrace.c
3094 +++ b/tools/perf/util/auxtrace.c
3095 @@ -252,10 +252,6 @@ static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
3096 queue->set = true;
3097 queue->tid = buffer->tid;
3098 queue->cpu = buffer->cpu;
3099 - } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
3100 - pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
3101 - queue->cpu, queue->tid, buffer->cpu, buffer->tid);
3102 - return -EINVAL;
3103 }
3104
3105 buffer->buffer_nr = queues->next_buffer_nr++;
3106 diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
3107 index b4e9a1d8c6cdb..141670ab4e670 100644
3108 --- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
3109 +++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
3110 @@ -508,10 +508,8 @@ int _ip6geneve_get_tunnel(struct __sk_buff *skb)
3111 }
3112
3113 ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
3114 - if (ret < 0) {
3115 - ERROR(ret);
3116 - return TC_ACT_SHOT;
3117 - }
3118 + if (ret < 0)
3119 + gopt.opt_class = 0;
3120
3121 bpf_trace_printk(fmt, sizeof(fmt),
3122 key.tunnel_id, key.remote_ipv4, gopt.opt_class);
3123 diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
3124 index ce6bea9675c07..0ccb1dda099ae 100755
3125 --- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
3126 +++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
3127 @@ -658,7 +658,7 @@ test_ecn_decap()
3128 # In accordance with INET_ECN_decapsulate()
3129 __test_ecn_decap 00 00 0x00
3130 __test_ecn_decap 01 01 0x01
3131 - __test_ecn_decap 02 01 0x02
3132 + __test_ecn_decap 02 01 0x01
3133 __test_ecn_decap 01 03 0x03
3134 __test_ecn_decap 02 03 0x03
3135 test_ecn_decap_error