Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0363-4.9.264-all-fixes.patch

Revision 3665
Mon Oct 24 14:07:42 2022 UTC by niro
File size: 72746 bytes
-linux-4.9.264
1 niro 3665 diff --git a/Makefile b/Makefile
2     index 80b265a383bb6..2ae6f4b707dd9 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 263
9     +SUBLEVEL = 264
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
14     index 97d331ec25001..cd8db85f7c119 100644
15     --- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
16     +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
17     @@ -177,6 +177,7 @@
18     ranges = <0x0 0x00 0x1700000 0x100000>;
19     reg = <0x00 0x1700000 0x0 0x100000>;
20     interrupts = <0 75 0x4>;
21     + dma-coherent;
22    
23     sec_jr0: jr@10000 {
24     compatible = "fsl,sec-v5.4-job-ring",
25     diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
26     index 86a43450f014c..bdf5ec2b83565 100644
27     --- a/arch/arm64/include/asm/futex.h
28     +++ b/arch/arm64/include/asm/futex.h
29     @@ -26,7 +26,12 @@
30     #include <asm/errno.h>
31     #include <asm/sysreg.h>
32    
33     +#define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
34     +
35     #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
36     +do { \
37     + unsigned int loops = FUTEX_MAX_LOOPS; \
38     + \
39     asm volatile( \
40     ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
41     CONFIG_ARM64_PAN) \
42     @@ -34,21 +39,26 @@
43     "1: ldxr %w1, %2\n" \
44     insn "\n" \
45     "2: stlxr %w0, %w3, %2\n" \
46     -" cbnz %w0, 1b\n" \
47     -" dmb ish\n" \
48     +" cbz %w0, 3f\n" \
49     +" sub %w4, %w4, %w0\n" \
50     +" cbnz %w4, 1b\n" \
51     +" mov %w0, %w7\n" \
52     "3:\n" \
53     +" dmb ish\n" \
54     " .pushsection .fixup,\"ax\"\n" \
55     " .align 2\n" \
56     -"4: mov %w0, %w5\n" \
57     +"4: mov %w0, %w6\n" \
58     " b 3b\n" \
59     " .popsection\n" \
60     _ASM_EXTABLE(1b, 4b) \
61     _ASM_EXTABLE(2b, 4b) \
62     ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
63     CONFIG_ARM64_PAN) \
64     - : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
65     - : "r" (oparg), "Ir" (-EFAULT) \
66     - : "memory")
67     + : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp), \
68     + "+r" (loops) \
69     + : "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \
70     + : "memory"); \
71     +} while (0)
72    
73     static inline int
74     arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
75     @@ -59,23 +69,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
76    
77     switch (op) {
78     case FUTEX_OP_SET:
79     - __futex_atomic_op("mov %w3, %w4",
80     + __futex_atomic_op("mov %w3, %w5",
81     ret, oldval, uaddr, tmp, oparg);
82     break;
83     case FUTEX_OP_ADD:
84     - __futex_atomic_op("add %w3, %w1, %w4",
85     + __futex_atomic_op("add %w3, %w1, %w5",
86     ret, oldval, uaddr, tmp, oparg);
87     break;
88     case FUTEX_OP_OR:
89     - __futex_atomic_op("orr %w3, %w1, %w4",
90     + __futex_atomic_op("orr %w3, %w1, %w5",
91     ret, oldval, uaddr, tmp, oparg);
92     break;
93     case FUTEX_OP_ANDN:
94     - __futex_atomic_op("and %w3, %w1, %w4",
95     + __futex_atomic_op("and %w3, %w1, %w5",
96     ret, oldval, uaddr, tmp, ~oparg);
97     break;
98     case FUTEX_OP_XOR:
99     - __futex_atomic_op("eor %w3, %w1, %w4",
100     + __futex_atomic_op("eor %w3, %w1, %w5",
101     ret, oldval, uaddr, tmp, oparg);
102     break;
103     default:
104     @@ -95,6 +105,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
105     u32 oldval, u32 newval)
106     {
107     int ret = 0;
108     + unsigned int loops = FUTEX_MAX_LOOPS;
109     u32 val, tmp;
110     u32 __user *uaddr;
111    
112     @@ -106,21 +117,25 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
113     ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
114     " prfm pstl1strm, %2\n"
115     "1: ldxr %w1, %2\n"
116     -" sub %w3, %w1, %w4\n"
117     -" cbnz %w3, 3f\n"
118     -"2: stlxr %w3, %w5, %2\n"
119     -" cbnz %w3, 1b\n"
120     -" dmb ish\n"
121     +" sub %w3, %w1, %w5\n"
122     +" cbnz %w3, 4f\n"
123     +"2: stlxr %w3, %w6, %2\n"
124     +" cbz %w3, 3f\n"
125     +" sub %w4, %w4, %w3\n"
126     +" cbnz %w4, 1b\n"
127     +" mov %w0, %w8\n"
128     "3:\n"
129     +" dmb ish\n"
130     +"4:\n"
131     " .pushsection .fixup,\"ax\"\n"
132     -"4: mov %w0, %w6\n"
133     -" b 3b\n"
134     +"5: mov %w0, %w7\n"
135     +" b 4b\n"
136     " .popsection\n"
137     - _ASM_EXTABLE(1b, 4b)
138     - _ASM_EXTABLE(2b, 4b)
139     + _ASM_EXTABLE(1b, 5b)
140     + _ASM_EXTABLE(2b, 5b)
141     ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
142     - : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
143     - : "r" (oldval), "r" (newval), "Ir" (-EFAULT)
144     + : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
145     + : "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN)
146     : "memory");
147    
148     *uval = val;
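
The arm64 futex hunks above cap the LDXR/STLXR retry loop at FUTEX_MAX_LOOPS and hand back -EAGAIN instead of looping forever when the exclusive store keeps failing; the kernel/futex.c hunks later in this patch then cond_resched() and retry. A minimal user-space analogue of that bounded-retry contract, using C11 compare-exchange in place of the arm64 exclusives (the names and the add operation are illustrative, not the kernel code):

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define FUTEX_MAX_LOOPS 128

    /* Bounded atomic-add: give up with -EAGAIN instead of spinning
     * forever when the update keeps losing the race. */
    static int bounded_atomic_add(_Atomic unsigned *uaddr, unsigned oparg,
                                  unsigned *oldval)
    {
        unsigned loops = FUTEX_MAX_LOOPS;
        unsigned old = atomic_load_explicit(uaddr, memory_order_relaxed);

        while (!atomic_compare_exchange_weak(uaddr, &old, old + oparg)) {
            if (--loops == 0)
                return -EAGAIN;   /* caller reschedules and retries */
        }
        *oldval = old;
        return 0;
    }

    int main(void)
    {
        _Atomic unsigned word = 40;
        unsigned old;
        int ret = bounded_atomic_add(&word, 2, &old);

        printf("ret=%d old=%u new=%u\n", ret, old, atomic_load(&word));
        return 0;
    }

The point is only the error contract: a heavily contended update now fails fast with -EAGAIN rather than stalling the CPU indefinitely.
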
149     diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
150     index 1d0b875fec44f..ec909eec0b4c6 100644
151     --- a/arch/ia64/include/asm/syscall.h
152     +++ b/arch/ia64/include/asm/syscall.h
153     @@ -35,7 +35,7 @@ static inline void syscall_rollback(struct task_struct *task,
154     static inline long syscall_get_error(struct task_struct *task,
155     struct pt_regs *regs)
156     {
157     - return regs->r10 == -1 ? regs->r8:0;
158     + return regs->r10 == -1 ? -regs->r8:0;
159     }
160    
161     static inline long syscall_get_return_value(struct task_struct *task,
162     diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
163     index 36f660da81242..56007258c0141 100644
164     --- a/arch/ia64/kernel/ptrace.c
165     +++ b/arch/ia64/kernel/ptrace.c
166     @@ -2144,27 +2144,39 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
167     {
168     struct syscall_get_set_args *args = data;
169     struct pt_regs *pt = args->regs;
170     - unsigned long *krbs, cfm, ndirty;
171     + unsigned long *krbs, cfm, ndirty, nlocals, nouts;
172     int i, count;
173    
174     if (unw_unwind_to_user(info) < 0)
175     return;
176    
177     + /*
178     + * We get here via a few paths:
179     + * - break instruction: cfm is shared with caller.
180     + * syscall args are in out= regs, locals are non-empty.
181     + * - epc instruction: cfm is set by br.call
182     + * locals don't exist.
183     + *
184     + * For both cases arguments are reachable in cfm.sof - cfm.sol.
185     + * CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ]
186     + */
187     cfm = pt->cr_ifs;
188     + nlocals = (cfm >> 7) & 0x7f; /* aka sol */
189     + nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */
190     krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
191     ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
192    
193     count = 0;
194     if (in_syscall(pt))
195     - count = min_t(int, args->n, cfm & 0x7f);
196     + count = min_t(int, args->n, nouts);
197    
198     + /* Iterate over outs. */
199     for (i = 0; i < count; i++) {
200     + int j = ndirty + nlocals + i + args->i;
201     if (args->rw)
202     - *ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
203     - args->args[i];
204     + *ia64_rse_skip_regs(krbs, j) = args->args[i];
205     else
206     - args->args[i] = *ia64_rse_skip_regs(krbs,
207     - ndirty + i + args->i);
208     + args->args[i] = *ia64_rse_skip_regs(krbs, j);
209     }
210    
211     if (!args->rw) {
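
The new nlocals/nouts computation just extracts the sol and sof fields from the Current Frame Marker, per the layout in the added comment. A standalone check of that bit arithmetic (the CFM value below is made up for illustration):

    #include <stdio.h>

    /* CFM layout (low bits): [ sor: 17..14 | sol: 13..7 | sof: 6..0 ] */
    static void decode_cfm(unsigned long cfm)
    {
        unsigned long nlocals = (cfm >> 7) & 0x7f;        /* sol */
        unsigned long nouts   = (cfm & 0x7f) - nlocals;   /* sof - sol */

        printf("cfm=%#lx sof=%lu sol=%lu -> %lu out regs\n",
               cfm, cfm & 0x7f, nlocals, nouts);
    }

    int main(void)
    {
        decode_cfm((3UL << 7) | 9);   /* sol=3, sof=9 -> 6 out regs */
        return 0;
    }
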
212     diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
213     index 4a2beef742772..86fdda16bb73e 100644
214     --- a/arch/powerpc/include/asm/dcr-native.h
215     +++ b/arch/powerpc/include/asm/dcr-native.h
216     @@ -65,8 +65,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
217     #define mfdcr(rn) \
218     ({unsigned int rval; \
219     if (__builtin_constant_p(rn) && rn < 1024) \
220     - asm volatile("mfdcr %0," __stringify(rn) \
221     - : "=r" (rval)); \
222     + asm volatile("mfdcr %0, %1" : "=r" (rval) \
223     + : "n" (rn)); \
224     else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
225     rval = mfdcrx(rn); \
226     else \
227     @@ -76,8 +76,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
228     #define mtdcr(rn, v) \
229     do { \
230     if (__builtin_constant_p(rn) && rn < 1024) \
231     - asm volatile("mtdcr " __stringify(rn) ",%0" \
232     - : : "r" (v)); \
233     + asm volatile("mtdcr %0, %1" \
234     + : : "n" (rn), "r" (v)); \
235     else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
236     mtdcrx(rn, v); \
237     else \
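
The mfdcr()/mtdcr() change stops pasting the DCR number into the instruction string via __stringify() and instead passes it as an "n" (compile-time integer) operand, so the compiler evaluates the expression before the assembler ever sees it. The difference can be shown in portable C, with the asm templates merely printed rather than executed:

    #include <stdio.h>

    #define __stringify_1(x) #x
    #define __stringify(x)   __stringify_1(x)

    #define DCRN_BASE 0x080   /* illustrative DCR base, not a real one */

    int main(void)
    {
        /* Old style: the assembler receives the unevaluated source text. */
        printf("old: mfdcr %%0, %s\n", __stringify(DCRN_BASE + 3));
        /* New style: an "n" operand hands over the computed constant. */
        printf("new: mfdcr %%0, %#x\n", DCRN_BASE + 3);
        return 0;
    }
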
238     diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
239     index f5ca15622dc9c..2bfa4deb8cae8 100644
240     --- a/arch/x86/include/asm/tlbflush.h
241     +++ b/arch/x86/include/asm/tlbflush.h
242     @@ -245,12 +245,15 @@ static inline void __native_flush_tlb_single(unsigned long addr)
243     * ASID. But, userspace flushes are probably much more
244     * important performance-wise.
245     *
246     - * Make sure to do only a single invpcid when KAISER is
247     - * disabled and we have only a single ASID.
248     + * In the KAISER disabled case, do an INVLPG to make sure
249     + * the mapping is flushed in case it is a global one.
250     */
251     - if (kaiser_enabled)
252     + if (kaiser_enabled) {
253     invpcid_flush_one(X86_CR3_PCID_ASID_USER, addr);
254     - invpcid_flush_one(X86_CR3_PCID_ASID_KERN, addr);
255     + invpcid_flush_one(X86_CR3_PCID_ASID_KERN, addr);
256     + } else {
257     + asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
258     + }
259     }
260    
261     static inline void __flush_tlb_all(void)
262     diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
263     index eae0b278d5172..56c429ea6aaf4 100644
264     --- a/drivers/acpi/internal.h
265     +++ b/drivers/acpi/internal.h
266     @@ -18,6 +18,8 @@
267     #ifndef _ACPI_INTERNAL_H_
268     #define _ACPI_INTERNAL_H_
269    
270     +#include <linux/idr.h>
271     +
272     #define PREFIX "ACPI: "
273    
274     int early_acpi_osi_init(void);
275     @@ -97,9 +99,11 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
276    
277     extern struct list_head acpi_bus_id_list;
278    
279     +#define ACPI_MAX_DEVICE_INSTANCES 4096
280     +
281     struct acpi_device_bus_id {
282     const char *bus_id;
283     - unsigned int instance_no;
284     + struct ida instance_ida;
285     struct list_head node;
286     };
287    
288     diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
289     index 5aa4a01f698fe..d749fe20fbfc5 100644
290     --- a/drivers/acpi/scan.c
291     +++ b/drivers/acpi/scan.c
292     @@ -481,9 +481,8 @@ static void acpi_device_del(struct acpi_device *device)
293     list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
294     if (!strcmp(acpi_device_bus_id->bus_id,
295     acpi_device_hid(device))) {
296     - if (acpi_device_bus_id->instance_no > 0)
297     - acpi_device_bus_id->instance_no--;
298     - else {
299     + ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no);
300     + if (ida_is_empty(&acpi_device_bus_id->instance_ida)) {
301     list_del(&acpi_device_bus_id->node);
302     kfree_const(acpi_device_bus_id->bus_id);
303     kfree(acpi_device_bus_id);
304     @@ -622,12 +621,38 @@ void acpi_bus_put_acpi_device(struct acpi_device *adev)
305     put_device(&adev->dev);
306     }
307    
308     +static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
309     +{
310     + struct acpi_device_bus_id *acpi_device_bus_id;
311     +
312     + /* Find suitable bus_id and instance number in acpi_bus_id_list. */
313     + list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
314     + if (!strcmp(acpi_device_bus_id->bus_id, dev_id))
315     + return acpi_device_bus_id;
316     + }
317     + return NULL;
318     +}
319     +
320     +static int acpi_device_set_name(struct acpi_device *device,
321     + struct acpi_device_bus_id *acpi_device_bus_id)
322     +{
323     + struct ida *instance_ida = &acpi_device_bus_id->instance_ida;
324     + int result;
325     +
326     + result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL);
327     + if (result < 0)
328     + return result;
329     +
330     + device->pnp.instance_no = result;
331     + dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result);
332     + return 0;
333     +}
334     +
335     int acpi_device_add(struct acpi_device *device,
336     void (*release)(struct device *))
337     {
338     + struct acpi_device_bus_id *acpi_device_bus_id;
339     int result;
340     - struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
341     - int found = 0;
342    
343     if (device->handle) {
344     acpi_status status;
345     @@ -653,41 +678,38 @@ int acpi_device_add(struct acpi_device *device,
346     INIT_LIST_HEAD(&device->del_list);
347     mutex_init(&device->physical_node_lock);
348    
349     - new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
350     - if (!new_bus_id) {
351     - pr_err(PREFIX "Memory allocation error\n");
352     - result = -ENOMEM;
353     - goto err_detach;
354     - }
355     -
356     mutex_lock(&acpi_device_lock);
357     - /*
358     - * Find suitable bus_id and instance number in acpi_bus_id_list
359     - * If failed, create one and link it into acpi_bus_id_list
360     - */
361     - list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
362     - if (!strcmp(acpi_device_bus_id->bus_id,
363     - acpi_device_hid(device))) {
364     - acpi_device_bus_id->instance_no++;
365     - found = 1;
366     - kfree(new_bus_id);
367     - break;
368     +
369     + acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device));
370     + if (acpi_device_bus_id) {
371     + result = acpi_device_set_name(device, acpi_device_bus_id);
372     + if (result)
373     + goto err_unlock;
374     + } else {
375     + acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id),
376     + GFP_KERNEL);
377     + if (!acpi_device_bus_id) {
378     + result = -ENOMEM;
379     + goto err_unlock;
380     }
381     - }
382     - if (!found) {
383     - acpi_device_bus_id = new_bus_id;
384     acpi_device_bus_id->bus_id =
385     kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
386     if (!acpi_device_bus_id->bus_id) {
387     - pr_err(PREFIX "Memory allocation error for bus id\n");
388     + kfree(acpi_device_bus_id);
389     result = -ENOMEM;
390     - goto err_free_new_bus_id;
391     + goto err_unlock;
392     + }
393     +
394     + ida_init(&acpi_device_bus_id->instance_ida);
395     +
396     + result = acpi_device_set_name(device, acpi_device_bus_id);
397     + if (result) {
398     + kfree(acpi_device_bus_id);
399     + goto err_unlock;
400     }
401    
402     - acpi_device_bus_id->instance_no = 0;
403     list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
404     }
405     - dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
406    
407     if (device->parent)
408     list_add_tail(&device->node, &device->parent->children);
409     @@ -719,13 +741,9 @@ int acpi_device_add(struct acpi_device *device,
410     list_del(&device->node);
411     list_del(&device->wakeup_list);
412    
413     - err_free_new_bus_id:
414     - if (!found)
415     - kfree(new_bus_id);
416     -
417     + err_unlock:
418     mutex_unlock(&acpi_device_lock);
419    
420     - err_detach:
421     acpi_detach_data(device->handle, acpi_scan_drop_device);
422     return result;
423     }
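
The scan.c rework swaps the bare instance_no counter for a per-bus_id IDA, so instance numbers are returned on device removal and can be reused, and the acpi_device_bus_id entry itself is only freed once its IDA is empty. A user-space sketch of the same allocate/name/release flow, with a toy bitmap standing in for struct ida (the 64-entry bound and the PNP0A03 name are illustrative):

    #include <stdio.h>

    #define MAX_INSTANCES 64   /* the kernel uses ACPI_MAX_DEVICE_INSTANCES (4096) */

    static unsigned long long instance_map;   /* toy stand-in for struct ida */

    static int instance_get(void)
    {
        for (int i = 0; i < MAX_INSTANCES; i++)
            if (!(instance_map & (1ULL << i))) {
                instance_map |= 1ULL << i;
                return i;
            }
        return -1;                            /* like ida_simple_get() failing */
    }

    static void instance_remove(int no) { instance_map &= ~(1ULL << no); }
    static int  instance_empty(void)    { return instance_map == 0; }

    int main(void)
    {
        int a = instance_get(), b = instance_get();

        printf("PNP0A03:%02x PNP0A03:%02x\n", a, b);       /* device names */
        instance_remove(a);
        printf("reused: PNP0A03:%02x\n", instance_get());  /* slot 0 again */

        instance_remove(0);
        instance_remove(b);
        if (instance_empty())
            printf("last instance gone -> free the bus_id entry\n");
        return 0;
    }
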
424     diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
425     index 9d16743c49178..2b7786cd548f8 100644
426     --- a/drivers/atm/eni.c
427     +++ b/drivers/atm/eni.c
428     @@ -2279,7 +2279,8 @@ out:
429     return rc;
430    
431     err_eni_release:
432     - eni_do_release(dev);
433     + dev->phy = NULL;
434     + iounmap(ENI_DEV(dev)->ioaddr);
435     err_unregister:
436     atm_dev_deregister(dev);
437     err_free_consistent:
438     diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
439     index feb023d7eebd6..40644670cff26 100644
440     --- a/drivers/atm/idt77105.c
441     +++ b/drivers/atm/idt77105.c
442     @@ -261,7 +261,7 @@ static int idt77105_start(struct atm_dev *dev)
443     {
444     unsigned long flags;
445    
446     - if (!(dev->dev_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL)))
447     + if (!(dev->phy_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL)))
448     return -ENOMEM;
449     PRIV(dev)->dev = dev;
450     spin_lock_irqsave(&idt77105_priv_lock, flags);
451     @@ -338,7 +338,7 @@ static int idt77105_stop(struct atm_dev *dev)
452     else
453     idt77105_all = walk->next;
454     dev->phy = NULL;
455     - dev->dev_data = NULL;
456     + dev->phy_data = NULL;
457     kfree(walk);
458     break;
459     }
460     diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
461     index 445505d9ea071..dec6c68156ee5 100644
462     --- a/drivers/atm/lanai.c
463     +++ b/drivers/atm/lanai.c
464     @@ -2240,6 +2240,7 @@ static int lanai_dev_open(struct atm_dev *atmdev)
465     conf1_write(lanai);
466     #endif
467     iounmap(lanai->base);
468     + lanai->base = NULL;
469     error_pci:
470     pci_disable_device(lanai->pci);
471     error:
472     @@ -2252,6 +2253,8 @@ static int lanai_dev_open(struct atm_dev *atmdev)
473     static void lanai_dev_close(struct atm_dev *atmdev)
474     {
475     struct lanai_dev *lanai = (struct lanai_dev *) atmdev->dev_data;
476     + if (lanai->base==NULL)
477     + return;
478     printk(KERN_INFO DEV_LABEL "(itf %d): shutting down interface\n",
479     lanai->number);
480     lanai_timed_poll_stop(lanai);
481     @@ -2561,7 +2564,7 @@ static int lanai_init_one(struct pci_dev *pci,
482     struct atm_dev *atmdev;
483     int result;
484    
485     - lanai = kmalloc(sizeof(*lanai), GFP_KERNEL);
486     + lanai = kzalloc(sizeof(*lanai), GFP_KERNEL);
487     if (lanai == NULL) {
488     printk(KERN_ERR DEV_LABEL
489     ": couldn't allocate dev_data structure!\n");
490     diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
491     index 5120a96b3a894..b2f4e8df15911 100644
492     --- a/drivers/atm/uPD98402.c
493     +++ b/drivers/atm/uPD98402.c
494     @@ -210,7 +210,7 @@ static void uPD98402_int(struct atm_dev *dev)
495     static int uPD98402_start(struct atm_dev *dev)
496     {
497     DPRINTK("phy_start\n");
498     - if (!(dev->dev_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
499     + if (!(dev->phy_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
500     return -ENOMEM;
501     spin_lock_init(&PRIV(dev)->lock);
502     memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats));
503     diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
504     index 2b739ba841b1a..1a1ad0fdc039a 100644
505     --- a/drivers/block/xen-blkback/blkback.c
506     +++ b/drivers/block/xen-blkback/blkback.c
507     @@ -937,7 +937,7 @@ next:
508     out:
509     for (i = last_map; i < num; i++) {
510     /* Don't zap current batch's valid persistent grants. */
511     - if(i >= last_map + segs_to_map)
512     + if(i >= map_until)
513     pages[i]->persistent_gnt = NULL;
514     pages[i]->handle = BLKBACK_INVALID_HANDLE;
515     }
516     diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
517     index 5012e3ad12256..624f74d03a83a 100644
518     --- a/drivers/bus/omap_l3_noc.c
519     +++ b/drivers/bus/omap_l3_noc.c
520     @@ -285,7 +285,7 @@ static int omap_l3_probe(struct platform_device *pdev)
521     */
522     l3->debug_irq = platform_get_irq(pdev, 0);
523     ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
524     - 0x0, "l3-dbg-irq", l3);
525     + IRQF_NO_THREAD, "l3-dbg-irq", l3);
526     if (ret) {
527     dev_err(l3->dev, "request_irq failed for %d\n",
528     l3->debug_irq);
529     @@ -294,7 +294,7 @@ static int omap_l3_probe(struct platform_device *pdev)
530    
531     l3->app_irq = platform_get_irq(pdev, 1);
532     ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
533     - 0x0, "l3-app-irq", l3);
534     + IRQF_NO_THREAD, "l3-app-irq", l3);
535     if (ret)
536     dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
537    
538     diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
539     index a60e1c1b4b5e8..8bd062635399a 100644
540     --- a/drivers/infiniband/hw/cxgb4/cm.c
541     +++ b/drivers/infiniband/hw/cxgb4/cm.c
542     @@ -3472,13 +3472,13 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
543     ep->com.local_addr.ss_family == AF_INET) {
544     err = cxgb4_remove_server_filter(
545     ep->com.dev->rdev.lldi.ports[0], ep->stid,
546     - ep->com.dev->rdev.lldi.rxq_ids[0], 0);
547     + ep->com.dev->rdev.lldi.rxq_ids[0], false);
548     } else {
549     struct sockaddr_in6 *sin6;
550     c4iw_init_wr_wait(&ep->com.wr_wait);
551     err = cxgb4_remove_server(
552     ep->com.dev->rdev.lldi.ports[0], ep->stid,
553     - ep->com.dev->rdev.lldi.rxq_ids[0], 0);
554     + ep->com.dev->rdev.lldi.rxq_ids[0], true);
555     if (err)
556     goto done;
557     err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
558     diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
559     index 4ead5a18b7940..c41ab2cb272e7 100644
560     --- a/drivers/net/can/c_can/c_can.c
561     +++ b/drivers/net/can/c_can/c_can.c
562     @@ -212,18 +212,6 @@ static const struct can_bittiming_const c_can_bittiming_const = {
563     .brp_inc = 1,
564     };
565    
566     -static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
567     -{
568     - if (priv->device)
569     - pm_runtime_enable(priv->device);
570     -}
571     -
572     -static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
573     -{
574     - if (priv->device)
575     - pm_runtime_disable(priv->device);
576     -}
577     -
578     static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
579     {
580     if (priv->device)
581     @@ -1318,7 +1306,6 @@ static const struct net_device_ops c_can_netdev_ops = {
582    
583     int register_c_can_dev(struct net_device *dev)
584     {
585     - struct c_can_priv *priv = netdev_priv(dev);
586     int err;
587    
588     /* Deactivate pins to prevent DRA7 DCAN IP from being
589     @@ -1328,28 +1315,19 @@ int register_c_can_dev(struct net_device *dev)
590     */
591     pinctrl_pm_select_sleep_state(dev->dev.parent);
592    
593     - c_can_pm_runtime_enable(priv);
594     -
595     dev->flags |= IFF_ECHO; /* we support local echo */
596     dev->netdev_ops = &c_can_netdev_ops;
597    
598     err = register_candev(dev);
599     - if (err)
600     - c_can_pm_runtime_disable(priv);
601     - else
602     + if (!err)
603     devm_can_led_init(dev);
604     -
605     return err;
606     }
607     EXPORT_SYMBOL_GPL(register_c_can_dev);
608    
609     void unregister_c_can_dev(struct net_device *dev)
610     {
611     - struct c_can_priv *priv = netdev_priv(dev);
612     -
613     unregister_candev(dev);
614     -
615     - c_can_pm_runtime_disable(priv);
616     }
617     EXPORT_SYMBOL_GPL(unregister_c_can_dev);
618    
619     diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
620     index d065c0e2d18e6..f3e0b2124a376 100644
621     --- a/drivers/net/can/c_can/c_can_pci.c
622     +++ b/drivers/net/can/c_can/c_can_pci.c
623     @@ -239,12 +239,13 @@ static void c_can_pci_remove(struct pci_dev *pdev)
624     {
625     struct net_device *dev = pci_get_drvdata(pdev);
626     struct c_can_priv *priv = netdev_priv(dev);
627     + void __iomem *addr = priv->base;
628    
629     unregister_c_can_dev(dev);
630    
631     free_c_can_dev(dev);
632    
633     - pci_iounmap(pdev, priv->base);
634     + pci_iounmap(pdev, addr);
635     pci_disable_msi(pdev);
636     pci_clear_master(pdev);
637     pci_release_regions(pdev);
638     diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
639     index 717530eac70c7..c6a03f565e3fc 100644
640     --- a/drivers/net/can/c_can/c_can_platform.c
641     +++ b/drivers/net/can/c_can/c_can_platform.c
642     @@ -29,6 +29,7 @@
643     #include <linux/list.h>
644     #include <linux/io.h>
645     #include <linux/platform_device.h>
646     +#include <linux/pm_runtime.h>
647     #include <linux/clk.h>
648     #include <linux/of.h>
649     #include <linux/of_device.h>
650     @@ -385,6 +386,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
651     platform_set_drvdata(pdev, dev);
652     SET_NETDEV_DEV(dev, &pdev->dev);
653    
654     + pm_runtime_enable(priv->device);
655     ret = register_c_can_dev(dev);
656     if (ret) {
657     dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
658     @@ -397,6 +399,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
659     return 0;
660    
661     exit_free_device:
662     + pm_runtime_disable(priv->device);
663     free_c_can_dev(dev);
664     exit:
665     dev_err(&pdev->dev, "probe failed\n");
666     @@ -407,9 +410,10 @@ exit:
667     static int c_can_plat_remove(struct platform_device *pdev)
668     {
669     struct net_device *dev = platform_get_drvdata(pdev);
670     + struct c_can_priv *priv = netdev_priv(dev);
671    
672     unregister_c_can_dev(dev);
673     -
674     + pm_runtime_disable(priv->device);
675     free_c_can_dev(dev);
676    
677     return 0;
678     diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
679     index ea38b67d0b737..3d7bffd529feb 100644
680     --- a/drivers/net/can/dev.c
681     +++ b/drivers/net/can/dev.c
682     @@ -1084,6 +1084,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head)
683    
684     static struct rtnl_link_ops can_link_ops __read_mostly = {
685     .kind = "can",
686     + .netns_refund = true,
687     .maxtype = IFLA_CAN_MAX,
688     .policy = can_policy,
689     .setup = can_setup,
690     diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
691     index 0bd7e71647964..197c27d8f584b 100644
692     --- a/drivers/net/can/m_can/m_can.c
693     +++ b/drivers/net/can/m_can/m_can.c
694     @@ -428,9 +428,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
695     }
696    
697     while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
698     - if (rxfs & RXFS_RFL)
699     - netdev_warn(dev, "Rx FIFO 0 Message Lost\n");
700     -
701     m_can_read_fifo(dev, rxfs);
702    
703     quota--;
704     diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
705     index 0c69d5858558a..40b3adf7ad998 100644
706     --- a/drivers/net/dsa/bcm_sf2.c
707     +++ b/drivers/net/dsa/bcm_sf2.c
708     @@ -588,8 +588,10 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
709     * in bits 15:8 and the patch level in bits 7:0 which is exactly what
710     * the REG_PHY_REVISION register layout is.
711     */
712     -
713     - return priv->hw_params.gphy_rev;
714     + if (priv->int_phy_mask & BIT(port))
715     + return priv->hw_params.gphy_rev;
716     + else
717     + return 0;
718     }
719    
720     static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
721     diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
722     index f9e74461bdc0b..1231816125955 100644
723     --- a/drivers/net/ethernet/freescale/fec_ptp.c
724     +++ b/drivers/net/ethernet/freescale/fec_ptp.c
725     @@ -396,9 +396,16 @@ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
726     u64 ns;
727     unsigned long flags;
728    
729     + mutex_lock(&adapter->ptp_clk_mutex);
730     + /* Check the ptp clock */
731     + if (!adapter->ptp_clk_on) {
732     + mutex_unlock(&adapter->ptp_clk_mutex);
733     + return -EINVAL;
734     + }
735     spin_lock_irqsave(&adapter->tmreg_lock, flags);
736     ns = timecounter_read(&adapter->tc);
737     spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
738     + mutex_unlock(&adapter->ptp_clk_mutex);
739    
740     *ts = ns_to_timespec64(ns);
741    
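The fec_ptp_gettime() fix takes ptp_clk_mutex and returns -EINVAL while the PTP clock is gated off, so the timecounter is never read with its clock stopped. The same pattern, a flag checked under the mutex that the enable/disable path also holds, as a small pthread analogue (all names illustrative):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t clk_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int clk_on;   /* toggled by the (not shown) enable/disable path */
    static unsigned long long counter = 123456789;

    /* Analogue of fec_ptp_gettime(): refuse to read while the clock is off. */
    static int clock_gettime_guarded(unsigned long long *ns)
    {
        pthread_mutex_lock(&clk_mutex);
        if (!clk_on) {
            pthread_mutex_unlock(&clk_mutex);
            return -EINVAL;
        }
        *ns = counter;   /* stands in for timecounter_read() */
        pthread_mutex_unlock(&clk_mutex);
        return 0;
    }

    int main(void)
    {
        unsigned long long ns;

        printf("clock off: ret=%d\n", clock_gettime_guarded(&ns));
        clk_on = 1;
        printf("clock on:  ret=%d ns=%llu\n", clock_gettime_guarded(&ns), ns);
        return 0;
    }
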
742     diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
743     index 6b03c8553e597..65deaf8f30047 100644
744     --- a/drivers/net/ethernet/intel/e1000e/82571.c
745     +++ b/drivers/net/ethernet/intel/e1000e/82571.c
746     @@ -917,6 +917,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
747     } else {
748     data &= ~IGP02E1000_PM_D0_LPLU;
749     ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
750     + if (ret_val)
751     + return ret_val;
752     /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
753     * during Dx states where the power conservation is most
754     * important. During driver activity we should enable
755     diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
756     index 3c01bc43889a2..46323019aa631 100644
757     --- a/drivers/net/ethernet/intel/e1000e/netdev.c
758     +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
759     @@ -5920,15 +5920,19 @@ static void e1000_reset_task(struct work_struct *work)
760     struct e1000_adapter *adapter;
761     adapter = container_of(work, struct e1000_adapter, reset_task);
762    
763     + rtnl_lock();
764     /* don't run the task if already down */
765     - if (test_bit(__E1000_DOWN, &adapter->state))
766     + if (test_bit(__E1000_DOWN, &adapter->state)) {
767     + rtnl_unlock();
768     return;
769     + }
770    
771     if (!(adapter->flags & FLAG_RESTART_NOW)) {
772     e1000e_dump(adapter);
773     e_err("Reset adapter unexpectedly\n");
774     }
775     e1000e_reinit_locked(adapter);
776     + rtnl_unlock();
777     }
778    
779     /**
780     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
781     index 36d73bf32f4fb..8e2aaf774693f 100644
782     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
783     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
784     @@ -8677,8 +8677,10 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
785     ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
786     err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
787     input->sw_idx, queue);
788     - if (!err)
789     - ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
790     + if (err)
791     + goto err_out_w_lock;
792     +
793     + ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
794     spin_unlock(&adapter->fdir_perfect_lock);
795    
796     if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
797     diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
798     index 5174e0bd75d1e..625336264a44b 100644
799     --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
800     +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
801     @@ -1426,6 +1426,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
802    
803     if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
804     vfree(fw_dump->tmpl_hdr);
805     + fw_dump->tmpl_hdr = NULL;
806    
807     if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
808     extended = !qlcnic_83xx_extend_md_capab(adapter);
809     @@ -1444,6 +1445,8 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
810     struct qlcnic_83xx_dump_template_hdr *hdr;
811    
812     hdr = fw_dump->tmpl_hdr;
813     + if (!hdr)
814     + return;
815     hdr->drv_cap_mask = 0x1f;
816     fw_dump->cap_mask = 0x1f;
817     dev_info(&pdev->dev,
818     diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
819     index fe5b0ac8c6319..5bf47279f9c1b 100644
820     --- a/drivers/net/ethernet/sun/niu.c
821     +++ b/drivers/net/ethernet/sun/niu.c
822     @@ -3948,8 +3948,6 @@ static void niu_xmac_interrupt(struct niu *np)
823     mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
824     if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
825     mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
826     - if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
827     - mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
828     if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
829     mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
830     if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
831     diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
832     index 7108c68f16d3e..6ee7f8d2f2d17 100644
833     --- a/drivers/net/ethernet/tehuti/tehuti.c
834     +++ b/drivers/net/ethernet/tehuti/tehuti.c
835     @@ -2062,6 +2062,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
836     /*bdx_hw_reset(priv); */
837     if (bdx_read_mac(priv)) {
838     pr_err("load MAC address failed\n");
839     + err = -EFAULT;
840     goto err_out_iomap;
841     }
842     SET_NETDEV_DEV(ndev, &pdev->dev);
843     diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
844     index ff2270ead2e68..84e0e7f780297 100644
845     --- a/drivers/net/usb/cdc-phonet.c
846     +++ b/drivers/net/usb/cdc-phonet.c
847     @@ -406,6 +406,8 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i
848    
849     err = register_netdev(dev);
850     if (err) {
851     + /* Set disconnected flag so that disconnect() returns early. */
852     + pnd->disconnected = 1;
853     usb_driver_release_interface(&usbpn_driver, data_intf);
854     goto out;
855     }
856     diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
857     index 87bf05a81db50..fc7d28edee077 100644
858     --- a/drivers/net/wan/fsl_ucc_hdlc.c
859     +++ b/drivers/net/wan/fsl_ucc_hdlc.c
860     @@ -169,13 +169,17 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
861    
862     priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff),
863     GFP_KERNEL);
864     - if (!priv->rx_skbuff)
865     + if (!priv->rx_skbuff) {
866     + ret = -ENOMEM;
867     goto free_ucc_pram;
868     + }
869    
870     priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff),
871     GFP_KERNEL);
872     - if (!priv->tx_skbuff)
873     + if (!priv->tx_skbuff) {
874     + ret = -ENOMEM;
875     goto free_rx_skbuff;
876     + }
877    
878     priv->skb_curtx = 0;
879     priv->skb_dirtytx = 0;
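
The uhdlc_init() hunks fix a classic goto-unwind bug: the kzalloc() failure paths jumped to the cleanup labels without setting ret, so the function could return a stale success value. A minimal sketch of the corrected idiom with stubbed allocations (function and label names are hypothetical):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int init_rings(size_t rx, size_t tx)
    {
        void *rx_buf, *tx_buf;
        int ret = 0;

        rx_buf = calloc(rx, 1);
        if (!rx_buf) {
            ret = -ENOMEM;   /* the fix: set ret before jumping */
            goto out;
        }

        tx_buf = calloc(tx, 1);
        if (!tx_buf) {
            ret = -ENOMEM;
            goto free_rx;
        }

        /* ... use the rings, then tear down (demo only) ... */
        free(tx_buf);
    free_rx:
        free(rx_buf);
    out:
        return ret;
    }

    int main(void)
    {
        printf("init_rings: %d\n", init_rings(16, 16));
        return 0;
    }
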
880     diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
881     index 8e83649f77ce1..42e5677d932d8 100644
882     --- a/drivers/usb/gadget/function/f_hid.c
883     +++ b/drivers/usb/gadget/function/f_hid.c
884     @@ -932,7 +932,7 @@ static void hidg_free_inst(struct usb_function_instance *f)
885     mutex_lock(&hidg_ida_lock);
886    
887     hidg_put_minor(opts->minor);
888     - if (idr_is_empty(&hidg_ida.idr))
889     + if (ida_is_empty(&hidg_ida))
890     ghid_cleanup();
891    
892     mutex_unlock(&hidg_ida_lock);
893     @@ -958,7 +958,7 @@ static struct usb_function_instance *hidg_alloc_inst(void)
894    
895     mutex_lock(&hidg_ida_lock);
896    
897     - if (idr_is_empty(&hidg_ida.idr)) {
898     + if (ida_is_empty(&hidg_ida)) {
899     status = ghid_setup(NULL, HIDG_MINORS);
900     if (status) {
901     ret = ERR_PTR(status);
902     @@ -971,7 +971,7 @@ static struct usb_function_instance *hidg_alloc_inst(void)
903     if (opts->minor < 0) {
904     ret = ERR_PTR(opts->minor);
905     kfree(opts);
906     - if (idr_is_empty(&hidg_ida.idr))
907     + if (ida_is_empty(&hidg_ida))
908     ghid_cleanup();
909     goto unlock;
910     }
911     diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
912     index b962f24b500bf..b3d036d06553c 100644
913     --- a/drivers/usb/gadget/function/f_printer.c
914     +++ b/drivers/usb/gadget/function/f_printer.c
915     @@ -1276,7 +1276,7 @@ static void gprinter_free_inst(struct usb_function_instance *f)
916     mutex_lock(&printer_ida_lock);
917    
918     gprinter_put_minor(opts->minor);
919     - if (idr_is_empty(&printer_ida.idr))
920     + if (ida_is_empty(&printer_ida))
921     gprinter_cleanup();
922    
923     mutex_unlock(&printer_ida_lock);
924     @@ -1300,7 +1300,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void)
925    
926     mutex_lock(&printer_ida_lock);
927    
928     - if (idr_is_empty(&printer_ida.idr)) {
929     + if (ida_is_empty(&printer_ida)) {
930     status = gprinter_setup(PRINTER_MINORS);
931     if (status) {
932     ret = ERR_PTR(status);
933     @@ -1313,7 +1313,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void)
934     if (opts->minor < 0) {
935     ret = ERR_PTR(opts->minor);
936     kfree(opts);
937     - if (idr_is_empty(&printer_ida.idr))
938     + if (ida_is_empty(&printer_ida))
939     gprinter_cleanup();
940     goto unlock;
941     }
942     diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
943     index c3428767332c2..55ebf9f4a824e 100644
944     --- a/fs/nfs/Kconfig
945     +++ b/fs/nfs/Kconfig
946     @@ -132,7 +132,7 @@ config PNFS_OBJLAYOUT
947     config PNFS_FLEXFILE_LAYOUT
948     tristate
949     depends on NFS_V4_1 && NFS_V3
950     - default m
951     + default NFS_V4
952    
953     config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
954     string "NFSv4.1 Implementation ID Domain"
955     diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
956     index 267126d32ec0f..4a68837e92ea4 100644
957     --- a/fs/nfs/nfs3xdr.c
958     +++ b/fs/nfs/nfs3xdr.c
959     @@ -33,6 +33,7 @@
960     */
961     #define NFS3_fhandle_sz (1+16)
962     #define NFS3_fh_sz (NFS3_fhandle_sz) /* shorthand */
963     +#define NFS3_post_op_fh_sz (1+NFS3_fh_sz)
964     #define NFS3_sattr_sz (15)
965     #define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2))
966     #define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2))
967     @@ -70,7 +71,7 @@
968     #define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1)
969     #define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3)
970     #define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4)
971     -#define NFS3_createres_sz (1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
972     +#define NFS3_createres_sz (1+NFS3_post_op_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
973     #define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz))
974     #define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
975     #define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2)
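
The NFS3_createres_sz fix accounts for the post_op_fh3 discriminator: a CREATE reply carries an optional file handle, which costs one extra XDR word ahead of the handle itself. Working the arithmetic with the size macros from this file (the attr/wcc values below are reproduced from the same nfs3xdr.c; treat them as a sketch):

    #include <stdio.h>

    #define NFS3_fhandle_sz      (1 + 16)
    #define NFS3_fh_sz           (NFS3_fhandle_sz)
    #define NFS3_post_op_fh_sz   (1 + NFS3_fh_sz)  /* +1: "handle follows" bool */
    #define NFS3_post_op_attr_sz (1 + 21)          /* 1 + NFS3_fattr_sz */
    #define NFS3_wcc_data_sz     ((1 + 6) + NFS3_post_op_attr_sz)

    int main(void)
    {
        int old_sz = 1 + NFS3_fh_sz         + NFS3_post_op_attr_sz + NFS3_wcc_data_sz;
        int new_sz = 1 + NFS3_post_op_fh_sz + NFS3_post_op_attr_sz + NFS3_wcc_data_sz;

        printf("createres: %d -> %d XDR words (one word larger)\n",
               old_sz, new_sz);
        return 0;
    }
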
976     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
977     index 0cebe0ca03b2a..94130588ebf52 100644
978     --- a/fs/nfs/nfs4proc.c
979     +++ b/fs/nfs/nfs4proc.c
980     @@ -5144,6 +5144,9 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
981     unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
982     int ret, i;
983    
984     + /* You can't remove system.nfs4_acl: */
985     + if (buflen == 0)
986     + return -EINVAL;
987     if (!nfs4_server_supports_acls(server))
988     return -EOPNOTSUPP;
989     if (npages > ARRAY_SIZE(pages))
990     diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
991     index d2a806416c3ab..1d406a2094a56 100644
992     --- a/fs/squashfs/export.c
993     +++ b/fs/squashfs/export.c
994     @@ -165,14 +165,18 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
995     start = le64_to_cpu(table[n]);
996     end = le64_to_cpu(table[n + 1]);
997    
998     - if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
999     + if (start >= end
1000     + || (end - start) >
1001     + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
1002     kfree(table);
1003     return ERR_PTR(-EINVAL);
1004     }
1005     }
1006    
1007     start = le64_to_cpu(table[indexes - 1]);
1008     - if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
1009     + if (start >= lookup_table_start ||
1010     + (lookup_table_start - start) >
1011     + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
1012     kfree(table);
1013     return ERR_PTR(-EINVAL);
1014     }
1015     diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
1016     index 8ccc0e3f6ea5a..d2e15baab5378 100644
1017     --- a/fs/squashfs/id.c
1018     +++ b/fs/squashfs/id.c
1019     @@ -110,14 +110,16 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
1020     start = le64_to_cpu(table[n]);
1021     end = le64_to_cpu(table[n + 1]);
1022    
1023     - if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
1024     + if (start >= end || (end - start) >
1025     + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
1026     kfree(table);
1027     return ERR_PTR(-EINVAL);
1028     }
1029     }
1030    
1031     start = le64_to_cpu(table[indexes - 1]);
1032     - if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
1033     + if (start >= id_table_start || (id_table_start - start) >
1034     + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
1035     kfree(table);
1036     return ERR_PTR(-EINVAL);
1037     }
1038     diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
1039     index e66486366f025..2fd1262cc1bd4 100644
1040     --- a/fs/squashfs/squashfs_fs.h
1041     +++ b/fs/squashfs/squashfs_fs.h
1042     @@ -30,6 +30,7 @@
1043    
1044     /* size of metadata (inode and directory) blocks */
1045     #define SQUASHFS_METADATA_SIZE 8192
1046     +#define SQUASHFS_BLOCK_OFFSET 2
1047    
1048     /* default size of block device I/O */
1049     #ifdef CONFIG_SQUASHFS_4K_DEVBLK_SIZE
1050     diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
1051     index 3a655d879600c..7f718d2bf3579 100644
1052     --- a/fs/squashfs/xattr_id.c
1053     +++ b/fs/squashfs/xattr_id.c
1054     @@ -122,14 +122,16 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
1055     start = le64_to_cpu(table[n]);
1056     end = le64_to_cpu(table[n + 1]);
1057    
1058     - if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
1059     + if (start >= end || (end - start) >
1060     + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
1061     kfree(table);
1062     return ERR_PTR(-EINVAL);
1063     }
1064     }
1065    
1066     start = le64_to_cpu(table[indexes - 1]);
1067     - if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
1068     + if (start >= table_start || (table_start - start) >
1069     + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
1070     kfree(table);
1071     return ERR_PTR(-EINVAL);
1072     }
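
All four squashfs hunks enforce the same rule: a compressed metadata block may span up to SQUASHFS_METADATA_SIZE bytes plus the two-byte length header (SQUASHFS_BLOCK_OFFSET), and consecutive index entries must be strictly increasing. The check in isolation:

    #include <stdio.h>

    #define SQUASHFS_METADATA_SIZE 8192
    #define SQUASHFS_BLOCK_OFFSET  2   /* 2-byte metadata length header */

    /* Returns 1 when the extent [start, end) is a plausible metadata block. */
    static int extent_ok(unsigned long long start, unsigned long long end)
    {
        return start < end &&
               (end - start) <= SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET;
    }

    int main(void)
    {
        printf("%d\n", extent_ok(0, 8194));   /* 1: maximum allowed size */
        printf("%d\n", extent_ok(0, 8195));   /* 0: too large, reject table */
        printf("%d\n", extent_ok(100, 100));  /* 0: not strictly increasing */
        return 0;
    }
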
1073     diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
1074     index c1a524de67c5b..53b2a1f320f9f 100644
1075     --- a/include/acpi/acpi_bus.h
1076     +++ b/include/acpi/acpi_bus.h
1077     @@ -241,6 +241,7 @@ struct acpi_pnp_type {
1078    
1079     struct acpi_device_pnp {
1080     acpi_bus_id bus_id; /* Object name */
1081     + int instance_no; /* Instance number of this object */
1082     struct acpi_pnp_type type; /* ID type */
1083     acpi_bus_address bus_address; /* _ADR */
1084     char *unique_id; /* _UID */
1085     diff --git a/include/linux/idr.h b/include/linux/idr.h
1086     index 083d61e927063..3639a28188c92 100644
1087     --- a/include/linux/idr.h
1088     +++ b/include/linux/idr.h
1089     @@ -195,6 +195,11 @@ static inline int ida_get_new(struct ida *ida, int *p_id)
1090     return ida_get_new_above(ida, 0, p_id);
1091     }
1092    
1093     +static inline bool ida_is_empty(struct ida *ida)
1094     +{
1095     + return idr_is_empty(&ida->idr);
1096     +}
1097     +
1098     void __init idr_init_cache(void);
1099    
1100     #endif /* __IDR_H__ */
1101     diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
1102     index a4ccc3122f938..cfcbc49f4ddfa 100644
1103     --- a/include/linux/if_macvlan.h
1104     +++ b/include/linux/if_macvlan.h
1105     @@ -70,13 +70,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
1106     if (likely(success)) {
1107     struct vlan_pcpu_stats *pcpu_stats;
1108    
1109     - pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
1110     + pcpu_stats = get_cpu_ptr(vlan->pcpu_stats);
1111     u64_stats_update_begin(&pcpu_stats->syncp);
1112     pcpu_stats->rx_packets++;
1113     pcpu_stats->rx_bytes += len;
1114     if (multicast)
1115     pcpu_stats->rx_multicast++;
1116     u64_stats_update_end(&pcpu_stats->syncp);
1117     + put_cpu_ptr(vlan->pcpu_stats);
1118     } else {
1119     this_cpu_inc(vlan->pcpu_stats->rx_errors);
1120     }
1121     diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
1122     index 650f3dd6b800f..f604a8fe9d2e5 100644
1123     --- a/include/linux/u64_stats_sync.h
1124     +++ b/include/linux/u64_stats_sync.h
1125     @@ -68,12 +68,13 @@ struct u64_stats_sync {
1126     };
1127    
1128    
1129     +#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
1130     +#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq)
1131     +#else
1132     static inline void u64_stats_init(struct u64_stats_sync *syncp)
1133     {
1134     -#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
1135     - seqcount_init(&syncp->seq);
1136     -#endif
1137     }
1138     +#endif
1139    
1140     static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
1141     {
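
u64_stats_init() becomes a macro on 32-bit SMP rather than an inline function because seqcount_init() plants a static lockdep class key at its call site; funneling every caller through one inline function would make all seqcounts initialized in a file share a single key and confuse lockdep. The call-site-identity difference, demonstrated with plain statics:

    #include <stdio.h>

    /* Macro: each expansion owns its own static (its own "lockdep key"). */
    #define init_key() do { static int key; \
            printf("macro key @ %p\n", (void *)&key); } while (0)

    /* Inline function: one static shared by every caller. */
    static inline void init_key_fn(void)
    {
        static int key;
        printf("func  key @ %p\n", (void *)&key);
    }

    int main(void)
    {
        init_key();      /* distinct address */
        init_key();      /* distinct address */
        init_key_fn();   /* same address ... */
        init_key_fn();   /* ... both times */
        return 0;
    }
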
1142     diff --git a/include/net/red.h b/include/net/red.h
1143     index 17821f66de111..b3ab5c6bfa83f 100644
1144     --- a/include/net/red.h
1145     +++ b/include/net/red.h
1146     @@ -167,7 +167,8 @@ static inline void red_set_vars(struct red_vars *v)
1147     v->qcount = -1;
1148     }
1149    
1150     -static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log)
1151     +static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
1152     + u8 Scell_log, u8 *stab)
1153     {
1154     if (fls(qth_min) + Wlog > 32)
1155     return false;
1156     @@ -177,6 +178,13 @@ static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_
1157     return false;
1158     if (qth_max < qth_min)
1159     return false;
1160     + if (stab) {
1161     + int i;
1162     +
1163     + for (i = 0; i < RED_STAB_SIZE; i++)
1164     + if (stab[i] >= 32)
1165     + return false;
1166     + }
1167     return true;
1168     }
1169    
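The extended red_check_params() rejects any Scell table entry of 32 or more: the entries are later used as shift counts on 32-bit values, and shifting by the full width is undefined behaviour. The added loop in isolation (RED_STAB_SIZE is 256 in this header):

    #include <stdio.h>

    #define RED_STAB_SIZE 256

    /* Mirrors the loop added to red_check_params(): every stab entry
     * must be a valid shift count for a 32-bit value. */
    static int stab_ok(const unsigned char *stab)
    {
        for (int i = 0; i < RED_STAB_SIZE; i++)
            if (stab[i] >= 32)
                return 0;
        return 1;
    }

    int main(void)
    {
        unsigned char stab[RED_STAB_SIZE] = { 0 };

        printf("all zero: %d\n", stab_ok(stab));  /* 1: accepted */
        stab[17] = 32;
        printf("entry 32: %d\n", stab_ok(stab));  /* 0: rejected */
        return 0;
    }
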
1170     diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
1171     index 4113916cc1bb0..af0745f316fe3 100644
1172     --- a/include/net/rtnetlink.h
1173     +++ b/include/net/rtnetlink.h
1174     @@ -28,6 +28,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
1175     *
1176     * @list: Used internally
1177     * @kind: Identifier
1178     + * @netns_refund: Physical device, move to init_net on netns exit
1179     * @maxtype: Highest device specific netlink attribute number
1180     * @policy: Netlink policy for device specific attribute validation
1181     * @validate: Optional validation function for netlink/changelink parameters
1182     @@ -84,6 +85,7 @@ struct rtnl_link_ops {
1183     unsigned int (*get_num_tx_queues)(void);
1184     unsigned int (*get_num_rx_queues)(void);
1185    
1186     + bool netns_refund;
1187     int slave_maxtype;
1188     const struct nla_policy *slave_policy;
1189     int (*slave_validate)(struct nlattr *tb[],
1190     diff --git a/kernel/futex.c b/kernel/futex.c
1191     index 796b1c8608397..468f39476476b 100644
1192     --- a/kernel/futex.c
1193     +++ b/kernel/futex.c
1194     @@ -1407,13 +1407,15 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
1195    
1196     static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
1197     {
1198     + int err;
1199     u32 uninitialized_var(curval);
1200    
1201     if (unlikely(should_fail_futex(true)))
1202     return -EFAULT;
1203    
1204     - if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
1205     - return -EFAULT;
1206     + err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
1207     + if (unlikely(err))
1208     + return err;
1209    
1210     /* If user space value changed, let the caller retry */
1211     return curval != uval ? -EAGAIN : 0;
1212     @@ -1553,11 +1555,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
1213     if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1214     return;
1215    
1216     - /*
1217     - * Queue the task for later wakeup for after we've released
1218     - * the hb->lock. wake_q_add() grabs reference to p.
1219     - */
1220     - wake_q_add(wake_q, p);
1221     + get_task_struct(p);
1222     __unqueue_futex(q);
1223     /*
1224     * The waiting task can free the futex_q as soon as
1225     @@ -1565,8 +1563,14 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
1226     * memory barrier is required here to prevent the following
1227     * store to lock_ptr from getting ahead of the plist_del.
1228     */
1229     - smp_wmb();
1230     - q->lock_ptr = NULL;
1231     + smp_store_release(&q->lock_ptr, NULL);
1232     +
1233     + /*
1234     + * Queue the task for later wakeup for after we've released
1235     + * the hb->lock. wake_q_add() grabs reference to p.
1236     + */
1237     + wake_q_add(wake_q, p);
1238     + put_task_struct(p);
1239     }
1240    
1241     /*
1242     @@ -1601,13 +1605,13 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
1243     */
1244     newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1245    
1246     - if (unlikely(should_fail_futex(true)))
1247     - ret = -EFAULT;
1248     -
1249     - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
1250     + if (unlikely(should_fail_futex(true))) {
1251     ret = -EFAULT;
1252     + goto out_unlock;
1253     + }
1254    
1255     - } else if (curval != uval) {
1256     + ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
1257     + if (!ret && (curval != uval)) {
1258     /*
1259     * If a unconditional UNLOCK_PI operation (user space did not
1260     * try the TID->0 transition) raced with a waiter setting the
1261     @@ -1793,32 +1797,32 @@ retry_private:
1262     double_lock_hb(hb1, hb2);
1263     op_ret = futex_atomic_op_inuser(op, uaddr2);
1264     if (unlikely(op_ret < 0)) {
1265     -
1266     double_unlock_hb(hb1, hb2);
1267    
1268     -#ifndef CONFIG_MMU
1269     - /*
1270     - * we don't get EFAULT from MMU faults if we don't have an MMU,
1271     - * but we might get them from range checking
1272     - */
1273     - ret = op_ret;
1274     - goto out_put_keys;
1275     -#endif
1276     -
1277     - if (unlikely(op_ret != -EFAULT)) {
1278     + if (!IS_ENABLED(CONFIG_MMU) ||
1279     + unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
1280     + /*
1281     + * we don't get EFAULT from MMU faults if we don't have
1282     + * an MMU, but we might get them from range checking
1283     + */
1284     ret = op_ret;
1285     goto out_put_keys;
1286     }
1287    
1288     - ret = fault_in_user_writeable(uaddr2);
1289     - if (ret)
1290     - goto out_put_keys;
1291     + if (op_ret == -EFAULT) {
1292     + ret = fault_in_user_writeable(uaddr2);
1293     + if (ret)
1294     + goto out_put_keys;
1295     + }
1296    
1297     - if (!(flags & FLAGS_SHARED))
1298     + if (!(flags & FLAGS_SHARED)) {
1299     + cond_resched();
1300     goto retry_private;
1301     + }
1302    
1303     put_futex_key(&key2);
1304     put_futex_key(&key1);
1305     + cond_resched();
1306     goto retry;
1307     }
1308    
1309     @@ -2334,20 +2338,7 @@ queue_unlock(struct futex_hash_bucket *hb)
1310     hb_waiters_dec(hb);
1311     }
1312    
1313     -/**
1314     - * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1315     - * @q: The futex_q to enqueue
1316     - * @hb: The destination hash bucket
1317     - *
1318     - * The hb->lock must be held by the caller, and is released here. A call to
1319     - * queue_me() is typically paired with exactly one call to unqueue_me(). The
1320     - * exceptions involve the PI related operations, which may use unqueue_me_pi()
1321     - * or nothing if the unqueue is done as part of the wake process and the unqueue
1322     - * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
1323     - * an example).
1324     - */
1325     -static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1326     - __releases(&hb->lock)
1327     +static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1328     {
1329     int prio;
1330    
1331     @@ -2364,6 +2355,24 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1332     plist_node_init(&q->list, prio);
1333     plist_add(&q->list, &hb->chain);
1334     q->task = current;
1335     +}
1336     +
1337     +/**
1338     + * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1339     + * @q: The futex_q to enqueue
1340     + * @hb: The destination hash bucket
1341     + *
1342     + * The hb->lock must be held by the caller, and is released here. A call to
1343     + * queue_me() is typically paired with exactly one call to unqueue_me(). The
1344     + * exceptions involve the PI related operations, which may use unqueue_me_pi()
1345     + * or nothing if the unqueue is done as part of the wake process and the unqueue
1346     + * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
1347     + * an example).
1348     + */
1349     +static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1350     + __releases(&hb->lock)
1351     +{
1352     + __queue_me(q, hb);
1353     spin_unlock(&hb->lock);
1354     }
1355    
1356     @@ -2488,10 +2497,22 @@ retry:
1357     }
1358    
1359     /*
1360     - * Since we just failed the trylock; there must be an owner.
1361     + * The trylock just failed, so either there is an owner or
1362     + * there is a higher priority waiter than this one.
1363     */
1364     newowner = rt_mutex_owner(&pi_state->pi_mutex);
1365     - BUG_ON(!newowner);
1366     + /*
1367     + * If the higher priority waiter has not yet taken over the
1368     + * rtmutex then newowner is NULL. We can't return here with
1369     + * that state because it's inconsistent vs. the user space
1370     + * state. So drop the locks and try again. It's a valid
1371     + * situation and not any different from the other retry
1372     + * conditions.
1373     + */
1374     + if (unlikely(!newowner)) {
1375     + err = -EAGAIN;
1376     + goto handle_err;
1377     + }
1378     } else {
1379     WARN_ON_ONCE(argowner != current);
1380     if (oldowner == current) {
1381     @@ -2509,14 +2530,17 @@ retry:
1382     if (!pi_state->owner)
1383     newtid |= FUTEX_OWNER_DIED;
1384    
1385     - if (get_futex_value_locked(&uval, uaddr))
1386     - goto handle_fault;
1387     + err = get_futex_value_locked(&uval, uaddr);
1388     + if (err)
1389     + goto handle_err;
1390    
1391     for (;;) {
1392     newval = (uval & FUTEX_OWNER_DIED) | newtid;
1393    
1394     - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
1395     - goto handle_fault;
1396     + err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
1397     + if (err)
1398     + goto handle_err;
1399     +
1400     if (curval == uval)
1401     break;
1402     uval = curval;
1403     @@ -2531,23 +2555,36 @@ retry:
1404     return argowner == current;
1405    
1406     /*
1407     - * To handle the page fault we need to drop the locks here. That gives
1408     - * the other task (either the highest priority waiter itself or the
1409     - * task which stole the rtmutex) the chance to try the fixup of the
1410     - * pi_state. So once we are back from handling the fault we need to
1411     - * check the pi_state after reacquiring the locks and before trying to
1412     - * do another fixup. When the fixup has been done already we simply
1413     - * return.
1414     + * In order to reschedule or handle a page fault, we need to drop the
1415     + * locks here. In the case of a fault, this gives the other task
1416     + * (either the highest priority waiter itself or the task which stole
1417     + * the rtmutex) the chance to try the fixup of the pi_state. So once we
1418     + * are back from handling the fault we need to check the pi_state after
1419     + * reacquiring the locks and before trying to do another fixup. If
1420     + * the fixup has already been done, we simply return.
1421     *
1422     * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
1423     * drop hb->lock since the caller owns the hb -> futex_q relation.
1424     * Dropping the pi_mutex->wait_lock requires the state revalidate.
1425     */
1426     -handle_fault:
1427     +handle_err:
1428     raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1429     spin_unlock(q->lock_ptr);
1430    
1431     - err = fault_in_user_writeable(uaddr);
1432     + switch (err) {
1433     + case -EFAULT:
1434     + err = fault_in_user_writeable(uaddr);
1435     + break;
1436     +
1437     + case -EAGAIN:
1438     + cond_resched();
1439     + err = 0;
1440     + break;
1441     +
1442     + default:
1443     + WARN_ON_ONCE(1);
1444     + break;
1445     + }
1446    
1447     spin_lock(q->lock_ptr);
1448     raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
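
The renamed handle_err path funnels every recoverable error through one drop-locks/recover/relock/retry sequence: -EFAULT faults the page in, -EAGAIN just yields. A hedged, self-contained userspace sketch of that control flow (all helpers here are hypothetical stand-ins, not kernel APIs):

	#include <errno.h>
	#include <sched.h>

	/* Hypothetical stand-ins for the locked operation and its recovery. */
	static int  do_locked_op(int attempt) { return attempt < 2 ? -EAGAIN : 0; }
	static void drop_locks(void)          { }
	static void retake_locks(void)        { }
	static int  fault_in_page(void)       { return 0; }

	static int fixup_with_retry(void)
	{
		int attempt = 0, err;

		for (;;) {
			err = do_locked_op(attempt++);
			if (err != -EFAULT && err != -EAGAIN)
				return err;            /* 0 or an unexpected error */

			drop_locks();                  /* never recover under the locks */
			if (err == -EFAULT && fault_in_page())
				return -EFAULT;        /* unresolvable fault */
			if (err == -EAGAIN)
				sched_yield();         /* userspace cond_resched() */
			retake_locks();                /* revalidate state, then retry */
		}
	}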
1449     @@ -2869,6 +2906,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
1450     {
1451     struct hrtimer_sleeper timeout, *to = NULL;
1452     struct task_struct *exiting = NULL;
1453     + struct rt_mutex_waiter rt_waiter;
1454     struct futex_hash_bucket *hb;
1455     struct futex_q q = futex_q_init;
1456     int res, ret;
1457     @@ -2929,24 +2967,71 @@ retry_private:
1458     }
1459     }
1460    
1461     + WARN_ON(!q.pi_state);
1462     +
1463     /*
1464     * Only actually queue now that the atomic ops are done:
1465     */
1466     - queue_me(&q, hb);
1467     + __queue_me(&q, hb);
1468    
1469     - WARN_ON(!q.pi_state);
1470     - /*
1471     - * Block on the PI mutex:
1472     - */
1473     - if (!trylock) {
1474     - ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
1475     - } else {
1476     + if (trylock) {
1477     ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
1478     /* Fixup the trylock return value: */
1479     ret = ret ? 0 : -EWOULDBLOCK;
1480     + goto no_block;
1481     }
1482    
1483     + rt_mutex_init_waiter(&rt_waiter);
1484     +
1485     + /*
1486     + * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
1487     + * hold it while doing __rt_mutex_start_proxy_lock(), because then it will
1488     + * include hb->lock in the blocking chain, even though we will not in
1489     + * fact hold it while blocking. This will lead it to report -EDEADLK
1490     + * and BUG when futex_unlock_pi() interleaves with this.
1491     + *
1492     + * Therefore acquire wait_lock while holding hb->lock, but drop the
1493     + * latter before calling __rt_mutex_start_proxy_lock(). This
1494     + * interleaves with futex_unlock_pi() -- which does a similar lock
1495     + * handoff -- such that the latter can observe the futex_q::pi_state
1496     + * before __rt_mutex_start_proxy_lock() is done.
1497     + */
1498     + raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
1499     + spin_unlock(q.lock_ptr);
1500     + /*
1501     + * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
1502     + * such that futex_unlock_pi() is guaranteed to observe the waiter when
1503     + * it sees the futex_q::pi_state.
1504     + */
1505     + ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
1506     + raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
1507     +
1508     + if (ret) {
1509     + if (ret == 1)
1510     + ret = 0;
1511     + goto cleanup;
1512     + }
1513     +
1514     + if (unlikely(to))
1515     + hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
1516     +
1517     + ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
1518     +
1519     +cleanup:
1520     spin_lock(q.lock_ptr);
1521     + /*
1522     + * If we failed to acquire the lock (deadlock/signal/timeout), we must
1523     + * first acquire the hb->lock before removing the waiter from the
1524     + * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
1525     + * lists consistent.
1526     + *
1527     + * In particular, it is important that futex_unlock_pi() cannot
1528     + * observe this inconsistency.
1529     + */
1530     + if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
1531     + ret = 0;
1532     +
1533     +no_block:
1534     /*
1535     * Fixup the pi_state owner and possibly acquire the lock if we
1536     * haven't already.
1537     @@ -2970,8 +3055,10 @@ out_unlock_put_key:
1538     out_put_key:
1539     put_futex_key(&q.key);
1540     out:
1541     - if (to)
1542     + if (to) {
1543     + hrtimer_cancel(&to->timer);
1544     destroy_hrtimer_on_stack(&to->timer);
1545     + }
1546     return ret != -EINTR ? ret : -ERESTARTNOINTR;
1547    
1548     uaddr_faulted:
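
The heart of the rework is the overlapping lock handoff spelled out in the comments above: wait_lock is taken while hb->lock is still held, so there is never a window where neither lock is held and futex_unlock_pi() could observe a half-done state. The same ordering with plain pthread mutexes (outer and inner are stand-ins for hb->lock and pi_mutex.wait_lock):

	#include <pthread.h>

	static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* hb->lock  */
	static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* wait_lock */

	static void overlapping_handoff(void)
	{
		pthread_mutex_lock(&outer);
		/* publish state that observers reach via outer */
		pthread_mutex_lock(&inner);     /* take inner before ...   */
		pthread_mutex_unlock(&outer);   /* ... letting go of outer */
		/* enqueue the waiter under inner; an unlocker following the
		 * same outer-then-inner order can never see the intermediate
		 * state */
		pthread_mutex_unlock(&inner);
	}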
1549     @@ -3039,14 +3126,14 @@ retry:
1550    
1551     get_pi_state(pi_state);
1552     /*
1553     - * Since modifying the wait_list is done while holding both
1554     - * hb->lock and wait_lock, holding either is sufficient to
1555     - * observe it.
1556     - *
1557     * By taking wait_lock while still holding hb->lock, we ensure
1558     * there is no point where we hold neither, and therefore
1559     * wake_futex_pi() must observe a state consistent with what we
1560     * observed.
1561     + *
1562     + * In particular, this forces __rt_mutex_start_proxy_lock() to
1563     + * complete such that we're guaranteed to observe the
1564     + * rt_waiter. Also see the WARN in wake_futex_pi().
1565     */
1566     raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1567     spin_unlock(&hb->lock);
1568     @@ -3071,10 +3158,8 @@ retry:
1569     * An unconditional UNLOCK_PI op raced against a waiter
1570     * setting the FUTEX_WAITERS bit. Try again.
1571     */
1572     - if (ret == -EAGAIN) {
1573     - put_futex_key(&key);
1574     - goto retry;
1575     - }
1576     + if (ret == -EAGAIN)
1577     + goto pi_retry;
1578     /*
1579     * wake_futex_pi has detected invalid state. Tell user
1580     * space.
1581     @@ -3089,9 +3174,19 @@ retry:
1582     * preserve the WAITERS bit not the OWNER_DIED one. We are the
1583     * owner.
1584     */
1585     - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) {
1586     + if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
1587     spin_unlock(&hb->lock);
1588     - goto pi_faulted;
1589     + switch (ret) {
1590     + case -EFAULT:
1591     + goto pi_faulted;
1592     +
1593     + case -EAGAIN:
1594     + goto pi_retry;
1595     +
1596     + default:
1597     + WARN_ON_ONCE(1);
1598     + goto out_putkey;
1599     + }
1600     }
1601    
1602     /*
1603     @@ -3105,6 +3200,11 @@ out_putkey:
1604     put_futex_key(&key);
1605     return ret;
1606    
1607     +pi_retry:
1608     + put_futex_key(&key);
1609     + cond_resched();
1610     + goto retry;
1611     +
1612     pi_faulted:
1613     put_futex_key(&key);
1614    
1615     @@ -3235,10 +3335,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
1616     * The waiter is allocated on our stack, manipulated by the requeue
1617     * code while we sleep on uaddr.
1618     */
1619     - debug_rt_mutex_init_waiter(&rt_waiter);
1620     - RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
1621     - RB_CLEAR_NODE(&rt_waiter.tree_entry);
1622     - rt_waiter.task = NULL;
1623     + rt_mutex_init_waiter(&rt_waiter);
1624    
1625     ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
1626     if (unlikely(ret != 0))
1627     @@ -3443,13 +3540,19 @@ err_unlock:
1628     return ret;
1629     }
1630    
1631     +/* Constants for the pending_op argument of handle_futex_death */
1632     +#define HANDLE_DEATH_PENDING true
1633     +#define HANDLE_DEATH_LIST false
1634     +
1635     /*
1636     * Process a futex-list entry, check whether it's owned by the
1637     * dying task, and do notification if so:
1638     */
1639     -static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
1640     +static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
1641     + bool pi, bool pending_op)
1642     {
1643     u32 uval, uninitialized_var(nval), mval;
1644     + int err;
1645    
1646     /* Futex address must be 32bit aligned */
1647     if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
1648     @@ -3459,42 +3562,93 @@ retry:
1649     if (get_user(uval, uaddr))
1650     return -1;
1651    
1652     - if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
1653     - /*
1654     - * Ok, this dying thread is truly holding a futex
1655     - * of interest. Set the OWNER_DIED bit atomically
1656     - * via cmpxchg, and if the value had FUTEX_WAITERS
1657     - * set, wake up a waiter (if any). (We have to do a
1658     - * futex_wake() even if OWNER_DIED is already set -
1659     - * to handle the rare but possible case of recursive
1660     - * thread-death.) The rest of the cleanup is done in
1661     - * userspace.
1662     - */
1663     - mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
1664     - /*
1665     - * We are not holding a lock here, but we want to have
1666     - * the pagefault_disable/enable() protection because
1667     - * we want to handle the fault gracefully. If the
1668     - * access fails we try to fault in the futex with R/W
1669     - * verification via get_user_pages. get_user() above
1670     - * does not guarantee R/W access. If that fails we
1671     - * give up and leave the futex locked.
1672     - */
1673     - if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
1674     + /*
1675     + * Special case for regular (non PI) futexes. The unlock path in
1676     + * user space has two race scenarios:
1677     + *
1678     + * 1. The unlock path releases the user space futex value and
1679     + * before it can execute the futex() syscall to wake up
1680     + * waiters it is killed.
1681     + *
1682     + * 2. A woken up waiter is killed before it can acquire the
1683     + * futex in user space.
1684     + *
1685     + * In both cases the TID validation below would suppress the wakeup
1686     + * of potential waiters, which would cause these waiters to block
1687     + * forever.
1688     + *
1689     + * In both cases the following conditions are met:
1690     + *
1691     + * 1) task->robust_list->list_op_pending != NULL
1692     + * @pending_op == true
1693     + * 2) User space futex value == 0
1694     + * 3) Regular futex: @pi == false
1695     + *
1696     + * If these conditions are met, it is safe to attempt waking up a
1697     + * potential waiter without touching the user space futex value and
1698     + * trying to set the OWNER_DIED bit. The user space futex value is
1699     + * uncontended and the rest of the user space mutex state is
1700     + * consistent, so a woken waiter will just take over the
1701     + * uncontended futex. Setting the OWNER_DIED bit would create
1702     + * inconsistent state and break the user space owner-died
1703     + * handling.
1704     + */
1705     + if (pending_op && !pi && !uval) {
1706     + futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
1707     + return 0;
1708     + }
1709     +
1710     + if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
1711     + return 0;
1712     +
1713     + /*
1714     + * Ok, this dying thread is truly holding a futex
1715     + * of interest. Set the OWNER_DIED bit atomically
1716     + * via cmpxchg, and if the value had FUTEX_WAITERS
1717     + * set, wake up a waiter (if any). (We have to do a
1718     + * futex_wake() even if OWNER_DIED is already set -
1719     + * to handle the rare but possible case of recursive
1720     + * thread-death.) The rest of the cleanup is done in
1721     + * userspace.
1722     + */
1723     + mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
1724     +
1725     + /*
1726     + * We are not holding a lock here, but we want to have
1727     + * the pagefault_disable/enable() protection because
1728     + * we want to handle the fault gracefully. If the
1729     + * access fails we try to fault in the futex with R/W
1730     + * verification via get_user_pages. get_user() above
1731     + * does not guarantee R/W access. If that fails we
1732     + * give up and leave the futex locked.
1733     + */
1734     + if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
1735     + switch (err) {
1736     + case -EFAULT:
1737     if (fault_in_user_writeable(uaddr))
1738     return -1;
1739     goto retry;
1740     - }
1741     - if (nval != uval)
1742     +
1743     + case -EAGAIN:
1744     + cond_resched();
1745     goto retry;
1746    
1747     - /*
1748     - * Wake robust non-PI futexes here. The wakeup of
1749     - * PI futexes happens in exit_pi_state():
1750     - */
1751     - if (!pi && (uval & FUTEX_WAITERS))
1752     - futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
1753     + default:
1754     + WARN_ON_ONCE(1);
1755     + return err;
1756     + }
1757     }
1758     +
1759     + if (nval != uval)
1760     + goto retry;
1761     +
1762     + /*
1763     + * Wake robust non-PI futexes here. The wakeup of
1764     + * PI futexes happens in exit_pi_state():
1765     + */
1766     + if (!pi && (uval & FUTEX_WAITERS))
1767     + futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
1768     +
1769     return 0;
1770     }
1771    
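
The rewritten handle_futex_death() boils down to two gates before the OWNER_DIED cmpxchg loop. A userspace model of just those predicates (FUTEX_TID_MASK matches the value in <linux/futex.h>; the rest is illustrative):

	#include <stdbool.h>
	#include <stdint.h>

	#define FUTEX_TID_MASK 0x3fffffff /* as in <linux/futex.h> */

	/* Wake without touching the word: only for a pending op on a
	 * non-PI futex whose value is already 0 (uncontended). */
	static bool needs_plain_wake(uint32_t uval, bool pi, bool pending_op)
	{
		return pending_op && !pi && uval == 0;
	}

	/* Otherwise the dying task must actually own the futex. */
	static bool owned_by_dying_task(uint32_t uval, uint32_t tid)
	{
		return (uval & FUTEX_TID_MASK) == tid;
	}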
1772     @@ -3563,10 +3717,11 @@ static void exit_robust_list(struct task_struct *curr)
1773     * A pending lock might already be on the list, so
1774     * don't process it twice:
1775     */
1776     - if (entry != pending)
1777     + if (entry != pending) {
1778     if (handle_futex_death((void __user *)entry + futex_offset,
1779     - curr, pi))
1780     + curr, pi, HANDLE_DEATH_LIST))
1781     return;
1782     + }
1783     if (rc)
1784     return;
1785     entry = next_entry;
1786     @@ -3580,9 +3735,10 @@ static void exit_robust_list(struct task_struct *curr)
1787     cond_resched();
1788     }
1789    
1790     - if (pending)
1791     + if (pending) {
1792     handle_futex_death((void __user *)pending + futex_offset,
1793     - curr, pip);
1794     + curr, pip, HANDLE_DEATH_PENDING);
1795     + }
1796     }
1797    
1798     static void futex_cleanup(struct task_struct *tsk)
1799     @@ -3865,7 +4021,8 @@ void compat_exit_robust_list(struct task_struct *curr)
1800     if (entry != pending) {
1801     void __user *uaddr = futex_uaddr(entry, futex_offset);
1802    
1803     - if (handle_futex_death(uaddr, curr, pi))
1804     + if (handle_futex_death(uaddr, curr, pi,
1805     + HANDLE_DEATH_LIST))
1806     return;
1807     }
1808     if (rc)
1809     @@ -3884,7 +4041,7 @@ void compat_exit_robust_list(struct task_struct *curr)
1810     if (pending) {
1811     void __user *uaddr = futex_uaddr(pending, futex_offset);
1812    
1813     - handle_futex_death(uaddr, curr, pip);
1814     + handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
1815     }
1816     }
1817    
1818     diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
1819     index 6ff4156b3929e..1589e131ee4b8 100644
1820     --- a/kernel/locking/rtmutex.c
1821     +++ b/kernel/locking/rtmutex.c
1822     @@ -1176,6 +1176,14 @@ void rt_mutex_adjust_pi(struct task_struct *task)
1823     next_lock, NULL, task);
1824     }
1825    
1826     +void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
1827     +{
1828     + debug_rt_mutex_init_waiter(waiter);
1829     + RB_CLEAR_NODE(&waiter->pi_tree_entry);
1830     + RB_CLEAR_NODE(&waiter->tree_entry);
1831     + waiter->task = NULL;
1832     +}
1833     +
1834     /**
1835     * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
1836     * @lock: the rt_mutex to take
1837     @@ -1258,9 +1266,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
1838     unsigned long flags;
1839     int ret = 0;
1840    
1841     - debug_rt_mutex_init_waiter(&waiter);
1842     - RB_CLEAR_NODE(&waiter.pi_tree_entry);
1843     - RB_CLEAR_NODE(&waiter.tree_entry);
1844     + rt_mutex_init_waiter(&waiter);
1845    
1846     /*
1847     * Technically we could use raw_spin_[un]lock_irq() here, but this can
1848     @@ -1516,19 +1522,6 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
1849     }
1850     EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
1851    
1852     -/*
1853     - * Futex variant with full deadlock detection.
1854     - * Futex variants must not use the fast-path, see __rt_mutex_futex_unlock().
1855     - */
1856     -int __sched rt_mutex_timed_futex_lock(struct rt_mutex *lock,
1857     - struct hrtimer_sleeper *timeout)
1858     -{
1859     - might_sleep();
1860     -
1861     - return rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE,
1862     - timeout, RT_MUTEX_FULL_CHAINWALK);
1863     -}
1864     -
1865     /*
1866     * Futex variant, must not use fastpath.
1867     */
1868     @@ -1703,30 +1696,34 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock)
1869     }
1870    
1871     /**
1872     - * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1873     + * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1874     * @lock: the rt_mutex to take
1875     * @waiter: the pre-initialized rt_mutex_waiter
1876     * @task: the task to prepare
1877     *
1878     + * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
1879     + * detection. It does not wait; see rt_mutex_wait_proxy_lock() for that.
1880     + *
1881     + * NOTE: does _NOT_ remove the @waiter on failure; must either call
1882     + * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
1883     + *
1884     * Returns:
1885     * 0 - task blocked on lock
1886     * 1 - acquired the lock for task, caller should wake it up
1887     * <0 - error
1888     *
1889     - * Special API call for FUTEX_REQUEUE_PI support.
1890     + * Special API call for PI-futex support.
1891     */
1892     -int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1893     +int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1894     struct rt_mutex_waiter *waiter,
1895     struct task_struct *task)
1896     {
1897     int ret;
1898    
1899     - raw_spin_lock_irq(&lock->wait_lock);
1900     + lockdep_assert_held(&lock->wait_lock);
1901    
1902     - if (try_to_take_rt_mutex(lock, task, NULL)) {
1903     - raw_spin_unlock_irq(&lock->wait_lock);
1904     + if (try_to_take_rt_mutex(lock, task, NULL))
1905     return 1;
1906     - }
1907    
1908     /* We enforce deadlock detection for futexes */
1909     ret = task_blocks_on_rt_mutex(lock, waiter, task,
1910     @@ -1742,13 +1739,42 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1911     ret = 0;
1912     }
1913    
1914     + debug_rt_mutex_print_deadlock(waiter);
1915     +
1916     + return ret;
1917     +}
1918     +
1919     +/**
1920     + * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1921     + * @lock: the rt_mutex to take
1922     + * @waiter: the pre-initialized rt_mutex_waiter
1923     + * @task: the task to prepare
1924     + *
1925     + * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
1926     + * detection. It does not wait; see rt_mutex_wait_proxy_lock() for that.
1927     + *
1928     + * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
1929     + * on failure.
1930     + *
1931     + * Returns:
1932     + * 0 - task blocked on lock
1933     + * 1 - acquired the lock for task, caller should wake it up
1934     + * <0 - error
1935     + *
1936     + * Special API call for PI-futex support.
1937     + */
1938     +int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1939     + struct rt_mutex_waiter *waiter,
1940     + struct task_struct *task)
1941     +{
1942     + int ret;
1943     +
1944     + raw_spin_lock_irq(&lock->wait_lock);
1945     + ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
1946     if (unlikely(ret))
1947     remove_waiter(lock, waiter);
1948     -
1949     raw_spin_unlock_irq(&lock->wait_lock);
1950    
1951     - debug_rt_mutex_print_deadlock(waiter);
1952     -
1953     return ret;
1954     }
1955    
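
After this split, __rt_mutex_start_proxy_lock() requires wait_lock to be held and leaves the waiter enqueued on failure, so every caller must follow up with either the wait or the cleanup call. A condensed outline of the calling convention, pieced together from the futex_lock_pi() hunk above (error handling elided; not literal kernel code):

	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, &waiter, current);
	raw_spin_unlock_irq(&lock->wait_lock);

	if (ret == 0)                           /* enqueued, now block  */
		ret = rt_mutex_wait_proxy_lock(lock, to, &waiter);
	else if (ret == 1)                      /* acquired immediately */
		ret = 0;

	/* on failure the waiter may still be enqueued; detach it */
	if (ret && !rt_mutex_cleanup_proxy_lock(lock, &waiter))
		ret = 0;                        /* won the lock after all */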
1956     @@ -1796,18 +1822,14 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
1957     int ret;
1958    
1959     raw_spin_lock_irq(&lock->wait_lock);
1960     -
1961     - set_current_state(TASK_INTERRUPTIBLE);
1962     -
1963     /* sleep on the mutex */
1964     + set_current_state(TASK_INTERRUPTIBLE);
1965     ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
1966     -
1967     /*
1968     * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
1969     * have to fix that up.
1970     */
1971     fixup_rt_mutex_waiters(lock);
1972     -
1973     raw_spin_unlock_irq(&lock->wait_lock);
1974    
1975     return ret;
1976     @@ -1818,7 +1840,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
1977     * @lock: the rt_mutex we were woken on
1978     * @waiter: the pre-initialized rt_mutex_waiter
1979     *
1980     - * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
1981     + * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
1982     + * rt_mutex_wait_proxy_lock().
1983     *
1984     * Unless we acquired the lock, we're still enqueued on the wait-list and can
1985     * in fact still be granted ownership until we're removed. Therefore we can
1986     @@ -1838,15 +1861,32 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
1987     bool cleanup = false;
1988    
1989     raw_spin_lock_irq(&lock->wait_lock);
1990     + /*
1991     + * Do an unconditional try-lock, this deals with the lock stealing
1992     + * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
1993     + * sets a NULL owner.
1994     + *
1995     + * We're not interested in the return value, because the subsequent
1996     + * test on rt_mutex_owner() will infer that. If the trylock succeeded,
1997     + * we will own the lock and it will have removed the waiter. If we
1998     + * failed the trylock, we're still not owner and we need to remove
1999     + * ourselves.
2000     + */
2001     + try_to_take_rt_mutex(lock, current, waiter);
2002     /*
2003     * Unless we're the owner, we're still enqueued on the wait_list.
2004     * So check if we became owner, if not, take us off the wait_list.
2005     */
2006     if (rt_mutex_owner(lock) != current) {
2007     remove_waiter(lock, waiter);
2008     - fixup_rt_mutex_waiters(lock);
2009     cleanup = true;
2010     }
2011     + /*
2012     + * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
2013     + * have to fix that up.
2014     + */
2015     + fixup_rt_mutex_waiters(lock);
2016     +
2017     raw_spin_unlock_irq(&lock->wait_lock);
2018    
2019     return cleanup;
2020     diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
2021     index bea5d677fe343..c5d3f577b2a7e 100644
2022     --- a/kernel/locking/rtmutex_common.h
2023     +++ b/kernel/locking/rtmutex_common.h
2024     @@ -103,6 +103,10 @@ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
2025     extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
2026     struct task_struct *proxy_owner);
2027     extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
2028     +extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
2029     +extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
2030     + struct rt_mutex_waiter *waiter,
2031     + struct task_struct *task);
2032     extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
2033     struct rt_mutex_waiter *waiter,
2034     struct task_struct *task);
2035     @@ -111,7 +115,6 @@ extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
2036     struct rt_mutex_waiter *waiter);
2037     extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
2038     struct rt_mutex_waiter *waiter);
2039     -extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
2040     extern int rt_mutex_futex_trylock(struct rt_mutex *l);
2041     extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
2042    
2043     diff --git a/net/core/dev.c b/net/core/dev.c
2044     index 9ac591dd16d50..5b69a9a41dd50 100644
2045     --- a/net/core/dev.c
2046     +++ b/net/core/dev.c
2047     @@ -8300,7 +8300,7 @@ static void __net_exit default_device_exit(struct net *net)
2048     continue;
2049    
2050     /* Leave virtual devices for the generic cleanup */
2051     - if (dev->rtnl_link_ops)
2052     + if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
2053     continue;
2054    
2055     /* Push remaining network devices to init_net */
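
The dev.c change keys off the rtnl_link_ops::netns_refund flag: drivers that set it get their devices pushed back to init_net on namespace teardown instead of being left to the generic virtual-device cleanup. A hedged sketch of such an opt-in (the .kind string and everything except .netns_refund are illustrative):

	static struct rtnl_link_ops example_link_ops = {
		.kind         = "example",
		/* return devices to init_net when their netns dies,
		 * rather than deleting them with the namespace */
		.netns_refund = true,
	};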
2056     diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
2057     index 1a13715b9a591..f37fbc71fc1db 100644
2058     --- a/net/mac80211/cfg.c
2059     +++ b/net/mac80211/cfg.c
2060     @@ -2681,14 +2681,14 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
2061     continue;
2062    
2063     for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
2064     - if (~sdata->rc_rateidx_mcs_mask[i][j]) {
2065     + if (sdata->rc_rateidx_mcs_mask[i][j] != 0xff) {
2066     sdata->rc_has_mcs_mask[i] = true;
2067     break;
2068     }
2069     }
2070    
2071     for (j = 0; j < NL80211_VHT_NSS_MAX; j++) {
2072     - if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) {
2073     + if (sdata->rc_rateidx_vht_mcs_mask[i][j] != 0xffff) {
2074     sdata->rc_has_vht_mcs_mask[i] = true;
2075     break;
2076     }
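
The mac80211 fix swaps the ~mask truthiness tests for explicit comparisons because of C's integer promotions: a u8 is widened to int before ~ is applied, so ~mask is never 0 and the old test fired even for an all-ones (nothing masked) row. A self-contained demonstration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t mask = 0xff;            /* all rates enabled */

		/* mask promotes to int, so ~mask == 0xffffff00, never 0 */
		if (~mask)
			puts("old test: fires even for an all-ones mask (bug)");

		if (mask != 0xff)
			puts("new test: correctly silent for an all-ones mask");
		return 0;
	}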
2077     diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
2078     index 0c0695eb2609a..3796c24defcb9 100644
2079     --- a/net/mac80211/ibss.c
2080     +++ b/net/mac80211/ibss.c
2081     @@ -1862,6 +1862,8 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
2082    
2083     /* remove beacon */
2084     kfree(sdata->u.ibss.ie);
2085     + sdata->u.ibss.ie = NULL;
2086     + sdata->u.ibss.ie_len = 0;
2087    
2088     /* on the next join, re-program HT parameters */
2089     memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
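
The ibss fix pairs kfree() with resetting the pointer and length, so a later leave/join cycle cannot double-free or reuse the stale beacon IEs. The general pattern in plain C:

	#include <stdlib.h>
	#include <stddef.h>

	struct beacon_state {
		unsigned char *ie;
		size_t ie_len;
	};

	static void free_beacon(struct beacon_state *s)
	{
		free(s->ie);
		s->ie = NULL;   /* make accidental reuse fail loudly ...      */
		s->ie_len = 0;  /* ... instead of touching freed memory again */
	}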
2090     diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
2091     index d62b7a7f65bb0..2cc5ced1cec94 100644
2092     --- a/net/qrtr/qrtr.c
2093     +++ b/net/qrtr/qrtr.c
2094     @@ -728,6 +728,11 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
2095     rc = copied;
2096    
2097     if (addr) {
2098     + /* There is an anonymous 2-byte hole after sq_family,
2099     + * make sure to clear it.
2100     + */
2101     + memset(addr, 0, sizeof(*addr));
2102     +
2103     addr->sq_family = AF_QIPCRTR;
2104     addr->sq_node = le32_to_cpu(phdr->src_node_id);
2105     addr->sq_port = le32_to_cpu(phdr->src_port_id);
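
The qrtr fix zeroes the whole sockaddr before filling individual fields: member-by-member assignment leaves struct padding uninitialized, and that padding would otherwise be copied to user space as a kernel infoleak. The general recipe:

	#include <stdint.h>
	#include <string.h>

	struct wire_addr {
		uint16_t family;  /* a 2-byte hole follows on most ABIs */
		uint32_t node;
		uint32_t port;
	};

	static void fill_addr(struct wire_addr *a)
	{
		memset(a, 0, sizeof(*a)); /* clears the padding, not just fields */
		a->family = 42;
		a->node   = 1;
		a->port   = 2;
	}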
2106     diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
2107     index 2fb79c245f3fc..1283c3bf401a5 100644
2108     --- a/net/sched/sch_choke.c
2109     +++ b/net/sched/sch_choke.c
2110     @@ -409,6 +409,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
2111     struct sk_buff **old = NULL;
2112     unsigned int mask;
2113     u32 max_P;
2114     + u8 *stab;
2115    
2116     if (opt == NULL)
2117     return -EINVAL;
2118     @@ -424,8 +425,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
2119     max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
2120    
2121     ctl = nla_data(tb[TCA_CHOKE_PARMS]);
2122     -
2123     - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
2124     + stab = nla_data(tb[TCA_CHOKE_STAB]);
2125     + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab))
2126     return -EINVAL;
2127    
2128     if (ctl->limit > CHOKE_MAX_QUEUE)
2129     @@ -478,7 +479,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
2130    
2131     red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
2132     ctl->Plog, ctl->Scell_log,
2133     - nla_data(tb[TCA_CHOKE_STAB]),
2134     + stab,
2135     max_P);
2136     red_set_vars(&q->vars);
2137    
2138     diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
2139     index d86a96313981b..745e8fae62b3e 100644
2140     --- a/net/sched/sch_gred.c
2141     +++ b/net/sched/sch_gred.c
2142     @@ -356,7 +356,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
2143     struct gred_sched *table = qdisc_priv(sch);
2144     struct gred_sched_data *q = table->tab[dp];
2145    
2146     - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
2147     + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab))
2148     return -EINVAL;
2149    
2150     if (!q) {
2151     diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
2152     index 797895bddcfda..d6abf5c5a5b82 100644
2153     --- a/net/sched/sch_red.c
2154     +++ b/net/sched/sch_red.c
2155     @@ -169,6 +169,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
2156     struct Qdisc *child = NULL;
2157     int err;
2158     u32 max_P;
2159     + u8 *stab;
2160    
2161     if (opt == NULL)
2162     return -EINVAL;
2163     @@ -184,7 +185,9 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
2164     max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
2165    
2166     ctl = nla_data(tb[TCA_RED_PARMS]);
2167     - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
2168     + stab = nla_data(tb[TCA_RED_STAB]);
2169     + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
2170     + ctl->Scell_log, stab))
2171     return -EINVAL;
2172    
2173     if (ctl->limit > 0) {
2174     @@ -206,7 +209,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
2175     red_set_parms(&q->parms,
2176     ctl->qth_min, ctl->qth_max, ctl->Wlog,
2177     ctl->Plog, ctl->Scell_log,
2178     - nla_data(tb[TCA_RED_STAB]),
2179     + stab,
2180     max_P);
2181     red_set_vars(&q->vars);
2182    
2183     diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
2184     index 69a5fffed86c7..b2598a32b556e 100644
2185     --- a/net/sched/sch_sfq.c
2186     +++ b/net/sched/sch_sfq.c
2187     @@ -645,7 +645,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
2188     }
2189    
2190     if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
2191     - ctl_v1->Wlog, ctl_v1->Scell_log))
2192     + ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
2193     return -EINVAL;
2194     if (ctl_v1 && ctl_v1->qth_min) {
2195     p = kmalloc(sizeof(*p), GFP_KERNEL);
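
All four qdisc hunks route the user-supplied STAB table into red_check_params() so it is validated before red_set_parms() consumes it (sch_sfq passes NULL because it has no table). The exact kernel check is not part of this patch; a plausible sketch, assuming a 256-entry table whose bytes end up as shift counts and must therefore stay below 32:

	#include <stdbool.h>
	#include <stdint.h>

	#define STAB_SIZE 256 /* assumed table size */

	static bool stab_is_sane(const uint8_t *stab)
	{
		int i;

		if (!stab)
			return true; /* no table supplied: nothing to validate */

		for (i = 0; i < STAB_SIZE; i++)
			if (stab[i] >= 32) /* would be an undefined shift */
				return false;
		return true;
	}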
2196     diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
2197     index b87221efdf7e0..51fdec9273d72 100644
2198     --- a/tools/perf/util/auxtrace.c
2199     +++ b/tools/perf/util/auxtrace.c
2200     @@ -248,10 +248,6 @@ static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
2201     queue->set = true;
2202     queue->tid = buffer->tid;
2203     queue->cpu = buffer->cpu;
2204     - } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
2205     - pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
2206     - queue->cpu, queue->tid, buffer->cpu, buffer->tid);
2207     - return -EINVAL;
2208     }
2209    
2210     buffer->buffer_nr = queues->next_buffer_nr++;