Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0116-4.19.17-all-fixes.patch



Revision 3395
Fri Aug 2 11:47:29 2019 UTC by niro
File size: 119188 bytes
-linux-4.19.17
1 niro 3395 diff --git a/Makefile b/Makefile
2     index e8cb4875b86d..4b0bce87a36b 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 19
9     -SUBLEVEL = 16
10     +SUBLEVEL = 17
11     EXTRAVERSION =
12     NAME = "People's Front"
13    
14     diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
15     index 176e38d54872..ec0da5b3d7fd 100644
16     --- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
17     +++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
18     @@ -27,6 +27,23 @@
19     method = "smc";
20     };
21    
22     + reserved-memory {
23     + #address-cells = <2>;
24     + #size-cells = <2>;
25     + ranges;
26     +
27     + /*
28     + * This area matches the mapping done with a
29     + * mainline U-Boot, and should be updated by the
30     + * bootloader.
31     + */
32     +
33     + psci-area@4000000 {
34     + reg = <0x0 0x4000000 0x0 0x200000>;
35     + no-map;
36     + };
37     + };
38     +
39     ap806 {
40     #address-cells = <2>;
41     #size-cells = <2>;
42     diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
43     index 95e3fa7ded8b..8b284cbf8162 100644
44     --- a/arch/arm64/include/asm/kvm_arm.h
45     +++ b/arch/arm64/include/asm/kvm_arm.h
46     @@ -24,6 +24,8 @@
47    
48     /* Hyp Configuration Register (HCR) bits */
49     #define HCR_FWB (UL(1) << 46)
50     +#define HCR_API (UL(1) << 41)
51     +#define HCR_APK (UL(1) << 40)
52     #define HCR_TEA (UL(1) << 37)
53     #define HCR_TERR (UL(1) << 36)
54     #define HCR_TLOR (UL(1) << 35)
55     @@ -87,6 +89,7 @@
56     HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
57     HCR_FMO | HCR_IMO)
58     #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
59     +#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
60     #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
61    
62     /* TCR_EL2 Registers bits */
63     diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
64     index b0853069702f..651a06b1980f 100644
65     --- a/arch/arm64/kernel/head.S
66     +++ b/arch/arm64/kernel/head.S
67     @@ -494,10 +494,9 @@ ENTRY(el2_setup)
68     #endif
69    
70     /* Hyp configuration. */
71     - mov x0, #HCR_RW // 64-bit EL1
72     + mov_q x0, HCR_HOST_NVHE_FLAGS
73     cbz x2, set_hcr
74     - orr x0, x0, #HCR_TGE // Enable Host Extensions
75     - orr x0, x0, #HCR_E2H
76     + mov_q x0, HCR_HOST_VHE_FLAGS
77     set_hcr:
78     msr hcr_el2, x0
79     isb
80     diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
81     index f0e6ab8abe9c..ba6b41790fcd 100644
82     --- a/arch/arm64/kernel/kaslr.c
83     +++ b/arch/arm64/kernel/kaslr.c
84     @@ -14,6 +14,7 @@
85     #include <linux/sched.h>
86     #include <linux/types.h>
87    
88     +#include <asm/cacheflush.h>
89     #include <asm/fixmap.h>
90     #include <asm/kernel-pgtable.h>
91     #include <asm/memory.h>
92     @@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
93     return ret;
94     }
95    
96     -static __init const u8 *get_cmdline(void *fdt)
97     +static __init const u8 *kaslr_get_cmdline(void *fdt)
98     {
99     static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
100    
101     @@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
102     * Check if 'nokaslr' appears on the command line, and
103     * return 0 if that is the case.
104     */
105     - cmdline = get_cmdline(fdt);
106     + cmdline = kaslr_get_cmdline(fdt);
107     str = strstr(cmdline, "nokaslr");
108     if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
109     return 0;
110     @@ -169,5 +170,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
111     module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
112     module_alloc_base &= PAGE_MASK;
113    
114     + __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
115     + __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
116     +
117     return offset;
118     }
119     diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
120     index ca46153d7915..a1c32c1f2267 100644
121     --- a/arch/arm64/kvm/hyp/switch.c
122     +++ b/arch/arm64/kvm/hyp/switch.c
123     @@ -157,7 +157,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
124     mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
125    
126     write_sysreg(mdcr_el2, mdcr_el2);
127     - write_sysreg(HCR_RW, hcr_el2);
128     + write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
129     write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
130     }
131    
132     diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
133     index 35511999156a..154b811d5894 100644
134     --- a/arch/mips/Kconfig
135     +++ b/arch/mips/Kconfig
136     @@ -3149,6 +3149,7 @@ config MIPS32_O32
137     config MIPS32_N32
138     bool "Kernel support for n32 binaries"
139     depends on 64BIT
140     + select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
141     select COMPAT
142     select MIPS32_COMPAT
143     select SYSVIPC_COMPAT if SYSVIPC
144     diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
145     index 6054d49e608e..fe3773539eff 100644
146     --- a/arch/mips/bcm47xx/setup.c
147     +++ b/arch/mips/bcm47xx/setup.c
148     @@ -173,6 +173,31 @@ void __init plat_mem_setup(void)
149     pm_power_off = bcm47xx_machine_halt;
150     }
151    
152     +#ifdef CONFIG_BCM47XX_BCMA
153     +static struct device * __init bcm47xx_setup_device(void)
154     +{
155     + struct device *dev;
156     + int err;
157     +
158     + dev = kzalloc(sizeof(*dev), GFP_KERNEL);
159     + if (!dev)
160     + return NULL;
161     +
162     + err = dev_set_name(dev, "bcm47xx_soc");
163     + if (err) {
164     + pr_err("Failed to set SoC device name: %d\n", err);
165     + kfree(dev);
166     + return NULL;
167     + }
168     +
169     + err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
170     + if (err)
171     + pr_err("Failed to set SoC DMA mask: %d\n", err);
172     +
173     + return dev;
174     +}
175     +#endif
176     +
177     /*
178     * This finishes bus initialization doing things that were not possible without
179     * kmalloc. Make sure to call it late enough (after mm_init).
180     @@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void)
181     if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
182     int err;
183    
184     + bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
185     + if (!bcm47xx_bus.bcma.dev)
186     + panic("Failed to setup SoC device\n");
187     +
188     err = bcma_host_soc_init(&bcm47xx_bus.bcma);
189     if (err)
190     panic("Failed to initialize BCMA bus (err %d)", err);
191     @@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void)
192     #endif
193     #ifdef CONFIG_BCM47XX_BCMA
194     case BCM47XX_BUS_TYPE_BCMA:
195     + if (device_register(bcm47xx_bus.bcma.dev))
196     + pr_err("Failed to register SoC device\n");
197     bcma_bus_register(&bcm47xx_bus.bcma.bus);
198     break;
199     #endif
200     diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
201     index f0bc3312ed11..c4ef1c31e0c4 100644
202     --- a/arch/mips/lantiq/irq.c
203     +++ b/arch/mips/lantiq/irq.c
204     @@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = {
205     .irq_set_type = ltq_eiu_settype,
206     };
207    
208     -static void ltq_hw_irqdispatch(int module)
209     +static void ltq_hw_irq_handler(struct irq_desc *desc)
210     {
211     + int module = irq_desc_get_irq(desc) - 2;
212     u32 irq;
213     + int hwirq;
214    
215     irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
216     if (irq == 0)
217     @@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module)
218     * other bits might be bogus
219     */
220     irq = __fls(irq);
221     - do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
222     + hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
223     + generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
224    
225     /* if this is a EBU irq, we need to ack it or get a deadlock */
226     if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
227     @@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module)
228     LTQ_EBU_PCC_ISTAT);
229     }
230    
231     -#define DEFINE_HWx_IRQDISPATCH(x) \
232     - static void ltq_hw ## x ## _irqdispatch(void) \
233     - { \
234     - ltq_hw_irqdispatch(x); \
235     - }
236     -DEFINE_HWx_IRQDISPATCH(0)
237     -DEFINE_HWx_IRQDISPATCH(1)
238     -DEFINE_HWx_IRQDISPATCH(2)
239     -DEFINE_HWx_IRQDISPATCH(3)
240     -DEFINE_HWx_IRQDISPATCH(4)
241     -
242     -#if MIPS_CPU_TIMER_IRQ == 7
243     -static void ltq_hw5_irqdispatch(void)
244     -{
245     - do_IRQ(MIPS_CPU_TIMER_IRQ);
246     -}
247     -#else
248     -DEFINE_HWx_IRQDISPATCH(5)
249     -#endif
250     -
251     -static void ltq_hw_irq_handler(struct irq_desc *desc)
252     -{
253     - ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
254     -}
255     -
256     -asmlinkage void plat_irq_dispatch(void)
257     -{
258     - unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
259     - int irq;
260     -
261     - if (!pending) {
262     - spurious_interrupt();
263     - return;
264     - }
265     -
266     - pending >>= CAUSEB_IP;
267     - while (pending) {
268     - irq = fls(pending) - 1;
269     - do_IRQ(MIPS_CPU_IRQ_BASE + irq);
270     - pending &= ~BIT(irq);
271     - }
272     -}
273     -
274     static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
275     {
276     struct irq_chip *chip = &ltq_irq_type;
277     @@ -343,28 +303,10 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
278     for (i = 0; i < MAX_IM; i++)
279     irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
280    
281     - if (cpu_has_vint) {
282     - pr_info("Setting up vectored interrupts\n");
283     - set_vi_handler(2, ltq_hw0_irqdispatch);
284     - set_vi_handler(3, ltq_hw1_irqdispatch);
285     - set_vi_handler(4, ltq_hw2_irqdispatch);
286     - set_vi_handler(5, ltq_hw3_irqdispatch);
287     - set_vi_handler(6, ltq_hw4_irqdispatch);
288     - set_vi_handler(7, ltq_hw5_irqdispatch);
289     - }
290     -
291     ltq_domain = irq_domain_add_linear(node,
292     (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
293     &irq_domain_ops, 0);
294    
295     -#ifndef CONFIG_MIPS_MT_SMP
296     - set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
297     - IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
298     -#else
299     - set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
300     - IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
301     -#endif
302     -
303     /* tell oprofile which irq to use */
304     ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
305    
306     diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
307     index 2a5bb849b10e..288b58b00dc8 100644
308     --- a/arch/mips/pci/msi-octeon.c
309     +++ b/arch/mips/pci/msi-octeon.c
310     @@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
311     int irq;
312     struct irq_chip *msi;
313    
314     - if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
315     + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
316     + return 0;
317     + } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
318     msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
319     msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
320     msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
321     diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
322     index c84f1e039d84..01dcccf9185f 100644
323     --- a/arch/x86/xen/time.c
324     +++ b/arch/x86/xen/time.c
325     @@ -361,8 +361,6 @@ void xen_timer_resume(void)
326     {
327     int cpu;
328    
329     - pvclock_resume();
330     -
331     if (xen_clockevent != &xen_vcpuop_clockevent)
332     return;
333    
334     @@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = {
335     };
336    
337     static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
338     +static u64 xen_clock_value_saved;
339    
340     void xen_save_time_memory_area(void)
341     {
342     struct vcpu_register_time_memory_area t;
343     int ret;
344    
345     + xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
346     +
347     if (!xen_clock)
348     return;
349    
350     @@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void)
351     int ret;
352    
353     if (!xen_clock)
354     - return;
355     + goto out;
356    
357     t.addr.v = &xen_clock->pvti;
358    
359     @@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void)
360     if (ret != 0)
361     pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
362     ret);
363     +
364     +out:
365     + /* Need pvclock_resume() before using xen_clocksource_read(). */
366     + pvclock_resume();
367     + xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
368     }
369    
370     static void xen_setup_vsyscall_time_info(void)
371     diff --git a/block/partition-generic.c b/block/partition-generic.c
372     index d3d14e81fb12..5f8db5c5140f 100644
373     --- a/block/partition-generic.c
374     +++ b/block/partition-generic.c
375     @@ -249,9 +249,10 @@ struct device_type part_type = {
376     .uevent = part_uevent,
377     };
378    
379     -static void delete_partition_rcu_cb(struct rcu_head *head)
380     +static void delete_partition_work_fn(struct work_struct *work)
381     {
382     - struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);
383     + struct hd_struct *part = container_of(to_rcu_work(work), struct hd_struct,
384     + rcu_work);
385    
386     part->start_sect = 0;
387     part->nr_sects = 0;
388     @@ -262,7 +263,8 @@ static void delete_partition_rcu_cb(struct rcu_head *head)
389     void __delete_partition(struct percpu_ref *ref)
390     {
391     struct hd_struct *part = container_of(ref, struct hd_struct, ref);
392     - call_rcu(&part->rcu_head, delete_partition_rcu_cb);
393     + INIT_RCU_WORK(&part->rcu_work, delete_partition_work_fn);
394     + queue_rcu_work(system_wq, &part->rcu_work);
395     }
396    
397     /*
398     diff --git a/crypto/authenc.c b/crypto/authenc.c
399     index 4fa8d40d947b..3ee10fc25aff 100644
400     --- a/crypto/authenc.c
401     +++ b/crypto/authenc.c
402     @@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
403     return -EINVAL;
404     if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
405     return -EINVAL;
406     - if (RTA_PAYLOAD(rta) < sizeof(*param))
407     +
408     + /*
409     + * RTA_OK() didn't align the rtattr's payload when validating that it
410     + * fits in the buffer. Yet, the keys should start on the next 4-byte
411     + * aligned boundary. To avoid confusion, require that the rtattr
412     + * payload be exactly the param struct, which has a 4-byte aligned size.
413     + */
414     + if (RTA_PAYLOAD(rta) != sizeof(*param))
415     return -EINVAL;
416     + BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
417    
418     param = RTA_DATA(rta);
419     keys->enckeylen = be32_to_cpu(param->enckeylen);
420    
421     - key += RTA_ALIGN(rta->rta_len);
422     - keylen -= RTA_ALIGN(rta->rta_len);
423     + key += rta->rta_len;
424     + keylen -= rta->rta_len;
425    
426     if (keylen < keys->enckeylen)
427     return -EINVAL;
428     diff --git a/crypto/authencesn.c b/crypto/authencesn.c
429     index 50b804747e20..4eff4be6bd12 100644
430     --- a/crypto/authencesn.c
431     +++ b/crypto/authencesn.c
432     @@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
433     struct aead_request *req = areq->data;
434    
435     err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
436     - aead_request_complete(req, err);
437     + authenc_esn_request_complete(req, err);
438     }
439    
440     static int crypto_authenc_esn_decrypt(struct aead_request *req)
441     diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
442     index 9a5c60f08aad..c0cf87ae7ef6 100644
443     --- a/crypto/sm3_generic.c
444     +++ b/crypto/sm3_generic.c
445     @@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m)
446    
447     for (i = 0; i <= 63; i++) {
448    
449     - ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
450     + ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
451    
452     ss2 = ss1 ^ rol32(a, 12);
453    
454     diff --git a/drivers/block/loop.c b/drivers/block/loop.c
455     index ea9debf59b22..c9c2bcc36e26 100644
456     --- a/drivers/block/loop.c
457     +++ b/drivers/block/loop.c
458     @@ -83,7 +83,7 @@
459     #include <linux/uaccess.h>
460    
461     static DEFINE_IDR(loop_index_idr);
462     -static DEFINE_MUTEX(loop_index_mutex);
463     +static DEFINE_MUTEX(loop_ctl_mutex);
464    
465     static int max_part;
466     static int part_shift;
467     @@ -631,18 +631,7 @@ static void loop_reread_partitions(struct loop_device *lo,
468     {
469     int rc;
470    
471     - /*
472     - * bd_mutex has been held already in release path, so don't
473     - * acquire it if this function is called in such case.
474     - *
475     - * If the reread partition isn't from release path, lo_refcnt
476     - * must be at least one and it can only become zero when the
477     - * current holder is released.
478     - */
479     - if (!atomic_read(&lo->lo_refcnt))
480     - rc = __blkdev_reread_part(bdev);
481     - else
482     - rc = blkdev_reread_part(bdev);
483     + rc = blkdev_reread_part(bdev);
484     if (rc)
485     pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
486     __func__, lo->lo_number, lo->lo_file_name, rc);
487     @@ -689,26 +678,30 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
488     static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
489     unsigned int arg)
490     {
491     - struct file *file, *old_file;
492     + struct file *file = NULL, *old_file;
493     int error;
494     + bool partscan;
495    
496     + error = mutex_lock_killable(&loop_ctl_mutex);
497     + if (error)
498     + return error;
499     error = -ENXIO;
500     if (lo->lo_state != Lo_bound)
501     - goto out;
502     + goto out_err;
503    
504     /* the loop device has to be read-only */
505     error = -EINVAL;
506     if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
507     - goto out;
508     + goto out_err;
509    
510     error = -EBADF;
511     file = fget(arg);
512     if (!file)
513     - goto out;
514     + goto out_err;
515    
516     error = loop_validate_file(file, bdev);
517     if (error)
518     - goto out_putf;
519     + goto out_err;
520    
521     old_file = lo->lo_backing_file;
522    
523     @@ -716,7 +709,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
524    
525     /* size of the new backing store needs to be the same */
526     if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
527     - goto out_putf;
528     + goto out_err;
529    
530     /* and ... switch */
531     blk_mq_freeze_queue(lo->lo_queue);
532     @@ -727,15 +720,22 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
533     lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
534     loop_update_dio(lo);
535     blk_mq_unfreeze_queue(lo->lo_queue);
536     -
537     + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
538     + mutex_unlock(&loop_ctl_mutex);
539     + /*
540     + * We must drop file reference outside of loop_ctl_mutex as dropping
541     + * the file ref can take bd_mutex which creates circular locking
542     + * dependency.
543     + */
544     fput(old_file);
545     - if (lo->lo_flags & LO_FLAGS_PARTSCAN)
546     + if (partscan)
547     loop_reread_partitions(lo, bdev);
548     return 0;
549    
550     - out_putf:
551     - fput(file);
552     - out:
553     +out_err:
554     + mutex_unlock(&loop_ctl_mutex);
555     + if (file)
556     + fput(file);
557     return error;
558     }
559    
560     @@ -910,6 +910,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
561     int lo_flags = 0;
562     int error;
563     loff_t size;
564     + bool partscan;
565    
566     /* This is safe, since we have a reference from open(). */
567     __module_get(THIS_MODULE);
568     @@ -919,13 +920,17 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
569     if (!file)
570     goto out;
571    
572     + error = mutex_lock_killable(&loop_ctl_mutex);
573     + if (error)
574     + goto out_putf;
575     +
576     error = -EBUSY;
577     if (lo->lo_state != Lo_unbound)
578     - goto out_putf;
579     + goto out_unlock;
580    
581     error = loop_validate_file(file, bdev);
582     if (error)
583     - goto out_putf;
584     + goto out_unlock;
585    
586     mapping = file->f_mapping;
587     inode = mapping->host;
588     @@ -937,10 +942,10 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
589     error = -EFBIG;
590     size = get_loop_size(lo, file);
591     if ((loff_t)(sector_t)size != size)
592     - goto out_putf;
593     + goto out_unlock;
594     error = loop_prepare_queue(lo);
595     if (error)
596     - goto out_putf;
597     + goto out_unlock;
598    
599     error = 0;
600    
601     @@ -972,18 +977,22 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
602     lo->lo_state = Lo_bound;
603     if (part_shift)
604     lo->lo_flags |= LO_FLAGS_PARTSCAN;
605     - if (lo->lo_flags & LO_FLAGS_PARTSCAN)
606     - loop_reread_partitions(lo, bdev);
607     + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
608    
609     /* Grab the block_device to prevent its destruction after we
610     - * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
611     + * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
612     */
613     bdgrab(bdev);
614     + mutex_unlock(&loop_ctl_mutex);
615     + if (partscan)
616     + loop_reread_partitions(lo, bdev);
617     return 0;
618    
619     - out_putf:
620     +out_unlock:
621     + mutex_unlock(&loop_ctl_mutex);
622     +out_putf:
623     fput(file);
624     - out:
625     +out:
626     /* This is safe: open() is still holding a reference. */
627     module_put(THIS_MODULE);
628     return error;
629     @@ -1026,39 +1035,31 @@ loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
630     return err;
631     }
632    
633     -static int loop_clr_fd(struct loop_device *lo)
634     +static int __loop_clr_fd(struct loop_device *lo, bool release)
635     {
636     - struct file *filp = lo->lo_backing_file;
637     + struct file *filp = NULL;
638     gfp_t gfp = lo->old_gfp_mask;
639     struct block_device *bdev = lo->lo_device;
640     + int err = 0;
641     + bool partscan = false;
642     + int lo_number;
643    
644     - if (lo->lo_state != Lo_bound)
645     - return -ENXIO;
646     -
647     - /*
648     - * If we've explicitly asked to tear down the loop device,
649     - * and it has an elevated reference count, set it for auto-teardown when
650     - * the last reference goes away. This stops $!~#$@ udev from
651     - * preventing teardown because it decided that it needs to run blkid on
652     - * the loopback device whenever they appear. xfstests is notorious for
653     - * failing tests because blkid via udev races with a losetup
654     - * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
655     - * command to fail with EBUSY.
656     - */
657     - if (atomic_read(&lo->lo_refcnt) > 1) {
658     - lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
659     - mutex_unlock(&lo->lo_ctl_mutex);
660     - return 0;
661     + mutex_lock(&loop_ctl_mutex);
662     + if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
663     + err = -ENXIO;
664     + goto out_unlock;
665     }
666    
667     - if (filp == NULL)
668     - return -EINVAL;
669     + filp = lo->lo_backing_file;
670     + if (filp == NULL) {
671     + err = -EINVAL;
672     + goto out_unlock;
673     + }
674    
675     /* freeze request queue during the transition */
676     blk_mq_freeze_queue(lo->lo_queue);
677    
678     spin_lock_irq(&lo->lo_lock);
679     - lo->lo_state = Lo_rundown;
680     lo->lo_backing_file = NULL;
681     spin_unlock_irq(&lo->lo_lock);
682    
683     @@ -1094,21 +1095,73 @@ static int loop_clr_fd(struct loop_device *lo)
684     module_put(THIS_MODULE);
685     blk_mq_unfreeze_queue(lo->lo_queue);
686    
687     - if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
688     - loop_reread_partitions(lo, bdev);
689     + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
690     + lo_number = lo->lo_number;
691     lo->lo_flags = 0;
692     if (!part_shift)
693     lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
694     loop_unprepare_queue(lo);
695     - mutex_unlock(&lo->lo_ctl_mutex);
696     +out_unlock:
697     + mutex_unlock(&loop_ctl_mutex);
698     + if (partscan) {
699     + /*
700     + * bd_mutex has been held already in release path, so don't
701     + * acquire it if this function is called in such case.
702     + *
703     + * If the reread partition isn't from release path, lo_refcnt
704     + * must be at least one and it can only become zero when the
705     + * current holder is released.
706     + */
707     + if (release)
708     + err = __blkdev_reread_part(bdev);
709     + else
710     + err = blkdev_reread_part(bdev);
711     + pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
712     + __func__, lo_number, err);
713     + /* Device is gone, no point in returning error */
714     + err = 0;
715     + }
716     /*
717     - * Need not hold lo_ctl_mutex to fput backing file.
718     - * Calling fput holding lo_ctl_mutex triggers a circular
719     + * Need not hold loop_ctl_mutex to fput backing file.
720     + * Calling fput holding loop_ctl_mutex triggers a circular
721     * lock dependency possibility warning as fput can take
722     - * bd_mutex which is usually taken before lo_ctl_mutex.
723     + * bd_mutex which is usually taken before loop_ctl_mutex.
724     */
725     - fput(filp);
726     - return 0;
727     + if (filp)
728     + fput(filp);
729     + return err;
730     +}
731     +
732     +static int loop_clr_fd(struct loop_device *lo)
733     +{
734     + int err;
735     +
736     + err = mutex_lock_killable(&loop_ctl_mutex);
737     + if (err)
738     + return err;
739     + if (lo->lo_state != Lo_bound) {
740     + mutex_unlock(&loop_ctl_mutex);
741     + return -ENXIO;
742     + }
743     + /*
744     + * If we've explicitly asked to tear down the loop device,
745     + * and it has an elevated reference count, set it for auto-teardown when
746     + * the last reference goes away. This stops $!~#$@ udev from
747     + * preventing teardown because it decided that it needs to run blkid on
748     + * the loopback device whenever they appear. xfstests is notorious for
749     + * failing tests because blkid via udev races with a losetup
750     + * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
751     + * command to fail with EBUSY.
752     + */
753     + if (atomic_read(&lo->lo_refcnt) > 1) {
754     + lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
755     + mutex_unlock(&loop_ctl_mutex);
756     + return 0;
757     + }
758     + lo->lo_state = Lo_rundown;
759     + mutex_unlock(&loop_ctl_mutex);
760     +
761     + return __loop_clr_fd(lo, false);
762     }
763    
764     static int
765     @@ -1117,47 +1170,72 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
766     int err;
767     struct loop_func_table *xfer;
768     kuid_t uid = current_uid();
769     + struct block_device *bdev;
770     + bool partscan = false;
771    
772     + err = mutex_lock_killable(&loop_ctl_mutex);
773     + if (err)
774     + return err;
775     if (lo->lo_encrypt_key_size &&
776     !uid_eq(lo->lo_key_owner, uid) &&
777     - !capable(CAP_SYS_ADMIN))
778     - return -EPERM;
779     - if (lo->lo_state != Lo_bound)
780     - return -ENXIO;
781     - if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
782     - return -EINVAL;
783     + !capable(CAP_SYS_ADMIN)) {
784     + err = -EPERM;
785     + goto out_unlock;
786     + }
787     + if (lo->lo_state != Lo_bound) {
788     + err = -ENXIO;
789     + goto out_unlock;
790     + }
791     + if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) {
792     + err = -EINVAL;
793     + goto out_unlock;
794     + }
795     +
796     + if (lo->lo_offset != info->lo_offset ||
797     + lo->lo_sizelimit != info->lo_sizelimit) {
798     + sync_blockdev(lo->lo_device);
799     + kill_bdev(lo->lo_device);
800     + }
801    
802     /* I/O need to be drained during transfer transition */
803     blk_mq_freeze_queue(lo->lo_queue);
804    
805     err = loop_release_xfer(lo);
806     if (err)
807     - goto exit;
808     + goto out_unfreeze;
809    
810     if (info->lo_encrypt_type) {
811     unsigned int type = info->lo_encrypt_type;
812    
813     if (type >= MAX_LO_CRYPT) {
814     err = -EINVAL;
815     - goto exit;
816     + goto out_unfreeze;
817     }
818     xfer = xfer_funcs[type];
819     if (xfer == NULL) {
820     err = -EINVAL;
821     - goto exit;
822     + goto out_unfreeze;
823     }
824     } else
825     xfer = NULL;
826    
827     err = loop_init_xfer(lo, xfer, info);
828     if (err)
829     - goto exit;
830     + goto out_unfreeze;
831    
832     if (lo->lo_offset != info->lo_offset ||
833     lo->lo_sizelimit != info->lo_sizelimit) {
834     + /* kill_bdev should have truncated all the pages */
835     + if (lo->lo_device->bd_inode->i_mapping->nrpages) {
836     + err = -EAGAIN;
837     + pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
838     + __func__, lo->lo_number, lo->lo_file_name,
839     + lo->lo_device->bd_inode->i_mapping->nrpages);
840     + goto out_unfreeze;
841     + }
842     if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
843     err = -EFBIG;
844     - goto exit;
845     + goto out_unfreeze;
846     }
847     }
848    
849     @@ -1189,15 +1267,20 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
850     /* update dio if lo_offset or transfer is changed */
851     __loop_update_dio(lo, lo->use_dio);
852    
853     - exit:
854     +out_unfreeze:
855     blk_mq_unfreeze_queue(lo->lo_queue);
856    
857     if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
858     !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
859     lo->lo_flags |= LO_FLAGS_PARTSCAN;
860     lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
861     - loop_reread_partitions(lo, lo->lo_device);
862     + bdev = lo->lo_device;
863     + partscan = true;
864     }
865     +out_unlock:
866     + mutex_unlock(&loop_ctl_mutex);
867     + if (partscan)
868     + loop_reread_partitions(lo, bdev);
869    
870     return err;
871     }
872     @@ -1205,12 +1288,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
873     static int
874     loop_get_status(struct loop_device *lo, struct loop_info64 *info)
875     {
876     - struct file *file;
877     + struct path path;
878     struct kstat stat;
879     int ret;
880    
881     + ret = mutex_lock_killable(&loop_ctl_mutex);
882     + if (ret)
883     + return ret;
884     if (lo->lo_state != Lo_bound) {
885     - mutex_unlock(&lo->lo_ctl_mutex);
886     + mutex_unlock(&loop_ctl_mutex);
887     return -ENXIO;
888     }
889    
890     @@ -1229,17 +1315,17 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
891     lo->lo_encrypt_key_size);
892     }
893    
894     - /* Drop lo_ctl_mutex while we call into the filesystem. */
895     - file = get_file(lo->lo_backing_file);
896     - mutex_unlock(&lo->lo_ctl_mutex);
897     - ret = vfs_getattr(&file->f_path, &stat, STATX_INO,
898     - AT_STATX_SYNC_AS_STAT);
899     + /* Drop loop_ctl_mutex while we call into the filesystem. */
900     + path = lo->lo_backing_file->f_path;
901     + path_get(&path);
902     + mutex_unlock(&loop_ctl_mutex);
903     + ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
904     if (!ret) {
905     info->lo_device = huge_encode_dev(stat.dev);
906     info->lo_inode = stat.ino;
907     info->lo_rdevice = huge_encode_dev(stat.rdev);
908     }
909     - fput(file);
910     + path_put(&path);
911     return ret;
912     }
913    
914     @@ -1323,10 +1409,8 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
915     struct loop_info64 info64;
916     int err;
917    
918     - if (!arg) {
919     - mutex_unlock(&lo->lo_ctl_mutex);
920     + if (!arg)
921     return -EINVAL;
922     - }
923     err = loop_get_status(lo, &info64);
924     if (!err)
925     err = loop_info64_to_old(&info64, &info);
926     @@ -1341,10 +1425,8 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
927     struct loop_info64 info64;
928     int err;
929    
930     - if (!arg) {
931     - mutex_unlock(&lo->lo_ctl_mutex);
932     + if (!arg)
933     return -EINVAL;
934     - }
935     err = loop_get_status(lo, &info64);
936     if (!err && copy_to_user(arg, &info64, sizeof(info64)))
937     err = -EFAULT;
938     @@ -1376,22 +1458,64 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
939    
940     static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
941     {
942     + int err = 0;
943     +
944     if (lo->lo_state != Lo_bound)
945     return -ENXIO;
946    
947     if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
948     return -EINVAL;
949    
950     + if (lo->lo_queue->limits.logical_block_size != arg) {
951     + sync_blockdev(lo->lo_device);
952     + kill_bdev(lo->lo_device);
953     + }
954     +
955     blk_mq_freeze_queue(lo->lo_queue);
956    
957     + /* kill_bdev should have truncated all the pages */
958     + if (lo->lo_queue->limits.logical_block_size != arg &&
959     + lo->lo_device->bd_inode->i_mapping->nrpages) {
960     + err = -EAGAIN;
961     + pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
962     + __func__, lo->lo_number, lo->lo_file_name,
963     + lo->lo_device->bd_inode->i_mapping->nrpages);
964     + goto out_unfreeze;
965     + }
966     +
967     blk_queue_logical_block_size(lo->lo_queue, arg);
968     blk_queue_physical_block_size(lo->lo_queue, arg);
969     blk_queue_io_min(lo->lo_queue, arg);
970     loop_update_dio(lo);
971     -
972     +out_unfreeze:
973     blk_mq_unfreeze_queue(lo->lo_queue);
974    
975     - return 0;
976     + return err;
977     +}
978     +
979     +static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
980     + unsigned long arg)
981     +{
982     + int err;
983     +
984     + err = mutex_lock_killable(&loop_ctl_mutex);
985     + if (err)
986     + return err;
987     + switch (cmd) {
988     + case LOOP_SET_CAPACITY:
989     + err = loop_set_capacity(lo);
990     + break;
991     + case LOOP_SET_DIRECT_IO:
992     + err = loop_set_dio(lo, arg);
993     + break;
994     + case LOOP_SET_BLOCK_SIZE:
995     + err = loop_set_block_size(lo, arg);
996     + break;
997     + default:
998     + err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
999     + }
1000     + mutex_unlock(&loop_ctl_mutex);
1001     + return err;
1002     }
1003    
1004     static int lo_ioctl(struct block_device *bdev, fmode_t mode,
1005     @@ -1400,64 +1524,42 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
1006     struct loop_device *lo = bdev->bd_disk->private_data;
1007     int err;
1008    
1009     - err = mutex_lock_killable_nested(&lo->lo_ctl_mutex, 1);
1010     - if (err)
1011     - goto out_unlocked;
1012     -
1013     switch (cmd) {
1014     case LOOP_SET_FD:
1015     - err = loop_set_fd(lo, mode, bdev, arg);
1016     - break;
1017     + return loop_set_fd(lo, mode, bdev, arg);
1018     case LOOP_CHANGE_FD:
1019     - err = loop_change_fd(lo, bdev, arg);
1020     - break;
1021     + return loop_change_fd(lo, bdev, arg);
1022     case LOOP_CLR_FD:
1023     - /* loop_clr_fd would have unlocked lo_ctl_mutex on success */
1024     - err = loop_clr_fd(lo);
1025     - if (!err)
1026     - goto out_unlocked;
1027     - break;
1028     + return loop_clr_fd(lo);
1029     case LOOP_SET_STATUS:
1030     err = -EPERM;
1031     - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1032     + if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
1033     err = loop_set_status_old(lo,
1034     (struct loop_info __user *)arg);
1035     + }
1036     break;
1037     case LOOP_GET_STATUS:
1038     - err = loop_get_status_old(lo, (struct loop_info __user *) arg);
1039     - /* loop_get_status() unlocks lo_ctl_mutex */
1040     - goto out_unlocked;
1041     + return loop_get_status_old(lo, (struct loop_info __user *) arg);
1042     case LOOP_SET_STATUS64:
1043     err = -EPERM;
1044     - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1045     + if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
1046     err = loop_set_status64(lo,
1047     (struct loop_info64 __user *) arg);
1048     + }
1049     break;
1050     case LOOP_GET_STATUS64:
1051     - err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
1052     - /* loop_get_status() unlocks lo_ctl_mutex */
1053     - goto out_unlocked;
1054     + return loop_get_status64(lo, (struct loop_info64 __user *) arg);
1055     case LOOP_SET_CAPACITY:
1056     - err = -EPERM;
1057     - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1058     - err = loop_set_capacity(lo);
1059     - break;
1060     case LOOP_SET_DIRECT_IO:
1061     - err = -EPERM;
1062     - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1063     - err = loop_set_dio(lo, arg);
1064     - break;
1065     case LOOP_SET_BLOCK_SIZE:
1066     - err = -EPERM;
1067     - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1068     - err = loop_set_block_size(lo, arg);
1069     - break;
1070     + if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
1071     + return -EPERM;
1072     + /* Fall through */
1073     default:
1074     - err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
1075     + err = lo_simple_ioctl(lo, cmd, arg);
1076     + break;
1077     }
1078     - mutex_unlock(&lo->lo_ctl_mutex);
1079    
1080     -out_unlocked:
1081     return err;
1082     }
1083    
1084     @@ -1571,10 +1673,8 @@ loop_get_status_compat(struct loop_device *lo,
1085     struct loop_info64 info64;
1086     int err;
1087    
1088     - if (!arg) {
1089     - mutex_unlock(&lo->lo_ctl_mutex);
1090     + if (!arg)
1091     return -EINVAL;
1092     - }
1093     err = loop_get_status(lo, &info64);
1094     if (!err)
1095     err = loop_info64_to_compat(&info64, arg);
1096     @@ -1589,20 +1689,12 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
1097    
1098     switch(cmd) {
1099     case LOOP_SET_STATUS:
1100     - err = mutex_lock_killable(&lo->lo_ctl_mutex);
1101     - if (!err) {
1102     - err = loop_set_status_compat(lo,
1103     - (const struct compat_loop_info __user *)arg);
1104     - mutex_unlock(&lo->lo_ctl_mutex);
1105     - }
1106     + err = loop_set_status_compat(lo,
1107     + (const struct compat_loop_info __user *)arg);
1108     break;
1109     case LOOP_GET_STATUS:
1110     - err = mutex_lock_killable(&lo->lo_ctl_mutex);
1111     - if (!err) {
1112     - err = loop_get_status_compat(lo,
1113     - (struct compat_loop_info __user *)arg);
1114     - /* loop_get_status() unlocks lo_ctl_mutex */
1115     - }
1116     + err = loop_get_status_compat(lo,
1117     + (struct compat_loop_info __user *)arg);
1118     break;
1119     case LOOP_SET_CAPACITY:
1120     case LOOP_CLR_FD:
1121     @@ -1626,9 +1718,11 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
1122     static int lo_open(struct block_device *bdev, fmode_t mode)
1123     {
1124     struct loop_device *lo;
1125     - int err = 0;
1126     + int err;
1127    
1128     - mutex_lock(&loop_index_mutex);
1129     + err = mutex_lock_killable(&loop_ctl_mutex);
1130     + if (err)
1131     + return err;
1132     lo = bdev->bd_disk->private_data;
1133     if (!lo) {
1134     err = -ENXIO;
1135     @@ -1637,26 +1731,30 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
1136    
1137     atomic_inc(&lo->lo_refcnt);
1138     out:
1139     - mutex_unlock(&loop_index_mutex);
1140     + mutex_unlock(&loop_ctl_mutex);
1141     return err;
1142     }
1143    
1144     -static void __lo_release(struct loop_device *lo)
1145     +static void lo_release(struct gendisk *disk, fmode_t mode)
1146     {
1147     - int err;
1148     + struct loop_device *lo;
1149    
1150     + mutex_lock(&loop_ctl_mutex);
1151     + lo = disk->private_data;
1152     if (atomic_dec_return(&lo->lo_refcnt))
1153     - return;
1154     + goto out_unlock;
1155    
1156     - mutex_lock(&lo->lo_ctl_mutex);
1157     if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
1158     + if (lo->lo_state != Lo_bound)
1159     + goto out_unlock;
1160     + lo->lo_state = Lo_rundown;
1161     + mutex_unlock(&loop_ctl_mutex);
1162     /*
1163     * In autoclear mode, stop the loop thread
1164     * and remove configuration after last close.
1165     */
1166     - err = loop_clr_fd(lo);
1167     - if (!err)
1168     - return;
1169     + __loop_clr_fd(lo, true);
1170     + return;
1171     } else if (lo->lo_state == Lo_bound) {
1172     /*
1173     * Otherwise keep thread (if running) and config,
1174     @@ -1666,14 +1764,8 @@ static void __lo_release(struct loop_device *lo)
1175     blk_mq_unfreeze_queue(lo->lo_queue);
1176     }
1177    
1178     - mutex_unlock(&lo->lo_ctl_mutex);
1179     -}
1180     -
1181     -static void lo_release(struct gendisk *disk, fmode_t mode)
1182     -{
1183     - mutex_lock(&loop_index_mutex);
1184     - __lo_release(disk->private_data);
1185     - mutex_unlock(&loop_index_mutex);
1186     +out_unlock:
1187     + mutex_unlock(&loop_ctl_mutex);
1188     }
1189    
1190     static const struct block_device_operations lo_fops = {
1191     @@ -1712,10 +1804,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
1192     struct loop_device *lo = ptr;
1193     struct loop_func_table *xfer = data;
1194    
1195     - mutex_lock(&lo->lo_ctl_mutex);
1196     + mutex_lock(&loop_ctl_mutex);
1197     if (lo->lo_encryption == xfer)
1198     loop_release_xfer(lo);
1199     - mutex_unlock(&lo->lo_ctl_mutex);
1200     + mutex_unlock(&loop_ctl_mutex);
1201     return 0;
1202     }
1203    
1204     @@ -1896,7 +1988,6 @@ static int loop_add(struct loop_device **l, int i)
1205     if (!part_shift)
1206     disk->flags |= GENHD_FL_NO_PART_SCAN;
1207     disk->flags |= GENHD_FL_EXT_DEVT;
1208     - mutex_init(&lo->lo_ctl_mutex);
1209     atomic_set(&lo->lo_refcnt, 0);
1210     lo->lo_number = i;
1211     spin_lock_init(&lo->lo_lock);
1212     @@ -1975,7 +2066,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
1213     struct kobject *kobj;
1214     int err;
1215    
1216     - mutex_lock(&loop_index_mutex);
1217     + mutex_lock(&loop_ctl_mutex);
1218     err = loop_lookup(&lo, MINOR(dev) >> part_shift);
1219     if (err < 0)
1220     err = loop_add(&lo, MINOR(dev) >> part_shift);
1221     @@ -1983,7 +2074,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
1222     kobj = NULL;
1223     else
1224     kobj = get_disk_and_module(lo->lo_disk);
1225     - mutex_unlock(&loop_index_mutex);
1226     + mutex_unlock(&loop_ctl_mutex);
1227    
1228     *part = 0;
1229     return kobj;
1230     @@ -1993,9 +2084,13 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
1231     unsigned long parm)
1232     {
1233     struct loop_device *lo;
1234     - int ret = -ENOSYS;
1235     + int ret;
1236     +
1237     + ret = mutex_lock_killable(&loop_ctl_mutex);
1238     + if (ret)
1239     + return ret;
1240    
1241     - mutex_lock(&loop_index_mutex);
1242     + ret = -ENOSYS;
1243     switch (cmd) {
1244     case LOOP_CTL_ADD:
1245     ret = loop_lookup(&lo, parm);
1246     @@ -2009,21 +2104,15 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
1247     ret = loop_lookup(&lo, parm);
1248     if (ret < 0)
1249     break;
1250     - ret = mutex_lock_killable(&lo->lo_ctl_mutex);
1251     - if (ret)
1252     - break;
1253     if (lo->lo_state != Lo_unbound) {
1254     ret = -EBUSY;
1255     - mutex_unlock(&lo->lo_ctl_mutex);
1256     break;
1257     }
1258     if (atomic_read(&lo->lo_refcnt) > 0) {
1259     ret = -EBUSY;
1260     - mutex_unlock(&lo->lo_ctl_mutex);
1261     break;
1262     }
1263     lo->lo_disk->private_data = NULL;
1264     - mutex_unlock(&lo->lo_ctl_mutex);
1265     idr_remove(&loop_index_idr, lo->lo_number);
1266     loop_remove(lo);
1267     break;
1268     @@ -2033,7 +2122,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
1269     break;
1270     ret = loop_add(&lo, -1);
1271     }
1272     - mutex_unlock(&loop_index_mutex);
1273     + mutex_unlock(&loop_ctl_mutex);
1274    
1275     return ret;
1276     }
1277     @@ -2117,10 +2206,10 @@ static int __init loop_init(void)
1278     THIS_MODULE, loop_probe, NULL, NULL);
1279    
1280     /* pre-create number of devices given by config or max_loop */
1281     - mutex_lock(&loop_index_mutex);
1282     + mutex_lock(&loop_ctl_mutex);
1283     for (i = 0; i < nr; i++)
1284     loop_add(&lo, i);
1285     - mutex_unlock(&loop_index_mutex);
1286     + mutex_unlock(&loop_ctl_mutex);
1287    
1288     printk(KERN_INFO "loop: module loaded\n");
1289     return 0;
1290     diff --git a/drivers/block/loop.h b/drivers/block/loop.h
1291     index 4d42c7af7de7..af75a5ee4094 100644
1292     --- a/drivers/block/loop.h
1293     +++ b/drivers/block/loop.h
1294     @@ -54,7 +54,6 @@ struct loop_device {
1295    
1296     spinlock_t lo_lock;
1297     int lo_state;
1298     - struct mutex lo_ctl_mutex;
1299     struct kthread_worker worker;
1300     struct task_struct *worker_task;
1301     bool use_dio;
1302     diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
1303     index 14a51254c3db..c13a6d1796a7 100644
1304     --- a/drivers/block/nbd.c
1305     +++ b/drivers/block/nbd.c
1306     @@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd)
1307     blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
1308     set_capacity(nbd->disk, config->bytesize >> 9);
1309     if (bdev) {
1310     - if (bdev->bd_disk)
1311     + if (bdev->bd_disk) {
1312     bd_set_size(bdev, config->bytesize);
1313     - else
1314     + set_blocksize(bdev, config->blksize);
1315     + } else
1316     bdev->bd_invalidated = 1;
1317     bdput(bdev);
1318     }
1319     diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
1320     index a8c4ce07fc9d..a825b6444459 100644
1321     --- a/drivers/crypto/Kconfig
1322     +++ b/drivers/crypto/Kconfig
1323     @@ -681,6 +681,7 @@ config CRYPTO_DEV_BCM_SPU
1324     depends on ARCH_BCM_IPROC
1325     depends on MAILBOX
1326     default m
1327     + select CRYPTO_AUTHENC
1328     select CRYPTO_DES
1329     select CRYPTO_MD5
1330     select CRYPTO_SHA1
1331     diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
1332     index 2d1f1db9f807..cd464637b0cb 100644
1333     --- a/drivers/crypto/bcm/cipher.c
1334     +++ b/drivers/crypto/bcm/cipher.c
1335     @@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
1336     struct spu_hw *spu = &iproc_priv.spu;
1337     struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
1338     struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
1339     - struct rtattr *rta = (void *)key;
1340     - struct crypto_authenc_key_param *param;
1341     - const u8 *origkey = key;
1342     - const unsigned int origkeylen = keylen;
1343     -
1344     - int ret = 0;
1345     + struct crypto_authenc_keys keys;
1346     + int ret;
1347    
1348     flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
1349     keylen);
1350     flow_dump(" key: ", key, keylen);
1351    
1352     - if (!RTA_OK(rta, keylen))
1353     - goto badkey;
1354     - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
1355     - goto badkey;
1356     - if (RTA_PAYLOAD(rta) < sizeof(*param))
1357     + ret = crypto_authenc_extractkeys(&keys, key, keylen);
1358     + if (ret)
1359     goto badkey;
1360    
1361     - param = RTA_DATA(rta);
1362     - ctx->enckeylen = be32_to_cpu(param->enckeylen);
1363     -
1364     - key += RTA_ALIGN(rta->rta_len);
1365     - keylen -= RTA_ALIGN(rta->rta_len);
1366     -
1367     - if (keylen < ctx->enckeylen)
1368     - goto badkey;
1369     - if (ctx->enckeylen > MAX_KEY_SIZE)
1370     + if (keys.enckeylen > MAX_KEY_SIZE ||
1371     + keys.authkeylen > MAX_KEY_SIZE)
1372     goto badkey;
1373    
1374     - ctx->authkeylen = keylen - ctx->enckeylen;
1375     -
1376     - if (ctx->authkeylen > MAX_KEY_SIZE)
1377     - goto badkey;
1378     + ctx->enckeylen = keys.enckeylen;
1379     + ctx->authkeylen = keys.authkeylen;
1380    
1381     - memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
1382     + memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1383     /* May end up padding auth key. So make sure it's zeroed. */
1384     memset(ctx->authkey, 0, sizeof(ctx->authkey));
1385     - memcpy(ctx->authkey, key, ctx->authkeylen);
1386     + memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1387    
1388     switch (ctx->alg->cipher_info.alg) {
1389     case CIPHER_ALG_DES:
1390     @@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
1391     u32 tmp[DES_EXPKEY_WORDS];
1392     u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
1393    
1394     - if (des_ekey(tmp, key) == 0) {
1395     + if (des_ekey(tmp, keys.enckey) == 0) {
1396     if (crypto_aead_get_flags(cipher) &
1397     CRYPTO_TFM_REQ_WEAK_KEY) {
1398     crypto_aead_set_flags(cipher, flags);
1399     @@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
1400     break;
1401     case CIPHER_ALG_3DES:
1402     if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
1403     - const u32 *K = (const u32 *)key;
1404     + const u32 *K = (const u32 *)keys.enckey;
1405     u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
1406    
1407     if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
1408     @@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
1409     ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
1410     ctx->fallback_cipher->base.crt_flags |=
1411     tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
1412     - ret =
1413     - crypto_aead_setkey(ctx->fallback_cipher, origkey,
1414     - origkeylen);
1415     + ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
1416     if (ret) {
1417     flow_log(" fallback setkey() returned:%d\n", ret);
1418     tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
1419     diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
1420     index 43975ab5f09c..f84ca2ff61de 100644
1421     --- a/drivers/crypto/caam/caamhash.c
1422     +++ b/drivers/crypto/caam/caamhash.c
1423     @@ -1131,13 +1131,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)
1424    
1425     desc = edesc->hw_desc;
1426    
1427     - state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
1428     - if (dma_mapping_error(jrdev, state->buf_dma)) {
1429     - dev_err(jrdev, "unable to map src\n");
1430     - goto unmap;
1431     - }
1432     + if (buflen) {
1433     + state->buf_dma = dma_map_single(jrdev, buf, buflen,
1434     + DMA_TO_DEVICE);
1435     + if (dma_mapping_error(jrdev, state->buf_dma)) {
1436     + dev_err(jrdev, "unable to map src\n");
1437     + goto unmap;
1438     + }
1439    
1440     - append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1441     + append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1442     + }
1443    
1444     edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1445     digestsize);
1446     diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
1447     index 01b82b82f8b8..5852d29ae2da 100644
1448     --- a/drivers/crypto/ccree/cc_aead.c
1449     +++ b/drivers/crypto/ccree/cc_aead.c
1450     @@ -540,13 +540,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1451     unsigned int keylen)
1452     {
1453     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1454     - struct rtattr *rta = (struct rtattr *)key;
1455     struct cc_crypto_req cc_req = {};
1456     - struct crypto_authenc_key_param *param;
1457     struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
1458     - int rc = -EINVAL;
1459     unsigned int seq_len = 0;
1460     struct device *dev = drvdata_to_dev(ctx->drvdata);
1461     + const u8 *enckey, *authkey;
1462     + int rc;
1463    
1464     dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
1465     ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
1466     @@ -554,35 +553,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1467     /* STAT_PHASE_0: Init and sanity checks */
1468    
1469     if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
1470     - if (!RTA_OK(rta, keylen))
1471     - goto badkey;
1472     - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
1473     - goto badkey;
1474     - if (RTA_PAYLOAD(rta) < sizeof(*param))
1475     - goto badkey;
1476     - param = RTA_DATA(rta);
1477     - ctx->enc_keylen = be32_to_cpu(param->enckeylen);
1478     - key += RTA_ALIGN(rta->rta_len);
1479     - keylen -= RTA_ALIGN(rta->rta_len);
1480     - if (keylen < ctx->enc_keylen)
1481     + struct crypto_authenc_keys keys;
1482     +
1483     + rc = crypto_authenc_extractkeys(&keys, key, keylen);
1484     + if (rc)
1485     goto badkey;
1486     - ctx->auth_keylen = keylen - ctx->enc_keylen;
1487     + enckey = keys.enckey;
1488     + authkey = keys.authkey;
1489     + ctx->enc_keylen = keys.enckeylen;
1490     + ctx->auth_keylen = keys.authkeylen;
1491    
1492     if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1493     /* the nonce is stored in bytes at end of key */
1494     + rc = -EINVAL;
1495     if (ctx->enc_keylen <
1496     (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
1497     goto badkey;
1498     /* Copy nonce from last 4 bytes in CTR key to
1499     * first 4 bytes in CTR IV
1500     */
1501     - memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
1502     - ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
1503     - CTR_RFC3686_NONCE_SIZE);
1504     + memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
1505     + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
1506     /* Set CTR key size */
1507     ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
1508     }
1509     } else { /* non-authenc - has just one key */
1510     + enckey = key;
1511     + authkey = NULL;
1512     ctx->enc_keylen = keylen;
1513     ctx->auth_keylen = 0;
1514     }
1515     @@ -594,13 +591,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1516     /* STAT_PHASE_1: Copy key to ctx */
1517    
1518     /* Get key material */
1519     - memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
1520     + memcpy(ctx->enckey, enckey, ctx->enc_keylen);
1521     if (ctx->enc_keylen == 24)
1522     memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
1523     if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
1524     - memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
1525     + memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
1526     + ctx->auth_keylen);
1527     } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
1528     - rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
1529     + rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
1530     if (rc)
1531     goto badkey;
1532     }
1533     diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
1534     index 6988012deca4..f4f3e9a5851e 100644
1535     --- a/drivers/crypto/talitos.c
1536     +++ b/drivers/crypto/talitos.c
1537     @@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1538     struct talitos_private *priv = dev_get_drvdata(dev);
1539     bool is_sec1 = has_ftr_sec1(priv);
1540     int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1541     - void *err;
1542    
1543     if (cryptlen + authsize > max_len) {
1544     dev_err(dev, "length exceeds h/w max limit\n");
1545     return ERR_PTR(-EINVAL);
1546     }
1547    
1548     - if (ivsize)
1549     - iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1550     -
1551     if (!dst || dst == src) {
1552     src_len = assoclen + cryptlen + authsize;
1553     src_nents = sg_nents_for_len(src, src_len);
1554     if (src_nents < 0) {
1555     dev_err(dev, "Invalid number of src SG.\n");
1556     - err = ERR_PTR(-EINVAL);
1557     - goto error_sg;
1558     + return ERR_PTR(-EINVAL);
1559     }
1560     src_nents = (src_nents == 1) ? 0 : src_nents;
1561     dst_nents = dst ? src_nents : 0;
1562     @@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1563     src_nents = sg_nents_for_len(src, src_len);
1564     if (src_nents < 0) {
1565     dev_err(dev, "Invalid number of src SG.\n");
1566     - err = ERR_PTR(-EINVAL);
1567     - goto error_sg;
1568     + return ERR_PTR(-EINVAL);
1569     }
1570     src_nents = (src_nents == 1) ? 0 : src_nents;
1571     dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1572     dst_nents = sg_nents_for_len(dst, dst_len);
1573     if (dst_nents < 0) {
1574     dev_err(dev, "Invalid number of dst SG.\n");
1575     - err = ERR_PTR(-EINVAL);
1576     - goto error_sg;
1577     + return ERR_PTR(-EINVAL);
1578     }
1579     dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1580     }
1581     @@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1582     /* if it's an ahash, add space for a second desc next to the first one */
1583     if (is_sec1 && !dst)
1584     alloc_len += sizeof(struct talitos_desc);
1585     + alloc_len += ivsize;
1586    
1587     edesc = kmalloc(alloc_len, GFP_DMA | flags);
1588     - if (!edesc) {
1589     - err = ERR_PTR(-ENOMEM);
1590     - goto error_sg;
1591     + if (!edesc)
1592     + return ERR_PTR(-ENOMEM);
1593     + if (ivsize) {
1594     + iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1595     + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1596     }
1597     memset(&edesc->desc, 0, sizeof(edesc->desc));
1598    
1599     @@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1600     DMA_BIDIRECTIONAL);
1601     }
1602     return edesc;
1603     -error_sg:
1604     - if (iv_dma)
1605     - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1606     - return err;
1607     }
1608    
1609     static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
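
The talitos hunks above stop DMA-mapping the caller-supplied IV, which may live on the stack, and instead grow the edesc allocation by ivsize, copy the IV into the tail of that allocation, and map the copy; mapping only after kmalloc() succeeds also lets the error_sg unwind path go away. A hedged user-space sketch of the tail-allocation pattern (dma_map_single() is kernel-only and is represented by a comment):

#include <stdlib.h>
#include <string.h>

struct edesc {
	/* ... descriptor fields elided ... */
	unsigned long desc[4];
};

/* Over-allocate by ivsize and copy the IV into the tail so the buffer
 * handed to the DMA API has a well-defined lifetime (never the stack).
 */
static struct edesc *edesc_alloc(const void *iv, size_t ivsize)
{
	size_t alloc_len = sizeof(struct edesc) + ivsize;
	struct edesc *edesc = malloc(alloc_len);

	if (!edesc)
		return NULL;
	if (ivsize) {
		void *iv_copy = (char *)edesc + alloc_len - ivsize;

		memcpy(iv_copy, iv, ivsize);
		/* kernel: iv_dma = dma_map_single(dev, iv_copy, ivsize,
		 *                                 DMA_TO_DEVICE); */
	}
	return edesc;
}
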
1610     diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
1611     index b5b9f15549c2..1bda809a7289 100644
1612     --- a/drivers/gpu/drm/drm_fb_helper.c
1613     +++ b/drivers/gpu/drm/drm_fb_helper.c
1614     @@ -1690,9 +1690,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
1615     struct drm_fb_helper *fb_helper = info->par;
1616     struct drm_framebuffer *fb = fb_helper->fb;
1617    
1618     - if (var->pixclock != 0 || in_dbg_master())
1619     + if (in_dbg_master())
1620     return -EINVAL;
1621    
1622     + if (var->pixclock != 0) {
1623     + DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
1624     + var->pixclock = 0;
1625     + }
1626     +
1627     /*
1628     * Changes struct fb_var_screeninfo are currently not pushed back
1629     * to KMS, hence fail if different settings are requested.
1630     diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
1631     index 9ad89e38f6c0..12e4203c06db 100644
1632     --- a/drivers/gpu/drm/i915/gvt/kvmgt.c
1633     +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
1634     @@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
1635     {
1636     unsigned int index;
1637     u64 virtaddr;
1638     - unsigned long req_size, pgoff = 0;
1639     + unsigned long req_size, pgoff, req_start;
1640     pgprot_t pg_prot;
1641     struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1642    
1643     @@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
1644     pg_prot = vma->vm_page_prot;
1645     virtaddr = vma->vm_start;
1646     req_size = vma->vm_end - vma->vm_start;
1647     - pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
1648     + pgoff = vma->vm_pgoff &
1649     + ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
1650     + req_start = pgoff << PAGE_SHIFT;
1651     +
1652     + if (!intel_vgpu_in_aperture(vgpu, req_start))
1653     + return -EINVAL;
1654     + if (req_start + req_size >
1655     + vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
1656     + return -EINVAL;
1657     +
1658     + pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
1659    
1660     return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
1661     }
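
Before this kvmgt fix, intel_vgpu_mmap() ignored vma->vm_pgoff and always mapped from the aperture base, so a user could reach memory outside the vGPU's slice. The new code derives the requested start from vm_pgoff and rejects ranges that fall outside the aperture window. A standalone sketch of an equivalent window check (written overflow-safely, a slight variation on the two in-kernel comparisons):

#include <stdint.h>

/* Accept [req_start, req_start + req_size) only if it lies entirely
 * inside the window [win_off, win_off + win_sz); mirrors the two
 * -EINVAL checks added to intel_vgpu_mmap().
 */
static int check_mmap_window(uint64_t req_start, uint64_t req_size,
			     uint64_t win_off, uint64_t win_sz)
{
	if (req_start < win_off)
		return -1;                        /* -EINVAL */
	if (req_size > win_sz ||
	    req_start - win_off > win_sz - req_size)
		return -1;                        /* -EINVAL */
	return 0;
}
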
1662     diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
1663     index 0385ab438320..f6fa9b115fda 100644
1664     --- a/drivers/infiniband/core/nldev.c
1665     +++ b/drivers/infiniband/core/nldev.c
1666     @@ -579,10 +579,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
1667     if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
1668     atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
1669     goto err;
1670     - if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
1671     - nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
1672     - pd->unsafe_global_rkey))
1673     - goto err;
1674    
1675     if (fill_res_name_pid(msg, res))
1676     goto err;
1677     diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
1678     index 42b8685c997e..3c633ab58052 100644
1679     --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
1680     +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
1681     @@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
1682    
1683     static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
1684     {
1685     - return (enum pvrdma_wr_opcode)op;
1686     + switch (op) {
1687     + case IB_WR_RDMA_WRITE:
1688     + return PVRDMA_WR_RDMA_WRITE;
1689     + case IB_WR_RDMA_WRITE_WITH_IMM:
1690     + return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
1691     + case IB_WR_SEND:
1692     + return PVRDMA_WR_SEND;
1693     + case IB_WR_SEND_WITH_IMM:
1694     + return PVRDMA_WR_SEND_WITH_IMM;
1695     + case IB_WR_RDMA_READ:
1696     + return PVRDMA_WR_RDMA_READ;
1697     + case IB_WR_ATOMIC_CMP_AND_SWP:
1698     + return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
1699     + case IB_WR_ATOMIC_FETCH_AND_ADD:
1700     + return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
1701     + case IB_WR_LSO:
1702     + return PVRDMA_WR_LSO;
1703     + case IB_WR_SEND_WITH_INV:
1704     + return PVRDMA_WR_SEND_WITH_INV;
1705     + case IB_WR_RDMA_READ_WITH_INV:
1706     + return PVRDMA_WR_RDMA_READ_WITH_INV;
1707     + case IB_WR_LOCAL_INV:
1708     + return PVRDMA_WR_LOCAL_INV;
1709     + case IB_WR_REG_MR:
1710     + return PVRDMA_WR_FAST_REG_MR;
1711     + case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
1712     + return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
1713     + case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
1714     + return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
1715     + case IB_WR_REG_SIG_MR:
1716     + return PVRDMA_WR_REG_SIG_MR;
1717     + default:
1718     + return PVRDMA_WR_ERROR;
1719     + }
1720     }
1721    
1722     static inline enum ib_wc_status pvrdma_wc_status_to_ib(
1723     diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
1724     index 60083c0363a5..9aeb33093279 100644
1725     --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
1726     +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
1727     @@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1728     wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1729     wqe_hdr->ex.imm_data = wr->ex.imm_data;
1730    
1731     + if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
1732     + *bad_wr = wr;
1733     + ret = -EINVAL;
1734     + goto out;
1735     + }
1736     +
1737     switch (qp->ibqp.qp_type) {
1738     case IB_QPT_GSI:
1739     case IB_QPT_UD:
1740     diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
1741     index 1faa64abc74f..6889c25c62cb 100644
1742     --- a/drivers/media/common/videobuf2/videobuf2-core.c
1743     +++ b/drivers/media/common/videobuf2/videobuf2-core.c
1744     @@ -1933,9 +1933,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
1745     return -EINVAL;
1746     }
1747     }
1748     +
1749     + mutex_lock(&q->mmap_lock);
1750     +
1751     if (vb2_fileio_is_active(q)) {
1752     dprintk(1, "mmap: file io in progress\n");
1753     - return -EBUSY;
1754     + ret = -EBUSY;
1755     + goto unlock;
1756     }
1757    
1758     /*
1759     @@ -1943,7 +1947,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
1760     */
1761     ret = __find_plane_by_offset(q, off, &buffer, &plane);
1762     if (ret)
1763     - return ret;
1764     + goto unlock;
1765    
1766     vb = q->bufs[buffer];
1767    
1768     @@ -1956,11 +1960,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
1769     if (length < (vma->vm_end - vma->vm_start)) {
1770     dprintk(1,
1771     "MMAP invalid, as it would overflow buffer length\n");
1772     - return -EINVAL;
1773     + ret = -EINVAL;
1774     + goto unlock;
1775     }
1776    
1777     - mutex_lock(&q->mmap_lock);
1778     ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
1779     +
1780     +unlock:
1781     mutex_unlock(&q->mmap_lock);
1782     if (ret)
1783     return ret;
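
The vb2_mmap() hunks move mutex_lock(&q->mmap_lock) ahead of the sanity checks, closing a race in which the checked buffer state could change before the lock was taken; every early return becomes a goto to a single unlock label. The same lock-early/single-exit pattern in plain C with pthreads (the flags are stand-ins for the driver state):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;
static int fileio_active;     /* stand-in for vb2_fileio_is_active() */
static int plane_found = 1;   /* stand-in for __find_plane_by_offset() */

static int do_mmap(void)
{
	int ret = 0;

	pthread_mutex_lock(&mmap_lock);     /* lock before any check */

	if (fileio_active) {
		ret = -EBUSY;               /* was: return -EBUSY */
		goto unlock;
	}
	if (!plane_found) {
		ret = -EINVAL;
		goto unlock;
	}
	/* ... the memop mmap call happens under the lock ... */
unlock:
	pthread_mutex_unlock(&mmap_lock);   /* single unlock path */
	return ret;
}
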
1784     diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
1785     index f06003bb8e42..2a92e5aac9ed 100644
1786     --- a/drivers/media/platform/vivid/vivid-kthread-cap.c
1787     +++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
1788     @@ -865,8 +865,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
1789     "%s-vid-cap", dev->v4l2_dev.name);
1790    
1791     if (IS_ERR(dev->kthread_vid_cap)) {
1792     + int err = PTR_ERR(dev->kthread_vid_cap);
1793     +
1794     + dev->kthread_vid_cap = NULL;
1795     v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
1796     - return PTR_ERR(dev->kthread_vid_cap);
1797     + return err;
1798     }
1799     *pstreaming = true;
1800     vivid_grab_controls(dev, true);
1801     diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
1802     index 9981e7548019..488590594150 100644
1803     --- a/drivers/media/platform/vivid/vivid-kthread-out.c
1804     +++ b/drivers/media/platform/vivid/vivid-kthread-out.c
1805     @@ -236,8 +236,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
1806     "%s-vid-out", dev->v4l2_dev.name);
1807    
1808     if (IS_ERR(dev->kthread_vid_out)) {
1809     + int err = PTR_ERR(dev->kthread_vid_out);
1810     +
1811     + dev->kthread_vid_out = NULL;
1812     v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
1813     - return PTR_ERR(dev->kthread_vid_out);
1814     + return err;
1815     }
1816     *pstreaming = true;
1817     vivid_grab_controls(dev, true);
1818     diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
1819     index be531caa2cdf..2079861d2270 100644
1820     --- a/drivers/media/platform/vivid/vivid-vid-common.c
1821     +++ b/drivers/media/platform/vivid/vivid-vid-common.c
1822     @@ -21,7 +21,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
1823     .type = V4L2_DV_BT_656_1120,
1824     /* keep this initialization for compatibility with GCC < 4.4.6 */
1825     .reserved = { 0 },
1826     - V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
1827     + V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
1828     V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
1829     V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
1830     V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
1831     diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
1832     index 54afc9c7ee6e..a4d3e94a400c 100644
1833     --- a/drivers/media/v4l2-core/v4l2-ioctl.c
1834     +++ b/drivers/media/v4l2-core/v4l2-ioctl.c
1835     @@ -286,6 +286,7 @@ static void v4l_print_format(const void *arg, bool write_only)
1836     const struct v4l2_window *win;
1837     const struct v4l2_sdr_format *sdr;
1838     const struct v4l2_meta_format *meta;
1839     + u32 planes;
1840     unsigned i;
1841    
1842     pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
1843     @@ -316,7 +317,8 @@ static void v4l_print_format(const void *arg, bool write_only)
1844     prt_names(mp->field, v4l2_field_names),
1845     mp->colorspace, mp->num_planes, mp->flags,
1846     mp->ycbcr_enc, mp->quantization, mp->xfer_func);
1847     - for (i = 0; i < mp->num_planes; i++)
1848     + planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
1849     + for (i = 0; i < planes; i++)
1850     printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
1851     mp->plane_fmt[i].bytesperline,
1852     mp->plane_fmt[i].sizeimage);
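
v4l_print_format() trusted mp->num_planes and could index past plane_fmt[VIDEO_MAX_PLANES - 1]; clamping the loop bound with min_t() makes the debug print safe regardless of what the (possibly bogus) format carries. The same idiom reduced to plain C:

#include <stdint.h>
#include <stdio.h>

#define VIDEO_MAX_PLANES 8

struct plane_fmt { uint32_t bytesperline, sizeimage; };

static void print_planes(const struct plane_fmt p[VIDEO_MAX_PLANES],
			 uint32_t num_planes)
{
	/* clamp the untrusted count to the array size (min_t()) */
	uint32_t planes = num_planes < VIDEO_MAX_PLANES ?
			  num_planes : VIDEO_MAX_PLANES;
	uint32_t i;

	for (i = 0; i < planes; i++)
		printf("plane %u: bytesperline=%u sizeimage=%u\n",
		       i, p[i].bytesperline, p[i].sizeimage);
}
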
1853     diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
1854     index b89379782741..9c7925ca13cf 100644
1855     --- a/drivers/mfd/tps6586x.c
1856     +++ b/drivers/mfd/tps6586x.c
1857     @@ -592,6 +592,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
1858     return 0;
1859     }
1860    
1861     +static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
1862     +{
1863     + struct tps6586x *tps6586x = dev_get_drvdata(dev);
1864     +
1865     + if (tps6586x->client->irq)
1866     + disable_irq(tps6586x->client->irq);
1867     +
1868     + return 0;
1869     +}
1870     +
1871     +static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
1872     +{
1873     + struct tps6586x *tps6586x = dev_get_drvdata(dev);
1874     +
1875     + if (tps6586x->client->irq)
1876     + enable_irq(tps6586x->client->irq);
1877     +
1878     + return 0;
1879     +}
1880     +
1881     +static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
1882     + tps6586x_i2c_resume);
1883     +
1884     static const struct i2c_device_id tps6586x_id_table[] = {
1885     { "tps6586x", 0 },
1886     { },
1887     @@ -602,6 +625,7 @@ static struct i2c_driver tps6586x_driver = {
1888     .driver = {
1889     .name = "tps6586x",
1890     .of_match_table = of_match_ptr(tps6586x_of_match),
1891     + .pm = &tps6586x_pm_ops,
1892     },
1893     .probe = tps6586x_i2c_probe,
1894     .remove = tps6586x_i2c_remove,
1895     diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
1896     index 3cc8bfee6c18..8594659cb592 100644
1897     --- a/drivers/mmc/host/sdhci-msm.c
1898     +++ b/drivers/mmc/host/sdhci-msm.c
1899     @@ -258,6 +258,8 @@ struct sdhci_msm_host {
1900     bool mci_removed;
1901     const struct sdhci_msm_variant_ops *var_ops;
1902     const struct sdhci_msm_offset *offset;
1903     + bool use_cdr;
1904     + u32 transfer_mode;
1905     };
1906    
1907     static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
1908     @@ -1025,6 +1027,26 @@ out:
1909     return ret;
1910     }
1911    
1912     +static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
1913     +{
1914     + const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
1915     + u32 config, oldconfig = readl_relaxed(host->ioaddr +
1916     + msm_offset->core_dll_config);
1917     +
1918     + config = oldconfig;
1919     + if (enable) {
1920     + config |= CORE_CDR_EN;
1921     + config &= ~CORE_CDR_EXT_EN;
1922     + } else {
1923     + config &= ~CORE_CDR_EN;
1924     + config |= CORE_CDR_EXT_EN;
1925     + }
1926     +
1927     + if (config != oldconfig)
1928     + writel_relaxed(config, host->ioaddr +
1929     + msm_offset->core_dll_config);
1930     +}
1931     +
1932     static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
1933     {
1934     struct sdhci_host *host = mmc_priv(mmc);
1935     @@ -1042,8 +1064,14 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
1936     if (host->clock <= CORE_FREQ_100MHZ ||
1937     !(ios.timing == MMC_TIMING_MMC_HS400 ||
1938     ios.timing == MMC_TIMING_MMC_HS200 ||
1939     - ios.timing == MMC_TIMING_UHS_SDR104))
1940     + ios.timing == MMC_TIMING_UHS_SDR104)) {
1941     + msm_host->use_cdr = false;
1942     + sdhci_msm_set_cdr(host, false);
1943     return 0;
1944     + }
1945     +
1946     + /* Clock-Data-Recovery is used to dynamically adjust the RX sampling point */
1947     + msm_host->use_cdr = true;
1948    
1949     /*
1950     * For HS400 tuning in HS200 timing requires:
1951     @@ -1525,6 +1553,19 @@ static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
1952     case SDHCI_POWER_CONTROL:
1953     req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
1954     break;
1955     + case SDHCI_TRANSFER_MODE:
1956     + msm_host->transfer_mode = val;
1957     + break;
1958     + case SDHCI_COMMAND:
1959     + if (!msm_host->use_cdr)
1960     + break;
1961     + if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
1962     + SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
1963     + SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
1964     + sdhci_msm_set_cdr(host, true);
1965     + else
1966     + sdhci_msm_set_cdr(host, false);
1967     + break;
1968     }
1969    
1970     if (req_type) {
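
The sdhci-msm changes keep clock-data recovery (CDR) enabled only where it helps: data reads in HS400/HS200/SDR104 timing, and never for tuning commands, which must sample with CDR disabled. The register-write hook latches SDHCI_TRANSFER_MODE and decides per SDHCI_COMMAND. A condensed sketch of that decision (the constants are placeholders, not the real SDHCI register values):

#include <stdbool.h>

#define TRNS_READ        0x10  /* placeholder for SDHCI_TRNS_READ */
#define CMD_TUNING       19    /* MMC_SEND_TUNING_BLOCK */
#define CMD_TUNING_HS200 21    /* MMC_SEND_TUNING_BLOCK_HS200 */

/* Returns -1 to leave CDR untouched (the driver just breaks out when
 * use_cdr is false), 1 to enable it, 0 to disable it.
 */
static int cdr_action(bool use_cdr, unsigned int transfer_mode,
		      unsigned int cmd)
{
	if (!use_cdr)
		return -1;
	if ((transfer_mode & TRNS_READ) &&
	    cmd != CMD_TUNING_HS200 && cmd != CMD_TUNING)
		return 1;       /* real data read: CDR on */
	return 0;               /* writes and tuning: CDR off */
}
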
1971     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1972     index 3c597569cfae..a6fcc5c96070 100644
1973     --- a/drivers/net/bonding/bond_main.c
1974     +++ b/drivers/net/bonding/bond_main.c
1975     @@ -1947,6 +1947,9 @@ static int __bond_release_one(struct net_device *bond_dev,
1976     if (!bond_has_slaves(bond)) {
1977     bond_set_carrier(bond);
1978     eth_hw_addr_random(bond_dev);
1979     + bond->nest_level = SINGLE_DEPTH_NESTING;
1980     + } else {
1981     + bond->nest_level = dev_get_nest_level(bond_dev) + 1;
1982     }
1983    
1984     unblock_netpoll_tx();
1985     diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
1986     index b4b839a1d095..ad41ec63cc9f 100644
1987     --- a/drivers/net/dsa/realtek-smi.c
1988     +++ b/drivers/net/dsa/realtek-smi.c
1989     @@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
1990     struct device_node *mdio_np;
1991     int ret;
1992    
1993     - mdio_np = of_find_compatible_node(smi->dev->of_node, NULL,
1994     - "realtek,smi-mdio");
1995     + mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
1996     if (!mdio_np) {
1997     dev_err(smi->dev, "no MDIO bus node\n");
1998     return -ENODEV;
1999     }
2000    
2001     smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
2002     - if (!smi->slave_mii_bus)
2003     - return -ENOMEM;
2004     + if (!smi->slave_mii_bus) {
2005     + ret = -ENOMEM;
2006     + goto err_put_node;
2007     + }
2008     smi->slave_mii_bus->priv = smi;
2009     smi->slave_mii_bus->name = "SMI slave MII";
2010     smi->slave_mii_bus->read = realtek_smi_mdio_read;
2011     @@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
2012     if (ret) {
2013     dev_err(smi->dev, "unable to register MDIO bus %s\n",
2014     smi->slave_mii_bus->id);
2015     - of_node_put(mdio_np);
2016     + goto err_put_node;
2017     }
2018    
2019     return 0;
2020     +
2021     +err_put_node:
2022     + of_node_put(mdio_np);
2023     +
2024     + return ret;
2025     }
2026    
2027     static int realtek_smi_probe(struct platform_device *pdev)
2028     @@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev)
2029     struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
2030    
2031     dsa_unregister_switch(smi->ds);
2032     + if (smi->slave_mii_bus)
2033     + of_node_put(smi->slave_mii_bus->dev.of_node);
2034     gpiod_set_value(smi->reset, 1);
2035    
2036     return 0;
2037     diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
2038     index 1393252c6e3c..42f5bfa33694 100644
2039     --- a/drivers/net/ethernet/microchip/lan743x_main.c
2040     +++ b/drivers/net/ethernet/microchip/lan743x_main.c
2041     @@ -962,13 +962,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
2042    
2043     memset(&ksettings, 0, sizeof(ksettings));
2044     phy_ethtool_get_link_ksettings(netdev, &ksettings);
2045     - local_advertisement = phy_read(phydev, MII_ADVERTISE);
2046     - if (local_advertisement < 0)
2047     - return;
2048     -
2049     - remote_advertisement = phy_read(phydev, MII_LPA);
2050     - if (remote_advertisement < 0)
2051     - return;
2052     + local_advertisement =
2053     + ethtool_adv_to_mii_adv_t(phydev->advertising);
2054     + remote_advertisement =
2055     + ethtool_adv_to_mii_adv_t(phydev->lp_advertising);
2056    
2057     lan743x_phy_update_flowcontrol(adapter,
2058     ksettings.base.duplex,
2059     diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
2060     index 9fc8a2bc0ff1..07f3080eca18 100644
2061     --- a/drivers/net/ethernet/realtek/r8169.c
2062     +++ b/drivers/net/ethernet/realtek/r8169.c
2063     @@ -717,6 +717,7 @@ module_param(use_dac, int, 0);
2064     MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
2065     module_param_named(debug, debug.msg_enable, int, 0);
2066     MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
2067     +MODULE_SOFTDEP("pre: realtek");
2068     MODULE_LICENSE("GPL");
2069     MODULE_FIRMWARE(FIRMWARE_8168D_1);
2070     MODULE_FIRMWARE(FIRMWARE_8168D_2);
2071     @@ -1730,11 +1731,13 @@ static bool rtl8169_reset_counters(struct rtl8169_private *tp)
2072    
2073     static bool rtl8169_update_counters(struct rtl8169_private *tp)
2074     {
2075     + u8 val = RTL_R8(tp, ChipCmd);
2076     +
2077     /*
2078     * Some chips are unable to dump tally counters when the receiver
2079     - * is disabled.
2080     + * is disabled. If 0xff chip may be in a PCI power-save state.
2081     */
2082     - if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0)
2083     + if (!(val & CmdRxEnb) || val == 0xff)
2084     return true;
2085    
2086     return rtl8169_do_counters(tp, CounterDump);
2087     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
2088     index 8c1abcba4cbd..33978b0cdac8 100644
2089     --- a/drivers/net/tun.c
2090     +++ b/drivers/net/tun.c
2091     @@ -859,10 +859,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
2092     err = 0;
2093     }
2094    
2095     - rcu_assign_pointer(tfile->tun, tun);
2096     - rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
2097     - tun->numqueues++;
2098     -
2099     if (tfile->detached) {
2100     tun_enable_queue(tfile);
2101     } else {
2102     @@ -876,6 +872,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
2103     * refcnt.
2104     */
2105    
2106     + /* Publish tfile->tun and tun->tfiles only after we've fully
2107     + * initialized tfile; otherwise we risk using a half-initialized
2108     + * object.
2109     + */
2110     + rcu_assign_pointer(tfile->tun, tun);
2111     + rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
2112     + tun->numqueues++;
2113     out:
2114     return err;
2115     }
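
The tun fix is a textbook publish-after-initialize ordering bug: tfile was made reachable via rcu_assign_pointer() before it was fully set up, so a concurrent RCU reader could observe a half-initialized object. rcu_assign_pointer() has release semantics; a user-space analogue of the corrected ordering using C11 atomics (readers would pair this with an acquire/consume load, standing in for rcu_dereference()):

#include <stdatomic.h>
#include <stdlib.h>

struct tfile { int queue_index; int detached; };

static _Atomic(struct tfile *) published;

static int attach(void)
{
	struct tfile *t = malloc(sizeof(*t));

	if (!t)
		return -1;
	t->queue_index = 0;    /* fully initialize first ... */
	t->detached = 0;
	/* ... then publish with release ordering, the moral
	 * equivalent of rcu_assign_pointer() in the patch. */
	atomic_store_explicit(&published, t, memory_order_release);
	return 0;
}
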
2116     diff --git a/drivers/of/property.c b/drivers/of/property.c
2117     index f46828e3b082..43720c2de138 100644
2118     --- a/drivers/of/property.c
2119     +++ b/drivers/of/property.c
2120     @@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
2121    
2122     if (!of_device_is_available(remote)) {
2123     pr_debug("not available for remote node\n");
2124     + of_node_put(remote);
2125     return NULL;
2126     }
2127    
2128     diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
2129     index b44c1bb687a2..ebc193f7f7dd 100644
2130     --- a/drivers/scsi/scsi_pm.c
2131     +++ b/drivers/scsi/scsi_pm.c
2132     @@ -79,8 +79,22 @@ static int scsi_dev_type_resume(struct device *dev,
2133    
2134     if (err == 0) {
2135     pm_runtime_disable(dev);
2136     - pm_runtime_set_active(dev);
2137     + err = pm_runtime_set_active(dev);
2138     pm_runtime_enable(dev);
2139     +
2140     + /*
2141     + * Forcibly set runtime PM status of request queue to "active"
2142     + * to make sure we can again get requests from the queue
2143     + * (see also blk_pm_peek_request()).
2144     + *
2145     + * The resume hook will correct runtime PM status of the disk.
2146     + */
2147     + if (!err && scsi_is_sdev_device(dev)) {
2148     + struct scsi_device *sdev = to_scsi_device(dev);
2149     +
2150     + if (sdev->request_queue->dev)
2151     + blk_set_runtime_active(sdev->request_queue);
2152     + }
2153     }
2154    
2155     return err;
2156     @@ -139,16 +153,6 @@ static int scsi_bus_resume_common(struct device *dev,
2157     else
2158     fn = NULL;
2159    
2160     - /*
2161     - * Forcibly set runtime PM status of request queue to "active" to
2162     - * make sure we can again get requests from the queue (see also
2163     - * blk_pm_peek_request()).
2164     - *
2165     - * The resume hook will correct runtime PM status of the disk.
2166     - */
2167     - if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
2168     - blk_set_runtime_active(to_scsi_device(dev)->request_queue);
2169     -
2170     if (fn) {
2171     async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
2172    
2173     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2174     index 5c9acb634ff7..0a27917263aa 100644
2175     --- a/drivers/scsi/sd.c
2176     +++ b/drivers/scsi/sd.c
2177     @@ -205,6 +205,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
2178     sp = buffer_data[0] & 0x80 ? 1 : 0;
2179     buffer_data[0] &= ~0x80;
2180    
2181     + /*
2182     + * Ensure WP, DPOFUA, and RESERVED fields are cleared in
2183     + * received mode parameter buffer before doing MODE SELECT.
2184     + */
2185     + data.device_specific = 0;
2186     +
2187     if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
2188     SD_MAX_RETRIES, &data, &sshdr)) {
2189     if (scsi_sense_valid(&sshdr))
2190     diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
2191     index b289b90ae6dc..b19c960d5490 100644
2192     --- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
2193     +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
2194     @@ -598,9 +598,12 @@ out:
2195     mutex_unlock(&cdev_list_lock);
2196     }
2197    
2198     +static void __cxgbit_free_conn(struct cxgbit_sock *csk);
2199     +
2200     void cxgbit_free_np(struct iscsi_np *np)
2201     {
2202     struct cxgbit_np *cnp = np->np_context;
2203     + struct cxgbit_sock *csk, *tmp;
2204    
2205     cnp->com.state = CSK_STATE_DEAD;
2206     if (cnp->com.cdev)
2207     @@ -608,6 +611,13 @@ void cxgbit_free_np(struct iscsi_np *np)
2208     else
2209     cxgbit_free_all_np(cnp);
2210    
2211     + spin_lock_bh(&cnp->np_accept_lock);
2212     + list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
2213     + list_del_init(&csk->accept_node);
2214     + __cxgbit_free_conn(csk);
2215     + }
2216     + spin_unlock_bh(&cnp->np_accept_lock);
2217     +
2218     np->np_context = NULL;
2219     cxgbit_put_cnp(cnp);
2220     }
2221     @@ -708,9 +718,9 @@ void cxgbit_abort_conn(struct cxgbit_sock *csk)
2222     csk->tid, 600, __func__);
2223     }
2224    
2225     -void cxgbit_free_conn(struct iscsi_conn *conn)
2226     +static void __cxgbit_free_conn(struct cxgbit_sock *csk)
2227     {
2228     - struct cxgbit_sock *csk = conn->context;
2229     + struct iscsi_conn *conn = csk->conn;
2230     bool release = false;
2231    
2232     pr_debug("%s: state %d\n",
2233     @@ -719,7 +729,7 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
2234     spin_lock_bh(&csk->lock);
2235     switch (csk->com.state) {
2236     case CSK_STATE_ESTABLISHED:
2237     - if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
2238     + if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
2239     csk->com.state = CSK_STATE_CLOSING;
2240     cxgbit_send_halfclose(csk);
2241     } else {
2242     @@ -744,6 +754,11 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
2243     cxgbit_put_csk(csk);
2244     }
2245    
2246     +void cxgbit_free_conn(struct iscsi_conn *conn)
2247     +{
2248     + __cxgbit_free_conn(conn->context);
2249     +}
2250     +
2251     static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
2252     {
2253     csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
2254     @@ -806,6 +821,7 @@ void _cxgbit_free_csk(struct kref *kref)
2255     spin_unlock_bh(&cdev->cskq.lock);
2256    
2257     cxgbit_free_skb(csk);
2258     + cxgbit_put_cnp(csk->cnp);
2259     cxgbit_put_cdev(cdev);
2260    
2261     kfree(csk);
2262     @@ -1354,6 +1370,7 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
2263     goto rel_skb;
2264     }
2265    
2266     + cxgbit_get_cnp(cnp);
2267     cxgbit_get_cdev(cdev);
2268    
2269     spin_lock(&cdev->cskq.lock);
2270     diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
2271     index d6f42b528277..052ec16a4e84 100644
2272     --- a/drivers/tty/tty_io.c
2273     +++ b/drivers/tty/tty_io.c
2274     @@ -1255,7 +1255,8 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
2275     static int tty_reopen(struct tty_struct *tty)
2276     {
2277     struct tty_driver *driver = tty->driver;
2278     - int retval;
2279     + struct tty_ldisc *ld;
2280     + int retval = 0;
2281    
2282     if (driver->type == TTY_DRIVER_TYPE_PTY &&
2283     driver->subtype == PTY_TYPE_MASTER)
2284     @@ -1267,14 +1268,21 @@ static int tty_reopen(struct tty_struct *tty)
2285     if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
2286     return -EBUSY;
2287    
2288     - tty->count++;
2289     + ld = tty_ldisc_ref_wait(tty);
2290     + if (ld) {
2291     + tty_ldisc_deref(ld);
2292     + } else {
2293     + retval = tty_ldisc_lock(tty, 5 * HZ);
2294     + if (retval)
2295     + return retval;
2296    
2297     - if (tty->ldisc)
2298     - return 0;
2299     + if (!tty->ldisc)
2300     + retval = tty_ldisc_reinit(tty, tty->termios.c_line);
2301     + tty_ldisc_unlock(tty);
2302     + }
2303    
2304     - retval = tty_ldisc_reinit(tty, tty->termios.c_line);
2305     - if (retval)
2306     - tty->count--;
2307     + if (retval == 0)
2308     + tty->count++;
2309    
2310     return retval;
2311     }
2312     diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
2313     index 0c98d88f795a..b989ca26fc78 100644
2314     --- a/drivers/tty/tty_ldsem.c
2315     +++ b/drivers/tty/tty_ldsem.c
2316     @@ -293,6 +293,16 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
2317     if (!locked)
2318     atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
2319     list_del(&waiter.list);
2320     +
2321     + /*
2322     + * In case of timeout, wake up every reader that gave way to a
2323     + * writer. This prevents the readers from being split into two
2324     + * groups: one holding the semaphore and another sleeping
2325     + * (in the case of no contention with a writer).
2326     + */
2327     + if (!locked && list_empty(&sem->write_wait))
2328     + __ldsem_wake_readers(sem);
2329     +
2330     raw_spin_unlock_irq(&sem->wait_lock);
2331    
2332     __set_current_state(TASK_RUNNING);
2333     diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
2334     index a3edb20ea4c3..a846d32ee653 100644
2335     --- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
2336     +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
2337     @@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
2338    
2339     int r = 0;
2340    
2341     + memset(&p, 0, sizeof(p));
2342     +
2343     switch (cmd) {
2344     case OMAPFB_SYNC_GFX:
2345     DBG("ioctl SYNC_GFX\n");
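
The one-line omapfb fix zeroes the on-stack ioctl parameter union before the switch, because each sub-command fills only part of it before the result is copied to user space, leaking stale kernel stack bytes through the untouched members and padding. The general pattern, with a made-up union for illustration:

#include <string.h>

union ioctl_param {                 /* hypothetical, for illustration */
	int sync;
	struct { int x, y, w, h; } update;
	unsigned char raw[64];
};

static void handle_ioctl(unsigned int cmd, union ioctl_param *out)
{
	union ioctl_param p;

	memset(&p, 0, sizeof(p));   /* no stale stack bytes escape */
	if (cmd == 0)
		p.sync = 1;         /* each cmd writes only part of p */
	/* copy_to_user() equivalent: every byte is now defined */
	memcpy(out, &p, sizeof(p));
}
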
2346     diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
2347     index e6c1934734b7..fe1f16351f94 100644
2348     --- a/drivers/xen/events/events_base.c
2349     +++ b/drivers/xen/events/events_base.c
2350     @@ -1650,7 +1650,7 @@ void xen_callback_vector(void)
2351     xen_have_vector_callback = 0;
2352     return;
2353     }
2354     - pr_info("Xen HVM callback vector for event delivery is enabled\n");
2355     + pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
2356     alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
2357     xen_hvm_callback_vector);
2358     }
2359     diff --git a/fs/block_dev.c b/fs/block_dev.c
2360     index 38b8ce05cbc7..cdbb888a8d4a 100644
2361     --- a/fs/block_dev.c
2362     +++ b/fs/block_dev.c
2363     @@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev)
2364     }
2365     EXPORT_SYMBOL(invalidate_bdev);
2366    
2367     +static void set_init_blocksize(struct block_device *bdev)
2368     +{
2369     + unsigned bsize = bdev_logical_block_size(bdev);
2370     + loff_t size = i_size_read(bdev->bd_inode);
2371     +
2372     + while (bsize < PAGE_SIZE) {
2373     + if (size & bsize)
2374     + break;
2375     + bsize <<= 1;
2376     + }
2377     + bdev->bd_block_size = bsize;
2378     + bdev->bd_inode->i_blkbits = blksize_bits(bsize);
2379     +}
2380     +
2381     int set_blocksize(struct block_device *bdev, int size)
2382     {
2383     /* Size must be a power of two, and between 512 and PAGE_SIZE */
2384     @@ -1408,18 +1422,9 @@ EXPORT_SYMBOL(check_disk_change);
2385    
2386     void bd_set_size(struct block_device *bdev, loff_t size)
2387     {
2388     - unsigned bsize = bdev_logical_block_size(bdev);
2389     -
2390     inode_lock(bdev->bd_inode);
2391     i_size_write(bdev->bd_inode, size);
2392     inode_unlock(bdev->bd_inode);
2393     - while (bsize < PAGE_SIZE) {
2394     - if (size & bsize)
2395     - break;
2396     - bsize <<= 1;
2397     - }
2398     - bdev->bd_block_size = bsize;
2399     - bdev->bd_inode->i_blkbits = blksize_bits(bsize);
2400     }
2401     EXPORT_SYMBOL(bd_set_size);
2402    
2403     @@ -1496,8 +1501,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
2404     }
2405     }
2406    
2407     - if (!ret)
2408     + if (!ret) {
2409     bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
2410     + set_init_blocksize(bdev);
2411     + }
2412    
2413     /*
2414     * If the device is invalidated, rescan partition
2415     @@ -1532,6 +1539,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
2416     goto out_clear;
2417     }
2418     bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
2419     + set_init_blocksize(bdev);
2420     }
2421    
2422     if (bdev->bd_bdi == &noop_backing_dev_info)
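
set_init_blocksize() picks the largest power-of-two block size, capped at PAGE_SIZE, that still divides the device size, starting from the logical block size; calling it only from __blkdev_get() instead of from bd_set_size() means a later capacity change no longer clobbers a block size a filesystem has already configured via set_blocksize(). The sizing loop as a standalone program:

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Largest power-of-two bsize <= PAGE_SIZE that divides size. */
static unsigned int init_blocksize(unsigned int logical_bsize,
				   unsigned long long size)
{
	unsigned int bsize = logical_bsize;

	while (bsize < PAGE_SIZE) {
		if (size & bsize)   /* size not a multiple of 2*bsize */
			break;
		bsize <<= 1;
	}
	return bsize;
}

int main(void)
{
	/* 512-byte sectors, 1 MiB device -> prints 4096 */
	printf("%u\n", init_blocksize(512, 1 << 20));
	return 0;
}
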
2423     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2424     index d4a7f7ca4145..d96d1390068a 100644
2425     --- a/fs/btrfs/disk-io.c
2426     +++ b/fs/btrfs/disk-io.c
2427     @@ -4155,6 +4155,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
2428     spin_lock(&fs_info->ordered_root_lock);
2429     }
2430     spin_unlock(&fs_info->ordered_root_lock);
2431     +
2432     + /*
2433     + * We need this here because if we've been flipped read-only we won't
2434     + * get sync() from the umount, so we need to make sure any ordered
2435     + * extents that haven't had their dirty pages IO start writeout yet
2436     + * actually get run and error out properly.
2437     + */
2438     + btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
2439     }
2440    
2441     static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
2442     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2443     index 14c85e61134d..4f6dc56b4f4d 100644
2444     --- a/fs/btrfs/inode.c
2445     +++ b/fs/btrfs/inode.c
2446     @@ -3151,9 +3151,6 @@ out:
2447     /* once for the tree */
2448     btrfs_put_ordered_extent(ordered_extent);
2449    
2450     - /* Try to release some metadata so we don't get an OOM but don't wait */
2451     - btrfs_btree_balance_dirty_nodelay(fs_info);
2452     -
2453     return ret;
2454     }
2455    
2456     diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
2457     index 03cd59375abe..eb67bb7f04de 100644
2458     --- a/fs/pstore/ram.c
2459     +++ b/fs/pstore/ram.c
2460     @@ -713,18 +713,15 @@ static int ramoops_probe(struct platform_device *pdev)
2461     {
2462     struct device *dev = &pdev->dev;
2463     struct ramoops_platform_data *pdata = dev->platform_data;
2464     + struct ramoops_platform_data pdata_local;
2465     struct ramoops_context *cxt = &oops_cxt;
2466     size_t dump_mem_sz;
2467     phys_addr_t paddr;
2468     int err = -EINVAL;
2469    
2470     if (dev_of_node(dev) && !pdata) {
2471     - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2472     - if (!pdata) {
2473     - pr_err("cannot allocate platform data buffer\n");
2474     - err = -ENOMEM;
2475     - goto fail_out;
2476     - }
2477     + pdata = &pdata_local;
2478     + memset(pdata, 0, sizeof(*pdata));
2479    
2480     err = ramoops_parse_dt(pdev, pdata);
2481     if (err < 0)
2482     diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
2483     index 7cca5f859a90..f3c43519baa7 100644
2484     --- a/include/linux/bcma/bcma_soc.h
2485     +++ b/include/linux/bcma/bcma_soc.h
2486     @@ -6,6 +6,7 @@
2487    
2488     struct bcma_soc {
2489     struct bcma_bus bus;
2490     + struct device *dev;
2491     };
2492    
2493     int __init bcma_host_soc_register(struct bcma_soc *soc);
2494     diff --git a/include/linux/genhd.h b/include/linux/genhd.h
2495     index 25c08c6c7f99..f767293b00e6 100644
2496     --- a/include/linux/genhd.h
2497     +++ b/include/linux/genhd.h
2498     @@ -129,7 +129,7 @@ struct hd_struct {
2499     struct disk_stats dkstats;
2500     #endif
2501     struct percpu_ref ref;
2502     - struct rcu_head rcu_head;
2503     + struct rcu_work rcu_work;
2504     };
2505    
2506     #define GENHD_FL_REMOVABLE 1
2507     diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
2508     index 4b2b2baf8ab4..f32fc8289473 100644
2509     --- a/include/net/netfilter/nf_conntrack_count.h
2510     +++ b/include/net/netfilter/nf_conntrack_count.h
2511     @@ -5,17 +5,10 @@
2512    
2513     struct nf_conncount_data;
2514    
2515     -enum nf_conncount_list_add {
2516     - NF_CONNCOUNT_ADDED, /* list add was ok */
2517     - NF_CONNCOUNT_ERR, /* -ENOMEM, must drop skb */
2518     - NF_CONNCOUNT_SKIP, /* list is already reclaimed by gc */
2519     -};
2520     -
2521     struct nf_conncount_list {
2522     spinlock_t list_lock;
2523     struct list_head head; /* connections with the same filtering key */
2524     unsigned int count; /* length of list */
2525     - bool dead;
2526     };
2527    
2528     struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
2529     @@ -29,18 +22,12 @@ unsigned int nf_conncount_count(struct net *net,
2530     const struct nf_conntrack_tuple *tuple,
2531     const struct nf_conntrack_zone *zone);
2532    
2533     -void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list,
2534     - const struct nf_conntrack_tuple *tuple,
2535     - const struct nf_conntrack_zone *zone,
2536     - bool *addit);
2537     +int nf_conncount_add(struct net *net, struct nf_conncount_list *list,
2538     + const struct nf_conntrack_tuple *tuple,
2539     + const struct nf_conntrack_zone *zone);
2540    
2541     void nf_conncount_list_init(struct nf_conncount_list *list);
2542    
2543     -enum nf_conncount_list_add
2544     -nf_conncount_add(struct nf_conncount_list *list,
2545     - const struct nf_conntrack_tuple *tuple,
2546     - const struct nf_conntrack_zone *zone);
2547     -
2548     bool nf_conncount_gc_list(struct net *net,
2549     struct nf_conncount_list *list);
2550    
2551     diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
2552     index d13fd490b66d..6e73f0274e41 100644
2553     --- a/include/uapi/rdma/vmw_pvrdma-abi.h
2554     +++ b/include/uapi/rdma/vmw_pvrdma-abi.h
2555     @@ -78,6 +78,7 @@ enum pvrdma_wr_opcode {
2556     PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
2557     PVRDMA_WR_BIND_MW,
2558     PVRDMA_WR_REG_SIG_MR,
2559     + PVRDMA_WR_ERROR,
2560     };
2561    
2562     enum pvrdma_wc_status {
2563     diff --git a/init/Kconfig b/init/Kconfig
2564     index 317d5ccb5191..864af10bb1b9 100644
2565     --- a/init/Kconfig
2566     +++ b/init/Kconfig
2567     @@ -1102,6 +1102,7 @@ config LD_DEAD_CODE_DATA_ELIMINATION
2568     bool "Dead code and data elimination (EXPERIMENTAL)"
2569     depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
2570     depends on EXPERT
2571     + depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800)
2572     depends on $(cc-option,-ffunction-sections -fdata-sections)
2573     depends on $(ld-option,--gc-sections)
2574     help
2575     diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
2576     index 14436f4ca6bd..30e0f9770f88 100644
2577     --- a/lib/int_sqrt.c
2578     +++ b/lib/int_sqrt.c
2579     @@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x)
2580     if (x <= ULONG_MAX)
2581     return int_sqrt((unsigned long) x);
2582    
2583     - m = 1ULL << (fls64(x) & ~1ULL);
2584     + m = 1ULL << ((fls64(x) - 1) & ~1ULL);
2585     while (m != 0) {
2586     b = y + m;
2587     y >>= 1;
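
The int_sqrt64() seed must be the largest power of four not exceeding x, i.e. 1 << ((fls64(x) - 1) & ~1). fls64() is 1-based (unlike __fls(), which the unsigned long int_sqrt() uses), so the old seed fls64(x) & ~1 overshot for some inputs: x = 8 gives fls64 = 4 and m = 16 > x, corrupting the result. A runnable check of the fixed routine (fls64() is emulated here with the GCC/Clang builtin __builtin_clzll()):

#include <stdint.h>
#include <stdio.h>

static int fls64(uint64_t x)  /* 1-based index of highest set bit */
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

static uint32_t int_sqrt64(uint64_t x)
{
	uint64_t b, m, y = 0;

	if (x == 0)  /* the kernel delegates small x to int_sqrt() */
		return 0;
	/* m = largest power of four <= x; the old "fls64(x) & ~1"
	 * seed gave m = 16 for x = 8. */
	m = 1ULL << ((fls64(x) - 1) & ~1);
	while (m != 0) {
		b = y + m;
		y >>= 1;
		if (x >= b) {
			x -= b;
			y += m;
		}
		m >>= 2;
	}
	return (uint32_t)y;
}

int main(void)
{
	printf("%u %u\n", int_sqrt64(8), int_sqrt64(1ULL << 40));
	/* prints: 2 1048576 */
	return 0;
}
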
2588     diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
2589     index 37278dc280eb..e07a7e62c705 100644
2590     --- a/net/bridge/br_netfilter_hooks.c
2591     +++ b/net/bridge/br_netfilter_hooks.c
2592     @@ -278,7 +278,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
2593     struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
2594     int ret;
2595    
2596     - if (neigh->hh.hh_len) {
2597     + if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
2598     neigh_hh_bridge(&neigh->hh, skb);
2599     skb->dev = nf_bridge->physindev;
2600     ret = br_handle_frame_finish(net, sk, skb);
2601     diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2602     index 491828713e0b..5e55cef0cec3 100644
2603     --- a/net/bridge/netfilter/ebtables.c
2604     +++ b/net/bridge/netfilter/ebtables.c
2605     @@ -1137,14 +1137,16 @@ static int do_replace(struct net *net, const void __user *user,
2606     tmp.name[sizeof(tmp.name) - 1] = 0;
2607    
2608     countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2609     - newinfo = vmalloc(sizeof(*newinfo) + countersize);
2610     + newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
2611     + PAGE_KERNEL);
2612     if (!newinfo)
2613     return -ENOMEM;
2614    
2615     if (countersize)
2616     memset(newinfo->counters, 0, countersize);
2617    
2618     - newinfo->entries = vmalloc(tmp.entries_size);
2619     + newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
2620     + PAGE_KERNEL);
2621     if (!newinfo->entries) {
2622     ret = -ENOMEM;
2623     goto free_newinfo;
2624     diff --git a/net/can/gw.c b/net/can/gw.c
2625     index faa3da88a127..53859346dc9a 100644
2626     --- a/net/can/gw.c
2627     +++ b/net/can/gw.c
2628     @@ -416,13 +416,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
2629     while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
2630     (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
2631    
2632     - /* check for checksum updates when the CAN frame has been modified */
2633     + /* Has the CAN frame been modified? */
2634     if (modidx) {
2635     - if (gwj->mod.csumfunc.crc8)
2636     + /* get available space for the processed CAN frame type */
2637     + int max_len = nskb->len - offsetof(struct can_frame, data);
2638     +
2639     + /* dlc may have changed, make sure it fits into the CAN frame */
2640     + if (cf->can_dlc > max_len)
2641     + goto out_delete;
2642     +
2643     + /* check for checksum updates in classic CAN length only */
2644     + if (gwj->mod.csumfunc.crc8) {
2645     + if (cf->can_dlc > 8)
2646     + goto out_delete;
2647     +
2648     (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
2649     + }
2650     +
2651     + if (gwj->mod.csumfunc.xor) {
2652     + if (cf->can_dlc > 8)
2653     + goto out_delete;
2654    
2655     - if (gwj->mod.csumfunc.xor)
2656     (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
2657     + }
2658     }
2659    
2660     /* clear the skb timestamp if not configured the other way */
2661     @@ -434,6 +450,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
2662     gwj->dropped_frames++;
2663     else
2664     gwj->handled_frames++;
2665     +
2666     + return;
2667     +
2668     + out_delete:
2669     + /* delete frame due to misconfiguration */
2670     + gwj->deleted_frames++;
2671     + kfree_skb(nskb);
2672     + return;
2673     }
2674    
2675     static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
2676     diff --git a/net/core/filter.c b/net/core/filter.c
2677     index 5e00f2b85a56..8c2411fb2509 100644
2678     --- a/net/core/filter.c
2679     +++ b/net/core/filter.c
2680     @@ -2018,18 +2018,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
2681     static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
2682     u32 flags)
2683     {
2684     - /* skb->mac_len is not set on normal egress */
2685     - unsigned int mlen = skb->network_header - skb->mac_header;
2686     + unsigned int mlen = skb_network_offset(skb);
2687    
2688     - __skb_pull(skb, mlen);
2689     + if (mlen) {
2690     + __skb_pull(skb, mlen);
2691    
2692     - /* At ingress, the mac header has already been pulled once.
2693     - * At egress, skb_pospull_rcsum has to be done in case that
2694     - * the skb is originated from ingress (i.e. a forwarded skb)
2695     - * to ensure that rcsum starts at net header.
2696     - */
2697     - if (!skb_at_tc_ingress(skb))
2698     - skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
2699     + /* At ingress, the mac header has already been pulled once.
2700     + * At egress, skb_postpull_rcsum has to be done in case
2701     + * the skb originated from ingress (i.e. a forwarded skb)
2702     + * to ensure that rcsum starts at net header.
2703     + */
2704     + if (!skb_at_tc_ingress(skb))
2705     + skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
2706     + }
2707     skb_pop_mac_header(skb);
2708     skb_reset_mac_len(skb);
2709     return flags & BPF_F_INGRESS ?
2710     diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
2711     index 3e85437f7106..a648568c5e8f 100644
2712     --- a/net/core/lwt_bpf.c
2713     +++ b/net/core/lwt_bpf.c
2714     @@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
2715     lwt->name ? : "<unknown>");
2716     ret = BPF_OK;
2717     } else {
2718     + skb_reset_mac_header(skb);
2719     ret = skb_do_redirect(skb);
2720     if (ret == 0)
2721     ret = BPF_REDIRECT;
2722     diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
2723     index 26c36cccabdc..b7a26120d552 100644
2724     --- a/net/ipv4/ip_sockglue.c
2725     +++ b/net/ipv4/ip_sockglue.c
2726     @@ -148,19 +148,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
2727    
2728     static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
2729     {
2730     + __be16 _ports[2], *ports;
2731     struct sockaddr_in sin;
2732     - __be16 *ports;
2733     - int end;
2734     -
2735     - end = skb_transport_offset(skb) + 4;
2736     - if (end > 0 && !pskb_may_pull(skb, end))
2737     - return;
2738    
2739     /* All current transport protocols have the port numbers in the
2740     * first four bytes of the transport header and this function is
2741     * written with this assumption in mind.
2742     */
2743     - ports = (__be16 *)skb_transport_header(skb);
2744     + ports = skb_header_pointer(skb, skb_transport_offset(skb),
2745     + sizeof(_ports), &_ports);
2746     + if (!ports)
2747     + return;
2748    
2749     sin.sin_family = AF_INET;
2750     sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
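
The ip_cmsg_recv_dstaddr() rework swaps pskb_may_pull() plus a direct skb_transport_header() dereference for skb_header_pointer(), which copes with nonlinear skbs by copying the requested bytes into a caller-provided buffer and returns NULL when the packet is too short. A user-space model of that contract (the kernel returns a direct pointer when the data is linear; this model always copies, and byte-order conversion of the ports is omitted):

#include <stdint.h>
#include <string.h>

/* Model of skb_header_pointer(): yield len bytes at offset, copied
 * into buf, or NULL if the packet is too short.
 */
static const void *header_pointer(const uint8_t *pkt, size_t pktlen,
				  size_t offset, size_t len, void *buf)
{
	if (offset > pktlen || len > pktlen - offset)
		return NULL;              /* truncated packet */
	memcpy(buf, pkt + offset, len);
	return buf;
}

/* usage shaped like the patched ip_cmsg_recv_dstaddr() */
static int read_ports(const uint8_t *pkt, size_t pktlen, size_t thoff,
		      uint16_t ports[2])
{
	uint16_t _ports[2];
	const uint16_t *p = header_pointer(pkt, pktlen, thoff,
					   sizeof(_ports), _ports);

	if (!p)
		return -1;
	ports[0] = p[0];                  /* source port, still be16 */
	ports[1] = p[1];                  /* destination port, be16 */
	return 0;
}
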
2751     diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
2752     index 57eae8d70ba1..b1b5a648def6 100644
2753     --- a/net/ipv4/tcp_timer.c
2754     +++ b/net/ipv4/tcp_timer.c
2755     @@ -224,7 +224,7 @@ static int tcp_write_timeout(struct sock *sk)
2756     if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2757     if (icsk->icsk_retransmits) {
2758     dst_negative_advice(sk);
2759     - } else if (!tp->syn_data && !tp->syn_fastopen) {
2760     + } else {
2761     sk_rethink_txhash(sk);
2762     }
2763     retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
2764     diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
2765     index 1ede7a16a0be..cb24850d2c7f 100644
2766     --- a/net/ipv6/datagram.c
2767     +++ b/net/ipv6/datagram.c
2768     @@ -341,6 +341,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
2769     skb_reset_network_header(skb);
2770     iph = ipv6_hdr(skb);
2771     iph->daddr = fl6->daddr;
2772     + ip6_flow_hdr(iph, 0, 0);
2773    
2774     serr = SKB_EXT_ERR(skb);
2775     serr->ee.ee_errno = err;
2776     @@ -700,17 +701,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
2777     }
2778     if (np->rxopt.bits.rxorigdstaddr) {
2779     struct sockaddr_in6 sin6;
2780     - __be16 *ports;
2781     - int end;
2782     + __be16 _ports[2], *ports;
2783    
2784     - end = skb_transport_offset(skb) + 4;
2785     - if (end <= 0 || pskb_may_pull(skb, end)) {
2786     + ports = skb_header_pointer(skb, skb_transport_offset(skb),
2787     + sizeof(_ports), &_ports);
2788     + if (ports) {
2789     /* All current transport protocols have the port numbers in the
2790     * first four bytes of the transport header and this function is
2791     * written with this assumption in mind.
2792     */
2793     - ports = (__be16 *)skb_transport_header(skb);
2794     -
2795     sin6.sin6_family = AF_INET6;
2796     sin6.sin6_addr = ipv6_hdr(skb)->daddr;
2797     sin6.sin6_port = ports[1];
2798     diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
2799     index c9c53ade55c3..6d14cbe443f8 100644
2800     --- a/net/ipv6/icmp.c
2801     +++ b/net/ipv6/icmp.c
2802     @@ -421,10 +421,10 @@ static int icmp6_iif(const struct sk_buff *skb)
2803     static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
2804     const struct in6_addr *force_saddr)
2805     {
2806     - struct net *net = dev_net(skb->dev);
2807     struct inet6_dev *idev = NULL;
2808     struct ipv6hdr *hdr = ipv6_hdr(skb);
2809     struct sock *sk;
2810     + struct net *net;
2811     struct ipv6_pinfo *np;
2812     const struct in6_addr *saddr = NULL;
2813     struct dst_entry *dst;
2814     @@ -435,12 +435,16 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
2815     int iif = 0;
2816     int addr_type = 0;
2817     int len;
2818     - u32 mark = IP6_REPLY_MARK(net, skb->mark);
2819     + u32 mark;
2820    
2821     if ((u8 *)hdr < skb->head ||
2822     (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
2823     return;
2824    
2825     + if (!skb->dev)
2826     + return;
2827     + net = dev_net(skb->dev);
2828     + mark = IP6_REPLY_MARK(net, skb->mark);
2829     /*
2830     * Make sure we respect the rules
2831     * i.e. RFC 1885 2.4(e)
2832     diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
2833     index 9cd180bda092..7554c56b2e63 100644
2834     --- a/net/netfilter/nf_conncount.c
2835     +++ b/net/netfilter/nf_conncount.c
2836     @@ -33,12 +33,6 @@
2837    
2838     #define CONNCOUNT_SLOTS 256U
2839    
2840     -#ifdef CONFIG_LOCKDEP
2841     -#define CONNCOUNT_LOCK_SLOTS 8U
2842     -#else
2843     -#define CONNCOUNT_LOCK_SLOTS 256U
2844     -#endif
2845     -
2846     #define CONNCOUNT_GC_MAX_NODES 8
2847     #define MAX_KEYLEN 5
2848    
2849     @@ -49,8 +43,6 @@ struct nf_conncount_tuple {
2850     struct nf_conntrack_zone zone;
2851     int cpu;
2852     u32 jiffies32;
2853     - bool dead;
2854     - struct rcu_head rcu_head;
2855     };
2856    
2857     struct nf_conncount_rb {
2858     @@ -60,7 +52,7 @@ struct nf_conncount_rb {
2859     struct rcu_head rcu_head;
2860     };
2861    
2862     -static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;
2863     +static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;
2864    
2865     struct nf_conncount_data {
2866     unsigned int keylen;
2867     @@ -89,79 +81,25 @@ static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
2868     return memcmp(a, b, klen * sizeof(u32));
2869     }
2870    
2871     -enum nf_conncount_list_add
2872     -nf_conncount_add(struct nf_conncount_list *list,
2873     - const struct nf_conntrack_tuple *tuple,
2874     - const struct nf_conntrack_zone *zone)
2875     -{
2876     - struct nf_conncount_tuple *conn;
2877     -
2878     - if (WARN_ON_ONCE(list->count > INT_MAX))
2879     - return NF_CONNCOUNT_ERR;
2880     -
2881     - conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
2882     - if (conn == NULL)
2883     - return NF_CONNCOUNT_ERR;
2884     -
2885     - conn->tuple = *tuple;
2886     - conn->zone = *zone;
2887     - conn->cpu = raw_smp_processor_id();
2888     - conn->jiffies32 = (u32)jiffies;
2889     - conn->dead = false;
2890     - spin_lock_bh(&list->list_lock);
2891     - if (list->dead == true) {
2892     - kmem_cache_free(conncount_conn_cachep, conn);
2893     - spin_unlock_bh(&list->list_lock);
2894     - return NF_CONNCOUNT_SKIP;
2895     - }
2896     - list_add_tail(&conn->node, &list->head);
2897     - list->count++;
2898     - spin_unlock_bh(&list->list_lock);
2899     - return NF_CONNCOUNT_ADDED;
2900     -}
2901     -EXPORT_SYMBOL_GPL(nf_conncount_add);
2902     -
2903     -static void __conn_free(struct rcu_head *h)
2904     -{
2905     - struct nf_conncount_tuple *conn;
2906     -
2907     - conn = container_of(h, struct nf_conncount_tuple, rcu_head);
2908     - kmem_cache_free(conncount_conn_cachep, conn);
2909     -}
2910     -
2911     -static bool conn_free(struct nf_conncount_list *list,
2912     +static void conn_free(struct nf_conncount_list *list,
2913     struct nf_conncount_tuple *conn)
2914     {
2915     - bool free_entry = false;
2916     -
2917     - spin_lock_bh(&list->list_lock);
2918     -
2919     - if (conn->dead) {
2920     - spin_unlock_bh(&list->list_lock);
2921     - return free_entry;
2922     - }
2923     + lockdep_assert_held(&list->list_lock);
2924    
2925     list->count--;
2926     - conn->dead = true;
2927     - list_del_rcu(&conn->node);
2928     - if (list->count == 0) {
2929     - list->dead = true;
2930     - free_entry = true;
2931     - }
2932     + list_del(&conn->node);
2933    
2934     - spin_unlock_bh(&list->list_lock);
2935     - call_rcu(&conn->rcu_head, __conn_free);
2936     - return free_entry;
2937     + kmem_cache_free(conncount_conn_cachep, conn);
2938     }
2939    
2940     static const struct nf_conntrack_tuple_hash *
2941     find_or_evict(struct net *net, struct nf_conncount_list *list,
2942     - struct nf_conncount_tuple *conn, bool *free_entry)
2943     + struct nf_conncount_tuple *conn)
2944     {
2945     const struct nf_conntrack_tuple_hash *found;
2946     unsigned long a, b;
2947     int cpu = raw_smp_processor_id();
2948     - __s32 age;
2949     + u32 age;
2950    
2951     found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
2952     if (found)
2953     @@ -176,52 +114,45 @@ find_or_evict(struct net *net, struct nf_conncount_list *list,
2954     */
2955     age = a - b;
2956     if (conn->cpu == cpu || age >= 2) {
2957     - *free_entry = conn_free(list, conn);
2958     + conn_free(list, conn);
2959     return ERR_PTR(-ENOENT);
2960     }
2961    
2962     return ERR_PTR(-EAGAIN);
2963     }
2964    
2965     -void nf_conncount_lookup(struct net *net,
2966     - struct nf_conncount_list *list,
2967     - const struct nf_conntrack_tuple *tuple,
2968     - const struct nf_conntrack_zone *zone,
2969     - bool *addit)
2970     +static int __nf_conncount_add(struct net *net,
2971     + struct nf_conncount_list *list,
2972     + const struct nf_conntrack_tuple *tuple,
2973     + const struct nf_conntrack_zone *zone)
2974     {
2975     const struct nf_conntrack_tuple_hash *found;
2976     struct nf_conncount_tuple *conn, *conn_n;
2977     struct nf_conn *found_ct;
2978     unsigned int collect = 0;
2979     - bool free_entry = false;
2980     -
2981     - /* best effort only */
2982     - *addit = tuple ? true : false;
2983    
2984     /* check the saved connections */
2985     list_for_each_entry_safe(conn, conn_n, &list->head, node) {
2986     if (collect > CONNCOUNT_GC_MAX_NODES)
2987     break;
2988    
2989     - found = find_or_evict(net, list, conn, &free_entry);
2990     + found = find_or_evict(net, list, conn);
2991     if (IS_ERR(found)) {
2992     /* Not found, but might be about to be confirmed */
2993     if (PTR_ERR(found) == -EAGAIN) {
2994     - if (!tuple)
2995     - continue;
2996     -
2997     if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
2998     nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
2999     nf_ct_zone_id(zone, zone->dir))
3000     - *addit = false;
3001     - } else if (PTR_ERR(found) == -ENOENT)
3002     + return 0; /* already exists */
3003     + } else {
3004     collect++;
3005     + }
3006     continue;
3007     }
3008    
3009     found_ct = nf_ct_tuplehash_to_ctrack(found);
3010    
3011     - if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
3012     + if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
3013     nf_ct_zone_equal(found_ct, zone, zone->dir)) {
3014     /*
3015     * We should not see tuples twice unless someone hooks
3016     @@ -229,7 +160,8 @@ void nf_conncount_lookup(struct net *net,
3017     *
3018     * Attempt to avoid a re-add in this case.
3019     */
3020     - *addit = false;
3021     + nf_ct_put(found_ct);
3022     + return 0;
3023     } else if (already_closed(found_ct)) {
3024     /*
3025     * we do not care about connections which are
3026     @@ -243,19 +175,48 @@ void nf_conncount_lookup(struct net *net,
3027    
3028     nf_ct_put(found_ct);
3029     }
3030     +
3031     + if (WARN_ON_ONCE(list->count > INT_MAX))
3032     + return -EOVERFLOW;
3033     +
3034     + conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
3035     + if (conn == NULL)
3036     + return -ENOMEM;
3037     +
3038     + conn->tuple = *tuple;
3039     + conn->zone = *zone;
3040     + conn->cpu = raw_smp_processor_id();
3041     + conn->jiffies32 = (u32)jiffies;
3042     + list_add_tail(&conn->node, &list->head);
3043     + list->count++;
3044     + return 0;
3045     }
3046     -EXPORT_SYMBOL_GPL(nf_conncount_lookup);
3047     +
3048     +int nf_conncount_add(struct net *net,
3049     + struct nf_conncount_list *list,
3050     + const struct nf_conntrack_tuple *tuple,
3051     + const struct nf_conntrack_zone *zone)
3052     +{
3053     + int ret;
3054     +
3055     + /* check the saved connections */
3056     + spin_lock_bh(&list->list_lock);
3057     + ret = __nf_conncount_add(net, list, tuple, zone);
3058     + spin_unlock_bh(&list->list_lock);
3059     +
3060     + return ret;
3061     +}
3062     +EXPORT_SYMBOL_GPL(nf_conncount_add);
3063    
3064     void nf_conncount_list_init(struct nf_conncount_list *list)
3065     {
3066     spin_lock_init(&list->list_lock);
3067     INIT_LIST_HEAD(&list->head);
3068     list->count = 0;
3069     - list->dead = false;
3070     }
3071     EXPORT_SYMBOL_GPL(nf_conncount_list_init);
3072    
3073     -/* Return true if the list is empty */
3074     +/* Return true if the list is empty. Must be called with BH disabled. */
3075     bool nf_conncount_gc_list(struct net *net,
3076     struct nf_conncount_list *list)
3077     {
3078     @@ -263,17 +224,17 @@ bool nf_conncount_gc_list(struct net *net,
3079     struct nf_conncount_tuple *conn, *conn_n;
3080     struct nf_conn *found_ct;
3081     unsigned int collected = 0;
3082     - bool free_entry = false;
3083     bool ret = false;
3084    
3085     + /* don't bother if other cpu is already doing GC */
3086     + if (!spin_trylock(&list->list_lock))
3087     + return false;
3088     +
3089     list_for_each_entry_safe(conn, conn_n, &list->head, node) {
3090     - found = find_or_evict(net, list, conn, &free_entry);
3091     + found = find_or_evict(net, list, conn);
3092     if (IS_ERR(found)) {
3093     - if (PTR_ERR(found) == -ENOENT) {
3094     - if (free_entry)
3095     - return true;
3096     + if (PTR_ERR(found) == -ENOENT)
3097     collected++;
3098     - }
3099     continue;
3100     }
3101    
3102     @@ -284,23 +245,19 @@ bool nf_conncount_gc_list(struct net *net,
3103     * closed already -> ditch it
3104     */
3105     nf_ct_put(found_ct);
3106     - if (conn_free(list, conn))
3107     - return true;
3108     + conn_free(list, conn);
3109     collected++;
3110     continue;
3111     }
3112    
3113     nf_ct_put(found_ct);
3114     if (collected > CONNCOUNT_GC_MAX_NODES)
3115     - return false;
3116     + break;
3117     }
3118    
3119     - spin_lock_bh(&list->list_lock);
3120     - if (!list->count) {
3121     - list->dead = true;
3122     + if (!list->count)
3123     ret = true;
3124     - }
3125     - spin_unlock_bh(&list->list_lock);
3126     + spin_unlock(&list->list_lock);
3127    
3128     return ret;
3129     }
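
Illustration, not part of the patch: with the hunks above, nf_conncount_gc_list() only trylocks the per-list lock (a concurrent GC on another CPU is simply skipped) and, per its new comment, expects BH to be disabled by the caller. A minimal caller sketch under those assumptions; sketch_gc_once() is a hypothetical name:

        /* assumed headers: <linux/bottom_half.h>, <net/netfilter/nf_conntrack_count.h> */
        static bool sketch_gc_once(struct net *net, struct nf_conncount_list *list)
        {
                bool is_empty;

                local_bh_disable();             /* contract: BH off around GC */
                is_empty = nf_conncount_gc_list(net, list);
                local_bh_enable();

                return is_empty;        /* true: rb-tree node may be erased */
        }
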
3130     @@ -314,6 +271,7 @@ static void __tree_nodes_free(struct rcu_head *h)
3131     kmem_cache_free(conncount_rb_cachep, rbconn);
3132     }
3133    
3134     +/* caller must hold tree nf_conncount_locks[] lock */
3135     static void tree_nodes_free(struct rb_root *root,
3136     struct nf_conncount_rb *gc_nodes[],
3137     unsigned int gc_count)
3138     @@ -323,8 +281,10 @@ static void tree_nodes_free(struct rb_root *root,
3139     while (gc_count) {
3140     rbconn = gc_nodes[--gc_count];
3141     spin_lock(&rbconn->list.list_lock);
3142     - rb_erase(&rbconn->node, root);
3143     - call_rcu(&rbconn->rcu_head, __tree_nodes_free);
3144     + if (!rbconn->list.count) {
3145     + rb_erase(&rbconn->node, root);
3146     + call_rcu(&rbconn->rcu_head, __tree_nodes_free);
3147     + }
3148     spin_unlock(&rbconn->list.list_lock);
3149     }
3150     }
3151     @@ -341,20 +301,19 @@ insert_tree(struct net *net,
3152     struct rb_root *root,
3153     unsigned int hash,
3154     const u32 *key,
3155     - u8 keylen,
3156     const struct nf_conntrack_tuple *tuple,
3157     const struct nf_conntrack_zone *zone)
3158     {
3159     - enum nf_conncount_list_add ret;
3160     struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
3161     struct rb_node **rbnode, *parent;
3162     struct nf_conncount_rb *rbconn;
3163     struct nf_conncount_tuple *conn;
3164     unsigned int count = 0, gc_count = 0;
3165     - bool node_found = false;
3166     -
3167     - spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
3168     + u8 keylen = data->keylen;
3169     + bool do_gc = true;
3170    
3171     + spin_lock_bh(&nf_conncount_locks[hash]);
3172     +restart:
3173     parent = NULL;
3174     rbnode = &(root->rb_node);
3175     while (*rbnode) {
3176     @@ -368,45 +327,32 @@ insert_tree(struct net *net,
3177     } else if (diff > 0) {
3178     rbnode = &((*rbnode)->rb_right);
3179     } else {
3180     - /* unlikely: other cpu added node already */
3181     - node_found = true;
3182     - ret = nf_conncount_add(&rbconn->list, tuple, zone);
3183     - if (ret == NF_CONNCOUNT_ERR) {
3184     + int ret;
3185     +
3186     + ret = nf_conncount_add(net, &rbconn->list, tuple, zone);
3187     + if (ret)
3188     count = 0; /* hotdrop */
3189     - } else if (ret == NF_CONNCOUNT_ADDED) {
3190     + else
3191     count = rbconn->list.count;
3192     - } else {
3193     - /* NF_CONNCOUNT_SKIP, rbconn is already
3194     - * reclaimed by gc, insert a new tree node
3195     - */
3196     - node_found = false;
3197     - }
3198     - break;
3199     + tree_nodes_free(root, gc_nodes, gc_count);
3200     + goto out_unlock;
3201     }
3202    
3203     if (gc_count >= ARRAY_SIZE(gc_nodes))
3204     continue;
3205    
3206     - if (nf_conncount_gc_list(net, &rbconn->list))
3207     + if (do_gc && nf_conncount_gc_list(net, &rbconn->list))
3208     gc_nodes[gc_count++] = rbconn;
3209     }
3210    
3211     if (gc_count) {
3212     tree_nodes_free(root, gc_nodes, gc_count);
3213     - /* tree_node_free before new allocation permits
3214     - * allocator to re-use newly free'd object.
3215     - *
3216     - * This is a rare event; in most cases we will find
3217     - * existing node to re-use. (or gc_count is 0).
3218     - */
3219     -
3220     - if (gc_count >= ARRAY_SIZE(gc_nodes))
3221     - schedule_gc_worker(data, hash);
3222     + schedule_gc_worker(data, hash);
3223     + gc_count = 0;
3224     + do_gc = false;
3225     + goto restart;
3226     }
3227    
3228     - if (node_found)
3229     - goto out_unlock;
3230     -
3231     /* expected case: match, insert new node */
3232     rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
3233     if (rbconn == NULL)
3234     @@ -430,7 +376,7 @@ insert_tree(struct net *net,
3235     rb_link_node_rcu(&rbconn->node, parent, rbnode);
3236     rb_insert_color(&rbconn->node, root);
3237     out_unlock:
3238     - spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
3239     + spin_unlock_bh(&nf_conncount_locks[hash]);
3240     return count;
3241     }
3242    
3243     @@ -441,7 +387,6 @@ count_tree(struct net *net,
3244     const struct nf_conntrack_tuple *tuple,
3245     const struct nf_conntrack_zone *zone)
3246     {
3247     - enum nf_conncount_list_add ret;
3248     struct rb_root *root;
3249     struct rb_node *parent;
3250     struct nf_conncount_rb *rbconn;
3251     @@ -454,7 +399,6 @@ count_tree(struct net *net,
3252     parent = rcu_dereference_raw(root->rb_node);
3253     while (parent) {
3254     int diff;
3255     - bool addit;
3256    
3257     rbconn = rb_entry(parent, struct nf_conncount_rb, node);
3258    
3259     @@ -464,31 +408,36 @@ count_tree(struct net *net,
3260     } else if (diff > 0) {
3261     parent = rcu_dereference_raw(parent->rb_right);
3262     } else {
3263     - /* same source network -> be counted! */
3264     - nf_conncount_lookup(net, &rbconn->list, tuple, zone,
3265     - &addit);
3266     + int ret;
3267    
3268     - if (!addit)
3269     + if (!tuple) {
3270     + nf_conncount_gc_list(net, &rbconn->list);
3271     return rbconn->list.count;
3272     + }
3273    
3274     - ret = nf_conncount_add(&rbconn->list, tuple, zone);
3275     - if (ret == NF_CONNCOUNT_ERR) {
3276     - return 0; /* hotdrop */
3277     - } else if (ret == NF_CONNCOUNT_ADDED) {
3278     - return rbconn->list.count;
3279     - } else {
3280     - /* NF_CONNCOUNT_SKIP, rbconn is already
3281     - * reclaimed by gc, insert a new tree node
3282     - */
3283     + spin_lock_bh(&rbconn->list.list_lock);
3284     + /* Node might be about to be free'd.
3285     + * We need to defer to insert_tree() in this case.
3286     + */
3287     + if (rbconn->list.count == 0) {
3288     + spin_unlock_bh(&rbconn->list.list_lock);
3289     break;
3290     }
3291     +
3292     + /* same source network -> be counted! */
3293     + ret = __nf_conncount_add(net, &rbconn->list, tuple, zone);
3294     + spin_unlock_bh(&rbconn->list.list_lock);
3295     + if (ret)
3296     + return 0; /* hotdrop */
3297     + else
3298     + return rbconn->list.count;
3299     }
3300     }
3301    
3302     if (!tuple)
3303     return 0;
3304    
3305     - return insert_tree(net, data, root, hash, key, keylen, tuple, zone);
3306     + return insert_tree(net, data, root, hash, key, tuple, zone);
3307     }
3308    
3309     static void tree_gc_worker(struct work_struct *work)
3310     @@ -499,27 +448,47 @@ static void tree_gc_worker(struct work_struct *work)
3311     struct rb_node *node;
3312     unsigned int tree, next_tree, gc_count = 0;
3313    
3314     - tree = data->gc_tree % CONNCOUNT_LOCK_SLOTS;
3315     + tree = data->gc_tree % CONNCOUNT_SLOTS;
3316     root = &data->root[tree];
3317    
3318     + local_bh_disable();
3319     rcu_read_lock();
3320     for (node = rb_first(root); node != NULL; node = rb_next(node)) {
3321     rbconn = rb_entry(node, struct nf_conncount_rb, node);
3322     if (nf_conncount_gc_list(data->net, &rbconn->list))
3323     - gc_nodes[gc_count++] = rbconn;
3324     + gc_count++;
3325     }
3326     rcu_read_unlock();
3327     + local_bh_enable();
3328     +
3329     + cond_resched();
3330    
3331     spin_lock_bh(&nf_conncount_locks[tree]);
3332     + if (gc_count < ARRAY_SIZE(gc_nodes))
3333     + goto next; /* do not bother */
3334    
3335     - if (gc_count) {
3336     - tree_nodes_free(root, gc_nodes, gc_count);
3337     + gc_count = 0;
3338     + node = rb_first(root);
3339     + while (node != NULL) {
3340     + rbconn = rb_entry(node, struct nf_conncount_rb, node);
3341     + node = rb_next(node);
3342     +
3343     + if (rbconn->list.count > 0)
3344     + continue;
3345     +
3346     + gc_nodes[gc_count++] = rbconn;
3347     + if (gc_count >= ARRAY_SIZE(gc_nodes)) {
3348     + tree_nodes_free(root, gc_nodes, gc_count);
3349     + gc_count = 0;
3350     + }
3351     }
3352    
3353     + tree_nodes_free(root, gc_nodes, gc_count);
3354     +next:
3355     clear_bit(tree, data->pending_trees);
3356    
3357     next_tree = (tree + 1) % CONNCOUNT_SLOTS;
3358     - next_tree = find_next_bit(data->pending_trees, next_tree, CONNCOUNT_SLOTS);
3359     + next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);
3360    
3361     if (next_tree < CONNCOUNT_SLOTS) {
3362     data->gc_tree = next_tree;
3363     @@ -621,10 +590,7 @@ static int __init nf_conncount_modinit(void)
3364     {
3365     int i;
3366    
3367     - BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
3368     - BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);
3369     -
3370     - for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
3371     + for (i = 0; i < CONNCOUNT_SLOTS; ++i)
3372     spin_lock_init(&nf_conncount_locks[i]);
3373    
3374     conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
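
Illustration, not part of the patch: after this rework, nf_conncount_add() takes the list lock itself, returns 0 when the tuple was added or was already present, and a negative errno (-ENOMEM, -EOVERFLOW) when the packet should be dropped. A hypothetical caller, sketch_count_conn(), might look like:

        /* assumed header: <net/netfilter/nf_conntrack_count.h> */
        static unsigned int sketch_count_conn(struct net *net,
                                              struct nf_conncount_list *list,
                                              const struct nf_conntrack_tuple *tuple,
                                              const struct nf_conntrack_zone *zone)
        {
                /* 0 here means "counted" (added or already on the list) */
                if (nf_conncount_add(net, list, tuple, zone))
                        return 0;       /* allocation/overflow: hotdrop */

                return list->count;     /* connections seen for this key */
        }
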
3375     diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
3376     index b90d96ba4a12..af1497ab9464 100644
3377     --- a/net/netfilter/nft_connlimit.c
3378     +++ b/net/netfilter/nft_connlimit.c
3379     @@ -30,7 +30,6 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
3380     enum ip_conntrack_info ctinfo;
3381     const struct nf_conn *ct;
3382     unsigned int count;
3383     - bool addit;
3384    
3385     tuple_ptr = &tuple;
3386    
3387     @@ -44,19 +43,12 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
3388     return;
3389     }
3390    
3391     - nf_conncount_lookup(nft_net(pkt), &priv->list, tuple_ptr, zone,
3392     - &addit);
3393     - count = priv->list.count;
3394     -
3395     - if (!addit)
3396     - goto out;
3397     -
3398     - if (nf_conncount_add(&priv->list, tuple_ptr, zone) == NF_CONNCOUNT_ERR) {
3399     + if (nf_conncount_add(nft_net(pkt), &priv->list, tuple_ptr, zone)) {
3400     regs->verdict.code = NF_DROP;
3401     return;
3402     }
3403     - count++;
3404     -out:
3405     +
3406     + count = priv->list.count;
3407    
3408     if ((count > priv->limit) ^ priv->invert) {
3409     regs->verdict.code = NFT_BREAK;
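
Illustration, not part of the patch: the verdict test that survives the hunk above is the XOR of "limit exceeded" with the invert flag. Spelled out as a hypothetical helper (sketch_connlimit_break() is not a real symbol):

        /* assumed header: <linux/types.h> */
        static bool sketch_connlimit_break(unsigned int count,
                                           unsigned int limit, bool invert)
        {
                /* invert == false: no match once count > limit
                 * invert == true:  no match while count <= limit
                 */
                return (count > limit) ^ invert;
        }
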
3410     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3411     index 0541cfc93440..b6ea0fadb34f 100644
3412     --- a/net/packet/af_packet.c
3413     +++ b/net/packet/af_packet.c
3414     @@ -2628,7 +2628,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
3415     addr = saddr->sll_halen ? saddr->sll_addr : NULL;
3416     dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
3417     if (addr && dev && saddr->sll_halen < dev->addr_len)
3418     - goto out;
3419     + goto out_put;
3420     }
3421    
3422     err = -ENXIO;
3423     @@ -2828,7 +2828,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
3424     addr = saddr->sll_halen ? saddr->sll_addr : NULL;
3425     dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
3426     if (addr && dev && saddr->sll_halen < dev->addr_len)
3427     - goto out;
3428     + goto out_unlock;
3429     }
3430    
3431     err = -ENXIO;
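
Illustration, not part of the patch: both af_packet hunks retarget an error goto so that the reference taken by dev_get_by_index() is dropped on the short-hardware-address path. The leak-free shape, as a hypothetical helper (sketch_check_lladdr()):

        /* assumed header: <linux/netdevice.h> */
        static int sketch_check_lladdr(struct net *net, int ifindex,
                                       const unsigned char *addr,
                                       unsigned int halen)
        {
                struct net_device *dev;
                int err = -EINVAL;

                dev = dev_get_by_index(net, ifindex);   /* takes a ref */
                if (!dev)
                        return -ENXIO;

                if (addr && halen < dev->addr_len)
                        goto out_put;   /* fail, but never skip dev_put() */

                err = 0;
        out_put:
                dev_put(dev);
                return err;
        }
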
3432     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
3433     index 7f0539db5604..0bae07e9c9e7 100644
3434     --- a/net/sctp/ipv6.c
3435     +++ b/net/sctp/ipv6.c
3436     @@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
3437    
3438     switch (ev) {
3439     case NETDEV_UP:
3440     - addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
3441     + addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
3442     if (addr) {
3443     addr->a.v6.sin6_family = AF_INET6;
3444     - addr->a.v6.sin6_port = 0;
3445     - addr->a.v6.sin6_flowinfo = 0;
3446     addr->a.v6.sin6_addr = ifa->addr;
3447     addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
3448     addr->valid = 1;
3449     @@ -431,7 +429,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
3450     addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
3451     if (addr) {
3452     addr->a.v6.sin6_family = AF_INET6;
3453     - addr->a.v6.sin6_port = 0;
3454     addr->a.v6.sin6_addr = ifp->addr;
3455     addr->a.v6.sin6_scope_id = dev->ifindex;
3456     addr->valid = 1;
3457     diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
3458     index e948db29ab53..d4352111e69d 100644
3459     --- a/net/sctp/protocol.c
3460     +++ b/net/sctp/protocol.c
3461     @@ -101,7 +101,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
3462     addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
3463     if (addr) {
3464     addr->a.v4.sin_family = AF_INET;
3465     - addr->a.v4.sin_port = 0;
3466     addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
3467     addr->valid = 1;
3468     INIT_LIST_HEAD(&addr->list);
3469     @@ -776,10 +775,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
3470    
3471     switch (ev) {
3472     case NETDEV_UP:
3473     - addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
3474     + addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
3475     if (addr) {
3476     addr->a.v4.sin_family = AF_INET;
3477     - addr->a.v4.sin_port = 0;
3478     addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
3479     addr->valid = 1;
3480     spin_lock_bh(&net->sctp.local_addr_lock);
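
Illustration, not part of the patch: the four SCTP hunks replace kmalloc() plus piecemeal field zeroing with kzalloc(), so fields such as sin6_flowinfo and structure padding can never carry stale heap bytes. The same idiom on a hypothetical entry type:

        /* assumed headers: <linux/slab.h>, <linux/in.h> */
        struct sketch_addr_entry {
                struct sockaddr_in sa;
                int valid;
        };

        static struct sketch_addr_entry *sketch_new_entry(__be32 local)
        {
                /* kzalloc() zeroes everything up front: ports, padding and
                 * unassigned fields are 0 without listing them one by one.
                 */
                struct sketch_addr_entry *e = kzalloc(sizeof(*e), GFP_ATOMIC);

                if (!e)
                        return NULL;

                e->sa.sin_family = AF_INET;
                e->sa.sin_addr.s_addr = local;
                e->valid = 1;
                return e;
        }
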
3481     diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
3482     index 2b8f95290627..e6e506b2db99 100644
3483     --- a/net/smc/af_smc.c
3484     +++ b/net/smc/af_smc.c
3485     @@ -144,6 +144,9 @@ static int smc_release(struct socket *sock)
3486     sock_set_flag(sk, SOCK_DEAD);
3487     sk->sk_shutdown |= SHUTDOWN_MASK;
3488     }
3489     +
3490     + sk->sk_prot->unhash(sk);
3491     +
3492     if (smc->clcsock) {
3493     if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
3494     /* wake up clcsock accept */
3495     @@ -168,7 +171,6 @@ static int smc_release(struct socket *sock)
3496     smc_conn_free(&smc->conn);
3497     release_sock(sk);
3498    
3499     - sk->sk_prot->unhash(sk);
3500     sock_put(sk); /* final sock_put */
3501     out:
3502     return rc;
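
Illustration, not part of the patch: the smc_release() hunks move the unhash before the clcsock teardown; a plausible reading is that the socket must become invisible to lookups before its internal state is dismantled. The resulting order, sketched with a hypothetical helper:

        /* assumed header: <net/sock.h>; ordering only, details elided */
        static void sketch_release_order(struct sock *sk)
        {
                sock_set_flag(sk, SOCK_DEAD);
                sk->sk_shutdown |= SHUTDOWN_MASK;

                sk->sk_prot->unhash(sk);        /* hide sk from lookups first */

                /* ... then wake/release the internal clcsock, free the
                 * connection, and finally ...
                 */
                sock_put(sk);                   /* final reference */
        }
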
3503     diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
3504     index c7872bc13860..08b5fa4a2852 100644
3505     --- a/net/sunrpc/rpcb_clnt.c
3506     +++ b/net/sunrpc/rpcb_clnt.c
3507     @@ -771,6 +771,12 @@ void rpcb_getport_async(struct rpc_task *task)
3508     case RPCBVERS_3:
3509     map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
3510     map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
3511     + if (!map->r_addr) {
3512     + status = -ENOMEM;
3513     + dprintk("RPC: %5u %s: no memory available\n",
3514     + task->tk_pid, __func__);
3515     + goto bailout_free_args;
3516     + }
3517     map->r_owner = "";
3518     break;
3519     case RPCBVERS_2:
3520     @@ -793,6 +799,8 @@ void rpcb_getport_async(struct rpc_task *task)
3521     rpc_put_task(child);
3522     return;
3523    
3524     +bailout_free_args:
3525     + kfree(map);
3526     bailout_release_client:
3527     rpc_release_client(rpcb_clnt);
3528     bailout_nofree:
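
Illustration, not part of the patch: the rpcbind hunk adds a missing NULL check for rpc_sockaddr2uaddr() plus a bailout_free_args label, extending the existing unwind ladder so each failure frees exactly what was allocated before it. The shape of that ladder, reduced to a hypothetical two-stage allocation:

        /* assumed header: <linux/slab.h> */
        static int sketch_alloc_two(char **out)
        {
                char *args, *addr;

                args = kmalloc(32, GFP_ATOMIC);
                if (!args)
                        goto bailout_nofree;

                addr = kmalloc(64, GFP_ATOMIC);
                if (!addr)
                        goto bailout_free_args; /* undo stage 1 only */

                *out = addr;
                kfree(args);
                return 0;

        bailout_free_args:
                kfree(args);
        bailout_nofree:
                return -ENOMEM;
        }
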
3529     diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
3530     index 6376467e78f8..0b21187d74df 100644
3531     --- a/net/tipc/netlink_compat.c
3532     +++ b/net/tipc/netlink_compat.c
3533     @@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
3534     return limit;
3535     }
3536    
3537     +static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
3538     +{
3539     + return TLV_GET_LEN(tlv) - TLV_SPACE(0);
3540     +}
3541     +
3542     static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
3543     {
3544     struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
3545     @@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
3546     return buf;
3547     }
3548    
3549     +static inline bool string_is_valid(char *s, int len)
3550     +{
3551     + return memchr(s, '\0', len) ? true : false;
3552     +}
3553     +
3554     static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
3555     struct tipc_nl_compat_msg *msg,
3556     struct sk_buff *arg)
3557     @@ -379,6 +389,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
3558     struct nlattr *prop;
3559     struct nlattr *bearer;
3560     struct tipc_bearer_config *b;
3561     + int len;
3562    
3563     b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
3564    
3565     @@ -386,6 +397,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
3566     if (!bearer)
3567     return -EMSGSIZE;
3568    
3569     + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
3570     + if (!string_is_valid(b->name, len))
3571     + return -EINVAL;
3572     +
3573     if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
3574     return -EMSGSIZE;
3575    
3576     @@ -411,6 +426,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
3577     {
3578     char *name;
3579     struct nlattr *bearer;
3580     + int len;
3581    
3582     name = (char *)TLV_DATA(msg->req);
3583    
3584     @@ -418,6 +434,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
3585     if (!bearer)
3586     return -EMSGSIZE;
3587    
3588     + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
3589     + if (!string_is_valid(name, len))
3590     + return -EINVAL;
3591     +
3592     if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
3593     return -EMSGSIZE;
3594    
3595     @@ -478,6 +498,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
3596     struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
3597     struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
3598     int err;
3599     + int len;
3600    
3601     if (!attrs[TIPC_NLA_LINK])
3602     return -EINVAL;
3603     @@ -504,6 +525,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
3604     return err;
3605    
3606     name = (char *)TLV_DATA(msg->req);
3607     +
3608     + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
3609     + if (!string_is_valid(name, len))
3610     + return -EINVAL;
3611     +
3612     if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
3613     return 0;
3614    
3615     @@ -644,6 +670,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
3616     struct nlattr *prop;
3617     struct nlattr *media;
3618     struct tipc_link_config *lc;
3619     + int len;
3620    
3621     lc = (struct tipc_link_config *)TLV_DATA(msg->req);
3622    
3623     @@ -651,6 +678,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
3624     if (!media)
3625     return -EMSGSIZE;
3626    
3627     + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
3628     + if (!string_is_valid(lc->name, len))
3629     + return -EINVAL;
3630     +
3631     if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
3632     return -EMSGSIZE;
3633    
3634     @@ -671,6 +702,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
3635     struct nlattr *prop;
3636     struct nlattr *bearer;
3637     struct tipc_link_config *lc;
3638     + int len;
3639    
3640     lc = (struct tipc_link_config *)TLV_DATA(msg->req);
3641    
3642     @@ -678,6 +710,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
3643     if (!bearer)
3644     return -EMSGSIZE;
3645    
3646     + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
3647     + if (!string_is_valid(lc->name, len))
3648     + return -EINVAL;
3649     +
3650     if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
3651     return -EMSGSIZE;
3652    
3653     @@ -726,9 +762,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
3654     struct tipc_link_config *lc;
3655     struct tipc_bearer *bearer;
3656     struct tipc_media *media;
3657     + int len;
3658    
3659     lc = (struct tipc_link_config *)TLV_DATA(msg->req);
3660    
3661     + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
3662     + if (!string_is_valid(lc->name, len))
3663     + return -EINVAL;
3664     +
3665     media = tipc_media_find(lc->name);
3666     if (media) {
3667     cmd->doit = &__tipc_nl_media_set;
3668     @@ -750,6 +791,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
3669     {
3670     char *name;
3671     struct nlattr *link;
3672     + int len;
3673    
3674     name = (char *)TLV_DATA(msg->req);
3675    
3676     @@ -757,6 +799,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
3677     if (!link)
3678     return -EMSGSIZE;
3679    
3680     + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
3681     + if (!string_is_valid(name, len))
3682     + return -EINVAL;
3683     +
3684     if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
3685     return -EMSGSIZE;
3686    
3687     @@ -778,6 +824,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
3688     };
3689    
3690     ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
3691     + if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
3692     + return -EINVAL;
3693    
3694     depth = ntohl(ntq->depth);
3695    
3696     @@ -1201,7 +1249,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
3697     }
3698    
3699     len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
3700     - if (len && !TLV_OK(msg.req, len)) {
3701     + if (!len || !TLV_OK(msg.req, len)) {
3702     msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
3703     err = -EOPNOTSUPP;
3704     goto send;
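
Illustration, not part of the patch: every tipc_nl_compat hunk above applies the same rule before trusting a user-supplied name from a legacy TLV: a NUL must occur within the smaller of the TLV payload (TLV_GET_DATA_LEN()) and the destination buffer size. As a hypothetical standalone check:

        /* assumed headers: <linux/kernel.h>, <linux/string.h> */
        static bool sketch_tlv_name_ok(const char *name, int payload_len,
                                       int name_buf_len)
        {
                int len = min_t(int, payload_len, name_buf_len);

                /* reject empty payloads and unterminated strings alike */
                return len > 0 && memchr(name, '\0', len);
        }
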
3705     diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
3706     index b84c0059214f..d65eed88c495 100644
3707     --- a/net/tipc/topsrv.c
3708     +++ b/net/tipc/topsrv.c
3709     @@ -404,7 +404,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
3710     ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
3711     if (ret == -EWOULDBLOCK)
3712     return -EWOULDBLOCK;
3713     - if (ret > 0) {
3714     + if (ret == sizeof(s)) {
3715     read_lock_bh(&sk->sk_callback_lock);
3716     ret = tipc_conn_rcv_sub(srv, con, &s);
3717     read_unlock_bh(&sk->sk_callback_lock);
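
Illustration, not part of the patch: the topsrv hunk tightens "ret > 0" to "ret == sizeof(s)", so a short read is no longer parsed as a subscription request. The same exact-size discipline as a hypothetical helper:

        /* assumed headers: <linux/net.h>, <linux/uio.h>, <linux/socket.h> */
        static int sketch_recv_exact(struct socket *sock, void *buf, size_t want)
        {
                struct kvec iov = { .iov_base = buf, .iov_len = want };
                struct msghdr msg = {};
                int ret;

                ret = kernel_recvmsg(sock, &msg, &iov, 1, want, MSG_DONTWAIT);
                if (ret < 0)
                        return ret;             /* includes -EWOULDBLOCK */
                if ((size_t)ret != want)
                        return -EINVAL;         /* short read: don't parse */
                return 0;
        }
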
3718     diff --git a/security/security.c b/security/security.c
3719     index 736e78da1ab9..5ce2448f3a45 100644
3720     --- a/security/security.c
3721     +++ b/security/security.c
3722     @@ -1003,6 +1003,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
3723    
3724     void security_cred_free(struct cred *cred)
3725     {
3726     + /*
3727     + * There is a failure case in prepare_creds() that
3728     + * may result in a call here with ->security being NULL.
3729     + */
3730     + if (unlikely(cred->security == NULL))
3731     + return;
3732     +
3733     call_void_hook(cred_free, cred);
3734     }
3735    
3736     diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
3737     index b63ef865ce1e..d31a52e56b9e 100644
3738     --- a/security/selinux/ss/policydb.c
3739     +++ b/security/selinux/ss/policydb.c
3740     @@ -732,7 +732,8 @@ static int sens_destroy(void *key, void *datum, void *p)
3741     kfree(key);
3742     if (datum) {
3743     levdatum = datum;
3744     - ebitmap_destroy(&levdatum->level->cat);
3745     + if (levdatum->level)
3746     + ebitmap_destroy(&levdatum->level->cat);
3747     kfree(levdatum->level);
3748     }
3749     kfree(datum);
3750     diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
3751     index ffda91a4a1aa..02514fe558b4 100644
3752     --- a/security/yama/yama_lsm.c
3753     +++ b/security/yama/yama_lsm.c
3754     @@ -368,7 +368,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
3755     break;
3756     case YAMA_SCOPE_RELATIONAL:
3757     rcu_read_lock();
3758     - if (!task_is_descendant(current, child) &&
3759     + if (!pid_alive(child))
3760     + rc = -EPERM;
3761     + if (!rc && !task_is_descendant(current, child) &&
3762     !ptracer_exception_found(current, child) &&
3763     !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
3764     rc = -EPERM;
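
Illustration, not part of the patch: the Yama hunk denies ptrace early when the tracee is already exiting, presumably because the descendant walk is meaningless once the task's pids are detached. The check order, as a hypothetical reduction:

        /* assumed headers: <linux/sched.h>, <linux/rcupdate.h>, <linux/pid.h> */
        static int sketch_relational_check(struct task_struct *child)
        {
                int rc = 0;

                rcu_read_lock();
                if (!pid_alive(child))          /* pids already detached */
                        rc = -EPERM;
                /* only if rc == 0: descendant / exception / capability tests */
                rcu_read_unlock();

                return rc;
        }
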
3765     diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
3766     index d9a725478375..72c25a3cb658 100644
3767     --- a/tools/testing/selftests/android/Makefile
3768     +++ b/tools/testing/selftests/android/Makefile
3769     @@ -6,7 +6,7 @@ TEST_PROGS := run.sh
3770    
3771     include ../lib.mk
3772    
3773     -all: khdr
3774     +all:
3775     @for DIR in $(SUBDIRS); do \
3776     BUILD_TARGET=$(OUTPUT)/$$DIR; \
3777     mkdir $$BUILD_TARGET -p; \
3778     diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
3779     index ad1eeb14fda7..30996306cabc 100644
3780     --- a/tools/testing/selftests/futex/functional/Makefile
3781     +++ b/tools/testing/selftests/futex/functional/Makefile
3782     @@ -19,6 +19,7 @@ TEST_GEN_FILES := \
3783     TEST_PROGS := run.sh
3784    
3785     top_srcdir = ../../../../..
3786     +KSFT_KHDR_INSTALL := 1
3787     include ../../lib.mk
3788    
3789     $(TEST_GEN_FILES): $(HEADERS)
3790     diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
3791     index 4665cdbf1a8d..59ea4c461978 100644
3792     --- a/tools/testing/selftests/gpio/Makefile
3793     +++ b/tools/testing/selftests/gpio/Makefile
3794     @@ -9,6 +9,7 @@ EXTRA_OBJS := ../gpiogpio-event-mon-in.o ../gpiogpio-event-mon.o
3795     EXTRA_OBJS += ../gpiogpio-hammer-in.o ../gpiogpio-utils.o ../gpiolsgpio-in.o
3796     EXTRA_OBJS += ../gpiolsgpio.o
3797    
3798     +KSFT_KHDR_INSTALL := 1
3799     include ../lib.mk
3800    
3801     all: $(BINARIES)
3802     diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
3803     index ec32dad3c3f0..cc83e2fd3787 100644
3804     --- a/tools/testing/selftests/kvm/Makefile
3805     +++ b/tools/testing/selftests/kvm/Makefile
3806     @@ -1,6 +1,7 @@
3807     all:
3808    
3809     top_srcdir = ../../../../
3810     +KSFT_KHDR_INSTALL := 1
3811     UNAME_M := $(shell uname -m)
3812    
3813     LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
3814     @@ -40,4 +41,3 @@ $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
3815    
3816     all: $(STATIC_LIBS)
3817     $(TEST_GEN_PROGS): $(STATIC_LIBS)
3818     -$(STATIC_LIBS):| khdr
3819     diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
3820     index 0a8e75886224..8b0f16409ed7 100644
3821     --- a/tools/testing/selftests/lib.mk
3822     +++ b/tools/testing/selftests/lib.mk
3823     @@ -16,18 +16,18 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
3824     TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
3825     TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
3826    
3827     +ifdef KSFT_KHDR_INSTALL
3828     top_srcdir ?= ../../../..
3829     include $(top_srcdir)/scripts/subarch.include
3830     ARCH ?= $(SUBARCH)
3831    
3832     -all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
3833     -
3834     .PHONY: khdr
3835     khdr:
3836     make ARCH=$(ARCH) -C $(top_srcdir) headers_install
3837    
3838     -ifdef KSFT_KHDR_INSTALL
3839     -$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr
3840     +all: khdr $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
3841     +else
3842     +all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
3843     endif
3844    
3845     .ONESHELL:
3846     diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
3847     index 14cfcf006936..c46c0eefab9e 100644
3848     --- a/tools/testing/selftests/networking/timestamping/Makefile
3849     +++ b/tools/testing/selftests/networking/timestamping/Makefile
3850     @@ -6,6 +6,7 @@ TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp
3851     all: $(TEST_PROGS)
3852    
3853     top_srcdir = ../../../../..
3854     +KSFT_KHDR_INSTALL := 1
3855     include ../../lib.mk
3856    
3857     clean:
3858     diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
3859     index e94b7b14bcb2..dc68340a6a96 100644
3860     --- a/tools/testing/selftests/vm/Makefile
3861     +++ b/tools/testing/selftests/vm/Makefile
3862     @@ -24,6 +24,7 @@ TEST_GEN_FILES += virtual_address_range
3863    
3864     TEST_PROGS := run_vmtests
3865    
3866     +KSFT_KHDR_INSTALL := 1
3867     include ../lib.mk
3868    
3869     $(OUTPUT)/userfaultfd: LDLIBS += -lpthread