Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.20/0103-4.20.4-all-fixes.patch

Revision 3280
Mon Mar 4 10:35:51 2019 UTC by niro
File size: 138610 bytes
linux-4.20.4
1 niro 3280 diff --git a/Makefile b/Makefile
2     index 3b9e4658d31f..a056dba5ede0 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 20
9     -SUBLEVEL = 3
10     +SUBLEVEL = 4
11     EXTRAVERSION =
12     NAME = Shy Crocodile
13    
14     @@ -967,6 +967,7 @@ ifdef CONFIG_STACK_VALIDATION
15     endif
16     endif
17    
18     +PHONY += prepare0
19    
20     ifeq ($(KBUILD_EXTMOD),)
21     core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
22     @@ -1075,8 +1076,7 @@ scripts: scripts_basic scripts_dtc asm-generic gcc-plugins $(autoksyms_h)
23     # archprepare is used in arch Makefiles and when processed asm symlink,
24     # version.h and scripts_basic is processed / created.
25    
26     -# Listed in dependency order
27     -PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
28     +PHONY += prepare archprepare prepare1 prepare2 prepare3
29    
30     # prepare3 is used to check if we are building in a separate output directory,
31     # and if so do:
32     @@ -1545,9 +1545,6 @@ else # KBUILD_EXTMOD
33    
34     # We are always building modules
35     KBUILD_MODULES := 1
36     -PHONY += crmodverdir
37     -crmodverdir:
38     - $(cmd_crmodverdir)
39    
40     PHONY += $(objtree)/Module.symvers
41     $(objtree)/Module.symvers:
42     @@ -1559,7 +1556,7 @@ $(objtree)/Module.symvers:
43    
44     module-dirs := $(addprefix _module_,$(KBUILD_EXTMOD))
45     PHONY += $(module-dirs) modules
46     -$(module-dirs): crmodverdir $(objtree)/Module.symvers
47     +$(module-dirs): prepare $(objtree)/Module.symvers
48     $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
49    
50     modules: $(module-dirs)
51     @@ -1600,7 +1597,8 @@ help:
52    
53     # Dummies...
54     PHONY += prepare scripts
55     -prepare: ;
56     +prepare:
57     + $(cmd_crmodverdir)
58     scripts: ;
59     endif # KBUILD_EXTMOD
60    
61     @@ -1724,17 +1722,14 @@ endif
62    
63     # Modules
64     /: prepare scripts FORCE
65     - $(cmd_crmodverdir)
66     $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67     $(build)=$(build-dir)
68     # Make sure the latest headers are built for Documentation
69     Documentation/ samples/: headers_install
70     %/: prepare scripts FORCE
71     - $(cmd_crmodverdir)
72     $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
73     $(build)=$(build-dir)
74     %.ko: prepare scripts FORCE
75     - $(cmd_crmodverdir)
76     $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
77     $(build)=$(build-dir) $(@:.ko=.o)
78     $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
79     diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
80     index 7d94c1fa592a..7f799cb5668e 100644
81     --- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
82     +++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
83     @@ -28,6 +28,23 @@
84     method = "smc";
85     };
86    
87     + reserved-memory {
88     + #address-cells = <2>;
89     + #size-cells = <2>;
90     + ranges;
91     +
92     + /*
93     + * This area matches the mapping done with a
94     + * mainline U-Boot, and should be updated by the
95     + * bootloader.
96     + */
97     +
98     + psci-area@4000000 {
99     + reg = <0x0 0x4000000 0x0 0x200000>;
100     + no-map;
101     + };
102     + };
103     +
104     ap806 {
105     #address-cells = <2>;
106     #size-cells = <2>;
107     diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
108     index 2dafd936d84d..bc2327d4a505 100644
109     --- a/arch/arm64/include/asm/kvm_arm.h
110     +++ b/arch/arm64/include/asm/kvm_arm.h
111     @@ -24,6 +24,8 @@
112    
113     /* Hyp Configuration Register (HCR) bits */
114     #define HCR_FWB (UL(1) << 46)
115     +#define HCR_API (UL(1) << 41)
116     +#define HCR_APK (UL(1) << 40)
117     #define HCR_TEA (UL(1) << 37)
118     #define HCR_TERR (UL(1) << 36)
119     #define HCR_TLOR (UL(1) << 35)
120     @@ -87,6 +89,7 @@
121     HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
122     HCR_FMO | HCR_IMO)
123     #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
124     +#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
125     #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
126    
127     /* TCR_EL2 Registers bits */
128     diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
129     index 4471f570a295..b207a2ce4bc6 100644
130     --- a/arch/arm64/kernel/head.S
131     +++ b/arch/arm64/kernel/head.S
132     @@ -496,10 +496,9 @@ ENTRY(el2_setup)
133     #endif
134    
135     /* Hyp configuration. */
136     - mov x0, #HCR_RW // 64-bit EL1
137     + mov_q x0, HCR_HOST_NVHE_FLAGS
138     cbz x2, set_hcr
139     - orr x0, x0, #HCR_TGE // Enable Host Extensions
140     - orr x0, x0, #HCR_E2H
141     + mov_q x0, HCR_HOST_VHE_FLAGS
142     set_hcr:
143     msr hcr_el2, x0
144     isb
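
HCR_EL2.API and HCR_EL2.APK are trap-enables in the inverted sense: while they are clear, EL2 traps the host's pointer-authentication instructions and key-register accesses. A condensed sketch of what the two hunks above compose for the non-VHE host (values copied from the kvm_arm.h hunk):

    /* With API/APK set, a PAC-enabled host no longer traps to EL2
     * on its own pointer-auth use while KVM owns EL2. */
    #define HCR_API                 (UL(1) << 41)
    #define HCR_APK                 (UL(1) << 40)
    #define HCR_HOST_NVHE_FLAGS     (HCR_RW | HCR_API | HCR_APK)
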
145     diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
146     index f0e6ab8abe9c..ba6b41790fcd 100644
147     --- a/arch/arm64/kernel/kaslr.c
148     +++ b/arch/arm64/kernel/kaslr.c
149     @@ -14,6 +14,7 @@
150     #include <linux/sched.h>
151     #include <linux/types.h>
152    
153     +#include <asm/cacheflush.h>
154     #include <asm/fixmap.h>
155     #include <asm/kernel-pgtable.h>
156     #include <asm/memory.h>
157     @@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
158     return ret;
159     }
160    
161     -static __init const u8 *get_cmdline(void *fdt)
162     +static __init const u8 *kaslr_get_cmdline(void *fdt)
163     {
164     static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
165    
166     @@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
167     * Check if 'nokaslr' appears on the command line, and
168     * return 0 if that is the case.
169     */
170     - cmdline = get_cmdline(fdt);
171     + cmdline = kaslr_get_cmdline(fdt);
172     str = strstr(cmdline, "nokaslr");
173     if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
174     return 0;
175     @@ -169,5 +170,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
176     module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
177     module_alloc_base &= PAGE_MASK;
178    
179     + __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
180     + __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
181     +
182     return offset;
183     }
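
A minimal sketch of why the two cache-maintenance calls are needed, assuming (as in the mainline fix) that other CPUs read these variables during early boot with the MMU and caches still off; publish_early_val() is a hypothetical helper:

    #include <asm/cacheflush.h>

    static u64 module_alloc_base;

    static void publish_early_val(u64 val)
    {
            /* Clean the cached write out to the point of coherency so
             * a CPU running with caches disabled sees the new value. */
            module_alloc_base = val;
            __flush_dcache_area(&module_alloc_base,
                                sizeof(module_alloc_base));
    }
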
184     diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
185     index 7cc175c88a37..f6e02cc4d856 100644
186     --- a/arch/arm64/kvm/hyp/switch.c
187     +++ b/arch/arm64/kvm/hyp/switch.c
188     @@ -157,7 +157,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
189     mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
190    
191     write_sysreg(mdcr_el2, mdcr_el2);
192     - write_sysreg(HCR_RW, hcr_el2);
193     + write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
194     write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
195     }
196    
197     diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
198     index 8272ea4c7264..6207b41473a0 100644
199     --- a/arch/mips/Kconfig
200     +++ b/arch/mips/Kconfig
201     @@ -3184,6 +3184,7 @@ config MIPS32_O32
202     config MIPS32_N32
203     bool "Kernel support for n32 binaries"
204     depends on 64BIT
205     + select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
206     select COMPAT
207     select MIPS32_COMPAT
208     select SYSVIPC_COMPAT if SYSVIPC
209     diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
210     index 6054d49e608e..fe3773539eff 100644
211     --- a/arch/mips/bcm47xx/setup.c
212     +++ b/arch/mips/bcm47xx/setup.c
213     @@ -173,6 +173,31 @@ void __init plat_mem_setup(void)
214     pm_power_off = bcm47xx_machine_halt;
215     }
216    
217     +#ifdef CONFIG_BCM47XX_BCMA
218     +static struct device * __init bcm47xx_setup_device(void)
219     +{
220     + struct device *dev;
221     + int err;
222     +
223     + dev = kzalloc(sizeof(*dev), GFP_KERNEL);
224     + if (!dev)
225     + return NULL;
226     +
227     + err = dev_set_name(dev, "bcm47xx_soc");
228     + if (err) {
229     + pr_err("Failed to set SoC device name: %d\n", err);
230     + kfree(dev);
231     + return NULL;
232     + }
233     +
234     + err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
235     + if (err)
236     + pr_err("Failed to set SoC DMA mask: %d\n", err);
237     +
238     + return dev;
239     +}
240     +#endif
241     +
242     /*
243     * This finishes bus initialization doing things that were not possible without
244     * kmalloc. Make sure to call it late enough (after mm_init).
245     @@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void)
246     if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
247     int err;
248    
249     + bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
250     + if (!bcm47xx_bus.bcma.dev)
251     + panic("Failed to setup SoC device\n");
252     +
253     err = bcma_host_soc_init(&bcm47xx_bus.bcma);
254     if (err)
255     panic("Failed to initialize BCMA bus (err %d)", err);
256     @@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void)
257     #endif
258     #ifdef CONFIG_BCM47XX_BCMA
259     case BCM47XX_BUS_TYPE_BCMA:
260     + if (device_register(bcm47xx_bus.bcma.dev))
261     + pr_err("Failed to register SoC device\n");
262     bcma_bus_register(&bcm47xx_bus.bcma.bus);
263     break;
264     #endif
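
The new device exists so the DMA API has a struct device (and mask) to validate mappings against. A condensed sketch of the pattern; example_soc_dev() and the name string are illustrative:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    static struct device * __init example_soc_dev(void)
    {
            struct device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

            if (!dev)
                    return NULL;
            if (dev_set_name(dev, "example_soc")) {
                    kfree(dev);
                    return NULL;
            }
            /* 32-bit streaming and coherent DMA mask for the SoC bus. */
            if (dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)))
                    pr_warn("example_soc: could not set DMA mask\n");
            return dev;
    }
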
265     diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
266     index dfb95cffef3e..a3cf68538f3d 100644
267     --- a/arch/mips/cavium-octeon/setup.c
268     +++ b/arch/mips/cavium-octeon/setup.c
269     @@ -96,7 +96,7 @@ static void octeon_kexec_smp_down(void *ignored)
270     " sync \n"
271     " synci ($0) \n");
272    
273     - relocated_kexec_smp_wait(NULL);
274     + kexec_reboot();
275     }
276     #endif
277    
278     diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
279     index f0bc3312ed11..c4ef1c31e0c4 100644
280     --- a/arch/mips/lantiq/irq.c
281     +++ b/arch/mips/lantiq/irq.c
282     @@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = {
283     .irq_set_type = ltq_eiu_settype,
284     };
285    
286     -static void ltq_hw_irqdispatch(int module)
287     +static void ltq_hw_irq_handler(struct irq_desc *desc)
288     {
289     + int module = irq_desc_get_irq(desc) - 2;
290     u32 irq;
291     + int hwirq;
292    
293     irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
294     if (irq == 0)
295     @@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module)
296     * other bits might be bogus
297     */
298     irq = __fls(irq);
299     - do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
300     + hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
301     + generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
302    
303     /* if this is a EBU irq, we need to ack it or get a deadlock */
304     if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
305     @@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module)
306     LTQ_EBU_PCC_ISTAT);
307     }
308    
309     -#define DEFINE_HWx_IRQDISPATCH(x) \
310     - static void ltq_hw ## x ## _irqdispatch(void) \
311     - { \
312     - ltq_hw_irqdispatch(x); \
313     - }
314     -DEFINE_HWx_IRQDISPATCH(0)
315     -DEFINE_HWx_IRQDISPATCH(1)
316     -DEFINE_HWx_IRQDISPATCH(2)
317     -DEFINE_HWx_IRQDISPATCH(3)
318     -DEFINE_HWx_IRQDISPATCH(4)
319     -
320     -#if MIPS_CPU_TIMER_IRQ == 7
321     -static void ltq_hw5_irqdispatch(void)
322     -{
323     - do_IRQ(MIPS_CPU_TIMER_IRQ);
324     -}
325     -#else
326     -DEFINE_HWx_IRQDISPATCH(5)
327     -#endif
328     -
329     -static void ltq_hw_irq_handler(struct irq_desc *desc)
330     -{
331     - ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
332     -}
333     -
334     -asmlinkage void plat_irq_dispatch(void)
335     -{
336     - unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
337     - int irq;
338     -
339     - if (!pending) {
340     - spurious_interrupt();
341     - return;
342     - }
343     -
344     - pending >>= CAUSEB_IP;
345     - while (pending) {
346     - irq = fls(pending) - 1;
347     - do_IRQ(MIPS_CPU_IRQ_BASE + irq);
348     - pending &= ~BIT(irq);
349     - }
350     -}
351     -
352     static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
353     {
354     struct irq_chip *chip = &ltq_irq_type;
355     @@ -343,28 +303,10 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
356     for (i = 0; i < MAX_IM; i++)
357     irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
358    
359     - if (cpu_has_vint) {
360     - pr_info("Setting up vectored interrupts\n");
361     - set_vi_handler(2, ltq_hw0_irqdispatch);
362     - set_vi_handler(3, ltq_hw1_irqdispatch);
363     - set_vi_handler(4, ltq_hw2_irqdispatch);
364     - set_vi_handler(5, ltq_hw3_irqdispatch);
365     - set_vi_handler(6, ltq_hw4_irqdispatch);
366     - set_vi_handler(7, ltq_hw5_irqdispatch);
367     - }
368     -
369     ltq_domain = irq_domain_add_linear(node,
370     (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
371     &irq_domain_ops, 0);
372    
373     -#ifndef CONFIG_MIPS_MT_SMP
374     - set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
375     - IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
376     -#else
377     - set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
378     - IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
379     -#endif
380     -
381     /* tell oprofile which irq to use */
382     ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
383    
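
A minimal sketch of the chained-handler idiom the conversion lands on; read_module_status() and the hwirq arithmetic are illustrative stand-ins for the ICU specifics above. The point is that the handler resolves the hardware number through the irq domain and dispatches with generic_handle_irq() instead of calling do_IRQ() on a fixed base:

    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    static struct irq_domain *example_domain;

    static void example_chained_handler(struct irq_desc *desc)
    {
            int module = irq_desc_get_irq(desc) - 2;   /* cascade base */
            u32 pending = read_module_status(module);  /* assumed helper */
            int hwirq;

            if (!pending)
                    return;
            hwirq = __fls(pending) + module * 32;      /* illustrative map */
            generic_handle_irq(irq_linear_revmap(example_domain, hwirq));
    }

    /* wired up once per cascade input, as in the hunk above:
     *   irq_set_chained_handler(i + 2, example_chained_handler);
     */
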
384     diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
385     index 2a5bb849b10e..288b58b00dc8 100644
386     --- a/arch/mips/pci/msi-octeon.c
387     +++ b/arch/mips/pci/msi-octeon.c
388     @@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
389     int irq;
390     struct irq_chip *msi;
391    
392     - if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
393     + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
394     + return 0;
395     + } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
396     msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
397     msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
398     msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
399     diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
400     index 6f70d1b4bf36..14b0f5b6a373 100644
401     --- a/arch/powerpc/kernel/signal_64.c
402     +++ b/arch/powerpc/kernel/signal_64.c
403     @@ -755,11 +755,12 @@ SYSCALL_DEFINE0(rt_sigreturn)
404     if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
405     &uc_transact->uc_mcontext))
406     goto badframe;
407     - }
408     + } else
409     #endif
410     - /* Fall through, for non-TM restore */
411     - if (!MSR_TM_ACTIVE(msr)) {
412     + {
413     /*
414     + * Fall through, for non-TM restore
415     + *
416     * Unset MSR[TS] on the thread regs since MSR from user
417     * context does not have MSR active, and recheckpoint was
418     * not called since restore_tm_sigcontexts() was not called
419     diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
420     index 72bf446c3fee..6e29794573b7 100644
421     --- a/arch/x86/xen/time.c
422     +++ b/arch/x86/xen/time.c
423     @@ -361,8 +361,6 @@ void xen_timer_resume(void)
424     {
425     int cpu;
426    
427     - pvclock_resume();
428     -
429     if (xen_clockevent != &xen_vcpuop_clockevent)
430     return;
431    
432     @@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = {
433     };
434    
435     static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
436     +static u64 xen_clock_value_saved;
437    
438     void xen_save_time_memory_area(void)
439     {
440     struct vcpu_register_time_memory_area t;
441     int ret;
442    
443     + xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
444     +
445     if (!xen_clock)
446     return;
447    
448     @@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void)
449     int ret;
450    
451     if (!xen_clock)
452     - return;
453     + goto out;
454    
455     t.addr.v = &xen_clock->pvti;
456    
457     @@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void)
458     if (ret != 0)
459     pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
460     ret);
461     +
462     +out:
463     + /* Need pvclock_resume() before using xen_clocksource_read(). */
464     + pvclock_resume();
465     + xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
466     }
467    
468     static void xen_setup_vsyscall_time_info(void)
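
The ordering constraint the reshuffled hunks encode, reduced to a sketch (names taken from the hunks above; the example_* wrappers are illustrative): capture the clock relative to the scheduler offset before suspend, then rebase the offset after resume, where pvclock_resume() must run before the first xen_clocksource_read():

    static u64 xen_sched_clock_offset;
    static u64 xen_clock_value_saved;

    static void example_save_time(void)
    {
            xen_clock_value_saved = xen_clocksource_read() -
                                    xen_sched_clock_offset;
    }

    static void example_restore_time(void)
    {
            pvclock_resume();  /* required before xen_clocksource_read() */
            xen_sched_clock_offset = xen_clocksource_read() -
                                     xen_clock_value_saved;
    }
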
469     diff --git a/block/partition-generic.c b/block/partition-generic.c
470     index d3d14e81fb12..5f8db5c5140f 100644
471     --- a/block/partition-generic.c
472     +++ b/block/partition-generic.c
473     @@ -249,9 +249,10 @@ struct device_type part_type = {
474     .uevent = part_uevent,
475     };
476    
477     -static void delete_partition_rcu_cb(struct rcu_head *head)
478     +static void delete_partition_work_fn(struct work_struct *work)
479     {
480     - struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);
481     + struct hd_struct *part = container_of(to_rcu_work(work), struct hd_struct,
482     + rcu_work);
483    
484     part->start_sect = 0;
485     part->nr_sects = 0;
486     @@ -262,7 +263,8 @@ static void delete_partition_rcu_cb(struct rcu_head *head)
487     void __delete_partition(struct percpu_ref *ref)
488     {
489     struct hd_struct *part = container_of(ref, struct hd_struct, ref);
490     - call_rcu(&part->rcu_head, delete_partition_rcu_cb);
491     + INIT_RCU_WORK(&part->rcu_work, delete_partition_work_fn);
492     + queue_rcu_work(system_wq, &part->rcu_work);
493     }
494    
495     /*
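
A self-contained sketch of the rcu_work idiom the hunk adopts; struct example_obj is illustrative. Unlike a call_rcu() callback, which runs in softirq context, the work function runs from a workqueue after the grace period and is therefore allowed to sleep:

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct example_obj {
            struct rcu_work rcu_work;
    };

    static void example_free_fn(struct work_struct *work)
    {
            struct example_obj *obj = container_of(to_rcu_work(work),
                                                   struct example_obj,
                                                   rcu_work);
            kfree(obj);  /* process context: sleeping is allowed here */
    }

    static void example_release(struct example_obj *obj)
    {
            INIT_RCU_WORK(&obj->rcu_work, example_free_fn);
            queue_rcu_work(system_wq, &obj->rcu_work);
    }
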
496     diff --git a/crypto/authenc.c b/crypto/authenc.c
497     index 37f54d1b2f66..4be293a4b5f0 100644
498     --- a/crypto/authenc.c
499     +++ b/crypto/authenc.c
500     @@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
501     return -EINVAL;
502     if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
503     return -EINVAL;
504     - if (RTA_PAYLOAD(rta) < sizeof(*param))
505     +
506     + /*
507     + * RTA_OK() didn't align the rtattr's payload when validating that it
508     + * fits in the buffer. Yet, the keys should start on the next 4-byte
509     + * aligned boundary. To avoid confusion, require that the rtattr
510     + * payload be exactly the param struct, which has a 4-byte aligned size.
511     + */
512     + if (RTA_PAYLOAD(rta) != sizeof(*param))
513     return -EINVAL;
514     + BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
515    
516     param = RTA_DATA(rta);
517     keys->enckeylen = be32_to_cpu(param->enckeylen);
518    
519     - key += RTA_ALIGN(rta->rta_len);
520     - keylen -= RTA_ALIGN(rta->rta_len);
521     + key += rta->rta_len;
522     + keylen -= rta->rta_len;
523    
524     if (keylen < keys->enckeylen)
525     return -EINVAL;
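
For reference, the blob that crypto_authenc_extractkeys() parses is laid out as [rtattr | crypto_authenc_key_param | authkey | enckey]. After this fix the rtattr payload must be exactly sizeof(*param), so the keys start rta->rta_len bytes in with no extra alignment step. A sketch of the resulting validation (example_parse() is illustrative):

    #include <linux/errno.h>
    #include <linux/rtnetlink.h>
    #include <crypto/authenc.h>

    static int example_parse(const u8 *key, unsigned int keylen,
                             unsigned int *enckeylen)
    {
            const struct rtattr *rta = (const void *)key;
            const struct crypto_authenc_key_param *param;

            if (!RTA_OK(rta, keylen) ||
                rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM ||
                RTA_PAYLOAD(rta) != sizeof(*param))
                    return -EINVAL;

            param = RTA_DATA(rta);
            *enckeylen = be32_to_cpu(param->enckeylen);
            return 0;
    }
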
526     diff --git a/crypto/authencesn.c b/crypto/authencesn.c
527     index 80a25cc04aec..4741fe89ba2c 100644
528     --- a/crypto/authencesn.c
529     +++ b/crypto/authencesn.c
530     @@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
531     struct aead_request *req = areq->data;
532    
533     err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
534     - aead_request_complete(req, err);
535     + authenc_esn_request_complete(req, err);
536     }
537    
538     static int crypto_authenc_esn_decrypt(struct aead_request *req)
539     diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
540     index 9a5c60f08aad..c0cf87ae7ef6 100644
541     --- a/crypto/sm3_generic.c
542     +++ b/crypto/sm3_generic.c
543     @@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m)
544    
545     for (i = 0; i <= 63; i++) {
546    
547     - ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
548     + ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
549    
550     ss2 = ss1 ^ rol32(a, 12);
551    
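
The masking matters because for i in 32..63 the old code evaluated rol32(t(i), i), and shifting a 32-bit value by 32 or more is undefined behaviour in C. Masking the count is mathematically equivalent, since rotation of a 32-bit word is periodic with period 32. A one-line sketch:

    #include <linux/bitops.h>

    /* rol32(x, 32) is UB; rol32(x, 32 & 31) == rol32(x, 0) == x. */
    static inline u32 example_rol32(u32 word, unsigned int shift)
    {
            return rol32(word, shift & 31);
    }
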
552     diff --git a/drivers/block/loop.c b/drivers/block/loop.c
553     index cb0cc8685076..84b055aa81ba 100644
554     --- a/drivers/block/loop.c
555     +++ b/drivers/block/loop.c
556     @@ -83,7 +83,7 @@
557     #include <linux/uaccess.h>
558    
559     static DEFINE_IDR(loop_index_idr);
560     -static DEFINE_MUTEX(loop_index_mutex);
561     +static DEFINE_MUTEX(loop_ctl_mutex);
562    
563     static int max_part;
564     static int part_shift;
565     @@ -630,18 +630,7 @@ static void loop_reread_partitions(struct loop_device *lo,
566     {
567     int rc;
568    
569     - /*
570     - * bd_mutex has been held already in release path, so don't
571     - * acquire it if this function is called in such case.
572     - *
573     - * If the reread partition isn't from release path, lo_refcnt
574     - * must be at least one and it can only become zero when the
575     - * current holder is released.
576     - */
577     - if (!atomic_read(&lo->lo_refcnt))
578     - rc = __blkdev_reread_part(bdev);
579     - else
580     - rc = blkdev_reread_part(bdev);
581     + rc = blkdev_reread_part(bdev);
582     if (rc)
583     pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
584     __func__, lo->lo_number, lo->lo_file_name, rc);
585     @@ -688,26 +677,30 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
586     static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
587     unsigned int arg)
588     {
589     - struct file *file, *old_file;
590     + struct file *file = NULL, *old_file;
591     int error;
592     + bool partscan;
593    
594     + error = mutex_lock_killable(&loop_ctl_mutex);
595     + if (error)
596     + return error;
597     error = -ENXIO;
598     if (lo->lo_state != Lo_bound)
599     - goto out;
600     + goto out_err;
601    
602     /* the loop device has to be read-only */
603     error = -EINVAL;
604     if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
605     - goto out;
606     + goto out_err;
607    
608     error = -EBADF;
609     file = fget(arg);
610     if (!file)
611     - goto out;
612     + goto out_err;
613    
614     error = loop_validate_file(file, bdev);
615     if (error)
616     - goto out_putf;
617     + goto out_err;
618    
619     old_file = lo->lo_backing_file;
620    
621     @@ -715,7 +708,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
622    
623     /* size of the new backing store needs to be the same */
624     if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
625     - goto out_putf;
626     + goto out_err;
627    
628     /* and ... switch */
629     blk_mq_freeze_queue(lo->lo_queue);
630     @@ -726,15 +719,22 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
631     lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
632     loop_update_dio(lo);
633     blk_mq_unfreeze_queue(lo->lo_queue);
634     -
635     + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
636     + mutex_unlock(&loop_ctl_mutex);
637     + /*
638     + * We must drop file reference outside of loop_ctl_mutex as dropping
639     + * the file ref can take bd_mutex which creates circular locking
640     + * dependency.
641     + */
642     fput(old_file);
643     - if (lo->lo_flags & LO_FLAGS_PARTSCAN)
644     + if (partscan)
645     loop_reread_partitions(lo, bdev);
646     return 0;
647    
648     - out_putf:
649     - fput(file);
650     - out:
651     +out_err:
652     + mutex_unlock(&loop_ctl_mutex);
653     + if (file)
654     + fput(file);
655     return error;
656     }
657    
658     @@ -909,6 +909,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
659     int lo_flags = 0;
660     int error;
661     loff_t size;
662     + bool partscan;
663    
664     /* This is safe, since we have a reference from open(). */
665     __module_get(THIS_MODULE);
666     @@ -918,13 +919,17 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
667     if (!file)
668     goto out;
669    
670     + error = mutex_lock_killable(&loop_ctl_mutex);
671     + if (error)
672     + goto out_putf;
673     +
674     error = -EBUSY;
675     if (lo->lo_state != Lo_unbound)
676     - goto out_putf;
677     + goto out_unlock;
678    
679     error = loop_validate_file(file, bdev);
680     if (error)
681     - goto out_putf;
682     + goto out_unlock;
683    
684     mapping = file->f_mapping;
685     inode = mapping->host;
686     @@ -936,10 +941,10 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
687     error = -EFBIG;
688     size = get_loop_size(lo, file);
689     if ((loff_t)(sector_t)size != size)
690     - goto out_putf;
691     + goto out_unlock;
692     error = loop_prepare_queue(lo);
693     if (error)
694     - goto out_putf;
695     + goto out_unlock;
696    
697     error = 0;
698    
699     @@ -971,18 +976,22 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
700     lo->lo_state = Lo_bound;
701     if (part_shift)
702     lo->lo_flags |= LO_FLAGS_PARTSCAN;
703     - if (lo->lo_flags & LO_FLAGS_PARTSCAN)
704     - loop_reread_partitions(lo, bdev);
705     + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
706    
707     /* Grab the block_device to prevent its destruction after we
708     - * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
709     + * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
710     */
711     bdgrab(bdev);
712     + mutex_unlock(&loop_ctl_mutex);
713     + if (partscan)
714     + loop_reread_partitions(lo, bdev);
715     return 0;
716    
717     - out_putf:
718     +out_unlock:
719     + mutex_unlock(&loop_ctl_mutex);
720     +out_putf:
721     fput(file);
722     - out:
723     +out:
724     /* This is safe: open() is still holding a reference. */
725     module_put(THIS_MODULE);
726     return error;
727     @@ -1025,39 +1034,31 @@ loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
728     return err;
729     }
730    
731     -static int loop_clr_fd(struct loop_device *lo)
732     +static int __loop_clr_fd(struct loop_device *lo, bool release)
733     {
734     - struct file *filp = lo->lo_backing_file;
735     + struct file *filp = NULL;
736     gfp_t gfp = lo->old_gfp_mask;
737     struct block_device *bdev = lo->lo_device;
738     + int err = 0;
739     + bool partscan = false;
740     + int lo_number;
741    
742     - if (lo->lo_state != Lo_bound)
743     - return -ENXIO;
744     -
745     - /*
746     - * If we've explicitly asked to tear down the loop device,
747     - * and it has an elevated reference count, set it for auto-teardown when
748     - * the last reference goes away. This stops $!~#$@ udev from
749     - * preventing teardown because it decided that it needs to run blkid on
750     - * the loopback device whenever they appear. xfstests is notorious for
751     - * failing tests because blkid via udev races with a losetup
752     - * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
753     - * command to fail with EBUSY.
754     - */
755     - if (atomic_read(&lo->lo_refcnt) > 1) {
756     - lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
757     - mutex_unlock(&lo->lo_ctl_mutex);
758     - return 0;
759     + mutex_lock(&loop_ctl_mutex);
760     + if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
761     + err = -ENXIO;
762     + goto out_unlock;
763     }
764    
765     - if (filp == NULL)
766     - return -EINVAL;
767     + filp = lo->lo_backing_file;
768     + if (filp == NULL) {
769     + err = -EINVAL;
770     + goto out_unlock;
771     + }
772    
773     /* freeze request queue during the transition */
774     blk_mq_freeze_queue(lo->lo_queue);
775    
776     spin_lock_irq(&lo->lo_lock);
777     - lo->lo_state = Lo_rundown;
778     lo->lo_backing_file = NULL;
779     spin_unlock_irq(&lo->lo_lock);
780    
781     @@ -1093,21 +1094,73 @@ static int loop_clr_fd(struct loop_device *lo)
782     module_put(THIS_MODULE);
783     blk_mq_unfreeze_queue(lo->lo_queue);
784    
785     - if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
786     - loop_reread_partitions(lo, bdev);
787     + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
788     + lo_number = lo->lo_number;
789     lo->lo_flags = 0;
790     if (!part_shift)
791     lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
792     loop_unprepare_queue(lo);
793     - mutex_unlock(&lo->lo_ctl_mutex);
794     +out_unlock:
795     + mutex_unlock(&loop_ctl_mutex);
796     + if (partscan) {
797     + /*
798     + * bd_mutex has been held already in release path, so don't
799     + * acquire it if this function is called in such case.
800     + *
801     + * If the reread partition isn't from release path, lo_refcnt
802     + * must be at least one and it can only become zero when the
803     + * current holder is released.
804     + */
805     + if (release)
806     + err = __blkdev_reread_part(bdev);
807     + else
808     + err = blkdev_reread_part(bdev);
809     + pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
810     + __func__, lo_number, err);
811     + /* Device is gone, no point in returning error */
812     + err = 0;
813     + }
814     /*
815     - * Need not hold lo_ctl_mutex to fput backing file.
816     - * Calling fput holding lo_ctl_mutex triggers a circular
817     + * Need not hold loop_ctl_mutex to fput backing file.
818     + * Calling fput holding loop_ctl_mutex triggers a circular
819     * lock dependency possibility warning as fput can take
820     - * bd_mutex which is usually taken before lo_ctl_mutex.
821     + * bd_mutex which is usually taken before loop_ctl_mutex.
822     */
823     - fput(filp);
824     - return 0;
825     + if (filp)
826     + fput(filp);
827     + return err;
828     +}
829     +
830     +static int loop_clr_fd(struct loop_device *lo)
831     +{
832     + int err;
833     +
834     + err = mutex_lock_killable(&loop_ctl_mutex);
835     + if (err)
836     + return err;
837     + if (lo->lo_state != Lo_bound) {
838     + mutex_unlock(&loop_ctl_mutex);
839     + return -ENXIO;
840     + }
841     + /*
842     + * If we've explicitly asked to tear down the loop device,
843     + * and it has an elevated reference count, set it for auto-teardown when
844     + * the last reference goes away. This stops $!~#$@ udev from
845     + * preventing teardown because it decided that it needs to run blkid on
846     + * the loopback device whenever they appear. xfstests is notorious for
847     + * failing tests because blkid via udev races with a losetup
848     + * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
849     + * command to fail with EBUSY.
850     + */
851     + if (atomic_read(&lo->lo_refcnt) > 1) {
852     + lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
853     + mutex_unlock(&loop_ctl_mutex);
854     + return 0;
855     + }
856     + lo->lo_state = Lo_rundown;
857     + mutex_unlock(&loop_ctl_mutex);
858     +
859     + return __loop_clr_fd(lo, false);
860     }
861    
862     static int
863     @@ -1116,47 +1169,72 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
864     int err;
865     struct loop_func_table *xfer;
866     kuid_t uid = current_uid();
867     + struct block_device *bdev;
868     + bool partscan = false;
869    
870     + err = mutex_lock_killable(&loop_ctl_mutex);
871     + if (err)
872     + return err;
873     if (lo->lo_encrypt_key_size &&
874     !uid_eq(lo->lo_key_owner, uid) &&
875     - !capable(CAP_SYS_ADMIN))
876     - return -EPERM;
877     - if (lo->lo_state != Lo_bound)
878     - return -ENXIO;
879     - if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
880     - return -EINVAL;
881     + !capable(CAP_SYS_ADMIN)) {
882     + err = -EPERM;
883     + goto out_unlock;
884     + }
885     + if (lo->lo_state != Lo_bound) {
886     + err = -ENXIO;
887     + goto out_unlock;
888     + }
889     + if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) {
890     + err = -EINVAL;
891     + goto out_unlock;
892     + }
893     +
894     + if (lo->lo_offset != info->lo_offset ||
895     + lo->lo_sizelimit != info->lo_sizelimit) {
896     + sync_blockdev(lo->lo_device);
897     + kill_bdev(lo->lo_device);
898     + }
899    
900     /* I/O need to be drained during transfer transition */
901     blk_mq_freeze_queue(lo->lo_queue);
902    
903     err = loop_release_xfer(lo);
904     if (err)
905     - goto exit;
906     + goto out_unfreeze;
907    
908     if (info->lo_encrypt_type) {
909     unsigned int type = info->lo_encrypt_type;
910    
911     if (type >= MAX_LO_CRYPT) {
912     err = -EINVAL;
913     - goto exit;
914     + goto out_unfreeze;
915     }
916     xfer = xfer_funcs[type];
917     if (xfer == NULL) {
918     err = -EINVAL;
919     - goto exit;
920     + goto out_unfreeze;
921     }
922     } else
923     xfer = NULL;
924    
925     err = loop_init_xfer(lo, xfer, info);
926     if (err)
927     - goto exit;
928     + goto out_unfreeze;
929    
930     if (lo->lo_offset != info->lo_offset ||
931     lo->lo_sizelimit != info->lo_sizelimit) {
932     + /* kill_bdev should have truncated all the pages */
933     + if (lo->lo_device->bd_inode->i_mapping->nrpages) {
934     + err = -EAGAIN;
935     + pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
936     + __func__, lo->lo_number, lo->lo_file_name,
937     + lo->lo_device->bd_inode->i_mapping->nrpages);
938     + goto out_unfreeze;
939     + }
940     if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
941     err = -EFBIG;
942     - goto exit;
943     + goto out_unfreeze;
944     }
945     }
946    
947     @@ -1188,15 +1266,20 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
948     /* update dio if lo_offset or transfer is changed */
949     __loop_update_dio(lo, lo->use_dio);
950    
951     - exit:
952     +out_unfreeze:
953     blk_mq_unfreeze_queue(lo->lo_queue);
954    
955     if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
956     !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
957     lo->lo_flags |= LO_FLAGS_PARTSCAN;
958     lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
959     - loop_reread_partitions(lo, lo->lo_device);
960     + bdev = lo->lo_device;
961     + partscan = true;
962     }
963     +out_unlock:
964     + mutex_unlock(&loop_ctl_mutex);
965     + if (partscan)
966     + loop_reread_partitions(lo, bdev);
967    
968     return err;
969     }
970     @@ -1204,12 +1287,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
971     static int
972     loop_get_status(struct loop_device *lo, struct loop_info64 *info)
973     {
974     - struct file *file;
975     + struct path path;
976     struct kstat stat;
977     int ret;
978    
979     + ret = mutex_lock_killable(&loop_ctl_mutex);
980     + if (ret)
981     + return ret;
982     if (lo->lo_state != Lo_bound) {
983     - mutex_unlock(&lo->lo_ctl_mutex);
984     + mutex_unlock(&loop_ctl_mutex);
985     return -ENXIO;
986     }
987    
988     @@ -1228,17 +1314,17 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
989     lo->lo_encrypt_key_size);
990     }
991    
992     - /* Drop lo_ctl_mutex while we call into the filesystem. */
993     - file = get_file(lo->lo_backing_file);
994     - mutex_unlock(&lo->lo_ctl_mutex);
995     - ret = vfs_getattr(&file->f_path, &stat, STATX_INO,
996     - AT_STATX_SYNC_AS_STAT);
997     + /* Drop loop_ctl_mutex while we call into the filesystem. */
998     + path = lo->lo_backing_file->f_path;
999     + path_get(&path);
1000     + mutex_unlock(&loop_ctl_mutex);
1001     + ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
1002     if (!ret) {
1003     info->lo_device = huge_encode_dev(stat.dev);
1004     info->lo_inode = stat.ino;
1005     info->lo_rdevice = huge_encode_dev(stat.rdev);
1006     }
1007     - fput(file);
1008     + path_put(&path);
1009     return ret;
1010     }
1011    
1012     @@ -1322,10 +1408,8 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
1013     struct loop_info64 info64;
1014     int err;
1015    
1016     - if (!arg) {
1017     - mutex_unlock(&lo->lo_ctl_mutex);
1018     + if (!arg)
1019     return -EINVAL;
1020     - }
1021     err = loop_get_status(lo, &info64);
1022     if (!err)
1023     err = loop_info64_to_old(&info64, &info);
1024     @@ -1340,10 +1424,8 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
1025     struct loop_info64 info64;
1026     int err;
1027    
1028     - if (!arg) {
1029     - mutex_unlock(&lo->lo_ctl_mutex);
1030     + if (!arg)
1031     return -EINVAL;
1032     - }
1033     err = loop_get_status(lo, &info64);
1034     if (!err && copy_to_user(arg, &info64, sizeof(info64)))
1035     err = -EFAULT;
1036     @@ -1375,22 +1457,64 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
1037    
1038     static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
1039     {
1040     + int err = 0;
1041     +
1042     if (lo->lo_state != Lo_bound)
1043     return -ENXIO;
1044    
1045     if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
1046     return -EINVAL;
1047    
1048     + if (lo->lo_queue->limits.logical_block_size != arg) {
1049     + sync_blockdev(lo->lo_device);
1050     + kill_bdev(lo->lo_device);
1051     + }
1052     +
1053     blk_mq_freeze_queue(lo->lo_queue);
1054    
1055     + /* kill_bdev should have truncated all the pages */
1056     + if (lo->lo_queue->limits.logical_block_size != arg &&
1057     + lo->lo_device->bd_inode->i_mapping->nrpages) {
1058     + err = -EAGAIN;
1059     + pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
1060     + __func__, lo->lo_number, lo->lo_file_name,
1061     + lo->lo_device->bd_inode->i_mapping->nrpages);
1062     + goto out_unfreeze;
1063     + }
1064     +
1065     blk_queue_logical_block_size(lo->lo_queue, arg);
1066     blk_queue_physical_block_size(lo->lo_queue, arg);
1067     blk_queue_io_min(lo->lo_queue, arg);
1068     loop_update_dio(lo);
1069     -
1070     +out_unfreeze:
1071     blk_mq_unfreeze_queue(lo->lo_queue);
1072    
1073     - return 0;
1074     + return err;
1075     +}
1076     +
1077     +static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
1078     + unsigned long arg)
1079     +{
1080     + int err;
1081     +
1082     + err = mutex_lock_killable(&loop_ctl_mutex);
1083     + if (err)
1084     + return err;
1085     + switch (cmd) {
1086     + case LOOP_SET_CAPACITY:
1087     + err = loop_set_capacity(lo);
1088     + break;
1089     + case LOOP_SET_DIRECT_IO:
1090     + err = loop_set_dio(lo, arg);
1091     + break;
1092     + case LOOP_SET_BLOCK_SIZE:
1093     + err = loop_set_block_size(lo, arg);
1094     + break;
1095     + default:
1096     + err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
1097     + }
1098     + mutex_unlock(&loop_ctl_mutex);
1099     + return err;
1100     }
1101    
1102     static int lo_ioctl(struct block_device *bdev, fmode_t mode,
1103     @@ -1399,64 +1523,42 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
1104     struct loop_device *lo = bdev->bd_disk->private_data;
1105     int err;
1106    
1107     - err = mutex_lock_killable_nested(&lo->lo_ctl_mutex, 1);
1108     - if (err)
1109     - goto out_unlocked;
1110     -
1111     switch (cmd) {
1112     case LOOP_SET_FD:
1113     - err = loop_set_fd(lo, mode, bdev, arg);
1114     - break;
1115     + return loop_set_fd(lo, mode, bdev, arg);
1116     case LOOP_CHANGE_FD:
1117     - err = loop_change_fd(lo, bdev, arg);
1118     - break;
1119     + return loop_change_fd(lo, bdev, arg);
1120     case LOOP_CLR_FD:
1121     - /* loop_clr_fd would have unlocked lo_ctl_mutex on success */
1122     - err = loop_clr_fd(lo);
1123     - if (!err)
1124     - goto out_unlocked;
1125     - break;
1126     + return loop_clr_fd(lo);
1127     case LOOP_SET_STATUS:
1128     err = -EPERM;
1129     - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1130     + if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
1131     err = loop_set_status_old(lo,
1132     (struct loop_info __user *)arg);
1133     + }
1134     break;
1135     case LOOP_GET_STATUS:
1136     - err = loop_get_status_old(lo, (struct loop_info __user *) arg);
1137     - /* loop_get_status() unlocks lo_ctl_mutex */
1138     - goto out_unlocked;
1139     + return loop_get_status_old(lo, (struct loop_info __user *) arg);
1140     case LOOP_SET_STATUS64:
1141     err = -EPERM;
1142     - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1143     + if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
1144     err = loop_set_status64(lo,
1145     (struct loop_info64 __user *) arg);
1146     + }
1147     break;
1148     case LOOP_GET_STATUS64:
1149     - err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
1150     - /* loop_get_status() unlocks lo_ctl_mutex */
1151     - goto out_unlocked;
1152     + return loop_get_status64(lo, (struct loop_info64 __user *) arg);
1153     case LOOP_SET_CAPACITY:
1154     - err = -EPERM;
1155     - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1156     - err = loop_set_capacity(lo);
1157     - break;
1158     case LOOP_SET_DIRECT_IO:
1159     - err = -EPERM;
1160     - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1161     - err = loop_set_dio(lo, arg);
1162     - break;
1163     case LOOP_SET_BLOCK_SIZE:
1164     - err = -EPERM;
1165     - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1166     - err = loop_set_block_size(lo, arg);
1167     - break;
1168     + if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
1169     + return -EPERM;
1170     + /* Fall through */
1171     default:
1172     - err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
1173     + err = lo_simple_ioctl(lo, cmd, arg);
1174     + break;
1175     }
1176     - mutex_unlock(&lo->lo_ctl_mutex);
1177    
1178     -out_unlocked:
1179     return err;
1180     }
1181    
1182     @@ -1570,10 +1672,8 @@ loop_get_status_compat(struct loop_device *lo,
1183     struct loop_info64 info64;
1184     int err;
1185    
1186     - if (!arg) {
1187     - mutex_unlock(&lo->lo_ctl_mutex);
1188     + if (!arg)
1189     return -EINVAL;
1190     - }
1191     err = loop_get_status(lo, &info64);
1192     if (!err)
1193     err = loop_info64_to_compat(&info64, arg);
1194     @@ -1588,20 +1688,12 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
1195    
1196     switch(cmd) {
1197     case LOOP_SET_STATUS:
1198     - err = mutex_lock_killable(&lo->lo_ctl_mutex);
1199     - if (!err) {
1200     - err = loop_set_status_compat(lo,
1201     - (const struct compat_loop_info __user *)arg);
1202     - mutex_unlock(&lo->lo_ctl_mutex);
1203     - }
1204     + err = loop_set_status_compat(lo,
1205     + (const struct compat_loop_info __user *)arg);
1206     break;
1207     case LOOP_GET_STATUS:
1208     - err = mutex_lock_killable(&lo->lo_ctl_mutex);
1209     - if (!err) {
1210     - err = loop_get_status_compat(lo,
1211     - (struct compat_loop_info __user *)arg);
1212     - /* loop_get_status() unlocks lo_ctl_mutex */
1213     - }
1214     + err = loop_get_status_compat(lo,
1215     + (struct compat_loop_info __user *)arg);
1216     break;
1217     case LOOP_SET_CAPACITY:
1218     case LOOP_CLR_FD:
1219     @@ -1625,9 +1717,11 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
1220     static int lo_open(struct block_device *bdev, fmode_t mode)
1221     {
1222     struct loop_device *lo;
1223     - int err = 0;
1224     + int err;
1225    
1226     - mutex_lock(&loop_index_mutex);
1227     + err = mutex_lock_killable(&loop_ctl_mutex);
1228     + if (err)
1229     + return err;
1230     lo = bdev->bd_disk->private_data;
1231     if (!lo) {
1232     err = -ENXIO;
1233     @@ -1636,26 +1730,30 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
1234    
1235     atomic_inc(&lo->lo_refcnt);
1236     out:
1237     - mutex_unlock(&loop_index_mutex);
1238     + mutex_unlock(&loop_ctl_mutex);
1239     return err;
1240     }
1241    
1242     -static void __lo_release(struct loop_device *lo)
1243     +static void lo_release(struct gendisk *disk, fmode_t mode)
1244     {
1245     - int err;
1246     + struct loop_device *lo;
1247    
1248     + mutex_lock(&loop_ctl_mutex);
1249     + lo = disk->private_data;
1250     if (atomic_dec_return(&lo->lo_refcnt))
1251     - return;
1252     + goto out_unlock;
1253    
1254     - mutex_lock(&lo->lo_ctl_mutex);
1255     if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
1256     + if (lo->lo_state != Lo_bound)
1257     + goto out_unlock;
1258     + lo->lo_state = Lo_rundown;
1259     + mutex_unlock(&loop_ctl_mutex);
1260     /*
1261     * In autoclear mode, stop the loop thread
1262     * and remove configuration after last close.
1263     */
1264     - err = loop_clr_fd(lo);
1265     - if (!err)
1266     - return;
1267     + __loop_clr_fd(lo, true);
1268     + return;
1269     } else if (lo->lo_state == Lo_bound) {
1270     /*
1271     * Otherwise keep thread (if running) and config,
1272     @@ -1665,14 +1763,8 @@ static void __lo_release(struct loop_device *lo)
1273     blk_mq_unfreeze_queue(lo->lo_queue);
1274     }
1275    
1276     - mutex_unlock(&lo->lo_ctl_mutex);
1277     -}
1278     -
1279     -static void lo_release(struct gendisk *disk, fmode_t mode)
1280     -{
1281     - mutex_lock(&loop_index_mutex);
1282     - __lo_release(disk->private_data);
1283     - mutex_unlock(&loop_index_mutex);
1284     +out_unlock:
1285     + mutex_unlock(&loop_ctl_mutex);
1286     }
1287    
1288     static const struct block_device_operations lo_fops = {
1289     @@ -1711,10 +1803,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
1290     struct loop_device *lo = ptr;
1291     struct loop_func_table *xfer = data;
1292    
1293     - mutex_lock(&lo->lo_ctl_mutex);
1294     + mutex_lock(&loop_ctl_mutex);
1295     if (lo->lo_encryption == xfer)
1296     loop_release_xfer(lo);
1297     - mutex_unlock(&lo->lo_ctl_mutex);
1298     + mutex_unlock(&loop_ctl_mutex);
1299     return 0;
1300     }
1301    
1302     @@ -1895,7 +1987,6 @@ static int loop_add(struct loop_device **l, int i)
1303     if (!part_shift)
1304     disk->flags |= GENHD_FL_NO_PART_SCAN;
1305     disk->flags |= GENHD_FL_EXT_DEVT;
1306     - mutex_init(&lo->lo_ctl_mutex);
1307     atomic_set(&lo->lo_refcnt, 0);
1308     lo->lo_number = i;
1309     spin_lock_init(&lo->lo_lock);
1310     @@ -1974,7 +2065,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
1311     struct kobject *kobj;
1312     int err;
1313    
1314     - mutex_lock(&loop_index_mutex);
1315     + mutex_lock(&loop_ctl_mutex);
1316     err = loop_lookup(&lo, MINOR(dev) >> part_shift);
1317     if (err < 0)
1318     err = loop_add(&lo, MINOR(dev) >> part_shift);
1319     @@ -1982,7 +2073,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
1320     kobj = NULL;
1321     else
1322     kobj = get_disk_and_module(lo->lo_disk);
1323     - mutex_unlock(&loop_index_mutex);
1324     + mutex_unlock(&loop_ctl_mutex);
1325    
1326     *part = 0;
1327     return kobj;
1328     @@ -1992,9 +2083,13 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
1329     unsigned long parm)
1330     {
1331     struct loop_device *lo;
1332     - int ret = -ENOSYS;
1333     + int ret;
1334     +
1335     + ret = mutex_lock_killable(&loop_ctl_mutex);
1336     + if (ret)
1337     + return ret;
1338    
1339     - mutex_lock(&loop_index_mutex);
1340     + ret = -ENOSYS;
1341     switch (cmd) {
1342     case LOOP_CTL_ADD:
1343     ret = loop_lookup(&lo, parm);
1344     @@ -2008,21 +2103,15 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
1345     ret = loop_lookup(&lo, parm);
1346     if (ret < 0)
1347     break;
1348     - ret = mutex_lock_killable(&lo->lo_ctl_mutex);
1349     - if (ret)
1350     - break;
1351     if (lo->lo_state != Lo_unbound) {
1352     ret = -EBUSY;
1353     - mutex_unlock(&lo->lo_ctl_mutex);
1354     break;
1355     }
1356     if (atomic_read(&lo->lo_refcnt) > 0) {
1357     ret = -EBUSY;
1358     - mutex_unlock(&lo->lo_ctl_mutex);
1359     break;
1360     }
1361     lo->lo_disk->private_data = NULL;
1362     - mutex_unlock(&lo->lo_ctl_mutex);
1363     idr_remove(&loop_index_idr, lo->lo_number);
1364     loop_remove(lo);
1365     break;
1366     @@ -2032,7 +2121,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
1367     break;
1368     ret = loop_add(&lo, -1);
1369     }
1370     - mutex_unlock(&loop_index_mutex);
1371     + mutex_unlock(&loop_ctl_mutex);
1372    
1373     return ret;
1374     }
1375     @@ -2116,10 +2205,10 @@ static int __init loop_init(void)
1376     THIS_MODULE, loop_probe, NULL, NULL);
1377    
1378     /* pre-create number of devices given by config or max_loop */
1379     - mutex_lock(&loop_index_mutex);
1380     + mutex_lock(&loop_ctl_mutex);
1381     for (i = 0; i < nr; i++)
1382     loop_add(&lo, i);
1383     - mutex_unlock(&loop_index_mutex);
1384     + mutex_unlock(&loop_ctl_mutex);
1385    
1386     printk(KERN_INFO "loop: module loaded\n");
1387     return 0;
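
The recurring pattern in the loop.c rework above, reduced to a sketch (the example_* names are illustrative): the per-device lo_ctl_mutex is replaced by the single global loop_ctl_mutex, state changes happen under that lock, and reference-dropping calls such as fput() and the partition rescan are deferred until after the unlock, because the final fput() can take bd_mutex and bd_mutex must nest outside the loop lock:

    #include <linux/file.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_ctl_mutex);

    static void example_detach(struct file **slot)
    {
            struct file *filp;

            mutex_lock(&example_ctl_mutex);
            filp = *slot;             /* detach under the lock */
            *slot = NULL;
            mutex_unlock(&example_ctl_mutex);

            if (filp)
                    fput(filp);       /* drop the reference outside it */
    }
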
1388     diff --git a/drivers/block/loop.h b/drivers/block/loop.h
1389     index 4d42c7af7de7..af75a5ee4094 100644
1390     --- a/drivers/block/loop.h
1391     +++ b/drivers/block/loop.h
1392     @@ -54,7 +54,6 @@ struct loop_device {
1393    
1394     spinlock_t lo_lock;
1395     int lo_state;
1396     - struct mutex lo_ctl_mutex;
1397     struct kthread_worker worker;
1398     struct task_struct *worker_task;
1399     bool use_dio;
1400     diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
1401     index 4d4d6129ff66..c964315c7b0b 100644
1402     --- a/drivers/block/nbd.c
1403     +++ b/drivers/block/nbd.c
1404     @@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd)
1405     blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
1406     set_capacity(nbd->disk, config->bytesize >> 9);
1407     if (bdev) {
1408     - if (bdev->bd_disk)
1409     + if (bdev->bd_disk) {
1410     bd_set_size(bdev, config->bytesize);
1411     - else
1412     + set_blocksize(bdev, config->blksize);
1413     + } else
1414     bdev->bd_invalidated = 1;
1415     bdput(bdev);
1416     }
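
A short sketch of the resize pattern the nbd hunk completes (example_resize() is illustrative): updating only the bdev size leaves the page cache using the previous soft blocksize, so both must be refreshed together:

    #include <linux/fs.h>

    static void example_resize(struct block_device *bdev,
                               loff_t bytesize, unsigned int blksize)
    {
            bd_set_size(bdev, bytesize);     /* device size in bytes */
            set_blocksize(bdev, blksize);    /* cached soft blocksize */
    }
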
1417     diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
1418     index caa98a7fe392..db330a0106b2 100644
1419     --- a/drivers/crypto/Kconfig
1420     +++ b/drivers/crypto/Kconfig
1421     @@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU
1422     depends on ARCH_BCM_IPROC
1423     depends on MAILBOX
1424     default m
1425     + select CRYPTO_AUTHENC
1426     select CRYPTO_DES
1427     select CRYPTO_MD5
1428     select CRYPTO_SHA1
1429     diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
1430     index 2d1f1db9f807..cd464637b0cb 100644
1431     --- a/drivers/crypto/bcm/cipher.c
1432     +++ b/drivers/crypto/bcm/cipher.c
1433     @@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
1434     struct spu_hw *spu = &iproc_priv.spu;
1435     struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
1436     struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
1437     - struct rtattr *rta = (void *)key;
1438     - struct crypto_authenc_key_param *param;
1439     - const u8 *origkey = key;
1440     - const unsigned int origkeylen = keylen;
1441     -
1442     - int ret = 0;
1443     + struct crypto_authenc_keys keys;
1444     + int ret;
1445    
1446     flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
1447     keylen);
1448     flow_dump(" key: ", key, keylen);
1449    
1450     - if (!RTA_OK(rta, keylen))
1451     - goto badkey;
1452     - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
1453     - goto badkey;
1454     - if (RTA_PAYLOAD(rta) < sizeof(*param))
1455     + ret = crypto_authenc_extractkeys(&keys, key, keylen);
1456     + if (ret)
1457     goto badkey;
1458    
1459     - param = RTA_DATA(rta);
1460     - ctx->enckeylen = be32_to_cpu(param->enckeylen);
1461     -
1462     - key += RTA_ALIGN(rta->rta_len);
1463     - keylen -= RTA_ALIGN(rta->rta_len);
1464     -
1465     - if (keylen < ctx->enckeylen)
1466     - goto badkey;
1467     - if (ctx->enckeylen > MAX_KEY_SIZE)
1468     + if (keys.enckeylen > MAX_KEY_SIZE ||
1469     + keys.authkeylen > MAX_KEY_SIZE)
1470     goto badkey;
1471    
1472     - ctx->authkeylen = keylen - ctx->enckeylen;
1473     -
1474     - if (ctx->authkeylen > MAX_KEY_SIZE)
1475     - goto badkey;
1476     + ctx->enckeylen = keys.enckeylen;
1477     + ctx->authkeylen = keys.authkeylen;
1478    
1479     - memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
1480     + memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1481     /* May end up padding auth key. So make sure it's zeroed. */
1482     memset(ctx->authkey, 0, sizeof(ctx->authkey));
1483     - memcpy(ctx->authkey, key, ctx->authkeylen);
1484     + memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1485    
1486     switch (ctx->alg->cipher_info.alg) {
1487     case CIPHER_ALG_DES:
1488     @@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
1489     u32 tmp[DES_EXPKEY_WORDS];
1490     u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
1491    
1492     - if (des_ekey(tmp, key) == 0) {
1493     + if (des_ekey(tmp, keys.enckey) == 0) {
1494     if (crypto_aead_get_flags(cipher) &
1495     CRYPTO_TFM_REQ_WEAK_KEY) {
1496     crypto_aead_set_flags(cipher, flags);
1497     @@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
1498     break;
1499     case CIPHER_ALG_3DES:
1500     if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
1501     - const u32 *K = (const u32 *)key;
1502     + const u32 *K = (const u32 *)keys.enckey;
1503     u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
1504    
1505     if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
1506     @@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
1507     ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
1508     ctx->fallback_cipher->base.crt_flags |=
1509     tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
1510     - ret =
1511     - crypto_aead_setkey(ctx->fallback_cipher, origkey,
1512     - origkeylen);
1513     + ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
1514     if (ret) {
1515     flow_log(" fallback setkey() returned:%d\n", ret);
1516     tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
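
This conversion, like the ccree one below, replaces open-coded rtattr parsing (which inherited the alignment bug fixed in crypto/authenc.c above) with the shared helper. The shape of the replacement as a sketch (example_setkey() is illustrative):

    #include <crypto/authenc.h>

    static int example_setkey(const u8 *key, unsigned int keylen)
    {
            struct crypto_authenc_keys keys;
            int err;

            err = crypto_authenc_extractkeys(&keys, key, keylen);
            if (err)
                    return err;
            /* keys.authkey/authkeylen and keys.enckey/enckeylen point
             * into the caller's blob; bounds-check, then copy. */
            return 0;
    }
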
1517     diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
1518     index 46924affa0bd..212fd0b3b8dd 100644
1519     --- a/drivers/crypto/caam/caamhash.c
1520     +++ b/drivers/crypto/caam/caamhash.c
1521     @@ -1071,13 +1071,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)
1522    
1523     desc = edesc->hw_desc;
1524    
1525     - state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
1526     - if (dma_mapping_error(jrdev, state->buf_dma)) {
1527     - dev_err(jrdev, "unable to map src\n");
1528     - goto unmap;
1529     - }
1530     + if (buflen) {
1531     + state->buf_dma = dma_map_single(jrdev, buf, buflen,
1532     + DMA_TO_DEVICE);
1533     + if (dma_mapping_error(jrdev, state->buf_dma)) {
1534     + dev_err(jrdev, "unable to map src\n");
1535     + goto unmap;
1536     + }
1537    
1538     - append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1539     + append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1540     + }
1541    
1542     edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1543     digestsize);
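
The buflen guard exists because a zero-length dma_map_single() is invalid (and DMA-API debugging warns about it), so an empty final buffer must skip both the mapping and its descriptor entry. A sketch of the guard (example_map() is illustrative):

    #include <linux/dma-mapping.h>

    static dma_addr_t example_map(struct device *dev, void *buf, size_t len)
    {
            if (!len)
                    return 0;  /* caller also skips append_seq_in_ptr() */
            return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    }
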
1544     diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
1545     index 01b82b82f8b8..5852d29ae2da 100644
1546     --- a/drivers/crypto/ccree/cc_aead.c
1547     +++ b/drivers/crypto/ccree/cc_aead.c
1548     @@ -540,13 +540,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1549     unsigned int keylen)
1550     {
1551     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1552     - struct rtattr *rta = (struct rtattr *)key;
1553     struct cc_crypto_req cc_req = {};
1554     - struct crypto_authenc_key_param *param;
1555     struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
1556     - int rc = -EINVAL;
1557     unsigned int seq_len = 0;
1558     struct device *dev = drvdata_to_dev(ctx->drvdata);
1559     + const u8 *enckey, *authkey;
1560     + int rc;
1561    
1562     dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
1563     ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
1564     @@ -554,35 +553,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1565     /* STAT_PHASE_0: Init and sanity checks */
1566    
1567     if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
1568     - if (!RTA_OK(rta, keylen))
1569     - goto badkey;
1570     - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
1571     - goto badkey;
1572     - if (RTA_PAYLOAD(rta) < sizeof(*param))
1573     - goto badkey;
1574     - param = RTA_DATA(rta);
1575     - ctx->enc_keylen = be32_to_cpu(param->enckeylen);
1576     - key += RTA_ALIGN(rta->rta_len);
1577     - keylen -= RTA_ALIGN(rta->rta_len);
1578     - if (keylen < ctx->enc_keylen)
1579     + struct crypto_authenc_keys keys;
1580     +
1581     + rc = crypto_authenc_extractkeys(&keys, key, keylen);
1582     + if (rc)
1583     goto badkey;
1584     - ctx->auth_keylen = keylen - ctx->enc_keylen;
1585     + enckey = keys.enckey;
1586     + authkey = keys.authkey;
1587     + ctx->enc_keylen = keys.enckeylen;
1588     + ctx->auth_keylen = keys.authkeylen;
1589    
1590     if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1591     /* the nonce is stored in bytes at end of key */
1592     + rc = -EINVAL;
1593     if (ctx->enc_keylen <
1594     (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
1595     goto badkey;
1596     /* Copy nonce from last 4 bytes in CTR key to
1597     * first 4 bytes in CTR IV
1598     */
1599     - memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
1600     - ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
1601     - CTR_RFC3686_NONCE_SIZE);
1602     + memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
1603     + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
1604     /* Set CTR key size */
1605     ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
1606     }
1607     } else { /* non-authenc - has just one key */
1608     + enckey = key;
1609     + authkey = NULL;
1610     ctx->enc_keylen = keylen;
1611     ctx->auth_keylen = 0;
1612     }
1613     @@ -594,13 +591,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1614     /* STAT_PHASE_1: Copy key to ctx */
1615    
1616     /* Get key material */
1617     - memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
1618     + memcpy(ctx->enckey, enckey, ctx->enc_keylen);
1619     if (ctx->enc_keylen == 24)
1620     memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
1621     if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
1622     - memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
1623     + memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
1624     + ctx->auth_keylen);
1625     } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
1626     - rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
1627     + rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
1628     if (rc)
1629     goto badkey;
1630     }
1631     diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
1632     index 6988012deca4..f4f3e9a5851e 100644
1633     --- a/drivers/crypto/talitos.c
1634     +++ b/drivers/crypto/talitos.c
1635     @@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1636     struct talitos_private *priv = dev_get_drvdata(dev);
1637     bool is_sec1 = has_ftr_sec1(priv);
1638     int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1639     - void *err;
1640    
1641     if (cryptlen + authsize > max_len) {
1642     dev_err(dev, "length exceeds h/w max limit\n");
1643     return ERR_PTR(-EINVAL);
1644     }
1645    
1646     - if (ivsize)
1647     - iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1648     -
1649     if (!dst || dst == src) {
1650     src_len = assoclen + cryptlen + authsize;
1651     src_nents = sg_nents_for_len(src, src_len);
1652     if (src_nents < 0) {
1653     dev_err(dev, "Invalid number of src SG.\n");
1654     - err = ERR_PTR(-EINVAL);
1655     - goto error_sg;
1656     + return ERR_PTR(-EINVAL);
1657     }
1658     src_nents = (src_nents == 1) ? 0 : src_nents;
1659     dst_nents = dst ? src_nents : 0;
1660     @@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1661     src_nents = sg_nents_for_len(src, src_len);
1662     if (src_nents < 0) {
1663     dev_err(dev, "Invalid number of src SG.\n");
1664     - err = ERR_PTR(-EINVAL);
1665     - goto error_sg;
1666     + return ERR_PTR(-EINVAL);
1667     }
1668     src_nents = (src_nents == 1) ? 0 : src_nents;
1669     dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1670     dst_nents = sg_nents_for_len(dst, dst_len);
1671     if (dst_nents < 0) {
1672     dev_err(dev, "Invalid number of dst SG.\n");
1673     - err = ERR_PTR(-EINVAL);
1674     - goto error_sg;
1675     + return ERR_PTR(-EINVAL);
1676     }
1677     dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1678     }
1679     @@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1680     /* if its a ahash, add space for a second desc next to the first one */
1681     if (is_sec1 && !dst)
1682     alloc_len += sizeof(struct talitos_desc);
1683     + alloc_len += ivsize;
1684    
1685     edesc = kmalloc(alloc_len, GFP_DMA | flags);
1686     - if (!edesc) {
1687     - err = ERR_PTR(-ENOMEM);
1688     - goto error_sg;
1689     + if (!edesc)
1690     + return ERR_PTR(-ENOMEM);
1691     + if (ivsize) {
1692     + iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1693     + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1694     }
1695     memset(&edesc->desc, 0, sizeof(edesc->desc));
1696    
1697     @@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1698     DMA_BIDIRECTIONAL);
1699     }
1700     return edesc;
1701     -error_sg:
1702     - if (iv_dma)
1703     - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1704     - return err;
1705     }
1706    
1707     static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
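
Besides dropping the error_sg unwind, the talitos hunk changes when the IV is DMA-mapped: the IV is first copied into spare bytes reserved at the tail of the single edesc allocation, and only then mapped, so a failure in any earlier check simply returns with nothing to unmap. A hedged userspace sketch of that shape (names invented, the DMA mapping reduced to a comment):

#include <stdlib.h>
#include <string.h>

/* One allocation, IV stashed in spare bytes at the tail.  If the
 * allocation fails, nothing has been DMA-mapped yet, so the caller
 * can just return; there is no partial state to unwind. */
static void *alloc_desc_with_iv(size_t desc_len, const void *iv, size_t ivsize)
{
        size_t alloc_len = desc_len + ivsize;   /* reserve room for the IV */
        unsigned char *p = malloc(alloc_len);

        if (!p)
                return NULL;
        if (ivsize)
                memcpy(p + alloc_len - ivsize, iv, ivsize);
        /* the driver would dma_map_single() the tail copy at this point */
        return p;
}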
1708     diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
1709     index d5b7f315098c..087470ad6436 100644
1710     --- a/drivers/gpu/drm/drm_atomic_uapi.c
1711     +++ b/drivers/gpu/drm/drm_atomic_uapi.c
1712     @@ -1275,12 +1275,11 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
1713     (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
1714     return -EINVAL;
1715    
1716     - drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1717     -
1718     state = drm_atomic_state_alloc(dev);
1719     if (!state)
1720     return -ENOMEM;
1721    
1722     + drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1723     state->acquire_ctx = &ctx;
1724     state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
1725    
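
This hunk, like the drm_mode_object one further down, moves drm_modeset_acquire_init() to after the allocation that can fail, so the -ENOMEM return no longer leaves an initialized acquire context behind with no matching fini. A minimal standalone illustration of the ordering rule, with made-up names:

#include <stdlib.h>

struct ctx { int held; };
static void ctx_init(struct ctx *c) { c->held = 1; }
static void ctx_fini(struct ctx *c) { c->held = 0; }

static int do_op(void)
{
        struct ctx ctx;
        void *state = malloc(64);

        if (!state)
                return -1;      /* ctx never initialized: nothing to unwind */

        ctx_init(&ctx);         /* from here on, error paths must ctx_fini() */
        /* ... work on state under ctx ... */
        ctx_fini(&ctx);
        free(state);
        return 0;
}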
1726     diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
1727     index b10ed61526a5..6950e365135c 100644
1728     --- a/drivers/gpu/drm/drm_fb_helper.c
1729     +++ b/drivers/gpu/drm/drm_fb_helper.c
1730     @@ -1690,9 +1690,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
1731     struct drm_fb_helper *fb_helper = info->par;
1732     struct drm_framebuffer *fb = fb_helper->fb;
1733    
1734     - if (var->pixclock != 0 || in_dbg_master())
1735     + if (in_dbg_master())
1736     return -EINVAL;
1737    
1738     + if (var->pixclock != 0) {
1739     + DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
1740     + var->pixclock = 0;
1741     + }
1742     +
1743     /*
1744     * Changes struct fb_var_screeninfo are currently not pushed back
1745     * to KMS, hence fail if different settings are requested.
1746     diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
1747     index be8b754eaf60..9bc3654c1c7f 100644
1748     --- a/drivers/gpu/drm/drm_mode_object.c
1749     +++ b/drivers/gpu/drm/drm_mode_object.c
1750     @@ -458,11 +458,11 @@ static int set_property_atomic(struct drm_mode_object *obj,
1751     struct drm_modeset_acquire_ctx ctx;
1752     int ret;
1753    
1754     - drm_modeset_acquire_init(&ctx, 0);
1755     -
1756     state = drm_atomic_state_alloc(dev);
1757     if (!state)
1758     return -ENOMEM;
1759     +
1760     + drm_modeset_acquire_init(&ctx, 0);
1761     state->acquire_ctx = &ctx;
1762     retry:
1763     if (prop == state->dev->mode_config.dpms_property) {
1764     diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
1765     index c1072143da1d..e70c450427dc 100644
1766     --- a/drivers/gpu/drm/i915/gvt/kvmgt.c
1767     +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
1768     @@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
1769     {
1770     unsigned int index;
1771     u64 virtaddr;
1772     - unsigned long req_size, pgoff = 0;
1773     + unsigned long req_size, pgoff, req_start;
1774     pgprot_t pg_prot;
1775     struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1776    
1777     @@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
1778     pg_prot = vma->vm_page_prot;
1779     virtaddr = vma->vm_start;
1780     req_size = vma->vm_end - vma->vm_start;
1781     - pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
1782     + pgoff = vma->vm_pgoff &
1783     + ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
1784     + req_start = pgoff << PAGE_SHIFT;
1785     +
1786     + if (!intel_vgpu_in_aperture(vgpu, req_start))
1787     + return -EINVAL;
1788     + if (req_start + req_size >
1789     + vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
1790     + return -EINVAL;
1791     +
1792     + pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
1793    
1794     return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
1795     }
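
The kvmgt hunk stops assuming an mmap offset of zero and instead derives the requested start from vm_pgoff, rejecting any range that is not fully inside the vGPU aperture before calling remap_pfn_range(). A hedged standalone sketch of that validation; mmap_range_ok() is an invented stand-in for the intel_vgpu_in_aperture() check plus the end-of-range check:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12

static bool mmap_range_ok(uint64_t pgoff_low_bits, uint64_t req_size,
                          uint64_t region_offset, uint64_t region_size)
{
        uint64_t req_start = pgoff_low_bits << PAGE_SHIFT;

        /* the start must land inside the region ... */
        if (req_start < region_offset ||
            req_start >= region_offset + region_size)
                return false;
        /* ... and the end must not run past it */
        if (req_start + req_size > region_offset + region_size)
                return false;
        return true;
}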
1796     diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
1797     index 96ac1458a59c..37f93022a106 100644
1798     --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
1799     +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
1800     @@ -113,8 +113,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
1801     child_count++;
1802     ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
1803     &panel, &bridge);
1804     - if (!ret)
1805     + if (!ret) {
1806     + of_node_put(endpoint);
1807     break;
1808     + }
1809     }
1810    
1811     of_node_put(port);
1812     diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
1813     index 7041007396ae..e3bcea4b4891 100644
1814     --- a/drivers/gpu/drm/vkms/vkms_plane.c
1815     +++ b/drivers/gpu/drm/vkms/vkms_plane.c
1816     @@ -23,8 +23,11 @@ vkms_plane_duplicate_state(struct drm_plane *plane)
1817     return NULL;
1818    
1819     crc_data = kzalloc(sizeof(*crc_data), GFP_KERNEL);
1820     - if (WARN_ON(!crc_data))
1821     - DRM_INFO("Couldn't allocate crc_data");
1822     + if (!crc_data) {
1823     + DRM_DEBUG_KMS("Couldn't allocate crc_data\n");
1824     + kfree(vkms_state);
1825     + return NULL;
1826     + }
1827    
1828     vkms_state->crc_data = crc_data;
1829    
1830     diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
1831     index 573399e3ccc1..ff6468e7fe79 100644
1832     --- a/drivers/infiniband/core/nldev.c
1833     +++ b/drivers/infiniband/core/nldev.c
1834     @@ -580,10 +580,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
1835     if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
1836     atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
1837     goto err;
1838     - if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
1839     - nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
1840     - pd->unsafe_global_rkey))
1841     - goto err;
1842    
1843     if (fill_res_name_pid(msg, res))
1844     goto err;
1845     diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
1846     index 42b8685c997e..3c633ab58052 100644
1847     --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
1848     +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
1849     @@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
1850    
1851     static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
1852     {
1853     - return (enum pvrdma_wr_opcode)op;
1854     + switch (op) {
1855     + case IB_WR_RDMA_WRITE:
1856     + return PVRDMA_WR_RDMA_WRITE;
1857     + case IB_WR_RDMA_WRITE_WITH_IMM:
1858     + return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
1859     + case IB_WR_SEND:
1860     + return PVRDMA_WR_SEND;
1861     + case IB_WR_SEND_WITH_IMM:
1862     + return PVRDMA_WR_SEND_WITH_IMM;
1863     + case IB_WR_RDMA_READ:
1864     + return PVRDMA_WR_RDMA_READ;
1865     + case IB_WR_ATOMIC_CMP_AND_SWP:
1866     + return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
1867     + case IB_WR_ATOMIC_FETCH_AND_ADD:
1868     + return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
1869     + case IB_WR_LSO:
1870     + return PVRDMA_WR_LSO;
1871     + case IB_WR_SEND_WITH_INV:
1872     + return PVRDMA_WR_SEND_WITH_INV;
1873     + case IB_WR_RDMA_READ_WITH_INV:
1874     + return PVRDMA_WR_RDMA_READ_WITH_INV;
1875     + case IB_WR_LOCAL_INV:
1876     + return PVRDMA_WR_LOCAL_INV;
1877     + case IB_WR_REG_MR:
1878     + return PVRDMA_WR_FAST_REG_MR;
1879     + case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
1880     + return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
1881     + case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
1882     + return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
1883     + case IB_WR_REG_SIG_MR:
1884     + return PVRDMA_WR_REG_SIG_MR;
1885     + default:
1886     + return PVRDMA_WR_ERROR;
1887     + }
1888     }
1889    
1890     static inline enum ib_wc_status pvrdma_wc_status_to_ib(
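
The old one-line cast silently assumed the IB and PVRDMA opcode enums stay value-for-value identical forever; the explicit switch maps each opcode and returns PVRDMA_WR_ERROR for anything unknown, which the pvrdma_qp.c hunk below then rejects with -EINVAL. A toy standalone version of why the cast is fragile: (enum hw_op)IB_OP_NEW would compile but hand the hardware a meaningless value, while the switch fails closed.

enum ib_op { IB_OP_A = 0, IB_OP_B = 1, IB_OP_NEW = 2 };
enum hw_op { HW_OP_A = 0, HW_OP_B = 1, HW_OP_ERROR = 0xff };

static enum hw_op ib_to_hw(enum ib_op op)
{
        switch (op) {
        case IB_OP_A: return HW_OP_A;
        case IB_OP_B: return HW_OP_B;
        default:      return HW_OP_ERROR;  /* unknown: caller rejects the WR */
        }
}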
1891     diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
1892     index cf22f57a9f0d..418d9ab4ea7f 100644
1893     --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
1894     +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
1895     @@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1896     wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1897     wqe_hdr->ex.imm_data = wr->ex.imm_data;
1898    
1899     + if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
1900     + *bad_wr = wr;
1901     + ret = -EINVAL;
1902     + goto out;
1903     + }
1904     +
1905     switch (qp->ibqp.qp_type) {
1906     case IB_QPT_GSI:
1907     case IB_QPT_UD:
1908     diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
1909     index 99f736c81286..fa77e2ae4ec4 100644
1910     --- a/drivers/media/common/videobuf2/videobuf2-core.c
1911     +++ b/drivers/media/common/videobuf2/videobuf2-core.c
1912     @@ -2146,9 +2146,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
1913     return -EINVAL;
1914     }
1915     }
1916     +
1917     + mutex_lock(&q->mmap_lock);
1918     +
1919     if (vb2_fileio_is_active(q)) {
1920     dprintk(1, "mmap: file io in progress\n");
1921     - return -EBUSY;
1922     + ret = -EBUSY;
1923     + goto unlock;
1924     }
1925    
1926     /*
1927     @@ -2156,7 +2160,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
1928     */
1929     ret = __find_plane_by_offset(q, off, &buffer, &plane);
1930     if (ret)
1931     - return ret;
1932     + goto unlock;
1933    
1934     vb = q->bufs[buffer];
1935    
1936     @@ -2169,11 +2173,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
1937     if (length < (vma->vm_end - vma->vm_start)) {
1938     dprintk(1,
1939     "MMAP invalid, as it would overflow buffer length\n");
1940     - return -EINVAL;
1941     + ret = -EINVAL;
1942     + goto unlock;
1943     }
1944    
1945     - mutex_lock(&q->mmap_lock);
1946     ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
1947     +
1948     +unlock:
1949     mutex_unlock(&q->mmap_lock);
1950     if (ret)
1951     return ret;
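
The vb2_mmap change widens the mmap_lock scope so the file-io and offset checks happen under the same lock as the mapping itself, and funnels every failure through one unlock label. A hedged pthread sketch of that single-exit shape (error values and parameters are illustrative):

#include <pthread.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_mmap(int fileio_active, int bad_offset)
{
        int ret = 0;

        pthread_mutex_lock(&q_lock);
        if (fileio_active) {
                ret = -16;      /* -EBUSY; was an unlocked early return */
                goto unlock;
        }
        if (bad_offset) {
                ret = -22;      /* -EINVAL */
                goto unlock;
        }
        /* ... perform the mapping while still holding the lock ... */
unlock:
        pthread_mutex_unlock(&q_lock);
        return ret;
}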
1952     diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
1953     index f938a2c54314..2d1ae83e2fde 100644
1954     --- a/drivers/media/platform/vim2m.c
1955     +++ b/drivers/media/platform/vim2m.c
1956     @@ -809,7 +809,9 @@ static void vim2m_stop_streaming(struct vb2_queue *q)
1957     struct vb2_v4l2_buffer *vbuf;
1958     unsigned long flags;
1959    
1960     - cancel_delayed_work_sync(&dev->work_run);
1961     + if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx)
1962     + cancel_delayed_work_sync(&dev->work_run);
1963     +
1964     for (;;) {
1965     if (V4L2_TYPE_IS_OUTPUT(q->type))
1966     vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
1967     diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
1968     index eebfff2126be..46e46e34a9e5 100644
1969     --- a/drivers/media/platform/vivid/vivid-kthread-cap.c
1970     +++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
1971     @@ -873,8 +873,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
1972     "%s-vid-cap", dev->v4l2_dev.name);
1973    
1974     if (IS_ERR(dev->kthread_vid_cap)) {
1975     + int err = PTR_ERR(dev->kthread_vid_cap);
1976     +
1977     + dev->kthread_vid_cap = NULL;
1978     v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
1979     - return PTR_ERR(dev->kthread_vid_cap);
1980     + return err;
1981     }
1982     *pstreaming = true;
1983     vivid_grab_controls(dev, true);
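
Both vivid hunks (this one and the vid-out one below) enforce the same ordering: capture the error code before clearing the stale kthread pointer, since clearing first would make the function return PTR_ERR(NULL), i.e. 0, and falsely report success. A standalone model with simplified IS_ERR()/PTR_ERR() stand-ins:

#include <stddef.h>

#define MAX_ERRNO 4095
#define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p) ((long)(p))          /* simplified kernel-style macros */

struct dev { void *kthread; };

static long start_thread(struct dev *d, void *thread_or_errptr)
{
        d->kthread = thread_or_errptr;
        if (IS_ERR(d->kthread)) {
                long err = PTR_ERR(d->kthread); /* capture first ...        */
                d->kthread = NULL;              /* ... then clear stale ptr */
                return err;                     /* not PTR_ERR(NULL) == 0   */
        }
        return 0;
}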
1984     diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
1985     index 5a14810eeb69..ce5bcda2348c 100644
1986     --- a/drivers/media/platform/vivid/vivid-kthread-out.c
1987     +++ b/drivers/media/platform/vivid/vivid-kthread-out.c
1988     @@ -244,8 +244,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
1989     "%s-vid-out", dev->v4l2_dev.name);
1990    
1991     if (IS_ERR(dev->kthread_vid_out)) {
1992     + int err = PTR_ERR(dev->kthread_vid_out);
1993     +
1994     + dev->kthread_vid_out = NULL;
1995     v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
1996     - return PTR_ERR(dev->kthread_vid_out);
1997     + return err;
1998     }
1999     *pstreaming = true;
2000     vivid_grab_controls(dev, true);
2001     diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
2002     index 9645a91b8782..661f4015fba1 100644
2003     --- a/drivers/media/platform/vivid/vivid-vid-common.c
2004     +++ b/drivers/media/platform/vivid/vivid-vid-common.c
2005     @@ -21,7 +21,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
2006     .type = V4L2_DV_BT_656_1120,
2007     /* keep this initialization for compatibility with GCC < 4.4.6 */
2008     .reserved = { 0 },
2009     - V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
2010     + V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
2011     V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
2012     V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
2013     V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
2014     diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
2015     index c63746968fa3..3cdd09e4dd6b 100644
2016     --- a/drivers/media/v4l2-core/v4l2-ioctl.c
2017     +++ b/drivers/media/v4l2-core/v4l2-ioctl.c
2018     @@ -286,6 +286,7 @@ static void v4l_print_format(const void *arg, bool write_only)
2019     const struct v4l2_window *win;
2020     const struct v4l2_sdr_format *sdr;
2021     const struct v4l2_meta_format *meta;
2022     + u32 planes;
2023     unsigned i;
2024    
2025     pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
2026     @@ -316,7 +317,8 @@ static void v4l_print_format(const void *arg, bool write_only)
2027     prt_names(mp->field, v4l2_field_names),
2028     mp->colorspace, mp->num_planes, mp->flags,
2029     mp->ycbcr_enc, mp->quantization, mp->xfer_func);
2030     - for (i = 0; i < mp->num_planes; i++)
2031     + planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
2032     + for (i = 0; i < planes; i++)
2033     printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
2034     mp->plane_fmt[i].bytesperline,
2035     mp->plane_fmt[i].sizeimage);
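
The v4l_print_format() fix bounds the debug loop by the size of the fixed plane array rather than trusting the potentially userspace-influenced num_planes count. A minimal standalone rendering of the clamp; MAX_PLANES here stands in for VIDEO_MAX_PLANES:

#include <stdio.h>

#define MAX_PLANES 8    /* stand-in for VIDEO_MAX_PLANES */

struct fmt {
        unsigned int num_planes;        /* may claim more than MAX_PLANES */
        unsigned int sizeimage[MAX_PLANES];
};

static void print_planes(const struct fmt *f)
{
        unsigned int planes = f->num_planes < MAX_PLANES ?
                              f->num_planes : MAX_PLANES;

        for (unsigned int i = 0; i < planes; i++)  /* never leaves the array */
                printf("plane %u: sizeimage=%u\n", i, f->sizeimage[i]);
}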
2036     diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
2037     index b89379782741..9c7925ca13cf 100644
2038     --- a/drivers/mfd/tps6586x.c
2039     +++ b/drivers/mfd/tps6586x.c
2040     @@ -592,6 +592,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
2041     return 0;
2042     }
2043    
2044     +static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
2045     +{
2046     + struct tps6586x *tps6586x = dev_get_drvdata(dev);
2047     +
2048     + if (tps6586x->client->irq)
2049     + disable_irq(tps6586x->client->irq);
2050     +
2051     + return 0;
2052     +}
2053     +
2054     +static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
2055     +{
2056     + struct tps6586x *tps6586x = dev_get_drvdata(dev);
2057     +
2058     + if (tps6586x->client->irq)
2059     + enable_irq(tps6586x->client->irq);
2060     +
2061     + return 0;
2062     +}
2063     +
2064     +static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
2065     + tps6586x_i2c_resume);
2066     +
2067     static const struct i2c_device_id tps6586x_id_table[] = {
2068     { "tps6586x", 0 },
2069     { },
2070     @@ -602,6 +625,7 @@ static struct i2c_driver tps6586x_driver = {
2071     .driver = {
2072     .name = "tps6586x",
2073     .of_match_table = of_match_ptr(tps6586x_of_match),
2074     + .pm = &tps6586x_pm_ops,
2075     },
2076     .probe = tps6586x_i2c_probe,
2077     .remove = tps6586x_i2c_remove,
2078     diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
2079     index 3633202e18f4..02a9aba85368 100644
2080     --- a/drivers/misc/mic/vop/vop_main.c
2081     +++ b/drivers/misc/mic/vop/vop_main.c
2082     @@ -381,16 +381,21 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
2083     struct _vop_vdev *vdev = to_vopvdev(dev);
2084     struct vop_device *vpdev = vdev->vpdev;
2085     struct mic_device_ctrl __iomem *dc = vdev->dc;
2086     - int i, err, retry;
2087     + int i, err, retry, queue_idx = 0;
2088    
2089     /* We must have this many virtqueues. */
2090     if (nvqs > ioread8(&vdev->desc->num_vq))
2091     return -ENOENT;
2092    
2093     for (i = 0; i < nvqs; ++i) {
2094     + if (!names[i]) {
2095     + vqs[i] = NULL;
2096     + continue;
2097     + }
2098     +
2099     dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
2100     __func__, i, names[i]);
2101     - vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i],
2102     + vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
2103     ctx ? ctx[i] : false);
2104     if (IS_ERR(vqs[i])) {
2105     err = PTR_ERR(vqs[i]);
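
This is the first of four hunks in the patch (vop, remoteproc, virtio_ccw, virtio_mmio) converging on the same find_vqs convention: a NULL name means the caller does not want that virtqueue, so the slot is skipped and the device-side queue index advances only for queues actually created. A hedged standalone sketch; setup_vq() is a stub for the per-transport helper:

#include <stdint.h>

/* stub for the per-transport helper (vop_find_vq, rp_find_vq, ...) */
static void *setup_vq(unsigned int hw_idx, const char *name)
{
        (void)name;
        return (void *)(uintptr_t)(hw_idx + 1);  /* any non-NULL token */
}

static int find_vqs(unsigned int nvqs, void *vqs[], const char *const names[])
{
        unsigned int queue_idx = 0;

        for (unsigned int i = 0; i < nvqs; ++i) {
                if (!names[i]) {        /* hole: caller skips this vq */
                        vqs[i] = NULL;
                        continue;
                }
                /* device-side index advances only for queues really built */
                vqs[i] = setup_vq(queue_idx++, names[i]);
        }
        return 0;
}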
2106     diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
2107     index 3cc8bfee6c18..8594659cb592 100644
2108     --- a/drivers/mmc/host/sdhci-msm.c
2109     +++ b/drivers/mmc/host/sdhci-msm.c
2110     @@ -258,6 +258,8 @@ struct sdhci_msm_host {
2111     bool mci_removed;
2112     const struct sdhci_msm_variant_ops *var_ops;
2113     const struct sdhci_msm_offset *offset;
2114     + bool use_cdr;
2115     + u32 transfer_mode;
2116     };
2117    
2118     static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
2119     @@ -1025,6 +1027,26 @@ out:
2120     return ret;
2121     }
2122    
2123     +static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
2124     +{
2125     + const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
2126     + u32 config, oldconfig = readl_relaxed(host->ioaddr +
2127     + msm_offset->core_dll_config);
2128     +
2129     + config = oldconfig;
2130     + if (enable) {
2131     + config |= CORE_CDR_EN;
2132     + config &= ~CORE_CDR_EXT_EN;
2133     + } else {
2134     + config &= ~CORE_CDR_EN;
2135     + config |= CORE_CDR_EXT_EN;
2136     + }
2137     +
2138     + if (config != oldconfig)
2139     + writel_relaxed(config, host->ioaddr +
2140     + msm_offset->core_dll_config);
2141     +}
2142     +
2143     static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
2144     {
2145     struct sdhci_host *host = mmc_priv(mmc);
2146     @@ -1042,8 +1064,14 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
2147     if (host->clock <= CORE_FREQ_100MHZ ||
2148     !(ios.timing == MMC_TIMING_MMC_HS400 ||
2149     ios.timing == MMC_TIMING_MMC_HS200 ||
2150     - ios.timing == MMC_TIMING_UHS_SDR104))
2151     + ios.timing == MMC_TIMING_UHS_SDR104)) {
2152     + msm_host->use_cdr = false;
2153     + sdhci_msm_set_cdr(host, false);
2154     return 0;
2155     + }
2156     +
2157     + /* Clock-Data-Recovery used to dynamically adjust RX sampling point */
2158     + msm_host->use_cdr = true;
2159    
2160     /*
2161     * For HS400 tuning in HS200 timing requires:
2162     @@ -1525,6 +1553,19 @@ static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
2163     case SDHCI_POWER_CONTROL:
2164     req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
2165     break;
2166     + case SDHCI_TRANSFER_MODE:
2167     + msm_host->transfer_mode = val;
2168     + break;
2169     + case SDHCI_COMMAND:
2170     + if (!msm_host->use_cdr)
2171     + break;
2172     + if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
2173     + SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
2174     + SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
2175     + sdhci_msm_set_cdr(host, true);
2176     + else
2177     + sdhci_msm_set_cdr(host, false);
2178     + break;
2179     }
2180    
2181     if (req_type) {
2182     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2183     index 333387f1f1fe..62659abf73cd 100644
2184     --- a/drivers/net/bonding/bond_main.c
2185     +++ b/drivers/net/bonding/bond_main.c
2186     @@ -1948,6 +1948,9 @@ static int __bond_release_one(struct net_device *bond_dev,
2187     if (!bond_has_slaves(bond)) {
2188     bond_set_carrier(bond);
2189     eth_hw_addr_random(bond_dev);
2190     + bond->nest_level = SINGLE_DEPTH_NESTING;
2191     + } else {
2192     + bond->nest_level = dev_get_nest_level(bond_dev) + 1;
2193     }
2194    
2195     unblock_netpoll_tx();
2196     diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
2197     index b4b839a1d095..ad41ec63cc9f 100644
2198     --- a/drivers/net/dsa/realtek-smi.c
2199     +++ b/drivers/net/dsa/realtek-smi.c
2200     @@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
2201     struct device_node *mdio_np;
2202     int ret;
2203    
2204     - mdio_np = of_find_compatible_node(smi->dev->of_node, NULL,
2205     - "realtek,smi-mdio");
2206     + mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
2207     if (!mdio_np) {
2208     dev_err(smi->dev, "no MDIO bus node\n");
2209     return -ENODEV;
2210     }
2211    
2212     smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
2213     - if (!smi->slave_mii_bus)
2214     - return -ENOMEM;
2215     + if (!smi->slave_mii_bus) {
2216     + ret = -ENOMEM;
2217     + goto err_put_node;
2218     + }
2219     smi->slave_mii_bus->priv = smi;
2220     smi->slave_mii_bus->name = "SMI slave MII";
2221     smi->slave_mii_bus->read = realtek_smi_mdio_read;
2222     @@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
2223     if (ret) {
2224     dev_err(smi->dev, "unable to register MDIO bus %s\n",
2225     smi->slave_mii_bus->id);
2226     - of_node_put(mdio_np);
2227     + goto err_put_node;
2228     }
2229    
2230     return 0;
2231     +
2232     +err_put_node:
2233     + of_node_put(mdio_np);
2234     +
2235     + return ret;
2236     }
2237    
2238     static int realtek_smi_probe(struct platform_device *pdev)
2239     @@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev)
2240     struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
2241    
2242     dsa_unregister_switch(smi->ds);
2243     + if (smi->slave_mii_bus)
2244     + of_node_put(smi->slave_mii_bus->dev.of_node);
2245     gpiod_set_value(smi->reset, 1);
2246    
2247     return 0;
2248     diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
2249     index 20c9377e99cb..1ce8b729929f 100644
2250     --- a/drivers/net/ethernet/microchip/lan743x_main.c
2251     +++ b/drivers/net/ethernet/microchip/lan743x_main.c
2252     @@ -962,13 +962,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
2253    
2254     memset(&ksettings, 0, sizeof(ksettings));
2255     phy_ethtool_get_link_ksettings(netdev, &ksettings);
2256     - local_advertisement = phy_read(phydev, MII_ADVERTISE);
2257     - if (local_advertisement < 0)
2258     - return;
2259     -
2260     - remote_advertisement = phy_read(phydev, MII_LPA);
2261     - if (remote_advertisement < 0)
2262     - return;
2263     + local_advertisement =
2264     + ethtool_adv_to_mii_adv_t(phydev->advertising);
2265     + remote_advertisement =
2266     + ethtool_adv_to_mii_adv_t(phydev->lp_advertising);
2267    
2268     lan743x_phy_update_flowcontrol(adapter,
2269     ksettings.base.duplex,
2270     diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
2271     index 209566f8097b..78ea9639b622 100644
2272     --- a/drivers/net/ethernet/realtek/r8169.c
2273     +++ b/drivers/net/ethernet/realtek/r8169.c
2274     @@ -714,6 +714,7 @@ module_param(use_dac, int, 0);
2275     MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
2276     module_param_named(debug, debug.msg_enable, int, 0);
2277     MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
2278     +MODULE_SOFTDEP("pre: realtek");
2279     MODULE_LICENSE("GPL");
2280     MODULE_FIRMWARE(FIRMWARE_8168D_1);
2281     MODULE_FIRMWARE(FIRMWARE_8168D_2);
2282     @@ -1728,11 +1729,13 @@ static bool rtl8169_reset_counters(struct rtl8169_private *tp)
2283    
2284     static bool rtl8169_update_counters(struct rtl8169_private *tp)
2285     {
2286     + u8 val = RTL_R8(tp, ChipCmd);
2287     +
2288     /*
2289     * Some chips are unable to dump tally counters when the receiver
2290     - * is disabled.
2291     + * is disabled. If 0xff chip may be in a PCI power-save state.
2292     + * is disabled. If the read returns 0xff, the chip may be in a PCI

2293     - if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0)
2294     + if (!(val & CmdRxEnb) || val == 0xff)
2295     return true;
2296    
2297     return rtl8169_do_counters(tp, CounterDump);
2298     diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
2299     index f7ebdcff53e4..4d66e4bb904a 100644
2300     --- a/drivers/net/phy/bcm87xx.c
2301     +++ b/drivers/net/phy/bcm87xx.c
2302     @@ -193,6 +193,7 @@ static struct phy_driver bcm87xx_driver[] = {
2303     .phy_id = PHY_ID_BCM8706,
2304     .phy_id_mask = 0xffffffff,
2305     .name = "Broadcom BCM8706",
2306     + .features = PHY_10GBIT_FEC_FEATURES,
2307     .flags = PHY_HAS_INTERRUPT,
2308     .config_init = bcm87xx_config_init,
2309     .config_aneg = bcm87xx_config_aneg,
2310     @@ -205,6 +206,7 @@ static struct phy_driver bcm87xx_driver[] = {
2311     .phy_id = PHY_ID_BCM8727,
2312     .phy_id_mask = 0xffffffff,
2313     .name = "Broadcom BCM8727",
2314     + .features = PHY_10GBIT_FEC_FEATURES,
2315     .flags = PHY_HAS_INTERRUPT,
2316     .config_init = bcm87xx_config_init,
2317     .config_aneg = bcm87xx_config_aneg,
2318     diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
2319     index 8022cd317f62..1a4d04afb7f0 100644
2320     --- a/drivers/net/phy/cortina.c
2321     +++ b/drivers/net/phy/cortina.c
2322     @@ -88,6 +88,7 @@ static struct phy_driver cortina_driver[] = {
2323     .phy_id = PHY_ID_CS4340,
2324     .phy_id_mask = 0xffffffff,
2325     .name = "Cortina CS4340",
2326     + .features = PHY_10GBIT_FEATURES,
2327     .config_init = gen10g_config_init,
2328     .config_aneg = gen10g_config_aneg,
2329     .read_status = cortina_read_status,
2330     diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
2331     index ddc2c5ea3787..6ace118502b9 100644
2332     --- a/drivers/net/phy/meson-gxl.c
2333     +++ b/drivers/net/phy/meson-gxl.c
2334     @@ -233,6 +233,7 @@ static struct phy_driver meson_gxl_phy[] = {
2335     .name = "Meson GXL Internal PHY",
2336     .features = PHY_BASIC_FEATURES,
2337     .flags = PHY_IS_INTERNAL | PHY_HAS_INTERRUPT,
2338     + .soft_reset = genphy_soft_reset,
2339     .config_init = meson_gxl_config_init,
2340     .aneg_done = genphy_aneg_done,
2341     .read_status = meson_gxl_read_status,
2342     diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
2343     index 9265dea79412..51611c7a23d1 100644
2344     --- a/drivers/net/phy/micrel.c
2345     +++ b/drivers/net/phy/micrel.c
2346     @@ -1105,6 +1105,7 @@ static struct phy_driver ksphy_driver[] = {
2347     .phy_id = PHY_ID_KSZ8873MLL,
2348     .phy_id_mask = MICREL_PHY_ID_MASK,
2349     .name = "Micrel KSZ8873MLL Switch",
2350     + .features = PHY_BASIC_FEATURES,
2351     .config_init = kszphy_config_init,
2352     .config_aneg = ksz8873mll_config_aneg,
2353     .read_status = ksz8873mll_read_status,
2354     diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
2355     index 26c41ede54a4..fd051ae787cb 100644
2356     --- a/drivers/net/phy/phy_device.c
2357     +++ b/drivers/net/phy/phy_device.c
2358     @@ -61,6 +61,9 @@ EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features);
2359     __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
2360     EXPORT_SYMBOL_GPL(phy_10gbit_features);
2361    
2362     +__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
2363     +EXPORT_SYMBOL_GPL(phy_10gbit_fec_features);
2364     +
2365     static const int phy_basic_ports_array[] = {
2366     ETHTOOL_LINK_MODE_Autoneg_BIT,
2367     ETHTOOL_LINK_MODE_TP_BIT,
2368     @@ -102,6 +105,11 @@ static const int phy_10gbit_features_array[] = {
2369     ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2370     };
2371    
2372     +const int phy_10gbit_fec_features_array[1] = {
2373     + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
2374     +};
2375     +EXPORT_SYMBOL_GPL(phy_10gbit_fec_features_array);
2376     +
2377     __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
2378     EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
2379    
2380     @@ -184,6 +192,10 @@ static void features_init(void)
2381     linkmode_set_bit_array(phy_10gbit_full_features_array,
2382     ARRAY_SIZE(phy_10gbit_full_features_array),
2383     phy_10gbit_full_features);
2384     + /* 10G FEC only */
2385     + linkmode_set_bit_array(phy_10gbit_fec_features_array,
2386     + ARRAY_SIZE(phy_10gbit_fec_features_array),
2387     + phy_10gbit_fec_features);
2388     }
2389    
2390     void phy_device_free(struct phy_device *phydev)
2391     diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
2392     index 22f3bdd8206c..91247182bc52 100644
2393     --- a/drivers/net/phy/teranetics.c
2394     +++ b/drivers/net/phy/teranetics.c
2395     @@ -80,6 +80,7 @@ static struct phy_driver teranetics_driver[] = {
2396     .phy_id = PHY_ID_TN2020,
2397     .phy_id_mask = 0xffffffff,
2398     .name = "Teranetics TN2020",
2399     + .features = PHY_10GBIT_FEATURES,
2400     .soft_reset = gen10g_no_soft_reset,
2401     .aneg_done = teranetics_aneg_done,
2402     .config_init = gen10g_config_init,
2403     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
2404     index 005020042be9..6658658246d2 100644
2405     --- a/drivers/net/tun.c
2406     +++ b/drivers/net/tun.c
2407     @@ -852,10 +852,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
2408     err = 0;
2409     }
2410    
2411     - rcu_assign_pointer(tfile->tun, tun);
2412     - rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
2413     - tun->numqueues++;
2414     -
2415     if (tfile->detached) {
2416     tun_enable_queue(tfile);
2417     } else {
2418     @@ -872,6 +868,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
2419     * refcnt.
2420     */
2421    
2422     + /* Publish tfile->tun and tun->tfiles only after we've fully
2423     + * initialized tfile; otherwise we risk using a half-initialized
2424     + * object.
2425     + */
2426     + rcu_assign_pointer(tfile->tun, tun);
2427     + rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
2428     + tun->numqueues++;
2429     out:
2430     return err;
2431     }
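
The tun_attach() fix is the classic publish-after-init pattern: finish initializing the object, then make it visible through rcu_assign_pointer(), whose release semantics order the initialization before the pointer store. A hedged userspace analog using C11 atomics in place of the kernel primitives:

#include <stdatomic.h>
#include <stdlib.h>

struct tfile { int queue; int ready; };

static _Atomic(struct tfile *) published;

static int attach(void)
{
        struct tfile *t = malloc(sizeof(*t));

        if (!t)
                return -1;
        t->queue = 0;
        t->ready = 1;   /* finish ALL initialization before publishing */
        /* release store: readers that see the pointer see a whole object */
        atomic_store_explicit(&published, t, memory_order_release);
        return 0;
}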
2432     diff --git a/drivers/of/property.c b/drivers/of/property.c
2433     index f46828e3b082..43720c2de138 100644
2434     --- a/drivers/of/property.c
2435     +++ b/drivers/of/property.c
2436     @@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
2437    
2438     if (!of_device_is_available(remote)) {
2439     pr_debug("not available for remote node\n");
2440     + of_node_put(remote);
2441     return NULL;
2442     }
2443    
2444     diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
2445     index de21f620b882..21b22a150930 100644
2446     --- a/drivers/remoteproc/remoteproc_virtio.c
2447     +++ b/drivers/remoteproc/remoteproc_virtio.c
2448     @@ -153,10 +153,15 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
2449     const bool * ctx,
2450     struct irq_affinity *desc)
2451     {
2452     - int i, ret;
2453     + int i, ret, queue_idx = 0;
2454    
2455     for (i = 0; i < nvqs; ++i) {
2456     - vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i],
2457     + if (!names[i]) {
2458     + vqs[i] = NULL;
2459     + continue;
2460     + }
2461     +
2462     + vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
2463     ctx ? ctx[i] : false);
2464     if (IS_ERR(vqs[i])) {
2465     ret = PTR_ERR(vqs[i]);
2466     diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
2467     index c9c57b4a0b71..4e1bdd03d2aa 100644
2468     --- a/drivers/s390/virtio/virtio_ccw.c
2469     +++ b/drivers/s390/virtio/virtio_ccw.c
2470     @@ -635,7 +635,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
2471     {
2472     struct virtio_ccw_device *vcdev = to_vc_device(vdev);
2473     unsigned long *indicatorp = NULL;
2474     - int ret, i;
2475     + int ret, i, queue_idx = 0;
2476     struct ccw1 *ccw;
2477    
2478     ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
2479     @@ -643,8 +643,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
2480     return -ENOMEM;
2481    
2482     for (i = 0; i < nvqs; ++i) {
2483     - vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
2484     - ctx ? ctx[i] : false, ccw);
2485     + if (!names[i]) {
2486     + vqs[i] = NULL;
2487     + continue;
2488     + }
2489     +
2490     + vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
2491     + names[i], ctx ? ctx[i] : false,
2492     + ccw);
2493     if (IS_ERR(vqs[i])) {
2494     ret = PTR_ERR(vqs[i]);
2495     vqs[i] = NULL;
2496     diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
2497     index a2b4179bfdf7..7639df91b110 100644
2498     --- a/drivers/scsi/scsi_pm.c
2499     +++ b/drivers/scsi/scsi_pm.c
2500     @@ -80,8 +80,22 @@ static int scsi_dev_type_resume(struct device *dev,
2501    
2502     if (err == 0) {
2503     pm_runtime_disable(dev);
2504     - pm_runtime_set_active(dev);
2505     + err = pm_runtime_set_active(dev);
2506     pm_runtime_enable(dev);
2507     +
2508     + /*
2509     + * Forcibly set the runtime PM status of the request queue to
2510     + * "active" so that we can again get requests from the queue
2511     + * (see also blk_pm_peek_request()).
2512     + *
2513     + * The resume hook will correct the runtime PM status of the disk.
2514     + */
2515     + if (!err && scsi_is_sdev_device(dev)) {
2516     + struct scsi_device *sdev = to_scsi_device(dev);
2517     +
2518     + if (sdev->request_queue->dev)
2519     + blk_set_runtime_active(sdev->request_queue);
2520     + }
2521     }
2522    
2523     return err;
2524     @@ -140,16 +154,6 @@ static int scsi_bus_resume_common(struct device *dev,
2525     else
2526     fn = NULL;
2527    
2528     - /*
2529     - * Forcibly set runtime PM status of request queue to "active" to
2530     - * make sure we can again get requests from the queue (see also
2531     - * blk_pm_peek_request()).
2532     - *
2533     - * The resume hook will correct runtime PM status of the disk.
2534     - */
2535     - if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
2536     - blk_set_runtime_active(to_scsi_device(dev)->request_queue);
2537     -
2538     if (fn) {
2539     async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
2540    
2541     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2542     index bd0a5c694a97..ba4b8b3ce8cf 100644
2543     --- a/drivers/scsi/sd.c
2544     +++ b/drivers/scsi/sd.c
2545     @@ -206,6 +206,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
2546     sp = buffer_data[0] & 0x80 ? 1 : 0;
2547     buffer_data[0] &= ~0x80;
2548    
2549     + /*
2550     + * Ensure the WP, DPOFUA, and RESERVED fields are cleared in the
2551     + * received mode parameter buffer before doing MODE SELECT.
2552     + */
2553     + data.device_specific = 0;
2554     +
2555     if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
2556     SD_MAX_RETRIES, &data, &sshdr)) {
2557     if (scsi_sense_valid(&sshdr))
2558     diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
2559     index 687250ec8032..23c6fd238422 100644
2560     --- a/drivers/tty/tty_io.c
2561     +++ b/drivers/tty/tty_io.c
2562     @@ -1256,7 +1256,8 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
2563     static int tty_reopen(struct tty_struct *tty)
2564     {
2565     struct tty_driver *driver = tty->driver;
2566     - int retval;
2567     + struct tty_ldisc *ld;
2568     + int retval = 0;
2569    
2570     if (driver->type == TTY_DRIVER_TYPE_PTY &&
2571     driver->subtype == PTY_TYPE_MASTER)
2572     @@ -1268,14 +1269,21 @@ static int tty_reopen(struct tty_struct *tty)
2573     if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
2574     return -EBUSY;
2575    
2576     - tty->count++;
2577     + ld = tty_ldisc_ref_wait(tty);
2578     + if (ld) {
2579     + tty_ldisc_deref(ld);
2580     + } else {
2581     + retval = tty_ldisc_lock(tty, 5 * HZ);
2582     + if (retval)
2583     + return retval;
2584    
2585     - if (tty->ldisc)
2586     - return 0;
2587     + if (!tty->ldisc)
2588     + retval = tty_ldisc_reinit(tty, tty->termios.c_line);
2589     + tty_ldisc_unlock(tty);
2590     + }
2591    
2592     - retval = tty_ldisc_reinit(tty, tty->termios.c_line);
2593     - if (retval)
2594     - tty->count--;
2595     + if (retval == 0)
2596     + tty->count++;
2597    
2598     return retval;
2599     }
2600     diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
2601     index 0c98d88f795a..b989ca26fc78 100644
2602     --- a/drivers/tty/tty_ldsem.c
2603     +++ b/drivers/tty/tty_ldsem.c
2604     @@ -293,6 +293,16 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
2605     if (!locked)
2606     atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
2607     list_del(&waiter.list);
2608     +
2609     + /*
2610     + * In case of timeout, wake up every reader that gave the right of way
2611     + * to the writer. This prevents the readers from separating into two
2612     + * groups: one holding the semaphore and another sleeping
2613     + * (in the case of no contention with a writer).
2614     + */
2615     + if (!locked && list_empty(&sem->write_wait))
2616     + __ldsem_wake_readers(sem);
2617     +
2618     raw_spin_unlock_irq(&sem->wait_lock);
2619    
2620     __set_current_state(TASK_RUNNING);
2621     diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
2622     index 31f769d67195..057d3cdef92e 100644
2623     --- a/drivers/video/fbdev/offb.c
2624     +++ b/drivers/video/fbdev/offb.c
2625     @@ -318,28 +318,28 @@ static void __iomem *offb_map_reg(struct device_node *np, int index,
2626     }
2627    
2628     static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp,
2629     - const char *name, unsigned long address)
2630     + unsigned long address)
2631     {
2632     struct offb_par *par = (struct offb_par *) info->par;
2633    
2634     - if (dp && !strncmp(name, "ATY,Rage128", 11)) {
2635     + if (of_node_name_prefix(dp, "ATY,Rage128")) {
2636     par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
2637     if (par->cmap_adr)
2638     par->cmap_type = cmap_r128;
2639     - } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12)
2640     - || !strncmp(name, "ATY,RageM3p12A", 14))) {
2641     + } else if (of_node_name_prefix(dp, "ATY,RageM3pA") ||
2642     + of_node_name_prefix(dp, "ATY,RageM3p12A")) {
2643     par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
2644     if (par->cmap_adr)
2645     par->cmap_type = cmap_M3A;
2646     - } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) {
2647     + } else if (of_node_name_prefix(dp, "ATY,RageM3pB")) {
2648     par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
2649     if (par->cmap_adr)
2650     par->cmap_type = cmap_M3B;
2651     - } else if (dp && !strncmp(name, "ATY,Rage6", 9)) {
2652     + } else if (of_node_name_prefix(dp, "ATY,Rage6")) {
2653     par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff);
2654     if (par->cmap_adr)
2655     par->cmap_type = cmap_radeon;
2656     - } else if (!strncmp(name, "ATY,", 4)) {
2657     + } else if (of_node_name_prefix(dp, "ATY,")) {
2658     unsigned long base = address & 0xff000000UL;
2659     par->cmap_adr =
2660     ioremap(base + 0x7ff000, 0x1000) + 0xcc0;
2661     @@ -350,7 +350,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
2662     par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000);
2663     if (par->cmap_adr)
2664     par->cmap_type = cmap_gxt2000;
2665     - } else if (dp && !strncmp(name, "vga,Display-", 12)) {
2666     + } else if (of_node_name_prefix(dp, "vga,Display-")) {
2667     /* Look for AVIVO initialized by SLOF */
2668     struct device_node *pciparent = of_get_parent(dp);
2669     const u32 *vid, *did;
2670     @@ -438,7 +438,7 @@ static void __init offb_init_fb(const char *name,
2671    
2672     par->cmap_type = cmap_unknown;
2673     if (depth == 8)
2674     - offb_init_palette_hacks(info, dp, name, address);
2675     + offb_init_palette_hacks(info, dp, address);
2676     else
2677     fix->visual = FB_VISUAL_TRUECOLOR;
2678    
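
The offb conversion swaps raw strncmp() on a passed-in name for of_node_name_prefix(), which also tolerates a NULL node, letting the repeated "dp && !strncmp(...)" guards collapse. A simplified userspace model of the helper's observable behavior (the struct and function here are stand-ins, not the kernel's):

#include <stdbool.h>
#include <string.h>

struct device_node { const char *name; };

static bool node_name_prefix(const struct device_node *np, const char *prefix)
{
        if (!np || !np->name)
                return false;   /* NULL-safe, unlike a raw strncmp() chain */
        return strncmp(np->name, prefix, strlen(prefix)) == 0;
}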
2679     diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
2680     index a3edb20ea4c3..a846d32ee653 100644
2681     --- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
2682     +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
2683     @@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
2684    
2685     int r = 0;
2686    
2687     + memset(&p, 0, sizeof(p));
2688     +
2689     switch (cmd) {
2690     case OMAPFB_SYNC_GFX:
2691     DBG("ioctl SYNC_GFX\n");
2692     diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
2693     index 728ecd1eea30..fb12fe205f86 100644
2694     --- a/drivers/virtio/virtio_balloon.c
2695     +++ b/drivers/virtio/virtio_balloon.c
2696     @@ -61,6 +61,10 @@ enum virtio_balloon_vq {
2697     VIRTIO_BALLOON_VQ_MAX
2698     };
2699    
2700     +enum virtio_balloon_config_read {
2701     + VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
2702     +};
2703     +
2704     struct virtio_balloon {
2705     struct virtio_device *vdev;
2706     struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
2707     @@ -77,14 +81,20 @@ struct virtio_balloon {
2708     /* Prevent updating balloon when it is being canceled. */
2709     spinlock_t stop_update_lock;
2710     bool stop_update;
2711     + /* Bitmap to indicate whether reading the related config fields is needed */
2712     + unsigned long config_read_bitmap;
2713    
2714     /* The list of allocated free pages, waiting to be given back to mm */
2715     struct list_head free_page_list;
2716     spinlock_t free_page_list_lock;
2717     /* The number of free page blocks on the above list */
2718     unsigned long num_free_page_blocks;
2719     - /* The cmd id received from host */
2720     - u32 cmd_id_received;
2721     + /*
2722     + * The cmd id received from host.
2723     + * Read it via virtio_balloon_cmd_id_received to get the latest value
2724     + * sent from host.
2725     + */
2726     + u32 cmd_id_received_cache;
2727     /* The cmd id that is actively in use */
2728     __virtio32 cmd_id_active;
2729     /* Buffer to store the stop sign */
2730     @@ -390,37 +400,31 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
2731     return num_returned;
2732     }
2733    
2734     +static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
2735     +{
2736     + if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
2737     + return;
2738     +
2739     + /* No need to queue the work if the bit was already set. */
2740     + if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
2741     + &vb->config_read_bitmap))
2742     + return;
2743     +
2744     + queue_work(vb->balloon_wq, &vb->report_free_page_work);
2745     +}
2746     +
2747     static void virtballoon_changed(struct virtio_device *vdev)
2748     {
2749     struct virtio_balloon *vb = vdev->priv;
2750     unsigned long flags;
2751     - s64 diff = towards_target(vb);
2752     -
2753     - if (diff) {
2754     - spin_lock_irqsave(&vb->stop_update_lock, flags);
2755     - if (!vb->stop_update)
2756     - queue_work(system_freezable_wq,
2757     - &vb->update_balloon_size_work);
2758     - spin_unlock_irqrestore(&vb->stop_update_lock, flags);
2759     - }
2760    
2761     - if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
2762     - virtio_cread(vdev, struct virtio_balloon_config,
2763     - free_page_report_cmd_id, &vb->cmd_id_received);
2764     - if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
2765     - /* Pass ULONG_MAX to give back all the free pages */
2766     - return_free_pages_to_mm(vb, ULONG_MAX);
2767     - } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
2768     - vb->cmd_id_received !=
2769     - virtio32_to_cpu(vdev, vb->cmd_id_active)) {
2770     - spin_lock_irqsave(&vb->stop_update_lock, flags);
2771     - if (!vb->stop_update) {
2772     - queue_work(vb->balloon_wq,
2773     - &vb->report_free_page_work);
2774     - }
2775     - spin_unlock_irqrestore(&vb->stop_update_lock, flags);
2776     - }
2777     + spin_lock_irqsave(&vb->stop_update_lock, flags);
2778     + if (!vb->stop_update) {
2779     + queue_work(system_freezable_wq,
2780     + &vb->update_balloon_size_work);
2781     + virtio_balloon_queue_free_page_work(vb);
2782     }
2783     + spin_unlock_irqrestore(&vb->stop_update_lock, flags);
2784     }
2785    
2786     static void update_balloon_size(struct virtio_balloon *vb)
2787     @@ -527,6 +531,17 @@ static int init_vqs(struct virtio_balloon *vb)
2788     return 0;
2789     }
2790    
2791     +static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
2792     +{
2793     + if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
2794     + &vb->config_read_bitmap))
2795     + virtio_cread(vb->vdev, struct virtio_balloon_config,
2796     + free_page_report_cmd_id,
2797     + &vb->cmd_id_received_cache);
2798     +
2799     + return vb->cmd_id_received_cache;
2800     +}
2801     +
2802     static int send_cmd_id_start(struct virtio_balloon *vb)
2803     {
2804     struct scatterlist sg;
2805     @@ -537,7 +552,8 @@ static int send_cmd_id_start(struct virtio_balloon *vb)
2806     while (virtqueue_get_buf(vq, &unused))
2807     ;
2808    
2809     - vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received);
2810     + vb->cmd_id_active = virtio32_to_cpu(vb->vdev,
2811     + virtio_balloon_cmd_id_received(vb));
2812     sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
2813     err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
2814     if (!err)
2815     @@ -620,7 +636,8 @@ static int send_free_pages(struct virtio_balloon *vb)
2816     * stop the reporting.
2817     */
2818     cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
2819     - if (cmd_id_active != vb->cmd_id_received)
2820     + if (unlikely(cmd_id_active !=
2821     + virtio_balloon_cmd_id_received(vb)))
2822     break;
2823    
2824     /*
2825     @@ -637,11 +654,9 @@ static int send_free_pages(struct virtio_balloon *vb)
2826     return 0;
2827     }
2828    
2829     -static void report_free_page_func(struct work_struct *work)
2830     +static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
2831     {
2832     int err;
2833     - struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
2834     - report_free_page_work);
2835     struct device *dev = &vb->vdev->dev;
2836    
2837     /* Start by sending the received cmd id to host with an outbuf. */
2838     @@ -659,6 +674,23 @@ static void report_free_page_func(struct work_struct *work)
2839     dev_err(dev, "Failed to send a stop id, err = %d\n", err);
2840     }
2841    
2842     +static void report_free_page_func(struct work_struct *work)
2843     +{
2844     + struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
2845     + report_free_page_work);
2846     + u32 cmd_id_received;
2847     +
2848     + cmd_id_received = virtio_balloon_cmd_id_received(vb);
2849     + if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
2850     + /* Pass ULONG_MAX to give back all the free pages */
2851     + return_free_pages_to_mm(vb, ULONG_MAX);
2852     + } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
2853     + cmd_id_received !=
2854     + virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
2855     + virtio_balloon_report_free_page(vb);
2856     + }
2857     +}
2858     +
2859     #ifdef CONFIG_BALLOON_COMPACTION
2860     /*
2861     * virtballoon_migratepage - perform the balloon page migration on behalf of
2862     @@ -885,7 +917,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
2863     goto out_del_vqs;
2864     }
2865     INIT_WORK(&vb->report_free_page_work, report_free_page_func);
2866     - vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP;
2867     + vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
2868     vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
2869     VIRTIO_BALLOON_CMD_ID_STOP);
2870     vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
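
The balloon rework replaces reading the config in the interrupt path with a config_read_bitmap: the notifier sets a "stale" bit and queues work only on the 0->1 transition, and virtio_balloon_cmd_id_received() clears the bit and refreshes its cached value at most once per notification. A hedged userspace analog with C11 atomics standing in for test_and_set_bit()/test_and_clear_bit(), and a plain parameter standing in for virtio_cread():

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint config_read_bitmap;  /* bit 0: cmd id needs a re-read */
static uint32_t cmd_id_cache;

static int config_changed_notify(void)
{
        /* queue the work only on the 0->1 transition of the bit */
        return !(atomic_fetch_or(&config_read_bitmap, 1u) & 1u);
}

static uint32_t cmd_id_received(uint32_t fresh)  /* 'fresh' models virtio_cread() */
{
        /* test-and-clear: refresh the cache at most once per notification */
        if (atomic_fetch_and(&config_read_bitmap, ~1u) & 1u)
                cmd_id_cache = fresh;
        return cmd_id_cache;
}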
2871     diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
2872     index 4cd9ea5c75be..d9dd0f789279 100644
2873     --- a/drivers/virtio/virtio_mmio.c
2874     +++ b/drivers/virtio/virtio_mmio.c
2875     @@ -468,7 +468,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
2876     {
2877     struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
2878     unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
2879     - int i, err;
2880     + int i, err, queue_idx = 0;
2881    
2882     err = request_irq(irq, vm_interrupt, IRQF_SHARED,
2883     dev_name(&vdev->dev), vm_dev);
2884     @@ -476,7 +476,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
2885     return err;
2886    
2887     for (i = 0; i < nvqs; ++i) {
2888     - vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i],
2889     + if (!names[i]) {
2890     + vqs[i] = NULL;
2891     + continue;
2892     + }
2893     +
2894     + vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
2895     ctx ? ctx[i] : false);
2896     if (IS_ERR(vqs[i])) {
2897     vm_del_vqs(vdev);
2898     diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
2899     index 93194f3e7540..117e76b2f939 100644
2900     --- a/drivers/xen/events/events_base.c
2901     +++ b/drivers/xen/events/events_base.c
2902     @@ -1650,7 +1650,7 @@ void xen_callback_vector(void)
2903     xen_have_vector_callback = 0;
2904     return;
2905     }
2906     - pr_info("Xen HVM callback vector for event delivery is enabled\n");
2907     + pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
2908     alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
2909     xen_hvm_callback_vector);
2910     }
2911     diff --git a/fs/block_dev.c b/fs/block_dev.c
2912     index a80b4f0ee7c4..5a35ed922c95 100644
2913     --- a/fs/block_dev.c
2914     +++ b/fs/block_dev.c
2915     @@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev)
2916     }
2917     EXPORT_SYMBOL(invalidate_bdev);
2918    
2919     +static void set_init_blocksize(struct block_device *bdev)
2920     +{
2921     + unsigned bsize = bdev_logical_block_size(bdev);
2922     + loff_t size = i_size_read(bdev->bd_inode);
2923     +
2924     + while (bsize < PAGE_SIZE) {
2925     + if (size & bsize)
2926     + break;
2927     + bsize <<= 1;
2928     + }
2929     + bdev->bd_block_size = bsize;
2930     + bdev->bd_inode->i_blkbits = blksize_bits(bsize);
2931     +}
2932     +
2933     int set_blocksize(struct block_device *bdev, int size)
2934     {
2935     /* Size must be a power of two, and between 512 and PAGE_SIZE */
2936     @@ -1408,18 +1422,9 @@ EXPORT_SYMBOL(check_disk_change);
2937    
2938     void bd_set_size(struct block_device *bdev, loff_t size)
2939     {
2940     - unsigned bsize = bdev_logical_block_size(bdev);
2941     -
2942     inode_lock(bdev->bd_inode);
2943     i_size_write(bdev->bd_inode, size);
2944     inode_unlock(bdev->bd_inode);
2945     - while (bsize < PAGE_SIZE) {
2946     - if (size & bsize)
2947     - break;
2948     - bsize <<= 1;
2949     - }
2950     - bdev->bd_block_size = bsize;
2951     - bdev->bd_inode->i_blkbits = blksize_bits(bsize);
2952     }
2953     EXPORT_SYMBOL(bd_set_size);
2954    
2955     @@ -1496,8 +1501,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
2956     }
2957     }
2958    
2959     - if (!ret)
2960     + if (!ret) {
2961     bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
2962     + set_init_blocksize(bdev);
2963     + }
2964    
2965     /*
2966     * If the device is invalidated, rescan partition
2967     @@ -1532,6 +1539,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
2968     goto out_clear;
2969     }
2970     bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
2971     + set_init_blocksize(bdev);
2972     }
2973    
2974     if (bdev->bd_bdi == &noop_backing_dev_info)
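
set_init_blocksize() picks the largest power-of-two block size, capped at PAGE_SIZE, that still divides the device size: testing size & bsize asks whether doubling would leave a remainder. A small userspace model of the loop, with PAGE_SIZE hardcoded to 4096 for illustration:

#include <stdio.h>

#define MODEL_PAGE_SIZE 4096u

/*
 * Model of set_init_blocksize(): starting from the logical block size,
 * keep doubling while the device size is still a multiple of the next
 * candidate, capped at the page size.
 */
static unsigned init_blocksize(unsigned logical_bsize, unsigned long long size)
{
        unsigned bsize = logical_bsize;

        while (bsize < MODEL_PAGE_SIZE) {
                if (size & bsize)   /* size is not a multiple of 2*bsize */
                        break;
                bsize <<= 1;
        }
        return bsize;
}

int main(void)
{
        /* 10 GiB disk, 512-byte sectors -> full 4 KiB blocks: prints 4096 */
        printf("%u\n", init_blocksize(512, 10ULL << 30));
        /* odd size: 1 GiB plus one sector -> stuck at 512 */
        printf("%u\n", init_blocksize(512, (1ULL << 30) + 512));
        return 0;
}
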
2975     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2976     index 6d776717d8b3..f74c9e6b84ce 100644
2977     --- a/fs/btrfs/disk-io.c
2978     +++ b/fs/btrfs/disk-io.c
2979     @@ -4155,6 +4155,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
2980     spin_lock(&fs_info->ordered_root_lock);
2981     }
2982     spin_unlock(&fs_info->ordered_root_lock);
2983     +
2984     + /*
2985     + * We need this here because if we've been flipped read-only we won't
2986     + * get sync() from the umount, so we need to make sure any ordered
2987     + * extents that haven't had their dirty pages IO start writeout yet
2988     + * extents whose dirty pages haven't started writeout yet
2989     + */
2990     + btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
2991     }
2992    
2993     static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
2994     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2995     index 423281c19fad..02772f8823cf 100644
2996     --- a/fs/btrfs/inode.c
2997     +++ b/fs/btrfs/inode.c
2998     @@ -3147,9 +3147,6 @@ out:
2999     /* once for the tree */
3000     btrfs_put_ordered_extent(ordered_extent);
3001    
3002     - /* Try to release some metadata so we don't get an OOM but don't wait */
3003     - btrfs_btree_balance_dirty_nodelay(fs_info);
3004     -
3005     return ret;
3006     }
3007    
3008     diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
3009     index e02a9039b5ea..67bdbd3da52e 100644
3010     --- a/fs/pstore/ram.c
3011     +++ b/fs/pstore/ram.c
3012     @@ -723,18 +723,15 @@ static int ramoops_probe(struct platform_device *pdev)
3013     {
3014     struct device *dev = &pdev->dev;
3015     struct ramoops_platform_data *pdata = dev->platform_data;
3016     + struct ramoops_platform_data pdata_local;
3017     struct ramoops_context *cxt = &oops_cxt;
3018     size_t dump_mem_sz;
3019     phys_addr_t paddr;
3020     int err = -EINVAL;
3021    
3022     if (dev_of_node(dev) && !pdata) {
3023     - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
3024     - if (!pdata) {
3025     - pr_err("cannot allocate platform data buffer\n");
3026     - err = -ENOMEM;
3027     - goto fail_out;
3028     - }
3029     + pdata = &pdata_local;
3030     + memset(pdata, 0, sizeof(*pdata));
3031    
3032     err = ramoops_parse_dt(pdev, pdata);
3033     if (err < 0)
3034     diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
3035     index 7cca5f859a90..f3c43519baa7 100644
3036     --- a/include/linux/bcma/bcma_soc.h
3037     +++ b/include/linux/bcma/bcma_soc.h
3038     @@ -6,6 +6,7 @@
3039    
3040     struct bcma_soc {
3041     struct bcma_bus bus;
3042     + struct device *dev;
3043     };
3044    
3045     int __init bcma_host_soc_register(struct bcma_soc *soc);
3046     diff --git a/include/linux/genhd.h b/include/linux/genhd.h
3047     index 70fc838e6773..0c5ee17b4d88 100644
3048     --- a/include/linux/genhd.h
3049     +++ b/include/linux/genhd.h
3050     @@ -129,7 +129,7 @@ struct hd_struct {
3051     struct disk_stats dkstats;
3052     #endif
3053     struct percpu_ref ref;
3054     - struct rcu_head rcu_head;
3055     + struct rcu_work rcu_work;
3056     };
3057    
3058     #define GENHD_FL_REMOVABLE 1
3059     diff --git a/include/linux/phy.h b/include/linux/phy.h
3060     index 3ea87f774a76..306630d13523 100644
3061     --- a/include/linux/phy.h
3062     +++ b/include/linux/phy.h
3063     @@ -48,6 +48,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
3064     extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
3065     extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
3066     extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
3067     +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
3068     extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
3069    
3070     #define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
3071     @@ -56,6 +57,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini
3072     #define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
3073     #define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
3074     #define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
3075     +#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
3076     #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
3077    
3078     /*
3079     diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
3080     index 4b2b2baf8ab4..f32fc8289473 100644
3081     --- a/include/net/netfilter/nf_conntrack_count.h
3082     +++ b/include/net/netfilter/nf_conntrack_count.h
3083     @@ -5,17 +5,10 @@
3084    
3085     struct nf_conncount_data;
3086    
3087     -enum nf_conncount_list_add {
3088     - NF_CONNCOUNT_ADDED, /* list add was ok */
3089     - NF_CONNCOUNT_ERR, /* -ENOMEM, must drop skb */
3090     - NF_CONNCOUNT_SKIP, /* list is already reclaimed by gc */
3091     -};
3092     -
3093     struct nf_conncount_list {
3094     spinlock_t list_lock;
3095     struct list_head head; /* connections with the same filtering key */
3096     unsigned int count; /* length of list */
3097     - bool dead;
3098     };
3099    
3100     struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
3101     @@ -29,18 +22,12 @@ unsigned int nf_conncount_count(struct net *net,
3102     const struct nf_conntrack_tuple *tuple,
3103     const struct nf_conntrack_zone *zone);
3104    
3105     -void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list,
3106     - const struct nf_conntrack_tuple *tuple,
3107     - const struct nf_conntrack_zone *zone,
3108     - bool *addit);
3109     +int nf_conncount_add(struct net *net, struct nf_conncount_list *list,
3110     + const struct nf_conntrack_tuple *tuple,
3111     + const struct nf_conntrack_zone *zone);
3112    
3113     void nf_conncount_list_init(struct nf_conncount_list *list);
3114    
3115     -enum nf_conncount_list_add
3116     -nf_conncount_add(struct nf_conncount_list *list,
3117     - const struct nf_conntrack_tuple *tuple,
3118     - const struct nf_conntrack_zone *zone);
3119     -
3120     bool nf_conncount_gc_list(struct net *net,
3121     struct nf_conncount_list *list);
3122    
3123     diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
3124     index f6052e70bf40..a55cb8b10165 100644
3125     --- a/include/uapi/linux/in.h
3126     +++ b/include/uapi/linux/in.h
3127     @@ -268,7 +268,7 @@ struct sockaddr_in {
3128     #define IN_MULTICAST(a) IN_CLASSD(a)
3129     #define IN_MULTICAST_NET 0xe0000000
3130    
3131     -#define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff)
3132     +#define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff)
3133     #define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
3134    
3135     #define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
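
The old IN_BADCLASS definition had an unbalanced extra parenthesis, so any userspace program using it failed to compile; the fix also casts the constant so both sides compare as long int. A quick self-check of the repaired macro (copied from the + line above), against the limited-broadcast address it is meant to match:

#include <assert.h>
#include <stdio.h>

/* Fixed macro from the patch; the old one did not even preprocess
 * into valid C. */
#define IN_BADCLASS(a)  (((long int) (a) ) == (long int)0xffffffff)
#define IN_EXPERIMENTAL(a)      IN_BADCLASS((a))

int main(void)
{
        unsigned int limited = 0xffffffffu;   /* 255.255.255.255 */
        unsigned int classe  = 0xf0000001u;   /* 240.0.0.1 */

        assert(IN_BADCLASS(limited));         /* only the broadcast matches */
        assert(!IN_BADCLASS(classe));
        printf("IN_BADCLASS behaves as expected\n");
        return 0;
}

The explicit (long int) cast on the constant keeps the comparison consistent on both 32-bit and 64-bit ABIs, where plain 0xffffffff would otherwise promote differently.
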
3136     diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
3137     index d13fd490b66d..6e73f0274e41 100644
3138     --- a/include/uapi/rdma/vmw_pvrdma-abi.h
3139     +++ b/include/uapi/rdma/vmw_pvrdma-abi.h
3140     @@ -78,6 +78,7 @@ enum pvrdma_wr_opcode {
3141     PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
3142     PVRDMA_WR_BIND_MW,
3143     PVRDMA_WR_REG_SIG_MR,
3144     + PVRDMA_WR_ERROR,
3145     };
3146    
3147     enum pvrdma_wc_status {
3148     diff --git a/init/Kconfig b/init/Kconfig
3149     index ed9352513c32..b902f9c89800 100644
3150     --- a/init/Kconfig
3151     +++ b/init/Kconfig
3152     @@ -1130,6 +1130,7 @@ config LD_DEAD_CODE_DATA_ELIMINATION
3153     bool "Dead code and data elimination (EXPERIMENTAL)"
3154     depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
3155     depends on EXPERT
3156     + depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800)
3157     depends on $(cc-option,-ffunction-sections -fdata-sections)
3158     depends on $(ld-option,--gc-sections)
3159     help
3160     diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
3161     index 14436f4ca6bd..30e0f9770f88 100644
3162     --- a/lib/int_sqrt.c
3163     +++ b/lib/int_sqrt.c
3164     @@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x)
3165     if (x <= ULONG_MAX)
3166     return int_sqrt((unsigned long) x);
3167    
3168     - m = 1ULL << (fls64(x) & ~1ULL);
3169     + m = 1ULL << ((fls64(x) - 1) & ~1ULL);
3170     while (m != 0) {
3171     b = y + m;
3172     y >>= 1;
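
fls64() returns a 1-based bit position, so the old fls64(x) & ~1ULL started the loop one bit position too high, and for values with bit 63 set it produced a shift by 64, which is undefined. The patched (fls64(x) - 1) & ~1ULL lands on the highest even bit position at or below the MSB. A userspace copy of the fixed routine, with a portable fls64() and an extra x == 0 guard that the kernel gets for free from its early 32-bit path:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* 1-based position of the most significant set bit, 0 for x == 0
 * (the contract of the kernel's fls64()). */
static int fls64(uint64_t x)
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

/* Userspace copy of the patched int_sqrt64() loop. */
static uint32_t int_sqrt64(uint64_t x)
{
        uint64_t b, m, y = 0;

        if (x == 0)
                return 0;   /* in the kernel the early 32-bit path covers this */

        /* highest even bit position <= MSB; fls64() is 1-based, hence -1 */
        m = 1ULL << ((fls64(x) - 1) & ~1ULL);
        while (m != 0) {
                b = y + m;
                y >>= 1;
                if (x >= b) {
                        x -= b;
                        y += m;
                }
                m >>= 2;
        }
        return (uint32_t)y;
}

int main(void)
{
        assert(int_sqrt64(1) == 1);
        assert(int_sqrt64(1ULL << 32) == (1U << 16));
        assert(int_sqrt64(1000000000000ULL) == 1000000);
        printf("int_sqrt64 ok\n");
        return 0;
}
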
3173     diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
3174     index b1b5e8516724..ed683e5b73ba 100644
3175     --- a/net/bridge/br_netfilter_hooks.c
3176     +++ b/net/bridge/br_netfilter_hooks.c
3177     @@ -278,7 +278,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
3178     struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
3179     int ret;
3180    
3181     - if (neigh->hh.hh_len) {
3182     + if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
3183     neigh_hh_bridge(&neigh->hh, skb);
3184     skb->dev = nf_bridge->physindev;
3185     ret = br_handle_frame_finish(net, sk, skb);
3186     diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
3187     index 491828713e0b..5e55cef0cec3 100644
3188     --- a/net/bridge/netfilter/ebtables.c
3189     +++ b/net/bridge/netfilter/ebtables.c
3190     @@ -1137,14 +1137,16 @@ static int do_replace(struct net *net, const void __user *user,
3191     tmp.name[sizeof(tmp.name) - 1] = 0;
3192    
3193     countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
3194     - newinfo = vmalloc(sizeof(*newinfo) + countersize);
3195     + newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
3196     + PAGE_KERNEL);
3197     if (!newinfo)
3198     return -ENOMEM;
3199    
3200     if (countersize)
3201     memset(newinfo->counters, 0, countersize);
3202    
3203     - newinfo->entries = vmalloc(tmp.entries_size);
3204     + newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
3205     + PAGE_KERNEL);
3206     if (!newinfo->entries) {
3207     ret = -ENOMEM;
3208     goto free_newinfo;
3209     diff --git a/net/can/gw.c b/net/can/gw.c
3210     index faa3da88a127..53859346dc9a 100644
3211     --- a/net/can/gw.c
3212     +++ b/net/can/gw.c
3213     @@ -416,13 +416,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
3214     while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
3215     (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
3216    
3217     - /* check for checksum updates when the CAN frame has been modified */
3218     + /* Has the CAN frame been modified? */
3219     if (modidx) {
3220     - if (gwj->mod.csumfunc.crc8)
3221     + /* get available space for the processed CAN frame type */
3222     + int max_len = nskb->len - offsetof(struct can_frame, data);
3223     +
3224     + /* dlc may have changed, make sure it fits into the CAN frame */
3225     + if (cf->can_dlc > max_len)
3226     + goto out_delete;
3227     +
3228     + /* check for checksum updates in classic CAN length only */
3229     + if (gwj->mod.csumfunc.crc8) {
3230     + if (cf->can_dlc > 8)
3231     + goto out_delete;
3232     +
3233     (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
3234     + }
3235     +
3236     + if (gwj->mod.csumfunc.xor) {
3237     + if (cf->can_dlc > 8)
3238     + goto out_delete;
3239    
3240     - if (gwj->mod.csumfunc.xor)
3241     (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
3242     + }
3243     }
3244    
3245     /* clear the skb timestamp if not configured the other way */
3246     @@ -434,6 +450,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
3247     gwj->dropped_frames++;
3248     else
3249     gwj->handled_frames++;
3250     +
3251     + return;
3252     +
3253     + out_delete:
3254     + /* delete frame due to misconfiguration */
3255     + gwj->deleted_frames++;
3256     + kfree_skb(nskb);
3257     + return;
3258     }
3259    
3260     static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
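
After the modification functions run, the frame's dlc may no longer match the buffer actually backing nskb, and the CRC8/XOR checksum helpers only index up to 8 data bytes; the new checks delete the frame in either case. The shape of those checks, modeled in plain C with a simplified frame struct (not the real struct can_frame):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified classic-CAN frame for illustration. */
struct frame {
        unsigned char can_dlc;   /* claimed payload length */
        unsigned char data[8];
};

/*
 * Model of the patched can_can_gw_rcv() checks: a modified frame is
 * dropped if its dlc no longer fits the buffer that backs it, and
 * checksum updates only make sense for classic-CAN payloads (<= 8).
 */
static bool frame_ok_after_mod(const struct frame *f, int buf_len,
                               bool wants_csum)
{
        int max_len = buf_len - (int)offsetof(struct frame, data);

        if (f->can_dlc > max_len)
                return false;            /* dlc outgrew the skb -> delete */
        if (wants_csum && f->can_dlc > 8)
                return false;            /* csum helpers are classic-CAN only */
        return true;
}

int main(void)
{
        struct frame f = { .can_dlc = 8 };

        printf("%d\n", frame_ok_after_mod(&f, sizeof(f), true));  /* 1 */
        f.can_dlc = 12;                  /* e.g. a CAN FD length sneaking in */
        printf("%d\n", frame_ok_after_mod(&f, sizeof(f), true));  /* 0 */
        return 0;
}
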
3261     diff --git a/net/core/filter.c b/net/core/filter.c
3262     index 8d2c629501e2..eb0007f30142 100644
3263     --- a/net/core/filter.c
3264     +++ b/net/core/filter.c
3265     @@ -2023,18 +2023,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
3266     static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
3267     u32 flags)
3268     {
3269     - /* skb->mac_len is not set on normal egress */
3270     - unsigned int mlen = skb->network_header - skb->mac_header;
3271     + unsigned int mlen = skb_network_offset(skb);
3272    
3273     - __skb_pull(skb, mlen);
3274     + if (mlen) {
3275     + __skb_pull(skb, mlen);
3276    
3277     - /* At ingress, the mac header has already been pulled once.
3278     - * At egress, skb_pospull_rcsum has to be done in case that
3279     - * the skb is originated from ingress (i.e. a forwarded skb)
3280     - * to ensure that rcsum starts at net header.
3281     - */
3282     - if (!skb_at_tc_ingress(skb))
3283     - skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
3284     + /* At ingress, the mac header has already been pulled once.
3285     + * At egress, skb_postpull_rcsum has to be done in case that
3286     + * the skb is originated from ingress (i.e. a forwarded skb)
3287     + * to ensure that rcsum starts at net header.
3288     + */
3289     + if (!skb_at_tc_ingress(skb))
3290     + skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
3291     + }
3292     skb_pop_mac_header(skb);
3293     skb_reset_mac_len(skb);
3294     return flags & BPF_F_INGRESS ?
3295     diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
3296     index 3e85437f7106..a648568c5e8f 100644
3297     --- a/net/core/lwt_bpf.c
3298     +++ b/net/core/lwt_bpf.c
3299     @@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
3300     lwt->name ? : "<unknown>");
3301     ret = BPF_OK;
3302     } else {
3303     + skb_reset_mac_header(skb);
3304     ret = skb_do_redirect(skb);
3305     if (ret == 0)
3306     ret = BPF_REDIRECT;
3307     diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
3308     index fffcc130900e..82f341e84fae 100644
3309     --- a/net/ipv4/ip_sockglue.c
3310     +++ b/net/ipv4/ip_sockglue.c
3311     @@ -148,19 +148,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
3312    
3313     static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
3314     {
3315     + __be16 _ports[2], *ports;
3316     struct sockaddr_in sin;
3317     - __be16 *ports;
3318     - int end;
3319     -
3320     - end = skb_transport_offset(skb) + 4;
3321     - if (end > 0 && !pskb_may_pull(skb, end))
3322     - return;
3323    
3324     /* All current transport protocols have the port numbers in the
3325     * first four bytes of the transport header and this function is
3326     * written with this assumption in mind.
3327     */
3328     - ports = (__be16 *)skb_transport_header(skb);
3329     + ports = skb_header_pointer(skb, skb_transport_offset(skb),
3330     + sizeof(_ports), &_ports);
3331     + if (!ports)
3332     + return;
3333    
3334     sin.sin_family = AF_INET;
3335     sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
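
skb_header_pointer() either returns a pointer directly into the packet or copies the requested bytes into the caller's _ports buffer, and returns NULL when the packet is too short; that replaces the pskb_may_pull() dance and the raw pointer into a possibly paged skb. A flat-buffer userspace model of that contract (a real skb may also be fragmented; here the "packet" is a single array):

#include <stdio.h>
#include <string.h>

/*
 * Model of the skb_header_pointer() contract: hand back len bytes at
 * offset via the caller's bounce buffer, or NULL when absent.
 */
static const void *header_pointer(const unsigned char *pkt, size_t pkt_len,
                                  size_t offset, size_t len, void *buf)
{
        if (offset + len > pkt_len)
                return NULL;             /* bytes simply are not there */
        memcpy(buf, pkt + offset, len);  /* bounce copy, always in bounds */
        return buf;
}

int main(void)
{
        /* 8-byte "transport header": src port 0x1234, dst port 0x0050 */
        unsigned char pkt[] = { 0x12, 0x34, 0x00, 0x50, 0, 0, 0, 0 };
        unsigned char buf[4];
        const unsigned char *p;

        p = header_pointer(pkt, sizeof(pkt), 0, sizeof(buf), buf);
        if (p)
                printf("dst port bytes: %02x %02x\n", p[2], p[3]);

        /* A truncated packet now fails cleanly instead of being read
         * past its end. */
        if (!header_pointer(pkt, 3, 0, sizeof(buf), buf))
                printf("truncated packet rejected\n");
        return 0;
}
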
3336     diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
3337     index f87dbc78b6bc..71a29e9c0620 100644
3338     --- a/net/ipv4/tcp_timer.c
3339     +++ b/net/ipv4/tcp_timer.c
3340     @@ -226,7 +226,7 @@ static int tcp_write_timeout(struct sock *sk)
3341     if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
3342     if (icsk->icsk_retransmits) {
3343     dst_negative_advice(sk);
3344     - } else if (!tp->syn_data && !tp->syn_fastopen) {
3345     + } else {
3346     sk_rethink_txhash(sk);
3347     }
3348     retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
3349     diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
3350     index 1ede7a16a0be..cb24850d2c7f 100644
3351     --- a/net/ipv6/datagram.c
3352     +++ b/net/ipv6/datagram.c
3353     @@ -341,6 +341,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
3354     skb_reset_network_header(skb);
3355     iph = ipv6_hdr(skb);
3356     iph->daddr = fl6->daddr;
3357     + ip6_flow_hdr(iph, 0, 0);
3358    
3359     serr = SKB_EXT_ERR(skb);
3360     serr->ee.ee_errno = err;
3361     @@ -700,17 +701,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
3362     }
3363     if (np->rxopt.bits.rxorigdstaddr) {
3364     struct sockaddr_in6 sin6;
3365     - __be16 *ports;
3366     - int end;
3367     + __be16 _ports[2], *ports;
3368    
3369     - end = skb_transport_offset(skb) + 4;
3370     - if (end <= 0 || pskb_may_pull(skb, end)) {
3371     + ports = skb_header_pointer(skb, skb_transport_offset(skb),
3372     + sizeof(_ports), &_ports);
3373     + if (ports) {
3374     /* All current transport protocols have the port numbers in the
3375     * first four bytes of the transport header and this function is
3376     * written with this assumption in mind.
3377     */
3378     - ports = (__be16 *)skb_transport_header(skb);
3379     -
3380     sin6.sin6_family = AF_INET6;
3381     sin6.sin6_addr = ipv6_hdr(skb)->daddr;
3382     sin6.sin6_port = ports[1];
3383     diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
3384     index c9c53ade55c3..6d14cbe443f8 100644
3385     --- a/net/ipv6/icmp.c
3386     +++ b/net/ipv6/icmp.c
3387     @@ -421,10 +421,10 @@ static int icmp6_iif(const struct sk_buff *skb)
3388     static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
3389     const struct in6_addr *force_saddr)
3390     {
3391     - struct net *net = dev_net(skb->dev);
3392     struct inet6_dev *idev = NULL;
3393     struct ipv6hdr *hdr = ipv6_hdr(skb);
3394     struct sock *sk;
3395     + struct net *net;
3396     struct ipv6_pinfo *np;
3397     const struct in6_addr *saddr = NULL;
3398     struct dst_entry *dst;
3399     @@ -435,12 +435,16 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
3400     int iif = 0;
3401     int addr_type = 0;
3402     int len;
3403     - u32 mark = IP6_REPLY_MARK(net, skb->mark);
3404     + u32 mark;
3405    
3406     if ((u8 *)hdr < skb->head ||
3407     (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
3408     return;
3409    
3410     + if (!skb->dev)
3411     + return;
3412     + net = dev_net(skb->dev);
3413     + mark = IP6_REPLY_MARK(net, skb->mark);
3414     /*
3415     * Make sure we respect the rules
3416     * i.e. RFC 1885 2.4(e)
3417     diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
3418     index 9cd180bda092..7554c56b2e63 100644
3419     --- a/net/netfilter/nf_conncount.c
3420     +++ b/net/netfilter/nf_conncount.c
3421     @@ -33,12 +33,6 @@
3422    
3423     #define CONNCOUNT_SLOTS 256U
3424    
3425     -#ifdef CONFIG_LOCKDEP
3426     -#define CONNCOUNT_LOCK_SLOTS 8U
3427     -#else
3428     -#define CONNCOUNT_LOCK_SLOTS 256U
3429     -#endif
3430     -
3431     #define CONNCOUNT_GC_MAX_NODES 8
3432     #define MAX_KEYLEN 5
3433    
3434     @@ -49,8 +43,6 @@ struct nf_conncount_tuple {
3435     struct nf_conntrack_zone zone;
3436     int cpu;
3437     u32 jiffies32;
3438     - bool dead;
3439     - struct rcu_head rcu_head;
3440     };
3441    
3442     struct nf_conncount_rb {
3443     @@ -60,7 +52,7 @@ struct nf_conncount_rb {
3444     struct rcu_head rcu_head;
3445     };
3446    
3447     -static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;
3448     +static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;
3449    
3450     struct nf_conncount_data {
3451     unsigned int keylen;
3452     @@ -89,79 +81,25 @@ static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
3453     return memcmp(a, b, klen * sizeof(u32));
3454     }
3455    
3456     -enum nf_conncount_list_add
3457     -nf_conncount_add(struct nf_conncount_list *list,
3458     - const struct nf_conntrack_tuple *tuple,
3459     - const struct nf_conntrack_zone *zone)
3460     -{
3461     - struct nf_conncount_tuple *conn;
3462     -
3463     - if (WARN_ON_ONCE(list->count > INT_MAX))
3464     - return NF_CONNCOUNT_ERR;
3465     -
3466     - conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
3467     - if (conn == NULL)
3468     - return NF_CONNCOUNT_ERR;
3469     -
3470     - conn->tuple = *tuple;
3471     - conn->zone = *zone;
3472     - conn->cpu = raw_smp_processor_id();
3473     - conn->jiffies32 = (u32)jiffies;
3474     - conn->dead = false;
3475     - spin_lock_bh(&list->list_lock);
3476     - if (list->dead == true) {
3477     - kmem_cache_free(conncount_conn_cachep, conn);
3478     - spin_unlock_bh(&list->list_lock);
3479     - return NF_CONNCOUNT_SKIP;
3480     - }
3481     - list_add_tail(&conn->node, &list->head);
3482     - list->count++;
3483     - spin_unlock_bh(&list->list_lock);
3484     - return NF_CONNCOUNT_ADDED;
3485     -}
3486     -EXPORT_SYMBOL_GPL(nf_conncount_add);
3487     -
3488     -static void __conn_free(struct rcu_head *h)
3489     -{
3490     - struct nf_conncount_tuple *conn;
3491     -
3492     - conn = container_of(h, struct nf_conncount_tuple, rcu_head);
3493     - kmem_cache_free(conncount_conn_cachep, conn);
3494     -}
3495     -
3496     -static bool conn_free(struct nf_conncount_list *list,
3497     +static void conn_free(struct nf_conncount_list *list,
3498     struct nf_conncount_tuple *conn)
3499     {
3500     - bool free_entry = false;
3501     -
3502     - spin_lock_bh(&list->list_lock);
3503     -
3504     - if (conn->dead) {
3505     - spin_unlock_bh(&list->list_lock);
3506     - return free_entry;
3507     - }
3508     + lockdep_assert_held(&list->list_lock);
3509    
3510     list->count--;
3511     - conn->dead = true;
3512     - list_del_rcu(&conn->node);
3513     - if (list->count == 0) {
3514     - list->dead = true;
3515     - free_entry = true;
3516     - }
3517     + list_del(&conn->node);
3518    
3519     - spin_unlock_bh(&list->list_lock);
3520     - call_rcu(&conn->rcu_head, __conn_free);
3521     - return free_entry;
3522     + kmem_cache_free(conncount_conn_cachep, conn);
3523     }
3524    
3525     static const struct nf_conntrack_tuple_hash *
3526     find_or_evict(struct net *net, struct nf_conncount_list *list,
3527     - struct nf_conncount_tuple *conn, bool *free_entry)
3528     + struct nf_conncount_tuple *conn)
3529     {
3530     const struct nf_conntrack_tuple_hash *found;
3531     unsigned long a, b;
3532     int cpu = raw_smp_processor_id();
3533     - __s32 age;
3534     + u32 age;
3535    
3536     found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
3537     if (found)
3538     @@ -176,52 +114,45 @@ find_or_evict(struct net *net, struct nf_conncount_list *list,
3539     */
3540     age = a - b;
3541     if (conn->cpu == cpu || age >= 2) {
3542     - *free_entry = conn_free(list, conn);
3543     + conn_free(list, conn);
3544     return ERR_PTR(-ENOENT);
3545     }
3546    
3547     return ERR_PTR(-EAGAIN);
3548     }
3549    
3550     -void nf_conncount_lookup(struct net *net,
3551     - struct nf_conncount_list *list,
3552     - const struct nf_conntrack_tuple *tuple,
3553     - const struct nf_conntrack_zone *zone,
3554     - bool *addit)
3555     +static int __nf_conncount_add(struct net *net,
3556     + struct nf_conncount_list *list,
3557     + const struct nf_conntrack_tuple *tuple,
3558     + const struct nf_conntrack_zone *zone)
3559     {
3560     const struct nf_conntrack_tuple_hash *found;
3561     struct nf_conncount_tuple *conn, *conn_n;
3562     struct nf_conn *found_ct;
3563     unsigned int collect = 0;
3564     - bool free_entry = false;
3565     -
3566     - /* best effort only */
3567     - *addit = tuple ? true : false;
3568    
3569     /* check the saved connections */
3570     list_for_each_entry_safe(conn, conn_n, &list->head, node) {
3571     if (collect > CONNCOUNT_GC_MAX_NODES)
3572     break;
3573    
3574     - found = find_or_evict(net, list, conn, &free_entry);
3575     + found = find_or_evict(net, list, conn);
3576     if (IS_ERR(found)) {
3577     /* Not found, but might be about to be confirmed */
3578     if (PTR_ERR(found) == -EAGAIN) {
3579     - if (!tuple)
3580     - continue;
3581     -
3582     if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
3583     nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
3584     nf_ct_zone_id(zone, zone->dir))
3585     - *addit = false;
3586     - } else if (PTR_ERR(found) == -ENOENT)
3587     + return 0; /* already exists */
3588     + } else {
3589     collect++;
3590     + }
3591     continue;
3592     }
3593    
3594     found_ct = nf_ct_tuplehash_to_ctrack(found);
3595    
3596     - if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
3597     + if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
3598     nf_ct_zone_equal(found_ct, zone, zone->dir)) {
3599     /*
3600     * We should not see tuples twice unless someone hooks
3601     @@ -229,7 +160,8 @@ void nf_conncount_lookup(struct net *net,
3602     *
3603     * Attempt to avoid a re-add in this case.
3604     */
3605     - *addit = false;
3606     + nf_ct_put(found_ct);
3607     + return 0;
3608     } else if (already_closed(found_ct)) {
3609     /*
3610     * we do not care about connections which are
3611     @@ -243,19 +175,48 @@ void nf_conncount_lookup(struct net *net,
3612    
3613     nf_ct_put(found_ct);
3614     }
3615     +
3616     + if (WARN_ON_ONCE(list->count > INT_MAX))
3617     + return -EOVERFLOW;
3618     +
3619     + conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
3620     + if (conn == NULL)
3621     + return -ENOMEM;
3622     +
3623     + conn->tuple = *tuple;
3624     + conn->zone = *zone;
3625     + conn->cpu = raw_smp_processor_id();
3626     + conn->jiffies32 = (u32)jiffies;
3627     + list_add_tail(&conn->node, &list->head);
3628     + list->count++;
3629     + return 0;
3630     }
3631     -EXPORT_SYMBOL_GPL(nf_conncount_lookup);
3632     +
3633     +int nf_conncount_add(struct net *net,
3634     + struct nf_conncount_list *list,
3635     + const struct nf_conntrack_tuple *tuple,
3636     + const struct nf_conntrack_zone *zone)
3637     +{
3638     + int ret;
3639     +
3640     + /* check the saved connections */
3641     + spin_lock_bh(&list->list_lock);
3642     + ret = __nf_conncount_add(net, list, tuple, zone);
3643     + spin_unlock_bh(&list->list_lock);
3644     +
3645     + return ret;
3646     +}
3647     +EXPORT_SYMBOL_GPL(nf_conncount_add);
3648    
3649     void nf_conncount_list_init(struct nf_conncount_list *list)
3650     {
3651     spin_lock_init(&list->list_lock);
3652     INIT_LIST_HEAD(&list->head);
3653     list->count = 0;
3654     - list->dead = false;
3655     }
3656     EXPORT_SYMBOL_GPL(nf_conncount_list_init);
3657    
3658     -/* Return true if the list is empty */
3659     +/* Return true if the list is empty. Must be called with BH disabled. */
3660     bool nf_conncount_gc_list(struct net *net,
3661     struct nf_conncount_list *list)
3662     {
3663     @@ -263,17 +224,17 @@ bool nf_conncount_gc_list(struct net *net,
3664     struct nf_conncount_tuple *conn, *conn_n;
3665     struct nf_conn *found_ct;
3666     unsigned int collected = 0;
3667     - bool free_entry = false;
3668     bool ret = false;
3669    
3670     + /* don't bother if other cpu is already doing GC */
3671     + if (!spin_trylock(&list->list_lock))
3672     + return false;
3673     +
3674     list_for_each_entry_safe(conn, conn_n, &list->head, node) {
3675     - found = find_or_evict(net, list, conn, &free_entry);
3676     + found = find_or_evict(net, list, conn);
3677     if (IS_ERR(found)) {
3678     - if (PTR_ERR(found) == -ENOENT) {
3679     - if (free_entry)
3680     - return true;
3681     + if (PTR_ERR(found) == -ENOENT)
3682     collected++;
3683     - }
3684     continue;
3685     }
3686    
3687     @@ -284,23 +245,19 @@ bool nf_conncount_gc_list(struct net *net,
3688     * closed already -> ditch it
3689     */
3690     nf_ct_put(found_ct);
3691     - if (conn_free(list, conn))
3692     - return true;
3693     + conn_free(list, conn);
3694     collected++;
3695     continue;
3696     }
3697    
3698     nf_ct_put(found_ct);
3699     if (collected > CONNCOUNT_GC_MAX_NODES)
3700     - return false;
3701     + break;
3702     }
3703    
3704     - spin_lock_bh(&list->list_lock);
3705     - if (!list->count) {
3706     - list->dead = true;
3707     + if (!list->count)
3708     ret = true;
3709     - }
3710     - spin_unlock_bh(&list->list_lock);
3711     + spin_unlock(&list->list_lock);
3712    
3713     return ret;
3714     }
3715     @@ -314,6 +271,7 @@ static void __tree_nodes_free(struct rcu_head *h)
3716     kmem_cache_free(conncount_rb_cachep, rbconn);
3717     }
3718    
3719     +/* caller must hold tree nf_conncount_locks[] lock */
3720     static void tree_nodes_free(struct rb_root *root,
3721     struct nf_conncount_rb *gc_nodes[],
3722     unsigned int gc_count)
3723     @@ -323,8 +281,10 @@ static void tree_nodes_free(struct rb_root *root,
3724     while (gc_count) {
3725     rbconn = gc_nodes[--gc_count];
3726     spin_lock(&rbconn->list.list_lock);
3727     - rb_erase(&rbconn->node, root);
3728     - call_rcu(&rbconn->rcu_head, __tree_nodes_free);
3729     + if (!rbconn->list.count) {
3730     + rb_erase(&rbconn->node, root);
3731     + call_rcu(&rbconn->rcu_head, __tree_nodes_free);
3732     + }
3733     spin_unlock(&rbconn->list.list_lock);
3734     }
3735     }
3736     @@ -341,20 +301,19 @@ insert_tree(struct net *net,
3737     struct rb_root *root,
3738     unsigned int hash,
3739     const u32 *key,
3740     - u8 keylen,
3741     const struct nf_conntrack_tuple *tuple,
3742     const struct nf_conntrack_zone *zone)
3743     {
3744     - enum nf_conncount_list_add ret;
3745     struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
3746     struct rb_node **rbnode, *parent;
3747     struct nf_conncount_rb *rbconn;
3748     struct nf_conncount_tuple *conn;
3749     unsigned int count = 0, gc_count = 0;
3750     - bool node_found = false;
3751     -
3752     - spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
3753     + u8 keylen = data->keylen;
3754     + bool do_gc = true;
3755    
3756     + spin_lock_bh(&nf_conncount_locks[hash]);
3757     +restart:
3758     parent = NULL;
3759     rbnode = &(root->rb_node);
3760     while (*rbnode) {
3761     @@ -368,45 +327,32 @@ insert_tree(struct net *net,
3762     } else if (diff > 0) {
3763     rbnode = &((*rbnode)->rb_right);
3764     } else {
3765     - /* unlikely: other cpu added node already */
3766     - node_found = true;
3767     - ret = nf_conncount_add(&rbconn->list, tuple, zone);
3768     - if (ret == NF_CONNCOUNT_ERR) {
3769     + int ret;
3770     +
3771     + ret = nf_conncount_add(net, &rbconn->list, tuple, zone);
3772     + if (ret)
3773     count = 0; /* hotdrop */
3774     - } else if (ret == NF_CONNCOUNT_ADDED) {
3775     + else
3776     count = rbconn->list.count;
3777     - } else {
3778     - /* NF_CONNCOUNT_SKIP, rbconn is already
3779     - * reclaimed by gc, insert a new tree node
3780     - */
3781     - node_found = false;
3782     - }
3783     - break;
3784     + tree_nodes_free(root, gc_nodes, gc_count);
3785     + goto out_unlock;
3786     }
3787    
3788     if (gc_count >= ARRAY_SIZE(gc_nodes))
3789     continue;
3790    
3791     - if (nf_conncount_gc_list(net, &rbconn->list))
3792     + if (do_gc && nf_conncount_gc_list(net, &rbconn->list))
3793     gc_nodes[gc_count++] = rbconn;
3794     }
3795    
3796     if (gc_count) {
3797     tree_nodes_free(root, gc_nodes, gc_count);
3798     - /* tree_node_free before new allocation permits
3799     - * allocator to re-use newly free'd object.
3800     - *
3801     - * This is a rare event; in most cases we will find
3802     - * existing node to re-use. (or gc_count is 0).
3803     - */
3804     -
3805     - if (gc_count >= ARRAY_SIZE(gc_nodes))
3806     - schedule_gc_worker(data, hash);
3807     + schedule_gc_worker(data, hash);
3808     + gc_count = 0;
3809     + do_gc = false;
3810     + goto restart;
3811     }
3812    
3813     - if (node_found)
3814     - goto out_unlock;
3815     -
3816     /* expected case: match, insert new node */
3817     rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
3818     if (rbconn == NULL)
3819     @@ -430,7 +376,7 @@ insert_tree(struct net *net,
3820     rb_link_node_rcu(&rbconn->node, parent, rbnode);
3821     rb_insert_color(&rbconn->node, root);
3822     out_unlock:
3823     - spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
3824     + spin_unlock_bh(&nf_conncount_locks[hash]);
3825     return count;
3826     }
3827    
3828     @@ -441,7 +387,6 @@ count_tree(struct net *net,
3829     const struct nf_conntrack_tuple *tuple,
3830     const struct nf_conntrack_zone *zone)
3831     {
3832     - enum nf_conncount_list_add ret;
3833     struct rb_root *root;
3834     struct rb_node *parent;
3835     struct nf_conncount_rb *rbconn;
3836     @@ -454,7 +399,6 @@ count_tree(struct net *net,
3837     parent = rcu_dereference_raw(root->rb_node);
3838     while (parent) {
3839     int diff;
3840     - bool addit;
3841    
3842     rbconn = rb_entry(parent, struct nf_conncount_rb, node);
3843    
3844     @@ -464,31 +408,36 @@ count_tree(struct net *net,
3845     } else if (diff > 0) {
3846     parent = rcu_dereference_raw(parent->rb_right);
3847     } else {
3848     - /* same source network -> be counted! */
3849     - nf_conncount_lookup(net, &rbconn->list, tuple, zone,
3850     - &addit);
3851     + int ret;
3852    
3853     - if (!addit)
3854     + if (!tuple) {
3855     + nf_conncount_gc_list(net, &rbconn->list);
3856     return rbconn->list.count;
3857     + }
3858    
3859     - ret = nf_conncount_add(&rbconn->list, tuple, zone);
3860     - if (ret == NF_CONNCOUNT_ERR) {
3861     - return 0; /* hotdrop */
3862     - } else if (ret == NF_CONNCOUNT_ADDED) {
3863     - return rbconn->list.count;
3864     - } else {
3865     - /* NF_CONNCOUNT_SKIP, rbconn is already
3866     - * reclaimed by gc, insert a new tree node
3867     - */
3868     + spin_lock_bh(&rbconn->list.list_lock);
3869     + /* Node might be about to be free'd.
3870     + * We need to defer to insert_tree() in this case.
3871     + */
3872     + if (rbconn->list.count == 0) {
3873     + spin_unlock_bh(&rbconn->list.list_lock);
3874     break;
3875     }
3876     +
3877     + /* same source network -> be counted! */
3878     + ret = __nf_conncount_add(net, &rbconn->list, tuple, zone);
3879     + spin_unlock_bh(&rbconn->list.list_lock);
3880     + if (ret)
3881     + return 0; /* hotdrop */
3882     + else
3883     + return rbconn->list.count;
3884     }
3885     }
3886    
3887     if (!tuple)
3888     return 0;
3889    
3890     - return insert_tree(net, data, root, hash, key, keylen, tuple, zone);
3891     + return insert_tree(net, data, root, hash, key, tuple, zone);
3892     }
3893    
3894     static void tree_gc_worker(struct work_struct *work)
3895     @@ -499,27 +448,47 @@ static void tree_gc_worker(struct work_struct *work)
3896     struct rb_node *node;
3897     unsigned int tree, next_tree, gc_count = 0;
3898    
3899     - tree = data->gc_tree % CONNCOUNT_LOCK_SLOTS;
3900     + tree = data->gc_tree % CONNCOUNT_SLOTS;
3901     root = &data->root[tree];
3902    
3903     + local_bh_disable();
3904     rcu_read_lock();
3905     for (node = rb_first(root); node != NULL; node = rb_next(node)) {
3906     rbconn = rb_entry(node, struct nf_conncount_rb, node);
3907     if (nf_conncount_gc_list(data->net, &rbconn->list))
3908     - gc_nodes[gc_count++] = rbconn;
3909     + gc_count++;
3910     }
3911     rcu_read_unlock();
3912     + local_bh_enable();
3913     +
3914     + cond_resched();
3915    
3916     spin_lock_bh(&nf_conncount_locks[tree]);
3917     + if (gc_count < ARRAY_SIZE(gc_nodes))
3918     + goto next; /* do not bother */
3919    
3920     - if (gc_count) {
3921     - tree_nodes_free(root, gc_nodes, gc_count);
3922     + gc_count = 0;
3923     + node = rb_first(root);
3924     + while (node != NULL) {
3925     + rbconn = rb_entry(node, struct nf_conncount_rb, node);
3926     + node = rb_next(node);
3927     +
3928     + if (rbconn->list.count > 0)
3929     + continue;
3930     +
3931     + gc_nodes[gc_count++] = rbconn;
3932     + if (gc_count >= ARRAY_SIZE(gc_nodes)) {
3933     + tree_nodes_free(root, gc_nodes, gc_count);
3934     + gc_count = 0;
3935     + }
3936     }
3937    
3938     + tree_nodes_free(root, gc_nodes, gc_count);
3939     +next:
3940     clear_bit(tree, data->pending_trees);
3941    
3942     next_tree = (tree + 1) % CONNCOUNT_SLOTS;
3943     - next_tree = find_next_bit(data->pending_trees, next_tree, CONNCOUNT_SLOTS);
3944     + next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);
3945    
3946     if (next_tree < CONNCOUNT_SLOTS) {
3947     data->gc_tree = next_tree;
3948     @@ -621,10 +590,7 @@ static int __init nf_conncount_modinit(void)
3949     {
3950     int i;
3951    
3952     - BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
3953     - BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);
3954     -
3955     - for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
3956     + for (i = 0; i < CONNCOUNT_SLOTS; ++i)
3957     spin_lock_init(&nf_conncount_locks[i]);
3958    
3959     conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
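
The nf_conncount rewrite above collapses the old dead-flag/RCU scheme into one rule: list_lock protects both the list and its count, conn_free() asserts the lock is held, and nf_conncount_gc_list() takes the lock with spin_trylock() so a second CPU skips GC instead of contending (in the kernel this runs with BH disabled). A compressed pthread model of that discipline, with invented names; this is a sketch of the locking shape only, not the real data structure:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* One lock owns both the list head and the count; no per-node
 * dead flags or deferred frees needed. */
struct node {
        struct node *next;
};

struct conn_list {
        pthread_mutex_t lock;
        struct node *head;
        unsigned int count;
};

/* Like __nf_conncount_add(): caller must hold list->lock. */
static int list_add_locked(struct conn_list *l)
{
        struct node *n = calloc(1, sizeof(*n));

        if (!n)
                return -1;
        n->next = l->head;
        l->head = n;
        l->count++;
        return 0;
}

/* Like nf_conncount_add(): take the lock, delegate, drop it. */
static int list_add(struct conn_list *l)
{
        pthread_mutex_lock(&l->lock);
        int ret = list_add_locked(l);
        pthread_mutex_unlock(&l->lock);
        return ret;
}

/* Like nf_conncount_gc_list(): bail out if another thread is already
 * collecting; return 1 when the list ended up empty. */
static int list_gc(struct conn_list *l)
{
        if (pthread_mutex_trylock(&l->lock) != 0)
                return 0;                /* someone else is collecting */
        while (l->head) {
                struct node *n = l->head;

                l->head = n->next;
                l->count--;
                free(n);
        }
        int empty = (l->count == 0);
        pthread_mutex_unlock(&l->lock);
        return empty;
}

int main(void)
{
        struct conn_list l = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

        list_add(&l);
        list_add(&l);
        printf("count=%u empty-after-gc=%d\n", l.count, list_gc(&l));
        return 0;
}
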
3960     diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
3961     index b90d96ba4a12..af1497ab9464 100644
3962     --- a/net/netfilter/nft_connlimit.c
3963     +++ b/net/netfilter/nft_connlimit.c
3964     @@ -30,7 +30,6 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
3965     enum ip_conntrack_info ctinfo;
3966     const struct nf_conn *ct;
3967     unsigned int count;
3968     - bool addit;
3969    
3970     tuple_ptr = &tuple;
3971    
3972     @@ -44,19 +43,12 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
3973     return;
3974     }
3975    
3976     - nf_conncount_lookup(nft_net(pkt), &priv->list, tuple_ptr, zone,
3977     - &addit);
3978     - count = priv->list.count;
3979     -
3980     - if (!addit)
3981     - goto out;
3982     -
3983     - if (nf_conncount_add(&priv->list, tuple_ptr, zone) == NF_CONNCOUNT_ERR) {
3984     + if (nf_conncount_add(nft_net(pkt), &priv->list, tuple_ptr, zone)) {
3985     regs->verdict.code = NF_DROP;
3986     return;
3987     }
3988     - count++;
3989     -out:
3990     +
3991     + count = priv->list.count;
3992    
3993     if ((count > priv->limit) ^ priv->invert) {
3994     regs->verdict.code = NFT_BREAK;
3995     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3996     index eedacdebcd4c..d0945253f43b 100644
3997     --- a/net/packet/af_packet.c
3998     +++ b/net/packet/af_packet.c
3999     @@ -2628,7 +2628,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
4000     addr = saddr->sll_halen ? saddr->sll_addr : NULL;
4001     dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
4002     if (addr && dev && saddr->sll_halen < dev->addr_len)
4003     - goto out;
4004     + goto out_put;
4005     }
4006    
4007     err = -ENXIO;
4008     @@ -2828,7 +2828,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
4009     addr = saddr->sll_halen ? saddr->sll_addr : NULL;
4010     dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
4011     if (addr && dev && saddr->sll_halen < dev->addr_len)
4012     - goto out;
4013     + goto out_unlock;
4014     }
4015    
4016     err = -ENXIO;
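
Both af_packet hunks fix the same leak: dev_get_by_index() has already taken a device reference by the time the sll_halen check fails, so bailing out through the plain out label skipped the dev_put(). A skeleton of the corrected goto discipline, with toy reference-counting helpers standing in for the net_device API:

#include <stdio.h>

struct device { int refs; };

static struct device dev0;

static struct device *dev_get(void)       /* takes a reference */
{
        dev0.refs++;
        return &dev0;
}

static void dev_put(struct device *d)     /* drops it */
{
        d->refs--;
}

/*
 * Skeleton of the fixed error path: once the reference is held, every
 * failure must exit through the label that releases it.
 */
static int send_one(int bad_addr)
{
        int err = 0;
        struct device *dev = dev_get();

        if (bad_addr) {
                err = -1;
                goto out_put;   /* was "goto out" -> leaked dev */
        }
        /* ... transmit ... */
out_put:
        dev_put(dev);
        return err;
}

int main(void)
{
        send_one(1);
        printf("refs after failed send: %d\n", dev0.refs);  /* 0, no leak */
        return 0;
}
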
4017     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
4018     index 7f0539db5604..0bae07e9c9e7 100644
4019     --- a/net/sctp/ipv6.c
4020     +++ b/net/sctp/ipv6.c
4021     @@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
4022    
4023     switch (ev) {
4024     case NETDEV_UP:
4025     - addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
4026     + addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
4027     if (addr) {
4028     addr->a.v6.sin6_family = AF_INET6;
4029     - addr->a.v6.sin6_port = 0;
4030     - addr->a.v6.sin6_flowinfo = 0;
4031     addr->a.v6.sin6_addr = ifa->addr;
4032     addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
4033     addr->valid = 1;
4034     @@ -431,7 +429,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
4035     addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
4036     if (addr) {
4037     addr->a.v6.sin6_family = AF_INET6;
4038     - addr->a.v6.sin6_port = 0;
4039     addr->a.v6.sin6_addr = ifp->addr;
4040     addr->a.v6.sin6_scope_id = dev->ifindex;
4041     addr->valid = 1;
4042     diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
4043     index 9b277bd36d1a..85af878f5668 100644
4044     --- a/net/sctp/protocol.c
4045     +++ b/net/sctp/protocol.c
4046     @@ -101,7 +101,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
4047     addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
4048     if (addr) {
4049     addr->a.v4.sin_family = AF_INET;
4050     - addr->a.v4.sin_port = 0;
4051     addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
4052     addr->valid = 1;
4053     INIT_LIST_HEAD(&addr->list);
4054     @@ -776,10 +775,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
4055    
4056     switch (ev) {
4057     case NETDEV_UP:
4058     - addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
4059     + addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
4060     if (addr) {
4061     addr->a.v4.sin_family = AF_INET;
4062     - addr->a.v4.sin_port = 0;
4063     addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
4064     addr->valid = 1;
4065     spin_lock_bh(&net->sctp.local_addr_lock);
4066     diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
4067     index 82cb0e5634bc..5d2214183601 100644
4068     --- a/net/smc/af_smc.c
4069     +++ b/net/smc/af_smc.c
4070     @@ -146,6 +146,9 @@ static int smc_release(struct socket *sock)
4071     sock_set_flag(sk, SOCK_DEAD);
4072     sk->sk_shutdown |= SHUTDOWN_MASK;
4073     }
4074     +
4075     + sk->sk_prot->unhash(sk);
4076     +
4077     if (smc->clcsock) {
4078     if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
4079     /* wake up clcsock accept */
4080     @@ -170,7 +173,6 @@ static int smc_release(struct socket *sock)
4081     smc_conn_free(&smc->conn);
4082     release_sock(sk);
4083    
4084     - sk->sk_prot->unhash(sk);
4085     sock_put(sk); /* final sock_put */
4086     out:
4087     return rc;
4088     diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
4089     index c7872bc13860..08b5fa4a2852 100644
4090     --- a/net/sunrpc/rpcb_clnt.c
4091     +++ b/net/sunrpc/rpcb_clnt.c
4092     @@ -771,6 +771,12 @@ void rpcb_getport_async(struct rpc_task *task)
4093     case RPCBVERS_3:
4094     map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
4095     map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
4096     + if (!map->r_addr) {
4097     + status = -ENOMEM;
4098     + dprintk("RPC: %5u %s: no memory available\n",
4099     + task->tk_pid, __func__);
4100     + goto bailout_free_args;
4101     + }
4102     map->r_owner = "";
4103     break;
4104     case RPCBVERS_2:
4105     @@ -793,6 +799,8 @@ void rpcb_getport_async(struct rpc_task *task)
4106     rpc_put_task(child);
4107     return;
4108    
4109     +bailout_free_args:
4110     + kfree(map);
4111     bailout_release_client:
4112     rpc_release_client(rpcb_clnt);
4113     bailout_nofree:
4114     diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
4115     index f0b3700cec95..9cdbb6d6e7f5 100644
4116     --- a/net/sunrpc/xprtsock.c
4117     +++ b/net/sunrpc/xprtsock.c
4118     @@ -48,6 +48,7 @@
4119     #include <net/udp.h>
4120     #include <net/tcp.h>
4121     #include <linux/bvec.h>
4122     +#include <linux/highmem.h>
4123     #include <linux/uio.h>
4124    
4125     #include <trace/events/sunrpc.h>
4126     @@ -380,6 +381,26 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
4127     return sock_recvmsg(sock, msg, flags);
4128     }
4129    
4130     +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
4131     +static void
4132     +xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
4133     +{
4134     + struct bvec_iter bi = {
4135     + .bi_size = count,
4136     + };
4137     + struct bio_vec bv;
4138     +
4139     + bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
4140     + for_each_bvec(bv, bvec, bi, bi)
4141     + flush_dcache_page(bv.bv_page);
4142     +}
4143     +#else
4144     +static inline void
4145     +xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
4146     +{
4147     +}
4148     +#endif
4149     +
4150     static ssize_t
4151     xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
4152     struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
4153     @@ -413,6 +434,7 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
4154     seek + buf->page_base);
4155     if (ret <= 0)
4156     goto sock_err;
4157     + xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
4158     offset += ret - buf->page_base;
4159     if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
4160     goto out;
4161     diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
4162     index 6376467e78f8..0b21187d74df 100644
4163     --- a/net/tipc/netlink_compat.c
4164     +++ b/net/tipc/netlink_compat.c
4165     @@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
4166     return limit;
4167     }
4168    
4169     +static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
4170     +{
4171     + return TLV_GET_LEN(tlv) - TLV_SPACE(0);
4172     +}
4173     +
4174     static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
4175     {
4176     struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
4177     @@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
4178     return buf;
4179     }
4180    
4181     +static inline bool string_is_valid(char *s, int len)
4182     +{
4183     + return memchr(s, '\0', len) ? true : false;
4184     +}
4185     +
4186     static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
4187     struct tipc_nl_compat_msg *msg,
4188     struct sk_buff *arg)
4189     @@ -379,6 +389,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
4190     struct nlattr *prop;
4191     struct nlattr *bearer;
4192     struct tipc_bearer_config *b;
4193     + int len;
4194    
4195     b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
4196    
4197     @@ -386,6 +397,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
4198     if (!bearer)
4199     return -EMSGSIZE;
4200    
4201     + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
4202     + if (!string_is_valid(b->name, len))
4203     + return -EINVAL;
4204     +
4205     if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
4206     return -EMSGSIZE;
4207    
4208     @@ -411,6 +426,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
4209     {
4210     char *name;
4211     struct nlattr *bearer;
4212     + int len;
4213    
4214     name = (char *)TLV_DATA(msg->req);
4215    
4216     @@ -418,6 +434,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
4217     if (!bearer)
4218     return -EMSGSIZE;
4219    
4220     + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
4221     + if (!string_is_valid(name, len))
4222     + return -EINVAL;
4223     +
4224     if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
4225     return -EMSGSIZE;
4226    
4227     @@ -478,6 +498,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
4228     struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
4229     struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
4230     int err;
4231     + int len;
4232    
4233     if (!attrs[TIPC_NLA_LINK])
4234     return -EINVAL;
4235     @@ -504,6 +525,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
4236     return err;
4237    
4238     name = (char *)TLV_DATA(msg->req);
4239     +
4240     + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
4241     + if (!string_is_valid(name, len))
4242     + return -EINVAL;
4243     +
4244     if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
4245     return 0;
4246    
4247     @@ -644,6 +670,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
4248     struct nlattr *prop;
4249     struct nlattr *media;
4250     struct tipc_link_config *lc;
4251     + int len;
4252    
4253     lc = (struct tipc_link_config *)TLV_DATA(msg->req);
4254    
4255     @@ -651,6 +678,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
4256     if (!media)
4257     return -EMSGSIZE;
4258    
4259     + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
4260     + if (!string_is_valid(lc->name, len))
4261     + return -EINVAL;
4262     +
4263     if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
4264     return -EMSGSIZE;
4265    
4266     @@ -671,6 +702,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
4267     struct nlattr *prop;
4268     struct nlattr *bearer;
4269     struct tipc_link_config *lc;
4270     + int len;
4271    
4272     lc = (struct tipc_link_config *)TLV_DATA(msg->req);
4273    
4274     @@ -678,6 +710,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
4275     if (!bearer)
4276     return -EMSGSIZE;
4277    
4278     + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
4279     + if (!string_is_valid(lc->name, len))
4280     + return -EINVAL;
4281     +
4282     if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
4283     return -EMSGSIZE;
4284    
4285     @@ -726,9 +762,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
4286     struct tipc_link_config *lc;
4287     struct tipc_bearer *bearer;
4288     struct tipc_media *media;
4289     + int len;
4290    
4291     lc = (struct tipc_link_config *)TLV_DATA(msg->req);
4292    
4293     + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
4294     + if (!string_is_valid(lc->name, len))
4295     + return -EINVAL;
4296     +
4297     media = tipc_media_find(lc->name);
4298     if (media) {
4299     cmd->doit = &__tipc_nl_media_set;
4300     @@ -750,6 +791,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
4301     {
4302     char *name;
4303     struct nlattr *link;
4304     + int len;
4305    
4306     name = (char *)TLV_DATA(msg->req);
4307    
4308     @@ -757,6 +799,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
4309     if (!link)
4310     return -EMSGSIZE;
4311    
4312     + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
4313     + if (!string_is_valid(name, len))
4314     + return -EINVAL;
4315     +
4316     if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
4317     return -EMSGSIZE;
4318    
4319     @@ -778,6 +824,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
4320     };
4321    
4322     ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
4323     + if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
4324     + return -EINVAL;
4325    
4326     depth = ntohl(ntq->depth);
4327    
4328     @@ -1201,7 +1249,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
4329     }
4330    
4331     len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
4332     - if (len && !TLV_OK(msg.req, len)) {
4333     + if (!len || !TLV_OK(msg.req, len)) {
4334     msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
4335     err = -EOPNOTSUPP;
4336     goto send;
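
string_is_valid() is the core of all the netlink_compat hunks above: a name field inside a user-supplied TLV may legally lack a NUL terminator, so every handler now bounds the search by the smaller of the TLV payload length and the field's maximum before trusting the string. A standalone sketch of that check (TIPC_MAX_BEARER_NAME taken as 32, per the TIPC headers):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define TIPC_MAX_BEARER_NAME 32

/* Same check the patch adds: a string inside an untrusted TLV is only
 * usable if a NUL terminator exists within the bytes we own. */
static bool string_is_valid(const char *s, int len)
{
        return memchr(s, '\0', len) != NULL;
}

static int handle_bearer_tlv(const char *payload, int payload_len)
{
        int len = payload_len < TIPC_MAX_BEARER_NAME ?
                  payload_len : TIPC_MAX_BEARER_NAME;

        if (!string_is_valid(payload, len))
                return -1;               /* -EINVAL in the kernel */
        printf("bearer name: %s\n", payload);
        return 0;
}

int main(void)
{
        char good[8] = "eth0";           /* NUL-terminated within the TLV */
        char evil[8] = { 'e','t','h','0','e','t','h','0' };  /* no NUL */

        printf("good: %d\n", handle_bearer_tlv(good, sizeof(good)));
        printf("evil: %d\n", handle_bearer_tlv(evil, sizeof(evil)));
        return 0;
}

The point of taking the min first is that memchr() itself must never scan past the TLV payload: validating with the field maximum alone would read out of bounds on a short TLV.
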
4337     diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
4338     index efb16f69bd2c..a457c0fbbef1 100644
4339     --- a/net/tipc/topsrv.c
4340     +++ b/net/tipc/topsrv.c
4341     @@ -398,7 +398,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
4342     ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
4343     if (ret == -EWOULDBLOCK)
4344     return -EWOULDBLOCK;
4345     - if (ret > 0) {
4346     + if (ret == sizeof(s)) {
4347     read_lock_bh(&sk->sk_callback_lock);
4348     ret = tipc_conn_rcv_sub(srv, con, &s);
4349     read_unlock_bh(&sk->sk_callback_lock);
4350     diff --git a/security/security.c b/security/security.c
4351     index 04d173eb93f6..414a45d70c7b 100644
4352     --- a/security/security.c
4353     +++ b/security/security.c
4354     @@ -1014,6 +1014,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
4355    
4356     void security_cred_free(struct cred *cred)
4357     {
4358     + /*
4359     + * There is a failure case in prepare_creds() that
4360     + * may result in a call here with ->security being NULL.
4361     + */
4362     + if (unlikely(cred->security == NULL))
4363     + return;
4364     +
4365     call_void_hook(cred_free, cred);
4366     }
4367    
4368     diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
4369     index b63ef865ce1e..d31a52e56b9e 100644
4370     --- a/security/selinux/ss/policydb.c
4371     +++ b/security/selinux/ss/policydb.c
4372     @@ -732,7 +732,8 @@ static int sens_destroy(void *key, void *datum, void *p)
4373     kfree(key);
4374     if (datum) {
4375     levdatum = datum;
4376     - ebitmap_destroy(&levdatum->level->cat);
4377     + if (levdatum->level)
4378     + ebitmap_destroy(&levdatum->level->cat);
4379     kfree(levdatum->level);
4380     }
4381     kfree(datum);
4382     diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
4383     index ffda91a4a1aa..02514fe558b4 100644
4384     --- a/security/yama/yama_lsm.c
4385     +++ b/security/yama/yama_lsm.c
4386     @@ -368,7 +368,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
4387     break;
4388     case YAMA_SCOPE_RELATIONAL:
4389     rcu_read_lock();
4390     - if (!task_is_descendant(current, child) &&
4391     + if (!pid_alive(child))
4392     + rc = -EPERM;
4393     + if (!rc && !task_is_descendant(current, child) &&
4394     !ptracer_exception_found(current, child) &&
4395     !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
4396     rc = -EPERM;