Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0171-4.9.72-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3608 - (hide annotations) (download)
Fri Aug 14 07:34:29 2020 UTC (3 years, 9 months ago) by niro
File size: 128166 byte(s)
-added kernel-alx-legacy pkg
1 niro 3608 diff --git a/Makefile b/Makefile
2     index 5f2736bb4877..78dde51d9d74 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 71
9     +SUBLEVEL = 72
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
14     index 975c36e332a2..8e6b3938bef9 100644
15     --- a/arch/arm/boot/dts/am335x-evmsk.dts
16     +++ b/arch/arm/boot/dts/am335x-evmsk.dts
17     @@ -668,6 +668,7 @@
18     ti,non-removable;
19     bus-width = <4>;
20     cap-power-off-card;
21     + keep-power-in-suspend;
22     pinctrl-names = "default";
23     pinctrl-0 = <&mmc2_pins>;
24    
25     diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
26     index 064d84f87e45..ce54a70b7695 100644
27     --- a/arch/arm/boot/dts/dra7.dtsi
28     +++ b/arch/arm/boot/dts/dra7.dtsi
29     @@ -282,6 +282,7 @@
30     device_type = "pci";
31     ranges = <0x81000000 0 0 0x03000 0 0x00010000
32     0x82000000 0 0x20013000 0x13000 0 0xffed000>;
33     + bus-range = <0x00 0xff>;
34     #interrupt-cells = <1>;
35     num-lanes = <1>;
36     linux,pci-domain = <0>;
37     @@ -318,6 +319,7 @@
38     device_type = "pci";
39     ranges = <0x81000000 0 0 0x03000 0 0x00010000
40     0x82000000 0 0x30013000 0x13000 0 0xffed000>;
41     + bus-range = <0x00 0xff>;
42     #interrupt-cells = <1>;
43     num-lanes = <1>;
44     linux,pci-domain = <1>;
45     diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
46     index ab7710002ba6..00e9e79b6cb8 100644
47     --- a/arch/arm/mm/dma-mapping.c
48     +++ b/arch/arm/mm/dma-mapping.c
49     @@ -930,13 +930,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
50     __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
51     }
52    
53     +/*
54     + * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
55     + * that the intention is to allow exporting memory allocated via the
56     + * coherent DMA APIs through the dma_buf API, which only accepts a
57     + * scattertable. This presents a couple of problems:
58     + * 1. Not all memory allocated via the coherent DMA APIs is backed by
59     + * a struct page
60     + * 2. Passing coherent DMA memory into the streaming APIs is not allowed
61     + * as we will try to flush the memory through a different alias to that
62     + * actually being used (and the flushes are redundant.)
63     + */
64     int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
65     void *cpu_addr, dma_addr_t handle, size_t size,
66     unsigned long attrs)
67     {
68     - struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
69     + unsigned long pfn = dma_to_pfn(dev, handle);
70     + struct page *page;
71     int ret;
72    
73     + /* If the PFN is not valid, we do not have a struct page */
74     + if (!pfn_valid(pfn))
75     + return -ENXIO;
76     +
77     + page = pfn_to_page(pfn);
78     +
79     ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
80     if (unlikely(ret))
81     return ret;
82     diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
83     index a4ec240ee7ba..3eb018fa1a1f 100644
84     --- a/arch/arm/probes/kprobes/core.c
85     +++ b/arch/arm/probes/kprobes/core.c
86     @@ -433,6 +433,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
87     struct hlist_node *tmp;
88     unsigned long flags, orig_ret_address = 0;
89     unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
90     + kprobe_opcode_t *correct_ret_addr = NULL;
91    
92     INIT_HLIST_HEAD(&empty_rp);
93     kretprobe_hash_lock(current, &head, &flags);
94     @@ -455,14 +456,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
95     /* another task is sharing our hash bucket */
96     continue;
97    
98     + orig_ret_address = (unsigned long)ri->ret_addr;
99     +
100     + if (orig_ret_address != trampoline_address)
101     + /*
102     + * This is the real return address. Any other
103     + * instances associated with this task are for
104     + * other calls deeper on the call stack
105     + */
106     + break;
107     + }
108     +
109     + kretprobe_assert(ri, orig_ret_address, trampoline_address);
110     +
111     + correct_ret_addr = ri->ret_addr;
112     + hlist_for_each_entry_safe(ri, tmp, head, hlist) {
113     + if (ri->task != current)
114     + /* another task is sharing our hash bucket */
115     + continue;
116     +
117     + orig_ret_address = (unsigned long)ri->ret_addr;
118     if (ri->rp && ri->rp->handler) {
119     __this_cpu_write(current_kprobe, &ri->rp->kp);
120     get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
121     + ri->ret_addr = correct_ret_addr;
122     ri->rp->handler(ri, regs);
123     __this_cpu_write(current_kprobe, NULL);
124     }
125    
126     - orig_ret_address = (unsigned long)ri->ret_addr;
127     recycle_rp_inst(ri, &empty_rp);
128    
129     if (orig_ret_address != trampoline_address)
130     @@ -474,7 +495,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
131     break;
132     }
133    
134     - kretprobe_assert(ri, orig_ret_address, trampoline_address);
135     kretprobe_hash_unlock(current, &flags);
136    
137     hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
138     diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
139     index 9775de22e2ff..a48354de1aa1 100644
140     --- a/arch/arm/probes/kprobes/test-core.c
141     +++ b/arch/arm/probes/kprobes/test-core.c
142     @@ -976,7 +976,10 @@ static void coverage_end(void)
143     void __naked __kprobes_test_case_start(void)
144     {
145     __asm__ __volatile__ (
146     - "stmdb sp!, {r4-r11} \n\t"
147     + "mov r2, sp \n\t"
148     + "bic r3, r2, #7 \n\t"
149     + "mov sp, r3 \n\t"
150     + "stmdb sp!, {r2-r11} \n\t"
151     "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
152     "bic r0, lr, #1 @ r0 = inline data \n\t"
153     "mov r1, sp \n\t"
154     @@ -996,7 +999,8 @@ void __naked __kprobes_test_case_end_32(void)
155     "movne pc, r0 \n\t"
156     "mov r0, r4 \n\t"
157     "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
158     - "ldmia sp!, {r4-r11} \n\t"
159     + "ldmia sp!, {r2-r11} \n\t"
160     + "mov sp, r2 \n\t"
161     "mov pc, r0 \n\t"
162     );
163     }
164     @@ -1012,7 +1016,8 @@ void __naked __kprobes_test_case_end_16(void)
165     "bxne r0 \n\t"
166     "mov r0, r4 \n\t"
167     "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
168     - "ldmia sp!, {r4-r11} \n\t"
169     + "ldmia sp!, {r2-r11} \n\t"
170     + "mov sp, r2 \n\t"
171     "bx r0 \n\t"
172     );
173     }
174     diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
175     index 380ebe705093..9b8b477c363d 100644
176     --- a/arch/arm64/mm/init.c
177     +++ b/arch/arm64/mm/init.c
178     @@ -296,6 +296,7 @@ void __init arm64_memblock_init(void)
179     arm64_dma_phys_limit = max_zone_dma_phys();
180     else
181     arm64_dma_phys_limit = PHYS_MASK + 1;
182     + high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
183     dma_contiguous_reserve(arm64_dma_phys_limit);
184    
185     memblock_allow_resize();
186     @@ -322,7 +323,6 @@ void __init bootmem_init(void)
187     sparse_init();
188     zone_sizes_init(min, max);
189    
190     - high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
191     memblock_dump_all();
192     }
193    
194     diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
195     index 9ade60ca08e0..7f2519cfb5d2 100644
196     --- a/arch/mips/math-emu/cp1emu.c
197     +++ b/arch/mips/math-emu/cp1emu.c
198     @@ -1781,7 +1781,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
199     SPFROMREG(fs, MIPSInst_FS(ir));
200     SPFROMREG(fd, MIPSInst_FD(ir));
201     rv.s = ieee754sp_maddf(fd, fs, ft);
202     - break;
203     + goto copcsr;
204     }
205    
206     case fmsubf_op: {
207     @@ -1794,7 +1794,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
208     SPFROMREG(fs, MIPSInst_FS(ir));
209     SPFROMREG(fd, MIPSInst_FD(ir));
210     rv.s = ieee754sp_msubf(fd, fs, ft);
211     - break;
212     + goto copcsr;
213     }
214    
215     case frint_op: {
216     @@ -1818,7 +1818,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
217     SPFROMREG(fs, MIPSInst_FS(ir));
218     rv.w = ieee754sp_2008class(fs);
219     rfmt = w_fmt;
220     - break;
221     + goto copcsr;
222     }
223    
224     case fmin_op: {
225     @@ -1830,7 +1830,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
226     SPFROMREG(ft, MIPSInst_FT(ir));
227     SPFROMREG(fs, MIPSInst_FS(ir));
228     rv.s = ieee754sp_fmin(fs, ft);
229     - break;
230     + goto copcsr;
231     }
232    
233     case fmina_op: {
234     @@ -1842,7 +1842,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
235     SPFROMREG(ft, MIPSInst_FT(ir));
236     SPFROMREG(fs, MIPSInst_FS(ir));
237     rv.s = ieee754sp_fmina(fs, ft);
238     - break;
239     + goto copcsr;
240     }
241    
242     case fmax_op: {
243     @@ -1854,7 +1854,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
244     SPFROMREG(ft, MIPSInst_FT(ir));
245     SPFROMREG(fs, MIPSInst_FS(ir));
246     rv.s = ieee754sp_fmax(fs, ft);
247     - break;
248     + goto copcsr;
249     }
250    
251     case fmaxa_op: {
252     @@ -1866,7 +1866,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
253     SPFROMREG(ft, MIPSInst_FT(ir));
254     SPFROMREG(fs, MIPSInst_FS(ir));
255     rv.s = ieee754sp_fmaxa(fs, ft);
256     - break;
257     + goto copcsr;
258     }
259    
260     case fabs_op:
261     @@ -2110,7 +2110,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
262     DPFROMREG(fs, MIPSInst_FS(ir));
263     DPFROMREG(fd, MIPSInst_FD(ir));
264     rv.d = ieee754dp_maddf(fd, fs, ft);
265     - break;
266     + goto copcsr;
267     }
268    
269     case fmsubf_op: {
270     @@ -2123,7 +2123,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
271     DPFROMREG(fs, MIPSInst_FS(ir));
272     DPFROMREG(fd, MIPSInst_FD(ir));
273     rv.d = ieee754dp_msubf(fd, fs, ft);
274     - break;
275     + goto copcsr;
276     }
277    
278     case frint_op: {
279     @@ -2147,7 +2147,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
280     DPFROMREG(fs, MIPSInst_FS(ir));
281     rv.w = ieee754dp_2008class(fs);
282     rfmt = w_fmt;
283     - break;
284     + goto copcsr;
285     }
286    
287     case fmin_op: {
288     @@ -2159,7 +2159,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
289     DPFROMREG(ft, MIPSInst_FT(ir));
290     DPFROMREG(fs, MIPSInst_FS(ir));
291     rv.d = ieee754dp_fmin(fs, ft);
292     - break;
293     + goto copcsr;
294     }
295    
296     case fmina_op: {
297     @@ -2171,7 +2171,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
298     DPFROMREG(ft, MIPSInst_FT(ir));
299     DPFROMREG(fs, MIPSInst_FS(ir));
300     rv.d = ieee754dp_fmina(fs, ft);
301     - break;
302     + goto copcsr;
303     }
304    
305     case fmax_op: {
306     @@ -2183,7 +2183,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
307     DPFROMREG(ft, MIPSInst_FT(ir));
308     DPFROMREG(fs, MIPSInst_FS(ir));
309     rv.d = ieee754dp_fmax(fs, ft);
310     - break;
311     + goto copcsr;
312     }
313    
314     case fmaxa_op: {
315     @@ -2195,7 +2195,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
316     DPFROMREG(ft, MIPSInst_FT(ir));
317     DPFROMREG(fs, MIPSInst_FS(ir));
318     rv.d = ieee754dp_fmaxa(fs, ft);
319     - break;
320     + goto copcsr;
321     }
322    
323     case fabs_op:
324     diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
325     index c7f2a5295b3a..83a73cf5116a 100644
326     --- a/arch/sparc/mm/srmmu.c
327     +++ b/arch/sparc/mm/srmmu.c
328     @@ -54,6 +54,7 @@
329     enum mbus_module srmmu_modtype;
330     static unsigned int hwbug_bitmask;
331     int vac_cache_size;
332     +EXPORT_SYMBOL(vac_cache_size);
333     int vac_line_size;
334    
335     extern struct resource sparc_iomap;
336     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
337     index d29c745f10ad..0a324e120942 100644
338     --- a/arch/x86/kvm/mmu.c
339     +++ b/arch/x86/kvm/mmu.c
340     @@ -5052,13 +5052,13 @@ int kvm_mmu_module_init(void)
341     {
342     pte_list_desc_cache = kmem_cache_create("pte_list_desc",
343     sizeof(struct pte_list_desc),
344     - 0, 0, NULL);
345     + 0, SLAB_ACCOUNT, NULL);
346     if (!pte_list_desc_cache)
347     goto nomem;
348    
349     mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
350     sizeof(struct kvm_mmu_page),
351     - 0, 0, NULL);
352     + 0, SLAB_ACCOUNT, NULL);
353     if (!mmu_page_header_cache)
354     goto nomem;
355    
356     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
357     index 23f1a6bd7a0d..8148d8ca7930 100644
358     --- a/arch/x86/kvm/svm.c
359     +++ b/arch/x86/kvm/svm.c
360     @@ -1382,6 +1382,9 @@ static void avic_vm_destroy(struct kvm *kvm)
361     unsigned long flags;
362     struct kvm_arch *vm_data = &kvm->arch;
363    
364     + if (!avic)
365     + return;
366     +
367     avic_free_vm_id(vm_data->avic_vm_id);
368    
369     if (vm_data->avic_logical_id_table_page)
370     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
371     index a929ca03b7ed..263e56059fd5 100644
372     --- a/arch/x86/kvm/vmx.c
373     +++ b/arch/x86/kvm/vmx.c
374     @@ -1199,6 +1199,11 @@ static inline bool cpu_has_vmx_invvpid_global(void)
375     return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
376     }
377    
378     +static inline bool cpu_has_vmx_invvpid(void)
379     +{
380     + return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
381     +}
382     +
383     static inline bool cpu_has_vmx_ept(void)
384     {
385     return vmcs_config.cpu_based_2nd_exec_ctrl &
386     @@ -3816,6 +3821,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
387     __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
388     }
389    
390     +static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
391     +{
392     + if (enable_ept)
393     + vmx_flush_tlb(vcpu);
394     +}
395     +
396     static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
397     {
398     ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
399     @@ -6428,8 +6439,10 @@ static __init int hardware_setup(void)
400     if (boot_cpu_has(X86_FEATURE_NX))
401     kvm_enable_efer_bits(EFER_NX);
402    
403     - if (!cpu_has_vmx_vpid())
404     + if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
405     + !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
406     enable_vpid = 0;
407     +
408     if (!cpu_has_vmx_shadow_vmcs())
409     enable_shadow_vmcs = 0;
410     if (enable_shadow_vmcs)
411     @@ -8494,6 +8507,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
412     } else {
413     sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
414     sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
415     + vmx_flush_tlb_ept_only(vcpu);
416     }
417     vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
418    
419     @@ -8519,8 +8533,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
420     */
421     if (!is_guest_mode(vcpu) ||
422     !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
423     - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
424     + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
425     vmcs_write64(APIC_ACCESS_ADDR, hpa);
426     + vmx_flush_tlb_ept_only(vcpu);
427     + }
428     }
429    
430     static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
431     @@ -10093,6 +10109,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
432     if (nested_cpu_has_ept(vmcs12)) {
433     kvm_mmu_unload(vcpu);
434     nested_ept_init_mmu_context(vcpu);
435     + } else if (nested_cpu_has2(vmcs12,
436     + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
437     + vmx_flush_tlb_ept_only(vcpu);
438     }
439    
440     if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
441     @@ -10833,6 +10852,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
442     vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
443     vmx_set_virtual_x2apic_mode(vcpu,
444     vcpu->arch.apic_base & X2APIC_ENABLE);
445     + } else if (!nested_cpu_has_ept(vmcs12) &&
446     + nested_cpu_has2(vmcs12,
447     + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
448     + vmx_flush_tlb_ept_only(vcpu);
449     }
450    
451     /* This is needed for same reason as it was needed in prepare_vmcs02 */
452     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
453     index 26b580ad268f..f4d893713d54 100644
454     --- a/arch/x86/kvm/x86.c
455     +++ b/arch/x86/kvm/x86.c
456     @@ -8443,11 +8443,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
457     {
458     struct x86_exception fault;
459    
460     - trace_kvm_async_pf_ready(work->arch.token, work->gva);
461     if (work->wakeup_all)
462     work->arch.token = ~0; /* broadcast wakeup */
463     else
464     kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
465     + trace_kvm_async_pf_ready(work->arch.token, work->gva);
466    
467     if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
468     !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
469     diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
470     index 6441dfda489f..a7c5b79371a7 100644
471     --- a/drivers/base/power/opp/core.c
472     +++ b/drivers/base/power/opp/core.c
473     @@ -331,7 +331,7 @@ int dev_pm_opp_get_opp_count(struct device *dev)
474     opp_table = _find_opp_table(dev);
475     if (IS_ERR(opp_table)) {
476     count = PTR_ERR(opp_table);
477     - dev_err(dev, "%s: OPP table not found (%d)\n",
478     + dev_dbg(dev, "%s: OPP table not found (%d)\n",
479     __func__, count);
480     goto out_unlock;
481     }
482     diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
483     index 98b767d3171e..7d506cb73e54 100644
484     --- a/drivers/block/nbd.c
485     +++ b/drivers/block/nbd.c
486     @@ -654,7 +654,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
487     return nbd_size_set(nbd, bdev, nbd->blksize, arg);
488    
489     case NBD_SET_TIMEOUT:
490     - nbd->tag_set.timeout = arg * HZ;
491     + if (arg) {
492     + nbd->tag_set.timeout = arg * HZ;
493     + blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
494     + }
495     return 0;
496    
497     case NBD_SET_FLAGS:
498     diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
499     index 3ae950c82922..693028659ccc 100644
500     --- a/drivers/bluetooth/btusb.c
501     +++ b/drivers/bluetooth/btusb.c
502     @@ -1059,10 +1059,6 @@ static int btusb_open(struct hci_dev *hdev)
503     }
504    
505     data->intf->needs_remote_wakeup = 1;
506     - /* device specific wakeup source enabled and required for USB
507     - * remote wakeup while host is suspended
508     - */
509     - device_wakeup_enable(&data->udev->dev);
510    
511     if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
512     goto done;
513     @@ -1126,7 +1122,6 @@ static int btusb_close(struct hci_dev *hdev)
514     goto failed;
515    
516     data->intf->needs_remote_wakeup = 0;
517     - device_wakeup_disable(&data->udev->dev);
518     usb_autopm_put_interface(data->intf);
519    
520     failed:
521     diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
522     index df97e25aec76..9fe0939c1273 100644
523     --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
524     +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
525     @@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents,
526     0x150, 0, 4, 24, 2, BIT(31),
527     CLK_SET_RATE_PARENT);
528    
529     -static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
530     +static SUNXI_CCU_GATE(hdmi_ddc_clk, "ddc", "osc24M", 0x150, BIT(30), 0);
531    
532     static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
533    
534     diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
535     index 530f255a898b..35e34c0e0429 100644
536     --- a/drivers/cpufreq/cpufreq.c
537     +++ b/drivers/cpufreq/cpufreq.c
538     @@ -918,11 +918,19 @@ static struct kobj_type ktype_cpufreq = {
539     .release = cpufreq_sysfs_release,
540     };
541    
542     -static int add_cpu_dev_symlink(struct cpufreq_policy *policy,
543     - struct device *dev)
544     +static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
545     {
546     + struct device *dev = get_cpu_device(cpu);
547     +
548     + if (!dev)
549     + return;
550     +
551     + if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
552     + return;
553     +
554     dev_dbg(dev, "%s: Adding symlink\n", __func__);
555     - return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
556     + if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
557     + dev_err(dev, "cpufreq symlink creation failed\n");
558     }
559    
560     static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
561     @@ -1184,10 +1192,10 @@ static int cpufreq_online(unsigned int cpu)
562     policy->user_policy.min = policy->min;
563     policy->user_policy.max = policy->max;
564    
565     - write_lock_irqsave(&cpufreq_driver_lock, flags);
566     - for_each_cpu(j, policy->related_cpus)
567     + for_each_cpu(j, policy->related_cpus) {
568     per_cpu(cpufreq_cpu_data, j) = policy;
569     - write_unlock_irqrestore(&cpufreq_driver_lock, flags);
570     + add_cpu_dev_symlink(policy, j);
571     + }
572     } else {
573     policy->min = policy->user_policy.min;
574     policy->max = policy->user_policy.max;
575     @@ -1284,13 +1292,15 @@ static int cpufreq_online(unsigned int cpu)
576    
577     if (cpufreq_driver->exit)
578     cpufreq_driver->exit(policy);
579     +
580     + for_each_cpu(j, policy->real_cpus)
581     + remove_cpu_dev_symlink(policy, get_cpu_device(j));
582     +
583     out_free_policy:
584     cpufreq_policy_free(policy, !new_policy);
585     return ret;
586     }
587    
588     -static int cpufreq_offline(unsigned int cpu);
589     -
590     /**
591     * cpufreq_add_dev - the cpufreq interface for a CPU device.
592     * @dev: CPU device.
593     @@ -1312,16 +1322,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
594    
595     /* Create sysfs link on CPU registration */
596     policy = per_cpu(cpufreq_cpu_data, cpu);
597     - if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
598     - return 0;
599     + if (policy)
600     + add_cpu_dev_symlink(policy, cpu);
601    
602     - ret = add_cpu_dev_symlink(policy, dev);
603     - if (ret) {
604     - cpumask_clear_cpu(cpu, policy->real_cpus);
605     - cpufreq_offline(cpu);
606     - }
607     -
608     - return ret;
609     + return 0;
610     }
611    
612     static int cpufreq_offline(unsigned int cpu)
613     diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
614     index 7fe442ca38f4..854a56781100 100644
615     --- a/drivers/cpuidle/cpuidle-powernv.c
616     +++ b/drivers/cpuidle/cpuidle-powernv.c
617     @@ -164,6 +164,24 @@ static int powernv_cpuidle_driver_init(void)
618     drv->state_count += 1;
619     }
620    
621     + /*
622     + * On the PowerNV platform cpu_present may be less than cpu_possible in
623     + * cases when firmware detects the CPU, but it is not available to the
624     + * OS. If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotplugable at
625     + * run time and hence cpu_devices are not created for those CPUs by the
626     + * generic topology_init().
627     + *
628     + * drv->cpumask defaults to cpu_possible_mask in
629     + * __cpuidle_driver_init(). This breaks cpuidle on PowerNV where
630     + * cpu_devices are not created for CPUs in cpu_possible_mask that
631     + * cannot be hot-added later at run time.
632     + *
633     + * Trying cpuidle_register_device() on a CPU without a cpu_device is
634     + * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
635     + */
636     +
637     + drv->cpumask = (struct cpumask *)cpu_present_mask;
638     +
639     return 0;
640     }
641    
642     diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
643     index c73207abb5a4..35237c8d5206 100644
644     --- a/drivers/cpuidle/cpuidle.c
645     +++ b/drivers/cpuidle/cpuidle.c
646     @@ -189,6 +189,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
647     return -EBUSY;
648     }
649     target_state = &drv->states[index];
650     + broadcast = false;
651     }
652    
653     /* Take note of the planned idle state. */
654     diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
655     index 832a2c3f01ff..9e98a5fbbc1d 100644
656     --- a/drivers/cpuidle/sysfs.c
657     +++ b/drivers/cpuidle/sysfs.c
658     @@ -613,6 +613,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
659     struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
660     int error;
661    
662     + /*
663     + * Return if cpu_device is not setup for this CPU.
664     + *
665     + * This could happen if the arch did not set up cpu_device
666     + * since this CPU is not in cpu_present mask and the
667     + * driver did not send a correct CPU mask during registration.
668     + * Without this check we would end up passing bogus
669     + * value for &cpu_dev->kobj in kobject_init_and_add()
670     + */
671     + if (!cpu_dev)
672     + return -ENODEV;
673     +
674     kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
675     if (!kdev)
676     return -ENOMEM;
677     diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
678     index ecfdcfe3698d..4f41d6da5acc 100644
679     --- a/drivers/crypto/amcc/crypto4xx_core.h
680     +++ b/drivers/crypto/amcc/crypto4xx_core.h
681     @@ -34,12 +34,12 @@
682     #define PPC405EX_CE_RESET 0x00000008
683    
684     #define CRYPTO4XX_CRYPTO_PRIORITY 300
685     -#define PPC4XX_LAST_PD 63
686     -#define PPC4XX_NUM_PD 64
687     -#define PPC4XX_LAST_GD 1023
688     +#define PPC4XX_NUM_PD 256
689     +#define PPC4XX_LAST_PD (PPC4XX_NUM_PD - 1)
690     #define PPC4XX_NUM_GD 1024
691     -#define PPC4XX_LAST_SD 63
692     -#define PPC4XX_NUM_SD 64
693     +#define PPC4XX_LAST_GD (PPC4XX_NUM_GD - 1)
694     +#define PPC4XX_NUM_SD 256
695     +#define PPC4XX_LAST_SD (PPC4XX_NUM_SD - 1)
696     #define PPC4XX_SD_BUFFER_SIZE 2048
697    
698     #define PD_ENTRY_INUSE 1
699     diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
700     index db607d51ee2b..8eed456a67be 100644
701     --- a/drivers/hid/Kconfig
702     +++ b/drivers/hid/Kconfig
703     @@ -190,6 +190,7 @@ config HID_CORSAIR
704    
705     Supported devices:
706     - Vengeance K90
707     + - Scimitar PRO RGB
708    
709     config HID_PRODIKEYS
710     tristate "Prodikeys PC-MIDI Keyboard support"
711     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
712     index bdde8859e191..e32862ca5223 100644
713     --- a/drivers/hid/hid-core.c
714     +++ b/drivers/hid/hid-core.c
715     @@ -1872,6 +1872,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
716     { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
717     { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
718     { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
719     + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
720     { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
721     { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
722     { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
723     @@ -2106,6 +2107,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
724     { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
725     { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
726     { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
727     + { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
728     { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
729     { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
730     { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
731     diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
732     index c0303f61c26a..9ba5d98a1180 100644
733     --- a/drivers/hid/hid-corsair.c
734     +++ b/drivers/hid/hid-corsair.c
735     @@ -3,8 +3,10 @@
736     *
737     * Supported devices:
738     * - Vengeance K90 Keyboard
739     + * - Scimitar PRO RGB Gaming Mouse
740     *
741     * Copyright (c) 2015 Clement Vuchener
742     + * Copyright (c) 2017 Oscar Campos
743     */
744    
745     /*
746     @@ -670,10 +672,51 @@ static int corsair_input_mapping(struct hid_device *dev,
747     return 0;
748     }
749    
750     +/*
751     + * The report descriptor of Corsair Scimitar RGB Pro gaming mouse is
752     + * non parseable as they define two consecutive Logical Minimum for
753     + * the Usage Page (Consumer) in rdescs bytes 75 and 77 being 77 0x16
754     + * that should be obviousy 0x26 for Logical Magimum of 16 bits. This
755     + * prevents poper parsing of the report descriptor due Logical
756     + * Minimum being larger than Logical Maximum.
757     + *
758     + * This driver fixes the report descriptor for:
759     + * - USB ID b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse
760     + */
761     +
762     +static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
763     + unsigned int *rsize)
764     +{
765     + struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
766     +
767     + if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
768     + /*
769     + * Corsair Scimitar RGB Pro report descriptor is broken and
770     + * defines two different Logical Minimum for the Consumer
771     + * Application. The byte 77 should be a 0x26 defining a 16
772     + * bits integer for the Logical Maximum but it is a 0x16
773     + * instead (Logical Minimum)
774     + */
775     + switch (hdev->product) {
776     + case USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB:
777     + if (*rsize >= 172 && rdesc[75] == 0x15 && rdesc[77] == 0x16
778     + && rdesc[78] == 0xff && rdesc[79] == 0x0f) {
779     + hid_info(hdev, "Fixing up report descriptor\n");
780     + rdesc[77] = 0x26;
781     + }
782     + break;
783     + }
784     +
785     + }
786     + return rdesc;
787     +}
788     +
789     static const struct hid_device_id corsair_devices[] = {
790     { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90),
791     .driver_data = CORSAIR_USE_K90_MACRO |
792     CORSAIR_USE_K90_BACKLIGHT },
793     + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR,
794     + USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
795     {}
796     };
797    
798     @@ -686,10 +729,14 @@ static struct hid_driver corsair_driver = {
799     .event = corsair_event,
800     .remove = corsair_remove,
801     .input_mapping = corsair_input_mapping,
802     + .report_fixup = corsair_mouse_report_fixup,
803     };
804    
805     module_hid_driver(corsair_driver);
806    
807     MODULE_LICENSE("GPL");
808     +/* Original K90 driver author */
809     MODULE_AUTHOR("Clement Vuchener");
810     +/* Scimitar PRO RGB driver author */
811     +MODULE_AUTHOR("Oscar Campos");
812     MODULE_DESCRIPTION("HID driver for Corsair devices");
813     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
814     index 433d5f675c03..244b97c1b74e 100644
815     --- a/drivers/hid/hid-ids.h
816     +++ b/drivers/hid/hid-ids.h
817     @@ -277,6 +277,9 @@
818     #define USB_DEVICE_ID_CORSAIR_K70RGB 0x1b13
819     #define USB_DEVICE_ID_CORSAIR_STRAFE 0x1b15
820     #define USB_DEVICE_ID_CORSAIR_K65RGB 0x1b17
821     +#define USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE 0x1b38
822     +#define USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE 0x1b39
823     +#define USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB 0x1b3e
824    
825     #define USB_VENDOR_ID_CREATIVELABS 0x041e
826     #define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c
827     @@ -1077,6 +1080,7 @@
828    
829     #define USB_VENDOR_ID_XIN_MO 0x16c0
830     #define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1
831     +#define USB_DEVICE_ID_THT_2P_ARCADE 0x75e1
832    
833     #define USB_VENDOR_ID_XIROKU 0x1477
834     #define USB_DEVICE_ID_XIROKU_SPX 0x1006
835     diff --git a/drivers/hid/hid-xinmo.c b/drivers/hid/hid-xinmo.c
836     index 7df5227a7e61..9ad7731d2e10 100644
837     --- a/drivers/hid/hid-xinmo.c
838     +++ b/drivers/hid/hid-xinmo.c
839     @@ -46,6 +46,7 @@ static int xinmo_event(struct hid_device *hdev, struct hid_field *field,
840    
841     static const struct hid_device_id xinmo_devices[] = {
842     { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
843     + { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
844     { }
845     };
846    
847     diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
848     index 2b1620797959..1916f80a692d 100644
849     --- a/drivers/hid/usbhid/hid-quirks.c
850     +++ b/drivers/hid/usbhid/hid-quirks.c
851     @@ -80,6 +80,9 @@ static const struct hid_blacklist {
852     { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
853     { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
854     { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
855     + { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
856     + { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
857     + { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
858     { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
859     { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
860     { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
861     diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
862     index cccef87963e0..975c43d446f8 100644
863     --- a/drivers/hwmon/asus_atk0110.c
864     +++ b/drivers/hwmon/asus_atk0110.c
865     @@ -646,6 +646,9 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value)
866     else
867     err = atk_read_value_new(sensor, value);
868    
869     + if (err)
870     + return err;
871     +
872     sensor->is_valid = true;
873     sensor->last_updated = jiffies;
874     sensor->cached_value = *value;
875     diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
876     index c1b9275978f9..281491cca510 100644
877     --- a/drivers/hwmon/max31790.c
878     +++ b/drivers/hwmon/max31790.c
879     @@ -311,7 +311,7 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
880     data->pwm[channel] = val << 8;
881     err = i2c_smbus_write_word_swapped(client,
882     MAX31790_REG_PWMOUT(channel),
883     - val);
884     + data->pwm[channel]);
885     break;
886     case hwmon_pwm_enable:
887     fan_config = data->fan_config[channel];
888     diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
889     index a754fc727de5..ff12b8d176ce 100644
890     --- a/drivers/infiniband/core/cq.c
891     +++ b/drivers/infiniband/core/cq.c
892     @@ -196,7 +196,7 @@ void ib_free_cq(struct ib_cq *cq)
893     irq_poll_disable(&cq->iop);
894     break;
895     case IB_POLL_WORKQUEUE:
896     - flush_work(&cq->work);
897     + cancel_work_sync(&cq->work);
898     break;
899     default:
900     WARN_ON_ONCE(1);
901     diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
902     index 6fd043b1d714..7db2001775cb 100644
903     --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
904     +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
905     @@ -159,6 +159,9 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
906     return NOTIFY_DONE;
907    
908     iwdev = &hdl->device;
909     + if (iwdev->init_state < INET_NOTIFIER)
910     + return NOTIFY_DONE;
911     +
912     netdev = iwdev->ldev->netdev;
913     upper_dev = netdev_master_upper_dev_get(netdev);
914     if (netdev != event_netdev)
915     @@ -231,6 +234,9 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
916     return NOTIFY_DONE;
917    
918     iwdev = &hdl->device;
919     + if (iwdev->init_state < INET_NOTIFIER)
920     + return NOTIFY_DONE;
921     +
922     netdev = iwdev->ldev->netdev;
923     if (netdev != event_netdev)
924     return NOTIFY_DONE;
925     @@ -280,6 +286,8 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
926     if (!iwhdl)
927     return NOTIFY_DONE;
928     iwdev = &iwhdl->device;
929     + if (iwdev->init_state < INET_NOTIFIER)
930     + return NOTIFY_DONE;
931     p = (__be32 *)neigh->primary_key;
932     i40iw_copy_ip_ntohl(local_ipaddr, p);
933     if (neigh->nud_state & NUD_VALID) {
934     diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
935     index e202b8142759..6b712eecbd37 100644
936     --- a/drivers/infiniband/sw/rdmavt/mmap.c
937     +++ b/drivers/infiniband/sw/rdmavt/mmap.c
938     @@ -170,9 +170,9 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
939    
940     spin_lock_irq(&rdi->mmap_offset_lock);
941     if (rdi->mmap_offset == 0)
942     - rdi->mmap_offset = PAGE_SIZE;
943     + rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
944     ip->offset = rdi->mmap_offset;
945     - rdi->mmap_offset += size;
946     + rdi->mmap_offset += ALIGN(size, SHMLBA);
947     spin_unlock_irq(&rdi->mmap_offset_lock);
948    
949     INIT_LIST_HEAD(&ip->pending_mmaps);
950     diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
951     index c572a4c09359..bd812e00988e 100644
952     --- a/drivers/infiniband/sw/rxe/rxe_mmap.c
953     +++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
954     @@ -156,10 +156,10 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
955     spin_lock_bh(&rxe->mmap_offset_lock);
956    
957     if (rxe->mmap_offset == 0)
958     - rxe->mmap_offset = PAGE_SIZE;
959     + rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
960    
961     ip->info.offset = rxe->mmap_offset;
962     - rxe->mmap_offset += size;
963     + rxe->mmap_offset += ALIGN(size, SHMLBA);
964    
965     spin_unlock_bh(&rxe->mmap_offset_lock);
966    
967     diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
968     index ee26a1b1b4ed..1c4e5b2e6835 100644
969     --- a/drivers/infiniband/sw/rxe/rxe_pool.c
970     +++ b/drivers/infiniband/sw/rxe/rxe_pool.c
971     @@ -412,6 +412,8 @@ void *rxe_alloc(struct rxe_pool *pool)
972     elem = kmem_cache_zalloc(pool_cache(pool),
973     (pool->flags & RXE_POOL_ATOMIC) ?
974     GFP_ATOMIC : GFP_KERNEL);
975     + if (!elem)
976     + return NULL;
977    
978     elem->pool = pool;
979     kref_init(&elem->ref_cnt);
980     diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
981     index 9d084780ac91..5b0ca35c06ab 100644
982     --- a/drivers/infiniband/sw/rxe/rxe_req.c
983     +++ b/drivers/infiniband/sw/rxe/rxe_req.c
984     @@ -726,11 +726,11 @@ int rxe_requester(void *arg)
985     ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
986     if (ret) {
987     qp->need_req_skb = 1;
988     - kfree_skb(skb);
989    
990     rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
991    
992     if (ret == -EAGAIN) {
993     + kfree_skb(skb);
994     rxe_run_task(&qp->req.task, 1);
995     goto exit;
996     }
997     diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
998     index 7705820cdac6..8c0ddd7165ae 100644
999     --- a/drivers/infiniband/sw/rxe/rxe_resp.c
1000     +++ b/drivers/infiniband/sw/rxe/rxe_resp.c
1001     @@ -799,18 +799,17 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
1002     /* Unreachable */
1003     WARN_ON(1);
1004    
1005     - /* We successfully processed this new request. */
1006     - qp->resp.msn++;
1007     -
1008     /* next expected psn, read handles this separately */
1009     qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
1010    
1011     qp->resp.opcode = pkt->opcode;
1012     qp->resp.status = IB_WC_SUCCESS;
1013    
1014     - if (pkt->mask & RXE_COMP_MASK)
1015     + if (pkt->mask & RXE_COMP_MASK) {
1016     + /* We successfully processed this new request. */
1017     + qp->resp.msn++;
1018     return RESPST_COMPLETE;
1019     - else if (qp_type(qp) == IB_QPT_RC)
1020     + } else if (qp_type(qp) == IB_QPT_RC)
1021     return RESPST_ACKNOWLEDGE;
1022     else
1023     return RESPST_CLEANUP;
1024     diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
1025     index 0be6a7c5ddb5..cb48e22afff7 100644
1026     --- a/drivers/infiniband/ulp/iser/iscsi_iser.h
1027     +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
1028     @@ -430,6 +430,7 @@ struct iser_fr_desc {
1029     struct list_head list;
1030     struct iser_reg_resources rsc;
1031     struct iser_pi_context *pi_ctx;
1032     + struct list_head all_list;
1033     };
1034    
1035     /**
1036     @@ -443,6 +444,7 @@ struct iser_fr_pool {
1037     struct list_head list;
1038     spinlock_t lock;
1039     int size;
1040     + struct list_head all_list;
1041     };
1042    
1043     /**
1044     diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
1045     index a4b791dfaa1d..bc6f5bb6c524 100644
1046     --- a/drivers/infiniband/ulp/iser/iser_verbs.c
1047     +++ b/drivers/infiniband/ulp/iser/iser_verbs.c
1048     @@ -362,6 +362,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
1049     int i, ret;
1050    
1051     INIT_LIST_HEAD(&fr_pool->list);
1052     + INIT_LIST_HEAD(&fr_pool->all_list);
1053     spin_lock_init(&fr_pool->lock);
1054     fr_pool->size = 0;
1055     for (i = 0; i < cmds_max; i++) {
1056     @@ -373,6 +374,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
1057     }
1058    
1059     list_add_tail(&desc->list, &fr_pool->list);
1060     + list_add_tail(&desc->all_list, &fr_pool->all_list);
1061     fr_pool->size++;
1062     }
1063    
1064     @@ -392,13 +394,13 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
1065     struct iser_fr_desc *desc, *tmp;
1066     int i = 0;
1067    
1068     - if (list_empty(&fr_pool->list))
1069     + if (list_empty(&fr_pool->all_list))
1070     return;
1071    
1072     iser_info("freeing conn %p fr pool\n", ib_conn);
1073    
1074     - list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
1075     - list_del(&desc->list);
1076     + list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
1077     + list_del(&desc->all_list);
1078     iser_free_reg_res(&desc->rsc);
1079     if (desc->pi_ctx)
1080     iser_free_pi_ctx(desc->pi_ctx);
1081     diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
1082     index c7820b3ea80e..beef59eb94fa 100644
1083     --- a/drivers/iommu/exynos-iommu.c
1084     +++ b/drivers/iommu/exynos-iommu.c
1085     @@ -543,7 +543,10 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
1086     if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) {
1087     clk_enable(data->clk_master);
1088     if (sysmmu_block(data)) {
1089     - __sysmmu_tlb_invalidate_entry(data, iova, 1);
1090     + if (data->version >= MAKE_MMU_VER(5, 0))
1091     + __sysmmu_tlb_invalidate(data);
1092     + else
1093     + __sysmmu_tlb_invalidate_entry(data, iova, 1);
1094     sysmmu_unblock(data);
1095     }
1096     clk_disable(data->clk_master);
1097     diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
1098     index 823f6985b260..dd7e38ac29bd 100644
1099     --- a/drivers/isdn/capi/kcapi.c
1100     +++ b/drivers/isdn/capi/kcapi.c
1101     @@ -1032,6 +1032,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
1102     sizeof(avmb1_carddef))))
1103     return -EFAULT;
1104     cdef.cardtype = AVM_CARDTYPE_B1;
1105     + cdef.cardnr = 0;
1106     } else {
1107     if ((retval = copy_from_user(&cdef, data,
1108     sizeof(avmb1_extcarddef))))
1109     diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
1110     index eef202d4399b..a5422f483ad5 100644
1111     --- a/drivers/misc/cxl/pci.c
1112     +++ b/drivers/misc/cxl/pci.c
1113     @@ -1758,6 +1758,9 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
1114     /* There should only be one entry, but go through the list
1115     * anyway
1116     */
1117     + if (afu->phb == NULL)
1118     + return result;
1119     +
1120     list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
1121     if (!afu_dev->driver)
1122     continue;
1123     @@ -1801,6 +1804,11 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
1124     /* Only participate in EEH if we are on a virtual PHB */
1125     if (afu->phb == NULL)
1126     return PCI_ERS_RESULT_NONE;
1127     +
1128     + /*
1129     + * Tell the AFU drivers; but we don't care what they
1130     + * say, we're going away.
1131     + */
1132     cxl_vphb_error_detected(afu, state);
1133     }
1134     return PCI_ERS_RESULT_DISCONNECT;
1135     @@ -1941,6 +1949,9 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
1136     if (cxl_afu_select_best_mode(afu))
1137     goto err;
1138    
1139     + if (afu->phb == NULL)
1140     + continue;
1141     +
1142     list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
1143     /* Reset the device context.
1144     * TODO: make this less disruptive
1145     @@ -2003,6 +2014,9 @@ static void cxl_pci_resume(struct pci_dev *pdev)
1146     for (i = 0; i < adapter->slices; i++) {
1147     afu = adapter->afu[i];
1148    
1149     + if (afu->phb == NULL)
1150     + continue;
1151     +
1152     list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
1153     if (afu_dev->driver && afu_dev->driver->err_handler &&
1154     afu_dev->driver->err_handler->resume)
1155     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1156     index 0b894d76aa41..bbb3641eddcb 100644
1157     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1158     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1159     @@ -2381,6 +2381,18 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
1160     return 0;
1161     }
1162    
1163     +static void bnxt_init_cp_rings(struct bnxt *bp)
1164     +{
1165     + int i;
1166     +
1167     + for (i = 0; i < bp->cp_nr_rings; i++) {
1168     + struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
1169     + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
1170     +
1171     + ring->fw_ring_id = INVALID_HW_RING_ID;
1172     + }
1173     +}
1174     +
1175     static int bnxt_init_rx_rings(struct bnxt *bp)
1176     {
1177     int i, rc = 0;
1178     @@ -4700,6 +4712,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
1179    
1180     static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
1181     {
1182     + bnxt_init_cp_rings(bp);
1183     bnxt_init_rx_rings(bp);
1184     bnxt_init_tx_rings(bp);
1185     bnxt_init_ring_grps(bp, irq_re_init);
1186     diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
1187     index 9e59663a6ead..0f6811860ad5 100644
1188     --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
1189     +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
1190     @@ -1930,13 +1930,13 @@ static void
1191     bfa_ioc_send_enable(struct bfa_ioc *ioc)
1192     {
1193     struct bfi_ioc_ctrl_req enable_req;
1194     - struct timeval tv;
1195    
1196     bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1197     bfa_ioc_portid(ioc));
1198     enable_req.clscode = htons(ioc->clscode);
1199     - do_gettimeofday(&tv);
1200     - enable_req.tv_sec = ntohl(tv.tv_sec);
1201     + enable_req.rsvd = htons(0);
1202     + /* overflow in 2106 */
1203     + enable_req.tv_sec = ntohl(ktime_get_real_seconds());
1204     bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1205     }
1206    
1207     @@ -1947,6 +1947,10 @@ bfa_ioc_send_disable(struct bfa_ioc *ioc)
1208    
1209     bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1210     bfa_ioc_portid(ioc));
1211     + disable_req.clscode = htons(ioc->clscode);
1212     + disable_req.rsvd = htons(0);
1213     + /* overflow in 2106 */
1214     + disable_req.tv_sec = ntohl(ktime_get_real_seconds());
1215     bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1216     }
1217    
1218     diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
1219     index 05c1c1dd7751..cebfe3bd086e 100644
1220     --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
1221     +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
1222     @@ -325,7 +325,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
1223     return PTR_ERR(kern_buf);
1224    
1225     rc = sscanf(kern_buf, "%x:%x", &addr, &len);
1226     - if (rc < 2) {
1227     + if (rc < 2 || len > UINT_MAX >> 2) {
1228     netdev_warn(bnad->netdev, "failed to read user buffer\n");
1229     kfree(kern_buf);
1230     return -EINVAL;
1231     diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
1232     index 4d19e46f7c55..3693ae104c2a 100644
1233     --- a/drivers/net/ethernet/intel/fm10k/fm10k.h
1234     +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
1235     @@ -508,8 +508,8 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid);
1236     int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac);
1237     int fm10k_ndo_set_vf_vlan(struct net_device *netdev,
1238     int vf_idx, u16 vid, u8 qos, __be16 vlan_proto);
1239     -int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate,
1240     - int unused);
1241     +int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
1242     + int __always_unused min_rate, int max_rate);
1243     int fm10k_ndo_get_vf_config(struct net_device *netdev,
1244     int vf_idx, struct ifla_vf_info *ivi);
1245    
1246     diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
1247     index 5f4dac0d36ef..e72fd52bacfe 100644
1248     --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
1249     +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
1250     @@ -126,6 +126,9 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
1251     struct fm10k_mbx_info *mbx = &vf_info->mbx;
1252     u16 glort = vf_info->glort;
1253    
1254     + /* process the SM mailbox first to drain outgoing messages */
1255     + hw->mbx.ops.process(hw, &hw->mbx);
1256     +
1257     /* verify port mapping is valid, if not reset port */
1258     if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
1259     hw->iov.ops.reset_lport(hw, vf_info);
1260     @@ -482,7 +485,7 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
1261     }
1262    
1263     int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
1264     - int __always_unused unused, int rate)
1265     + int __always_unused min_rate, int max_rate)
1266     {
1267     struct fm10k_intfc *interface = netdev_priv(netdev);
1268     struct fm10k_iov_data *iov_data = interface->iov_data;
1269     @@ -493,14 +496,15 @@ int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
1270     return -EINVAL;
1271    
1272     /* rate limit cannot be less than 10Mbs or greater than link speed */
1273     - if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX))
1274     + if (max_rate &&
1275     + (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX))
1276     return -EINVAL;
1277    
1278     /* store values */
1279     - iov_data->vf_info[vf_idx].rate = rate;
1280     + iov_data->vf_info[vf_idx].rate = max_rate;
1281    
1282     /* update hardware configuration */
1283     - hw->iov.ops.configure_tc(hw, vf_idx, rate);
1284     + hw->iov.ops.configure_tc(hw, vf_idx, max_rate);
1285    
1286     return 0;
1287     }
1288     diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
1289     index 2caafebb0295..becffd15c092 100644
1290     --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
1291     +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
1292     @@ -4217,8 +4217,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
1293     if (!vsi->netdev)
1294     return;
1295    
1296     - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
1297     - napi_enable(&vsi->q_vectors[q_idx]->napi);
1298     + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
1299     + struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
1300     +
1301     + if (q_vector->rx.ring || q_vector->tx.ring)
1302     + napi_enable(&q_vector->napi);
1303     + }
1304     }
1305    
1306     /**
1307     @@ -4232,8 +4236,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
1308     if (!vsi->netdev)
1309     return;
1310    
1311     - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
1312     - napi_disable(&vsi->q_vectors[q_idx]->napi);
1313     + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
1314     + struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
1315     +
1316     + if (q_vector->rx.ring || q_vector->tx.ring)
1317     + napi_disable(&q_vector->napi);
1318     + }
1319     }
1320    
1321     /**
1322     diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
1323     index 16839600fb78..ca54f7684668 100644
1324     --- a/drivers/net/ethernet/intel/igb/igb_main.c
1325     +++ b/drivers/net/ethernet/intel/igb/igb_main.c
1326     @@ -3102,6 +3102,8 @@ static int igb_sw_init(struct igb_adapter *adapter)
1327     /* Setup and initialize a copy of the hw vlan table array */
1328     adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
1329     GFP_ATOMIC);
1330     + if (!adapter->shadow_vfta)
1331     + return -ENOMEM;
1332    
1333     /* This call may decrease the number of queues */
1334     if (igb_init_interrupt_scheme(adapter, true)) {
1335     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
1336     index 77d3039283f6..ad3362293cbd 100644
1337     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
1338     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
1339     @@ -3696,10 +3696,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
1340     fw_cmd.ver_build = build;
1341     fw_cmd.ver_sub = sub;
1342     fw_cmd.hdr.checksum = 0;
1343     - fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
1344     - (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
1345     fw_cmd.pad = 0;
1346     fw_cmd.pad2 = 0;
1347     + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
1348     + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
1349    
1350     for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
1351     ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
1352     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
1353     index 60f0bf779073..77a60aa5dc7e 100644
1354     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
1355     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
1356     @@ -617,6 +617,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
1357     /* convert offset from words to bytes */
1358     buffer.address = cpu_to_be32((offset + current_word) * 2);
1359     buffer.length = cpu_to_be16(words_to_read * 2);
1360     + buffer.pad2 = 0;
1361     + buffer.pad3 = 0;
1362    
1363     status = ixgbe_host_interface_command(hw, &buffer,
1364     sizeof(buffer),
1365     diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
1366     index 4367dd6879a2..0622fd03941b 100644
1367     --- a/drivers/net/ethernet/moxa/moxart_ether.c
1368     +++ b/drivers/net/ethernet/moxa/moxart_ether.c
1369     @@ -25,6 +25,7 @@
1370     #include <linux/of_irq.h>
1371     #include <linux/crc32.h>
1372     #include <linux/crc32c.h>
1373     +#include <linux/circ_buf.h>
1374    
1375     #include "moxart_ether.h"
1376    
1377     @@ -278,6 +279,13 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
1378     return rx;
1379     }
1380    
1381     +static int moxart_tx_queue_space(struct net_device *ndev)
1382     +{
1383     + struct moxart_mac_priv_t *priv = netdev_priv(ndev);
1384     +
1385     + return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM);
1386     +}
1387     +
1388     static void moxart_tx_finished(struct net_device *ndev)
1389     {
1390     struct moxart_mac_priv_t *priv = netdev_priv(ndev);
1391     @@ -297,6 +305,9 @@ static void moxart_tx_finished(struct net_device *ndev)
1392     tx_tail = TX_NEXT(tx_tail);
1393     }
1394     priv->tx_tail = tx_tail;
1395     + if (netif_queue_stopped(ndev) &&
1396     + moxart_tx_queue_space(ndev) >= TX_WAKE_THRESHOLD)
1397     + netif_wake_queue(ndev);
1398     }
1399    
1400     static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
1401     @@ -324,13 +335,18 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1402     struct moxart_mac_priv_t *priv = netdev_priv(ndev);
1403     void *desc;
1404     unsigned int len;
1405     - unsigned int tx_head = priv->tx_head;
1406     + unsigned int tx_head;
1407     u32 txdes1;
1408     int ret = NETDEV_TX_BUSY;
1409    
1410     + spin_lock_irq(&priv->txlock);
1411     +
1412     + tx_head = priv->tx_head;
1413     desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
1414    
1415     - spin_lock_irq(&priv->txlock);
1416     + if (moxart_tx_queue_space(ndev) == 1)
1417     + netif_stop_queue(ndev);
1418     +
1419     if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
1420     net_dbg_ratelimited("no TX space for packet\n");
1421     priv->stats.tx_dropped++;
1422     diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
1423     index 93a9563ac7c6..afc32ec998c0 100644
1424     --- a/drivers/net/ethernet/moxa/moxart_ether.h
1425     +++ b/drivers/net/ethernet/moxa/moxart_ether.h
1426     @@ -59,6 +59,7 @@
1427     #define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK))
1428     #define TX_BUF_SIZE 1600
1429     #define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1)
1430     +#define TX_WAKE_THRESHOLD 16
1431    
1432     #define RX_DESC_NUM 64
1433     #define RX_DESC_NUM_MASK (RX_DESC_NUM-1)
1434     diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
1435     index a0849f49bbec..c0192f97ecc8 100644
1436     --- a/drivers/net/irda/vlsi_ir.c
1437     +++ b/drivers/net/irda/vlsi_ir.c
1438     @@ -418,8 +418,9 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
1439     memset(rd, 0, sizeof(*rd));
1440     rd->hw = hwmap + i;
1441     rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
1442     - if (rd->buf == NULL ||
1443     - !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
1444     + if (rd->buf)
1445     + busaddr = pci_map_single(pdev, rd->buf, len, dir);
1446     + if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
1447     if (rd->buf) {
1448     net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
1449     __func__, rd->buf);
1450     @@ -430,8 +431,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
1451     rd = r->rd + j;
1452     busaddr = rd_get_addr(rd);
1453     rd_set_addr_status(rd, 0, 0);
1454     - if (busaddr)
1455     - pci_unmap_single(pdev, busaddr, len, dir);
1456     + pci_unmap_single(pdev, busaddr, len, dir);
1457     kfree(rd->buf);
1458     rd->buf = NULL;
1459     }
1460     diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
1461     index a52b560e428b..3603eec7217f 100644
1462     --- a/drivers/net/phy/at803x.c
1463     +++ b/drivers/net/phy/at803x.c
1464     @@ -166,7 +166,7 @@ static int at803x_set_wol(struct phy_device *phydev,
1465     mac = (const u8 *) ndev->dev_addr;
1466    
1467     if (!is_valid_ether_addr(mac))
1468     - return -EFAULT;
1469     + return -EINVAL;
1470    
1471     for (i = 0; i < 3; i++) {
1472     phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
1473     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1474     index 62725655d8e4..105fbfb47e3a 100644
1475     --- a/drivers/net/usb/qmi_wwan.c
1476     +++ b/drivers/net/usb/qmi_wwan.c
1477     @@ -582,6 +582,10 @@ static const struct usb_device_id products[] = {
1478     USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
1479     .driver_info = (unsigned long)&qmi_wwan_info,
1480     },
1481     + { /* Motorola Mapphone devices with MDM6600 */
1482     + USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff),
1483     + .driver_info = (unsigned long)&qmi_wwan_info,
1484     + },
1485    
1486     /* 2. Combined interface devices matching on class+protocol */
1487     { /* Huawei E367 and possibly others in "Windows mode" */
1488     diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
1489     index afb953a258cd..b2d7c7e32250 100644
1490     --- a/drivers/net/usb/r8152.c
1491     +++ b/drivers/net/usb/r8152.c
1492     @@ -32,7 +32,7 @@
1493     #define NETNEXT_VERSION "08"
1494    
1495     /* Information for net */
1496     -#define NET_VERSION "8"
1497     +#define NET_VERSION "9"
1498    
1499     #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
1500     #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
1501     @@ -501,6 +501,8 @@ enum rtl_register_content {
1502     #define RTL8153_RMS RTL8153_MAX_PACKET
1503     #define RTL8152_TX_TIMEOUT (5 * HZ)
1504     #define RTL8152_NAPI_WEIGHT 64
1505     +#define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + CRC_SIZE + \
1506     + sizeof(struct rx_desc) + RX_ALIGN)
1507    
1508     /* rtl8152 flags */
1509     enum rtl8152_flags {
1510     @@ -1292,6 +1294,7 @@ static void intr_callback(struct urb *urb)
1511     }
1512     } else {
1513     if (netif_carrier_ok(tp->netdev)) {
1514     + netif_stop_queue(tp->netdev);
1515     set_bit(RTL8152_LINK_CHG, &tp->flags);
1516     schedule_delayed_work(&tp->schedule, 0);
1517     }
1518     @@ -1362,6 +1365,7 @@ static int alloc_all_mem(struct r8152 *tp)
1519     spin_lock_init(&tp->rx_lock);
1520     spin_lock_init(&tp->tx_lock);
1521     INIT_LIST_HEAD(&tp->tx_free);
1522     + INIT_LIST_HEAD(&tp->rx_done);
1523     skb_queue_head_init(&tp->tx_queue);
1524     skb_queue_head_init(&tp->rx_queue);
1525    
1526     @@ -2252,8 +2256,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
1527    
1528     static void r8153_set_rx_early_size(struct r8152 *tp)
1529     {
1530     - u32 mtu = tp->netdev->mtu;
1531     - u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
1532     + u32 ocp_data = (agg_buf_sz - rx_reserved_size(tp->netdev->mtu)) / 4;
1533    
1534     ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
1535     }
1536     @@ -3165,6 +3168,9 @@ static void set_carrier(struct r8152 *tp)
1537     napi_enable(&tp->napi);
1538     netif_wake_queue(netdev);
1539     netif_info(tp, link, netdev, "carrier on\n");
1540     + } else if (netif_queue_stopped(netdev) &&
1541     + skb_queue_len(&tp->tx_queue) < tp->tx_qlen) {
1542     + netif_wake_queue(netdev);
1543     }
1544     } else {
1545     if (netif_carrier_ok(netdev)) {
1546     @@ -3698,8 +3704,18 @@ static int rtl8152_resume(struct usb_interface *intf)
1547     tp->rtl_ops.autosuspend_en(tp, false);
1548     napi_disable(&tp->napi);
1549     set_bit(WORK_ENABLE, &tp->flags);
1550     - if (netif_carrier_ok(tp->netdev))
1551     - rtl_start_rx(tp);
1552     +
1553     + if (netif_carrier_ok(tp->netdev)) {
1554     + if (rtl8152_get_speed(tp) & LINK_STATUS) {
1555     + rtl_start_rx(tp);
1556     + } else {
1557     + netif_carrier_off(tp->netdev);
1558     + tp->rtl_ops.disable(tp);
1559     + netif_info(tp, link, tp->netdev,
1560     + "linking down\n");
1561     + }
1562     + }
1563     +
1564     napi_enable(&tp->napi);
1565     clear_bit(SELECTIVE_SUSPEND, &tp->flags);
1566     smp_mb__after_atomic();
1567     diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
1568     index c8e612c1c72f..e56ca3fb107e 100644
1569     --- a/drivers/nvme/target/loop.c
1570     +++ b/drivers/nvme/target/loop.c
1571     @@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
1572     static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
1573     struct nvme_loop_iod *iod, unsigned int queue_idx)
1574     {
1575     - BUG_ON(queue_idx >= ctrl->queue_count);
1576     -
1577     iod->req.cmd = &iod->cmd;
1578     iod->req.rsp = &iod->rsp;
1579     iod->queue = &ctrl->queues[queue_idx];
1580     @@ -314,6 +312,43 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
1581     kfree(ctrl);
1582     }
1583    
1584     +static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
1585     +{
1586     + int i;
1587     +
1588     + for (i = 1; i < ctrl->queue_count; i++)
1589     + nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
1590     +}
1591     +
1592     +static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
1593     +{
1594     + struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
1595     + unsigned int nr_io_queues;
1596     + int ret, i;
1597     +
1598     + nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
1599     + ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
1600     + if (ret || !nr_io_queues)
1601     + return ret;
1602     +
1603     + dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
1604     +
1605     + for (i = 1; i <= nr_io_queues; i++) {
1606     + ctrl->queues[i].ctrl = ctrl;
1607     + ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
1608     + if (ret)
1609     + goto out_destroy_queues;
1610     +
1611     + ctrl->queue_count++;
1612     + }
1613     +
1614     + return 0;
1615     +
1616     +out_destroy_queues:
1617     + nvme_loop_destroy_io_queues(ctrl);
1618     + return ret;
1619     +}
1620     +
1621     static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
1622     {
1623     int error;
1624     @@ -385,17 +420,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
1625    
1626     static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
1627     {
1628     - int i;
1629     -
1630     nvme_stop_keep_alive(&ctrl->ctrl);
1631    
1632     if (ctrl->queue_count > 1) {
1633     nvme_stop_queues(&ctrl->ctrl);
1634     blk_mq_tagset_busy_iter(&ctrl->tag_set,
1635     nvme_cancel_request, &ctrl->ctrl);
1636     -
1637     - for (i = 1; i < ctrl->queue_count; i++)
1638     - nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
1639     + nvme_loop_destroy_io_queues(ctrl);
1640     }
1641    
1642     if (ctrl->ctrl.state == NVME_CTRL_LIVE)
1643     @@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
1644     if (ret)
1645     goto out_disable;
1646    
1647     - for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
1648     - ctrl->queues[i].ctrl = ctrl;
1649     - ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
1650     - if (ret)
1651     - goto out_free_queues;
1652     -
1653     - ctrl->queue_count++;
1654     - }
1655     + ret = nvme_loop_init_io_queues(ctrl);
1656     + if (ret)
1657     + goto out_destroy_admin;
1658    
1659     - for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
1660     + for (i = 1; i < ctrl->queue_count; i++) {
1661     ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
1662     if (ret)
1663     - goto out_free_queues;
1664     + goto out_destroy_io;
1665     }
1666    
1667     changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
1668     @@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
1669    
1670     return;
1671    
1672     -out_free_queues:
1673     - for (i = 1; i < ctrl->queue_count; i++)
1674     - nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
1675     +out_destroy_io:
1676     + nvme_loop_destroy_io_queues(ctrl);
1677     +out_destroy_admin:
1678     nvme_loop_destroy_admin_queue(ctrl);
1679     out_disable:
1680     dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
1681     @@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
1682    
1683     static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
1684     {
1685     - struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
1686     int ret, i;
1687    
1688     - ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
1689     - if (ret || !opts->nr_io_queues)
1690     + ret = nvme_loop_init_io_queues(ctrl);
1691     + if (ret)
1692     return ret;
1693    
1694     - dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
1695     - opts->nr_io_queues);
1696     -
1697     - for (i = 1; i <= opts->nr_io_queues; i++) {
1698     - ctrl->queues[i].ctrl = ctrl;
1699     - ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
1700     - if (ret)
1701     - goto out_destroy_queues;
1702     -
1703     - ctrl->queue_count++;
1704     - }
1705     -
1706     memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
1707     ctrl->tag_set.ops = &nvme_loop_mq_ops;
1708     ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
1709     @@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
1710     goto out_free_tagset;
1711     }
1712    
1713     - for (i = 1; i <= opts->nr_io_queues; i++) {
1714     + for (i = 1; i < ctrl->queue_count; i++) {
1715     ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
1716     if (ret)
1717     goto out_cleanup_connect_q;
1718     @@ -588,8 +601,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
1719     out_free_tagset:
1720     blk_mq_free_tag_set(&ctrl->tag_set);
1721     out_destroy_queues:
1722     - for (i = 1; i < ctrl->queue_count; i++)
1723     - nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
1724     + nvme_loop_destroy_io_queues(ctrl);
1725     return ret;
1726     }
1727    
1728     diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
1729     index 47227820406d..1d32fe2d97aa 100644
1730     --- a/drivers/pci/iov.c
1731     +++ b/drivers/pci/iov.c
1732     @@ -164,7 +164,6 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
1733     pci_device_add(virtfn, virtfn->bus);
1734     mutex_unlock(&iov->dev->sriov->lock);
1735    
1736     - pci_bus_add_device(virtfn);
1737     sprintf(buf, "virtfn%u", id);
1738     rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
1739     if (rc)
1740     @@ -175,6 +174,8 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
1741    
1742     kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
1743    
1744     + pci_bus_add_device(virtfn);
1745     +
1746     return 0;
1747    
1748     failed2:
1749     diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
1750     index e7d4048e81f2..a87c8e1aef68 100644
1751     --- a/drivers/pci/pci.c
1752     +++ b/drivers/pci/pci.c
1753     @@ -4214,6 +4214,10 @@ static bool pci_bus_resetable(struct pci_bus *bus)
1754     {
1755     struct pci_dev *dev;
1756    
1757     +
1758     + if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
1759     + return false;
1760     +
1761     list_for_each_entry(dev, &bus->devices, bus_list) {
1762     if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
1763     (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
1764     diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
1765     index b1303b32053f..057465adf0b6 100644
1766     --- a/drivers/pci/pcie/aer/aerdrv_core.c
1767     +++ b/drivers/pci/pcie/aer/aerdrv_core.c
1768     @@ -390,7 +390,14 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
1769     * If the error is reported by an end point, we think this
1770     * error is related to the upstream link of the end point.
1771     */
1772     - pci_walk_bus(dev->bus, cb, &result_data);
1773     + if (state == pci_channel_io_normal)
1774     + /*
1775     + * the error is non fatal so the bus is ok, just invoke
1776     + * the callback for the function that logged the error.
1777     + */
1778     + cb(dev, &result_data);
1779     + else
1780     + pci_walk_bus(dev->bus, cb, &result_data);
1781     }
1782    
1783     return result_data.result;
1784     diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
1785     index b7bb37167969..50c45bdf93be 100644
1786     --- a/drivers/pinctrl/pinctrl-st.c
1787     +++ b/drivers/pinctrl/pinctrl-st.c
1788     @@ -1285,6 +1285,22 @@ static void st_gpio_irq_unmask(struct irq_data *d)
1789     writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
1790     }
1791    
1792     +static int st_gpio_irq_request_resources(struct irq_data *d)
1793     +{
1794     + struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1795     +
1796     + st_gpio_direction_input(gc, d->hwirq);
1797     +
1798     + return gpiochip_lock_as_irq(gc, d->hwirq);
1799     +}
1800     +
1801     +static void st_gpio_irq_release_resources(struct irq_data *d)
1802     +{
1803     + struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1804     +
1805     + gpiochip_unlock_as_irq(gc, d->hwirq);
1806     +}
1807     +
1808     static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
1809     {
1810     struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1811     @@ -1438,12 +1454,14 @@ static struct gpio_chip st_gpio_template = {
1812     };
1813    
1814     static struct irq_chip st_gpio_irqchip = {
1815     - .name = "GPIO",
1816     - .irq_disable = st_gpio_irq_mask,
1817     - .irq_mask = st_gpio_irq_mask,
1818     - .irq_unmask = st_gpio_irq_unmask,
1819     - .irq_set_type = st_gpio_irq_set_type,
1820     - .flags = IRQCHIP_SKIP_SET_WAKE,
1821     + .name = "GPIO",
1822     + .irq_request_resources = st_gpio_irq_request_resources,
1823     + .irq_release_resources = st_gpio_irq_release_resources,
1824     + .irq_disable = st_gpio_irq_mask,
1825     + .irq_mask = st_gpio_irq_mask,
1826     + .irq_unmask = st_gpio_irq_unmask,
1827     + .irq_set_type = st_gpio_irq_set_type,
1828     + .flags = IRQCHIP_SKIP_SET_WAKE,
1829     };
1830    
1831     static int st_gpiolib_register_bank(struct st_pinctrl *info,
1832     diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
1833     index 9f31bc1a47d0..18716025b1db 100644
1834     --- a/drivers/platform/x86/asus-wireless.c
1835     +++ b/drivers/platform/x86/asus-wireless.c
1836     @@ -97,6 +97,7 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event)
1837     return;
1838     }
1839     input_report_key(data->idev, KEY_RFKILL, 1);
1840     + input_sync(data->idev);
1841     input_report_key(data->idev, KEY_RFKILL, 0);
1842     input_sync(data->idev);
1843     }
1844     diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
1845     index 84a52db9b05f..6ebd42aad291 100644
1846     --- a/drivers/rtc/interface.c
1847     +++ b/drivers/rtc/interface.c
1848     @@ -772,7 +772,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
1849     }
1850    
1851     timerqueue_add(&rtc->timerqueue, &timer->node);
1852     - if (!next) {
1853     + if (!next || ktime_before(timer->node.expires, next->expires)) {
1854     struct rtc_wkalrm alarm;
1855     int err;
1856     alarm.time = rtc_ktime_to_tm(timer->node.expires);
1857     diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
1858     index e1687e19c59f..a30f24cb6c83 100644
1859     --- a/drivers/rtc/rtc-pl031.c
1860     +++ b/drivers/rtc/rtc-pl031.c
1861     @@ -308,7 +308,8 @@ static int pl031_remove(struct amba_device *adev)
1862    
1863     dev_pm_clear_wake_irq(&adev->dev);
1864     device_init_wakeup(&adev->dev, false);
1865     - free_irq(adev->irq[0], ldata);
1866     + if (adev->irq[0])
1867     + free_irq(adev->irq[0], ldata);
1868     rtc_device_unregister(ldata->rtc);
1869     iounmap(ldata->base);
1870     kfree(ldata);
1871     @@ -381,12 +382,13 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
1872     goto out_no_rtc;
1873     }
1874    
1875     - if (request_irq(adev->irq[0], pl031_interrupt,
1876     - vendor->irqflags, "rtc-pl031", ldata)) {
1877     - ret = -EIO;
1878     - goto out_no_irq;
1879     + if (adev->irq[0]) {
1880     + ret = request_irq(adev->irq[0], pl031_interrupt,
1881     + vendor->irqflags, "rtc-pl031", ldata);
1882     + if (ret)
1883     + goto out_no_irq;
1884     + dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
1885     }
1886     - dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
1887     return 0;
1888    
1889     out_no_irq:
1890     diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
1891     index e2bd2ad01b15..e72234efb648 100644
1892     --- a/drivers/s390/net/qeth_core.h
1893     +++ b/drivers/s390/net/qeth_core.h
1894     @@ -969,7 +969,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
1895     int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
1896     int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
1897     int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
1898     -int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
1899     +int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
1900     + int extra_elems, int data_offset);
1901     int qeth_get_elements_for_frags(struct sk_buff *);
1902     int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
1903     struct sk_buff *, struct qeth_hdr *, int, int, int);
1904     diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
1905     index b5fa6bb56b29..838ed6213118 100644
1906     --- a/drivers/s390/net/qeth_core_main.c
1907     +++ b/drivers/s390/net/qeth_core_main.c
1908     @@ -3842,6 +3842,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
1909     * @card: qeth card structure, to check max. elems.
1910     * @skb: SKB address
1911     * @extra_elems: extra elems needed, to check against max.
1912     + * @data_offset: range starts at skb->data + data_offset
1913     *
1914     * Returns the number of pages, and thus QDIO buffer elements, needed to cover
1915     * skb data, including linear part and fragments. Checks if the result plus
1916     @@ -3849,10 +3850,10 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
1917     * Note: extra_elems is not included in the returned result.
1918     */
1919     int qeth_get_elements_no(struct qeth_card *card,
1920     - struct sk_buff *skb, int extra_elems)
1921     + struct sk_buff *skb, int extra_elems, int data_offset)
1922     {
1923     int elements = qeth_get_elements_for_range(
1924     - (addr_t)skb->data,
1925     + (addr_t)skb->data + data_offset,
1926     (addr_t)skb->data + skb_headlen(skb)) +
1927     qeth_get_elements_for_frags(skb);
1928    
1929     diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
1930     index ac33f6c999b1..5082dfeacb95 100644
1931     --- a/drivers/s390/net/qeth_l2_main.c
1932     +++ b/drivers/s390/net/qeth_l2_main.c
1933     @@ -865,7 +865,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1934     * chaining we can not send long frag lists
1935     */
1936     if ((card->info.type != QETH_CARD_TYPE_IQD) &&
1937     - !qeth_get_elements_no(card, new_skb, 0)) {
1938     + !qeth_get_elements_no(card, new_skb, 0, 0)) {
1939     int lin_rc = skb_linearize(new_skb);
1940    
1941     if (card->options.performance_stats) {
1942     @@ -910,7 +910,8 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1943     }
1944     }
1945    
1946     - elements = qeth_get_elements_no(card, new_skb, elements_needed);
1947     + elements = qeth_get_elements_no(card, new_skb, elements_needed,
1948     + (data_offset > 0) ? data_offset : 0);
1949     if (!elements) {
1950     if (data_offset >= 0)
1951     kmem_cache_free(qeth_core_header_cache, hdr);
1952     diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
1953     index 5735fc3be6c7..f91e70c369ed 100644
1954     --- a/drivers/s390/net/qeth_l3_main.c
1955     +++ b/drivers/s390/net/qeth_l3_main.c
1956     @@ -2612,17 +2612,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
1957     char daddr[16];
1958     struct af_iucv_trans_hdr *iucv_hdr;
1959    
1960     - skb_pull(skb, 14);
1961     - card->dev->header_ops->create(skb, card->dev, 0,
1962     - card->dev->dev_addr, card->dev->dev_addr,
1963     - card->dev->addr_len);
1964     - skb_pull(skb, 14);
1965     - iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
1966     memset(hdr, 0, sizeof(struct qeth_hdr));
1967     hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
1968     hdr->hdr.l3.ext_flags = 0;
1969     - hdr->hdr.l3.length = skb->len;
1970     + hdr->hdr.l3.length = skb->len - ETH_HLEN;
1971     hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
1972     +
1973     + iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
1974     memset(daddr, 0, sizeof(daddr));
1975     daddr[0] = 0xfe;
1976     daddr[1] = 0x80;
1977     @@ -2826,10 +2822,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1978     if ((card->info.type == QETH_CARD_TYPE_IQD) &&
1979     !skb_is_nonlinear(skb)) {
1980     new_skb = skb;
1981     - if (new_skb->protocol == ETH_P_AF_IUCV)
1982     - data_offset = 0;
1983     - else
1984     - data_offset = ETH_HLEN;
1985     + data_offset = ETH_HLEN;
1986     hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
1987     if (!hdr)
1988     goto tx_drop;
1989     @@ -2870,7 +2863,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1990     */
1991     if ((card->info.type != QETH_CARD_TYPE_IQD) &&
1992     ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
1993     - (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) {
1994     + (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
1995     int lin_rc = skb_linearize(new_skb);
1996    
1997     if (card->options.performance_stats) {
1998     @@ -2912,7 +2905,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1999    
2000     elements = use_tso ?
2001     qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
2002     - qeth_get_elements_no(card, new_skb, hdr_elements);
2003     + qeth_get_elements_no(card, new_skb, hdr_elements,
2004     + (data_offset > 0) ? data_offset : 0);
2005     if (!elements) {
2006     if (data_offset >= 0)
2007     kmem_cache_free(qeth_core_header_cache, hdr);
2008     diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
2009     index 0039bebaa9e2..358ec32927ba 100644
2010     --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
2011     +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
2012     @@ -1347,6 +1347,7 @@ static void release_offload_resources(struct cxgbi_sock *csk)
2013     csk, csk->state, csk->flags, csk->tid);
2014    
2015     cxgbi_sock_free_cpl_skbs(csk);
2016     + cxgbi_sock_purge_write_queue(csk);
2017     if (csk->wr_cred != csk->wr_max_cred) {
2018     cxgbi_sock_purge_wr_queue(csk);
2019     cxgbi_sock_reset_wr_list(csk);
2020     diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
2021     index 9c9563312a3d..fc7addaf24da 100644
2022     --- a/drivers/scsi/lpfc/lpfc_els.c
2023     +++ b/drivers/scsi/lpfc/lpfc_els.c
2024     @@ -7782,7 +7782,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2025     did, vport->port_state, ndlp->nlp_flag);
2026    
2027     phba->fc_stat.elsRcvPRLI++;
2028     - if (vport->port_state < LPFC_DISC_AUTH) {
2029     + if ((vport->port_state < LPFC_DISC_AUTH) &&
2030     + (vport->fc_flag & FC_FABRIC)) {
2031     rjt_err = LSRJT_UNABLE_TPC;
2032     rjt_exp = LSEXP_NOTHING_MORE;
2033     break;
2034     diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
2035     index ed223937798a..7d2ad633b6bc 100644
2036     --- a/drivers/scsi/lpfc/lpfc_hbadisc.c
2037     +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
2038     @@ -4784,7 +4784,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2039     lpfc_cancel_retry_delay_tmo(vport, ndlp);
2040     if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
2041     !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
2042     - !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
2043     + !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
2044     + phba->sli_rev != LPFC_SLI_REV4) {
2045     /* For this case we need to cleanup the default rpi
2046     * allocated by the firmware.
2047     */
2048     diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
2049     index 55faa94637a9..2a436dff1589 100644
2050     --- a/drivers/scsi/lpfc/lpfc_hw4.h
2051     +++ b/drivers/scsi/lpfc/lpfc_hw4.h
2052     @@ -3232,7 +3232,7 @@ struct lpfc_mbx_get_port_name {
2053     #define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
2054     #define MB_CQE_STATUS_DMA_FAILED 0x5
2055    
2056     -#define LPFC_MBX_WR_CONFIG_MAX_BDE 8
2057     +#define LPFC_MBX_WR_CONFIG_MAX_BDE 1
2058     struct lpfc_mbx_wr_object {
2059     struct mbox_header header;
2060     union {
2061     diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2062     index 289374cbcb47..468acab04d3d 100644
2063     --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2064     +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2065     @@ -4770,6 +4770,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2066     } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
2067     scmd->result = DID_RESET << 16;
2068     break;
2069     + } else if ((scmd->device->channel == RAID_CHANNEL) &&
2070     + (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
2071     + MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
2072     + scmd->result = DID_RESET << 16;
2073     + break;
2074     }
2075     scmd->result = DID_SOFT_ERROR << 16;
2076     break;
2077     diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
2078     index 8dffd8a7e762..9f01427f35f9 100644
2079     --- a/drivers/staging/greybus/light.c
2080     +++ b/drivers/staging/greybus/light.c
2081     @@ -924,6 +924,8 @@ static void __gb_lights_led_unregister(struct gb_channel *channel)
2082     return;
2083    
2084     led_classdev_unregister(cdev);
2085     + kfree(cdev->name);
2086     + cdev->name = NULL;
2087     channel->led = NULL;
2088     }
2089    
2090     diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
2091     index f6429666a1cf..c5285ed34fdd 100644
2092     --- a/drivers/thermal/hisi_thermal.c
2093     +++ b/drivers/thermal/hisi_thermal.c
2094     @@ -35,8 +35,9 @@
2095     #define TEMP0_RST_MSK (0x1C)
2096     #define TEMP0_VALUE (0x28)
2097    
2098     -#define HISI_TEMP_BASE (-60)
2099     +#define HISI_TEMP_BASE (-60000)
2100     #define HISI_TEMP_RESET (100000)
2101     +#define HISI_TEMP_STEP (784)
2102    
2103     #define HISI_MAX_SENSORS 4
2104    
2105     @@ -61,19 +62,38 @@ struct hisi_thermal_data {
2106     void __iomem *regs;
2107     };
2108    
2109     -/* in millicelsius */
2110     -static inline int _step_to_temp(int step)
2111     +/*
2112     + * The temperature computation on the tsensor is as follow:
2113     + * Unit: millidegree Celsius
2114     + * Step: 255/200 (0.7843)
2115     + * Temperature base: -60°C
2116     + *
2117     + * The register is programmed in temperature steps, every step is 784
2118     + * millidegree and begins at -60 000 m°C
2119     + *
2120     + * The temperature from the steps:
2121     + *
2122     + * Temp = TempBase + (steps x 784)
2123     + *
2124     + * and the steps from the temperature:
2125     + *
2126     + * steps = (Temp - TempBase) / 784
2127     + *
2128     + */
2129     +static inline int hisi_thermal_step_to_temp(int step)
2130     {
2131     - /*
2132     - * Every step equals (1 * 200) / 255 celsius, and finally
2133     - * need convert to millicelsius.
2134     - */
2135     - return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
2136     + return HISI_TEMP_BASE + (step * HISI_TEMP_STEP);
2137     }
2138    
2139     -static inline long _temp_to_step(long temp)
2140     +static inline long hisi_thermal_temp_to_step(long temp)
2141     {
2142     - return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
2143     + return (temp - HISI_TEMP_BASE) / HISI_TEMP_STEP;
2144     +}
2145     +
2146     +static inline long hisi_thermal_round_temp(int temp)
2147     +{
2148     + return hisi_thermal_step_to_temp(
2149     + hisi_thermal_temp_to_step(temp));
2150     }
2151    
2152     static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
2153     @@ -99,7 +119,7 @@ static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
2154     usleep_range(3000, 5000);
2155    
2156     val = readl(data->regs + TEMP0_VALUE);
2157     - val = _step_to_temp(val);
2158     + val = hisi_thermal_step_to_temp(val);
2159    
2160     mutex_unlock(&data->thermal_lock);
2161    
2162     @@ -126,10 +146,11 @@ static void hisi_thermal_enable_bind_irq_sensor
2163     writel((sensor->id << 12), data->regs + TEMP0_CFG);
2164    
2165     /* enable for interrupt */
2166     - writel(_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00,
2167     + writel(hisi_thermal_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00,
2168     data->regs + TEMP0_TH);
2169    
2170     - writel(_temp_to_step(HISI_TEMP_RESET), data->regs + TEMP0_RST_TH);
2171     + writel(hisi_thermal_temp_to_step(HISI_TEMP_RESET),
2172     + data->regs + TEMP0_RST_TH);
2173    
2174     /* enable module */
2175     writel(0x1, data->regs + TEMP0_RST_MSK);
2176     @@ -230,7 +251,7 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
2177     sensor = &data->sensors[data->irq_bind_sensor];
2178    
2179     dev_crit(&data->pdev->dev, "THERMAL ALARM: T > %d\n",
2180     - sensor->thres_temp / 1000);
2181     + sensor->thres_temp);
2182     mutex_unlock(&data->thermal_lock);
2183    
2184     for (i = 0; i < HISI_MAX_SENSORS; i++) {
2185     @@ -269,7 +290,7 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev,
2186    
2187     for (i = 0; i < of_thermal_get_ntrips(sensor->tzd); i++) {
2188     if (trip[i].type == THERMAL_TRIP_PASSIVE) {
2189     - sensor->thres_temp = trip[i].temperature;
2190     + sensor->thres_temp = hisi_thermal_round_temp(trip[i].temperature);
2191     break;
2192     }
2193     }
2194     @@ -317,15 +338,6 @@ static int hisi_thermal_probe(struct platform_device *pdev)
2195     if (data->irq < 0)
2196     return data->irq;
2197    
2198     - ret = devm_request_threaded_irq(&pdev->dev, data->irq,
2199     - hisi_thermal_alarm_irq,
2200     - hisi_thermal_alarm_irq_thread,
2201     - 0, "hisi_thermal", data);
2202     - if (ret < 0) {
2203     - dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
2204     - return ret;
2205     - }
2206     -
2207     platform_set_drvdata(pdev, data);
2208    
2209     data->clk = devm_clk_get(&pdev->dev, "thermal_clk");
2210     @@ -345,8 +357,7 @@ static int hisi_thermal_probe(struct platform_device *pdev)
2211     }
2212    
2213     hisi_thermal_enable_bind_irq_sensor(data);
2214     - irq_get_irqchip_state(data->irq, IRQCHIP_STATE_MASKED,
2215     - &data->irq_enabled);
2216     + data->irq_enabled = true;
2217    
2218     for (i = 0; i < HISI_MAX_SENSORS; ++i) {
2219     ret = hisi_thermal_register_sensor(pdev, data,
2220     @@ -358,6 +369,17 @@ static int hisi_thermal_probe(struct platform_device *pdev)
2221     hisi_thermal_toggle_sensor(&data->sensors[i], true);
2222     }
2223    
2224     + ret = devm_request_threaded_irq(&pdev->dev, data->irq,
2225     + hisi_thermal_alarm_irq,
2226     + hisi_thermal_alarm_irq_thread,
2227     + 0, "hisi_thermal", data);
2228     + if (ret < 0) {
2229     + dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
2230     + return ret;
2231     + }
2232     +
2233     + enable_irq(data->irq);
2234     +
2235     return 0;
2236     }
2237    
2238     @@ -397,8 +419,11 @@ static int hisi_thermal_suspend(struct device *dev)
2239     static int hisi_thermal_resume(struct device *dev)
2240     {
2241     struct hisi_thermal_data *data = dev_get_drvdata(dev);
2242     + int ret;
2243    
2244     - clk_prepare_enable(data->clk);
2245     + ret = clk_prepare_enable(data->clk);
2246     + if (ret)
2247     + return ret;
2248    
2249     data->irq_enabled = true;
2250     hisi_thermal_enable_bind_irq_sensor(data);
2251     diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
2252     index c7689d05356c..f8a1881609a2 100644
2253     --- a/drivers/usb/gadget/function/f_uvc.c
2254     +++ b/drivers/usb/gadget/function/f_uvc.c
2255     @@ -594,6 +594,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
2256     opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U);
2257     opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
2258    
2259     + /* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */
2260     + if (opts->streaming_maxburst &&
2261     + (opts->streaming_maxpacket % 1024) != 0) {
2262     + opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024);
2263     + INFO(cdev, "overriding streaming_maxpacket to %d\n",
2264     + opts->streaming_maxpacket);
2265     + }
2266     +
2267     /* Fill in the FS/HS/SS Video Streaming specific descriptors from the
2268     * module parameters.
2269     *
2270     diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
2271     index a97da645c1b9..8a365aad66fe 100644
2272     --- a/drivers/usb/gadget/udc/pch_udc.c
2273     +++ b/drivers/usb/gadget/udc/pch_udc.c
2274     @@ -1523,7 +1523,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
2275     td = phys_to_virt(addr);
2276     addr2 = (dma_addr_t)td->next;
2277     pci_pool_free(dev->data_requests, td, addr);
2278     - td->next = 0x00;
2279     addr = addr2;
2280     }
2281     req->chain_len = 1;
2282     diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
2283     index ca8b0b1ae37d..dec100811946 100644
2284     --- a/drivers/usb/host/xhci-plat.c
2285     +++ b/drivers/usb/host/xhci-plat.c
2286     @@ -335,6 +335,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
2287     static struct platform_driver usb_xhci_driver = {
2288     .probe = xhci_plat_probe,
2289     .remove = xhci_plat_remove,
2290     + .shutdown = usb_hcd_platform_shutdown,
2291     .driver = {
2292     .name = "xhci-hcd",
2293     .pm = DEV_PM_OPS,
2294     diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
2295     index 65d4a3015542..9f1ec4392209 100644
2296     --- a/drivers/vfio/pci/vfio_pci_config.c
2297     +++ b/drivers/vfio/pci/vfio_pci_config.c
2298     @@ -851,11 +851,13 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
2299    
2300     /*
2301     * Allow writes to device control fields, except devctl_phantom,
2302     - * which could confuse IOMMU, and the ARI bit in devctl2, which
2303     + * which could confuse IOMMU, MPS, which can break communication
2304     + * with other physical devices, and the ARI bit in devctl2, which
2305     * is set at probe time. FLR gets virtualized via our writefn.
2306     */
2307     p_setw(perm, PCI_EXP_DEVCTL,
2308     - PCI_EXP_DEVCTL_BCR_FLR, ~PCI_EXP_DEVCTL_PHANTOM);
2309     + PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD,
2310     + ~PCI_EXP_DEVCTL_PHANTOM);
2311     p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
2312     return 0;
2313     }
2314     diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
2315     index e3fad302b4fb..0ec970ca64ce 100644
2316     --- a/drivers/vhost/vsock.c
2317     +++ b/drivers/vhost/vsock.c
2318     @@ -218,6 +218,46 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
2319     return len;
2320     }
2321    
2322     +static int
2323     +vhost_transport_cancel_pkt(struct vsock_sock *vsk)
2324     +{
2325     + struct vhost_vsock *vsock;
2326     + struct virtio_vsock_pkt *pkt, *n;
2327     + int cnt = 0;
2328     + LIST_HEAD(freeme);
2329     +
2330     + /* Find the vhost_vsock according to guest context id */
2331     + vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
2332     + if (!vsock)
2333     + return -ENODEV;
2334     +
2335     + spin_lock_bh(&vsock->send_pkt_list_lock);
2336     + list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
2337     + if (pkt->vsk != vsk)
2338     + continue;
2339     + list_move(&pkt->list, &freeme);
2340     + }
2341     + spin_unlock_bh(&vsock->send_pkt_list_lock);
2342     +
2343     + list_for_each_entry_safe(pkt, n, &freeme, list) {
2344     + if (pkt->reply)
2345     + cnt++;
2346     + list_del(&pkt->list);
2347     + virtio_transport_free_pkt(pkt);
2348     + }
2349     +
2350     + if (cnt) {
2351     + struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
2352     + int new_cnt;
2353     +
2354     + new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
2355     + if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
2356     + vhost_poll_queue(&tx_vq->poll);
2357     + }
2358     +
2359     + return 0;
2360     +}
2361     +
2362     static struct virtio_vsock_pkt *
2363     vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
2364     unsigned int out, unsigned int in)
2365     @@ -669,6 +709,7 @@ static struct virtio_transport vhost_transport = {
2366     .release = virtio_transport_release,
2367     .connect = virtio_transport_connect,
2368     .shutdown = virtio_transport_shutdown,
2369     + .cancel_pkt = vhost_transport_cancel_pkt,
2370    
2371     .dgram_enqueue = virtio_transport_dgram_enqueue,
2372     .dgram_dequeue = virtio_transport_dgram_dequeue,
2373     diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
2374     index 12614006211e..d95ae092f154 100644
2375     --- a/drivers/video/backlight/pwm_bl.c
2376     +++ b/drivers/video/backlight/pwm_bl.c
2377     @@ -79,14 +79,17 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
2378     static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
2379     {
2380     unsigned int lth = pb->lth_brightness;
2381     - int duty_cycle;
2382     + u64 duty_cycle;
2383    
2384     if (pb->levels)
2385     duty_cycle = pb->levels[brightness];
2386     else
2387     duty_cycle = brightness;
2388    
2389     - return (duty_cycle * (pb->period - lth) / pb->scale) + lth;
2390     + duty_cycle *= pb->period - lth;
2391     + do_div(duty_cycle, pb->scale);
2392     +
2393     + return duty_cycle + lth;
2394     }
2395    
2396     static int pwm_backlight_update_status(struct backlight_device *bl)
2397     diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
2398     index 2c2e6792f7e0..a7c08cc4c1b7 100644
2399     --- a/drivers/virtio/virtio_balloon.c
2400     +++ b/drivers/virtio/virtio_balloon.c
2401     @@ -241,11 +241,11 @@ static inline void update_stat(struct virtio_balloon *vb, int idx,
2402    
2403     #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
2404    
2405     -static void update_balloon_stats(struct virtio_balloon *vb)
2406     +static unsigned int update_balloon_stats(struct virtio_balloon *vb)
2407     {
2408     unsigned long events[NR_VM_EVENT_ITEMS];
2409     struct sysinfo i;
2410     - int idx = 0;
2411     + unsigned int idx = 0;
2412     long available;
2413    
2414     all_vm_events(events);
2415     @@ -253,18 +253,22 @@ static void update_balloon_stats(struct virtio_balloon *vb)
2416    
2417     available = si_mem_available();
2418    
2419     +#ifdef CONFIG_VM_EVENT_COUNTERS
2420     update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
2421     pages_to_bytes(events[PSWPIN]));
2422     update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
2423     pages_to_bytes(events[PSWPOUT]));
2424     update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
2425     update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
2426     +#endif
2427     update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
2428     pages_to_bytes(i.freeram));
2429     update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
2430     pages_to_bytes(i.totalram));
2431     update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
2432     pages_to_bytes(available));
2433     +
2434     + return idx;
2435     }
2436    
2437     /*
2438     @@ -290,14 +294,14 @@ static void stats_handle_request(struct virtio_balloon *vb)
2439     {
2440     struct virtqueue *vq;
2441     struct scatterlist sg;
2442     - unsigned int len;
2443     + unsigned int len, num_stats;
2444    
2445     - update_balloon_stats(vb);
2446     + num_stats = update_balloon_stats(vb);
2447    
2448     vq = vb->stats_vq;
2449     if (!virtqueue_get_buf(vq, &len))
2450     return;
2451     - sg_init_one(&sg, vb->stats, sizeof(vb->stats));
2452     + sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
2453     virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
2454     virtqueue_kick(vq);
2455     }
2456     @@ -421,15 +425,16 @@ static int init_vqs(struct virtio_balloon *vb)
2457     vb->deflate_vq = vqs[1];
2458     if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
2459     struct scatterlist sg;
2460     + unsigned int num_stats;
2461     vb->stats_vq = vqs[2];
2462    
2463     /*
2464     * Prime this virtqueue with one buffer so the hypervisor can
2465     * use it to signal us later (it can't be broken yet!).
2466     */
2467     - update_balloon_stats(vb);
2468     + num_stats = update_balloon_stats(vb);
2469    
2470     - sg_init_one(&sg, vb->stats, sizeof vb->stats);
2471     + sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
2472     if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
2473     < 0)
2474     BUG();
2475     diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
2476     index 77f9efc1f7aa..9a47b5598df7 100644
2477     --- a/fs/btrfs/send.c
2478     +++ b/fs/btrfs/send.c
2479     @@ -6196,8 +6196,13 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
2480     goto out;
2481     }
2482    
2483     + /*
2484     + * Check that we don't overflow at later allocations, we request
2485     + * clone_sources_count + 1 items, and compare to unsigned long inside
2486     + * access_ok.
2487     + */
2488     if (arg->clone_sources_count >
2489     - ULLONG_MAX / sizeof(*arg->clone_sources)) {
2490     + ULONG_MAX / sizeof(struct clone_root) - 1) {
2491     ret = -EINVAL;
2492     goto out;
2493     }
2494     diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
2495     index 3101141661a1..4c4e9358c146 100644
2496     --- a/include/linux/bpf_verifier.h
2497     +++ b/include/linux/bpf_verifier.h
2498     @@ -68,6 +68,7 @@ struct bpf_verifier_state_list {
2499    
2500     struct bpf_insn_aux_data {
2501     enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
2502     + bool seen; /* this insn was processed by the verifier */
2503     };
2504    
2505     #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
2506     diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
2507     index 9638bfeb0d1f..584f9a647ad4 100644
2508     --- a/include/linux/virtio_vsock.h
2509     +++ b/include/linux/virtio_vsock.h
2510     @@ -48,6 +48,8 @@ struct virtio_vsock_pkt {
2511     struct virtio_vsock_hdr hdr;
2512     struct work_struct work;
2513     struct list_head list;
2514     + /* socket refcnt not held, only use for cancellation */
2515     + struct vsock_sock *vsk;
2516     void *buf;
2517     u32 len;
2518     u32 off;
2519     @@ -56,6 +58,7 @@ struct virtio_vsock_pkt {
2520    
2521     struct virtio_vsock_pkt_info {
2522     u32 remote_cid, remote_port;
2523     + struct vsock_sock *vsk;
2524     struct msghdr *msg;
2525     u32 pkt_len;
2526     u16 type;
2527     diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
2528     index f2758964ce6f..f32ed9ac181a 100644
2529     --- a/include/net/af_vsock.h
2530     +++ b/include/net/af_vsock.h
2531     @@ -100,6 +100,9 @@ struct vsock_transport {
2532     void (*destruct)(struct vsock_sock *);
2533     void (*release)(struct vsock_sock *);
2534    
2535     + /* Cancel all pending packets sent on vsock. */
2536     + int (*cancel_pkt)(struct vsock_sock *vsk);
2537     +
2538     /* Connections. */
2539     int (*connect)(struct vsock_sock *);
2540    
2541     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
2542     index 372454aa7f37..8b1ebe4c6aba 100644
2543     --- a/kernel/bpf/verifier.c
2544     +++ b/kernel/bpf/verifier.c
2545     @@ -1790,10 +1790,17 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2546     /* case: R = imm
2547     * remember the value we stored into this reg
2548     */
2549     + u64 imm;
2550     +
2551     + if (BPF_CLASS(insn->code) == BPF_ALU64)
2552     + imm = insn->imm;
2553     + else
2554     + imm = (u32)insn->imm;
2555     +
2556     regs[insn->dst_reg].type = CONST_IMM;
2557     - regs[insn->dst_reg].imm = insn->imm;
2558     - regs[insn->dst_reg].max_value = insn->imm;
2559     - regs[insn->dst_reg].min_value = insn->imm;
2560     + regs[insn->dst_reg].imm = imm;
2561     + regs[insn->dst_reg].max_value = imm;
2562     + regs[insn->dst_reg].min_value = imm;
2563     }
2564    
2565     } else if (opcode > BPF_END) {
2566     @@ -1861,10 +1868,28 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2567     ((BPF_SRC(insn->code) == BPF_X &&
2568     regs[insn->src_reg].type == CONST_IMM) ||
2569     BPF_SRC(insn->code) == BPF_K)) {
2570     - if (BPF_SRC(insn->code) == BPF_X)
2571     + if (BPF_SRC(insn->code) == BPF_X) {
2572     + /* check in case the register contains a big
2573     + * 64-bit value
2574     + */
2575     + if (regs[insn->src_reg].imm < -MAX_BPF_STACK ||
2576     + regs[insn->src_reg].imm > MAX_BPF_STACK) {
2577     + verbose("R%d value too big in R%d pointer arithmetic\n",
2578     + insn->src_reg, insn->dst_reg);
2579     + return -EACCES;
2580     + }
2581     dst_reg->imm += regs[insn->src_reg].imm;
2582     - else
2583     + } else {
2584     + /* safe against overflow: addition of 32-bit
2585     + * numbers in 64-bit representation
2586     + */
2587     dst_reg->imm += insn->imm;
2588     + }
2589     + if (dst_reg->imm > 0 || dst_reg->imm < -MAX_BPF_STACK) {
2590     + verbose("R%d out-of-bounds pointer arithmetic\n",
2591     + insn->dst_reg);
2592     + return -EACCES;
2593     + }
2594     return 0;
2595     } else if (opcode == BPF_ADD &&
2596     BPF_CLASS(insn->code) == BPF_ALU64 &&
2597     @@ -2862,6 +2887,7 @@ static int do_check(struct bpf_verifier_env *env)
2598     if (err)
2599     return err;
2600    
2601     + env->insn_aux_data[insn_idx].seen = true;
2602     if (class == BPF_ALU || class == BPF_ALU64) {
2603     err = check_alu_op(env, insn);
2604     if (err)
2605     @@ -3059,6 +3085,7 @@ static int do_check(struct bpf_verifier_env *env)
2606     return err;
2607    
2608     insn_idx++;
2609     + env->insn_aux_data[insn_idx].seen = true;
2610     } else {
2611     verbose("invalid BPF_LD mode\n");
2612     return -EINVAL;
2613     @@ -3210,6 +3237,63 @@ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
2614     insn->src_reg = 0;
2615     }
2616    
2617     +/* single env->prog->insni[off] instruction was replaced with the range
2618     + * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
2619     + * [0, off) and [off, end) to new locations, so the patched range stays zero
2620     + */
2621     +static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
2622     + u32 off, u32 cnt)
2623     +{
2624     + struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
2625     + int i;
2626     +
2627     + if (cnt == 1)
2628     + return 0;
2629     + new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
2630     + if (!new_data)
2631     + return -ENOMEM;
2632     + memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
2633     + memcpy(new_data + off + cnt - 1, old_data + off,
2634     + sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
2635     + for (i = off; i < off + cnt - 1; i++)
2636     + new_data[i].seen = true;
2637     + env->insn_aux_data = new_data;
2638     + vfree(old_data);
2639     + return 0;
2640     +}
2641     +
2642     +static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
2643     + const struct bpf_insn *patch, u32 len)
2644     +{
2645     + struct bpf_prog *new_prog;
2646     +
2647     + new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
2648     + if (!new_prog)
2649     + return NULL;
2650     + if (adjust_insn_aux_data(env, new_prog->len, off, len))
2651     + return NULL;
2652     + return new_prog;
2653     +}
2654     +
2655     +/* The verifier does more data flow analysis than llvm and will not explore
2656     + * branches that are dead at run time. Malicious programs can have dead code
2657     + * too. Therefore replace all dead at-run-time code with nops.
2658     + */
2659     +static void sanitize_dead_code(struct bpf_verifier_env *env)
2660     +{
2661     + struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
2662     + struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
2663     + struct bpf_insn *insn = env->prog->insnsi;
2664     + const int insn_cnt = env->prog->len;
2665     + int i;
2666     +
2667     + for (i = 0; i < insn_cnt; i++) {
2668     + if (aux_data[i].seen)
2669     + continue;
2670     + memcpy(insn + i, &nop, sizeof(nop));
2671     + }
2672     +}
2673     +
2674     /* convert load instructions that access fields of 'struct __sk_buff'
2675     * into sequence of instructions that access fields of 'struct sk_buff'
2676     */
2677     @@ -3229,10 +3313,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
2678     verbose("bpf verifier is misconfigured\n");
2679     return -EINVAL;
2680     } else if (cnt) {
2681     - new_prog = bpf_patch_insn_single(env->prog, 0,
2682     - insn_buf, cnt);
2683     + new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
2684     if (!new_prog)
2685     return -ENOMEM;
2686     +
2687     env->prog = new_prog;
2688     delta += cnt - 1;
2689     }
2690     @@ -3253,7 +3337,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
2691     else
2692     continue;
2693    
2694     - if (env->insn_aux_data[i].ptr_type != PTR_TO_CTX)
2695     + if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
2696     continue;
2697    
2698     cnt = ops->convert_ctx_access(type, insn->dst_reg, insn->src_reg,
2699     @@ -3263,8 +3347,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
2700     return -EINVAL;
2701     }
2702    
2703     - new_prog = bpf_patch_insn_single(env->prog, i + delta, insn_buf,
2704     - cnt);
2705     + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
2706     if (!new_prog)
2707     return -ENOMEM;
2708    
2709     @@ -3372,6 +3455,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
2710     while (pop_stack(env, NULL) >= 0);
2711     free_states(env);
2712    
2713     + if (ret == 0)
2714     + sanitize_dead_code(env);
2715     +
2716     if (ret == 0)
2717     /* program is valid, convert *(u32*)(ctx + off) accesses */
2718     ret = convert_ctx_accesses(env);
2719     diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
2720     index f3a960ed75a1..0664044ade06 100644
2721     --- a/kernel/trace/trace_events_hist.c
2722     +++ b/kernel/trace/trace_events_hist.c
2723     @@ -449,7 +449,7 @@ static int create_val_field(struct hist_trigger_data *hist_data,
2724     }
2725    
2726     field = trace_find_event_field(file->event_call, field_name);
2727     - if (!field) {
2728     + if (!field || !field->size) {
2729     ret = -EINVAL;
2730     goto out;
2731     }
2732     @@ -547,7 +547,7 @@ static int create_key_field(struct hist_trigger_data *hist_data,
2733     }
2734    
2735     field = trace_find_event_field(file->event_call, field_name);
2736     - if (!field) {
2737     + if (!field || !field->size) {
2738     ret = -EINVAL;
2739     goto out;
2740     }
2741     diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
2742     index 0df2aa652530..a7f05f0130e8 100644
2743     --- a/net/core/sysctl_net_core.c
2744     +++ b/net/core/sysctl_net_core.c
2745     @@ -369,14 +369,16 @@ static struct ctl_table net_core_table[] = {
2746     .data = &sysctl_net_busy_poll,
2747     .maxlen = sizeof(unsigned int),
2748     .mode = 0644,
2749     - .proc_handler = proc_dointvec
2750     + .proc_handler = proc_dointvec_minmax,
2751     + .extra1 = &zero,
2752     },
2753     {
2754     .procname = "busy_read",
2755     .data = &sysctl_net_busy_read,
2756     .maxlen = sizeof(unsigned int),
2757     .mode = 0644,
2758     - .proc_handler = proc_dointvec
2759     + .proc_handler = proc_dointvec_minmax,
2760     + .extra1 = &zero,
2761     },
2762     #endif
2763     #ifdef CONFIG_NET_SCHED
2764     diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
2765     index 453db950dc9f..4bf3b8af0257 100644
2766     --- a/net/ipv4/ip_fragment.c
2767     +++ b/net/ipv4/ip_fragment.c
2768     @@ -198,6 +198,7 @@ static void ip_expire(unsigned long arg)
2769     qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
2770     net = container_of(qp->q.net, struct net, ipv4.frags);
2771    
2772     + rcu_read_lock();
2773     spin_lock(&qp->q.lock);
2774    
2775     if (qp->q.flags & INET_FRAG_COMPLETE)
2776     @@ -207,7 +208,7 @@ static void ip_expire(unsigned long arg)
2777     __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
2778    
2779     if (!inet_frag_evicting(&qp->q)) {
2780     - struct sk_buff *head = qp->q.fragments;
2781     + struct sk_buff *clone, *head = qp->q.fragments;
2782     const struct iphdr *iph;
2783     int err;
2784    
2785     @@ -216,32 +217,40 @@ static void ip_expire(unsigned long arg)
2786     if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
2787     goto out;
2788    
2789     - rcu_read_lock();
2790     head->dev = dev_get_by_index_rcu(net, qp->iif);
2791     if (!head->dev)
2792     - goto out_rcu_unlock;
2793     + goto out;
2794     +
2795    
2796     /* skb has no dst, perform route lookup again */
2797     iph = ip_hdr(head);
2798     err = ip_route_input_noref(head, iph->daddr, iph->saddr,
2799     iph->tos, head->dev);
2800     if (err)
2801     - goto out_rcu_unlock;
2802     + goto out;
2803    
2804     /* Only an end host needs to send an ICMP
2805     * "Fragment Reassembly Timeout" message, per RFC792.
2806     */
2807     if (frag_expire_skip_icmp(qp->user) &&
2808     (skb_rtable(head)->rt_type != RTN_LOCAL))
2809     - goto out_rcu_unlock;
2810     + goto out;
2811     +
2812     + clone = skb_clone(head, GFP_ATOMIC);
2813    
2814     /* Send an ICMP "Fragment Reassembly Timeout" message. */
2815     - icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
2816     -out_rcu_unlock:
2817     - rcu_read_unlock();
2818     + if (clone) {
2819     + spin_unlock(&qp->q.lock);
2820     + icmp_send(clone, ICMP_TIME_EXCEEDED,
2821     + ICMP_EXC_FRAGTIME, 0);
2822     + consume_skb(clone);
2823     + goto out_rcu_unlock;
2824     + }
2825     }
2826     out:
2827     spin_unlock(&qp->q.lock);
2828     +out_rcu_unlock:
2829     + rcu_read_unlock();
2830     ipq_put(qp);
2831     }
2832    
2833     diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
2834     index 071a785c65eb..b23464d9c538 100644
2835     --- a/net/ipv4/ipconfig.c
2836     +++ b/net/ipv4/ipconfig.c
2837     @@ -306,7 +306,7 @@ static void __init ic_close_devs(void)
2838     while ((d = next)) {
2839     next = d->next;
2840     dev = d->dev;
2841     - if ((!ic_dev || dev != ic_dev->dev) && !netdev_uses_dsa(dev)) {
2842     + if (d != ic_dev && !netdev_uses_dsa(dev)) {
2843     pr_debug("IP-Config: Downing %s\n", dev->name);
2844     dev_change_flags(dev, d->flags);
2845     }
2846     diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
2847     index 5a8f7c360887..53e49f5011d3 100644
2848     --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
2849     +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
2850     @@ -1260,16 +1260,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = {
2851     .timeout = 180,
2852     };
2853    
2854     -static struct nf_conntrack_helper snmp_helper __read_mostly = {
2855     - .me = THIS_MODULE,
2856     - .help = help,
2857     - .expect_policy = &snmp_exp_policy,
2858     - .name = "snmp",
2859     - .tuple.src.l3num = AF_INET,
2860     - .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
2861     - .tuple.dst.protonum = IPPROTO_UDP,
2862     -};
2863     -
2864     static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
2865     .me = THIS_MODULE,
2866     .help = help,
2867     @@ -1288,17 +1278,10 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
2868    
2869     static int __init nf_nat_snmp_basic_init(void)
2870     {
2871     - int ret = 0;
2872     -
2873     BUG_ON(nf_nat_snmp_hook != NULL);
2874     RCU_INIT_POINTER(nf_nat_snmp_hook, help);
2875    
2876     - ret = nf_conntrack_helper_register(&snmp_trap_helper);
2877     - if (ret < 0) {
2878     - nf_conntrack_helper_unregister(&snmp_helper);
2879     - return ret;
2880     - }
2881     - return ret;
2882     + return nf_conntrack_helper_register(&snmp_trap_helper);
2883     }
2884    
2885     static void __exit nf_nat_snmp_basic_fini(void)
2886     diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
2887     index 4c4bac1b5eab..3ecb61ee42fb 100644
2888     --- a/net/ipv4/tcp_vegas.c
2889     +++ b/net/ipv4/tcp_vegas.c
2890     @@ -158,7 +158,7 @@ EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
2891    
2892     static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
2893     {
2894     - return min(tp->snd_ssthresh, tp->snd_cwnd-1);
2895     + return min(tp->snd_ssthresh, tp->snd_cwnd);
2896     }
2897    
2898     static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
2899     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2900     index a4fb90c4819f..1594d9fc9c92 100644
2901     --- a/net/ipv6/addrconf.c
2902     +++ b/net/ipv6/addrconf.c
2903     @@ -286,10 +286,10 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
2904     .keep_addr_on_down = 0,
2905     };
2906    
2907     -/* Check if a valid qdisc is available */
2908     -static inline bool addrconf_qdisc_ok(const struct net_device *dev)
2909     +/* Check if link is ready: is it up and is a valid qdisc available */
2910     +static inline bool addrconf_link_ready(const struct net_device *dev)
2911     {
2912     - return !qdisc_tx_is_noop(dev);
2913     + return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
2914     }
2915    
2916     static void addrconf_del_rs_timer(struct inet6_dev *idev)
2917     @@ -434,7 +434,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
2918    
2919     ndev->token = in6addr_any;
2920    
2921     - if (netif_running(dev) && addrconf_qdisc_ok(dev))
2922     + if (netif_running(dev) && addrconf_link_ready(dev))
2923     ndev->if_flags |= IF_READY;
2924    
2925     ipv6_mc_init_dev(ndev);
2926     @@ -3368,7 +3368,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2927     /* restore routes for permanent addresses */
2928     addrconf_permanent_addr(dev);
2929    
2930     - if (!addrconf_qdisc_ok(dev)) {
2931     + if (!addrconf_link_ready(dev)) {
2932     /* device is not ready yet. */
2933     pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
2934     dev->name);
2935     @@ -3383,7 +3383,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2936     run_pending = 1;
2937     }
2938     } else if (event == NETDEV_CHANGE) {
2939     - if (!addrconf_qdisc_ok(dev)) {
2940     + if (!addrconf_link_ready(dev)) {
2941     /* device is still not ready. */
2942     break;
2943     }
2944     diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
2945     index b1fcfa08f0b4..28d065394c09 100644
2946     --- a/net/netfilter/nfnetlink_cthelper.c
2947     +++ b/net/netfilter/nfnetlink_cthelper.c
2948     @@ -32,6 +32,13 @@ MODULE_LICENSE("GPL");
2949     MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
2950     MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
2951    
2952     +struct nfnl_cthelper {
2953     + struct list_head list;
2954     + struct nf_conntrack_helper helper;
2955     +};
2956     +
2957     +static LIST_HEAD(nfnl_cthelper_list);
2958     +
2959     static int
2960     nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
2961     struct nf_conn *ct, enum ip_conntrack_info ctinfo)
2962     @@ -205,18 +212,20 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
2963     struct nf_conntrack_tuple *tuple)
2964     {
2965     struct nf_conntrack_helper *helper;
2966     + struct nfnl_cthelper *nfcth;
2967     int ret;
2968    
2969     if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
2970     return -EINVAL;
2971    
2972     - helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL);
2973     - if (helper == NULL)
2974     + nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL);
2975     + if (nfcth == NULL)
2976     return -ENOMEM;
2977     + helper = &nfcth->helper;
2978    
2979     ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
2980     if (ret < 0)
2981     - goto err;
2982     + goto err1;
2983    
2984     strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
2985     helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
2986     @@ -247,14 +256,100 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
2987    
2988     ret = nf_conntrack_helper_register(helper);
2989     if (ret < 0)
2990     - goto err;
2991     + goto err2;
2992    
2993     + list_add_tail(&nfcth->list, &nfnl_cthelper_list);
2994     return 0;
2995     -err:
2996     - kfree(helper);
2997     +err2:
2998     + kfree(helper->expect_policy);
2999     +err1:
3000     + kfree(nfcth);
3001     return ret;
3002     }
3003    
3004     +static int
3005     +nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy,
3006     + struct nf_conntrack_expect_policy *new_policy,
3007     + const struct nlattr *attr)
3008     +{
3009     + struct nlattr *tb[NFCTH_POLICY_MAX + 1];
3010     + int err;
3011     +
3012     + err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr,
3013     + nfnl_cthelper_expect_pol);
3014     + if (err < 0)
3015     + return err;
3016     +
3017     + if (!tb[NFCTH_POLICY_NAME] ||
3018     + !tb[NFCTH_POLICY_EXPECT_MAX] ||
3019     + !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
3020     + return -EINVAL;
3021     +
3022     + if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
3023     + return -EBUSY;
3024     +
3025     + new_policy->max_expected =
3026     + ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
3027     + new_policy->timeout =
3028     + ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
3029     +
3030     + return 0;
3031     +}
3032     +
3033     +static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
3034     + struct nf_conntrack_helper *helper)
3035     +{
3036     + struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1];
3037     + struct nf_conntrack_expect_policy *policy;
3038     + int i, err;
3039     +
3040     + /* Check first that all policy attributes are well-formed, so we don't
3041     + * leave things in inconsistent state on errors.
3042     + */
3043     + for (i = 0; i < helper->expect_class_max + 1; i++) {
3044     +
3045     + if (!tb[NFCTH_POLICY_SET + i])
3046     + return -EINVAL;
3047     +
3048     + err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i],
3049     + &new_policy[i],
3050     + tb[NFCTH_POLICY_SET + i]);
3051     + if (err < 0)
3052     + return err;
3053     + }
3054     + /* Now we can safely update them. */
3055     + for (i = 0; i < helper->expect_class_max + 1; i++) {
3056     + policy = (struct nf_conntrack_expect_policy *)
3057     + &helper->expect_policy[i];
3058     + policy->max_expected = new_policy->max_expected;
3059     + policy->timeout = new_policy->timeout;
3060     + }
3061     +
3062     + return 0;
3063     +}
3064     +
3065     +static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
3066     + const struct nlattr *attr)
3067     +{
3068     + struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
3069     + unsigned int class_max;
3070     + int err;
3071     +
3072     + err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
3073     + nfnl_cthelper_expect_policy_set);
3074     + if (err < 0)
3075     + return err;
3076     +
3077     + if (!tb[NFCTH_POLICY_SET_NUM])
3078     + return -EINVAL;
3079     +
3080     + class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
3081     + if (helper->expect_class_max + 1 != class_max)
3082     + return -EBUSY;
3083     +
3084     + return nfnl_cthelper_update_policy_all(tb, helper);
3085     +}
3086     +
3087     static int
3088     nfnl_cthelper_update(const struct nlattr * const tb[],
3089     struct nf_conntrack_helper *helper)
3090     @@ -265,8 +360,7 @@ nfnl_cthelper_update(const struct nlattr * const tb[],
3091     return -EBUSY;
3092    
3093     if (tb[NFCTH_POLICY]) {
3094     - ret = nfnl_cthelper_parse_expect_policy(helper,
3095     - tb[NFCTH_POLICY]);
3096     + ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
3097     if (ret < 0)
3098     return ret;
3099     }
3100     @@ -295,7 +389,8 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
3101     const char *helper_name;
3102     struct nf_conntrack_helper *cur, *helper = NULL;
3103     struct nf_conntrack_tuple tuple;
3104     - int ret = 0, i;
3105     + struct nfnl_cthelper *nlcth;
3106     + int ret = 0;
3107    
3108     if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
3109     return -EINVAL;
3110     @@ -306,31 +401,22 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
3111     if (ret < 0)
3112     return ret;
3113    
3114     - rcu_read_lock();
3115     - for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
3116     - hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
3117     + list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
3118     + cur = &nlcth->helper;
3119    
3120     - /* skip non-userspace conntrack helpers. */
3121     - if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
3122     - continue;
3123     + if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
3124     + continue;
3125    
3126     - if (strncmp(cur->name, helper_name,
3127     - NF_CT_HELPER_NAME_LEN) != 0)
3128     - continue;
3129     + if ((tuple.src.l3num != cur->tuple.src.l3num ||
3130     + tuple.dst.protonum != cur->tuple.dst.protonum))
3131     + continue;
3132    
3133     - if ((tuple.src.l3num != cur->tuple.src.l3num ||
3134     - tuple.dst.protonum != cur->tuple.dst.protonum))
3135     - continue;
3136     + if (nlh->nlmsg_flags & NLM_F_EXCL)
3137     + return -EEXIST;
3138    
3139     - if (nlh->nlmsg_flags & NLM_F_EXCL) {
3140     - ret = -EEXIST;
3141     - goto err;
3142     - }
3143     - helper = cur;
3144     - break;
3145     - }
3146     + helper = cur;
3147     + break;
3148     }
3149     - rcu_read_unlock();
3150    
3151     if (helper == NULL)
3152     ret = nfnl_cthelper_create(tb, &tuple);
3153     @@ -338,9 +424,6 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
3154     ret = nfnl_cthelper_update(tb, helper);
3155    
3156     return ret;
3157     -err:
3158     - rcu_read_unlock();
3159     - return ret;
3160     }
3161    
3162     static int
3163     @@ -504,11 +587,12 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
3164     struct sk_buff *skb, const struct nlmsghdr *nlh,
3165     const struct nlattr * const tb[])
3166     {
3167     - int ret = -ENOENT, i;
3168     + int ret = -ENOENT;
3169     struct nf_conntrack_helper *cur;
3170     struct sk_buff *skb2;
3171     char *helper_name = NULL;
3172     struct nf_conntrack_tuple tuple;
3173     + struct nfnl_cthelper *nlcth;
3174     bool tuple_set = false;
3175    
3176     if (nlh->nlmsg_flags & NLM_F_DUMP) {
3177     @@ -529,45 +613,39 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
3178     tuple_set = true;
3179     }
3180    
3181     - for (i = 0; i < nf_ct_helper_hsize; i++) {
3182     - hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
3183     + list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
3184     + cur = &nlcth->helper;
3185     + if (helper_name &&
3186     + strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
3187     + continue;
3188    
3189     - /* skip non-userspace conntrack helpers. */
3190     - if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
3191     - continue;
3192     + if (tuple_set &&
3193     + (tuple.src.l3num != cur->tuple.src.l3num ||
3194     + tuple.dst.protonum != cur->tuple.dst.protonum))
3195     + continue;
3196    
3197     - if (helper_name && strncmp(cur->name, helper_name,
3198     - NF_CT_HELPER_NAME_LEN) != 0) {
3199     - continue;
3200     - }
3201     - if (tuple_set &&
3202     - (tuple.src.l3num != cur->tuple.src.l3num ||
3203     - tuple.dst.protonum != cur->tuple.dst.protonum))
3204     - continue;
3205     -
3206     - skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3207     - if (skb2 == NULL) {
3208     - ret = -ENOMEM;
3209     - break;
3210     - }
3211     + skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3212     + if (skb2 == NULL) {
3213     + ret = -ENOMEM;
3214     + break;
3215     + }
3216    
3217     - ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
3218     - nlh->nlmsg_seq,
3219     - NFNL_MSG_TYPE(nlh->nlmsg_type),
3220     - NFNL_MSG_CTHELPER_NEW, cur);
3221     - if (ret <= 0) {
3222     - kfree_skb(skb2);
3223     - break;
3224     - }
3225     + ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
3226     + nlh->nlmsg_seq,
3227     + NFNL_MSG_TYPE(nlh->nlmsg_type),
3228     + NFNL_MSG_CTHELPER_NEW, cur);
3229     + if (ret <= 0) {
3230     + kfree_skb(skb2);
3231     + break;
3232     + }
3233    
3234     - ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
3235     - MSG_DONTWAIT);
3236     - if (ret > 0)
3237     - ret = 0;
3238     + ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
3239     + MSG_DONTWAIT);
3240     + if (ret > 0)
3241     + ret = 0;
3242    
3243     - /* this avoids a loop in nfnetlink. */
3244     - return ret == -EAGAIN ? -ENOBUFS : ret;
3245     - }
3246     + /* this avoids a loop in nfnetlink. */
3247     + return ret == -EAGAIN ? -ENOBUFS : ret;
3248     }
3249     return ret;
3250     }
3251     @@ -578,10 +656,10 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
3252     {
3253     char *helper_name = NULL;
3254     struct nf_conntrack_helper *cur;
3255     - struct hlist_node *tmp;
3256     struct nf_conntrack_tuple tuple;
3257     bool tuple_set = false, found = false;
3258     - int i, j = 0, ret;
3259     + struct nfnl_cthelper *nlcth, *n;
3260     + int j = 0, ret;
3261    
3262     if (tb[NFCTH_NAME])
3263     helper_name = nla_data(tb[NFCTH_NAME]);
3264     @@ -594,28 +672,27 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
3265     tuple_set = true;
3266     }
3267    
3268     - for (i = 0; i < nf_ct_helper_hsize; i++) {
3269     - hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
3270     - hnode) {
3271     - /* skip non-userspace conntrack helpers. */
3272     - if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
3273     - continue;
3274     + list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
3275     + cur = &nlcth->helper;
3276     + j++;
3277    
3278     - j++;
3279     + if (helper_name &&
3280     + strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
3281     + continue;
3282    
3283     - if (helper_name && strncmp(cur->name, helper_name,
3284     - NF_CT_HELPER_NAME_LEN) != 0) {
3285     - continue;
3286     - }
3287     - if (tuple_set &&
3288     - (tuple.src.l3num != cur->tuple.src.l3num ||
3289     - tuple.dst.protonum != cur->tuple.dst.protonum))
3290     - continue;
3291     + if (tuple_set &&
3292     + (tuple.src.l3num != cur->tuple.src.l3num ||
3293     + tuple.dst.protonum != cur->tuple.dst.protonum))
3294     + continue;
3295    
3296     - found = true;
3297     - nf_conntrack_helper_unregister(cur);
3298     - }
3299     + found = true;
3300     + nf_conntrack_helper_unregister(cur);
3301     + kfree(cur->expect_policy);
3302     +
3303     + list_del(&nlcth->list);
3304     + kfree(nlcth);
3305     }
3306     +
3307     /* Make sure we return success if we flush and there is no helpers */
3308     return (found || j == 0) ? 0 : -ENOENT;
3309     }
3310     @@ -664,20 +741,16 @@ static int __init nfnl_cthelper_init(void)
3311     static void __exit nfnl_cthelper_exit(void)
3312     {
3313     struct nf_conntrack_helper *cur;
3314     - struct hlist_node *tmp;
3315     - int i;
3316     + struct nfnl_cthelper *nlcth, *n;
3317    
3318     nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
3319    
3320     - for (i=0; i<nf_ct_helper_hsize; i++) {
3321     - hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
3322     - hnode) {
3323     - /* skip non-userspace conntrack helpers. */
3324     - if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
3325     - continue;
3326     + list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
3327     + cur = &nlcth->helper;
3328    
3329     - nf_conntrack_helper_unregister(cur);
3330     - }
3331     + nf_conntrack_helper_unregister(cur);
3332     + kfree(cur->expect_policy);
3333     + kfree(nlcth);
3334     }
3335     }
3336    
3337     diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
3338     index af832c526048..5efb40291ac3 100644
3339     --- a/net/netfilter/nfnetlink_queue.c
3340     +++ b/net/netfilter/nfnetlink_queue.c
3341     @@ -443,7 +443,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
3342     skb = alloc_skb(size, GFP_ATOMIC);
3343     if (!skb) {
3344     skb_tx_error(entskb);
3345     - return NULL;
3346     + goto nlmsg_failure;
3347     }
3348    
3349     nlh = nlmsg_put(skb, 0, 0,
3350     @@ -452,7 +452,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
3351     if (!nlh) {
3352     skb_tx_error(entskb);
3353     kfree_skb(skb);
3354     - return NULL;
3355     + goto nlmsg_failure;
3356     }
3357     nfmsg = nlmsg_data(nlh);
3358     nfmsg->nfgen_family = entry->state.pf;
3359     @@ -598,12 +598,17 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
3360     }
3361    
3362     nlh->nlmsg_len = skb->len;
3363     + if (seclen)
3364     + security_release_secctx(secdata, seclen);
3365     return skb;
3366    
3367     nla_put_failure:
3368     skb_tx_error(entskb);
3369     kfree_skb(skb);
3370     net_err_ratelimited("nf_queue: error creating packet message\n");
3371     +nlmsg_failure:
3372     + if (seclen)
3373     + security_release_secctx(secdata, seclen);
3374     return NULL;
3375     }
3376    
3377     diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
3378     index c9fac08a53b1..1ff497bd9c20 100644
3379     --- a/net/netlink/af_netlink.c
3380     +++ b/net/netlink/af_netlink.c
3381     @@ -96,6 +96,44 @@ EXPORT_SYMBOL_GPL(nl_table);
3382    
3383     static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
3384    
3385     +static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
3386     +
3387     +static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
3388     + "nlk_cb_mutex-ROUTE",
3389     + "nlk_cb_mutex-1",
3390     + "nlk_cb_mutex-USERSOCK",
3391     + "nlk_cb_mutex-FIREWALL",
3392     + "nlk_cb_mutex-SOCK_DIAG",
3393     + "nlk_cb_mutex-NFLOG",
3394     + "nlk_cb_mutex-XFRM",
3395     + "nlk_cb_mutex-SELINUX",
3396     + "nlk_cb_mutex-ISCSI",
3397     + "nlk_cb_mutex-AUDIT",
3398     + "nlk_cb_mutex-FIB_LOOKUP",
3399     + "nlk_cb_mutex-CONNECTOR",
3400     + "nlk_cb_mutex-NETFILTER",
3401     + "nlk_cb_mutex-IP6_FW",
3402     + "nlk_cb_mutex-DNRTMSG",
3403     + "nlk_cb_mutex-KOBJECT_UEVENT",
3404     + "nlk_cb_mutex-GENERIC",
3405     + "nlk_cb_mutex-17",
3406     + "nlk_cb_mutex-SCSITRANSPORT",
3407     + "nlk_cb_mutex-ECRYPTFS",
3408     + "nlk_cb_mutex-RDMA",
3409     + "nlk_cb_mutex-CRYPTO",
3410     + "nlk_cb_mutex-SMC",
3411     + "nlk_cb_mutex-23",
3412     + "nlk_cb_mutex-24",
3413     + "nlk_cb_mutex-25",
3414     + "nlk_cb_mutex-26",
3415     + "nlk_cb_mutex-27",
3416     + "nlk_cb_mutex-28",
3417     + "nlk_cb_mutex-29",
3418     + "nlk_cb_mutex-30",
3419     + "nlk_cb_mutex-31",
3420     + "nlk_cb_mutex-MAX_LINKS"
3421     +};
3422     +
3423     static int netlink_dump(struct sock *sk);
3424     static void netlink_skb_destructor(struct sk_buff *skb);
3425    
3426     @@ -585,6 +623,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
3427     } else {
3428     nlk->cb_mutex = &nlk->cb_def_mutex;
3429     mutex_init(nlk->cb_mutex);
3430     + lockdep_set_class_and_name(nlk->cb_mutex,
3431     + nlk_cb_mutex_keys + protocol,
3432     + nlk_cb_mutex_key_strings[protocol]);
3433     }
3434     init_waitqueue_head(&nlk->wait);
3435    
3436     diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
3437     index 1308bbf460f7..b56d57984439 100644
3438     --- a/net/sched/sch_dsmark.c
3439     +++ b/net/sched/sch_dsmark.c
3440     @@ -200,9 +200,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
3441     pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
3442    
3443     if (p->set_tc_index) {
3444     + int wlen = skb_network_offset(skb);
3445     +
3446     switch (tc_skb_protocol(skb)) {
3447     case htons(ETH_P_IP):
3448     - if (skb_cow_head(skb, sizeof(struct iphdr)))
3449     + wlen += sizeof(struct iphdr);
3450     + if (!pskb_may_pull(skb, wlen) ||
3451     + skb_try_make_writable(skb, wlen))
3452     goto drop;
3453    
3454     skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
3455     @@ -210,7 +214,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
3456     break;
3457    
3458     case htons(ETH_P_IPV6):
3459     - if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
3460     + wlen += sizeof(struct ipv6hdr);
3461     + if (!pskb_may_pull(skb, wlen) ||
3462     + skb_try_make_writable(skb, wlen))
3463     goto drop;
3464    
3465     skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
3466     diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
3467     index 582585393d35..0994ce491e7c 100644
3468     --- a/net/sctp/outqueue.c
3469     +++ b/net/sctp/outqueue.c
3470     @@ -382,17 +382,18 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
3471     }
3472    
3473     static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
3474     - struct sctp_sndrcvinfo *sinfo,
3475     - struct list_head *queue, int msg_len)
3476     + struct sctp_sndrcvinfo *sinfo, int msg_len)
3477     {
3478     + struct sctp_outq *q = &asoc->outqueue;
3479     struct sctp_chunk *chk, *temp;
3480    
3481     - list_for_each_entry_safe(chk, temp, queue, list) {
3482     + list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
3483     if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
3484     chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
3485     continue;
3486    
3487     list_del_init(&chk->list);
3488     + q->out_qlen -= chk->skb->len;
3489     asoc->sent_cnt_removable--;
3490     asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
3491    
3492     @@ -431,9 +432,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc,
3493     return;
3494     }
3495    
3496     - sctp_prsctp_prune_unsent(asoc, sinfo,
3497     - &asoc->outqueue.out_chunk_list,
3498     - msg_len);
3499     + sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
3500     }
3501    
3502     /* Mark all the eligible packets on a transport for retransmission. */
3503     diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
3504     index 9d94e65d0894..271cd66e4b3b 100644
3505     --- a/net/tipc/subscr.c
3506     +++ b/net/tipc/subscr.c
3507     @@ -141,6 +141,11 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
3508     static void tipc_subscrp_timeout(unsigned long data)
3509     {
3510     struct tipc_subscription *sub = (struct tipc_subscription *)data;
3511     + struct tipc_subscriber *subscriber = sub->subscriber;
3512     +
3513     + spin_lock_bh(&subscriber->lock);
3514     + tipc_nametbl_unsubscribe(sub);
3515     + spin_unlock_bh(&subscriber->lock);
3516    
3517     /* Notify subscriber of timeout */
3518     tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
3519     @@ -173,7 +178,6 @@ static void tipc_subscrp_kref_release(struct kref *kref)
3520     struct tipc_subscriber *subscriber = sub->subscriber;
3521    
3522     spin_lock_bh(&subscriber->lock);
3523     - tipc_nametbl_unsubscribe(sub);
3524     list_del(&sub->subscrp_list);
3525     atomic_dec(&tn->subscription_count);
3526     spin_unlock_bh(&subscriber->lock);
3527     @@ -205,6 +209,7 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
3528     if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
3529     continue;
3530    
3531     + tipc_nametbl_unsubscribe(sub);
3532     tipc_subscrp_get(sub);
3533     spin_unlock_bh(&subscriber->lock);
3534     tipc_subscrp_delete(sub);
3535     diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
3536     index 2f633eec6b7a..ee12e176256c 100644
3537     --- a/net/vmw_vsock/af_vsock.c
3538     +++ b/net/vmw_vsock/af_vsock.c
3539     @@ -1101,10 +1101,19 @@ static const struct proto_ops vsock_dgram_ops = {
3540     .sendpage = sock_no_sendpage,
3541     };
3542    
3543     +static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
3544     +{
3545     + if (!transport->cancel_pkt)
3546     + return -EOPNOTSUPP;
3547     +
3548     + return transport->cancel_pkt(vsk);
3549     +}
3550     +
3551     static void vsock_connect_timeout(struct work_struct *work)
3552     {
3553     struct sock *sk;
3554     struct vsock_sock *vsk;
3555     + int cancel = 0;
3556    
3557     vsk = container_of(work, struct vsock_sock, dwork.work);
3558     sk = sk_vsock(vsk);
3559     @@ -1115,8 +1124,11 @@ static void vsock_connect_timeout(struct work_struct *work)
3560     sk->sk_state = SS_UNCONNECTED;
3561     sk->sk_err = ETIMEDOUT;
3562     sk->sk_error_report(sk);
3563     + cancel = 1;
3564     }
3565     release_sock(sk);
3566     + if (cancel)
3567     + vsock_transport_cancel_pkt(vsk);
3568    
3569     sock_put(sk);
3570     }
3571     @@ -1223,11 +1235,13 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
3572     err = sock_intr_errno(timeout);
3573     sk->sk_state = SS_UNCONNECTED;
3574     sock->state = SS_UNCONNECTED;
3575     + vsock_transport_cancel_pkt(vsk);
3576     goto out_wait;
3577     } else if (timeout == 0) {
3578     err = -ETIMEDOUT;
3579     sk->sk_state = SS_UNCONNECTED;
3580     sock->state = SS_UNCONNECTED;
3581     + vsock_transport_cancel_pkt(vsk);
3582     goto out_wait;
3583     }
3584    
3585     diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
3586     index 62c056ea403b..9c07c76c504d 100644
3587     --- a/net/vmw_vsock/virtio_transport_common.c
3588     +++ b/net/vmw_vsock/virtio_transport_common.c
3589     @@ -57,6 +57,7 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
3590     pkt->len = len;
3591     pkt->hdr.len = cpu_to_le32(len);
3592     pkt->reply = info->reply;
3593     + pkt->vsk = info->vsk;
3594    
3595     if (info->msg && len > 0) {
3596     pkt->buf = kmalloc(len, GFP_KERNEL);
3597     @@ -180,6 +181,7 @@ static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
3598     struct virtio_vsock_pkt_info info = {
3599     .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
3600     .type = type,
3601     + .vsk = vsk,
3602     };
3603    
3604     return virtio_transport_send_pkt_info(vsk, &info);
3605     @@ -519,6 +521,7 @@ int virtio_transport_connect(struct vsock_sock *vsk)
3606     struct virtio_vsock_pkt_info info = {
3607     .op = VIRTIO_VSOCK_OP_REQUEST,
3608     .type = VIRTIO_VSOCK_TYPE_STREAM,
3609     + .vsk = vsk,
3610     };
3611    
3612     return virtio_transport_send_pkt_info(vsk, &info);
3613     @@ -534,6 +537,7 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
3614     VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
3615     (mode & SEND_SHUTDOWN ?
3616     VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
3617     + .vsk = vsk,
3618     };
3619    
3620     return virtio_transport_send_pkt_info(vsk, &info);
3621     @@ -560,6 +564,7 @@ virtio_transport_stream_enqueue(struct vsock_sock *vsk,
3622     .type = VIRTIO_VSOCK_TYPE_STREAM,
3623     .msg = msg,
3624     .pkt_len = len,
3625     + .vsk = vsk,
3626     };
3627    
3628     return virtio_transport_send_pkt_info(vsk, &info);
3629     @@ -581,6 +586,7 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
3630     .op = VIRTIO_VSOCK_OP_RST,
3631     .type = VIRTIO_VSOCK_TYPE_STREAM,
3632     .reply = !!pkt,
3633     + .vsk = vsk,
3634     };
3635    
3636     /* Send RST only if the original pkt is not a RST pkt */
3637     @@ -826,6 +832,7 @@ virtio_transport_send_response(struct vsock_sock *vsk,
3638     .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
3639     .remote_port = le32_to_cpu(pkt->hdr.src_port),
3640     .reply = true,
3641     + .vsk = vsk,
3642     };
3643    
3644     return virtio_transport_send_pkt_info(vsk, &info);
3645     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
3646     index f2e4e99ce651..2c3065c1f3fb 100644
3647     --- a/sound/pci/hda/patch_conexant.c
3648     +++ b/sound/pci/hda/patch_conexant.c
3649     @@ -261,6 +261,7 @@ enum {
3650     CXT_FIXUP_HP_530,
3651     CXT_FIXUP_CAP_MIX_AMP_5047,
3652     CXT_FIXUP_MUTE_LED_EAPD,
3653     + CXT_FIXUP_HP_DOCK,
3654     CXT_FIXUP_HP_SPECTRE,
3655     CXT_FIXUP_HP_GATE_MIC,
3656     };
3657     @@ -778,6 +779,14 @@ static const struct hda_fixup cxt_fixups[] = {
3658     .type = HDA_FIXUP_FUNC,
3659     .v.func = cxt_fixup_mute_led_eapd,
3660     },
3661     + [CXT_FIXUP_HP_DOCK] = {
3662     + .type = HDA_FIXUP_PINS,
3663     + .v.pins = (const struct hda_pintbl[]) {
3664     + { 0x16, 0x21011020 }, /* line-out */
3665     + { 0x18, 0x2181103f }, /* line-in */
3666     + { }
3667     + }
3668     + },
3669     [CXT_FIXUP_HP_SPECTRE] = {
3670     .type = HDA_FIXUP_PINS,
3671     .v.pins = (const struct hda_pintbl[]) {
3672     @@ -839,6 +848,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
3673     SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
3674     SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
3675     SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
3676     + SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
3677     SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
3678     SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
3679     SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
3680     @@ -872,6 +882,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
3681     { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
3682     { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
3683     { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" },
3684     + { .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" },
3685     {}
3686     };
3687    
3688     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3689     index d7fa7373cb94..ba40596b9d92 100644
3690     --- a/sound/pci/hda/patch_realtek.c
3691     +++ b/sound/pci/hda/patch_realtek.c
3692     @@ -4854,6 +4854,7 @@ enum {
3693     ALC286_FIXUP_HP_GPIO_LED,
3694     ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
3695     ALC280_FIXUP_HP_DOCK_PINS,
3696     + ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED,
3697     ALC280_FIXUP_HP_9480M,
3698     ALC288_FIXUP_DELL_HEADSET_MODE,
3699     ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
3700     @@ -5394,6 +5395,16 @@ static const struct hda_fixup alc269_fixups[] = {
3701     .chained = true,
3702     .chain_id = ALC280_FIXUP_HP_GPIO4
3703     },
3704     + [ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED] = {
3705     + .type = HDA_FIXUP_PINS,
3706     + .v.pins = (const struct hda_pintbl[]) {
3707     + { 0x1b, 0x21011020 }, /* line-out */
3708     + { 0x18, 0x2181103f }, /* line-in */
3709     + { },
3710     + },
3711     + .chained = true,
3712     + .chain_id = ALC269_FIXUP_HP_GPIO_MIC1_LED
3713     + },
3714     [ALC280_FIXUP_HP_9480M] = {
3715     .type = HDA_FIXUP_FUNC,
3716     .v.func = alc280_fixup_hp_9480m,
3717     @@ -5646,7 +5657,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3718     SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
3719     SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
3720     SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
3721     - SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
3722     + SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
3723     SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
3724     SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
3725     SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
3726     @@ -5812,6 +5823,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
3727     {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
3728     {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
3729     {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
3730     + {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
3731     {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
3732     {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
3733     {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
3734     diff --git a/sound/soc/img/img-parallel-out.c b/sound/soc/img/img-parallel-out.c
3735     index c1610a054d65..3cf522d66755 100644
3736     --- a/sound/soc/img/img-parallel-out.c
3737     +++ b/sound/soc/img/img-parallel-out.c
3738     @@ -166,9 +166,11 @@ static int img_prl_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
3739     return -EINVAL;
3740     }
3741    
3742     + pm_runtime_get_sync(prl->dev);
3743     reg = img_prl_out_readl(prl, IMG_PRL_OUT_CTL);
3744     reg = (reg & ~IMG_PRL_OUT_CTL_EDGE_MASK) | control_set;
3745     img_prl_out_writel(prl, reg, IMG_PRL_OUT_CTL);
3746     + pm_runtime_put(prl->dev);
3747    
3748     return 0;
3749     }
3750     diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
3751     index 0e1c3ee56675..9735b4caaed3 100644
3752     --- a/sound/soc/sti/uniperif_reader.c
3753     +++ b/sound/soc/sti/uniperif_reader.c
3754     @@ -364,6 +364,8 @@ static int uni_reader_startup(struct snd_pcm_substream *substream,
3755     struct uniperif *reader = priv->dai_data.uni;
3756     int ret;
3757    
3758     + reader->substream = substream;
3759     +
3760     if (!UNIPERIF_TYPE_IS_TDM(reader))
3761     return 0;
3762    
3763     @@ -393,6 +395,7 @@ static void uni_reader_shutdown(struct snd_pcm_substream *substream,
3764     /* Stop the reader */
3765     uni_reader_stop(reader);
3766     }
3767     + reader->substream = NULL;
3768     }
3769    
3770     static const struct snd_soc_dai_ops uni_reader_dai_ops = {
3771     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
3772     index 4569fdcab701..1b20768e781d 100644
3773     --- a/virt/kvm/kvm_main.c
3774     +++ b/virt/kvm/kvm_main.c
3775     @@ -1060,7 +1060,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
3776     * changes) is disallowed above, so any other attribute changes getting
3777     * here can be skipped.
3778     */
3779     - if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
3780     + if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) {
3781     r = kvm_iommu_map_pages(kvm, &new);
3782     return r;
3783     }
3784     @@ -3904,7 +3904,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
3785     if (!vcpu_align)
3786     vcpu_align = __alignof__(struct kvm_vcpu);
3787     kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
3788     - 0, NULL);
3789     + SLAB_ACCOUNT, NULL);
3790     if (!kvm_vcpu_cache) {
3791     r = -ENOMEM;
3792     goto out_free_3;