Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.19/0109-4.19.10-all-fixes.patch



Revision 3388
Fri Aug 2 11:47:09 2019 UTC by niro
File size: 166350 bytes
-linux-4.19.10
1 niro 3388 diff --git a/Makefile b/Makefile
2     index 8717f34464d5..36d9de42def3 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 19
9     -SUBLEVEL = 9
10     +SUBLEVEL = 10
11     EXTRAVERSION =
12     NAME = "People's Front"
13    
14     diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
15     index 1d158cfda15f..c45aef806870 100644
16     --- a/arch/arm/boot/dts/am3517-evm.dts
17     +++ b/arch/arm/boot/dts/am3517-evm.dts
18     @@ -227,7 +227,7 @@
19     vmmc-supply = <&vmmc_fixed>;
20     bus-width = <4>;
21     wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
22     - cd-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>; /* gpio_127 */
23     + cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio_127 */
24     };
25    
26     &mmc3 {
27     diff --git a/arch/arm/boot/dts/am3517-som.dtsi b/arch/arm/boot/dts/am3517-som.dtsi
28     index dae6e458e59f..b1c988eed87c 100644
29     --- a/arch/arm/boot/dts/am3517-som.dtsi
30     +++ b/arch/arm/boot/dts/am3517-som.dtsi
31     @@ -163,7 +163,7 @@
32     compatible = "ti,wl1271";
33     reg = <2>;
34     interrupt-parent = <&gpio6>;
35     - interrupts = <10 IRQ_TYPE_LEVEL_HIGH>; /* gpio_170 */
36     + interrupts = <10 IRQ_TYPE_EDGE_RISING>; /* gpio_170 */
37     ref-clock-frequency = <26000000>;
38     tcxo-clock-frequency = <26000000>;
39     };
40     diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
41     index ac343330d0c8..98b682a8080c 100644
42     --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
43     +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
44     @@ -129,7 +129,7 @@
45     };
46    
47     &mmc3 {
48     - interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
49     + interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
50     pinctrl-0 = <&mmc3_pins &wl127x_gpio>;
51     pinctrl-names = "default";
52     vmmc-supply = <&wl12xx_vmmc>;
53     diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
54     index 9d5d53fbe9c0..c39cf2ca54da 100644
55     --- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
56     +++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
57     @@ -35,7 +35,7 @@
58     * jumpering combinations for the long run.
59     */
60     &mmc3 {
61     - interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
62     + interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
63     pinctrl-0 = <&mmc3_pins &mmc3_core2_pins>;
64     pinctrl-names = "default";
65     vmmc-supply = <&wl12xx_vmmc>;
66     diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
67     index 61f68e5c48e9..b405992eb601 100644
68     --- a/arch/arm/boot/dts/sama5d2.dtsi
69     +++ b/arch/arm/boot/dts/sama5d2.dtsi
70     @@ -308,7 +308,7 @@
71     0x1 0x0 0x60000000 0x10000000
72     0x2 0x0 0x70000000 0x10000000
73     0x3 0x0 0x80000000 0x10000000>;
74     - clocks = <&mck>;
75     + clocks = <&h32ck>;
76     status = "disabled";
77    
78     nand_controller: nand-controller {
79     diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
80     index dd28d2614d7f..d10d8831f527 100644
81     --- a/arch/arm/mach-omap1/board-ams-delta.c
82     +++ b/arch/arm/mach-omap1/board-ams-delta.c
83     @@ -726,6 +726,9 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old)
84     struct modem_private_data *priv = port->private_data;
85     int ret;
86    
87     + if (!priv)
88     + return;
89     +
90     if (IS_ERR(priv->regulator))
91     return;
92    
93     diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
94     index 7b95729e8359..38a1be6c3694 100644
95     --- a/arch/arm/mach-omap2/prm44xx.c
96     +++ b/arch/arm/mach-omap2/prm44xx.c
97     @@ -351,7 +351,7 @@ static void omap44xx_prm_reconfigure_io_chain(void)
98     * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and
99     * omap44xx_prm_reconfigure_io_chain() must be called. No return value.
100     */
101     -static void __init omap44xx_prm_enable_io_wakeup(void)
102     +static void omap44xx_prm_enable_io_wakeup(void)
103     {
104     s32 inst = omap4_prmst_get_prm_dev_inst();
105    
106     diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
107     index 6d651f314193..6921f8dc5ebb 100644
108     --- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
109     +++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
110     @@ -31,6 +31,10 @@
111     status = "okay";
112     };
113    
114     +&tlmm {
115     + gpio-reserved-ranges = <0 4>, <81 4>;
116     +};
117     +
118     &uart9 {
119     status = "okay";
120     };
121     diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
122     index 74091fd3101e..d5523adeddbf 100644
123     --- a/arch/s390/kernel/perf_cpum_cf.c
124     +++ b/arch/s390/kernel/perf_cpum_cf.c
125     @@ -346,6 +346,8 @@ static int __hw_perf_event_init(struct perf_event *event)
126     break;
127    
128     case PERF_TYPE_HARDWARE:
129     + if (is_sampling_event(event)) /* No sampling support */
130     + return -ENOENT;
131     ev = attr->config;
132     /* Count user space (problem-state) only */
133     if (!attr->exclude_user && attr->exclude_kernel) {
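
The cpum_cf hunk above makes the counting-only s390 PMU bail out of sampling events early. A minimal standalone sketch of that guard, with hypothetical names (the kernel's is_sampling_event() essentially tests whether attr.sample_period is nonzero):

    #include <stdio.h>
    #include <errno.h>

    struct event_attr {
        unsigned long long sample_period;   /* nonzero means a sampling event */
    };

    /* Counting-only PMU: back out with -ENOENT so the perf core can offer
     * the event to another PMU instead of failing hard. */
    static int hw_event_init(const struct event_attr *attr)
    {
        if (attr->sample_period)
            return -ENOENT;
        return 0;
    }

    int main(void)
    {
        struct event_attr counting = { 0 }, sampling = { 4000 };

        printf("counting: %d, sampling: %d\n",
               hw_event_init(&counting), hw_event_init(&sampling));
        return 0;
    }
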
134     diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
135     index 2216d21e955d..3692de84c420 100644
136     --- a/arch/x86/kvm/lapic.c
137     +++ b/arch/x86/kvm/lapic.c
138     @@ -55,7 +55,7 @@
139     #define PRIo64 "o"
140    
141     /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
142     -#define apic_debug(fmt, arg...)
143     +#define apic_debug(fmt, arg...) do {} while (0)
144    
145     /* 14 is the version for Xeon and Pentium 8.4.8*/
146     #define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
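
The lapic.c hunk above turns the disabled apic_debug() stub into the do { } while (0) form. A minimal sketch of why that idiom is preferred for empty statement-like macros; dbg() is an illustrative name, not kernel API:

    #include <stdio.h>

    /* Empty debug stub in the do { } while (0) form: a call still expands to
     * a real statement, so "if (x) dbg(...);" keeps a non-empty body and
     * avoids -Wempty-body style warnings while still compiling to nothing. */
    #define dbg(fmt, ...) do { } while (0)

    int main(void)
    {
        int level = 1;

        if (level)
            dbg("level is %d\n", level);
        else
            printf("level is zero\n");
        return 0;
    }
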
147     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
148     index e55f7a90d4b2..c97a9d60d305 100644
149     --- a/arch/x86/kvm/vmx.c
150     +++ b/arch/x86/kvm/vmx.c
151     @@ -962,6 +962,7 @@ struct vcpu_vmx {
152     struct shared_msr_entry *guest_msrs;
153     int nmsrs;
154     int save_nmsrs;
155     + bool guest_msrs_dirty;
156     unsigned long host_idt_base;
157     #ifdef CONFIG_X86_64
158     u64 msr_host_kernel_gs_base;
159     @@ -1284,7 +1285,7 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
160     static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
161     u16 error_code);
162     static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
163     -static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
164     +static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
165     u32 msr, int type);
166    
167     static DEFINE_PER_CPU(struct vmcs *, vmxarea);
168     @@ -2874,6 +2875,20 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
169    
170     vmx->req_immediate_exit = false;
171    
172     + /*
173     + * Note that guest MSRs to be saved/restored can also be changed
174     + * when guest state is loaded. This happens when guest transitions
175     + * to/from long-mode by setting MSR_EFER.LMA.
176     + */
177     + if (!vmx->loaded_cpu_state || vmx->guest_msrs_dirty) {
178     + vmx->guest_msrs_dirty = false;
179     + for (i = 0; i < vmx->save_nmsrs; ++i)
180     + kvm_set_shared_msr(vmx->guest_msrs[i].index,
181     + vmx->guest_msrs[i].data,
182     + vmx->guest_msrs[i].mask);
183     +
184     + }
185     +
186     if (vmx->loaded_cpu_state)
187     return;
188    
189     @@ -2934,11 +2949,6 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
190     vmcs_writel(HOST_GS_BASE, gs_base);
191     host_state->gs_base = gs_base;
192     }
193     -
194     - for (i = 0; i < vmx->save_nmsrs; ++i)
195     - kvm_set_shared_msr(vmx->guest_msrs[i].index,
196     - vmx->guest_msrs[i].data,
197     - vmx->guest_msrs[i].mask);
198     }
199    
200     static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
201     @@ -3418,6 +3428,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
202     move_msr_up(vmx, index, save_nmsrs++);
203    
204     vmx->save_nmsrs = save_nmsrs;
205     + vmx->guest_msrs_dirty = true;
206    
207     if (cpu_has_vmx_msr_bitmap())
208     vmx_update_msr_bitmap(&vmx->vcpu);
209     @@ -5924,7 +5935,7 @@ static void free_vpid(int vpid)
210     spin_unlock(&vmx_vpid_lock);
211     }
212    
213     -static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
214     +static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
215     u32 msr, int type)
216     {
217     int f = sizeof(unsigned long);
218     @@ -5962,7 +5973,7 @@ static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bit
219     }
220     }
221    
222     -static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
223     +static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
224     u32 msr, int type)
225     {
226     int f = sizeof(unsigned long);
227     @@ -6000,7 +6011,7 @@ static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitm
228     }
229     }
230    
231     -static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
232     +static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
233     u32 msr, int type, bool value)
234     {
235     if (value)
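
The vmx.c changes above replace an unconditional MSR reload on every switch-in with a dirty flag: setup_msrs() only marks the cached list stale, and the reload happens lazily the next time guest state is loaded. A minimal sketch of that pattern under illustrative names (not the real KVM structures):

    #include <stdio.h>
    #include <stdbool.h>

    struct vcpu {
        int msrs[4];
        int loaded;          /* nonzero once state is live on the CPU */
        bool msrs_dirty;
    };

    static void load_msrs(struct vcpu *v)
    {
        printf("reloading %zu MSRs\n", sizeof(v->msrs) / sizeof(v->msrs[0]));
    }

    static void setup_msrs(struct vcpu *v)
    {
        v->msrs[0] = 42;     /* e.g. guest toggled long mode */
        v->msrs_dirty = true;
    }

    static void prepare_switch_to_guest(struct vcpu *v)
    {
        /* Reload either on first load or when the list changed meanwhile. */
        if (!v->loaded || v->msrs_dirty) {
            v->msrs_dirty = false;
            load_msrs(v);
        }
        if (v->loaded)
            return;
        v->loaded = 1;
        /* ...load the rest of the host/guest state... */
    }

    int main(void)
    {
        struct vcpu v = { 0 };

        prepare_switch_to_guest(&v);   /* first load */
        setup_msrs(&v);                /* state changed while loaded */
        prepare_switch_to_guest(&v);   /* reloads because of the dirty flag */
        return 0;
    }
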
236     diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
237     index 2eeddd814653..c6c7c9b7b5c1 100644
238     --- a/arch/x86/xen/enlighten.c
239     +++ b/arch/x86/xen/enlighten.c
240     @@ -7,7 +7,6 @@
241    
242     #include <xen/features.h>
243     #include <xen/page.h>
244     -#include <xen/interface/memory.h>
245    
246     #include <asm/xen/hypercall.h>
247     #include <asm/xen/hypervisor.h>
248     @@ -343,80 +342,3 @@ void xen_arch_unregister_cpu(int num)
249     }
250     EXPORT_SYMBOL(xen_arch_unregister_cpu);
251     #endif
252     -
253     -#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
254     -void __init arch_xen_balloon_init(struct resource *hostmem_resource)
255     -{
256     - struct xen_memory_map memmap;
257     - int rc;
258     - unsigned int i, last_guest_ram;
259     - phys_addr_t max_addr = PFN_PHYS(max_pfn);
260     - struct e820_table *xen_e820_table;
261     - const struct e820_entry *entry;
262     - struct resource *res;
263     -
264     - if (!xen_initial_domain())
265     - return;
266     -
267     - xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
268     - if (!xen_e820_table)
269     - return;
270     -
271     - memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
272     - set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
273     - rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
274     - if (rc) {
275     - pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
276     - goto out;
277     - }
278     -
279     - last_guest_ram = 0;
280     - for (i = 0; i < memmap.nr_entries; i++) {
281     - if (xen_e820_table->entries[i].addr >= max_addr)
282     - break;
283     - if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
284     - last_guest_ram = i;
285     - }
286     -
287     - entry = &xen_e820_table->entries[last_guest_ram];
288     - if (max_addr >= entry->addr + entry->size)
289     - goto out; /* No unallocated host RAM. */
290     -
291     - hostmem_resource->start = max_addr;
292     - hostmem_resource->end = entry->addr + entry->size;
293     -
294     - /*
295     - * Mark non-RAM regions between the end of dom0 RAM and end of host RAM
296     - * as unavailable. The rest of that region can be used for hotplug-based
297     - * ballooning.
298     - */
299     - for (; i < memmap.nr_entries; i++) {
300     - entry = &xen_e820_table->entries[i];
301     -
302     - if (entry->type == E820_TYPE_RAM)
303     - continue;
304     -
305     - if (entry->addr >= hostmem_resource->end)
306     - break;
307     -
308     - res = kzalloc(sizeof(*res), GFP_KERNEL);
309     - if (!res)
310     - goto out;
311     -
312     - res->name = "Unavailable host RAM";
313     - res->start = entry->addr;
314     - res->end = (entry->addr + entry->size < hostmem_resource->end) ?
315     - entry->addr + entry->size : hostmem_resource->end;
316     - rc = insert_resource(hostmem_resource, res);
317     - if (rc) {
318     - pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
319     - __func__, res->start, res->end, rc);
320     - kfree(res);
321     - goto out;
322     - }
323     - }
324     -
325     - out:
326     - kfree(xen_e820_table);
327     -}
328     -#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
329     diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
330     index 1163e33121fb..075ed47993bb 100644
331     --- a/arch/x86/xen/setup.c
332     +++ b/arch/x86/xen/setup.c
333     @@ -808,6 +808,7 @@ char * __init xen_memory_setup(void)
334     addr = xen_e820_table.entries[0].addr;
335     size = xen_e820_table.entries[0].size;
336     while (i < xen_e820_table.nr_entries) {
337     + bool discard = false;
338    
339     chunk_size = size;
340     type = xen_e820_table.entries[i].type;
341     @@ -823,10 +824,11 @@ char * __init xen_memory_setup(void)
342     xen_add_extra_mem(pfn_s, n_pfns);
343     xen_max_p2m_pfn = pfn_s + n_pfns;
344     } else
345     - type = E820_TYPE_UNUSABLE;
346     + discard = true;
347     }
348    
349     - xen_align_and_add_e820_region(addr, chunk_size, type);
350     + if (!discard)
351     + xen_align_and_add_e820_region(addr, chunk_size, type);
352    
353     addr += chunk_size;
354     size -= chunk_size;
355     diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
356     index 08f26db2da7e..e938576e58cb 100644
357     --- a/drivers/acpi/arm64/iort.c
358     +++ b/drivers/acpi/arm64/iort.c
359     @@ -700,7 +700,7 @@ static void iort_set_device_domain(struct device *dev,
360     */
361     static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
362     {
363     - struct acpi_iort_node *node, *msi_parent;
364     + struct acpi_iort_node *node, *msi_parent = NULL;
365     struct fwnode_handle *iort_fwnode;
366     struct acpi_iort_its_group *its;
367     int i;
368     diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
369     index af3a20dd5aa4..99c99a5d57fe 100644
370     --- a/drivers/fsi/Kconfig
371     +++ b/drivers/fsi/Kconfig
372     @@ -46,6 +46,7 @@ config FSI_MASTER_AST_CF
373     tristate "FSI master based on Aspeed ColdFire coprocessor"
374     depends on GPIOLIB
375     depends on GPIO_ASPEED
376     + select GENERIC_ALLOCATOR
377     ---help---
378     This option enables a FSI master using the AST2400 and AST2500 GPIO
379     lines driven by the internal ColdFire coprocessor. This requires
380     diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
381     index ef00d14f8645..325e2213cac5 100644
382     --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
383     +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
384     @@ -2243,12 +2243,13 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
385     #endif
386    
387     WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
388     + udelay(50);
389    
390     /* carrizo do enable cp interrupt after cp inited */
391     - if (!(adev->flags & AMD_IS_APU))
392     + if (!(adev->flags & AMD_IS_APU)) {
393     gfx_v9_0_enable_gui_idle_interrupt(adev, true);
394     -
395     - udelay(50);
396     + udelay(50);
397     + }
398    
399     #ifdef AMDGPU_RLC_DEBUG_RETRY
400     /* RLC_GPM_GENERAL_6 : RLC Ucode version */
401     diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
402     index 7c6ac3cadb6b..8bb355d5d43d 100644
403     --- a/drivers/gpu/drm/ast/ast_mode.c
404     +++ b/drivers/gpu/drm/ast/ast_mode.c
405     @@ -973,9 +973,21 @@ static int get_clock(void *i2c_priv)
406     {
407     struct ast_i2c_chan *i2c = i2c_priv;
408     struct ast_private *ast = i2c->dev->dev_private;
409     - uint32_t val;
410     + uint32_t val, val2, count, pass;
411     +
412     + count = 0;
413     + pass = 0;
414     + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
415     + do {
416     + val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
417     + if (val == val2) {
418     + pass++;
419     + } else {
420     + pass = 0;
421     + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
422     + }
423     + } while ((pass < 5) && (count++ < 0x10000));
424    
425     - val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
426     return val & 1 ? 1 : 0;
427     }
428    
429     @@ -983,9 +995,21 @@ static int get_data(void *i2c_priv)
430     {
431     struct ast_i2c_chan *i2c = i2c_priv;
432     struct ast_private *ast = i2c->dev->dev_private;
433     - uint32_t val;
434     + uint32_t val, val2, count, pass;
435     +
436     + count = 0;
437     + pass = 0;
438     + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
439     + do {
440     + val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
441     + if (val == val2) {
442     + pass++;
443     + } else {
444     + pass = 0;
445     + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
446     + }
447     + } while ((pass < 5) && (count++ < 0x10000));
448    
449     - val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
450     return val & 1 ? 1 : 0;
451     }
452    
453     @@ -998,7 +1022,7 @@ static void set_clock(void *i2c_priv, int clock)
454    
455     for (i = 0; i < 0x10000; i++) {
456     ujcrb7 = ((clock & 0x01) ? 0 : 1);
457     - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
458     + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
459     jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
460     if (ujcrb7 == jtemp)
461     break;
462     @@ -1014,7 +1038,7 @@ static void set_data(void *i2c_priv, int data)
463    
464     for (i = 0; i < 0x10000; i++) {
465     ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
466     - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
467     + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
468     jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
469     if (ujcrb7 == jtemp)
470     break;
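
The ast_mode.c hunks above stop trusting a single read of the DDC clock/data bit and instead resample until the value is stable. A generic, self-contained sketch of that debounce-by-repeated-read pattern; fake_read_bit() is a stand-in for the register access:

    #include <stdio.h>

    /* Hypothetical noisy line: returns a bit that settles after a few reads. */
    static int fake_read_bit(void)
    {
        static int n;
        return (n++ < 3) ? (n & 1) : 1;
    }

    static int debounced_read(int (*read_bit)(void))
    {
        unsigned int pass = 0, count = 0;
        int val = read_bit();

        do {
            int val2 = read_bit();

            if (val == val2) {
                pass++;               /* same value seen again */
            } else {
                pass = 0;             /* glitch: restart the run */
                val = read_bit();
            }
        } while (pass < 5 && count++ < 0x10000);

        return val & 1;
    }

    int main(void)
    {
        printf("settled bit: %d\n", debounced_read(fake_read_bit));
        return 0;
    }
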
471     diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
472     index 14aac661f38b..7a3a6ed9f27b 100644
473     --- a/drivers/gpu/drm/meson/meson_venc.c
474     +++ b/drivers/gpu/drm/meson/meson_venc.c
475     @@ -715,6 +715,7 @@ struct meson_hdmi_venc_vic_mode {
476     { 5, &meson_hdmi_encp_mode_1080i60 },
477     { 20, &meson_hdmi_encp_mode_1080i50 },
478     { 32, &meson_hdmi_encp_mode_1080p24 },
479     + { 33, &meson_hdmi_encp_mode_1080p50 },
480     { 34, &meson_hdmi_encp_mode_1080p30 },
481     { 31, &meson_hdmi_encp_mode_1080p50 },
482     { 16, &meson_hdmi_encp_mode_1080p60 },
483     diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
484     index 71d3445ba869..07ee19573b3f 100644
485     --- a/drivers/hwmon/ina2xx.c
486     +++ b/drivers/hwmon/ina2xx.c
487     @@ -274,7 +274,7 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
488     break;
489     case INA2XX_CURRENT:
490     /* signed register, result in mA */
491     - val = regval * data->current_lsb_uA;
492     + val = (s16)regval * data->current_lsb_uA;
493     val = DIV_ROUND_CLOSEST(val, 1000);
494     break;
495     case INA2XX_CALIBRATION:
496     @@ -491,7 +491,7 @@ static int ina2xx_probe(struct i2c_client *client,
497     }
498    
499     data->groups[group++] = &ina2xx_group;
500     - if (id->driver_data == ina226)
501     + if (chip == ina226)
502     data->groups[group++] = &ina226_group;
503    
504     hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
505     @@ -500,7 +500,7 @@ static int ina2xx_probe(struct i2c_client *client,
506     return PTR_ERR(hwmon_dev);
507    
508     dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
509     - id->name, data->rshunt);
510     + client->name, data->rshunt);
511    
512     return 0;
513     }
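
The ina2xx hunk above adds an (s16) cast because the current register holds a signed two's-complement value; without sign extension a negative reading is scaled as a large positive number. A small illustrative program (LSB value made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t regval = 0xFFF6;      /* -10 in two's complement */
        int current_lsb_uA = 1000;     /* hypothetical 1 mA per LSB */

        long wrong = (long)regval * current_lsb_uA;
        long right = (long)(int16_t)regval * current_lsb_uA;

        printf("unsigned: %ld uA, signed: %ld uA\n", wrong, right);
        return 0;
    }
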
514     diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
515     index de46577c7d5a..d8fa4bea4bc8 100644
516     --- a/drivers/hwmon/mlxreg-fan.c
517     +++ b/drivers/hwmon/mlxreg-fan.c
518     @@ -51,7 +51,7 @@
519     */
520     #define MLXREG_FAN_GET_RPM(rval, d, s) (DIV_ROUND_CLOSEST(15000000 * 100, \
521     ((rval) + (s)) * (d)))
522     -#define MLXREG_FAN_GET_FAULT(val, mask) (!!((val) ^ (mask)))
523     +#define MLXREG_FAN_GET_FAULT(val, mask) (!((val) ^ (mask)))
524     #define MLXREG_FAN_PWM_DUTY2STATE(duty) (DIV_ROUND_CLOSEST((duty) * \
525     MLXREG_FAN_MAX_STATE, \
526     MLXREG_FAN_MAX_DUTY))
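
The mlxreg-fan hunk above flips !!(val ^ mask) to !((val) ^ (mask)), so a fault is reported only when the masked tachometer bits read back exactly as the mask, as this editor reads it. A tiny illustrative check (values are made up, not taken from the driver):

    #include <stdio.h>

    #define GET_FAULT(val, mask)  (!((val) ^ (mask)))

    int main(void)
    {
        unsigned int mask = 0xff;

        printf("val=0xff -> fault=%d\n", GET_FAULT(0xffu, mask)); /* equals mask */
        printf("val=0x7f -> fault=%d\n", GET_FAULT(0x7fu, mask)); /* differs, no fault */
        return 0;
    }
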
527     diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
528     index be5ba4690895..0d0457245e7d 100644
529     --- a/drivers/hwmon/raspberrypi-hwmon.c
530     +++ b/drivers/hwmon/raspberrypi-hwmon.c
531     @@ -115,7 +115,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
532     {
533     struct device *dev = &pdev->dev;
534     struct rpi_hwmon_data *data;
535     - int ret;
536    
537     data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
538     if (!data)
539     @@ -124,11 +123,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
540     /* Parent driver assure that firmware is correct */
541     data->fw = dev_get_drvdata(dev->parent);
542    
543     - /* Init throttled */
544     - ret = rpi_firmware_property(data->fw, RPI_FIRMWARE_GET_THROTTLED,
545     - &data->last_throttled,
546     - sizeof(data->last_throttled));
547     -
548     data->hwmon_dev = devm_hwmon_device_register_with_info(dev, "rpi_volt",
549     data,
550     &rpi_chip_info,
551     diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
552     index 49276bbdac3d..1bb80f992aa8 100644
553     --- a/drivers/hwmon/w83795.c
554     +++ b/drivers/hwmon/w83795.c
555     @@ -1691,7 +1691,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
556     * somewhere else in the code
557     */
558     #define SENSOR_ATTR_TEMP(index) { \
559     - SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \
560     + SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 5 ? S_IWUSR : 0), \
561     show_temp_mode, store_temp_mode, NOT_USED, index - 1), \
562     SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \
563     NULL, TEMP_READ, index - 1), \
564     diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
565     index ee366199b169..25d43c8f1c2a 100644
566     --- a/drivers/infiniband/core/roce_gid_mgmt.c
567     +++ b/drivers/infiniband/core/roce_gid_mgmt.c
568     @@ -767,8 +767,10 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
569    
570     case NETDEV_CHANGEADDR:
571     cmds[0] = netdev_del_cmd;
572     - cmds[1] = add_default_gid_cmd;
573     - cmds[2] = add_cmd;
574     + if (ndev->reg_state == NETREG_REGISTERED) {
575     + cmds[1] = add_default_gid_cmd;
576     + cmds[2] = add_cmd;
577     + }
578     break;
579    
580     case NETDEV_CHANGEUPPER:
581     diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
582     index 85cd1a3593d6..22bd9784fa2e 100644
583     --- a/drivers/infiniband/hw/bnxt_re/main.c
584     +++ b/drivers/infiniband/hw/bnxt_re/main.c
585     @@ -1252,6 +1252,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
586     /* Registered a new RoCE device instance to netdev */
587     rc = bnxt_re_register_netdev(rdev);
588     if (rc) {
589     + rtnl_unlock();
590     pr_err("Failed to register with netedev: %#x\n", rc);
591     return -EINVAL;
592     }
593     @@ -1461,6 +1462,7 @@ static void bnxt_re_task(struct work_struct *work)
594     "Failed to register with IB: %#x", rc);
595     bnxt_re_remove_one(rdev);
596     bnxt_re_dev_unreg(rdev);
597     + goto exit;
598     }
599     break;
600     case NETDEV_UP:
601     @@ -1484,6 +1486,7 @@ static void bnxt_re_task(struct work_struct *work)
602     }
603     smp_mb__before_atomic();
604     atomic_dec(&rdev->sched_count);
605     +exit:
606     kfree(re_work);
607     }
608    
609     diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
610     index e1668bcc2d13..902d12d6d88b 100644
611     --- a/drivers/infiniband/hw/hfi1/chip.c
612     +++ b/drivers/infiniband/hw/hfi1/chip.c
613     @@ -12485,7 +12485,8 @@ static int init_cntrs(struct hfi1_devdata *dd)
614     }
615    
616     /* allocate space for the counter values */
617     - dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
618     + dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
619     + GFP_KERNEL);
620     if (!dd->cntrs)
621     goto bail;
622    
623     diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
624     index d9470317983f..cfd252386356 100644
625     --- a/drivers/infiniband/hw/hfi1/hfi.h
626     +++ b/drivers/infiniband/hw/hfi1/hfi.h
627     @@ -154,6 +154,8 @@ struct hfi1_ib_stats {
628     extern struct hfi1_ib_stats hfi1_stats;
629     extern const struct pci_error_handlers hfi1_pci_err_handler;
630    
631     +extern int num_driver_cntrs;
632     +
633     /*
634     * First-cut criterion for "device is active" is
635     * two thousand dwords combined Tx, Rx traffic per
636     diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
637     index a7c586a5589d..3dfb4cf2f8c9 100644
638     --- a/drivers/infiniband/hw/hfi1/verbs.c
639     +++ b/drivers/infiniband/hw/hfi1/verbs.c
640     @@ -1701,7 +1701,7 @@ static const char * const driver_cntr_names[] = {
641     static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */
642     static const char **dev_cntr_names;
643     static const char **port_cntr_names;
644     -static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
645     +int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
646     static int num_dev_cntrs;
647     static int num_port_cntrs;
648     static int cntr_names_initialized;
649     diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
650     index 0218c0f8c2a7..a442b29e7611 100644
651     --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
652     +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
653     @@ -1661,10 +1661,9 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
654     return hns_roce_cmq_send(hr_dev, &desc, 1);
655     }
656    
657     -static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
658     - unsigned long mtpt_idx)
659     +static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
660     + struct hns_roce_mr *mr)
661     {
662     - struct hns_roce_v2_mpt_entry *mpt_entry;
663     struct scatterlist *sg;
664     u64 page_addr;
665     u64 *pages;
666     @@ -1672,6 +1671,53 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
667     int len;
668     int entry;
669    
670     + mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
671     + mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
672     + roce_set_field(mpt_entry->byte_48_mode_ba,
673     + V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
674     + upper_32_bits(mr->pbl_ba >> 3));
675     +
676     + pages = (u64 *)__get_free_page(GFP_KERNEL);
677     + if (!pages)
678     + return -ENOMEM;
679     +
680     + i = 0;
681     + for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
682     + len = sg_dma_len(sg) >> PAGE_SHIFT;
683     + for (j = 0; j < len; ++j) {
684     + page_addr = sg_dma_address(sg) +
685     + (j << mr->umem->page_shift);
686     + pages[i] = page_addr >> 6;
687     + /* Record the first 2 entry directly to MTPT table */
688     + if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
689     + goto found;
690     + i++;
691     + }
692     + }
693     +found:
694     + mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
695     + roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
696     + V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
697     +
698     + mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
699     + roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
700     + V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
701     + roce_set_field(mpt_entry->byte_64_buf_pa1,
702     + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
703     + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
704     + mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
705     +
706     + free_page((unsigned long)pages);
707     +
708     + return 0;
709     +}
710     +
711     +static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
712     + unsigned long mtpt_idx)
713     +{
714     + struct hns_roce_v2_mpt_entry *mpt_entry;
715     + int ret;
716     +
717     mpt_entry = mb_buf;
718     memset(mpt_entry, 0, sizeof(*mpt_entry));
719    
720     @@ -1686,7 +1732,6 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
721     mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
722     roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
723     V2_MPT_BYTE_4_PD_S, mr->pd);
724     - mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
725    
726     roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
727     roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
728     @@ -1700,13 +1745,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
729     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
730     roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
731     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
732     - mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
733    
734     roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
735     mr->type == MR_TYPE_MR ? 0 : 1);
736     roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
737     1);
738     - mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
739    
740     mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
741     mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
742     @@ -1717,53 +1760,9 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
743     if (mr->type == MR_TYPE_DMA)
744     return 0;
745    
746     - mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
747     -
748     - mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
749     - roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
750     - V2_MPT_BYTE_48_PBL_BA_H_S,
751     - upper_32_bits(mr->pbl_ba >> 3));
752     - mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
753     -
754     - pages = (u64 *)__get_free_page(GFP_KERNEL);
755     - if (!pages)
756     - return -ENOMEM;
757     -
758     - i = 0;
759     - for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
760     - len = sg_dma_len(sg) >> PAGE_SHIFT;
761     - for (j = 0; j < len; ++j) {
762     - page_addr = sg_dma_address(sg) +
763     - (j << mr->umem->page_shift);
764     - pages[i] = page_addr >> 6;
765     -
766     - /* Record the first 2 entry directly to MTPT table */
767     - if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
768     - goto found;
769     - i++;
770     - }
771     - }
772     + ret = set_mtpt_pbl(mpt_entry, mr);
773    
774     -found:
775     - mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
776     - roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
777     - V2_MPT_BYTE_56_PA0_H_S,
778     - upper_32_bits(pages[0]));
779     - mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
780     -
781     - mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
782     - roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
783     - V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
784     -
785     - free_page((unsigned long)pages);
786     -
787     - roce_set_field(mpt_entry->byte_64_buf_pa1,
788     - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
789     - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
790     - mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
791     - mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
792     -
793     - return 0;
794     + return ret;
795     }
796    
797     static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
798     @@ -1772,6 +1771,7 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
799     u64 size, void *mb_buf)
800     {
801     struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
802     + int ret = 0;
803    
804     if (flags & IB_MR_REREG_PD) {
805     roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
806     @@ -1784,14 +1784,14 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
807     V2_MPT_BYTE_8_BIND_EN_S,
808     (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
809     roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
810     - V2_MPT_BYTE_8_ATOMIC_EN_S,
811     - (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0));
812     + V2_MPT_BYTE_8_ATOMIC_EN_S,
813     + mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
814     roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
815     - (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0));
816     + mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
817     roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
818     - (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
819     + mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
820     roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
821     - (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
822     + mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
823     }
824    
825     if (flags & IB_MR_REREG_TRANS) {
826     @@ -1800,21 +1800,13 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
827     mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
828     mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
829    
830     - mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
831     - mpt_entry->pbl_ba_l =
832     - cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
833     - roce_set_field(mpt_entry->byte_48_mode_ba,
834     - V2_MPT_BYTE_48_PBL_BA_H_M,
835     - V2_MPT_BYTE_48_PBL_BA_H_S,
836     - upper_32_bits(mr->pbl_ba >> 3));
837     - mpt_entry->byte_48_mode_ba =
838     - cpu_to_le32(mpt_entry->byte_48_mode_ba);
839     -
840     mr->iova = iova;
841     mr->size = size;
842     +
843     + ret = set_mtpt_pbl(mpt_entry, mr);
844     }
845    
846     - return 0;
847     + return ret;
848     }
849    
850     static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
851     diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
852     index d216e0d2921d..9e1cac8cb260 100644
853     --- a/drivers/infiniband/hw/mlx5/odp.c
854     +++ b/drivers/infiniband/hw/mlx5/odp.c
855     @@ -724,6 +724,7 @@ next_mr:
856     head = frame;
857    
858     bcnt -= frame->bcnt;
859     + offset = 0;
860     }
861     break;
862    
863     diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
864     index d53d954ac8af..183fe5c8ceb7 100644
865     --- a/drivers/infiniband/hw/mlx5/qp.c
866     +++ b/drivers/infiniband/hw/mlx5/qp.c
867     @@ -4413,17 +4413,18 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
868     goto out;
869     }
870    
871     - if (wr->opcode == IB_WR_LOCAL_INV ||
872     - wr->opcode == IB_WR_REG_MR) {
873     + if (wr->opcode == IB_WR_REG_MR) {
874     fence = dev->umr_fence;
875     next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
876     - } else if (wr->send_flags & IB_SEND_FENCE) {
877     - if (qp->next_fence)
878     - fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
879     - else
880     - fence = MLX5_FENCE_MODE_FENCE;
881     - } else {
882     - fence = qp->next_fence;
883     + } else {
884     + if (wr->send_flags & IB_SEND_FENCE) {
885     + if (qp->next_fence)
886     + fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
887     + else
888     + fence = MLX5_FENCE_MODE_FENCE;
889     + } else {
890     + fence = qp->next_fence;
891     + }
892     }
893    
894     switch (ibqp->qp_type) {
895     diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
896     index 89ec0f64abfc..084bb4baebb5 100644
897     --- a/drivers/infiniband/sw/rdmavt/ah.c
898     +++ b/drivers/infiniband/sw/rdmavt/ah.c
899     @@ -91,13 +91,15 @@ EXPORT_SYMBOL(rvt_check_ah);
900     * rvt_create_ah - create an address handle
901     * @pd: the protection domain
902     * @ah_attr: the attributes of the AH
903     + * @udata: pointer to user's input output buffer information.
904     *
905     * This may be called from interrupt context.
906     *
907     * Return: newly allocated ah
908     */
909     struct ib_ah *rvt_create_ah(struct ib_pd *pd,
910     - struct rdma_ah_attr *ah_attr)
911     + struct rdma_ah_attr *ah_attr,
912     + struct ib_udata *udata)
913     {
914     struct rvt_ah *ah;
915     struct rvt_dev_info *dev = ib_to_rvt(pd->device);
916     diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
917     index 16105af99189..25271b48a683 100644
918     --- a/drivers/infiniband/sw/rdmavt/ah.h
919     +++ b/drivers/infiniband/sw/rdmavt/ah.h
920     @@ -51,7 +51,8 @@
921     #include <rdma/rdma_vt.h>
922    
923     struct ib_ah *rvt_create_ah(struct ib_pd *pd,
924     - struct rdma_ah_attr *ah_attr);
925     + struct rdma_ah_attr *ah_attr,
926     + struct ib_udata *udata);
927     int rvt_destroy_ah(struct ib_ah *ibah);
928     int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
929     int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
930     diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
931     index 55af04fa03a7..6c8dcb65ff03 100644
932     --- a/drivers/net/ethernet/cavium/thunder/nic_main.c
933     +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
934     @@ -1441,6 +1441,9 @@ static void nic_remove(struct pci_dev *pdev)
935     {
936     struct nicpf *nic = pci_get_drvdata(pdev);
937    
938     + if (!nic)
939     + return;
940     +
941     if (nic->flags & NIC_SRIOV_ENABLED)
942     pci_disable_sriov(pdev);
943    
944     diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
945     index 14374a856d30..6127697ede12 100644
946     --- a/drivers/net/ethernet/hisilicon/hip04_eth.c
947     +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
948     @@ -914,10 +914,8 @@ static int hip04_mac_probe(struct platform_device *pdev)
949     }
950    
951     ret = register_netdev(ndev);
952     - if (ret) {
953     - free_netdev(ndev);
954     + if (ret)
955     goto alloc_fail;
956     - }
957    
958     return 0;
959    
960     diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
961     index e2f80cca9bed..0d2de6f67676 100644
962     --- a/drivers/net/ethernet/ibm/emac/emac.h
963     +++ b/drivers/net/ethernet/ibm/emac/emac.h
964     @@ -231,7 +231,7 @@ struct emac_regs {
965     #define EMAC_STACR_PHYE 0x00004000
966     #define EMAC_STACR_STAC_MASK 0x00003000
967     #define EMAC_STACR_STAC_READ 0x00001000
968     -#define EMAC_STACR_STAC_WRITE 0x00000800
969     +#define EMAC_STACR_STAC_WRITE 0x00002000
970     #define EMAC_STACR_OPBC_MASK 0x00000C00
971     #define EMAC_STACR_OPBC_50 0x00000000
972     #define EMAC_STACR_OPBC_66 0x00000400
973     diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
974     index c54ebedca6da..c393cb2c0f16 100644
975     --- a/drivers/net/ethernet/intel/igb/e1000_i210.c
976     +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
977     @@ -842,6 +842,7 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw)
978     nvm_word = E1000_INVM_DEFAULT_AL;
979     tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
980     igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
981     + phy_word = E1000_PHY_PLL_UNCONF;
982     for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
983     /* check current state directly from internal PHY */
984     igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
985     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
986     index a8148c7126e5..9772016222c3 100644
987     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
988     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
989     @@ -2248,7 +2248,9 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
990     *autoneg = false;
991    
992     if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
993     - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
994     + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
995     + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
996     + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
997     *speed = IXGBE_LINK_SPEED_1GB_FULL;
998     return 0;
999     }
1000     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
1001     index f11b45001cad..d290f0787dfb 100644
1002     --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
1003     +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
1004     @@ -1084,8 +1084,8 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
1005    
1006     tx_pause = !!(pause->tx_pause);
1007     rx_pause = !!(pause->rx_pause);
1008     - rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
1009     - tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
1010     + rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp;
1011     + tx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->tx_ppp;
1012    
1013     err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1014     priv->rx_skb_size + ETH_FCS_LEN,
1015     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1016     index fe49384eba48..0d7fd3f043cf 100644
1017     --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1018     +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1019     @@ -3494,8 +3494,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1020     dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
1021     }
1022    
1023     - /* MTU range: 46 - hw-specific max */
1024     - dev->min_mtu = MLX4_EN_MIN_MTU;
1025     + /* MTU range: 68 - hw-specific max */
1026     + dev->min_mtu = ETH_MIN_MTU;
1027     dev->max_mtu = priv->max_mtu;
1028    
1029     mdev->pndev[port] = dev;
1030     diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
1031     index c3228b89df46..240f9c9ca943 100644
1032     --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
1033     +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
1034     @@ -161,7 +161,6 @@
1035     #define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
1036     ETH_HLEN + PREAMBLE_LEN)
1037    
1038     -#define MLX4_EN_MIN_MTU 46
1039     /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
1040     * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
1041     */
1042     diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
1043     index 81045dfa1cd8..44f6e4873aad 100644
1044     --- a/drivers/net/ethernet/realtek/8139cp.c
1045     +++ b/drivers/net/ethernet/realtek/8139cp.c
1046     @@ -571,6 +571,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
1047     struct cp_private *cp;
1048     int handled = 0;
1049     u16 status;
1050     + u16 mask;
1051    
1052     if (unlikely(dev == NULL))
1053     return IRQ_NONE;
1054     @@ -578,6 +579,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
1055    
1056     spin_lock(&cp->lock);
1057    
1058     + mask = cpr16(IntrMask);
1059     + if (!mask)
1060     + goto out_unlock;
1061     +
1062     status = cpr16(IntrStatus);
1063     if (!status || (status == 0xFFFF))
1064     goto out_unlock;
1065     diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
1066     index 733e35b7c4bb..20d1be2b070b 100644
1067     --- a/drivers/net/phy/phy_device.c
1068     +++ b/drivers/net/phy/phy_device.c
1069     @@ -1738,20 +1738,17 @@ EXPORT_SYMBOL(genphy_loopback);
1070    
1071     static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
1072     {
1073     - phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
1074     - PHY_10BT_FEATURES);
1075     -
1076     switch (max_speed) {
1077     - default:
1078     - return -ENOTSUPP;
1079     - case SPEED_1000:
1080     - phydev->supported |= PHY_1000BT_FEATURES;
1081     + case SPEED_10:
1082     + phydev->supported &= ~PHY_100BT_FEATURES;
1083     /* fall through */
1084     case SPEED_100:
1085     - phydev->supported |= PHY_100BT_FEATURES;
1086     - /* fall through */
1087     - case SPEED_10:
1088     - phydev->supported |= PHY_10BT_FEATURES;
1089     + phydev->supported &= ~PHY_1000BT_FEATURES;
1090     + break;
1091     + case SPEED_1000:
1092     + break;
1093     + default:
1094     + return -ENOTSUPP;
1095     }
1096    
1097     return 0;
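
The phy_device.c hunk above reworks __set_phy_supported() to strip only the feature groups faster than max_speed instead of clearing everything and re-adding groups. A minimal sketch of that fall-through masking pattern; the bit values are illustrative, not the real PHY_*_FEATURES masks:

    #include <stdio.h>

    #define F_10BT    0x01
    #define F_100BT   0x02
    #define F_1000BT  0x04

    static int limit_speed(unsigned int *supported, int max_speed)
    {
        switch (max_speed) {
        case 10:
            *supported &= ~F_100BT;
            /* fall through */
        case 100:
            *supported &= ~F_1000BT;
            break;
        case 1000:
            break;
        default:
            return -1;      /* unsupported limit */
        }
        return 0;
    }

    int main(void)
    {
        unsigned int sup = F_10BT | F_100BT | F_1000BT;

        limit_speed(&sup, 100);
        printf("mask after limiting to 100M: 0x%x\n", sup);
        return 0;
    }
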
1098     diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
1099     index 83060fb349f4..ad9db652874d 100644
1100     --- a/drivers/net/phy/sfp-bus.c
1101     +++ b/drivers/net/phy/sfp-bus.c
1102     @@ -162,7 +162,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
1103     /* 1000Base-PX or 1000Base-BX10 */
1104     if ((id->base.e_base_px || id->base.e_base_bx10) &&
1105     br_min <= 1300 && br_max >= 1200)
1106     - phylink_set(support, 1000baseX_Full);
1107     + phylink_set(modes, 1000baseX_Full);
1108    
1109     /* For active or passive cables, select the link modes
1110     * based on the bit rates and the cable compliance bytes.
1111     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1112     index 573620771154..8c1abcba4cbd 100644
1113     --- a/drivers/net/tun.c
1114     +++ b/drivers/net/tun.c
1115     @@ -2268,9 +2268,9 @@ static void tun_setup(struct net_device *dev)
1116     static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
1117     struct netlink_ext_ack *extack)
1118     {
1119     - if (!data)
1120     - return 0;
1121     - return -EINVAL;
1122     + NL_SET_ERR_MSG(extack,
1123     + "tun/tap creation via rtnetlink is not supported.");
1124     + return -EOPNOTSUPP;
1125     }
1126    
1127     static size_t tun_get_size(const struct net_device *dev)
1128     diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
1129     index c2ca6cd3fbe0..ad14fbfa1864 100644
1130     --- a/drivers/net/virtio_net.c
1131     +++ b/drivers/net/virtio_net.c
1132     @@ -365,7 +365,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
1133     static struct sk_buff *page_to_skb(struct virtnet_info *vi,
1134     struct receive_queue *rq,
1135     struct page *page, unsigned int offset,
1136     - unsigned int len, unsigned int truesize)
1137     + unsigned int len, unsigned int truesize,
1138     + bool hdr_valid)
1139     {
1140     struct sk_buff *skb;
1141     struct virtio_net_hdr_mrg_rxbuf *hdr;
1142     @@ -387,7 +388,8 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
1143     else
1144     hdr_padded_len = sizeof(struct padded_vnet_hdr);
1145    
1146     - memcpy(hdr, p, hdr_len);
1147     + if (hdr_valid)
1148     + memcpy(hdr, p, hdr_len);
1149    
1150     len -= hdr_len;
1151     offset += hdr_padded_len;
1152     @@ -739,7 +741,8 @@ static struct sk_buff *receive_big(struct net_device *dev,
1153     struct virtnet_rq_stats *stats)
1154     {
1155     struct page *page = buf;
1156     - struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
1157     + struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
1158     + PAGE_SIZE, true);
1159    
1160     stats->bytes += len - vi->hdr_len;
1161     if (unlikely(!skb))
1162     @@ -842,7 +845,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
1163     rcu_read_unlock();
1164     put_page(page);
1165     head_skb = page_to_skb(vi, rq, xdp_page,
1166     - offset, len, PAGE_SIZE);
1167     + offset, len,
1168     + PAGE_SIZE, false);
1169     return head_skb;
1170     }
1171     break;
1172     @@ -898,7 +902,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
1173     goto err_skb;
1174     }
1175    
1176     - head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
1177     + head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
1178     curr_skb = head_skb;
1179    
1180     if (unlikely(!curr_skb))
1181     diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
1182     index 0ba301f7e8b4..b7b2659e02fa 100644
1183     --- a/drivers/nvme/host/core.c
1184     +++ b/drivers/nvme/host/core.c
1185     @@ -3308,6 +3308,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
1186     struct nvme_ns *ns, *next;
1187     LIST_HEAD(ns_list);
1188    
1189     + /* prevent racing with ns scanning */
1190     + flush_work(&ctrl->scan_work);
1191     +
1192     /*
1193     * The dead states indicates the controller was not gracefully
1194     * disconnected. In that case, we won't be able to flush any data while
1195     @@ -3463,7 +3466,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
1196     nvme_mpath_stop(ctrl);
1197     nvme_stop_keep_alive(ctrl);
1198     flush_work(&ctrl->async_event_work);
1199     - flush_work(&ctrl->scan_work);
1200     cancel_work_sync(&ctrl->fw_act_work);
1201     if (ctrl->ops->stop_ctrl)
1202     ctrl->ops->stop_ctrl(ctrl);
1203     diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
1204     index bb4a2003c097..60220de2db52 100644
1205     --- a/drivers/nvme/host/nvme.h
1206     +++ b/drivers/nvme/host/nvme.h
1207     @@ -537,6 +537,9 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
1208     static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
1209     struct nvme_id_ctrl *id)
1210     {
1211     + if (ctrl->subsys->cmic & (1 << 3))
1212     + dev_warn(ctrl->device,
1213     +"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
1214     return 0;
1215     }
1216     static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
1217     diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
1218     index dc042017c293..b6a28de682e8 100644
1219     --- a/drivers/nvme/host/rdma.c
1220     +++ b/drivers/nvme/host/rdma.c
1221     @@ -184,6 +184,7 @@ static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
1222     qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
1223     if (ib_dma_mapping_error(ibdev, qe->dma)) {
1224     kfree(qe->data);
1225     + qe->data = NULL;
1226     return -ENOMEM;
1227     }
1228    
1229     @@ -816,6 +817,7 @@ out_free_tagset:
1230     out_free_async_qe:
1231     nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
1232     sizeof(struct nvme_command), DMA_TO_DEVICE);
1233     + ctrl->async_event_sqe.data = NULL;
1234     out_free_queue:
1235     nvme_rdma_free_queue(&ctrl->queues[0]);
1236     return error;
1237     diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
1238     index 4a9a673b4777..975050a69494 100644
1239     --- a/drivers/pci/controller/dwc/pci-imx6.c
1240     +++ b/drivers/pci/controller/dwc/pci-imx6.c
1241     @@ -80,8 +80,6 @@ struct imx6_pcie {
1242     #define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
1243     #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
1244     #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
1245     -#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
1246     -#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4)
1247    
1248     #define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
1249     #define PCIE_PHY_CTRL_DATA_LOC 0
1250     @@ -641,12 +639,6 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
1251     return 0;
1252     }
1253    
1254     -static int imx6_pcie_link_up(struct dw_pcie *pci)
1255     -{
1256     - return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) &
1257     - PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
1258     -}
1259     -
1260     static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
1261     .host_init = imx6_pcie_host_init,
1262     };
1263     @@ -679,7 +671,7 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
1264     }
1265    
1266     static const struct dw_pcie_ops dw_pcie_ops = {
1267     - .link_up = imx6_pcie_link_up,
1268     + /* No special ops needed, but pcie-designware still expects this struct */
1269     };
1270    
1271     static int imx6_pcie_probe(struct platform_device *pdev)
1272     diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
1273     index e70e425f26f5..69c92843eb3b 100644
1274     --- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
1275     +++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
1276     @@ -231,6 +231,7 @@ static const struct qusb2_phy_cfg sdm845_phy_cfg = {
1277     .mask_core_ready = CORE_READY_STATUS,
1278     .has_pll_override = true,
1279     .autoresume_en = BIT(0),
1280     + .update_tune1_with_efuse = true,
1281     };
1282    
1283     static const char * const qusb2_phy_vreg_names[] = {
1284     @@ -402,10 +403,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
1285    
1286     /*
1287     * Read efuse register having TUNE2/1 parameter's high nibble.
1288     - * If efuse register shows value as 0x0, or if we fail to find
1289     - * a valid efuse register settings, then use default value
1290     - * as 0xB for high nibble that we have already set while
1291     - * configuring phy.
1292     + * If efuse register shows value as 0x0 (indicating value is not
1293     + * fused), or if we fail to find a valid efuse register setting,
1294     + * then use default value for high nibble that we have already
1295     + * set while configuring the phy.
1296     */
1297     val = nvmem_cell_read(qphy->cell, NULL);
1298     if (IS_ERR(val) || !val[0]) {
1299     @@ -415,12 +416,13 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
1300    
1301     /* Fused TUNE1/2 value is the higher nibble only */
1302     if (cfg->update_tune1_with_efuse)
1303     - qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
1304     - val[0] << 0x4);
1305     + qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
1306     + val[0] << HSTX_TRIM_SHIFT,
1307     + HSTX_TRIM_MASK);
1308     else
1309     - qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2],
1310     - val[0] << 0x4);
1311     -
1312     + qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2],
1313     + val[0] << HSTX_TRIM_SHIFT,
1314     + HSTX_TRIM_MASK);
1315     }
1316    
1317     static int qusb2_phy_set_mode(struct phy *phy, enum phy_mode mode)
1318     diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
1319     index fd77e46eb3b2..70a006ba4d05 100644
1320     --- a/drivers/s390/cio/vfio_ccw_cp.c
1321     +++ b/drivers/s390/cio/vfio_ccw_cp.c
1322     @@ -387,8 +387,10 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
1323     * orb specified one of the unsupported formats, we defer
1324     * checking for IDAWs in unsupported formats to here.
1325     */
1326     - if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
1327     + if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) {
1328     + kfree(p);
1329     return -EOPNOTSUPP;
1330     + }
1331    
1332     if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
1333     break;
1334     @@ -528,7 +530,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
1335    
1336     ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
1337     if (ret < 0)
1338     - goto out_init;
1339     + goto out_unpin;
1340    
1341     /* Translate this direct ccw to a idal ccw. */
1342     idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
1343     diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
1344     index 508c61c669e7..e2be7da74343 100644
1345     --- a/drivers/spi/spi-omap2-mcspi.c
1346     +++ b/drivers/spi/spi-omap2-mcspi.c
1347     @@ -1455,13 +1455,26 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
1348     /* work with hotplug and coldplug */
1349     MODULE_ALIAS("platform:omap2_mcspi");
1350    
1351     -#ifdef CONFIG_SUSPEND
1352     -static int omap2_mcspi_suspend_noirq(struct device *dev)
1353     +static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
1354     {
1355     - return pinctrl_pm_select_sleep_state(dev);
1356     + struct spi_master *master = dev_get_drvdata(dev);
1357     + struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
1358     + int error;
1359     +
1360     + error = pinctrl_pm_select_sleep_state(dev);
1361     + if (error)
1362     + dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
1363     + __func__, error);
1364     +
1365     + error = spi_master_suspend(master);
1366     + if (error)
1367     + dev_warn(mcspi->dev, "%s: master suspend failed: %i\n",
1368     + __func__, error);
1369     +
1370     + return pm_runtime_force_suspend(dev);
1371     }
1372    
1373     -static int omap2_mcspi_resume_noirq(struct device *dev)
1374     +static int __maybe_unused omap2_mcspi_resume(struct device *dev)
1375     {
1376     struct spi_master *master = dev_get_drvdata(dev);
1377     struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
1378     @@ -1472,17 +1485,17 @@ static int omap2_mcspi_resume_noirq(struct device *dev)
1379     dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
1380     __func__, error);
1381    
1382     - return 0;
1383     -}
1384     + error = spi_master_resume(master);
1385     + if (error)
1386     + dev_warn(mcspi->dev, "%s: master resume failed: %i\n",
1387     + __func__, error);
1388    
1389     -#else
1390     -#define omap2_mcspi_suspend_noirq NULL
1391     -#define omap2_mcspi_resume_noirq NULL
1392     -#endif
1393     + return pm_runtime_force_resume(dev);
1394     +}
1395    
1396     static const struct dev_pm_ops omap2_mcspi_pm_ops = {
1397     - .suspend_noirq = omap2_mcspi_suspend_noirq,
1398     - .resume_noirq = omap2_mcspi_resume_noirq,
1399     + SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend,
1400     + omap2_mcspi_resume)
1401     .runtime_resume = omap_mcspi_runtime_resume,
1402     };
1403    
1404     diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
1405     index c38298d960ff..4f120e72c7d2 100644
1406     --- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
1407     +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
1408     @@ -2289,7 +2289,7 @@ static int rtw_wx_read32(struct net_device *dev,
1409     exit:
1410     kfree(ptmp);
1411    
1412     - return 0;
1413     + return ret;
1414     }
1415    
1416     static int rtw_wx_write32(struct net_device *dev,
1417     diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
1418     index 7442bc4c6433..dd9ae6f5d19c 100644
1419     --- a/drivers/thunderbolt/switch.c
1420     +++ b/drivers/thunderbolt/switch.c
1421     @@ -864,6 +864,30 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1422     }
1423     static DEVICE_ATTR(key, 0600, key_show, key_store);
1424    
1425     +static void nvm_authenticate_start(struct tb_switch *sw)
1426     +{
1427     + struct pci_dev *root_port;
1428     +
1429     + /*
1430     + * During host router NVM upgrade we should not allow root port to
1431     + * go into D3cold because some root ports cannot trigger PME
1432     + * itself. To be on the safe side keep the root port in D0 during
1433     + * the whole upgrade process.
1434     + */
1435     + root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
1436     + if (root_port)
1437     + pm_runtime_get_noresume(&root_port->dev);
1438     +}
1439     +
1440     +static void nvm_authenticate_complete(struct tb_switch *sw)
1441     +{
1442     + struct pci_dev *root_port;
1443     +
1444     + root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
1445     + if (root_port)
1446     + pm_runtime_put(&root_port->dev);
1447     +}
1448     +
1449     static ssize_t nvm_authenticate_show(struct device *dev,
1450     struct device_attribute *attr, char *buf)
1451     {
1452     @@ -913,10 +937,18 @@ static ssize_t nvm_authenticate_store(struct device *dev,
1453    
1454     sw->nvm->authenticating = true;
1455    
1456     - if (!tb_route(sw))
1457     + if (!tb_route(sw)) {
1458     + /*
1459     + * Keep root port from suspending as long as the
1460     + * NVM upgrade process is running.
1461     + */
1462     + nvm_authenticate_start(sw);
1463     ret = nvm_authenticate_host(sw);
1464     - else
1465     + if (ret)
1466     + nvm_authenticate_complete(sw);
1467     + } else {
1468     ret = nvm_authenticate_device(sw);
1469     + }
1470     pm_runtime_mark_last_busy(&sw->dev);
1471     pm_runtime_put_autosuspend(&sw->dev);
1472     }
1473     @@ -1336,6 +1368,10 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
1474     if (ret <= 0)
1475     return ret;
1476    
1477     + /* Now we can allow root port to suspend again */
1478     + if (!tb_route(sw))
1479     + nvm_authenticate_complete(sw);
1480     +
1481     if (status) {
1482     tb_sw_info(sw, "switch flash authentication failed\n");
1483     tb_switch_set_uuid(sw);
1484     diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
1485     index 1000d864929c..0f026d445e31 100644
1486     --- a/drivers/usb/gadget/function/u_ether.c
1487     +++ b/drivers/usb/gadget/function/u_ether.c
1488     @@ -401,12 +401,12 @@ done:
1489     static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
1490     {
1491     struct usb_request *req;
1492     - struct usb_request *tmp;
1493     unsigned long flags;
1494    
1495     /* fill unused rxq slots with some skb */
1496     spin_lock_irqsave(&dev->req_lock, flags);
1497     - list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
1498     + while (!list_empty(&dev->rx_reqs)) {
1499     + req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
1500     list_del_init(&req->list);
1501     spin_unlock_irqrestore(&dev->req_lock, flags);
1502    
1503     @@ -1125,7 +1125,6 @@ void gether_disconnect(struct gether *link)
1504     {
1505     struct eth_dev *dev = link->ioport;
1506     struct usb_request *req;
1507     - struct usb_request *tmp;
1508    
1509     WARN_ON(!dev);
1510     if (!dev)
1511     @@ -1142,7 +1141,8 @@ void gether_disconnect(struct gether *link)
1512     */
1513     usb_ep_disable(link->in_ep);
1514     spin_lock(&dev->req_lock);
1515     - list_for_each_entry_safe(req, tmp, &dev->tx_reqs, list) {
1516     + while (!list_empty(&dev->tx_reqs)) {
1517     + req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
1518     list_del(&req->list);
1519    
1520     spin_unlock(&dev->req_lock);
1521     @@ -1154,7 +1154,8 @@ void gether_disconnect(struct gether *link)
1522    
1523     usb_ep_disable(link->out_ep);
1524     spin_lock(&dev->req_lock);
1525     - list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
1526     + while (!list_empty(&dev->rx_reqs)) {
1527     + req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
1528     list_del(&req->list);
1529    
1530     spin_unlock(&dev->req_lock);
1531     diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
1532     index 3a16431da321..fcf13ef33b31 100644
1533     --- a/drivers/usb/gadget/udc/omap_udc.c
1534     +++ b/drivers/usb/gadget/udc/omap_udc.c
1535     @@ -2033,6 +2033,7 @@ static inline int machine_without_vbus_sense(void)
1536     {
1537     return machine_is_omap_innovator()
1538     || machine_is_omap_osk()
1539     + || machine_is_omap_palmte()
1540     || machine_is_sx1()
1541     /* No known omap7xx boards with vbus sense */
1542     || cpu_is_omap7xx();
1543     @@ -2041,7 +2042,7 @@ static inline int machine_without_vbus_sense(void)
1544     static int omap_udc_start(struct usb_gadget *g,
1545     struct usb_gadget_driver *driver)
1546     {
1547     - int status = -ENODEV;
1548     + int status;
1549     struct omap_ep *ep;
1550     unsigned long flags;
1551    
1552     @@ -2079,6 +2080,7 @@ static int omap_udc_start(struct usb_gadget *g,
1553     goto done;
1554     }
1555     } else {
1556     + status = 0;
1557     if (can_pullup(udc))
1558     pullup_enable(udc);
1559     else
1560     @@ -2593,9 +2595,22 @@ omap_ep_setup(char *name, u8 addr, u8 type,
1561    
1562     static void omap_udc_release(struct device *dev)
1563     {
1564     - complete(udc->done);
1565     + pullup_disable(udc);
1566     + if (!IS_ERR_OR_NULL(udc->transceiver)) {
1567     + usb_put_phy(udc->transceiver);
1568     + udc->transceiver = NULL;
1569     + }
1570     + omap_writew(0, UDC_SYSCON1);
1571     + remove_proc_file();
1572     + if (udc->dc_clk) {
1573     + if (udc->clk_requested)
1574     + omap_udc_enable_clock(0);
1575     + clk_put(udc->hhc_clk);
1576     + clk_put(udc->dc_clk);
1577     + }
1578     + if (udc->done)
1579     + complete(udc->done);
1580     kfree(udc);
1581     - udc = NULL;
1582     }
1583    
1584     static int
1585     @@ -2627,6 +2642,7 @@ omap_udc_setup(struct platform_device *odev, struct usb_phy *xceiv)
1586     udc->gadget.speed = USB_SPEED_UNKNOWN;
1587     udc->gadget.max_speed = USB_SPEED_FULL;
1588     udc->gadget.name = driver_name;
1589     + udc->gadget.quirk_ep_out_aligned_size = 1;
1590     udc->transceiver = xceiv;
1591    
1592     /* ep0 is special; put it right after the SETUP buffer */
1593     @@ -2867,8 +2883,8 @@ bad_on_1710:
1594     udc->clr_halt = UDC_RESET_EP;
1595    
1596     /* USB general purpose IRQ: ep0, state changes, dma, etc */
1597     - status = request_irq(pdev->resource[1].start, omap_udc_irq,
1598     - 0, driver_name, udc);
1599     + status = devm_request_irq(&pdev->dev, pdev->resource[1].start,
1600     + omap_udc_irq, 0, driver_name, udc);
1601     if (status != 0) {
1602     ERR("can't get irq %d, err %d\n",
1603     (int) pdev->resource[1].start, status);
1604     @@ -2876,20 +2892,20 @@ bad_on_1710:
1605     }
1606    
1607     /* USB "non-iso" IRQ (PIO for all but ep0) */
1608     - status = request_irq(pdev->resource[2].start, omap_udc_pio_irq,
1609     - 0, "omap_udc pio", udc);
1610     + status = devm_request_irq(&pdev->dev, pdev->resource[2].start,
1611     + omap_udc_pio_irq, 0, "omap_udc pio", udc);
1612     if (status != 0) {
1613     ERR("can't get irq %d, err %d\n",
1614     (int) pdev->resource[2].start, status);
1615     - goto cleanup2;
1616     + goto cleanup1;
1617     }
1618     #ifdef USE_ISO
1619     - status = request_irq(pdev->resource[3].start, omap_udc_iso_irq,
1620     - 0, "omap_udc iso", udc);
1621     + status = devm_request_irq(&pdev->dev, pdev->resource[3].start,
1622     + omap_udc_iso_irq, 0, "omap_udc iso", udc);
1623     if (status != 0) {
1624     ERR("can't get irq %d, err %d\n",
1625     (int) pdev->resource[3].start, status);
1626     - goto cleanup3;
1627     + goto cleanup1;
1628     }
1629     #endif
1630     if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
1631     @@ -2900,23 +2916,8 @@ bad_on_1710:
1632     }
1633    
1634     create_proc_file();
1635     - status = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
1636     - omap_udc_release);
1637     - if (status)
1638     - goto cleanup4;
1639     -
1640     - return 0;
1641     -
1642     -cleanup4:
1643     - remove_proc_file();
1644     -
1645     -#ifdef USE_ISO
1646     -cleanup3:
1647     - free_irq(pdev->resource[2].start, udc);
1648     -#endif
1649     -
1650     -cleanup2:
1651     - free_irq(pdev->resource[1].start, udc);
1652     + return usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
1653     + omap_udc_release);
1654    
1655     cleanup1:
1656     kfree(udc);
1657     @@ -2943,42 +2944,15 @@ static int omap_udc_remove(struct platform_device *pdev)
1658     {
1659     DECLARE_COMPLETION_ONSTACK(done);
1660    
1661     - if (!udc)
1662     - return -ENODEV;
1663     -
1664     - usb_del_gadget_udc(&udc->gadget);
1665     - if (udc->driver)
1666     - return -EBUSY;
1667     -
1668     udc->done = &done;
1669    
1670     - pullup_disable(udc);
1671     - if (!IS_ERR_OR_NULL(udc->transceiver)) {
1672     - usb_put_phy(udc->transceiver);
1673     - udc->transceiver = NULL;
1674     - }
1675     - omap_writew(0, UDC_SYSCON1);
1676     -
1677     - remove_proc_file();
1678     -
1679     -#ifdef USE_ISO
1680     - free_irq(pdev->resource[3].start, udc);
1681     -#endif
1682     - free_irq(pdev->resource[2].start, udc);
1683     - free_irq(pdev->resource[1].start, udc);
1684     + usb_del_gadget_udc(&udc->gadget);
1685    
1686     - if (udc->dc_clk) {
1687     - if (udc->clk_requested)
1688     - omap_udc_enable_clock(0);
1689     - clk_put(udc->hhc_clk);
1690     - clk_put(udc->dc_clk);
1691     - }
1692     + wait_for_completion(&done);
1693    
1694     release_mem_region(pdev->resource[0].start,
1695     pdev->resource[0].end - pdev->resource[0].start + 1);
1696    
1697     - wait_for_completion(&done);
1698     -
1699     return 0;
1700     }
1701    
1702     diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
1703     index e12bb256036f..7ab6caef599c 100644
1704     --- a/drivers/xen/balloon.c
1705     +++ b/drivers/xen/balloon.c
1706     @@ -251,25 +251,10 @@ static void release_memory_resource(struct resource *resource)
1707     kfree(resource);
1708     }
1709    
1710     -/*
1711     - * Host memory not allocated to dom0. We can use this range for hotplug-based
1712     - * ballooning.
1713     - *
1714     - * It's a type-less resource. Setting IORESOURCE_MEM will make resource
1715     - * management algorithms (arch_remove_reservations()) look into guest e820,
1716     - * which we don't want.
1717     - */
1718     -static struct resource hostmem_resource = {
1719     - .name = "Host RAM",
1720     -};
1721     -
1722     -void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
1723     -{}
1724     -
1725     static struct resource *additional_memory_resource(phys_addr_t size)
1726     {
1727     - struct resource *res, *res_hostmem;
1728     - int ret = -ENOMEM;
1729     + struct resource *res;
1730     + int ret;
1731    
1732     res = kzalloc(sizeof(*res), GFP_KERNEL);
1733     if (!res)
1734     @@ -278,42 +263,13 @@ static struct resource *additional_memory_resource(phys_addr_t size)
1735     res->name = "System RAM";
1736     res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
1737    
1738     - res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
1739     - if (res_hostmem) {
1740     - /* Try to grab a range from hostmem */
1741     - res_hostmem->name = "Host memory";
1742     - ret = allocate_resource(&hostmem_resource, res_hostmem,
1743     - size, 0, -1,
1744     - PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
1745     - }
1746     -
1747     - if (!ret) {
1748     - /*
1749     - * Insert this resource into iomem. Because hostmem_resource
1750     - * tracks portion of guest e820 marked as UNUSABLE noone else
1751     - * should try to use it.
1752     - */
1753     - res->start = res_hostmem->start;
1754     - res->end = res_hostmem->end;
1755     - ret = insert_resource(&iomem_resource, res);
1756     - if (ret < 0) {
1757     - pr_err("Can't insert iomem_resource [%llx - %llx]\n",
1758     - res->start, res->end);
1759     - release_memory_resource(res_hostmem);
1760     - res_hostmem = NULL;
1761     - res->start = res->end = 0;
1762     - }
1763     - }
1764     -
1765     - if (ret) {
1766     - ret = allocate_resource(&iomem_resource, res,
1767     - size, 0, -1,
1768     - PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
1769     - if (ret < 0) {
1770     - pr_err("Cannot allocate new System RAM resource\n");
1771     - kfree(res);
1772     - return NULL;
1773     - }
1774     + ret = allocate_resource(&iomem_resource, res,
1775     + size, 0, -1,
1776     + PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
1777     + if (ret < 0) {
1778     + pr_err("Cannot allocate new System RAM resource\n");
1779     + kfree(res);
1780     + return NULL;
1781     }
1782    
1783     #ifdef CONFIG_SPARSEMEM
1784     @@ -325,7 +281,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
1785     pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
1786     pfn, limit);
1787     release_memory_resource(res);
1788     - release_memory_resource(res_hostmem);
1789     return NULL;
1790     }
1791     }
1792     @@ -747,8 +702,6 @@ static int __init balloon_init(void)
1793     set_online_page_callback(&xen_online_page);
1794     register_memory_notifier(&xen_memory_nb);
1795     register_sysctl_table(xen_root);
1796     -
1797     - arch_xen_balloon_init(&hostmem_resource);
1798     #endif
1799    
1800     #ifdef CONFIG_XEN_PV
1801     diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
1802     index 2f11ca72a281..77224d8f3e6f 100644
1803     --- a/drivers/xen/pvcalls-front.c
1804     +++ b/drivers/xen/pvcalls-front.c
1805     @@ -385,8 +385,8 @@ static int create_active(struct sock_mapping *map, int *evtchn)
1806     out_error:
1807     if (*evtchn >= 0)
1808     xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
1809     - kfree(map->active.data.in);
1810     - kfree(map->active.ring);
1811     + free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
1812     + free_page((unsigned long)map->active.ring);
1813     return ret;
1814     }
1815    
1816     diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
1817     index 23f1387b3ef7..e7df65d32c91 100644
1818     --- a/drivers/xen/xlate_mmu.c
1819     +++ b/drivers/xen/xlate_mmu.c
1820     @@ -36,6 +36,7 @@
1821     #include <asm/xen/hypervisor.h>
1822    
1823     #include <xen/xen.h>
1824     +#include <xen/xen-ops.h>
1825     #include <xen/page.h>
1826     #include <xen/interface/xen.h>
1827     #include <xen/interface/memory.h>
1828     diff --git a/fs/afs/inode.c b/fs/afs/inode.c
1829     index 479b7fdda124..071075d775a9 100644
1830     --- a/fs/afs/inode.c
1831     +++ b/fs/afs/inode.c
1832     @@ -379,7 +379,7 @@ void afs_zap_data(struct afs_vnode *vnode)
1833     int afs_validate(struct afs_vnode *vnode, struct key *key)
1834     {
1835     time64_t now = ktime_get_real_seconds();
1836     - bool valid = false;
1837     + bool valid;
1838     int ret;
1839    
1840     _enter("{v={%x:%u} fl=%lx},%x",
1841     @@ -399,15 +399,21 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
1842     vnode->cb_v_break = vnode->volume->cb_v_break;
1843     valid = false;
1844     } else if (vnode->status.type == AFS_FTYPE_DIR &&
1845     - test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) &&
1846     - vnode->cb_expires_at - 10 > now) {
1847     - valid = true;
1848     - } else if (!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) &&
1849     - vnode->cb_expires_at - 10 > now) {
1850     + (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) ||
1851     + vnode->cb_expires_at - 10 <= now)) {
1852     + valid = false;
1853     + } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) ||
1854     + vnode->cb_expires_at - 10 <= now) {
1855     + valid = false;
1856     + } else {
1857     valid = true;
1858     }
1859     } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
1860     valid = true;
1861     + } else {
1862     + vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
1863     + vnode->cb_v_break = vnode->volume->cb_v_break;
1864     + valid = false;
1865     }
1866    
1867     read_sequnlock_excl(&vnode->cb_lock);
1868     diff --git a/fs/aio.c b/fs/aio.c
1869     index b9350f3360c6..04c4d6218978 100644
1870     --- a/fs/aio.c
1871     +++ b/fs/aio.c
1872     @@ -1436,6 +1436,7 @@ static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
1873     ret = ioprio_check_cap(iocb->aio_reqprio);
1874     if (ret) {
1875     pr_debug("aio ioprio check cap error: %d\n", ret);
1876     + fput(req->ki_filp);
1877     return ret;
1878     }
1879    
1880     diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
1881     index ba8950bfd9c7..84cb6e5ef36c 100644
1882     --- a/fs/btrfs/send.c
1883     +++ b/fs/btrfs/send.c
1884     @@ -3344,7 +3344,8 @@ static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
1885     kfree(m);
1886     }
1887    
1888     -static void tail_append_pending_moves(struct pending_dir_move *moves,
1889     +static void tail_append_pending_moves(struct send_ctx *sctx,
1890     + struct pending_dir_move *moves,
1891     struct list_head *stack)
1892     {
1893     if (list_empty(&moves->list)) {
1894     @@ -3355,6 +3356,10 @@ static void tail_append_pending_moves(struct pending_dir_move *moves,
1895     list_add_tail(&moves->list, stack);
1896     list_splice_tail(&list, stack);
1897     }
1898     + if (!RB_EMPTY_NODE(&moves->node)) {
1899     + rb_erase(&moves->node, &sctx->pending_dir_moves);
1900     + RB_CLEAR_NODE(&moves->node);
1901     + }
1902     }
1903    
1904     static int apply_children_dir_moves(struct send_ctx *sctx)
1905     @@ -3369,7 +3374,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
1906     return 0;
1907    
1908     INIT_LIST_HEAD(&stack);
1909     - tail_append_pending_moves(pm, &stack);
1910     + tail_append_pending_moves(sctx, pm, &stack);
1911    
1912     while (!list_empty(&stack)) {
1913     pm = list_first_entry(&stack, struct pending_dir_move, list);
1914     @@ -3380,7 +3385,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
1915     goto out;
1916     pm = get_pending_dir_moves(sctx, parent_ino);
1917     if (pm)
1918     - tail_append_pending_moves(pm, &stack);
1919     + tail_append_pending_moves(sctx, pm, &stack);
1920     }
1921     return 0;
1922    
1923     diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
1924     index 95983c744164..5ab411d4bc59 100644
1925     --- a/fs/cachefiles/namei.c
1926     +++ b/fs/cachefiles/namei.c
1927     @@ -244,11 +244,13 @@ wait_for_old_object:
1928    
1929     ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
1930    
1931     - cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_retry);
1932     + cache->cache.ops->put_object(&xobject->fscache,
1933     + (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_retry);
1934     goto try_again;
1935    
1936     requeue:
1937     - cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo);
1938     + cache->cache.ops->put_object(&xobject->fscache,
1939     + (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_timeo);
1940     _leave(" = -ETIMEDOUT");
1941     return -ETIMEDOUT;
1942     }
1943     diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
1944     index 40f7595aad10..8a577409d030 100644
1945     --- a/fs/cachefiles/rdwr.c
1946     +++ b/fs/cachefiles/rdwr.c
1947     @@ -535,7 +535,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
1948     netpage->index, cachefiles_gfp);
1949     if (ret < 0) {
1950     if (ret == -EEXIST) {
1951     + put_page(backpage);
1952     + backpage = NULL;
1953     put_page(netpage);
1954     + netpage = NULL;
1955     fscache_retrieval_complete(op, 1);
1956     continue;
1957     }
1958     @@ -608,7 +611,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
1959     netpage->index, cachefiles_gfp);
1960     if (ret < 0) {
1961     if (ret == -EEXIST) {
1962     + put_page(backpage);
1963     + backpage = NULL;
1964     put_page(netpage);
1965     + netpage = NULL;
1966     fscache_retrieval_complete(op, 1);
1967     continue;
1968     }
1969     @@ -962,11 +968,8 @@ void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
1970     __releases(&object->fscache.cookie->lock)
1971     {
1972     struct cachefiles_object *object;
1973     - struct cachefiles_cache *cache;
1974    
1975     object = container_of(_object, struct cachefiles_object, fscache);
1976     - cache = container_of(object->fscache.cache,
1977     - struct cachefiles_cache, cache);
1978    
1979     _enter("%p,{%lu}", object, page->index);
1980    
1981     diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
1982     index 0a29a00aed2e..511e6c68156a 100644
1983     --- a/fs/cachefiles/xattr.c
1984     +++ b/fs/cachefiles/xattr.c
1985     @@ -135,7 +135,8 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
1986     struct dentry *dentry = object->dentry;
1987     int ret;
1988    
1989     - ASSERT(dentry);
1990     + if (!dentry)
1991     + return -ESTALE;
1992    
1993     _enter("%p,#%d", object, auxdata->len);
1994    
1995     diff --git a/fs/dax.c b/fs/dax.c
1996     index b0cd1364c68f..3a2682a6c832 100644
1997     --- a/fs/dax.c
1998     +++ b/fs/dax.c
1999     @@ -423,7 +423,7 @@ bool dax_lock_mapping_entry(struct page *page)
2000     for (;;) {
2001     mapping = READ_ONCE(page->mapping);
2002    
2003     - if (!dax_mapping(mapping))
2004     + if (!mapping || !dax_mapping(mapping))
2005     break;
2006    
2007     /*
2008     diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
2009     index 645158dc33f1..63707abcbeb3 100644
2010     --- a/fs/exportfs/expfs.c
2011     +++ b/fs/exportfs/expfs.c
2012     @@ -77,7 +77,7 @@ static bool dentry_connected(struct dentry *dentry)
2013     struct dentry *parent = dget_parent(dentry);
2014    
2015     dput(dentry);
2016     - if (IS_ROOT(dentry)) {
2017     + if (dentry == parent) {
2018     dput(parent);
2019     return false;
2020     }
2021     diff --git a/fs/fscache/object.c b/fs/fscache/object.c
2022     index 9edc920f651f..6d9cb1719de5 100644
2023     --- a/fs/fscache/object.c
2024     +++ b/fs/fscache/object.c
2025     @@ -730,6 +730,9 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
2026    
2027     if (awaken)
2028     wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
2029     + if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
2030     + wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
2031     +
2032    
2033     /* Prevent a race with our last child, which has to signal EV_CLEARED
2034     * before dropping our spinlock.
2035     diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
2036     index 374b5688e29e..9bdff5e40626 100644
2037     --- a/fs/hfs/btree.c
2038     +++ b/fs/hfs/btree.c
2039     @@ -329,13 +329,14 @@ void hfs_bmap_free(struct hfs_bnode *node)
2040    
2041     nidx -= len * 8;
2042     i = node->next;
2043     - hfs_bnode_put(node);
2044     if (!i) {
2045     /* panic */;
2046     pr_crit("unable to free bnode %u. bmap not found!\n",
2047     node->this);
2048     + hfs_bnode_put(node);
2049     return;
2050     }
2051     + hfs_bnode_put(node);
2052     node = hfs_bnode_find(tree, i);
2053     if (IS_ERR(node))
2054     return;
2055     diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
2056     index de14b2b6881b..3de3bc4918b5 100644
2057     --- a/fs/hfsplus/btree.c
2058     +++ b/fs/hfsplus/btree.c
2059     @@ -454,14 +454,15 @@ void hfs_bmap_free(struct hfs_bnode *node)
2060    
2061     nidx -= len * 8;
2062     i = node->next;
2063     - hfs_bnode_put(node);
2064     if (!i) {
2065     /* panic */;
2066     pr_crit("unable to free bnode %u. "
2067     "bmap not found!\n",
2068     node->this);
2069     + hfs_bnode_put(node);
2070     return;
2071     }
2072     + hfs_bnode_put(node);
2073     node = hfs_bnode_find(tree, i);
2074     if (IS_ERR(node))
2075     return;
2076     diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
2077     index 86ac2c5b93fe..e0fe9a0f1bf1 100644
2078     --- a/fs/nfs/flexfilelayout/flexfilelayout.c
2079     +++ b/fs/nfs/flexfilelayout/flexfilelayout.c
2080     @@ -1733,7 +1733,8 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
2081     if (fh)
2082     hdr->args.fh = fh;
2083    
2084     - if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
2085     + if (vers == 4 &&
2086     + !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
2087     goto out_failed;
2088    
2089     /*
2090     @@ -1798,7 +1799,8 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
2091     if (fh)
2092     hdr->args.fh = fh;
2093    
2094     - if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
2095     + if (vers == 4 &&
2096     + !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
2097     goto out_failed;
2098    
2099     /*
2100     diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
2101     index 9f88188060db..4bf8d5854b27 100644
2102     --- a/fs/ocfs2/export.c
2103     +++ b/fs/ocfs2/export.c
2104     @@ -125,10 +125,10 @@ check_err:
2105    
2106     check_gen:
2107     if (handle->ih_generation != inode->i_generation) {
2108     - iput(inode);
2109     trace_ocfs2_get_dentry_generation((unsigned long long)blkno,
2110     handle->ih_generation,
2111     inode->i_generation);
2112     + iput(inode);
2113     result = ERR_PTR(-ESTALE);
2114     goto bail;
2115     }
2116     diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
2117     index 7eb3b0a6347e..f55f82ca3425 100644
2118     --- a/fs/ocfs2/move_extents.c
2119     +++ b/fs/ocfs2/move_extents.c
2120     @@ -156,18 +156,14 @@ out:
2121     }
2122    
2123     /*
2124     - * lock allocators, and reserving appropriate number of bits for
2125     - * meta blocks and data clusters.
2126     - *
2127     - * in some cases, we don't need to reserve clusters, just let data_ac
2128     - * be NULL.
2129     + * lock allocator, and reserve appropriate number of bits for
2130     + * meta blocks.
2131     */
2132     -static int ocfs2_lock_allocators_move_extents(struct inode *inode,
2133     +static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
2134     struct ocfs2_extent_tree *et,
2135     u32 clusters_to_move,
2136     u32 extents_to_split,
2137     struct ocfs2_alloc_context **meta_ac,
2138     - struct ocfs2_alloc_context **data_ac,
2139     int extra_blocks,
2140     int *credits)
2141     {
2142     @@ -192,13 +188,6 @@ static int ocfs2_lock_allocators_move_extents(struct inode *inode,
2143     goto out;
2144     }
2145    
2146     - if (data_ac) {
2147     - ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
2148     - if (ret) {
2149     - mlog_errno(ret);
2150     - goto out;
2151     - }
2152     - }
2153    
2154     *credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
2155    
2156     @@ -257,10 +246,10 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
2157     }
2158     }
2159    
2160     - ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
2161     - &context->meta_ac,
2162     - &context->data_ac,
2163     - extra_blocks, &credits);
2164     + ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
2165     + *len, 1,
2166     + &context->meta_ac,
2167     + extra_blocks, &credits);
2168     if (ret) {
2169     mlog_errno(ret);
2170     goto out;
2171     @@ -283,6 +272,21 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
2172     }
2173     }
2174    
2175     + /*
2176     + * Make sure ocfs2_reserve_cluster is called after
2177     + * __ocfs2_flush_truncate_log, otherwise, dead lock may happen.
2178     + *
2179     + * If ocfs2_reserve_cluster is called
2180     + * before __ocfs2_flush_truncate_log, dead lock on global bitmap
2181     + * may happen.
2182     + *
2183     + */
2184     + ret = ocfs2_reserve_clusters(osb, *len, &context->data_ac);
2185     + if (ret) {
2186     + mlog_errno(ret);
2187     + goto out_unlock_mutex;
2188     + }
2189     +
2190     handle = ocfs2_start_trans(osb, credits);
2191     if (IS_ERR(handle)) {
2192     ret = PTR_ERR(handle);
2193     @@ -600,9 +604,10 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
2194     }
2195     }
2196    
2197     - ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
2198     - &context->meta_ac,
2199     - NULL, extra_blocks, &credits);
2200     + ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
2201     + len, 1,
2202     + &context->meta_ac,
2203     + extra_blocks, &credits);
2204     if (ret) {
2205     mlog_errno(ret);
2206     goto out;
2207     diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
2208     index f4fd2e72add4..03cd59375abe 100644
2209     --- a/fs/pstore/ram.c
2210     +++ b/fs/pstore/ram.c
2211     @@ -806,17 +806,14 @@ static int ramoops_probe(struct platform_device *pdev)
2212    
2213     cxt->pstore.data = cxt;
2214     /*
2215     - * Console can handle any buffer size, so prefer LOG_LINE_MAX. If we
2216     - * have to handle dumps, we must have at least record_size buffer. And
2217     - * for ftrace, bufsize is irrelevant (if bufsize is 0, buf will be
2218     - * ZERO_SIZE_PTR).
2219     + * Since bufsize is only used for dmesg crash dumps, it
2220     + * must match the size of the dprz record (after PRZ header
2221     + * and ECC bytes have been accounted for).
2222     */
2223     - if (cxt->console_size)
2224     - cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */
2225     - cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize);
2226     - cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL);
2227     + cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
2228     + cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
2229     if (!cxt->pstore.buf) {
2230     - pr_err("cannot allocate pstore buffer\n");
2231     + pr_err("cannot allocate pstore crash dump buffer\n");
2232     err = -ENOMEM;
2233     goto fail_clear;
2234     }
2235     diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
2236     index 499a20a5a010..273736f41be3 100644
2237     --- a/fs/sysv/inode.c
2238     +++ b/fs/sysv/inode.c
2239     @@ -275,7 +275,7 @@ static int __sysv_write_inode(struct inode *inode, int wait)
2240     }
2241     }
2242     brelse(bh);
2243     - return 0;
2244     + return err;
2245     }
2246    
2247     int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
2248     diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
2249     index 34cf0fdd7dc7..610815e3f1aa 100644
2250     --- a/include/linux/fscache-cache.h
2251     +++ b/include/linux/fscache-cache.h
2252     @@ -196,8 +196,7 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
2253     static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
2254     int n_pages)
2255     {
2256     - atomic_sub(n_pages, &op->n_pages);
2257     - if (atomic_read(&op->n_pages) <= 0)
2258     + if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
2259     fscache_op_complete(&op->op, false);
2260     }
2261    
2262     diff --git a/include/linux/pstore.h b/include/linux/pstore.h
2263     index a15bc4d48752..30fcec375a3a 100644
2264     --- a/include/linux/pstore.h
2265     +++ b/include/linux/pstore.h
2266     @@ -90,7 +90,10 @@ struct pstore_record {
2267     *
2268     * @buf_lock: spinlock to serialize access to @buf
2269     * @buf: preallocated crash dump buffer
2270     - * @bufsize: size of @buf available for crash dump writes
2271     + * @bufsize: size of @buf available for crash dump bytes (must match
2272     + * smallest number of bytes available for writing to a
2273     + * backend entry, since compressed bytes don't take kindly
2274     + * to being truncated)
2275     *
2276     * @read_mutex: serializes @open, @read, @close, and @erase callbacks
2277     * @flags: bitfield of frontends the backend can accept writes for
2278     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
2279     index e6ef9cc05e60..60a2e7646985 100644
2280     --- a/include/linux/skbuff.h
2281     +++ b/include/linux/skbuff.h
2282     @@ -1355,6 +1355,17 @@ static inline void skb_zcopy_abort(struct sk_buff *skb)
2283     }
2284     }
2285    
2286     +static inline void skb_mark_not_on_list(struct sk_buff *skb)
2287     +{
2288     + skb->next = NULL;
2289     +}
2290     +
2291     +static inline void skb_list_del_init(struct sk_buff *skb)
2292     +{
2293     + __list_del_entry(&skb->list);
2294     + skb_mark_not_on_list(skb);
2295     +}
2296     +
2297     /**
2298     * skb_queue_empty - check if a queue is empty
2299     * @list: queue head
2300     diff --git a/include/net/neighbour.h b/include/net/neighbour.h
2301     index 6c1eecd56a4d..beeeed126872 100644
2302     --- a/include/net/neighbour.h
2303     +++ b/include/net/neighbour.h
2304     @@ -453,6 +453,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
2305    
2306     static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
2307     {
2308     + unsigned int hh_alen = 0;
2309     unsigned int seq;
2310     unsigned int hh_len;
2311    
2312     @@ -460,16 +461,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
2313     seq = read_seqbegin(&hh->hh_lock);
2314     hh_len = hh->hh_len;
2315     if (likely(hh_len <= HH_DATA_MOD)) {
2316     - /* this is inlined by gcc */
2317     - memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
2318     + hh_alen = HH_DATA_MOD;
2319     +
2320     + /* skb_push() would proceed silently if we have room for
2321     + * the unaligned size but not for the aligned size:
2322     + * check headroom explicitly.
2323     + */
2324     + if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
2325     + /* this is inlined by gcc */
2326     + memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
2327     + HH_DATA_MOD);
2328     + }
2329     } else {
2330     - unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
2331     + hh_alen = HH_DATA_ALIGN(hh_len);
2332    
2333     - memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
2334     + if (likely(skb_headroom(skb) >= hh_alen)) {
2335     + memcpy(skb->data - hh_alen, hh->hh_data,
2336     + hh_alen);
2337     + }
2338     }
2339     } while (read_seqretry(&hh->hh_lock, seq));
2340    
2341     - skb_push(skb, hh_len);
2342     + if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
2343     + kfree_skb(skb);
2344     + return NET_XMIT_DROP;
2345     + }
2346     +
2347     + __skb_push(skb, hh_len);
2348     return dev_queue_xmit(skb);
2349     }
2350    
2351     diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
2352     index cd24be4c4a99..13d55206bb9f 100644
2353     --- a/include/net/netfilter/ipv4/nf_nat_masquerade.h
2354     +++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h
2355     @@ -9,7 +9,7 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
2356     const struct nf_nat_range2 *range,
2357     const struct net_device *out);
2358    
2359     -void nf_nat_masquerade_ipv4_register_notifier(void);
2360     +int nf_nat_masquerade_ipv4_register_notifier(void);
2361     void nf_nat_masquerade_ipv4_unregister_notifier(void);
2362    
2363     #endif /*_NF_NAT_MASQUERADE_IPV4_H_ */
2364     diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
2365     index 0c3b5ebf0bb8..2917bf95c437 100644
2366     --- a/include/net/netfilter/ipv6/nf_nat_masquerade.h
2367     +++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h
2368     @@ -5,7 +5,7 @@
2369     unsigned int
2370     nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
2371     const struct net_device *out);
2372     -void nf_nat_masquerade_ipv6_register_notifier(void);
2373     +int nf_nat_masquerade_ipv6_register_notifier(void);
2374     void nf_nat_masquerade_ipv6_unregister_notifier(void);
2375    
2376     #endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
2377     diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
2378     index a11f93790476..feada358d872 100644
2379     --- a/include/net/sctp/structs.h
2380     +++ b/include/net/sctp/structs.h
2381     @@ -2075,6 +2075,8 @@ struct sctp_association {
2382    
2383     __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
2384     __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
2385     +
2386     + struct rcu_head rcu;
2387     };
2388    
2389    
2390     diff --git a/include/xen/balloon.h b/include/xen/balloon.h
2391     index 61f410fd74e4..4914b93a23f2 100644
2392     --- a/include/xen/balloon.h
2393     +++ b/include/xen/balloon.h
2394     @@ -44,8 +44,3 @@ static inline void xen_balloon_init(void)
2395     {
2396     }
2397     #endif
2398     -
2399     -#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
2400     -struct resource;
2401     -void arch_xen_balloon_init(struct resource *hostmem_resource);
2402     -#endif
2403     diff --git a/init/initramfs.c b/init/initramfs.c
2404     index 640557788026..f6f4a1e4cd54 100644
2405     --- a/init/initramfs.c
2406     +++ b/init/initramfs.c
2407     @@ -291,16 +291,6 @@ static int __init do_reset(void)
2408     return 1;
2409     }
2410    
2411     -static int __init maybe_link(void)
2412     -{
2413     - if (nlink >= 2) {
2414     - char *old = find_link(major, minor, ino, mode, collected);
2415     - if (old)
2416     - return (ksys_link(old, collected) < 0) ? -1 : 1;
2417     - }
2418     - return 0;
2419     -}
2420     -
2421     static void __init clean_path(char *path, umode_t fmode)
2422     {
2423     struct kstat st;
2424     @@ -313,6 +303,18 @@ static void __init clean_path(char *path, umode_t fmode)
2425     }
2426     }
2427    
2428     +static int __init maybe_link(void)
2429     +{
2430     + if (nlink >= 2) {
2431     + char *old = find_link(major, minor, ino, mode, collected);
2432     + if (old) {
2433     + clean_path(collected, 0);
2434     + return (ksys_link(old, collected) < 0) ? -1 : 1;
2435     + }
2436     + }
2437     + return 0;
2438     +}
2439     +
2440     static __initdata int wfd;
2441    
2442     static int __init do_name(void)
2443     diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
2444     index 830d7f095748..fc1605aee5ea 100644
2445     --- a/kernel/bpf/local_storage.c
2446     +++ b/kernel/bpf/local_storage.c
2447     @@ -138,7 +138,8 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
2448     return -ENOENT;
2449    
2450     new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
2451     - map->value_size, __GFP_ZERO | GFP_USER,
2452     + map->value_size,
2453     + __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
2454     map->numa_node);
2455     if (!new)
2456     return -ENOMEM;
2457     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
2458     index 5780876ac81a..56acfbb80104 100644
2459     --- a/kernel/bpf/verifier.c
2460     +++ b/kernel/bpf/verifier.c
2461     @@ -5283,7 +5283,7 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
2462     return;
2463     /* NOTE: fake 'exit' subprog should be updated as well. */
2464     for (i = 0; i <= env->subprog_cnt; i++) {
2465     - if (env->subprog_info[i].start < off)
2466     + if (env->subprog_info[i].start <= off)
2467     continue;
2468     env->subprog_info[i].start += len - 1;
2469     }
2470     diff --git a/kernel/kcov.c b/kernel/kcov.c
2471     index 3ebd09efe72a..97959d7b77e2 100644
2472     --- a/kernel/kcov.c
2473     +++ b/kernel/kcov.c
2474     @@ -56,7 +56,7 @@ struct kcov {
2475     struct task_struct *t;
2476     };
2477    
2478     -static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
2479     +static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
2480     {
2481     unsigned int mode;
2482    
2483     @@ -78,7 +78,7 @@ static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
2484     return mode == needed_mode;
2485     }
2486    
2487     -static unsigned long canonicalize_ip(unsigned long ip)
2488     +static notrace unsigned long canonicalize_ip(unsigned long ip)
2489     {
2490     #ifdef CONFIG_RANDOMIZE_BASE
2491     ip -= kaslr_offset();
2492     diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
2493     index 08fcfe440c63..9864a35c8bb5 100644
2494     --- a/kernel/trace/bpf_trace.c
2495     +++ b/kernel/trace/bpf_trace.c
2496     @@ -196,11 +196,13 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
2497     i++;
2498     } else if (fmt[i] == 'p' || fmt[i] == 's') {
2499     mod[fmt_cnt]++;
2500     - i++;
2501     - if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
2502     + /* disallow any further format extensions */
2503     + if (fmt[i + 1] != 0 &&
2504     + !isspace(fmt[i + 1]) &&
2505     + !ispunct(fmt[i + 1]))
2506     return -EINVAL;
2507     fmt_cnt++;
2508     - if (fmt[i - 1] == 's') {
2509     + if (fmt[i] == 's') {
2510     if (str_seen)
2511     /* allow only one '%s' per fmt string */
2512     return -EINVAL;
2513     diff --git a/lib/debugobjects.c b/lib/debugobjects.c
2514     index 70935ed91125..14afeeb7d6ef 100644
2515     --- a/lib/debugobjects.c
2516     +++ b/lib/debugobjects.c
2517     @@ -135,7 +135,6 @@ static void fill_pool(void)
2518     if (!new)
2519     return;
2520    
2521     - kmemleak_ignore(new);
2522     raw_spin_lock_irqsave(&pool_lock, flags);
2523     hlist_add_head(&new->node, &obj_pool);
2524     debug_objects_allocated++;
2525     @@ -1128,7 +1127,6 @@ static int __init debug_objects_replace_static_objects(void)
2526     obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
2527     if (!obj)
2528     goto free;
2529     - kmemleak_ignore(obj);
2530     hlist_add_head(&obj->node, &objects);
2531     }
2532    
2533     @@ -1184,7 +1182,8 @@ void __init debug_objects_mem_init(void)
2534    
2535     obj_cache = kmem_cache_create("debug_objects_cache",
2536     sizeof (struct debug_obj), 0,
2537     - SLAB_DEBUG_OBJECTS, NULL);
2538     + SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
2539     + NULL);
2540    
2541     if (!obj_cache || debug_objects_replace_static_objects()) {
2542     debug_objects_enabled = 0;
2543     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2544     index b721631d78ab..6a62b2421cdf 100644
2545     --- a/mm/page_alloc.c
2546     +++ b/mm/page_alloc.c
2547     @@ -5733,8 +5733,10 @@ void __meminit init_currently_empty_zone(struct zone *zone,
2548     unsigned long size)
2549     {
2550     struct pglist_data *pgdat = zone->zone_pgdat;
2551     + int zone_idx = zone_idx(zone) + 1;
2552    
2553     - pgdat->nr_zones = zone_idx(zone) + 1;
2554     + if (zone_idx > pgdat->nr_zones)
2555     + pgdat->nr_zones = zone_idx;
2556    
2557     zone->zone_start_pfn = zone_start_pfn;
2558    
2559     diff --git a/net/core/dev.c b/net/core/dev.c
2560     index 22af88c47756..1f1aae27d41f 100644
2561     --- a/net/core/dev.c
2562     +++ b/net/core/dev.c
2563     @@ -2161,6 +2161,20 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
2564     return active;
2565     }
2566    
2567     +static void reset_xps_maps(struct net_device *dev,
2568     + struct xps_dev_maps *dev_maps,
2569     + bool is_rxqs_map)
2570     +{
2571     + if (is_rxqs_map) {
2572     + static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2573     + RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
2574     + } else {
2575     + RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
2576     + }
2577     + static_key_slow_dec_cpuslocked(&xps_needed);
2578     + kfree_rcu(dev_maps, rcu);
2579     +}
2580     +
2581     static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
2582     struct xps_dev_maps *dev_maps, unsigned int nr_ids,
2583     u16 offset, u16 count, bool is_rxqs_map)
2584     @@ -2172,18 +2186,15 @@ static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
2585     j < nr_ids;)
2586     active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
2587     count);
2588     - if (!active) {
2589     - if (is_rxqs_map) {
2590     - RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
2591     - } else {
2592     - RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
2593     + if (!active)
2594     + reset_xps_maps(dev, dev_maps, is_rxqs_map);
2595    
2596     - for (i = offset + (count - 1); count--; i--)
2597     - netdev_queue_numa_node_write(
2598     - netdev_get_tx_queue(dev, i),
2599     - NUMA_NO_NODE);
2600     + if (!is_rxqs_map) {
2601     + for (i = offset + (count - 1); count--; i--) {
2602     + netdev_queue_numa_node_write(
2603     + netdev_get_tx_queue(dev, i),
2604     + NUMA_NO_NODE);
2605     }
2606     - kfree_rcu(dev_maps, rcu);
2607     }
2608     }
2609    
2610     @@ -2220,10 +2231,6 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2611     false);
2612    
2613     out_no_maps:
2614     - if (static_key_enabled(&xps_rxqs_needed))
2615     - static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2616     -
2617     - static_key_slow_dec_cpuslocked(&xps_needed);
2618     mutex_unlock(&xps_map_mutex);
2619     cpus_read_unlock();
2620     }
2621     @@ -2341,9 +2348,12 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2622     if (!new_dev_maps)
2623     goto out_no_new_maps;
2624    
2625     - static_key_slow_inc_cpuslocked(&xps_needed);
2626     - if (is_rxqs_map)
2627     - static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2628     + if (!dev_maps) {
2629     + /* Increment static keys at most once per type */
2630     + static_key_slow_inc_cpuslocked(&xps_needed);
2631     + if (is_rxqs_map)
2632     + static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2633     + }
2634    
2635     for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2636     j < nr_ids;) {
2637     @@ -2441,13 +2451,8 @@ out_no_new_maps:
2638     }
2639    
2640     /* free map if not active */
2641     - if (!active) {
2642     - if (is_rxqs_map)
2643     - RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
2644     - else
2645     - RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
2646     - kfree_rcu(dev_maps, rcu);
2647     - }
2648     + if (!active)
2649     + reset_xps_maps(dev, dev_maps, is_rxqs_map);
2650    
2651     out_no_maps:
2652     mutex_unlock(&xps_map_mutex);
2653     @@ -4981,7 +4986,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo
2654     struct net_device *orig_dev = skb->dev;
2655     struct packet_type *pt_prev = NULL;
2656    
2657     - list_del(&skb->list);
2658     + skb_list_del_init(skb);
2659     __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
2660     if (!pt_prev)
2661     continue;
2662     @@ -5137,7 +5142,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
2663     INIT_LIST_HEAD(&sublist);
2664     list_for_each_entry_safe(skb, next, head, list) {
2665     net_timestamp_check(netdev_tstamp_prequeue, skb);
2666     - list_del(&skb->list);
2667     + skb_list_del_init(skb);
2668     if (!skb_defer_rx_timestamp(skb))
2669     list_add_tail(&skb->list, &sublist);
2670     }
2671     @@ -5148,7 +5153,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
2672     rcu_read_lock();
2673     list_for_each_entry_safe(skb, next, head, list) {
2674     xdp_prog = rcu_dereference(skb->dev->xdp_prog);
2675     - list_del(&skb->list);
2676     + skb_list_del_init(skb);
2677     if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
2678     list_add_tail(&skb->list, &sublist);
2679     }
2680     @@ -5167,7 +5172,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
2681    
2682     if (cpu >= 0) {
2683     /* Will be handled, remove from list */
2684     - list_del(&skb->list);
2685     + skb_list_del_init(skb);
2686     enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2687     }
2688     }
2689     diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2690     index 6e5d61a20a70..ebde98b565e9 100644
2691     --- a/net/core/rtnetlink.c
2692     +++ b/net/core/rtnetlink.c
2693     @@ -3730,6 +3730,9 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb,
2694     {
2695     int err;
2696    
2697     + if (dev->type != ARPHRD_ETHER)
2698     + return -EINVAL;
2699     +
2700     netif_addr_lock_bh(dev);
2701     err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
2702     if (err)
2703     diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
2704     index cb8fa5d7afe1..f686d7761acb 100644
2705     --- a/net/ipv4/ip_fragment.c
2706     +++ b/net/ipv4/ip_fragment.c
2707     @@ -513,6 +513,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
2708     struct rb_node *rbn;
2709     int len;
2710     int ihlen;
2711     + int delta;
2712     int err;
2713     u8 ecn;
2714    
2715     @@ -554,10 +555,16 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
2716     if (len > 65535)
2717     goto out_oversize;
2718    
2719     + delta = - head->truesize;
2720     +
2721     /* Head of list must not be cloned. */
2722     if (skb_unclone(head, GFP_ATOMIC))
2723     goto out_nomem;
2724    
2725     + delta += head->truesize;
2726     + if (delta)
2727     + add_frag_mem_limit(qp->q.net, delta);
2728     +
2729     /* If the first fragment is fragmented itself, we split
2730     * it to two chunks: the first with data and paged part
2731     * and the second, holding only fragments. */
2732     diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
2733     index 3196cf58f418..27c863f6dd83 100644
2734     --- a/net/ipv4/ip_input.c
2735     +++ b/net/ipv4/ip_input.c
2736     @@ -551,7 +551,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
2737     list_for_each_entry_safe(skb, next, head, list) {
2738     struct dst_entry *dst;
2739    
2740     - list_del(&skb->list);
2741     + skb_list_del_init(skb);
2742     /* if ingress device is enslaved to an L3 master device pass the
2743     * skb to its handler for processing
2744     */
2745     @@ -598,7 +598,7 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
2746     struct net_device *dev = skb->dev;
2747     struct net *net = dev_net(dev);
2748    
2749     - list_del(&skb->list);
2750     + skb_list_del_init(skb);
2751     skb = ip_rcv_core(skb, net);
2752     if (skb == NULL)
2753     continue;
2754     diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
2755     index ce1512b02cb2..fd3f9e8a74da 100644
2756     --- a/net/ipv4/netfilter/ipt_MASQUERADE.c
2757     +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
2758     @@ -81,9 +81,12 @@ static int __init masquerade_tg_init(void)
2759     int ret;
2760    
2761     ret = xt_register_target(&masquerade_tg_reg);
2762     + if (ret)
2763     + return ret;
2764    
2765     - if (ret == 0)
2766     - nf_nat_masquerade_ipv4_register_notifier();
2767     + ret = nf_nat_masquerade_ipv4_register_notifier();
2768     + if (ret)
2769     + xt_unregister_target(&masquerade_tg_reg);
2770    
2771     return ret;
2772     }
2773     diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
2774     index ad3aeff152ed..4c7fcd32f8e6 100644
2775     --- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
2776     +++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
2777     @@ -131,28 +131,50 @@ static struct notifier_block masq_inet_notifier = {
2778     .notifier_call = masq_inet_event,
2779     };
2780    
2781     -static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
2782     +static int masq_refcnt;
2783     +static DEFINE_MUTEX(masq_mutex);
2784    
2785     -void nf_nat_masquerade_ipv4_register_notifier(void)
2786     +int nf_nat_masquerade_ipv4_register_notifier(void)
2787     {
2788     + int ret = 0;
2789     +
2790     + mutex_lock(&masq_mutex);
2791     /* check if the notifier was already set */
2792     - if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
2793     - return;
2794     + if (++masq_refcnt > 1)
2795     + goto out_unlock;
2796    
2797     /* Register for device down reports */
2798     - register_netdevice_notifier(&masq_dev_notifier);
2799     + ret = register_netdevice_notifier(&masq_dev_notifier);
2800     + if (ret)
2801     + goto err_dec;
2802     /* Register IP address change reports */
2803     - register_inetaddr_notifier(&masq_inet_notifier);
2804     + ret = register_inetaddr_notifier(&masq_inet_notifier);
2805     + if (ret)
2806     + goto err_unregister;
2807     +
2808     + mutex_unlock(&masq_mutex);
2809     + return ret;
2810     +
2811     +err_unregister:
2812     + unregister_netdevice_notifier(&masq_dev_notifier);
2813     +err_dec:
2814     + masq_refcnt--;
2815     +out_unlock:
2816     + mutex_unlock(&masq_mutex);
2817     + return ret;
2818     }
2819     EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier);
2820    
2821     void nf_nat_masquerade_ipv4_unregister_notifier(void)
2822     {
2823     + mutex_lock(&masq_mutex);
2824     /* check if the notifier still has clients */
2825     - if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
2826     - return;
2827     + if (--masq_refcnt > 0)
2828     + goto out_unlock;
2829    
2830     unregister_netdevice_notifier(&masq_dev_notifier);
2831     unregister_inetaddr_notifier(&masq_inet_notifier);
2832     +out_unlock:
2833     + mutex_unlock(&masq_mutex);
2834     }
2835     EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier);
2836     diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
2837     index f1193e1e928a..6847de1d1db8 100644
2838     --- a/net/ipv4/netfilter/nft_masq_ipv4.c
2839     +++ b/net/ipv4/netfilter/nft_masq_ipv4.c
2840     @@ -69,7 +69,9 @@ static int __init nft_masq_ipv4_module_init(void)
2841     if (ret < 0)
2842     return ret;
2843    
2844     - nf_nat_masquerade_ipv4_register_notifier();
2845     + ret = nf_nat_masquerade_ipv4_register_notifier();
2846     + if (ret)
2847     + nft_unregister_expr(&nft_masq_ipv4_type);
2848    
2849     return ret;
2850     }
2851     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
2852     index 68f65ddf9e3c..bd134e3a0473 100644
2853     --- a/net/ipv4/tcp_output.c
2854     +++ b/net/ipv4/tcp_output.c
2855     @@ -1902,7 +1902,9 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
2856     * This algorithm is from John Heffner.
2857     */
2858     static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
2859     - bool *is_cwnd_limited, u32 max_segs)
2860     + bool *is_cwnd_limited,
2861     + bool *is_rwnd_limited,
2862     + u32 max_segs)
2863     {
2864     const struct inet_connection_sock *icsk = inet_csk(sk);
2865     u32 age, send_win, cong_win, limit, in_flight;
2866     @@ -1910,9 +1912,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
2867     struct sk_buff *head;
2868     int win_divisor;
2869    
2870     - if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2871     - goto send_now;
2872     -
2873     if (icsk->icsk_ca_state >= TCP_CA_Recovery)
2874     goto send_now;
2875    
2876     @@ -1971,10 +1970,27 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
2877     if (age < (tp->srtt_us >> 4))
2878     goto send_now;
2879    
2880     - /* Ok, it looks like it is advisable to defer. */
2881     + /* Ok, it looks like it is advisable to defer.
2882     + * Three cases are tracked :
2883     + * 1) We are cwnd-limited
2884     + * 2) We are rwnd-limited
2885     + * 3) We are application limited.
2886     + */
2887     + if (cong_win < send_win) {
2888     + if (cong_win <= skb->len) {
2889     + *is_cwnd_limited = true;
2890     + return true;
2891     + }
2892     + } else {
2893     + if (send_win <= skb->len) {
2894     + *is_rwnd_limited = true;
2895     + return true;
2896     + }
2897     + }
2898    
2899     - if (cong_win < send_win && cong_win <= skb->len)
2900     - *is_cwnd_limited = true;
2901     + /* If this packet won't get more data, do not wait. */
2902     + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2903     + goto send_now;
2904    
2905     return true;
2906    
2907     @@ -2338,7 +2354,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2908     } else {
2909     if (!push_one &&
2910     tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2911     - max_segs))
2912     + &is_rwnd_limited, max_segs))
2913     break;
2914     }
2915    
2916     @@ -2476,15 +2492,18 @@ void tcp_send_loss_probe(struct sock *sk)
2917     goto rearm_timer;
2918     }
2919     skb = skb_rb_last(&sk->tcp_rtx_queue);
2920     + if (unlikely(!skb)) {
2921     + WARN_ONCE(tp->packets_out,
2922     + "invalid inflight: %u state %u cwnd %u mss %d\n",
2923     + tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
2924     + inet_csk(sk)->icsk_pending = 0;
2925     + return;
2926     + }
2927    
2928     /* At most one outstanding TLP retransmission. */
2929     if (tp->tlp_high_seq)
2930     goto rearm_timer;
2931    
2932     - /* Retransmit last segment. */
2933     - if (WARN_ON(!skb))
2934     - goto rearm_timer;
2935     -
2936     if (skb_still_in_host_queue(sk, skb))
2937     goto rearm_timer;
2938    
2939     diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
2940     index 6242682be876..6b74523fc1c4 100644
2941     --- a/net/ipv6/ip6_input.c
2942     +++ b/net/ipv6/ip6_input.c
2943     @@ -95,7 +95,7 @@ static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
2944     list_for_each_entry_safe(skb, next, head, list) {
2945     struct dst_entry *dst;
2946    
2947     - list_del(&skb->list);
2948     + skb_list_del_init(skb);
2949     /* if ingress device is enslaved to an L3 master device pass the
2950     * skb to its handler for processing
2951     */
2952     @@ -295,7 +295,7 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
2953     struct net_device *dev = skb->dev;
2954     struct net *net = dev_net(dev);
2955    
2956     - list_del(&skb->list);
2957     + skb_list_del_init(skb);
2958     skb = ip6_rcv_core(skb, dev, net);
2959     if (skb == NULL)
2960     continue;
2961     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2962     index f9f8f554d141..2694def1e72c 100644
2963     --- a/net/ipv6/ip6_output.c
2964     +++ b/net/ipv6/ip6_output.c
2965     @@ -195,37 +195,37 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
2966     const struct ipv6_pinfo *np = inet6_sk(sk);
2967     struct in6_addr *first_hop = &fl6->daddr;
2968     struct dst_entry *dst = skb_dst(skb);
2969     + unsigned int head_room;
2970     struct ipv6hdr *hdr;
2971     u8 proto = fl6->flowi6_proto;
2972     int seg_len = skb->len;
2973     int hlimit = -1;
2974     u32 mtu;
2975    
2976     - if (opt) {
2977     - unsigned int head_room;
2978     + head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
2979     + if (opt)
2980     + head_room += opt->opt_nflen + opt->opt_flen;
2981    
2982     - /* First: exthdrs may take lots of space (~8K for now)
2983     - MAX_HEADER is not enough.
2984     - */
2985     - head_room = opt->opt_nflen + opt->opt_flen;
2986     - seg_len += head_room;
2987     - head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
2988     -
2989     - if (skb_headroom(skb) < head_room) {
2990     - struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
2991     - if (!skb2) {
2992     - IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
2993     - IPSTATS_MIB_OUTDISCARDS);
2994     - kfree_skb(skb);
2995     - return -ENOBUFS;
2996     - }
2997     - if (skb->sk)
2998     - skb_set_owner_w(skb2, skb->sk);
2999     - consume_skb(skb);
3000     - skb = skb2;
3001     + if (unlikely(skb_headroom(skb) < head_room)) {
3002     + struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
3003     + if (!skb2) {
3004     + IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
3005     + IPSTATS_MIB_OUTDISCARDS);
3006     + kfree_skb(skb);
3007     + return -ENOBUFS;
3008     }
3009     + if (skb->sk)
3010     + skb_set_owner_w(skb2, skb->sk);
3011     + consume_skb(skb);
3012     + skb = skb2;
3013     + }
3014     +
3015     + if (opt) {
3016     + seg_len += opt->opt_nflen + opt->opt_flen;
3017     +
3018     if (opt->opt_flen)
3019     ipv6_push_frag_opts(skb, opt, &proto);
3020     +
3021     if (opt->opt_nflen)
3022     ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
3023     &fl6->saddr);
3024     diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
3025     index 5ae8e1c51079..8b075f0bc351 100644
3026     --- a/net/ipv6/netfilter.c
3027     +++ b/net/ipv6/netfilter.c
3028     @@ -24,7 +24,8 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
3029     unsigned int hh_len;
3030     struct dst_entry *dst;
3031     struct flowi6 fl6 = {
3032     - .flowi6_oif = sk ? sk->sk_bound_dev_if : 0,
3033     + .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
3034     + rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
3035     .flowi6_mark = skb->mark,
3036     .flowi6_uid = sock_net_uid(net, sk),
3037     .daddr = iph->daddr,
3038     diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
3039     index 491f808e356a..29c7f1915a96 100644
3040     --- a/net/ipv6/netfilter/ip6t_MASQUERADE.c
3041     +++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
3042     @@ -58,8 +58,12 @@ static int __init masquerade_tg6_init(void)
3043     int err;
3044    
3045     err = xt_register_target(&masquerade_tg6_reg);
3046     - if (err == 0)
3047     - nf_nat_masquerade_ipv6_register_notifier();
3048     + if (err)
3049     + return err;
3050     +
3051     + err = nf_nat_masquerade_ipv6_register_notifier();
3052     + if (err)
3053     + xt_unregister_target(&masquerade_tg6_reg);
3054    
3055     return err;
3056     }
3057     diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
3058     index f76bd4d15704..043ed8eb0ab9 100644
3059     --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
3060     +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
3061     @@ -341,7 +341,7 @@ static bool
3062     nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
3063     {
3064     struct sk_buff *fp, *head = fq->q.fragments;
3065     - int payload_len;
3066     + int payload_len, delta;
3067     u8 ecn;
3068    
3069     inet_frag_kill(&fq->q);
3070     @@ -363,10 +363,16 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
3071     return false;
3072     }
3073    
3074     + delta = - head->truesize;
3075     +
3076     /* Head of list must not be cloned. */
3077     if (skb_unclone(head, GFP_ATOMIC))
3078     return false;
3079    
3080     + delta += head->truesize;
3081     + if (delta)
3082     + add_frag_mem_limit(fq->q.net, delta);
3083     +
3084     /* If the first fragment is fragmented itself, we split
3085     * it to two chunks: the first with data and paged part
3086     * and the second, holding only fragments. */
3087     diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
3088     index e6eb7cf9b54f..37b1d413c825 100644
3089     --- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
3090     +++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
3091     @@ -120,8 +120,8 @@ static void iterate_cleanup_work(struct work_struct *work)
3092     * of ipv6 addresses being deleted), we also need to add an upper
3093     * limit to the number of queued work items.
3094     */
3095     -static int masq_inet_event(struct notifier_block *this,
3096     - unsigned long event, void *ptr)
3097     +static int masq_inet6_event(struct notifier_block *this,
3098     + unsigned long event, void *ptr)
3099     {
3100     struct inet6_ifaddr *ifa = ptr;
3101     const struct net_device *dev;
3102     @@ -158,30 +158,53 @@ static int masq_inet_event(struct notifier_block *this,
3103     return NOTIFY_DONE;
3104     }
3105    
3106     -static struct notifier_block masq_inet_notifier = {
3107     - .notifier_call = masq_inet_event,
3108     +static struct notifier_block masq_inet6_notifier = {
3109     + .notifier_call = masq_inet6_event,
3110     };
3111    
3112     -static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
3113     +static int masq_refcnt;
3114     +static DEFINE_MUTEX(masq_mutex);
3115    
3116     -void nf_nat_masquerade_ipv6_register_notifier(void)
3117     +int nf_nat_masquerade_ipv6_register_notifier(void)
3118     {
3119     + int ret = 0;
3120     +
3121     + mutex_lock(&masq_mutex);
3122     /* check if the notifier is already set */
3123     - if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
3124     - return;
3125     + if (++masq_refcnt > 1)
3126     + goto out_unlock;
3127     +
3128     + ret = register_netdevice_notifier(&masq_dev_notifier);
3129     + if (ret)
3130     + goto err_dec;
3131     +
3132     + ret = register_inet6addr_notifier(&masq_inet6_notifier);
3133     + if (ret)
3134     + goto err_unregister;
3135    
3136     - register_netdevice_notifier(&masq_dev_notifier);
3137     - register_inet6addr_notifier(&masq_inet_notifier);
3138     + mutex_unlock(&masq_mutex);
3139     + return ret;
3140     +
3141     +err_unregister:
3142     + unregister_netdevice_notifier(&masq_dev_notifier);
3143     +err_dec:
3144     + masq_refcnt--;
3145     +out_unlock:
3146     + mutex_unlock(&masq_mutex);
3147     + return ret;
3148     }
3149     EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier);
3150    
3151     void nf_nat_masquerade_ipv6_unregister_notifier(void)
3152     {
3153     + mutex_lock(&masq_mutex);
3154     /* check if the notifier still has clients */
3155     - if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
3156     - return;
3157     + if (--masq_refcnt > 0)
3158     + goto out_unlock;
3159    
3160     - unregister_inet6addr_notifier(&masq_inet_notifier);
3161     + unregister_inet6addr_notifier(&masq_inet6_notifier);
3162     unregister_netdevice_notifier(&masq_dev_notifier);
3163     +out_unlock:
3164     + mutex_unlock(&masq_mutex);
3165     }
3166     EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier);
3167     diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
3168     index dd0122f3cffe..e06c82e9dfcd 100644
3169     --- a/net/ipv6/netfilter/nft_masq_ipv6.c
3170     +++ b/net/ipv6/netfilter/nft_masq_ipv6.c
3171     @@ -70,7 +70,9 @@ static int __init nft_masq_ipv6_module_init(void)
3172     if (ret < 0)
3173     return ret;
3174    
3175     - nf_nat_masquerade_ipv6_register_notifier();
3176     + ret = nf_nat_masquerade_ipv6_register_notifier();
3177     + if (ret)
3178     + nft_unregister_expr(&nft_masq_ipv6_type);
3179    
3180     return ret;
3181     }
3182     diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
3183     index 5c5b4f79296e..d3fd2d7e5aa4 100644
3184     --- a/net/ipv6/reassembly.c
3185     +++ b/net/ipv6/reassembly.c
3186     @@ -281,7 +281,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
3187     {
3188     struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
3189     struct sk_buff *fp, *head = fq->q.fragments;
3190     - int payload_len;
3191     + int payload_len, delta;
3192     unsigned int nhoff;
3193     int sum_truesize;
3194     u8 ecn;
3195     @@ -322,10 +322,16 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
3196     if (payload_len > IPV6_MAXPLEN)
3197     goto out_oversize;
3198    
3199     + delta = - head->truesize;
3200     +
3201     /* Head of list must not be cloned. */
3202     if (skb_unclone(head, GFP_ATOMIC))
3203     goto out_oom;
3204    
3205     + delta += head->truesize;
3206     + if (delta)
3207     + add_frag_mem_limit(fq->q.net, delta);
3208     +
3209     /* If the first fragment is fragmented itself, we split
3210     * it to two chunks: the first with data and paged part
3211     * and the second, holding only fragments. */
3212     diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
3213     index a8854dd3e9c5..8181ee7e1e27 100644
3214     --- a/net/ipv6/seg6_iptunnel.c
3215     +++ b/net/ipv6/seg6_iptunnel.c
3216     @@ -347,6 +347,7 @@ static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
3217     struct ipv6hdr *hdr = ipv6_hdr(skb);
3218     struct flowi6 fl6;
3219    
3220     + memset(&fl6, 0, sizeof(fl6));
3221     fl6.daddr = hdr->daddr;
3222     fl6.saddr = hdr->saddr;
3223     fl6.flowlabel = ip6_flowinfo(hdr);
3224     diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
3225     index 62eefea48973..518364f4abcc 100644
3226     --- a/net/netfilter/ipvs/ip_vs_ctl.c
3227     +++ b/net/netfilter/ipvs/ip_vs_ctl.c
3228     @@ -3980,6 +3980,9 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs)
3229    
3230     static struct notifier_block ip_vs_dst_notifier = {
3231     .notifier_call = ip_vs_dst_event,
3232     +#ifdef CONFIG_IP_VS_IPV6
3233     + .priority = ADDRCONF_NOTIFY_PRIORITY + 5,
3234     +#endif
3235     };
3236    
3237     int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
3238     diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
3239     index 02ca7df793f5..b6d0f6deea86 100644
3240     --- a/net/netfilter/nf_conncount.c
3241     +++ b/net/netfilter/nf_conncount.c
3242     @@ -49,6 +49,7 @@ struct nf_conncount_tuple {
3243     struct nf_conntrack_zone zone;
3244     int cpu;
3245     u32 jiffies32;
3246     + bool dead;
3247     struct rcu_head rcu_head;
3248     };
3249    
3250     @@ -106,15 +107,16 @@ nf_conncount_add(struct nf_conncount_list *list,
3251     conn->zone = *zone;
3252     conn->cpu = raw_smp_processor_id();
3253     conn->jiffies32 = (u32)jiffies;
3254     - spin_lock(&list->list_lock);
3255     + conn->dead = false;
3256     + spin_lock_bh(&list->list_lock);
3257     if (list->dead == true) {
3258     kmem_cache_free(conncount_conn_cachep, conn);
3259     - spin_unlock(&list->list_lock);
3260     + spin_unlock_bh(&list->list_lock);
3261     return NF_CONNCOUNT_SKIP;
3262     }
3263     list_add_tail(&conn->node, &list->head);
3264     list->count++;
3265     - spin_unlock(&list->list_lock);
3266     + spin_unlock_bh(&list->list_lock);
3267     return NF_CONNCOUNT_ADDED;
3268     }
3269     EXPORT_SYMBOL_GPL(nf_conncount_add);
3270     @@ -132,19 +134,22 @@ static bool conn_free(struct nf_conncount_list *list,
3271     {
3272     bool free_entry = false;
3273    
3274     - spin_lock(&list->list_lock);
3275     + spin_lock_bh(&list->list_lock);
3276    
3277     - if (list->count == 0) {
3278     - spin_unlock(&list->list_lock);
3279     - return free_entry;
3280     + if (conn->dead) {
3281     + spin_unlock_bh(&list->list_lock);
3282     + return free_entry;
3283     }
3284    
3285     list->count--;
3286     + conn->dead = true;
3287     list_del_rcu(&conn->node);
3288     - if (list->count == 0)
3289     + if (list->count == 0) {
3290     + list->dead = true;
3291     free_entry = true;
3292     + }
3293    
3294     - spin_unlock(&list->list_lock);
3295     + spin_unlock_bh(&list->list_lock);
3296     call_rcu(&conn->rcu_head, __conn_free);
3297     return free_entry;
3298     }
3299     @@ -245,7 +250,7 @@ void nf_conncount_list_init(struct nf_conncount_list *list)
3300     {
3301     spin_lock_init(&list->list_lock);
3302     INIT_LIST_HEAD(&list->head);
3303     - list->count = 1;
3304     + list->count = 0;
3305     list->dead = false;
3306     }
3307     EXPORT_SYMBOL_GPL(nf_conncount_list_init);
3308     @@ -259,6 +264,7 @@ bool nf_conncount_gc_list(struct net *net,
3309     struct nf_conn *found_ct;
3310     unsigned int collected = 0;
3311     bool free_entry = false;
3312     + bool ret = false;
3313    
3314     list_for_each_entry_safe(conn, conn_n, &list->head, node) {
3315     found = find_or_evict(net, list, conn, &free_entry);
3316     @@ -288,7 +294,15 @@ bool nf_conncount_gc_list(struct net *net,
3317     if (collected > CONNCOUNT_GC_MAX_NODES)
3318     return false;
3319     }
3320     - return false;
3321     +
3322     + spin_lock_bh(&list->list_lock);
3323     + if (!list->count) {
3324     + list->dead = true;
3325     + ret = true;
3326     + }
3327     + spin_unlock_bh(&list->list_lock);
3328     +
3329     + return ret;
3330     }
3331     EXPORT_SYMBOL_GPL(nf_conncount_gc_list);
3332    
3333     @@ -309,11 +323,8 @@ static void tree_nodes_free(struct rb_root *root,
3334     while (gc_count) {
3335     rbconn = gc_nodes[--gc_count];
3336     spin_lock(&rbconn->list.list_lock);
3337     - if (rbconn->list.count == 0 && rbconn->list.dead == false) {
3338     - rbconn->list.dead = true;
3339     - rb_erase(&rbconn->node, root);
3340     - call_rcu(&rbconn->rcu_head, __tree_nodes_free);
3341     - }
3342     + rb_erase(&rbconn->node, root);
3343     + call_rcu(&rbconn->rcu_head, __tree_nodes_free);
3344     spin_unlock(&rbconn->list.list_lock);
3345     }
3346     }
3347     @@ -414,6 +425,7 @@ insert_tree(struct net *net,
3348     nf_conncount_list_init(&rbconn->list);
3349     list_add(&conn->node, &rbconn->list.head);
3350     count = 1;
3351     + rbconn->list.count = count;
3352    
3353     rb_link_node(&rbconn->node, parent, rbnode);
3354     rb_insert_color(&rbconn->node, root);
3355     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
3356     index 2cfb173cd0b2..fe0558b15fd3 100644
3357     --- a/net/netfilter/nf_tables_api.c
3358     +++ b/net/netfilter/nf_tables_api.c
3359     @@ -2432,7 +2432,7 @@ err:
3360     static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
3361     struct nft_rule *rule)
3362     {
3363     - struct nft_expr *expr;
3364     + struct nft_expr *expr, *next;
3365    
3366     lockdep_assert_held(&ctx->net->nft.commit_mutex);
3367     /*
3368     @@ -2441,8 +2441,9 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
3369     */
3370     expr = nft_expr_first(rule);
3371     while (expr != nft_expr_last(rule) && expr->ops) {
3372     + next = nft_expr_next(expr);
3373     nf_tables_expr_destroy(ctx, expr);
3374     - expr = nft_expr_next(expr);
3375     + expr = next;
3376     }
3377     kfree(rule);
3378     }
3379     @@ -2645,21 +2646,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
3380     }
3381    
3382     if (nlh->nlmsg_flags & NLM_F_REPLACE) {
3383     - if (!nft_is_active_next(net, old_rule)) {
3384     - err = -ENOENT;
3385     - goto err2;
3386     - }
3387     - trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
3388     - old_rule);
3389     + trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
3390     if (trans == NULL) {
3391     err = -ENOMEM;
3392     goto err2;
3393     }
3394     - nft_deactivate_next(net, old_rule);
3395     - chain->use--;
3396     -
3397     - if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
3398     - err = -ENOMEM;
3399     + err = nft_delrule(&ctx, old_rule);
3400     + if (err < 0) {
3401     + nft_trans_destroy(trans);
3402     goto err2;
3403     }
3404    
3405     @@ -6277,7 +6271,7 @@ static void nf_tables_commit_chain_free_rules_old(struct nft_rule **rules)
3406     call_rcu(&old->h, __nf_tables_commit_chain_free_rules_old);
3407     }
3408    
3409     -static void nf_tables_commit_chain_active(struct net *net, struct nft_chain *chain)
3410     +static void nf_tables_commit_chain(struct net *net, struct nft_chain *chain)
3411     {
3412     struct nft_rule **g0, **g1;
3413     bool next_genbit;
3414     @@ -6363,11 +6357,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
3415    
3416     /* step 2. Make rules_gen_X visible to packet path */
3417     list_for_each_entry(table, &net->nft.tables, list) {
3418     - list_for_each_entry(chain, &table->chains, list) {
3419     - if (!nft_is_active_next(net, chain))
3420     - continue;
3421     - nf_tables_commit_chain_active(net, chain);
3422     - }
3423     + list_for_each_entry(chain, &table->chains, list)
3424     + nf_tables_commit_chain(net, chain);
3425     }
3426    
3427     /*
3428     diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
3429     index ad2fe6a7e47d..29d6fc73caf9 100644
3430     --- a/net/netfilter/nft_compat.c
3431     +++ b/net/netfilter/nft_compat.c
3432     @@ -501,6 +501,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
3433     void *info)
3434     {
3435     struct xt_match *match = expr->ops->data;
3436     + struct module *me = match->me;
3437     struct xt_mtdtor_param par;
3438    
3439     par.net = ctx->net;
3440     @@ -511,7 +512,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
3441     par.match->destroy(&par);
3442    
3443     if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
3444     - module_put(match->me);
3445     + module_put(me);
3446     }
3447    
3448     static void
3449     diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
3450     index d6bab8c3cbb0..5fd4c57c79cc 100644
3451     --- a/net/netfilter/nft_flow_offload.c
3452     +++ b/net/netfilter/nft_flow_offload.c
3453     @@ -214,7 +214,9 @@ static int __init nft_flow_offload_module_init(void)
3454     {
3455     int err;
3456    
3457     - register_netdevice_notifier(&flow_offload_netdev_notifier);
3458     + err = register_netdevice_notifier(&flow_offload_netdev_notifier);
3459     + if (err)
3460     + goto err;
3461    
3462     err = nft_register_expr(&nft_flow_offload_type);
3463     if (err < 0)
3464     @@ -224,6 +226,7 @@ static int __init nft_flow_offload_module_init(void)
3465    
3466     register_expr:
3467     unregister_netdevice_notifier(&flow_offload_netdev_notifier);
3468     +err:
3469     return err;
3470     }
3471    
3472     diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
3473     index dec843cadf46..9e05c86ba5c4 100644
3474     --- a/net/netfilter/xt_RATEEST.c
3475     +++ b/net/netfilter/xt_RATEEST.c
3476     @@ -201,18 +201,8 @@ static __net_init int xt_rateest_net_init(struct net *net)
3477     return 0;
3478     }
3479    
3480     -static void __net_exit xt_rateest_net_exit(struct net *net)
3481     -{
3482     - struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
3483     - int i;
3484     -
3485     - for (i = 0; i < ARRAY_SIZE(xn->hash); i++)
3486     - WARN_ON_ONCE(!hlist_empty(&xn->hash[i]));
3487     -}
3488     -
3489     static struct pernet_operations xt_rateest_net_ops = {
3490     .init = xt_rateest_net_init,
3491     - .exit = xt_rateest_net_exit,
3492     .id = &xt_rateest_id,
3493     .size = sizeof(struct xt_rateest_net),
3494     };
3495     diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
3496     index 3e7d259e5d8d..1ad4017f9b73 100644
3497     --- a/net/netfilter/xt_hashlimit.c
3498     +++ b/net/netfilter/xt_hashlimit.c
3499     @@ -295,9 +295,10 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
3500    
3501     /* copy match config into hashtable config */
3502     ret = cfg_copy(&hinfo->cfg, (void *)cfg, 3);
3503     -
3504     - if (ret)
3505     + if (ret) {
3506     + vfree(hinfo);
3507     return ret;
3508     + }
3509    
3510     hinfo->cfg.size = size;
3511     if (hinfo->cfg.max == 0)
3512     @@ -814,7 +815,6 @@ hashlimit_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
3513     int ret;
3514    
3515     ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
3516     -
3517     if (ret)
3518     return ret;
3519    
3520     @@ -830,7 +830,6 @@ hashlimit_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
3521     int ret;
3522    
3523     ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
3524     -
3525     if (ret)
3526     return ret;
3527    
3528     @@ -921,7 +920,6 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
3529     return ret;
3530    
3531     ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
3532     -
3533     if (ret)
3534     return ret;
3535    
3536     @@ -940,7 +938,6 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
3537     return ret;
3538    
3539     ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
3540     -
3541     if (ret)
3542     return ret;
3543    
3544     diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
3545     index ad18a2052416..74c0f656f28c 100644
3546     --- a/net/sched/sch_netem.c
3547     +++ b/net/sched/sch_netem.c
3548     @@ -441,6 +441,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
3549     int count = 1;
3550     int rc = NET_XMIT_SUCCESS;
3551    
3552     + /* Do not fool qdisc_drop_all() */
3553     + skb->prev = NULL;
3554     +
3555     /* Random duplication */
3556     if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
3557     ++count;
3558     diff --git a/net/sctp/associola.c b/net/sctp/associola.c
3559     index 6a28b96e779e..914750b819b2 100644
3560     --- a/net/sctp/associola.c
3561     +++ b/net/sctp/associola.c
3562     @@ -118,9 +118,6 @@ static struct sctp_association *sctp_association_init(
3563     asoc->flowlabel = sp->flowlabel;
3564     asoc->dscp = sp->dscp;
3565    
3566     - /* Initialize default path MTU. */
3567     - asoc->pathmtu = sp->pathmtu;
3568     -
3569     /* Set association default SACK delay */
3570     asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
3571     asoc->sackfreq = sp->sackfreq;
3572     @@ -252,6 +249,10 @@ static struct sctp_association *sctp_association_init(
3573     0, gfp))
3574     goto fail_init;
3575    
3576     + /* Initialize default path MTU. */
3577     + asoc->pathmtu = sp->pathmtu;
3578     + sctp_assoc_update_frag_point(asoc);
3579     +
3580     /* Assume that peer would support both address types unless we are
3581     * told otherwise.
3582     */
3583     @@ -434,7 +435,7 @@ static void sctp_association_destroy(struct sctp_association *asoc)
3584    
3585     WARN_ON(atomic_read(&asoc->rmem_alloc));
3586    
3587     - kfree(asoc);
3588     + kfree_rcu(asoc, rcu);
3589     SCTP_DBG_OBJCNT_DEC(assoc);
3590     }
3591    
3592     diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
3593     index 4a4fd1971255..f4ac6c592e13 100644
3594     --- a/net/sctp/sm_make_chunk.c
3595     +++ b/net/sctp/sm_make_chunk.c
3596     @@ -2462,6 +2462,9 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
3597     asoc->c.sinit_max_instreams, gfp))
3598     goto clean_up;
3599    
3600     + /* Update frag_point when stream_interleave may get changed. */
3601     + sctp_assoc_update_frag_point(asoc);
3602     +
3603     if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
3604     goto clean_up;
3605    
3606     diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
3607     index 64c3cb0fb926..654a50319198 100644
3608     --- a/sound/firewire/fireface/ff-protocol-ff400.c
3609     +++ b/sound/firewire/fireface/ff-protocol-ff400.c
3610     @@ -30,7 +30,7 @@ static int ff400_get_clock(struct snd_ff *ff, unsigned int *rate,
3611     int err;
3612    
3613     err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
3614     - FF400_SYNC_STATUS, &reg, sizeof(reg), 0);
3615     + FF400_CLOCK_CONFIG, &reg, sizeof(reg), 0);
3616     if (err < 0)
3617     return err;
3618     data = le32_to_cpu(reg);
3619     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3620     index 22ca1f0a858f..8a3d0694d2e5 100644
3621     --- a/sound/pci/hda/patch_realtek.c
3622     +++ b/sound/pci/hda/patch_realtek.c
3623     @@ -5520,6 +5520,9 @@ enum {
3624     ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
3625     ALC295_FIXUP_HP_AUTO_MUTE,
3626     ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
3627     + ALC294_FIXUP_ASUS_MIC,
3628     + ALC294_FIXUP_ASUS_HEADSET_MIC,
3629     + ALC294_FIXUP_ASUS_SPK,
3630     };
3631    
3632     static const struct hda_fixup alc269_fixups[] = {
3633     @@ -6392,6 +6395,8 @@ static const struct hda_fixup alc269_fixups[] = {
3634     [ALC285_FIXUP_LENOVO_HEADPHONE_NOISE] = {
3635     .type = HDA_FIXUP_FUNC,
3636     .v.func = alc285_fixup_invalidate_dacs,
3637     + .chained = true,
3638     + .chain_id = ALC269_FIXUP_THINKPAD_ACPI
3639     },
3640     [ALC295_FIXUP_HP_AUTO_MUTE] = {
3641     .type = HDA_FIXUP_FUNC,
3642     @@ -6406,6 +6411,36 @@ static const struct hda_fixup alc269_fixups[] = {
3643     .chained = true,
3644     .chain_id = ALC269_FIXUP_HEADSET_MIC
3645     },
3646     + [ALC294_FIXUP_ASUS_MIC] = {
3647     + .type = HDA_FIXUP_PINS,
3648     + .v.pins = (const struct hda_pintbl[]) {
3649     + { 0x13, 0x90a60160 }, /* use as internal mic */
3650     + { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
3651     + { }
3652     + },
3653     + .chained = true,
3654     + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
3655     + },
3656     + [ALC294_FIXUP_ASUS_HEADSET_MIC] = {
3657     + .type = HDA_FIXUP_PINS,
3658     + .v.pins = (const struct hda_pintbl[]) {
3659     + { 0x19, 0x01a1113c }, /* use as headset mic, without its own jack detect */
3660     + { }
3661     + },
3662     + .chained = true,
3663     + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
3664     + },
3665     + [ALC294_FIXUP_ASUS_SPK] = {
3666     + .type = HDA_FIXUP_VERBS,
3667     + .v.verbs = (const struct hda_verb[]) {
3668     + /* Set EAPD high */
3669     + { 0x20, AC_VERB_SET_COEF_INDEX, 0x40 },
3670     + { 0x20, AC_VERB_SET_PROC_COEF, 0x8800 },
3671     + { }
3672     + },
3673     + .chained = true,
3674     + .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
3675     + },
3676     };
3677    
3678     static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3679     @@ -6548,6 +6583,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3680     SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
3681     SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
3682     SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
3683     + SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK),
3684     SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
3685     SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
3686     SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
3687     @@ -7155,6 +7191,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
3688     SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
3689     ALC292_STANDARD_PINS,
3690     {0x13, 0x90a60140}),
3691     + SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_MIC,
3692     + {0x14, 0x90170110},
3693     + {0x1b, 0x90a70130},
3694     + {0x21, 0x04211020}),
3695     + SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
3696     + {0x12, 0x90a60130},
3697     + {0x17, 0x90170110},
3698     + {0x21, 0x04211020}),
3699     SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
3700     ALC295_STANDARD_PINS,
3701     {0x17, 0x21014020},
3702     @@ -7227,6 +7271,37 @@ static void alc269_fill_coef(struct hda_codec *codec)
3703     alc_update_coef_idx(codec, 0x4, 0, 1<<11);
3704     }
3705    
3706     +static void alc294_hp_init(struct hda_codec *codec)
3707     +{
3708     + struct alc_spec *spec = codec->spec;
3709     + hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
3710     + int i, val;
3711     +
3712     + if (!hp_pin)
3713     + return;
3714     +
3715     + snd_hda_codec_write(codec, hp_pin, 0,
3716     + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
3717     +
3718     + msleep(100);
3719     +
3720     + snd_hda_codec_write(codec, hp_pin, 0,
3721     + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
3722     +
3723     + alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
3724     + alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
3725     +
3726     + /* Wait for depop procedure finish */
3727     + val = alc_read_coefex_idx(codec, 0x58, 0x01);
3728     + for (i = 0; i < 20 && val & 0x0080; i++) {
3729     + msleep(50);
3730     + val = alc_read_coefex_idx(codec, 0x58, 0x01);
3731     + }
3732     + /* Set HP depop to auto mode */
3733     + alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
3734     + msleep(50);
3735     +}
3736     +
3737     /*
3738     */
3739     static int patch_alc269(struct hda_codec *codec)
3740     @@ -7352,6 +7427,7 @@ static int patch_alc269(struct hda_codec *codec)
3741     spec->codec_variant = ALC269_TYPE_ALC294;
3742     spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
3743     alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
3744     + alc294_hp_init(codec);
3745     break;
3746     case 0x10ec0300:
3747     spec->codec_variant = ALC269_TYPE_ALC300;
3748     @@ -7363,6 +7439,7 @@ static int patch_alc269(struct hda_codec *codec)
3749     spec->codec_variant = ALC269_TYPE_ALC700;
3750     spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
3751     alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
3752     + alc294_hp_init(codec);
3753     break;
3754    
3755     }
3756     diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
3757     index 7b8533abf637..b61d518f4fef 100644
3758     --- a/sound/soc/codecs/hdac_hdmi.c
3759     +++ b/sound/soc/codecs/hdac_hdmi.c
3760     @@ -2184,11 +2184,6 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
3761     */
3762     snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
3763     AC_PWRST_D3);
3764     - err = snd_hdac_display_power(bus, false);
3765     - if (err < 0) {
3766     - dev_err(dev, "Cannot turn on display power on i915\n");
3767     - return err;
3768     - }
3769    
3770     hlink = snd_hdac_ext_bus_get_link(bus, dev_name(dev));
3771     if (!hlink) {
3772     @@ -2198,7 +2193,11 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
3773    
3774     snd_hdac_ext_bus_link_put(bus, hlink);
3775    
3776     - return 0;
3777     + err = snd_hdac_display_power(bus, false);
3778     + if (err < 0)
3779     + dev_err(dev, "Cannot turn off display power on i915\n");
3780     +
3781     + return err;
3782     }
3783    
3784     static int hdac_hdmi_runtime_resume(struct device *dev)
3785     diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
3786     index f61656070225..4d3ec295679d 100644
3787     --- a/sound/soc/codecs/wm_adsp.c
3788     +++ b/sound/soc/codecs/wm_adsp.c
3789     @@ -765,38 +765,41 @@ static unsigned int wm_adsp_region_to_reg(struct wm_adsp_region const *mem,
3790    
3791     static void wm_adsp2_show_fw_status(struct wm_adsp *dsp)
3792     {
3793     - u16 scratch[4];
3794     + unsigned int scratch[4];
3795     + unsigned int addr = dsp->base + ADSP2_SCRATCH0;
3796     + unsigned int i;
3797     int ret;
3798    
3799     - ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2_SCRATCH0,
3800     - scratch, sizeof(scratch));
3801     - if (ret) {
3802     - adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret);
3803     - return;
3804     + for (i = 0; i < ARRAY_SIZE(scratch); ++i) {
3805     + ret = regmap_read(dsp->regmap, addr + i, &scratch[i]);
3806     + if (ret) {
3807     + adsp_err(dsp, "Failed to read SCRATCH%u: %d\n", i, ret);
3808     + return;
3809     + }
3810     }
3811    
3812     adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
3813     - be16_to_cpu(scratch[0]),
3814     - be16_to_cpu(scratch[1]),
3815     - be16_to_cpu(scratch[2]),
3816     - be16_to_cpu(scratch[3]));
3817     + scratch[0], scratch[1], scratch[2], scratch[3]);
3818     }
3819    
3820     static void wm_adsp2v2_show_fw_status(struct wm_adsp *dsp)
3821     {
3822     - u32 scratch[2];
3823     + unsigned int scratch[2];
3824     int ret;
3825    
3826     - ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1,
3827     - scratch, sizeof(scratch));
3828     -
3829     + ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1,
3830     + &scratch[0]);
3831     if (ret) {
3832     - adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret);
3833     + adsp_err(dsp, "Failed to read SCRATCH0_1: %d\n", ret);
3834     return;
3835     }
3836    
3837     - scratch[0] = be32_to_cpu(scratch[0]);
3838     - scratch[1] = be32_to_cpu(scratch[1]);
3839     + ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH2_3,
3840     + &scratch[1]);
3841     + if (ret) {
3842     + adsp_err(dsp, "Failed to read SCRATCH2_3: %d\n", ret);
3843     + return;
3844     + }
3845    
3846     adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
3847     scratch[0] & 0xFFFF,
3848     diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
3849     index 1d17be0f78a0..50f16a0f6535 100644
3850     --- a/sound/soc/intel/skylake/skl.c
3851     +++ b/sound/soc/intel/skylake/skl.c
3852     @@ -752,6 +752,12 @@ static void skl_probe_work(struct work_struct *work)
3853     }
3854     }
3855    
3856     + /*
3857     + * we are done probing so decrement link counts
3858     + */
3859     + list_for_each_entry(hlink, &bus->hlink_list, list)
3860     + snd_hdac_ext_bus_link_put(bus, hlink);
3861     +
3862     if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
3863     err = snd_hdac_display_power(bus, false);
3864     if (err < 0) {
3865     @@ -761,12 +767,6 @@ static void skl_probe_work(struct work_struct *work)
3866     }
3867     }
3868    
3869     - /*
3870     - * we are done probing so decrement link counts
3871     - */
3872     - list_for_each_entry(hlink, &bus->hlink_list, list)
3873     - snd_hdac_ext_bus_link_put(bus, hlink);
3874     -
3875     /* configure PM */
3876     pm_runtime_put_noidle(bus->dev);
3877     pm_runtime_allow(bus->dev);
3878     diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
3879     index d5ae9eb8c756..fed45b41f9d3 100644
3880     --- a/sound/soc/omap/omap-abe-twl6040.c
3881     +++ b/sound/soc/omap/omap-abe-twl6040.c
3882     @@ -36,6 +36,8 @@
3883     #include "../codecs/twl6040.h"
3884    
3885     struct abe_twl6040 {
3886     + struct snd_soc_card card;
3887     + struct snd_soc_dai_link dai_links[2];
3888     int jack_detection; /* board can detect jack events */
3889     int mclk_freq; /* MCLK frequency speed for twl6040 */
3890     };
3891     @@ -208,40 +210,10 @@ static int omap_abe_dmic_init(struct snd_soc_pcm_runtime *rtd)
3892     ARRAY_SIZE(dmic_audio_map));
3893     }
3894    
3895     -/* Digital audio interface glue - connects codec <--> CPU */
3896     -static struct snd_soc_dai_link abe_twl6040_dai_links[] = {
3897     - {
3898     - .name = "TWL6040",
3899     - .stream_name = "TWL6040",
3900     - .codec_dai_name = "twl6040-legacy",
3901     - .codec_name = "twl6040-codec",
3902     - .init = omap_abe_twl6040_init,
3903     - .ops = &omap_abe_ops,
3904     - },
3905     - {
3906     - .name = "DMIC",
3907     - .stream_name = "DMIC Capture",
3908     - .codec_dai_name = "dmic-hifi",
3909     - .codec_name = "dmic-codec",
3910     - .init = omap_abe_dmic_init,
3911     - .ops = &omap_abe_dmic_ops,
3912     - },
3913     -};
3914     -
3915     -/* Audio machine driver */
3916     -static struct snd_soc_card omap_abe_card = {
3917     - .owner = THIS_MODULE,
3918     -
3919     - .dapm_widgets = twl6040_dapm_widgets,
3920     - .num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets),
3921     - .dapm_routes = audio_map,
3922     - .num_dapm_routes = ARRAY_SIZE(audio_map),
3923     -};
3924     -
3925     static int omap_abe_probe(struct platform_device *pdev)
3926     {
3927     struct device_node *node = pdev->dev.of_node;
3928     - struct snd_soc_card *card = &omap_abe_card;
3929     + struct snd_soc_card *card;
3930     struct device_node *dai_node;
3931     struct abe_twl6040 *priv;
3932     int num_links = 0;
3933     @@ -252,12 +224,18 @@ static int omap_abe_probe(struct platform_device *pdev)
3934     return -ENODEV;
3935     }
3936    
3937     - card->dev = &pdev->dev;
3938     -
3939     priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
3940     if (priv == NULL)
3941     return -ENOMEM;
3942    
3943     + card = &priv->card;
3944     + card->dev = &pdev->dev;
3945     + card->owner = THIS_MODULE;
3946     + card->dapm_widgets = twl6040_dapm_widgets;
3947     + card->num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets);
3948     + card->dapm_routes = audio_map;
3949     + card->num_dapm_routes = ARRAY_SIZE(audio_map);
3950     +
3951     if (snd_soc_of_parse_card_name(card, "ti,model")) {
3952     dev_err(&pdev->dev, "Card name is not provided\n");
3953     return -ENODEV;
3954     @@ -274,14 +252,27 @@ static int omap_abe_probe(struct platform_device *pdev)
3955     dev_err(&pdev->dev, "McPDM node is not provided\n");
3956     return -EINVAL;
3957     }
3958     - abe_twl6040_dai_links[0].cpu_of_node = dai_node;
3959     - abe_twl6040_dai_links[0].platform_of_node = dai_node;
3960     +
3961     + priv->dai_links[0].name = "DMIC";
3962     + priv->dai_links[0].stream_name = "TWL6040";
3963     + priv->dai_links[0].cpu_of_node = dai_node;
3964     + priv->dai_links[0].platform_of_node = dai_node;
3965     + priv->dai_links[0].codec_dai_name = "twl6040-legacy";
3966     + priv->dai_links[0].codec_name = "twl6040-codec";
3967     + priv->dai_links[0].init = omap_abe_twl6040_init;
3968     + priv->dai_links[0].ops = &omap_abe_ops;
3969    
3970     dai_node = of_parse_phandle(node, "ti,dmic", 0);
3971     if (dai_node) {
3972     num_links = 2;
3973     - abe_twl6040_dai_links[1].cpu_of_node = dai_node;
3974     - abe_twl6040_dai_links[1].platform_of_node = dai_node;
3975     + priv->dai_links[1].name = "TWL6040";
3976     + priv->dai_links[1].stream_name = "DMIC Capture";
3977     + priv->dai_links[1].cpu_of_node = dai_node;
3978     + priv->dai_links[1].platform_of_node = dai_node;
3979     + priv->dai_links[1].codec_dai_name = "dmic-hifi";
3980     + priv->dai_links[1].codec_name = "dmic-codec";
3981     + priv->dai_links[1].init = omap_abe_dmic_init;
3982     + priv->dai_links[1].ops = &omap_abe_dmic_ops;
3983     } else {
3984     num_links = 1;
3985     }
3986     @@ -300,7 +291,7 @@ static int omap_abe_probe(struct platform_device *pdev)
3987     return -ENODEV;
3988     }
3989    
3990     - card->dai_link = abe_twl6040_dai_links;
3991     + card->dai_link = priv->dai_links;
3992     card->num_links = num_links;
3993    
3994     snd_soc_card_set_drvdata(card, priv);
3995     diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
3996     index fe966272bd0c..cba9645b6487 100644
3997     --- a/sound/soc/omap/omap-dmic.c
3998     +++ b/sound/soc/omap/omap-dmic.c
3999     @@ -48,6 +48,8 @@ struct omap_dmic {
4000     struct device *dev;
4001     void __iomem *io_base;
4002     struct clk *fclk;
4003     + struct pm_qos_request pm_qos_req;
4004     + int latency;
4005     int fclk_freq;
4006     int out_freq;
4007     int clk_div;
4008     @@ -124,6 +126,8 @@ static void omap_dmic_dai_shutdown(struct snd_pcm_substream *substream,
4009    
4010     mutex_lock(&dmic->mutex);
4011    
4012     + pm_qos_remove_request(&dmic->pm_qos_req);
4013     +
4014     if (!dai->active)
4015     dmic->active = 0;
4016    
4017     @@ -228,6 +232,8 @@ static int omap_dmic_dai_hw_params(struct snd_pcm_substream *substream,
4018     /* packet size is threshold * channels */
4019     dma_data = snd_soc_dai_get_dma_data(dai, substream);
4020     dma_data->maxburst = dmic->threshold * channels;
4021     + dmic->latency = (OMAP_DMIC_THRES_MAX - dmic->threshold) * USEC_PER_SEC /
4022     + params_rate(params);
4023    
4024     return 0;
4025     }
4026     @@ -238,6 +244,9 @@ static int omap_dmic_dai_prepare(struct snd_pcm_substream *substream,
4027     struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
4028     u32 ctrl;
4029    
4030     + if (pm_qos_request_active(&dmic->pm_qos_req))
4031     + pm_qos_update_request(&dmic->pm_qos_req, dmic->latency);
4032     +
4033     /* Configure uplink threshold */
4034     omap_dmic_write(dmic, OMAP_DMIC_FIFO_CTRL_REG, dmic->threshold);
4035    
4036     diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
4037     index d0ebb6b9bfac..2d6decbfc99e 100644
4038     --- a/sound/soc/omap/omap-mcbsp.c
4039     +++ b/sound/soc/omap/omap-mcbsp.c
4040     @@ -308,9 +308,9 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
4041     pkt_size = channels;
4042     }
4043    
4044     - latency = ((((buffer_size - pkt_size) / channels) * 1000)
4045     - / (params->rate_num / params->rate_den));
4046     -
4047     + latency = (buffer_size - pkt_size) / channels;
4048     + latency = latency * USEC_PER_SEC /
4049     + (params->rate_num / params->rate_den);
4050     mcbsp->latency[substream->stream] = latency;
4051    
4052     omap_mcbsp_set_threshold(substream, pkt_size);
4053     diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
4054     index 4c1be36c2207..7d5bdc5a2890 100644
4055     --- a/sound/soc/omap/omap-mcpdm.c
4056     +++ b/sound/soc/omap/omap-mcpdm.c
4057     @@ -54,6 +54,8 @@ struct omap_mcpdm {
4058     unsigned long phys_base;
4059     void __iomem *io_base;
4060     int irq;
4061     + struct pm_qos_request pm_qos_req;
4062     + int latency[2];
4063    
4064     struct mutex mutex;
4065    
4066     @@ -277,6 +279,9 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
4067     struct snd_soc_dai *dai)
4068     {
4069     struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
4070     + int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
4071     + int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
4072     + int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
4073    
4074     mutex_lock(&mcpdm->mutex);
4075    
4076     @@ -289,6 +294,14 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
4077     }
4078     }
4079    
4080     + if (mcpdm->latency[stream2])
4081     + pm_qos_update_request(&mcpdm->pm_qos_req,
4082     + mcpdm->latency[stream2]);
4083     + else if (mcpdm->latency[stream1])
4084     + pm_qos_remove_request(&mcpdm->pm_qos_req);
4085     +
4086     + mcpdm->latency[stream1] = 0;
4087     +
4088     mutex_unlock(&mcpdm->mutex);
4089     }
4090    
4091     @@ -300,7 +313,7 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
4092     int stream = substream->stream;
4093     struct snd_dmaengine_dai_dma_data *dma_data;
4094     u32 threshold;
4095     - int channels;
4096     + int channels, latency;
4097     int link_mask = 0;
4098    
4099     channels = params_channels(params);
4100     @@ -344,14 +357,25 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
4101    
4102     dma_data->maxburst =
4103     (MCPDM_DN_THRES_MAX - threshold) * channels;
4104     + latency = threshold;
4105     } else {
4106     /* If playback is not running assume a stereo stream to come */
4107     if (!mcpdm->config[!stream].link_mask)
4108     mcpdm->config[!stream].link_mask = (0x3 << 3);
4109    
4110     dma_data->maxburst = threshold * channels;
4111     + latency = (MCPDM_DN_THRES_MAX - threshold);
4112     }
4113    
4114     + /*
4115     + * The DMA must act to a DMA request within latency time (usec) to avoid
4116     + * under/overflow
4117     + */
4118     + mcpdm->latency[stream] = latency * USEC_PER_SEC / params_rate(params);
4119     +
4120     + if (!mcpdm->latency[stream])
4121     + mcpdm->latency[stream] = 10;
4122     +
4123     /* Check if we need to restart McPDM with this stream */
4124     if (mcpdm->config[stream].link_mask &&
4125     mcpdm->config[stream].link_mask != link_mask)
4126     @@ -366,6 +390,20 @@ static int omap_mcpdm_prepare(struct snd_pcm_substream *substream,
4127     struct snd_soc_dai *dai)
4128     {
4129     struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
4130     + struct pm_qos_request *pm_qos_req = &mcpdm->pm_qos_req;
4131     + int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
4132     + int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
4133     + int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
4134     + int latency = mcpdm->latency[stream2];
4135     +
4136     + /* Prevent omap hardware from hitting off between FIFO fills */
4137     + if (!latency || mcpdm->latency[stream1] < latency)
4138     + latency = mcpdm->latency[stream1];
4139     +
4140     + if (pm_qos_request_active(pm_qos_req))
4141     + pm_qos_update_request(pm_qos_req, latency);
4142     + else if (latency)
4143     + pm_qos_add_request(pm_qos_req, PM_QOS_CPU_DMA_LATENCY, latency);
4144    
4145     if (!omap_mcpdm_active(mcpdm)) {
4146     omap_mcpdm_start(mcpdm);
4147     @@ -427,6 +465,9 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
4148     free_irq(mcpdm->irq, (void *)mcpdm);
4149     pm_runtime_disable(mcpdm->dev);
4150    
4151     + if (pm_qos_request_active(&mcpdm->pm_qos_req))
4152     + pm_qos_remove_request(&mcpdm->pm_qos_req);
4153     +
4154     return 0;
4155     }
4156    
4157     diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
4158     index eb1b9da05dd4..4715527054e5 100644
4159     --- a/sound/soc/qcom/common.c
4160     +++ b/sound/soc/qcom/common.c
4161     @@ -13,6 +13,7 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
4162     struct device_node *cpu = NULL;
4163     struct device *dev = card->dev;
4164     struct snd_soc_dai_link *link;
4165     + struct of_phandle_args args;
4166     int ret, num_links;
4167    
4168     ret = snd_soc_of_parse_card_name(card, "model");
4169     @@ -47,12 +48,14 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
4170     goto err;
4171     }
4172    
4173     - link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
4174     - if (!link->cpu_of_node) {
4175     + ret = of_parse_phandle_with_args(cpu, "sound-dai",
4176     + "#sound-dai-cells", 0, &args);
4177     + if (ret) {
4178     dev_err(card->dev, "error getting cpu phandle\n");
4179     - ret = -EINVAL;
4180     goto err;
4181     }
4182     + link->cpu_of_node = args.np;
4183     + link->id = args.args[0];
4184    
4185     ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
4186     if (ret) {
4187     diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c
4188     index 60ff4a2d3577..8f6c8fc073a9 100644
4189     --- a/sound/soc/qcom/qdsp6/q6afe-dai.c
4190     +++ b/sound/soc/qcom/qdsp6/q6afe-dai.c
4191     @@ -1112,204 +1112,204 @@ static int q6afe_of_xlate_dai_name(struct snd_soc_component *component,
4192     }
4193    
4194     static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
4195     - SND_SOC_DAPM_AIF_OUT("HDMI_RX", "HDMI Playback", 0, 0, 0, 0),
4196     - SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_RX", "Slimbus Playback", 0, 0, 0, 0),
4197     - SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0),
4198     - SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_RX", "Slimbus2 Playback", 0, 0, 0, 0),
4199     - SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_RX", "Slimbus3 Playback", 0, 0, 0, 0),
4200     - SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_RX", "Slimbus4 Playback", 0, 0, 0, 0),
4201     - SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_RX", "Slimbus5 Playback", 0, 0, 0, 0),
4202     - SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_RX", "Slimbus6 Playback", 0, 0, 0, 0),
4203     - SND_SOC_DAPM_AIF_IN("SLIMBUS_0_TX", "Slimbus Capture", 0, 0, 0, 0),
4204     - SND_SOC_DAPM_AIF_IN("SLIMBUS_1_TX", "Slimbus1 Capture", 0, 0, 0, 0),
4205     - SND_SOC_DAPM_AIF_IN("SLIMBUS_2_TX", "Slimbus2 Capture", 0, 0, 0, 0),
4206     - SND_SOC_DAPM_AIF_IN("SLIMBUS_3_TX", "Slimbus3 Capture", 0, 0, 0, 0),
4207     - SND_SOC_DAPM_AIF_IN("SLIMBUS_4_TX", "Slimbus4 Capture", 0, 0, 0, 0),
4208     - SND_SOC_DAPM_AIF_IN("SLIMBUS_5_TX", "Slimbus5 Capture", 0, 0, 0, 0),
4209     - SND_SOC_DAPM_AIF_IN("SLIMBUS_6_TX", "Slimbus6 Capture", 0, 0, 0, 0),
4210     - SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_RX", "Quaternary MI2S Playback",
4211     + SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, 0, 0, 0),
4212     + SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, 0, 0, 0),
4213     + SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, 0, 0, 0),
4214     + SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, 0, 0, 0),
4215     + SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, 0, 0, 0),
4216     + SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, 0, 0, 0),
4217     + SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, 0, 0, 0),
4218     + SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, 0, 0, 0),
4219     + SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, 0, 0, 0),
4220     + SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, 0, 0, 0),
4221     + SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, 0, 0, 0),
4222     + SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, 0, 0, 0),
4223     + SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, 0, 0, 0),
4224     + SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, 0, 0, 0),
4225     + SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, 0, 0, 0),
4226     + SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL,
4227     0, 0, 0, 0),
4228     - SND_SOC_DAPM_AIF_IN("QUAT_MI2S_TX", "Quaternary MI2S Capture",
4229     + SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL,
4230     0, 0, 0, 0),
4231     - SND_SOC_DAPM_AIF_OUT("TERT_MI2S_RX", "Tertiary MI2S Playback",
4232     + SND_SOC_DAPM_AIF_IN("TERT_MI2S_RX", NULL,
4233     0, 0, 0, 0),
4234     - SND_SOC_DAPM_AIF_IN("TERT_MI2S_TX", "Tertiary MI2S Capture",
4235     + SND_SOC_DAPM_AIF_OUT("TERT_MI2S_TX", NULL,
4236     0, 0, 0, 0),
4237     - SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX", "Secondary MI2S Playback",
4238     + SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX", NULL,
4239     0, 0, 0, 0),
4240     - SND_SOC_DAPM_AIF_IN("SEC_MI2S_TX", "Secondary MI2S Capture",
4241     + SND_SOC_DAPM_AIF_OUT("SEC_MI2S_TX", NULL,
4242     0, 0, 0, 0),
4243     - SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX_SD1",
4244     + SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX_SD1",
4245     "Secondary MI2S Playback SD1",
4246     0, 0, 0, 0),
4247     - SND_SOC_DAPM_AIF_OUT("PRI_MI2S_RX", "Primary MI2S Playback",
4248     + SND_SOC_DAPM_AIF_IN("PRI_MI2S_RX", NULL,
4249     0, 0, 0, 0),
4250     - SND_SOC_DAPM_AIF_IN("PRI_MI2S_TX", "Primary MI2S Capture",
4251     + SND_SOC_DAPM_AIF_OUT("PRI_MI2S_TX", NULL,
4252     0, 0, 0, 0),
4253    
4254     - SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_0", "Primary TDM0 Playback",
4255     + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_0", NULL,
4256     0, 0, 0, 0),
4257     - SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_1", "Primary TDM1 Playback",
4258     + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_1", NULL,
4259     0, 0, 0, 0),
4260     - SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_2", "Primary TDM2 Playback",
4261     + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_2", NULL,
4262     0, 0, 0, 0),
4263     - SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_3", "Primary TDM3 Playback",
4264     + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_3", NULL,
4265     0, 0, 0, 0),
4266     - SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_4", "Primary TDM4 Playback",
4267     + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_4", NULL,
4268     0, 0, 0, 0),
4269     - SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_5", "Primary TDM5 Playback",
4270     + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_5", NULL,
4271     0, 0, 0, 0),
4272     - SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_6", "Primary TDM6 Playback",
4273     + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_6", NULL,
4274     0, 0, 0, 0),
4275     - SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_7", "Primary TDM7 Playback",
4276     + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_7", NULL,
4277     0, 0, 0, 0),
4278     - SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_0", "Primary TDM0 Capture",
4279     + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_0", NULL,
4280     0, 0, 0, 0),
4281     - SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_1", "Primary TDM1 Capture",
4282     + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_1", NULL,
4283     0, 0, 0, 0),
4284     - SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_2", "Primary TDM2 Capture",
4285     + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_2", NULL,
4286     0, 0, 0, 0),
4287     - SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_3", "Primary TDM3 Capture",
4288     + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_3", NULL,
4289     0, 0, 0, 0),
4290     - SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_4", "Primary TDM4 Capture",
4291     + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_4", NULL,
4292     0, 0, 0, 0),
4293     - SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_5", "Primary TDM5 Capture",
4294     + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_5", NULL,
4295     0, 0, 0, 0),
4296     - SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_6", "Primary TDM6 Capture",
4297     + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_6", NULL,
4298     0, 0, 0, 0),
4299     - SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_7", "Primary TDM7 Capture",
4300     + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_7", NULL,
4301     0, 0, 0, 0),
4302    
4303     - SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_0", "Secondary TDM0 Playback",
4304     + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0", NULL,
4305     0, 0, 0, 0),
4306     - SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_1", "Secondary TDM1 Playback",
4307     + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1", NULL,
4308     0, 0, 0, 0),
4309     - SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_2", "Secondary TDM2 Playback",
4310     + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2", NULL,
4311     0, 0, 0, 0),
4312     - SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_3", "Secondary TDM3 Playback",
4313     + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3", NULL,
4314     0, 0, 0, 0),
4315     - SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_4", "Secondary TDM4 Playback",
4316     + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4", NULL,
4317     0, 0, 0, 0),
4318     - SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_5", "Secondary TDM5 Playback",
4319     + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5", NULL,
4320     0, 0, 0, 0),
4321     - SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_6", "Secondary TDM6 Playback",
4322     + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6", NULL,
4323     0, 0, 0, 0),
4324     - SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_7", "Secondary TDM7 Playback",
4325     + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7", NULL,
4326     0, 0, 0, 0),
4327     - SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_0", "Secondary TDM0 Capture",
4328     + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0", NULL,
4329     0, 0, 0, 0),
4330     - SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_1", "Secondary TDM1 Capture",
4331     + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1", NULL,
4332     0, 0, 0, 0),
4333     - SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_2", "Secondary TDM2 Capture",
4334     + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2", NULL,
4335     0, 0, 0, 0),
4336     - SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_3", "Secondary TDM3 Capture",
4337     + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3", NULL,
4338     0, 0, 0, 0),
4339     - SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_4", "Secondary TDM4 Capture",
4340     + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4", NULL,
4341     0, 0, 0, 0),
4342     - SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_5", "Secondary TDM5 Capture",
4343     + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5", NULL,
4344     0, 0, 0, 0),
4345     - SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_6", "Secondary TDM6 Capture",
4346     + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6", NULL,
4347     0, 0, 0, 0),
4348     - SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_7", "Secondary TDM7 Capture",
4349     + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7", NULL,
4350     0, 0, 0, 0),
4351    
4352     - SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_0", "Tertiary TDM0 Playback",
4353     + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0", NULL,
4354     0, 0, 0, 0),
4355     - SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_1", "Tertiary TDM1 Playback",
4356     + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1", NULL,
4357     0, 0, 0, 0),
4358     - SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_2", "Tertiary TDM2 Playback",
4359     + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2", NULL,
4360     0, 0, 0, 0),
4361     - SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_3", "Tertiary TDM3 Playback",
4362     + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3", NULL,
4363     0, 0, 0, 0),
4364     - SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_4", "Tertiary TDM4 Playback",
4365     + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4", NULL,
4366     0, 0, 0, 0),
4367     - SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_5", "Tertiary TDM5 Playback",
4368     + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5", NULL,
4369     0, 0, 0, 0),
4370     - SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_6", "Tertiary TDM6 Playback",
4371     + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6", NULL,
4372     0, 0, 0, 0),
4373     - SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_7", "Tertiary TDM7 Playback",
4374     + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7", NULL,
4375     0, 0, 0, 0),
4376     - SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_0", "Tertiary TDM0 Capture",
4377     + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0", NULL,
4378     0, 0, 0, 0),
4379     - SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_1", "Tertiary TDM1 Capture",
4380     + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1", NULL,
4381     0, 0, 0, 0),
4382     - SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_2", "Tertiary TDM2 Capture",
4383     + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2", NULL,
4384     0, 0, 0, 0),
4385     - SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_3", "Tertiary TDM3 Capture",
4386     + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3", NULL,
4387     0, 0, 0, 0),
4388     - SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_4", "Tertiary TDM4 Capture",
4389     + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4", NULL,
4390     0, 0, 0, 0),
4391     - SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_5", "Tertiary TDM5 Capture",
4392     + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5", NULL,
4393     0, 0, 0, 0),
4394     - SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_6", "Tertiary TDM6 Capture",
4395     + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6", NULL,
4396     0, 0, 0, 0),
4397     - SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_7", "Tertiary TDM7 Capture",
4398     + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7", NULL,
4399     0, 0, 0, 0),
4400    
4401     - SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_0", "Quaternary TDM0 Playback",
4402     + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0", NULL,
4403     0, 0, 0, 0),
4404     - SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_1", "Quaternary TDM1 Playback",
4405     + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1", NULL,
4406     0, 0, 0, 0),
4407     - SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_2", "Quaternary TDM2 Playback",
4408     + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2", NULL,
4409     0, 0, 0, 0),
4410     - SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_3", "Quaternary TDM3 Playback",
4411     + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3", NULL,
4412     0, 0, 0, 0),
4413     - SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_4", "Quaternary TDM4 Playback",
4414     + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4", NULL,
4415     0, 0, 0, 0),
4416     - SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_5", "Quaternary TDM5 Playback",
4417     + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5", NULL,
4418     0, 0, 0, 0),
4419     - SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_6", "Quaternary TDM6 Playback",
4420     + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6", NULL,
4421     0, 0, 0, 0),
4422     - SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_7", "Quaternary TDM7 Playback",
4423     + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7", NULL,
4424     0, 0, 0, 0),
4425     - SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_0", "Quaternary TDM0 Capture",
4426     + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0", NULL,
4427     0, 0, 0, 0),
4428     - SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_1", "Quaternary TDM1 Capture",
4429     + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1", NULL,
4430     0, 0, 0, 0),
4431     - SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_2", "Quaternary TDM2 Capture",
4432     + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2", NULL,
4433     0, 0, 0, 0),
4434     - SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_3", "Quaternary TDM3 Capture",
4435     + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3", NULL,
4436     0, 0, 0, 0),
4437     - SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_4", "Quaternary TDM4 Capture",
4438     + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4", NULL,
4439     0, 0, 0, 0),
4440     - SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_5", "Quaternary TDM5 Capture",
4441     + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5", NULL,
4442     0, 0, 0, 0),
4443     - SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_6", "Quaternary TDM6 Capture",
4444     + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6", NULL,
4445     0, 0, 0, 0),
4446     - SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_7", "Quaternary TDM7 Capture",
4447     + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7", NULL,
4448     0, 0, 0, 0),
4449    
4450     - SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_0", "Quinary TDM0 Playback",
4451     + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_0", NULL,
4452     0, 0, 0, 0),
4453     - SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_1", "Quinary TDM1 Playback",
4454     + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_1", NULL,
4455     0, 0, 0, 0),
4456     - SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_2", "Quinary TDM2 Playback",
4457     + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_2", NULL,
4458     0, 0, 0, 0),
4459     - SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_3", "Quinary TDM3 Playback",
4460     + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_3", NULL,
4461     0, 0, 0, 0),
4462     - SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_4", "Quinary TDM4 Playback",
4463     + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_4", NULL,
4464     0, 0, 0, 0),
4465     - SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_5", "Quinary TDM5 Playback",
4466     + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_5", NULL,
4467     0, 0, 0, 0),
4468     - SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_6", "Quinary TDM6 Playback",
4469     + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_6", NULL,
4470     0, 0, 0, 0),
4471     - SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_7", "Quinary TDM7 Playback",
4472     + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_7", NULL,
4473     0, 0, 0, 0),
4474     - SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_0", "Quinary TDM0 Capture",
4475     + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_0", NULL,
4476     0, 0, 0, 0),
4477     - SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_1", "Quinary TDM1 Capture",
4478     + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_1", NULL,
4479     0, 0, 0, 0),
4480     - SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_2", "Quinary TDM2 Capture",
4481     + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_2", NULL,
4482     0, 0, 0, 0),
4483     - SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_3", "Quinary TDM3 Capture",
4484     + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_3", NULL,
4485     0, 0, 0, 0),
4486     - SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_4", "Quinary TDM4 Capture",
4487     + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_4", NULL,
4488     0, 0, 0, 0),
4489     - SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_5", "Quinary TDM5 Capture",
4490     + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_5", NULL,
4491     0, 0, 0, 0),
4492     - SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_6", "Quinary TDM6 Capture",
4493     + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_6", NULL,
4494     0, 0, 0, 0),
4495     - SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_7", "Quinary TDM7 Capture",
4496     + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_7", NULL,
4497     0, 0, 0, 0),
4498     };
4499    
4500     diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
4501     index 000775b4bba8..829b5e987b2a 100644
4502     --- a/sound/soc/qcom/qdsp6/q6afe.c
4503     +++ b/sound/soc/qcom/qdsp6/q6afe.c
4504     @@ -49,14 +49,14 @@
4505     #define AFE_PORT_I2S_SD1 0x2
4506     #define AFE_PORT_I2S_SD2 0x3
4507     #define AFE_PORT_I2S_SD3 0x4
4508     -#define AFE_PORT_I2S_SD0_MASK BIT(0x1)
4509     -#define AFE_PORT_I2S_SD1_MASK BIT(0x2)
4510     -#define AFE_PORT_I2S_SD2_MASK BIT(0x3)
4511     -#define AFE_PORT_I2S_SD3_MASK BIT(0x4)
4512     -#define AFE_PORT_I2S_SD0_1_MASK GENMASK(2, 1)
4513     -#define AFE_PORT_I2S_SD2_3_MASK GENMASK(4, 3)
4514     -#define AFE_PORT_I2S_SD0_1_2_MASK GENMASK(3, 1)
4515     -#define AFE_PORT_I2S_SD0_1_2_3_MASK GENMASK(4, 1)
4516     +#define AFE_PORT_I2S_SD0_MASK BIT(0x0)
4517     +#define AFE_PORT_I2S_SD1_MASK BIT(0x1)
4518     +#define AFE_PORT_I2S_SD2_MASK BIT(0x2)
4519     +#define AFE_PORT_I2S_SD3_MASK BIT(0x3)
4520     +#define AFE_PORT_I2S_SD0_1_MASK GENMASK(1, 0)
4521     +#define AFE_PORT_I2S_SD2_3_MASK GENMASK(3, 2)
4522     +#define AFE_PORT_I2S_SD0_1_2_MASK GENMASK(2, 0)
4523     +#define AFE_PORT_I2S_SD0_1_2_3_MASK GENMASK(3, 0)
4524     #define AFE_PORT_I2S_QUAD01 0x5
4525     #define AFE_PORT_I2S_QUAD23 0x6
4526     #define AFE_PORT_I2S_6CHS 0x7
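
The q6afe.c hunk above fixes an off-by-one in the serial-data line masks: in the kernel, BIT(n) is 1UL << n and GENMASK(h, l) sets bits h down to l, so BIT(0x1) and GENMASK(2, 1) actually described SD1 and SD1..SD2 rather than SD0 and SD0..SD1. A small standalone sketch, using simplified local stand-ins for the kernel macros, makes the before/after values visible:

#include <stdio.h>

/* Simplified stand-ins for the kernel's BIT()/GENMASK() helpers. */
#define BIT(n)		(1UL << (n))
#define GENMASK(h, l)	(((1UL << ((h) - (l) + 1)) - 1) << (l))

int main(void)
{
	/* Old (shifted by one) vs. fixed masks for SD0 and SD0..SD1. */
	printf("SD0    old=0x%lx  fixed=0x%lx\n", BIT(0x1), BIT(0x0));           /* 0x2 vs 0x1 */
	printf("SD0_1  old=0x%lx  fixed=0x%lx\n", GENMASK(2, 1), GENMASK(1, 0)); /* 0x6 vs 0x3 */
	return 0;
}
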
4527     diff --git a/sound/soc/rockchip/rockchip_pcm.c b/sound/soc/rockchip/rockchip_pcm.c
4528     index f77538319221..7029e0b85f9e 100644
4529     --- a/sound/soc/rockchip/rockchip_pcm.c
4530     +++ b/sound/soc/rockchip/rockchip_pcm.c
4531     @@ -32,6 +32,7 @@ static const struct snd_pcm_hardware snd_rockchip_hardware = {
4532    
4533     static const struct snd_dmaengine_pcm_config rk_dmaengine_pcm_config = {
4534     .pcm_hardware = &snd_rockchip_hardware,
4535     + .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
4536     .prealloc_buffer_size = 32 * 1024,
4537     };
4538    
4539     diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
4540     index 3f880ec66459..a566dae3ec8a 100644
4541     --- a/sound/soc/sh/rcar/ssi.c
4542     +++ b/sound/soc/sh/rcar/ssi.c
4543     @@ -283,7 +283,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
4544     if (rsnd_ssi_is_multi_slave(mod, io))
4545     return 0;
4546    
4547     - if (ssi->rate) {
4548     + if (ssi->usrcnt > 1) {
4549     if (ssi->rate != rate) {
4550     dev_err(dev, "SSI parent/child should use same rate\n");
4551     return -EINVAL;
4552     diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c
4553     index b8e72b52db30..4fb29f0e561e 100644
4554     --- a/sound/soc/soc-acpi.c
4555     +++ b/sound/soc/soc-acpi.c
4556     @@ -10,11 +10,17 @@ struct snd_soc_acpi_mach *
4557     snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines)
4558     {
4559     struct snd_soc_acpi_mach *mach;
4560     + struct snd_soc_acpi_mach *mach_alt;
4561    
4562     for (mach = machines; mach->id[0]; mach++) {
4563     if (acpi_dev_present(mach->id, NULL, -1)) {
4564     - if (mach->machine_quirk)
4565     - mach = mach->machine_quirk(mach);
4566     + if (mach->machine_quirk) {
4567     + mach_alt = mach->machine_quirk(mach);
4568     + if (!mach_alt)
4569     + continue; /* not full match, ignore */
4570     + mach = mach_alt;
4571     + }
4572     +
4573     return mach;
4574     }
4575     }
4576     diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
4577     index 473eefe8658e..62aa320c2070 100644
4578     --- a/sound/soc/soc-core.c
4579     +++ b/sound/soc/soc-core.c
4580     @@ -2126,6 +2126,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
4581     }
4582    
4583     card->instantiated = 1;
4584     + dapm_mark_endpoints_dirty(card);
4585     snd_soc_dapm_sync(&card->dapm);
4586     mutex_unlock(&card->mutex);
4587     mutex_unlock(&client_mutex);
4588     diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
4589     index fb37dd927e33..bf615fa16dc8 100644
4590     --- a/sound/soc/sunxi/sun8i-codec.c
4591     +++ b/sound/soc/sunxi/sun8i-codec.c
4592     @@ -589,16 +589,10 @@ err_pm_disable:
4593    
4594     static int sun8i_codec_remove(struct platform_device *pdev)
4595     {
4596     - struct snd_soc_card *card = platform_get_drvdata(pdev);
4597     - struct sun8i_codec *scodec = snd_soc_card_get_drvdata(card);
4598     -
4599     pm_runtime_disable(&pdev->dev);
4600     if (!pm_runtime_status_suspended(&pdev->dev))
4601     sun8i_codec_runtime_suspend(&pdev->dev);
4602    
4603     - clk_disable_unprepare(scodec->clk_module);
4604     - clk_disable_unprepare(scodec->clk_bus);
4605     -
4606     return 0;
4607     }
4608    
4609     diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
4610     index 08aa78007020..1c73b9ed44a6 100644
4611     --- a/sound/usb/quirks-table.h
4612     +++ b/sound/usb/quirks-table.h
4613     @@ -3387,5 +3387,15 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
4614     .ifnum = QUIRK_NO_INTERFACE
4615     }
4616     },
4617     +/* Dell WD19 Dock */
4618     +{
4619     + USB_DEVICE(0x0bda, 0x402e),
4620     + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
4621     + .vendor_name = "Dell",
4622     + .product_name = "WD19 Dock",
4623     + .profile_name = "Dell-WD15-Dock",
4624     + .ifnum = QUIRK_NO_INTERFACE
4625     + }
4626     +},
4627    
4628     #undef USB_DEVICE_VENDOR_SPEC
4629     diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
4630     index b3a0709ea7ed..fcaf00621102 100644
4631     --- a/tools/bpf/bpftool/common.c
4632     +++ b/tools/bpf/bpftool/common.c
4633     @@ -304,7 +304,7 @@ char *get_fdinfo(int fd, const char *key)
4634     return NULL;
4635     }
4636    
4637     - while ((n = getline(&line, &line_n, fdi))) {
4638     + while ((n = getline(&line, &line_n, fdi)) > 0) {
4639     char *value;
4640     int len;
4641    
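
The get_fdinfo() change above matters because getline() returns -1, not 0, at end of file or on error; -1 is non-zero, so the unpatched condition stays true at EOF and the loop keeps running with a stale line buffer. A minimal standalone sketch of the corrected idiom (reading /proc/self/status purely as an example input):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

int main(void)
{
	FILE *f = fopen("/proc/self/status", "r");
	char *line = NULL;
	size_t cap = 0;
	ssize_t n;

	if (!f)
		return 1;

	/* getline() returns the line length, or -1 at EOF/error, so test "> 0". */
	while ((n = getline(&line, &cap, f)) > 0)
		printf("%zd bytes: %s", n, line);

	free(line);
	fclose(f);
	return 0;
}
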
4642     diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
4643     index dce960d22106..0de024a6cc2b 100644
4644     --- a/tools/bpf/bpftool/prog.c
4645     +++ b/tools/bpf/bpftool/prog.c
4646     @@ -749,6 +749,7 @@ static int do_load(int argc, char **argv)
4647     }
4648     NEXT_ARG();
4649     } else if (is_prefix(*argv, "map")) {
4650     + void *new_map_replace;
4651     char *endptr, *name;
4652     int fd;
4653    
4654     @@ -782,12 +783,15 @@ static int do_load(int argc, char **argv)
4655     if (fd < 0)
4656     goto err_free_reuse_maps;
4657    
4658     - map_replace = reallocarray(map_replace, old_map_fds + 1,
4659     - sizeof(*map_replace));
4660     - if (!map_replace) {
4661     + new_map_replace = reallocarray(map_replace,
4662     + old_map_fds + 1,
4663     + sizeof(*map_replace));
4664     + if (!new_map_replace) {
4665     p_err("mem alloc failed");
4666     goto err_free_reuse_maps;
4667     }
4668     + map_replace = new_map_replace;
4669     +
4670     map_replace[old_map_fds].idx = idx;
4671     map_replace[old_map_fds].name = name;
4672     map_replace[old_map_fds].fd = fd;
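
The prog.c hunk above avoids the classic realloc-family leak: assigning the result of reallocarray() straight back to map_replace would lose the only pointer to the old allocation when the call fails. A minimal standalone sketch of the temporary-pointer pattern (growing a plain int array; with glibc, reallocarray() needs _GNU_SOURCE and glibc 2.26 or newer):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int *arr = NULL, *new_arr;
	size_t count = 0;

	for (int v = 0; v < 5; v++) {
		/* Grow via a temporary so a failed reallocarray() does not leak arr. */
		new_arr = reallocarray(arr, count + 1, sizeof(*arr));
		if (!new_arr) {
			free(arr);	/* old block is still valid and still ours */
			return 1;
		}
		arr = new_arr;
		arr[count++] = v;
	}

	for (size_t i = 0; i < count; i++)
		printf("%d ", arr[i]);
	printf("\n");
	free(arr);
	return 0;
}
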
4673     diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
4674     index 7ec85d567598..b75d004f6482 100644
4675     --- a/tools/objtool/elf.c
4676     +++ b/tools/objtool/elf.c
4677     @@ -31,6 +31,8 @@
4678     #include "elf.h"
4679     #include "warn.h"
4680    
4681     +#define MAX_NAME_LEN 128
4682     +
4683     struct section *find_section_by_name(struct elf *elf, const char *name)
4684     {
4685     struct section *sec;
4686     @@ -298,6 +300,8 @@ static int read_symbols(struct elf *elf)
4687     /* Create parent/child links for any cold subfunctions */
4688     list_for_each_entry(sec, &elf->sections, list) {
4689     list_for_each_entry(sym, &sec->symbol_list, list) {
4690     + char pname[MAX_NAME_LEN + 1];
4691     + size_t pnamelen;
4692     if (sym->type != STT_FUNC)
4693     continue;
4694     sym->pfunc = sym->cfunc = sym;
4695     @@ -305,14 +309,21 @@ static int read_symbols(struct elf *elf)
4696     if (!coldstr)
4697     continue;
4698    
4699     - coldstr[0] = '\0';
4700     - pfunc = find_symbol_by_name(elf, sym->name);
4701     - coldstr[0] = '.';
4702     + pnamelen = coldstr - sym->name;
4703     + if (pnamelen > MAX_NAME_LEN) {
4704     + WARN("%s(): parent function name exceeds maximum length of %d characters",
4705     + sym->name, MAX_NAME_LEN);
4706     + return -1;
4707     + }
4708     +
4709     + strncpy(pname, sym->name, pnamelen);
4710     + pname[pnamelen] = '\0';
4711     + pfunc = find_symbol_by_name(elf, pname);
4712    
4713     if (!pfunc) {
4714     WARN("%s(): can't find parent function",
4715     sym->name);
4716     - goto err;
4717     + return -1;
4718     }
4719    
4720     sym->pfunc = pfunc;
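
The objtool change above derives the parent of a ".cold." subfunction by copying the name prefix into a bounded local buffer instead of temporarily writing a NUL into the symbol name, and it bails out if the prefix would overflow MAX_NAME_LEN. A standalone sketch of the same bounded-copy lookup-key construction (the symbol name and helper name here are illustrative only):

#include <stdio.h>
#include <string.h>

#define MAX_NAME_LEN 128

/* Derive "my_function" from "my_function.cold.17" without touching the input. */
static int parent_name(const char *name, char *pname, size_t pname_size)
{
	const char *coldstr = strstr(name, ".cold.");
	size_t pnamelen;

	if (!coldstr)
		return -1;		/* not a cold subfunction */

	pnamelen = coldstr - name;
	if (pnamelen >= pname_size)
		return -1;		/* would not fit; refuse instead of truncating */

	strncpy(pname, name, pnamelen);
	pname[pnamelen] = '\0';
	return 0;
}

int main(void)
{
	char pname[MAX_NAME_LEN + 1];

	if (parent_name("my_function.cold.17", pname, sizeof(pname)) == 0)
		printf("parent: %s\n", pname);
	return 0;
}
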
4721     diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
4722     index 37940665f736..efd0157b9d22 100644
4723     --- a/tools/perf/tests/attr/base-record
4724     +++ b/tools/perf/tests/attr/base-record
4725     @@ -9,7 +9,7 @@ size=112
4726     config=0
4727     sample_period=*
4728     sample_type=263
4729     -read_format=0
4730     +read_format=0|4
4731     disabled=1
4732     inherit=1
4733     pinned=0
4734     diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
4735     index 03a72310315f..e7dbdcc8d465 100644
4736     --- a/tools/perf/util/evsel.c
4737     +++ b/tools/perf/util/evsel.c
4738     @@ -1088,7 +1088,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
4739     attr->exclude_user = 1;
4740     }
4741    
4742     - if (evsel->own_cpus)
4743     + if (evsel->own_cpus || evsel->unit)
4744     evsel->attr.read_format |= PERF_FORMAT_ID;
4745    
4746     /*
4747     diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
4748     index cf8bd123cf73..aed170bd4384 100644
4749     --- a/tools/perf/util/namespaces.c
4750     +++ b/tools/perf/util/namespaces.c
4751     @@ -18,6 +18,7 @@
4752     #include <stdio.h>
4753     #include <string.h>
4754     #include <unistd.h>
4755     +#include <asm/bug.h>
4756    
4757     struct namespaces *namespaces__new(struct namespaces_event *event)
4758     {
4759     @@ -186,6 +187,7 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
4760     char curpath[PATH_MAX];
4761     int oldns = -1;
4762     int newns = -1;
4763     + char *oldcwd = NULL;
4764    
4765     if (nc == NULL)
4766     return;
4767     @@ -199,9 +201,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
4768     if (snprintf(curpath, PATH_MAX, "/proc/self/ns/mnt") >= PATH_MAX)
4769     return;
4770    
4771     + oldcwd = get_current_dir_name();
4772     + if (!oldcwd)
4773     + return;
4774     +
4775     oldns = open(curpath, O_RDONLY);
4776     if (oldns < 0)
4777     - return;
4778     + goto errout;
4779    
4780     newns = open(nsi->mntns_path, O_RDONLY);
4781     if (newns < 0)
4782     @@ -210,11 +216,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
4783     if (setns(newns, CLONE_NEWNS) < 0)
4784     goto errout;
4785    
4786     + nc->oldcwd = oldcwd;
4787     nc->oldns = oldns;
4788     nc->newns = newns;
4789     return;
4790    
4791     errout:
4792     + free(oldcwd);
4793     if (oldns > -1)
4794     close(oldns);
4795     if (newns > -1)
4796     @@ -223,11 +231,16 @@ errout:
4797    
4798     void nsinfo__mountns_exit(struct nscookie *nc)
4799     {
4800     - if (nc == NULL || nc->oldns == -1 || nc->newns == -1)
4801     + if (nc == NULL || nc->oldns == -1 || nc->newns == -1 || !nc->oldcwd)
4802     return;
4803    
4804     setns(nc->oldns, CLONE_NEWNS);
4805    
4806     + if (nc->oldcwd) {
4807     + WARN_ON_ONCE(chdir(nc->oldcwd));
4808     + zfree(&nc->oldcwd);
4809     + }
4810     +
4811     if (nc->oldns > -1) {
4812     close(nc->oldns);
4813     nc->oldns = -1;
4814     diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
4815     index cae1a9a39722..d5f46c09ea31 100644
4816     --- a/tools/perf/util/namespaces.h
4817     +++ b/tools/perf/util/namespaces.h
4818     @@ -38,6 +38,7 @@ struct nsinfo {
4819     struct nscookie {
4820     int oldns;
4821     int newns;
4822     + char *oldcwd;
4823     };
4824    
4825     int nsinfo__init(struct nsinfo *nsi);
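
The perf namespaces change above saves the current working directory before entering another mount namespace and restores it after switching back, because setns(CLONE_NEWNS) moves the caller's cwd (and root) to the root of the target namespace. A minimal standalone sketch of that save/switch/restore shape (the /proc path argument is illustrative, and actually joining another mount namespace requires CAP_SYS_ADMIN):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* argv[1]: a mount-namespace file such as /proc/<pid>/ns/mnt. */
	char *oldcwd = get_current_dir_name();	/* cwd is lost by setns(CLONE_NEWNS) */
	int oldns = open("/proc/self/ns/mnt", O_RDONLY);
	int newns = (argc > 1) ? open(argv[1], O_RDONLY) : -1;

	if (!oldcwd || oldns < 0 || newns < 0)
		goto out;

	if (setns(newns, CLONE_NEWNS) == 0) {
		/* ... work inside the other mount namespace ... */
		setns(oldns, CLONE_NEWNS);	/* switch back first ... */
		if (chdir(oldcwd) != 0)		/* ... then restore the old cwd */
			perror("chdir");
	}
out:
	free(oldcwd);
	if (oldns >= 0)
		close(oldns);
	if (newns >= 0)
		close(newns);
	return 0;
}
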
4826     diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
4827     index f1fe492c8e17..f0017c831e57 100644
4828     --- a/tools/testing/selftests/Makefile
4829     +++ b/tools/testing/selftests/Makefile
4830     @@ -24,6 +24,7 @@ TARGETS += memory-hotplug
4831     TARGETS += mount
4832     TARGETS += mqueue
4833     TARGETS += net
4834     +TARGETS += netfilter
4835     TARGETS += nsfs
4836     TARGETS += powerpc
4837     TARGETS += proc
4838     diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
4839     index 67c412d19c09..2bde9ee04db7 100644
4840     --- a/tools/testing/selftests/bpf/test_verifier.c
4841     +++ b/tools/testing/selftests/bpf/test_verifier.c
4842     @@ -12511,6 +12511,25 @@ static struct bpf_test tests[] = {
4843     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4844     .result = ACCEPT,
4845     },
4846     + {
4847     + "calls: ctx read at start of subprog",
4848     + .insns = {
4849     + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4850     + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
4851     + BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
4852     + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4853     + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
4854     + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4855     + BPF_EXIT_INSN(),
4856     + BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
4857     + BPF_MOV64_IMM(BPF_REG_0, 0),
4858     + BPF_EXIT_INSN(),
4859     + },
4860     + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
4861     + .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
4862     + .result_unpriv = REJECT,
4863     + .result = ACCEPT,
4864     + },
4865     };
4866    
4867     static int probe_filter_length(const struct bpf_insn *fp)
4868     diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
4869     new file mode 100644
4870     index 000000000000..47ed6cef93fb
4871     --- /dev/null
4872     +++ b/tools/testing/selftests/netfilter/Makefile
4873     @@ -0,0 +1,6 @@
4874     +# SPDX-License-Identifier: GPL-2.0
4875     +# Makefile for netfilter selftests
4876     +
4877     +TEST_PROGS := nft_trans_stress.sh
4878     +
4879     +include ../lib.mk
4880     diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
4881     new file mode 100644
4882     index 000000000000..1017313e41a8
4883     --- /dev/null
4884     +++ b/tools/testing/selftests/netfilter/config
4885     @@ -0,0 +1,2 @@
4886     +CONFIG_NET_NS=y
4887     +NF_TABLES_INET=y
4888     diff --git a/tools/testing/selftests/netfilter/nft_trans_stress.sh b/tools/testing/selftests/netfilter/nft_trans_stress.sh
4889     new file mode 100755
4890     index 000000000000..f1affd12c4b1
4891     --- /dev/null
4892     +++ b/tools/testing/selftests/netfilter/nft_trans_stress.sh
4893     @@ -0,0 +1,78 @@
4894     +#!/bin/bash
4895     +#
4896     +# This test is for stress-testing the nf_tables config plane path vs.
4897     +# packet path processing: Make sure we never release rules that are
4898     +# still visible to other cpus.
4899     +#
4900     +# set -e
4901     +
4902     +# Kselftest framework requirement - SKIP code is 4.
4903     +ksft_skip=4
4904     +
4905     +testns=testns1
4906     +tables="foo bar baz quux"
4907     +
4908     +nft --version > /dev/null 2>&1
4909     +if [ $? -ne 0 ];then
4910     + echo "SKIP: Could not run test without nft tool"
4911     + exit $ksft_skip
4912     +fi
4913     +
4914     +ip -Version > /dev/null 2>&1
4915     +if [ $? -ne 0 ];then
4916     + echo "SKIP: Could not run test without ip tool"
4917     + exit $ksft_skip
4918     +fi
4919     +
4920     +tmp=$(mktemp)
4921     +
4922     +for table in $tables; do
4923     + echo add table inet "$table" >> "$tmp"
4924     + echo flush table inet "$table" >> "$tmp"
4925     +
4926     + echo "add chain inet $table INPUT { type filter hook input priority 0; }" >> "$tmp"
4927     + echo "add chain inet $table OUTPUT { type filter hook output priority 0; }" >> "$tmp"
4928     + for c in $(seq 1 400); do
4929     + chain=$(printf "chain%03u" "$c")
4930     + echo "add chain inet $table $chain" >> "$tmp"
4931     + done
4932     +
4933     + for c in $(seq 1 400); do
4934     + chain=$(printf "chain%03u" "$c")
4935     + for BASE in INPUT OUTPUT; do
4936     + echo "add rule inet $table $BASE counter jump $chain" >> "$tmp"
4937     + done
4938     + echo "add rule inet $table $chain counter return" >> "$tmp"
4939     + done
4940     +done
4941     +
4942     +ip netns add "$testns"
4943     +ip -netns "$testns" link set lo up
4944     +
4945     +lscpu | grep ^CPU\(s\): | ( read cpu cpunum ;
4946     +cpunum=$((cpunum-1))
4947     +for i in $(seq 0 $cpunum);do
4948     + mask=$(printf 0x%x $((1<<$i)))
4949     + ip netns exec "$testns" taskset $mask ping -4 127.0.0.1 -fq > /dev/null &
4950     + ip netns exec "$testns" taskset $mask ping -6 ::1 -fq > /dev/null &
4951     +done)
4952     +
4953     +sleep 1
4954     +
4955     +for i in $(seq 1 10) ; do ip netns exec "$testns" nft -f "$tmp" & done
4956     +
4957     +for table in $tables;do
4958     + randsleep=$((RANDOM%10))
4959     + sleep $randsleep
4960     + ip netns exec "$testns" nft delete table inet $table 2>/dev/null
4961     +done
4962     +
4963     +randsleep=$((RANDOM%10))
4964     +sleep $randsleep
4965     +
4966     +pkill -9 ping
4967     +
4968     +wait
4969     +
4970     +rm -f "$tmp"
4971     +ip netns del "$testns"
4972     diff --git a/tools/testing/selftests/proc/proc-self-map-files-002.c b/tools/testing/selftests/proc/proc-self-map-files-002.c
4973     index 6f1f4a6e1ecb..85744425b08d 100644
4974     --- a/tools/testing/selftests/proc/proc-self-map-files-002.c
4975     +++ b/tools/testing/selftests/proc/proc-self-map-files-002.c
4976     @@ -13,7 +13,7 @@
4977     * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
4978     * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
4979     */
4980     -/* Test readlink /proc/self/map_files/... with address 0. */
4981     +/* Test readlink /proc/self/map_files/... with minimum address. */
4982     #include <errno.h>
4983     #include <sys/types.h>
4984     #include <sys/stat.h>
4985     @@ -47,6 +47,11 @@ static void fail(const char *fmt, unsigned long a, unsigned long b)
4986     int main(void)
4987     {
4988     const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE);
4989     +#ifdef __arm__
4990     + unsigned long va = 2 * PAGE_SIZE;
4991     +#else
4992     + unsigned long va = 0;
4993     +#endif
4994     void *p;
4995     int fd;
4996     unsigned long a, b;
4997     @@ -55,7 +60,7 @@ int main(void)
4998     if (fd == -1)
4999     return 1;
5000    
5001     - p = mmap(NULL, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
5002     + p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
5003     if (p == MAP_FAILED) {
5004     if (errno == EPERM)
5005     return 2;