Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.19/0108-4.19.9-all-fixes.patch



Revision 3271
Mon Dec 17 06:44:33 2018 UTC by niro
File size: 157982 bytes
-linux-4.19.9
1 niro 3271 diff --git a/Makefile b/Makefile
2     index 34bc4c752c49..8717f34464d5 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 19
9     -SUBLEVEL = 8
10     +SUBLEVEL = 9
11     EXTRAVERSION =
12     NAME = "People's Front"
13    
14     diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
15     index b2aa9b32bff2..2c118a6ab358 100644
16     --- a/arch/arm/probes/kprobes/opt-arm.c
17     +++ b/arch/arm/probes/kprobes/opt-arm.c
18     @@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
19     }
20    
21     /* Copy arch-dep-instance from template. */
22     - memcpy(code, &optprobe_template_entry,
23     + memcpy(code, (unsigned char *)optprobe_template_entry,
24     TMPL_END_IDX * sizeof(kprobe_opcode_t));
25    
26     /* Adjust buffer according to instruction. */
27     diff --git a/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts b/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts
28     index 8978d924eb83..85cf0b6bdda9 100644
29     --- a/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts
30     +++ b/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts
31     @@ -75,18 +75,6 @@
32     regulator-always-on;
33     vin-supply = <&vcc_sys>;
34     };
35     -
36     - vdd_log: vdd-log {
37     - compatible = "pwm-regulator";
38     - pwms = <&pwm2 0 25000 0>;
39     - regulator-name = "vdd_log";
40     - regulator-min-microvolt = <800000>;
41     - regulator-max-microvolt = <1400000>;
42     - regulator-always-on;
43     - regulator-boot-on;
44     - vin-supply = <&vcc_sys>;
45     - };
46     -
47     };
48    
49     &cpu_l0 {
50     diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
51     index 6b2686d54411..29cdc99688f3 100644
52     --- a/arch/arm64/kernel/hibernate.c
53     +++ b/arch/arm64/kernel/hibernate.c
54     @@ -214,7 +214,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
55     }
56    
57     memcpy((void *)dst, src_start, length);
58     - flush_icache_range(dst, dst + length);
59     + __flush_icache_range(dst, dst + length);
60    
61     pgdp = pgd_offset_raw(allocator(mask), dst_addr);
62     if (pgd_none(READ_ONCE(*pgdp))) {
63     diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
64     index 5ce030266e7d..253d7ca71472 100644
65     --- a/arch/parisc/Makefile
66     +++ b/arch/parisc/Makefile
67     @@ -71,6 +71,13 @@ ifdef CONFIG_MLONGCALLS
68     KBUILD_CFLAGS_KERNEL += -mlong-calls
69     endif
70    
71     +# Without this, "ld -r" results in .text sections that are too big (> 0x40000)
72     +# for branches to reach stubs. And multiple .text sections trigger a warning
73     +# when creating the sysfs module information section.
74     +ifndef CONFIG_64BIT
75     +KBUILD_CFLAGS_MODULE += -ffunction-sections
76     +endif
77     +
78     # select which processor to optimise for
79     cflags-$(CONFIG_PA7000) += -march=1.1 -mschedule=7100
80     cflags-$(CONFIG_PA7200) += -march=1.1 -mschedule=7200
81     diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h
82     index 349df33808c4..cd2af4b013e3 100644
83     --- a/arch/riscv/include/asm/module.h
84     +++ b/arch/riscv/include/asm/module.h
85     @@ -8,6 +8,7 @@
86    
87     #define MODULE_ARCH_VERMAGIC "riscv"
88    
89     +struct module;
90     u64 module_emit_got_entry(struct module *mod, u64 val);
91     u64 module_emit_plt_entry(struct module *mod, u64 val);
92    
93     diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
94     index 8b4c5e001157..544ac4fafd11 100644
95     --- a/arch/x86/boot/compressed/eboot.c
96     +++ b/arch/x86/boot/compressed/eboot.c
97     @@ -1,3 +1,4 @@
98     +
99     /* -----------------------------------------------------------------------
100     *
101     * Copyright 2011 Intel Corporation; author Matt Fleming
102     @@ -634,37 +635,54 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
103     return status;
104     }
105    
106     +static efi_status_t allocate_e820(struct boot_params *params,
107     + struct setup_data **e820ext,
108     + u32 *e820ext_size)
109     +{
110     + unsigned long map_size, desc_size, buff_size;
111     + struct efi_boot_memmap boot_map;
112     + efi_memory_desc_t *map;
113     + efi_status_t status;
114     + __u32 nr_desc;
115     +
116     + boot_map.map = &map;
117     + boot_map.map_size = &map_size;
118     + boot_map.desc_size = &desc_size;
119     + boot_map.desc_ver = NULL;
120     + boot_map.key_ptr = NULL;
121     + boot_map.buff_size = &buff_size;
122     +
123     + status = efi_get_memory_map(sys_table, &boot_map);
124     + if (status != EFI_SUCCESS)
125     + return status;
126     +
127     + nr_desc = buff_size / desc_size;
128     +
129     + if (nr_desc > ARRAY_SIZE(params->e820_table)) {
130     + u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table);
131     +
132     + status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size);
133     + if (status != EFI_SUCCESS)
134     + return status;
135     + }
136     +
137     + return EFI_SUCCESS;
138     +}
139     +
140     struct exit_boot_struct {
141     struct boot_params *boot_params;
142     struct efi_info *efi;
143     - struct setup_data *e820ext;
144     - __u32 e820ext_size;
145     };
146    
147     static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
148     struct efi_boot_memmap *map,
149     void *priv)
150     {
151     - static bool first = true;
152     const char *signature;
153     __u32 nr_desc;
154     efi_status_t status;
155     struct exit_boot_struct *p = priv;
156    
157     - if (first) {
158     - nr_desc = *map->buff_size / *map->desc_size;
159     - if (nr_desc > ARRAY_SIZE(p->boot_params->e820_table)) {
160     - u32 nr_e820ext = nr_desc -
161     - ARRAY_SIZE(p->boot_params->e820_table);
162     -
163     - status = alloc_e820ext(nr_e820ext, &p->e820ext,
164     - &p->e820ext_size);
165     - if (status != EFI_SUCCESS)
166     - return status;
167     - }
168     - first = false;
169     - }
170     -
171     signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE
172     : EFI32_LOADER_SIGNATURE;
173     memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
174     @@ -687,8 +705,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
175     {
176     unsigned long map_sz, key, desc_size, buff_size;
177     efi_memory_desc_t *mem_map;
178     - struct setup_data *e820ext;
179     - __u32 e820ext_size;
180     + struct setup_data *e820ext = NULL;
181     + __u32 e820ext_size = 0;
182     efi_status_t status;
183     __u32 desc_version;
184     struct efi_boot_memmap map;
185     @@ -702,8 +720,10 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
186     map.buff_size = &buff_size;
187     priv.boot_params = boot_params;
188     priv.efi = &boot_params->efi_info;
189     - priv.e820ext = NULL;
190     - priv.e820ext_size = 0;
191     +
192     + status = allocate_e820(boot_params, &e820ext, &e820ext_size);
193     + if (status != EFI_SUCCESS)
194     + return status;
195    
196     /* Might as well exit boot services now */
197     status = efi_exit_boot_services(sys_table, handle, &map, &priv,
198     @@ -711,9 +731,6 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
199     if (status != EFI_SUCCESS)
200     return status;
201    
202     - e820ext = priv.e820ext;
203     - e820ext_size = priv.e820ext_size;
204     -
205     /* Historic? */
206     boot_params->alt_mem_k = 32 * 1024;
207    
208     diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
209     index c88c23c658c1..d1f25c831447 100644
210     --- a/arch/x86/kernel/e820.c
211     +++ b/arch/x86/kernel/e820.c
212     @@ -1248,7 +1248,6 @@ void __init e820__memblock_setup(void)
213     {
214     int i;
215     u64 end;
216     - u64 addr = 0;
217    
218     /*
219     * The bootstrap memblock region count maximum is 128 entries
220     @@ -1265,21 +1264,13 @@ void __init e820__memblock_setup(void)
221     struct e820_entry *entry = &e820_table->entries[i];
222    
223     end = entry->addr + entry->size;
224     - if (addr < entry->addr)
225     - memblock_reserve(addr, entry->addr - addr);
226     - addr = end;
227     if (end != (resource_size_t)end)
228     continue;
229    
230     - /*
231     - * all !E820_TYPE_RAM ranges (including gap ranges) are put
232     - * into memblock.reserved to make sure that struct pages in
233     - * such regions are not left uninitialized after bootup.
234     - */
235     if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
236     - memblock_reserve(entry->addr, entry->size);
237     - else
238     - memblock_add(entry->addr, entry->size);
239     + continue;
240     +
241     + memblock_add(entry->addr, entry->size);
242     }
243    
244     /* Throw away partial pages: */
245     diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
246     index 40b16b270656..6adf6e6c2933 100644
247     --- a/arch/x86/kernel/kprobes/opt.c
248     +++ b/arch/x86/kernel/kprobes/opt.c
249     @@ -189,7 +189,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
250     int len = 0, ret;
251    
252     while (len < RELATIVEJUMP_SIZE) {
253     - ret = __copy_instruction(dest + len, src + len, real, &insn);
254     + ret = __copy_instruction(dest + len, src + len, real + len, &insn);
255     if (!ret || !can_boost(&insn, src + len))
256     return -EINVAL;
257     len += ret;
258     diff --git a/crypto/cbc.c b/crypto/cbc.c
259     index b761b1f9c6ca..dd5f332fd566 100644
260     --- a/crypto/cbc.c
261     +++ b/crypto/cbc.c
262     @@ -140,9 +140,8 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
263     spawn = skcipher_instance_ctx(inst);
264     err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
265     CRYPTO_ALG_TYPE_MASK);
266     - crypto_mod_put(alg);
267     if (err)
268     - goto err_free_inst;
269     + goto err_put_alg;
270    
271     err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg);
272     if (err)
273     @@ -174,12 +173,15 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
274     err = skcipher_register_instance(tmpl, inst);
275     if (err)
276     goto err_drop_spawn;
277     + crypto_mod_put(alg);
278    
279     out:
280     return err;
281    
282     err_drop_spawn:
283     crypto_drop_spawn(spawn);
284     +err_put_alg:
285     + crypto_mod_put(alg);
286     err_free_inst:
287     kfree(inst);
288     goto out;
289     diff --git a/crypto/cfb.c b/crypto/cfb.c
290     index a0d68c09e1b9..20987d0e09d8 100644
291     --- a/crypto/cfb.c
292     +++ b/crypto/cfb.c
293     @@ -286,9 +286,8 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
294     spawn = skcipher_instance_ctx(inst);
295     err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
296     CRYPTO_ALG_TYPE_MASK);
297     - crypto_mod_put(alg);
298     if (err)
299     - goto err_free_inst;
300     + goto err_put_alg;
301    
302     err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg);
303     if (err)
304     @@ -317,12 +316,15 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
305     err = skcipher_register_instance(tmpl, inst);
306     if (err)
307     goto err_drop_spawn;
308     + crypto_mod_put(alg);
309    
310     out:
311     return err;
312    
313     err_drop_spawn:
314     crypto_drop_spawn(spawn);
315     +err_put_alg:
316     + crypto_mod_put(alg);
317     err_free_inst:
318     kfree(inst);
319     goto out;
320     diff --git a/crypto/pcbc.c b/crypto/pcbc.c
321     index ef802f6e9642..8aa10144407c 100644
322     --- a/crypto/pcbc.c
323     +++ b/crypto/pcbc.c
324     @@ -244,9 +244,8 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
325     spawn = skcipher_instance_ctx(inst);
326     err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
327     CRYPTO_ALG_TYPE_MASK);
328     - crypto_mod_put(alg);
329     if (err)
330     - goto err_free_inst;
331     + goto err_put_alg;
332    
333     err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg);
334     if (err)
335     @@ -275,12 +274,15 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
336     err = skcipher_register_instance(tmpl, inst);
337     if (err)
338     goto err_drop_spawn;
339     + crypto_mod_put(alg);
340    
341     out:
342     return err;
343    
344     err_drop_spawn:
345     crypto_drop_spawn(spawn);
346     +err_put_alg:
347     + crypto_mod_put(alg);
348     err_free_inst:
349     kfree(inst);
350     goto out;
351     diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
352     index 3f0e2a14895a..22b53bf26817 100644
353     --- a/drivers/cpufreq/ti-cpufreq.c
354     +++ b/drivers/cpufreq/ti-cpufreq.c
355     @@ -201,19 +201,28 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
356     {},
357     };
358    
359     +static const struct of_device_id *ti_cpufreq_match_node(void)
360     +{
361     + struct device_node *np;
362     + const struct of_device_id *match;
363     +
364     + np = of_find_node_by_path("/");
365     + match = of_match_node(ti_cpufreq_of_match, np);
366     + of_node_put(np);
367     +
368     + return match;
369     +}
370     +
371     static int ti_cpufreq_probe(struct platform_device *pdev)
372     {
373     u32 version[VERSION_COUNT];
374     - struct device_node *np;
375     const struct of_device_id *match;
376     struct opp_table *ti_opp_table;
377     struct ti_cpufreq_data *opp_data;
378     const char * const reg_names[] = {"vdd", "vbb"};
379     int ret;
380    
381     - np = of_find_node_by_path("/");
382     - match = of_match_node(ti_cpufreq_of_match, np);
383     - of_node_put(np);
384     + match = dev_get_platdata(&pdev->dev);
385     if (!match)
386     return -ENODEV;
387    
388     @@ -290,7 +299,14 @@ fail_put_node:
389    
390     static int ti_cpufreq_init(void)
391     {
392     - platform_device_register_simple("ti-cpufreq", -1, NULL, 0);
393     + const struct of_device_id *match;
394     +
395     + /* Check to ensure we are on a compatible platform */
396     + match = ti_cpufreq_match_node();
397     + if (match)
398     + platform_device_register_data(NULL, "ti-cpufreq", -1, match,
399     + sizeof(*match));
400     +
401     return 0;
402     }
403     module_init(ti_cpufreq_init);
404     diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
405     index f43e6dafe446..0f389e008ce6 100644
406     --- a/drivers/dma/dw/core.c
407     +++ b/drivers/dma/dw/core.c
408     @@ -1064,12 +1064,12 @@ static void dwc_issue_pending(struct dma_chan *chan)
409     /*
410     * Program FIFO size of channels.
411     *
412     - * By default full FIFO (1024 bytes) is assigned to channel 0. Here we
413     + * By default full FIFO (512 bytes) is assigned to channel 0. Here we
414     * slice FIFO on equal parts between channels.
415     */
416     static void idma32_fifo_partition(struct dw_dma *dw)
417     {
418     - u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) |
419     + u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
420     IDMA32C_FP_UPDATE;
421     u64 fifo_partition = 0;
422    
423     @@ -1082,7 +1082,7 @@ static void idma32_fifo_partition(struct dw_dma *dw)
424     /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
425     fifo_partition |= value << 32;
426    
427     - /* Program FIFO Partition registers - 128 bytes for each channel */
428     + /* Program FIFO Partition registers - 64 bytes per channel */
429     idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
430     idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
431     }
432     diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
433     index b4ec2d20e661..cb1b44d78a1f 100644
434     --- a/drivers/dma/imx-sdma.c
435     +++ b/drivers/dma/imx-sdma.c
436     @@ -24,7 +24,6 @@
437     #include <linux/spinlock.h>
438     #include <linux/device.h>
439     #include <linux/dma-mapping.h>
440     -#include <linux/dmapool.h>
441     #include <linux/firmware.h>
442     #include <linux/slab.h>
443     #include <linux/platform_device.h>
444     @@ -33,6 +32,7 @@
445     #include <linux/of_address.h>
446     #include <linux/of_device.h>
447     #include <linux/of_dma.h>
448     +#include <linux/workqueue.h>
449    
450     #include <asm/irq.h>
451     #include <linux/platform_data/dma-imx-sdma.h>
452     @@ -376,7 +376,7 @@ struct sdma_channel {
453     u32 shp_addr, per_addr;
454     enum dma_status status;
455     struct imx_dma_data data;
456     - struct dma_pool *bd_pool;
457     + struct work_struct terminate_worker;
458     };
459    
460     #define IMX_DMA_SG_LOOP BIT(0)
461     @@ -1027,31 +1027,49 @@ static int sdma_disable_channel(struct dma_chan *chan)
462    
463     return 0;
464     }
465     -
466     -static int sdma_disable_channel_with_delay(struct dma_chan *chan)
467     +static void sdma_channel_terminate_work(struct work_struct *work)
468     {
469     - struct sdma_channel *sdmac = to_sdma_chan(chan);
470     + struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
471     + terminate_worker);
472     unsigned long flags;
473     LIST_HEAD(head);
474    
475     - sdma_disable_channel(chan);
476     - spin_lock_irqsave(&sdmac->vc.lock, flags);
477     - vchan_get_all_descriptors(&sdmac->vc, &head);
478     - sdmac->desc = NULL;
479     - spin_unlock_irqrestore(&sdmac->vc.lock, flags);
480     - vchan_dma_desc_free_list(&sdmac->vc, &head);
481     -
482     /*
483     * According to NXP R&D team a delay of one BD SDMA cost time
484     * (maximum is 1ms) should be added after disable of the channel
485     * bit, to ensure SDMA core has really been stopped after SDMA
486     * clients call .device_terminate_all.
487     */
488     - mdelay(1);
489     + usleep_range(1000, 2000);
490     +
491     + spin_lock_irqsave(&sdmac->vc.lock, flags);
492     + vchan_get_all_descriptors(&sdmac->vc, &head);
493     + sdmac->desc = NULL;
494     + spin_unlock_irqrestore(&sdmac->vc.lock, flags);
495     + vchan_dma_desc_free_list(&sdmac->vc, &head);
496     +}
497     +
498     +static int sdma_disable_channel_async(struct dma_chan *chan)
499     +{
500     + struct sdma_channel *sdmac = to_sdma_chan(chan);
501     +
502     + sdma_disable_channel(chan);
503     +
504     + if (sdmac->desc)
505     + schedule_work(&sdmac->terminate_worker);
506    
507     return 0;
508     }
509    
510     +static void sdma_channel_synchronize(struct dma_chan *chan)
511     +{
512     + struct sdma_channel *sdmac = to_sdma_chan(chan);
513     +
514     + vchan_synchronize(&sdmac->vc);
515     +
516     + flush_work(&sdmac->terminate_worker);
517     +}
518     +
519     static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
520     {
521     struct sdma_engine *sdma = sdmac->sdma;
522     @@ -1192,10 +1210,11 @@ out:
523    
524     static int sdma_alloc_bd(struct sdma_desc *desc)
525     {
526     + u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
527     int ret = 0;
528    
529     - desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT,
530     - &desc->bd_phys);
531     + desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
532     + GFP_NOWAIT);
533     if (!desc->bd) {
534     ret = -ENOMEM;
535     goto out;
536     @@ -1206,7 +1225,9 @@ out:
537    
538     static void sdma_free_bd(struct sdma_desc *desc)
539     {
540     - dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
541     + u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
542     +
543     + dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
544     }
545    
546     static void sdma_desc_free(struct virt_dma_desc *vd)
547     @@ -1272,10 +1293,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
548     if (ret)
549     goto disable_clk_ahb;
550    
551     - sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
552     - sizeof(struct sdma_buffer_descriptor),
553     - 32, 0);
554     -
555     return 0;
556    
557     disable_clk_ahb:
558     @@ -1290,7 +1307,9 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
559     struct sdma_channel *sdmac = to_sdma_chan(chan);
560     struct sdma_engine *sdma = sdmac->sdma;
561    
562     - sdma_disable_channel_with_delay(chan);
563     + sdma_disable_channel_async(chan);
564     +
565     + sdma_channel_synchronize(chan);
566    
567     if (sdmac->event_id0)
568     sdma_event_disable(sdmac, sdmac->event_id0);
569     @@ -1304,9 +1323,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
570    
571     clk_disable(sdma->clk_ipg);
572     clk_disable(sdma->clk_ahb);
573     -
574     - dma_pool_destroy(sdmac->bd_pool);
575     - sdmac->bd_pool = NULL;
576     }
577    
578     static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
579     @@ -1999,6 +2015,8 @@ static int sdma_probe(struct platform_device *pdev)
580    
581     sdmac->channel = i;
582     sdmac->vc.desc_free = sdma_desc_free;
583     + INIT_WORK(&sdmac->terminate_worker,
584     + sdma_channel_terminate_work);
585     /*
586     * Add the channel to the DMAC list. Do not add channel 0 though
587     * because we need it internally in the SDMA driver. This also means
588     @@ -2050,7 +2068,8 @@ static int sdma_probe(struct platform_device *pdev)
589     sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
590     sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
591     sdma->dma_device.device_config = sdma_config;
592     - sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
593     + sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
594     + sdma->dma_device.device_synchronize = sdma_channel_synchronize;
595     sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
596     sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
597     sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
598     diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
599     index 1497da367710..e507ec36c0d3 100644
600     --- a/drivers/dma/ti/cppi41.c
601     +++ b/drivers/dma/ti/cppi41.c
602     @@ -723,8 +723,22 @@ static int cppi41_stop_chan(struct dma_chan *chan)
603    
604     desc_phys = lower_32_bits(c->desc_phys);
605     desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
606     - if (!cdd->chan_busy[desc_num])
607     + if (!cdd->chan_busy[desc_num]) {
608     + struct cppi41_channel *cc, *_ct;
609     +
610     + /*
611     + * channels might still be in the pendling list if
612     + * cppi41_dma_issue_pending() is called after
613     + * cppi41_runtime_suspend() is called
614     + */
615     + list_for_each_entry_safe(cc, _ct, &cdd->pending, node) {
616     + if (cc != c)
617     + continue;
618     + list_del(&cc->node);
619     + break;
620     + }
621     return 0;
622     + }
623    
624     ret = cppi41_tear_down_chan(c);
625     if (ret)
626     diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
627     index 71d014edd167..2c22836d3ffd 100644
628     --- a/drivers/gnss/sirf.c
629     +++ b/drivers/gnss/sirf.c
630     @@ -168,7 +168,7 @@ static int sirf_set_active(struct sirf_data *data, bool active)
631     else
632     timeout = SIRF_HIBERNATE_TIMEOUT;
633    
634     - while (retries-- > 0) {
635     + do {
636     sirf_pulse_on_off(data);
637     ret = sirf_wait_for_power_state(data, active, timeout);
638     if (ret < 0) {
639     @@ -179,9 +179,9 @@ static int sirf_set_active(struct sirf_data *data, bool active)
640     }
641    
642     break;
643     - }
644     + } while (retries--);
645    
646     - if (retries == 0)
647     + if (retries < 0)
648     return -ETIMEDOUT;
649    
650     return 0;
651     diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
652     index d66b7a768ecd..945bd13e5e79 100644
653     --- a/drivers/gpio/gpio-mockup.c
654     +++ b/drivers/gpio/gpio-mockup.c
655     @@ -32,8 +32,8 @@
656     #define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
657    
658     enum {
659     - GPIO_MOCKUP_DIR_OUT = 0,
660     - GPIO_MOCKUP_DIR_IN = 1,
661     + GPIO_MOCKUP_DIR_IN = 0,
662     + GPIO_MOCKUP_DIR_OUT = 1,
663     };
664    
665     /*
666     @@ -135,7 +135,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
667     {
668     struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
669    
670     - return chip->lines[offset].dir;
671     + return !chip->lines[offset].dir;
672     }
673    
674     static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
675     diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
676     index c18712dabf93..9f3f166f1760 100644
677     --- a/drivers/gpio/gpio-pxa.c
678     +++ b/drivers/gpio/gpio-pxa.c
679     @@ -268,8 +268,8 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
680    
681     if (pxa_gpio_has_pinctrl()) {
682     ret = pinctrl_gpio_direction_input(chip->base + offset);
683     - if (!ret)
684     - return 0;
685     + if (ret)
686     + return ret;
687     }
688    
689     spin_lock_irqsave(&gpio_lock, flags);
690     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
691     index 6748cd7fc129..686a26de50f9 100644
692     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
693     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
694     @@ -626,6 +626,13 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
695     "dither",
696     amdgpu_dither_enum_list, sz);
697    
698     + if (amdgpu_device_has_dc_support(adev)) {
699     + adev->mode_info.max_bpc_property =
700     + drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
701     + if (!adev->mode_info.max_bpc_property)
702     + return -ENOMEM;
703     + }
704     +
705     return 0;
706     }
707    
708     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
709     index b9e9e8b02fb7..d1b4d9b6aae0 100644
710     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
711     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
712     @@ -339,6 +339,8 @@ struct amdgpu_mode_info {
713     struct drm_property *audio_property;
714     /* FMT dithering */
715     struct drm_property *dither_property;
716     + /* maximum number of bits per channel for monitor color */
717     + struct drm_property *max_bpc_property;
718     /* hardcoded DFP edid from BIOS */
719     struct edid *bios_hardcoded_edid;
720     int bios_hardcoded_edid_size;
721     diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
722     index 9333109b210d..1a744f964b30 100644
723     --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
724     +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
725     @@ -55,6 +55,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
726     MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
727     MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
728     MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
729     +MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
730     +MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
731     +MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
732    
733     static const u32 golden_settings_tonga_a11[] =
734     {
735     @@ -223,13 +226,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
736     chip_name = "tonga";
737     break;
738     case CHIP_POLARIS11:
739     - chip_name = "polaris11";
740     + if (((adev->pdev->device == 0x67ef) &&
741     + ((adev->pdev->revision == 0xe0) ||
742     + (adev->pdev->revision == 0xe5))) ||
743     + ((adev->pdev->device == 0x67ff) &&
744     + ((adev->pdev->revision == 0xcf) ||
745     + (adev->pdev->revision == 0xef) ||
746     + (adev->pdev->revision == 0xff))))
747     + chip_name = "polaris11_k";
748     + else if ((adev->pdev->device == 0x67ef) &&
749     + (adev->pdev->revision == 0xe2))
750     + chip_name = "polaris11_k";
751     + else
752     + chip_name = "polaris11";
753     break;
754     case CHIP_POLARIS10:
755     - chip_name = "polaris10";
756     + if ((adev->pdev->device == 0x67df) &&
757     + ((adev->pdev->revision == 0xe1) ||
758     + (adev->pdev->revision == 0xf7)))
759     + chip_name = "polaris10_k";
760     + else
761     + chip_name = "polaris10";
762     break;
763     case CHIP_POLARIS12:
764     - chip_name = "polaris12";
765     + if (((adev->pdev->device == 0x6987) &&
766     + ((adev->pdev->revision == 0xc0) ||
767     + (adev->pdev->revision == 0xc3))) ||
768     + ((adev->pdev->device == 0x6981) &&
769     + ((adev->pdev->revision == 0x00) ||
770     + (adev->pdev->revision == 0x01) ||
771     + (adev->pdev->revision == 0x10))))
772     + chip_name = "polaris12_k";
773     + else
774     + chip_name = "polaris12";
775     break;
776     case CHIP_FIJI:
777     case CHIP_CARRIZO:
778     @@ -336,7 +365,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
779     const struct mc_firmware_header_v1_0 *hdr;
780     const __le32 *fw_data = NULL;
781     const __le32 *io_mc_regs = NULL;
782     - u32 data, vbios_version;
783     + u32 data;
784     int i, ucode_size, regs_size;
785    
786     /* Skip MC ucode loading on SR-IOV capable boards.
787     @@ -347,13 +376,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
788     if (amdgpu_sriov_bios(adev))
789     return 0;
790    
791     - WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
792     - data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
793     - vbios_version = data & 0xf;
794     -
795     - if (vbios_version == 0)
796     - return 0;
797     -
798     if (!adev->gmc.fw)
799     return -EINVAL;
800    
801     diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
802     index ef5c6af4d964..299def84e69c 100644
803     --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
804     +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
805     @@ -2213,8 +2213,15 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
806     static enum dc_color_depth
807     convert_color_depth_from_display_info(const struct drm_connector *connector)
808     {
809     + struct dm_connector_state *dm_conn_state =
810     + to_dm_connector_state(connector->state);
811     uint32_t bpc = connector->display_info.bpc;
812    
813     + /* TODO: Remove this when there's support for max_bpc in drm */
814     + if (dm_conn_state && bpc > dm_conn_state->max_bpc)
815     + /* Round down to nearest even number. */
816     + bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
817     +
818     switch (bpc) {
819     case 0:
820     /* Temporary Work around, DRM don't parse color depth for
821     @@ -2796,6 +2803,9 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
822     } else if (property == adev->mode_info.underscan_property) {
823     dm_new_state->underscan_enable = val;
824     ret = 0;
825     + } else if (property == adev->mode_info.max_bpc_property) {
826     + dm_new_state->max_bpc = val;
827     + ret = 0;
828     }
829    
830     return ret;
831     @@ -2838,6 +2848,9 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
832     } else if (property == adev->mode_info.underscan_property) {
833     *val = dm_state->underscan_enable;
834     ret = 0;
835     + } else if (property == adev->mode_info.max_bpc_property) {
836     + *val = dm_state->max_bpc;
837     + ret = 0;
838     }
839     return ret;
840     }
841     @@ -3658,6 +3671,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
842     drm_object_attach_property(&aconnector->base.base,
843     adev->mode_info.underscan_vborder_property,
844     0);
845     + drm_object_attach_property(&aconnector->base.base,
846     + adev->mode_info.max_bpc_property,
847     + 0);
848    
849     }
850    
851     diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
852     index aba2c5c1d2f8..74aedcffc4bb 100644
853     --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
854     +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
855     @@ -213,6 +213,7 @@ struct dm_connector_state {
856     enum amdgpu_rmx_type scaling;
857     uint8_t underscan_vborder;
858     uint8_t underscan_hborder;
859     + uint8_t max_bpc;
860     bool underscan_enable;
861     struct mod_freesync_user_enable user_enable;
862     bool freesync_capable;
863     diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
864     index 40179c5fc6b8..8750f3f02b3f 100644
865     --- a/drivers/gpu/drm/drm_internal.h
866     +++ b/drivers/gpu/drm/drm_internal.h
867     @@ -99,6 +99,8 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
868     int drm_sysfs_connector_add(struct drm_connector *connector);
869     void drm_sysfs_connector_remove(struct drm_connector *connector);
870    
871     +void drm_sysfs_lease_event(struct drm_device *dev);
872     +
873     /* drm_gem.c */
874     int drm_gem_init(struct drm_device *dev);
875     void drm_gem_destroy(struct drm_device *dev);
876     diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
877     index b82da96ded5c..fe6bfaf8b53f 100644
878     --- a/drivers/gpu/drm/drm_lease.c
879     +++ b/drivers/gpu/drm/drm_lease.c
880     @@ -296,7 +296,7 @@ void drm_lease_destroy(struct drm_master *master)
881    
882     if (master->lessor) {
883     /* Tell the master to check the lessee list */
884     - drm_sysfs_hotplug_event(dev);
885     + drm_sysfs_lease_event(dev);
886     drm_master_put(&master->lessor);
887     }
888    
889     diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
890     index b3c1daad1169..ecb7b33002bb 100644
891     --- a/drivers/gpu/drm/drm_sysfs.c
892     +++ b/drivers/gpu/drm/drm_sysfs.c
893     @@ -301,6 +301,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
894     connector->kdev = NULL;
895     }
896    
897     +void drm_sysfs_lease_event(struct drm_device *dev)
898     +{
899     + char *event_string = "LEASE=1";
900     + char *envp[] = { event_string, NULL };
901     +
902     + DRM_DEBUG("generating lease event\n");
903     +
904     + kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
905     +}
906     +
907     /**
908     * drm_sysfs_hotplug_event - generate a DRM uevent
909     * @dev: DRM device
910     diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
911     index c3a64d6a18df..425df814de75 100644
912     --- a/drivers/gpu/drm/i915/intel_pm.c
913     +++ b/drivers/gpu/drm/i915/intel_pm.c
914     @@ -2951,8 +2951,8 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
915     unsigned int latency = wm[level];
916    
917     if (latency == 0) {
918     - DRM_ERROR("%s WM%d latency not provided\n",
919     - name, level);
920     + DRM_DEBUG_KMS("%s WM%d latency not provided\n",
921     + name, level);
922     continue;
923     }
924    
925     diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
926     index 7bd83e0afa97..c3ae7507d1c7 100644
927     --- a/drivers/gpu/drm/msm/msm_gem_submit.c
928     +++ b/drivers/gpu/drm/msm/msm_gem_submit.c
929     @@ -410,7 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
930     struct msm_file_private *ctx = file->driver_priv;
931     struct msm_gem_submit *submit;
932     struct msm_gpu *gpu = priv->gpu;
933     - struct dma_fence *in_fence = NULL;
934     struct sync_file *sync_file = NULL;
935     struct msm_gpu_submitqueue *queue;
936     struct msm_ringbuffer *ring;
937     @@ -443,6 +442,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
938     ring = gpu->rb[queue->prio];
939    
940     if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
941     + struct dma_fence *in_fence;
942     +
943     in_fence = sync_file_get_fence(args->fence_fd);
944    
945     if (!in_fence)
946     @@ -452,11 +453,13 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
947     * Wait if the fence is from a foreign context, or if the fence
948     * array contains any fence from a foreign context.
949     */
950     - if (!dma_fence_match_context(in_fence, ring->fctx->context)) {
951     + ret = 0;
952     + if (!dma_fence_match_context(in_fence, ring->fctx->context))
953     ret = dma_fence_wait(in_fence, true);
954     - if (ret)
955     - return ret;
956     - }
957     +
958     + dma_fence_put(in_fence);
959     + if (ret)
960     + return ret;
961     }
962    
963     ret = mutex_lock_interruptible(&dev->struct_mutex);
964     @@ -582,8 +585,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
965     }
966    
967     out:
968     - if (in_fence)
969     - dma_fence_put(in_fence);
970     submit_cleanup(submit);
971     if (ret)
972     msm_gem_submit_free(submit);
973     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
974     index 501c05cbec7e..46182d4dd1ce 100644
975     --- a/drivers/hid/hid-ids.h
976     +++ b/drivers/hid/hid-ids.h
977     @@ -271,6 +271,9 @@
978    
979     #define USB_VENDOR_ID_CIDC 0x1677
980    
981     +#define I2C_VENDOR_ID_CIRQUE 0x0488
982     +#define I2C_PRODUCT_ID_CIRQUE_121F 0x121F
983     +
984     #define USB_VENDOR_ID_CJTOUCH 0x24b8
985     #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020
986     #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040
987     @@ -931,6 +934,10 @@
988     #define USB_VENDOR_ID_REALTEK 0x0bda
989     #define USB_DEVICE_ID_REALTEK_READER 0x0152
990    
991     +#define USB_VENDOR_ID_RETROUSB 0xf000
992     +#define USB_DEVICE_ID_RETROUSB_SNES_RETROPAD 0x0003
993     +#define USB_DEVICE_ID_RETROUSB_SNES_RETROPORT 0x00f1
994     +
995     #define USB_VENDOR_ID_ROCCAT 0x1e7d
996     #define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
997     #define USB_DEVICE_ID_ROCCAT_ISKU 0x319c
998     @@ -1038,6 +1045,7 @@
999     #define USB_VENDOR_ID_SYMBOL 0x05e0
1000     #define USB_DEVICE_ID_SYMBOL_SCANNER_1 0x0800
1001     #define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300
1002     +#define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200
1003    
1004     #define USB_VENDOR_ID_SYNAPTICS 0x06cb
1005     #define USB_DEVICE_ID_SYNAPTICS_TP 0x0001
1006     diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
1007     index a481eaf39e88..a3916e58dbf5 100644
1008     --- a/drivers/hid/hid-input.c
1009     +++ b/drivers/hid/hid-input.c
1010     @@ -325,6 +325,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
1011     { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM,
1012     USB_DEVICE_ID_ELECOM_BM084),
1013     HID_BATTERY_QUIRK_IGNORE },
1014     + { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL,
1015     + USB_DEVICE_ID_SYMBOL_SCANNER_3),
1016     + HID_BATTERY_QUIRK_IGNORE },
1017     {}
1018     };
1019    
1020     diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
1021     index da954f3f4da7..2faf5421fdd0 100644
1022     --- a/drivers/hid/hid-multitouch.c
1023     +++ b/drivers/hid/hid-multitouch.c
1024     @@ -1822,6 +1822,12 @@ static const struct hid_device_id mt_devices[] = {
1025     MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
1026     USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
1027    
1028     + /* Cirque devices */
1029     + { .driver_data = MT_CLS_WIN_8_DUAL,
1030     + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
1031     + I2C_VENDOR_ID_CIRQUE,
1032     + I2C_PRODUCT_ID_CIRQUE_121F) },
1033     +
1034     /* CJTouch panels */
1035     { .driver_data = MT_CLS_NSMU,
1036     MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,
1037     diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
1038     index 0a0605a7e481..77316f022c5a 100644
1039     --- a/drivers/hid/hid-quirks.c
1040     +++ b/drivers/hid/hid-quirks.c
1041     @@ -136,6 +136,8 @@ static const struct hid_device_id hid_quirks[] = {
1042     { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET },
1043     { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET },
1044     { HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS },
1045     + { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
1046     + { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
1047     { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
1048     { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
1049     { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
1050     diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
1051     index 051639c09f72..840634e0f1e3 100644
1052     --- a/drivers/hid/uhid.c
1053     +++ b/drivers/hid/uhid.c
1054     @@ -497,12 +497,13 @@ static int uhid_dev_create2(struct uhid_device *uhid,
1055     goto err_free;
1056     }
1057    
1058     - len = min(sizeof(hid->name), sizeof(ev->u.create2.name));
1059     - strlcpy(hid->name, ev->u.create2.name, len);
1060     - len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys));
1061     - strlcpy(hid->phys, ev->u.create2.phys, len);
1062     - len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq));
1063     - strlcpy(hid->uniq, ev->u.create2.uniq, len);
1064     + /* @hid is zero-initialized, strncpy() is correct, strlcpy() not */
1065     + len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1;
1066     + strncpy(hid->name, ev->u.create2.name, len);
1067     + len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1;
1068     + strncpy(hid->phys, ev->u.create2.phys, len);
1069     + len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1;
1070     + strncpy(hid->uniq, ev->u.create2.uniq, len);
1071    
1072     hid->ll_driver = &uhid_hid_driver;
1073     hid->bus = ev->u.create2.bus;
1074     diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
1075     index c4a1ebcfffb6..16eb9b3f1cb1 100644
1076     --- a/drivers/hv/channel_mgmt.c
1077     +++ b/drivers/hv/channel_mgmt.c
1078     @@ -447,61 +447,16 @@ void vmbus_free_channels(void)
1079     }
1080     }
1081    
1082     -/*
1083     - * vmbus_process_offer - Process the offer by creating a channel/device
1084     - * associated with this offer
1085     - */
1086     -static void vmbus_process_offer(struct vmbus_channel *newchannel)
1087     +/* Note: the function can run concurrently for primary/sub channels. */
1088     +static void vmbus_add_channel_work(struct work_struct *work)
1089     {
1090     - struct vmbus_channel *channel;
1091     - bool fnew = true;
1092     + struct vmbus_channel *newchannel =
1093     + container_of(work, struct vmbus_channel, add_channel_work);
1094     + struct vmbus_channel *primary_channel = newchannel->primary_channel;
1095     unsigned long flags;
1096     u16 dev_type;
1097     int ret;
1098    
1099     - /* Make sure this is a new offer */
1100     - mutex_lock(&vmbus_connection.channel_mutex);
1101     -
1102     - /*
1103     - * Now that we have acquired the channel_mutex,
1104     - * we can release the potentially racing rescind thread.
1105     - */
1106     - atomic_dec(&vmbus_connection.offer_in_progress);
1107     -
1108     - list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
1109     - if (!uuid_le_cmp(channel->offermsg.offer.if_type,
1110     - newchannel->offermsg.offer.if_type) &&
1111     - !uuid_le_cmp(channel->offermsg.offer.if_instance,
1112     - newchannel->offermsg.offer.if_instance)) {
1113     - fnew = false;
1114     - break;
1115     - }
1116     - }
1117     -
1118     - if (fnew)
1119     - list_add_tail(&newchannel->listentry,
1120     - &vmbus_connection.chn_list);
1121     -
1122     - mutex_unlock(&vmbus_connection.channel_mutex);
1123     -
1124     - if (!fnew) {
1125     - /*
1126     - * Check to see if this is a sub-channel.
1127     - */
1128     - if (newchannel->offermsg.offer.sub_channel_index != 0) {
1129     - /*
1130     - * Process the sub-channel.
1131     - */
1132     - newchannel->primary_channel = channel;
1133     - spin_lock_irqsave(&channel->lock, flags);
1134     - list_add_tail(&newchannel->sc_list, &channel->sc_list);
1135     - channel->num_sc++;
1136     - spin_unlock_irqrestore(&channel->lock, flags);
1137     - } else {
1138     - goto err_free_chan;
1139     - }
1140     - }
1141     -
1142     dev_type = hv_get_dev_type(newchannel);
1143    
1144     init_vp_index(newchannel, dev_type);
1145     @@ -519,27 +474,26 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
1146     /*
1147     * This state is used to indicate a successful open
1148     * so that when we do close the channel normally, we
1149     - * can cleanup properly
1150     + * can cleanup properly.
1151     */
1152     newchannel->state = CHANNEL_OPEN_STATE;
1153    
1154     - if (!fnew) {
1155     - struct hv_device *dev
1156     - = newchannel->primary_channel->device_obj;
1157     + if (primary_channel != NULL) {
1158     + /* newchannel is a sub-channel. */
1159     + struct hv_device *dev = primary_channel->device_obj;
1160    
1161     if (vmbus_add_channel_kobj(dev, newchannel))
1162     - goto err_free_chan;
1163     + goto err_deq_chan;
1164     +
1165     + if (primary_channel->sc_creation_callback != NULL)
1166     + primary_channel->sc_creation_callback(newchannel);
1167    
1168     - if (channel->sc_creation_callback != NULL)
1169     - channel->sc_creation_callback(newchannel);
1170     newchannel->probe_done = true;
1171     return;
1172     }
1173    
1174     /*
1175     - * Start the process of binding this offer to the driver
1176     - * We need to set the DeviceObject field before calling
1177     - * vmbus_child_dev_add()
1178     + * Start the process of binding the primary channel to the driver
1179     */
1180     newchannel->device_obj = vmbus_device_create(
1181     &newchannel->offermsg.offer.if_type,
1182     @@ -568,13 +522,28 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
1183    
1184     err_deq_chan:
1185     mutex_lock(&vmbus_connection.channel_mutex);
1186     - list_del(&newchannel->listentry);
1187     +
1188     + /*
1189     + * We need to set the flag, otherwise
1190     + * vmbus_onoffer_rescind() can be blocked.
1191     + */
1192     + newchannel->probe_done = true;
1193     +
1194     + if (primary_channel == NULL) {
1195     + list_del(&newchannel->listentry);
1196     + } else {
1197     + spin_lock_irqsave(&primary_channel->lock, flags);
1198     + list_del(&newchannel->sc_list);
1199     + spin_unlock_irqrestore(&primary_channel->lock, flags);
1200     + }
1201     +
1202     mutex_unlock(&vmbus_connection.channel_mutex);
1203    
1204     if (newchannel->target_cpu != get_cpu()) {
1205     put_cpu();
1206     smp_call_function_single(newchannel->target_cpu,
1207     - percpu_channel_deq, newchannel, true);
1208     + percpu_channel_deq,
1209     + newchannel, true);
1210     } else {
1211     percpu_channel_deq(newchannel);
1212     put_cpu();
1213     @@ -582,14 +551,104 @@ err_deq_chan:
1214    
1215     vmbus_release_relid(newchannel->offermsg.child_relid);
1216    
1217     -err_free_chan:
1218     free_channel(newchannel);
1219     }
1220    
1221     +/*
1222     + * vmbus_process_offer - Process the offer by creating a channel/device
1223     + * associated with this offer
1224     + */
1225     +static void vmbus_process_offer(struct vmbus_channel *newchannel)
1226     +{
1227     + struct vmbus_channel *channel;
1228     + struct workqueue_struct *wq;
1229     + unsigned long flags;
1230     + bool fnew = true;
1231     +
1232     + mutex_lock(&vmbus_connection.channel_mutex);
1233     +
1234     + /*
1235     + * Now that we have acquired the channel_mutex,
1236     + * we can release the potentially racing rescind thread.
1237     + */
1238     + atomic_dec(&vmbus_connection.offer_in_progress);
1239     +
1240     + list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
1241     + if (!uuid_le_cmp(channel->offermsg.offer.if_type,
1242     + newchannel->offermsg.offer.if_type) &&
1243     + !uuid_le_cmp(channel->offermsg.offer.if_instance,
1244     + newchannel->offermsg.offer.if_instance)) {
1245     + fnew = false;
1246     + break;
1247     + }
1248     + }
1249     +
1250     + if (fnew)
1251     + list_add_tail(&newchannel->listentry,
1252     + &vmbus_connection.chn_list);
1253     + else {
1254     + /*
1255     + * Check to see if this is a valid sub-channel.
1256     + */
1257     + if (newchannel->offermsg.offer.sub_channel_index == 0) {
1258     + mutex_unlock(&vmbus_connection.channel_mutex);
1259     + /*
1260     + * Don't call free_channel(), because newchannel->kobj
1261     + * is not initialized yet.
1262     + */
1263     + kfree(newchannel);
1264     + WARN_ON_ONCE(1);
1265     + return;
1266     + }
1267     + /*
1268     + * Process the sub-channel.
1269     + */
1270     + newchannel->primary_channel = channel;
1271     + spin_lock_irqsave(&channel->lock, flags);
1272     + list_add_tail(&newchannel->sc_list, &channel->sc_list);
1273     + spin_unlock_irqrestore(&channel->lock, flags);
1274     + }
1275     +
1276     + mutex_unlock(&vmbus_connection.channel_mutex);
1277     +
1278     + /*
1279     + * vmbus_process_offer() mustn't call channel->sc_creation_callback()
1280     + * directly for sub-channels, because sc_creation_callback() ->
1281     + * vmbus_open() may never get the host's response to the
1282     + * OPEN_CHANNEL message (the host may rescind a channel at any time,
1283     + * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
1284     + * may not wake up the vmbus_open() as it's blocked due to a non-zero
1285     + * vmbus_connection.offer_in_progress, and finally we have a deadlock.
1286     + *
1287     + * The above is also true for primary channels, if the related device
1288     + * drivers use sync probing mode by default.
1289     + *
1290     + * And, usually the handling of primary channels and sub-channels can
1291     + * depend on each other, so we should offload them to different
1292     + * workqueues to avoid possible deadlock, e.g. in sync-probing mode,
1293     + * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() ->
1294     + * rtnl_lock(), and causes deadlock: the former gets the rtnl_lock
1295     + * and waits for all the sub-channels to appear, but the latter
1296     + * can't get the rtnl_lock and this blocks the handling of
1297     + * sub-channels.
1298     + */
1299     + INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
1300     + wq = fnew ? vmbus_connection.handle_primary_chan_wq :
1301     + vmbus_connection.handle_sub_chan_wq;
1302     + queue_work(wq, &newchannel->add_channel_work);
1303     +}
1304     +
1305     /*
1306     * We use this state to statically distribute the channel interrupt load.
1307     */
1308     static int next_numa_node_id;
1309     +/*
1310     + * init_vp_index() accesses global variables like next_numa_node_id, and
1311     + * it can run concurrently for primary channels and sub-channels: see
1312     + * vmbus_process_offer(), so we need the lock to protect the global
1313     + * variables.
1314     + */
1315     +static DEFINE_SPINLOCK(bind_channel_to_cpu_lock);
1316    
1317     /*
1318     * Starting with Win8, we can statically distribute the incoming
1319     @@ -625,6 +684,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
1320     return;
1321     }
1322    
1323     + spin_lock(&bind_channel_to_cpu_lock);
1324     +
1325     /*
1326     * Based on the channel affinity policy, we will assign the NUMA
1327     * nodes.
1328     @@ -707,6 +768,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
1329     channel->target_cpu = cur_cpu;
1330     channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
1331    
1332     + spin_unlock(&bind_channel_to_cpu_lock);
1333     +
1334     free_cpumask_var(available_mask);
1335     }
1336    
1337     diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
1338     index f4d08c8ac7f8..4fe117b761ce 100644
1339     --- a/drivers/hv/connection.c
1340     +++ b/drivers/hv/connection.c
1341     @@ -190,6 +190,20 @@ int vmbus_connect(void)
1342     goto cleanup;
1343     }
1344    
1345     + vmbus_connection.handle_primary_chan_wq =
1346     + create_workqueue("hv_pri_chan");
1347     + if (!vmbus_connection.handle_primary_chan_wq) {
1348     + ret = -ENOMEM;
1349     + goto cleanup;
1350     + }
1351     +
1352     + vmbus_connection.handle_sub_chan_wq =
1353     + create_workqueue("hv_sub_chan");
1354     + if (!vmbus_connection.handle_sub_chan_wq) {
1355     + ret = -ENOMEM;
1356     + goto cleanup;
1357     + }
1358     +
1359     INIT_LIST_HEAD(&vmbus_connection.chn_msg_list);
1360     spin_lock_init(&vmbus_connection.channelmsg_lock);
1361    
1362     @@ -280,10 +294,14 @@ void vmbus_disconnect(void)
1363     */
1364     vmbus_initiate_unload(false);
1365    
1366     - if (vmbus_connection.work_queue) {
1367     - drain_workqueue(vmbus_connection.work_queue);
1368     + if (vmbus_connection.handle_sub_chan_wq)
1369     + destroy_workqueue(vmbus_connection.handle_sub_chan_wq);
1370     +
1371     + if (vmbus_connection.handle_primary_chan_wq)
1372     + destroy_workqueue(vmbus_connection.handle_primary_chan_wq);
1373     +
1374     + if (vmbus_connection.work_queue)
1375     destroy_workqueue(vmbus_connection.work_queue);
1376     - }
1377    
1378     if (vmbus_connection.int_page) {
1379     free_pages((unsigned long)vmbus_connection.int_page, 0);
1380     diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
1381     index 72eaba3d50fc..87d3d7da78f8 100644
1382     --- a/drivers/hv/hyperv_vmbus.h
1383     +++ b/drivers/hv/hyperv_vmbus.h
1384     @@ -335,7 +335,14 @@ struct vmbus_connection {
1385     struct list_head chn_list;
1386     struct mutex channel_mutex;
1387    
1388     + /*
1389     + * An offer message is handled first on the work_queue, and then
1390     + * is further handled on handle_primary_chan_wq or
1391     + * handle_sub_chan_wq.
1392     + */
1393     struct workqueue_struct *work_queue;
1394     + struct workqueue_struct *handle_primary_chan_wq;
1395     + struct workqueue_struct *handle_sub_chan_wq;
1396     };
1397    
1398    
1399     diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
1400     index 84b3e4445d46..e062ab9687c7 100644
1401     --- a/drivers/iommu/amd_iommu_init.c
1402     +++ b/drivers/iommu/amd_iommu_init.c
1403     @@ -797,7 +797,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
1404     entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
1405     memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
1406     &entry, sizeof(entry));
1407     - entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
1408     + entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
1409     + (BIT_ULL(52)-1)) & ~7ULL;
1410     memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
1411     &entry, sizeof(entry));
1412     writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
1413     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1414     index bedc801b06a0..a76c47f20587 100644
1415     --- a/drivers/iommu/intel-iommu.c
1416     +++ b/drivers/iommu/intel-iommu.c
1417     @@ -3100,7 +3100,7 @@ static int copy_context_table(struct intel_iommu *iommu,
1418     }
1419    
1420     if (old_ce)
1421     - iounmap(old_ce);
1422     + memunmap(old_ce);
1423    
1424     ret = 0;
1425     if (devfn < 0x80)
1426     diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
1427     index 4a03e5090952..188f4eaed6e5 100644
1428     --- a/drivers/iommu/intel-svm.c
1429     +++ b/drivers/iommu/intel-svm.c
1430     @@ -596,7 +596,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
1431     pr_err("%s: Page request without PASID: %08llx %08llx\n",
1432     iommu->name, ((unsigned long long *)req)[0],
1433     ((unsigned long long *)req)[1]);
1434     - goto bad_req;
1435     + goto no_pasid;
1436     }
1437    
1438     if (!svm || svm->pasid != req->pasid) {
1439     diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
1440     index 22b94f8a9a04..d8598e44e381 100644
1441     --- a/drivers/iommu/ipmmu-vmsa.c
1442     +++ b/drivers/iommu/ipmmu-vmsa.c
1443     @@ -501,6 +501,9 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
1444    
1445     static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
1446     {
1447     + if (!domain->mmu)
1448     + return;
1449     +
1450     /*
1451     * Disable the context. Flush the TLB as required when modifying the
1452     * context registers.
1453     diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
1454     index dd8bad74a1f0..a537e518384b 100644
1455     --- a/drivers/media/cec/cec-adap.c
1456     +++ b/drivers/media/cec/cec-adap.c
1457     @@ -1167,6 +1167,8 @@ static int cec_config_log_addr(struct cec_adapter *adap,
1458     {
1459     struct cec_log_addrs *las = &adap->log_addrs;
1460     struct cec_msg msg = { };
1461     + const unsigned int max_retries = 2;
1462     + unsigned int i;
1463     int err;
1464    
1465     if (cec_has_log_addr(adap, log_addr))
1466     @@ -1175,19 +1177,44 @@ static int cec_config_log_addr(struct cec_adapter *adap,
1467     /* Send poll message */
1468     msg.len = 1;
1469     msg.msg[0] = (log_addr << 4) | log_addr;
1470     - err = cec_transmit_msg_fh(adap, &msg, NULL, true);
1471    
1472     - /*
1473     - * While trying to poll the physical address was reset
1474     - * and the adapter was unconfigured, so bail out.
1475     - */
1476     - if (!adap->is_configuring)
1477     - return -EINTR;
1478     + for (i = 0; i < max_retries; i++) {
1479     + err = cec_transmit_msg_fh(adap, &msg, NULL, true);
1480    
1481     - if (err)
1482     - return err;
1483     + /*
1484     + * While trying to poll the physical address was reset
1485     + * and the adapter was unconfigured, so bail out.
1486     + */
1487     + if (!adap->is_configuring)
1488     + return -EINTR;
1489     +
1490     + if (err)
1491     + return err;
1492    
1493     - if (msg.tx_status & CEC_TX_STATUS_OK)
1494     + /*
1495     + * The message was aborted due to a disconnect or
1496     + * unconfigure, just bail out.
1497     + */
1498     + if (msg.tx_status & CEC_TX_STATUS_ABORTED)
1499     + return -EINTR;
1500     + if (msg.tx_status & CEC_TX_STATUS_OK)
1501     + return 0;
1502     + if (msg.tx_status & CEC_TX_STATUS_NACK)
1503     + break;
1504     + /*
1505     + * Retry up to max_retries times if the message was neither
1506     + * OKed nor NACKed. This can happen due to e.g. a Lost
1507     + * Arbitration condition.
1508     + */
1509     + }
1510     +
1511     + /*
1512     + * If we are unable to get an OK or a NACK after max_retries attempts
1513     + * (and note that each attempt already consists of four polls), then
1514     + * we assume that something is really weird and that it is not a
1515     + * good idea to try and claim this logical address.
1516     + */
1517     + if (i == max_retries)
1518     return 0;
1519    
1520     /*
1521     diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
1522     index 6d4b2eec67b4..29836c1a40e9 100644
1523     --- a/drivers/media/dvb-frontends/dvb-pll.c
1524     +++ b/drivers/media/dvb-frontends/dvb-pll.c
1525     @@ -80,8 +80,8 @@ struct dvb_pll_desc {
1526    
1527     static const struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
1528     .name = "Thomson dtt7579",
1529     - .min = 177000000,
1530     - .max = 858000000,
1531     + .min = 177 * MHz,
1532     + .max = 858 * MHz,
1533     .iffreq= 36166667,
1534     .sleepdata = (u8[]){ 2, 0xb4, 0x03 },
1535     .count = 4,
1536     @@ -102,8 +102,8 @@ static void thomson_dtt759x_bw(struct dvb_frontend *fe, u8 *buf)
1537    
1538     static const struct dvb_pll_desc dvb_pll_thomson_dtt759x = {
1539     .name = "Thomson dtt759x",
1540     - .min = 177000000,
1541     - .max = 896000000,
1542     + .min = 177 * MHz,
1543     + .max = 896 * MHz,
1544     .set = thomson_dtt759x_bw,
1545     .iffreq= 36166667,
1546     .sleepdata = (u8[]){ 2, 0x84, 0x03 },
1547     @@ -126,8 +126,8 @@ static void thomson_dtt7520x_bw(struct dvb_frontend *fe, u8 *buf)
1548    
1549     static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = {
1550     .name = "Thomson dtt7520x",
1551     - .min = 185000000,
1552     - .max = 900000000,
1553     + .min = 185 * MHz,
1554     + .max = 900 * MHz,
1555     .set = thomson_dtt7520x_bw,
1556     .iffreq = 36166667,
1557     .count = 7,
1558     @@ -144,8 +144,8 @@ static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = {
1559    
1560     static const struct dvb_pll_desc dvb_pll_lg_z201 = {
1561     .name = "LG z201",
1562     - .min = 174000000,
1563     - .max = 862000000,
1564     + .min = 174 * MHz,
1565     + .max = 862 * MHz,
1566     .iffreq= 36166667,
1567     .sleepdata = (u8[]){ 2, 0xbc, 0x03 },
1568     .count = 5,
1569     @@ -160,8 +160,8 @@ static const struct dvb_pll_desc dvb_pll_lg_z201 = {
1570    
1571     static const struct dvb_pll_desc dvb_pll_unknown_1 = {
1572     .name = "unknown 1", /* used by dntv live dvb-t */
1573     - .min = 174000000,
1574     - .max = 862000000,
1575     + .min = 174 * MHz,
1576     + .max = 862 * MHz,
1577     .iffreq= 36166667,
1578     .count = 9,
1579     .entries = {
1580     @@ -182,8 +182,8 @@ static const struct dvb_pll_desc dvb_pll_unknown_1 = {
1581     */
1582     static const struct dvb_pll_desc dvb_pll_tua6010xs = {
1583     .name = "Infineon TUA6010XS",
1584     - .min = 44250000,
1585     - .max = 858000000,
1586     + .min = 44250 * kHz,
1587     + .max = 858 * MHz,
1588     .iffreq= 36125000,
1589     .count = 3,
1590     .entries = {
1591     @@ -196,8 +196,8 @@ static const struct dvb_pll_desc dvb_pll_tua6010xs = {
1592     /* Panasonic env57h1xd5 (some Philips PLL ?) */
1593     static const struct dvb_pll_desc dvb_pll_env57h1xd5 = {
1594     .name = "Panasonic ENV57H1XD5",
1595     - .min = 44250000,
1596     - .max = 858000000,
1597     + .min = 44250 * kHz,
1598     + .max = 858 * MHz,
1599     .iffreq= 36125000,
1600     .count = 4,
1601     .entries = {
1602     @@ -220,8 +220,8 @@ static void tda665x_bw(struct dvb_frontend *fe, u8 *buf)
1603    
1604     static const struct dvb_pll_desc dvb_pll_tda665x = {
1605     .name = "Philips TDA6650/TDA6651",
1606     - .min = 44250000,
1607     - .max = 858000000,
1608     + .min = 44250 * kHz,
1609     + .max = 858 * MHz,
1610     .set = tda665x_bw,
1611     .iffreq= 36166667,
1612     .initdata = (u8[]){ 4, 0x0b, 0xf5, 0x85, 0xab },
1613     @@ -254,8 +254,8 @@ static void tua6034_bw(struct dvb_frontend *fe, u8 *buf)
1614    
1615     static const struct dvb_pll_desc dvb_pll_tua6034 = {
1616     .name = "Infineon TUA6034",
1617     - .min = 44250000,
1618     - .max = 858000000,
1619     + .min = 44250 * kHz,
1620     + .max = 858 * MHz,
1621     .iffreq= 36166667,
1622     .count = 3,
1623     .set = tua6034_bw,
1624     @@ -278,8 +278,8 @@ static void tded4_bw(struct dvb_frontend *fe, u8 *buf)
1625    
1626     static const struct dvb_pll_desc dvb_pll_tded4 = {
1627     .name = "ALPS TDED4",
1628     - .min = 47000000,
1629     - .max = 863000000,
1630     + .min = 47 * MHz,
1631     + .max = 863 * MHz,
1632     .iffreq= 36166667,
1633     .set = tded4_bw,
1634     .count = 4,
1635     @@ -296,8 +296,8 @@ static const struct dvb_pll_desc dvb_pll_tded4 = {
1636     */
1637     static const struct dvb_pll_desc dvb_pll_tdhu2 = {
1638     .name = "ALPS TDHU2",
1639     - .min = 54000000,
1640     - .max = 864000000,
1641     + .min = 54 * MHz,
1642     + .max = 864 * MHz,
1643     .iffreq= 44000000,
1644     .count = 4,
1645     .entries = {
1646     @@ -313,8 +313,8 @@ static const struct dvb_pll_desc dvb_pll_tdhu2 = {
1647     */
1648     static const struct dvb_pll_desc dvb_pll_samsung_tbmv = {
1649     .name = "Samsung TBMV30111IN / TBMV30712IN1",
1650     - .min = 54000000,
1651     - .max = 860000000,
1652     + .min = 54 * MHz,
1653     + .max = 860 * MHz,
1654     .iffreq= 44000000,
1655     .count = 6,
1656     .entries = {
1657     @@ -332,8 +332,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmv = {
1658     */
1659     static const struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
1660     .name = "Philips SD1878",
1661     - .min = 950000,
1662     - .max = 2150000,
1663     + .min = 950 * MHz,
1664     + .max = 2150 * MHz,
1665     .iffreq= 249, /* zero-IF, offset 249 is to round up */
1666     .count = 4,
1667     .entries = {
1668     @@ -398,8 +398,8 @@ static void opera1_bw(struct dvb_frontend *fe, u8 *buf)
1669    
1670     static const struct dvb_pll_desc dvb_pll_opera1 = {
1671     .name = "Opera Tuner",
1672     - .min = 900000,
1673     - .max = 2250000,
1674     + .min = 900 * MHz,
1675     + .max = 2250 * MHz,
1676     .initdata = (u8[]){ 4, 0x08, 0xe5, 0xe1, 0x00 },
1677     .initdata2 = (u8[]){ 4, 0x08, 0xe5, 0xe5, 0x00 },
1678     .iffreq= 0,
1679     @@ -445,8 +445,8 @@ static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf)
1680     /* unknown pll used in Samsung DTOS403IH102A DVB-C tuner */
1681     static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = {
1682     .name = "Samsung DTOS403IH102A",
1683     - .min = 44250000,
1684     - .max = 858000000,
1685     + .min = 44250 * kHz,
1686     + .max = 858 * MHz,
1687     .iffreq = 36125000,
1688     .count = 8,
1689     .set = samsung_dtos403ih102a_set,
1690     @@ -465,8 +465,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = {
1691     /* Samsung TDTC9251DH0 DVB-T NIM, as used on AirStar 2 */
1692     static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = {
1693     .name = "Samsung TDTC9251DH0",
1694     - .min = 48000000,
1695     - .max = 863000000,
1696     + .min = 48 * MHz,
1697     + .max = 863 * MHz,
1698     .iffreq = 36166667,
1699     .count = 3,
1700     .entries = {
1701     @@ -479,8 +479,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = {
1702     /* Samsung TBDU18132 DVB-S NIM with TSA5059 PLL, used in SkyStar2 DVB-S 2.3 */
1703     static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = {
1704     .name = "Samsung TBDU18132",
1705     - .min = 950000,
1706     - .max = 2150000, /* guesses */
1707     + .min = 950 * MHz,
1708     + .max = 2150 * MHz, /* guesses */
1709     .iffreq = 0,
1710     .count = 2,
1711     .entries = {
1712     @@ -500,8 +500,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = {
1713     /* Samsung TBMU24112 DVB-S NIM with SL1935 zero-IF tuner */
1714     static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = {
1715     .name = "Samsung TBMU24112",
1716     - .min = 950000,
1717     - .max = 2150000, /* guesses */
1718     + .min = 950 * MHz,
1719     + .max = 2150 * MHz, /* guesses */
1720     .iffreq = 0,
1721     .count = 2,
1722     .entries = {
1723     @@ -521,8 +521,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = {
1724     * 822 - 862 1 * 0 0 1 0 0 0 0x88 */
1725     static const struct dvb_pll_desc dvb_pll_alps_tdee4 = {
1726     .name = "ALPS TDEE4",
1727     - .min = 47000000,
1728     - .max = 862000000,
1729     + .min = 47 * MHz,
1730     + .max = 862 * MHz,
1731     .iffreq = 36125000,
1732     .count = 4,
1733     .entries = {
1734     @@ -537,8 +537,8 @@ static const struct dvb_pll_desc dvb_pll_alps_tdee4 = {
1735     /* CP cur. 50uA, AGC takeover: 103dBuV, PORT3 on */
1736     static const struct dvb_pll_desc dvb_pll_tua6034_friio = {
1737     .name = "Infineon TUA6034 ISDB-T (Friio)",
1738     - .min = 90000000,
1739     - .max = 770000000,
1740     + .min = 90 * MHz,
1741     + .max = 770 * MHz,
1742     .iffreq = 57000000,
1743     .initdata = (u8[]){ 4, 0x9a, 0x50, 0xb2, 0x08 },
1744     .sleepdata = (u8[]){ 4, 0x9a, 0x70, 0xb3, 0x0b },
1745     @@ -553,8 +553,8 @@ static const struct dvb_pll_desc dvb_pll_tua6034_friio = {
1746     /* Philips TDA6651 ISDB-T, used in Earthsoft PT1 */
1747     static const struct dvb_pll_desc dvb_pll_tda665x_earth_pt1 = {
1748     .name = "Philips TDA6651 ISDB-T (EarthSoft PT1)",
1749     - .min = 90000000,
1750     - .max = 770000000,
1751     + .min = 90 * MHz,
1752     + .max = 770 * MHz,
1753     .iffreq = 57000000,
1754     .initdata = (u8[]){ 5, 0x0e, 0x7f, 0xc1, 0x80, 0x80 },
1755     .count = 10,
1756     @@ -610,9 +610,6 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf,
1757     u32 div;
1758     int i;
1759    
1760     - if (frequency && (frequency < desc->min || frequency > desc->max))
1761     - return -EINVAL;
1762     -
1763     for (i = 0; i < desc->count; i++) {
1764     if (frequency > desc->entries[i].limit)
1765     continue;
1766     @@ -799,7 +796,6 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
1767     struct dvb_pll_priv *priv = NULL;
1768     int ret;
1769     const struct dvb_pll_desc *desc;
1770     - struct dtv_frontend_properties *c = &fe->dtv_property_cache;
1771    
1772     b1 = kmalloc(1, GFP_KERNEL);
1773     if (!b1)
1774     @@ -845,18 +841,12 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
1775    
1776     strncpy(fe->ops.tuner_ops.info.name, desc->name,
1777     sizeof(fe->ops.tuner_ops.info.name));
1778     - switch (c->delivery_system) {
1779     - case SYS_DVBS:
1780     - case SYS_DVBS2:
1781     - case SYS_TURBO:
1782     - case SYS_ISDBS:
1783     - fe->ops.tuner_ops.info.frequency_min_hz = desc->min * kHz;
1784     - fe->ops.tuner_ops.info.frequency_max_hz = desc->max * kHz;
1785     - break;
1786     - default:
1787     - fe->ops.tuner_ops.info.frequency_min_hz = desc->min;
1788     - fe->ops.tuner_ops.info.frequency_max_hz = desc->max;
1789     - }
1790     +
1791     + fe->ops.tuner_ops.info.frequency_min_hz = desc->min;
1792     + fe->ops.tuner_ops.info.frequency_max_hz = desc->max;
1793     +
1794     + dprintk("%s tuner, frequency range: %u...%u\n",
1795     + desc->name, desc->min, desc->max);
1796    
1797     if (!desc->initdata)
1798     fe->ops.tuner_ops.init = NULL;
1799     diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
1800     index 29027159eced..ca1a4d8e972e 100644
1801     --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
1802     +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
1803     @@ -1846,12 +1846,12 @@ static void cio2_pci_remove(struct pci_dev *pci_dev)
1804     struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1805     unsigned int i;
1806    
1807     + media_device_unregister(&cio2->media_dev);
1808     cio2_notifier_exit(cio2);
1809     - cio2_fbpt_exit_dummy(cio2);
1810     for (i = 0; i < CIO2_QUEUES; i++)
1811     cio2_queue_exit(cio2, &cio2->queue[i]);
1812     + cio2_fbpt_exit_dummy(cio2);
1813     v4l2_device_unregister(&cio2->v4l2_dev);
1814     - media_device_unregister(&cio2->media_dev);
1815     media_device_cleanup(&cio2->media_dev);
1816     mutex_destroy(&cio2->lock);
1817     }
1818     diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
1819     index 842e2235047d..432bc7fbedc9 100644
1820     --- a/drivers/media/platform/omap3isp/isp.c
1821     +++ b/drivers/media/platform/omap3isp/isp.c
1822     @@ -1587,6 +1587,8 @@ static void isp_pm_complete(struct device *dev)
1823    
1824     static void isp_unregister_entities(struct isp_device *isp)
1825     {
1826     + media_device_unregister(&isp->media_dev);
1827     +
1828     omap3isp_csi2_unregister_entities(&isp->isp_csi2a);
1829     omap3isp_ccp2_unregister_entities(&isp->isp_ccp2);
1830     omap3isp_ccdc_unregister_entities(&isp->isp_ccdc);
1831     @@ -1597,7 +1599,6 @@ static void isp_unregister_entities(struct isp_device *isp)
1832     omap3isp_stat_unregister_entities(&isp->isp_hist);
1833    
1834     v4l2_device_unregister(&isp->v4l2_dev);
1835     - media_device_unregister(&isp->media_dev);
1836     media_device_cleanup(&isp->media_dev);
1837     }
1838    
1839     diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
1840     index 408cd55d3580..7a33a52eacca 100644
1841     --- a/drivers/media/platform/vicodec/vicodec-core.c
1842     +++ b/drivers/media/platform/vicodec/vicodec-core.c
1843     @@ -42,7 +42,7 @@ MODULE_PARM_DESC(debug, " activates debug info");
1844     #define MAX_WIDTH 4096U
1845     #define MIN_WIDTH 640U
1846     #define MAX_HEIGHT 2160U
1847     -#define MIN_HEIGHT 480U
1848     +#define MIN_HEIGHT 360U
1849    
1850     #define dprintk(dev, fmt, arg...) \
1851     v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
1852     @@ -438,7 +438,8 @@ restart:
1853     for (; p < p_out + sz; p++) {
1854     u32 copy;
1855    
1856     - p = memchr(p, magic[ctx->comp_magic_cnt], sz);
1857     + p = memchr(p, magic[ctx->comp_magic_cnt],
1858     + p_out + sz - p);
1859     if (!p) {
1860     ctx->comp_magic_cnt = 0;
1861     break;
1862     diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
1863     index 57aa521e16b1..405a6a76d820 100644
1864     --- a/drivers/media/usb/gspca/gspca.c
1865     +++ b/drivers/media/usb/gspca/gspca.c
1866     @@ -426,10 +426,10 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
1867    
1868     /* append the packet to the frame buffer */
1869     if (len > 0) {
1870     - if (gspca_dev->image_len + len > gspca_dev->pixfmt.sizeimage) {
1871     + if (gspca_dev->image_len + len > PAGE_ALIGN(gspca_dev->pixfmt.sizeimage)) {
1872     gspca_err(gspca_dev, "frame overflow %d > %d\n",
1873     gspca_dev->image_len + len,
1874     - gspca_dev->pixfmt.sizeimage);
1875     + PAGE_ALIGN(gspca_dev->pixfmt.sizeimage));
1876     packet_type = DISCARD_PACKET;
1877     } else {
1878     /* !! image is NULL only when last pkt is LAST or DISCARD
1879     @@ -1297,18 +1297,19 @@ static int gspca_queue_setup(struct vb2_queue *vq,
1880     unsigned int sizes[], struct device *alloc_devs[])
1881     {
1882     struct gspca_dev *gspca_dev = vb2_get_drv_priv(vq);
1883     + unsigned int size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage);
1884    
1885     if (*nplanes)
1886     - return sizes[0] < gspca_dev->pixfmt.sizeimage ? -EINVAL : 0;
1887     + return sizes[0] < size ? -EINVAL : 0;
1888     *nplanes = 1;
1889     - sizes[0] = gspca_dev->pixfmt.sizeimage;
1890     + sizes[0] = size;
1891     return 0;
1892     }
1893    
1894     static int gspca_buffer_prepare(struct vb2_buffer *vb)
1895     {
1896     struct gspca_dev *gspca_dev = vb2_get_drv_priv(vb->vb2_queue);
1897     - unsigned long size = gspca_dev->pixfmt.sizeimage;
1898     + unsigned long size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage);
1899    
1900     if (vb2_plane_size(vb, 0) < size) {
1901     gspca_err(gspca_dev, "buffer too small (%lu < %lu)\n",
1902     diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
1903     index 999dac752bcc..6b22d54a540d 100644
1904     --- a/drivers/mfd/cros_ec_dev.c
1905     +++ b/drivers/mfd/cros_ec_dev.c
1906     @@ -263,6 +263,11 @@ static const struct file_operations fops = {
1907     #endif
1908     };
1909    
1910     +static void cros_ec_class_release(struct device *dev)
1911     +{
1912     + kfree(to_cros_ec_dev(dev));
1913     +}
1914     +
1915     static void cros_ec_sensors_register(struct cros_ec_dev *ec)
1916     {
1917     /*
1918     @@ -395,7 +400,7 @@ static int ec_device_probe(struct platform_device *pdev)
1919     int retval = -ENOMEM;
1920     struct device *dev = &pdev->dev;
1921     struct cros_ec_platform *ec_platform = dev_get_platdata(dev);
1922     - struct cros_ec_dev *ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL);
1923     + struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL);
1924    
1925     if (!ec)
1926     return retval;
1927     @@ -417,6 +422,7 @@ static int ec_device_probe(struct platform_device *pdev)
1928     ec->class_dev.devt = MKDEV(ec_major, pdev->id);
1929     ec->class_dev.class = &cros_class;
1930     ec->class_dev.parent = dev;
1931     + ec->class_dev.release = cros_ec_class_release;
1932    
1933     retval = dev_set_name(&ec->class_dev, "%s", ec_platform->ec_name);
1934     if (retval) {
1935     diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
1936     index d1d470bb32e4..8815f3e2b718 100644
1937     --- a/drivers/mtd/nand/raw/qcom_nandc.c
1938     +++ b/drivers/mtd/nand/raw/qcom_nandc.c
1939     @@ -151,15 +151,15 @@
1940     #define NAND_VERSION_MINOR_SHIFT 16
1941    
1942     /* NAND OP_CMDs */
1943     -#define PAGE_READ 0x2
1944     -#define PAGE_READ_WITH_ECC 0x3
1945     -#define PAGE_READ_WITH_ECC_SPARE 0x4
1946     -#define PROGRAM_PAGE 0x6
1947     -#define PAGE_PROGRAM_WITH_ECC 0x7
1948     -#define PROGRAM_PAGE_SPARE 0x9
1949     -#define BLOCK_ERASE 0xa
1950     -#define FETCH_ID 0xb
1951     -#define RESET_DEVICE 0xd
1952     +#define OP_PAGE_READ 0x2
1953     +#define OP_PAGE_READ_WITH_ECC 0x3
1954     +#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
1955     +#define OP_PROGRAM_PAGE 0x6
1956     +#define OP_PAGE_PROGRAM_WITH_ECC 0x7
1957     +#define OP_PROGRAM_PAGE_SPARE 0x9
1958     +#define OP_BLOCK_ERASE 0xa
1959     +#define OP_FETCH_ID 0xb
1960     +#define OP_RESET_DEVICE 0xd
1961    
1962     /* Default Value for NAND_DEV_CMD_VLD */
1963     #define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
1964     @@ -692,11 +692,11 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
1965    
1966     if (read) {
1967     if (host->use_ecc)
1968     - cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
1969     + cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
1970     else
1971     - cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
1972     + cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
1973     } else {
1974     - cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
1975     + cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
1976     }
1977    
1978     if (host->use_ecc) {
1979     @@ -1170,7 +1170,7 @@ static int nandc_param(struct qcom_nand_host *host)
1980     * in use. we configure the controller to perform a raw read of 512
1981     * bytes to read onfi params
1982     */
1983     - nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
1984     + nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
1985     nandc_set_reg(nandc, NAND_ADDR0, 0);
1986     nandc_set_reg(nandc, NAND_ADDR1, 0);
1987     nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
1988     @@ -1224,7 +1224,7 @@ static int erase_block(struct qcom_nand_host *host, int page_addr)
1989     struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1990    
1991     nandc_set_reg(nandc, NAND_FLASH_CMD,
1992     - BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
1993     + OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
1994     nandc_set_reg(nandc, NAND_ADDR0, page_addr);
1995     nandc_set_reg(nandc, NAND_ADDR1, 0);
1996     nandc_set_reg(nandc, NAND_DEV0_CFG0,
1997     @@ -1255,7 +1255,7 @@ static int read_id(struct qcom_nand_host *host, int column)
1998     if (column == -1)
1999     return 0;
2000    
2001     - nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
2002     + nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
2003     nandc_set_reg(nandc, NAND_ADDR0, column);
2004     nandc_set_reg(nandc, NAND_ADDR1, 0);
2005     nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
2006     @@ -1276,7 +1276,7 @@ static int reset(struct qcom_nand_host *host)
2007     struct nand_chip *chip = &host->chip;
2008     struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2009    
2010     - nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
2011     + nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
2012     nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
2013    
2014     write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
2015     diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
2016     index 6e9cbd1a0b6d..0806c7a81c0f 100644
2017     --- a/drivers/mtd/spi-nor/cadence-quadspi.c
2018     +++ b/drivers/mtd/spi-nor/cadence-quadspi.c
2019     @@ -644,9 +644,23 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
2020     ndelay(cqspi->wr_delay);
2021    
2022     while (remaining > 0) {
2023     + size_t write_words, mod_bytes;
2024     +
2025     write_bytes = remaining > page_size ? page_size : remaining;
2026     - iowrite32_rep(cqspi->ahb_base, txbuf,
2027     - DIV_ROUND_UP(write_bytes, 4));
2028     + write_words = write_bytes / 4;
2029     + mod_bytes = write_bytes % 4;
2030     + /* Write 4 bytes at a time then single bytes. */
2031     + if (write_words) {
2032     + iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
2033     + txbuf += (write_words * 4);
2034     + }
2035     + if (mod_bytes) {
2036     + unsigned int temp = 0xFFFFFFFF;
2037     +
2038     + memcpy(&temp, txbuf, mod_bytes);
2039     + iowrite32(temp, cqspi->ahb_base);
2040     + txbuf += mod_bytes;
2041     + }
2042    
2043     if (!wait_for_completion_timeout(&cqspi->transfer_complete,
2044     msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
2045     @@ -655,7 +669,6 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
2046     goto failwr;
2047     }
2048    
2049     - txbuf += write_bytes;
2050     remaining -= write_bytes;
2051    
2052     if (remaining > 0)
2053     diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
2054     index 11662f479e76..771a46083739 100644
2055     --- a/drivers/net/can/rcar/rcar_can.c
2056     +++ b/drivers/net/can/rcar/rcar_can.c
2057     @@ -24,6 +24,9 @@
2058    
2059     #define RCAR_CAN_DRV_NAME "rcar_can"
2060    
2061     +#define RCAR_SUPPORTED_CLOCKS (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \
2062     + BIT(CLKR_CLKEXT))
2063     +
2064     /* Mailbox configuration:
2065     * mailbox 60 - 63 - Rx FIFO mailboxes
2066     * mailbox 56 - 59 - Tx FIFO mailboxes
2067     @@ -789,7 +792,7 @@ static int rcar_can_probe(struct platform_device *pdev)
2068     goto fail_clk;
2069     }
2070    
2071     - if (clock_select >= ARRAY_SIZE(clock_names)) {
2072     + if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
2073     err = -EINVAL;
2074     dev_err(&pdev->dev, "invalid CAN clock selected\n");
2075     goto fail_clk;
2076     diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
2077     index 0678a38b1af4..c9fd83e8d947 100644
2078     --- a/drivers/net/can/usb/ucan.c
2079     +++ b/drivers/net/can/usb/ucan.c
2080     @@ -1575,11 +1575,8 @@ err_firmware_needs_update:
2081     /* disconnect the device */
2082     static void ucan_disconnect(struct usb_interface *intf)
2083     {
2084     - struct usb_device *udev;
2085     struct ucan_priv *up = usb_get_intfdata(intf);
2086    
2087     - udev = interface_to_usbdev(intf);
2088     -
2089     usb_set_intfdata(intf, NULL);
2090    
2091     if (up) {
2092     diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
2093     index d906293ce07d..4b73131a0f20 100644
2094     --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
2095     +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
2096     @@ -2627,8 +2627,8 @@ err_device_destroy:
2097     ena_com_abort_admin_commands(ena_dev);
2098     ena_com_wait_for_abort_completion(ena_dev);
2099     ena_com_admin_destroy(ena_dev);
2100     - ena_com_mmio_reg_read_request_destroy(ena_dev);
2101     ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
2102     + ena_com_mmio_reg_read_request_destroy(ena_dev);
2103     err:
2104     clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2105     clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2106     diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
2107     index cdd7a611479b..19f89d9b1781 100644
2108     --- a/drivers/net/ethernet/amd/sunlance.c
2109     +++ b/drivers/net/ethernet/amd/sunlance.c
2110     @@ -1419,7 +1419,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
2111    
2112     prop = of_get_property(nd, "tpe-link-test?", NULL);
2113     if (!prop)
2114     - goto no_link_test;
2115     + goto node_put;
2116    
2117     if (strcmp(prop, "true")) {
2118     printk(KERN_NOTICE "SunLance: warning: overriding option "
2119     @@ -1428,6 +1428,8 @@ static int sparc_lance_probe_one(struct platform_device *op,
2120     "to ecd@skynet.be\n");
2121     auxio_set_lte(AUXIO_LTE_ON);
2122     }
2123     +node_put:
2124     + of_node_put(nd);
2125     no_link_test:
2126     lp->auto_select = 1;
2127     lp->tpe = 0;
2128     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
2129     index be1506169076..0de487a8f0eb 100644
2130     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
2131     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
2132     @@ -2191,6 +2191,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2133     #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
2134     E1HVN_MAX)
2135    
2136     +/* Following is the DMAE channel number allocation for the clients.
2137     + * MFW: OCBB/OCSD implementations use DMAE channels 14/15 respectively.
2138     + * Driver: 0-3 and 8-11 (for PF dmae operations)
2139     + * 4 and 12 (for stats requests)
2140     + */
2141     +#define BNX2X_FW_DMAE_C 13 /* Channel for FW DMAE operations */
2142     +
2143     /* PCIE link and speed */
2144     #define PCICFG_LINK_WIDTH 0x1f00000
2145     #define PCICFG_LINK_WIDTH_SHIFT 20
2146     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
2147     index 3f4d2c8da21a..a9eaaf3e73a4 100644
2148     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
2149     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
2150     @@ -6149,6 +6149,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
2151     rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
2152     rdata->path_id = BP_PATH(bp);
2153     rdata->network_cos_mode = start_params->network_cos_mode;
2154     + rdata->dmae_cmd_id = BNX2X_FW_DMAE_C;
2155    
2156     rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port);
2157     rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port);
2158     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2159     index e52d7af3ab3e..da9b87689996 100644
2160     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2161     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2162     @@ -2862,8 +2862,8 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
2163     record->asic_state = 0;
2164     strlcpy(record->system_name, utsname()->nodename,
2165     sizeof(record->system_name));
2166     - record->year = cpu_to_le16(tm.tm_year);
2167     - record->month = cpu_to_le16(tm.tm_mon);
2168     + record->year = cpu_to_le16(tm.tm_year + 1900);
2169     + record->month = cpu_to_le16(tm.tm_mon + 1);
2170     record->day = cpu_to_le16(tm.tm_mday);
2171     record->hour = cpu_to_le16(tm.tm_hour);
2172     record->minute = cpu_to_le16(tm.tm_min);
2173     diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
2174     index a1197d3adbe0..9015bd911bee 100644
2175     --- a/drivers/net/ethernet/faraday/ftmac100.c
2176     +++ b/drivers/net/ethernet/faraday/ftmac100.c
2177     @@ -872,11 +872,10 @@ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
2178     struct net_device *netdev = dev_id;
2179     struct ftmac100 *priv = netdev_priv(netdev);
2180    
2181     - if (likely(netif_running(netdev))) {
2182     - /* Disable interrupts for polling */
2183     - ftmac100_disable_all_int(priv);
2184     + /* Disable interrupts for polling */
2185     + ftmac100_disable_all_int(priv);
2186     + if (likely(netif_running(netdev)))
2187     napi_schedule(&priv->napi);
2188     - }
2189    
2190     return IRQ_HANDLED;
2191     }
2192     diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
2193     index 7661064c815b..5ab21a1b5444 100644
2194     --- a/drivers/net/ethernet/ibm/ibmvnic.c
2195     +++ b/drivers/net/ethernet/ibm/ibmvnic.c
2196     @@ -485,8 +485,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
2197    
2198     for (j = 0; j < rx_pool->size; j++) {
2199     if (rx_pool->rx_buff[j].skb) {
2200     - dev_kfree_skb_any(rx_pool->rx_buff[i].skb);
2201     - rx_pool->rx_buff[i].skb = NULL;
2202     + dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
2203     + rx_pool->rx_buff[j].skb = NULL;
2204     }
2205     }
2206    
2207     @@ -1103,20 +1103,15 @@ static int ibmvnic_open(struct net_device *netdev)
2208     return 0;
2209     }
2210    
2211     - mutex_lock(&adapter->reset_lock);
2212     -
2213     if (adapter->state != VNIC_CLOSED) {
2214     rc = ibmvnic_login(netdev);
2215     - if (rc) {
2216     - mutex_unlock(&adapter->reset_lock);
2217     + if (rc)
2218     return rc;
2219     - }
2220    
2221     rc = init_resources(adapter);
2222     if (rc) {
2223     netdev_err(netdev, "failed to initialize resources\n");
2224     release_resources(adapter);
2225     - mutex_unlock(&adapter->reset_lock);
2226     return rc;
2227     }
2228     }
2229     @@ -1124,8 +1119,6 @@ static int ibmvnic_open(struct net_device *netdev)
2230     rc = __ibmvnic_open(netdev);
2231     netif_carrier_on(netdev);
2232    
2233     - mutex_unlock(&adapter->reset_lock);
2234     -
2235     return rc;
2236     }
2237    
2238     @@ -1269,10 +1262,8 @@ static int ibmvnic_close(struct net_device *netdev)
2239     return 0;
2240     }
2241    
2242     - mutex_lock(&adapter->reset_lock);
2243     rc = __ibmvnic_close(netdev);
2244     ibmvnic_cleanup(netdev);
2245     - mutex_unlock(&adapter->reset_lock);
2246    
2247     return rc;
2248     }
2249     @@ -1746,6 +1737,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
2250     struct ibmvnic_rwi *rwi, u32 reset_state)
2251     {
2252     u64 old_num_rx_queues, old_num_tx_queues;
2253     + u64 old_num_rx_slots, old_num_tx_slots;
2254     struct net_device *netdev = adapter->netdev;
2255     int i, rc;
2256    
2257     @@ -1757,6 +1749,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
2258    
2259     old_num_rx_queues = adapter->req_rx_queues;
2260     old_num_tx_queues = adapter->req_tx_queues;
2261     + old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2262     + old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
2263    
2264     ibmvnic_cleanup(netdev);
2265    
2266     @@ -1819,21 +1813,20 @@ static int do_reset(struct ibmvnic_adapter *adapter,
2267     if (rc)
2268     return rc;
2269     } else if (adapter->req_rx_queues != old_num_rx_queues ||
2270     - adapter->req_tx_queues != old_num_tx_queues) {
2271     - adapter->map_id = 1;
2272     + adapter->req_tx_queues != old_num_tx_queues ||
2273     + adapter->req_rx_add_entries_per_subcrq !=
2274     + old_num_rx_slots ||
2275     + adapter->req_tx_entries_per_subcrq !=
2276     + old_num_tx_slots) {
2277     release_rx_pools(adapter);
2278     release_tx_pools(adapter);
2279     - rc = init_rx_pools(netdev);
2280     - if (rc)
2281     - return rc;
2282     - rc = init_tx_pools(netdev);
2283     - if (rc)
2284     - return rc;
2285     -
2286     release_napi(adapter);
2287     - rc = init_napi(adapter);
2288     + release_vpd_data(adapter);
2289     +
2290     + rc = init_resources(adapter);
2291     if (rc)
2292     return rc;
2293     +
2294     } else {
2295     rc = reset_tx_pools(adapter);
2296     if (rc)
2297     @@ -1917,17 +1910,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
2298     adapter->state = VNIC_PROBED;
2299     return 0;
2300     }
2301     - /* netif_set_real_num_xx_queues needs to take rtnl lock here
2302     - * unless wait_for_reset is set, in which case the rtnl lock
2303     - * has already been taken before initializing the reset
2304     - */
2305     - if (!adapter->wait_for_reset) {
2306     - rtnl_lock();
2307     - rc = init_resources(adapter);
2308     - rtnl_unlock();
2309     - } else {
2310     - rc = init_resources(adapter);
2311     - }
2312     +
2313     + rc = init_resources(adapter);
2314     if (rc)
2315     return rc;
2316    
2317     @@ -1986,13 +1970,21 @@ static void __ibmvnic_reset(struct work_struct *work)
2318     struct ibmvnic_rwi *rwi;
2319     struct ibmvnic_adapter *adapter;
2320     struct net_device *netdev;
2321     + bool we_lock_rtnl = false;
2322     u32 reset_state;
2323     int rc = 0;
2324    
2325     adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2326     netdev = adapter->netdev;
2327    
2328     - mutex_lock(&adapter->reset_lock);
2329     + /* netif_set_real_num_xx_queues needs to take rtnl lock here
2330     + * unless wait_for_reset is set, in which case the rtnl lock
2331     + * has already been taken before initializing the reset
2332     + */
2333     + if (!adapter->wait_for_reset) {
2334     + rtnl_lock();
2335     + we_lock_rtnl = true;
2336     + }
2337     reset_state = adapter->state;
2338    
2339     rwi = get_next_rwi(adapter);
2340     @@ -2020,12 +2012,11 @@ static void __ibmvnic_reset(struct work_struct *work)
2341     if (rc) {
2342     netdev_dbg(adapter->netdev, "Reset failed\n");
2343     free_all_rwi(adapter);
2344     - mutex_unlock(&adapter->reset_lock);
2345     - return;
2346     }
2347    
2348     adapter->resetting = false;
2349     - mutex_unlock(&adapter->reset_lock);
2350     + if (we_lock_rtnl)
2351     + rtnl_unlock();
2352     }
2353    
2354     static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2355     @@ -4709,7 +4700,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
2356    
2357     INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
2358     INIT_LIST_HEAD(&adapter->rwi_list);
2359     - mutex_init(&adapter->reset_lock);
2360     mutex_init(&adapter->rwi_lock);
2361     adapter->resetting = false;
2362    
2363     @@ -4781,8 +4771,8 @@ static int ibmvnic_remove(struct vio_dev *dev)
2364     struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2365    
2366     adapter->state = VNIC_REMOVING;
2367     - unregister_netdev(netdev);
2368     - mutex_lock(&adapter->reset_lock);
2369     + rtnl_lock();
2370     + unregister_netdevice(netdev);
2371    
2372     release_resources(adapter);
2373     release_sub_crqs(adapter, 1);
2374     @@ -4793,7 +4783,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
2375    
2376     adapter->state = VNIC_REMOVED;
2377    
2378     - mutex_unlock(&adapter->reset_lock);
2379     + rtnl_unlock();
2380     device_remove_file(&dev->dev, &dev_attr_failover);
2381     free_netdev(netdev);
2382     dev_set_drvdata(&dev->dev, NULL);
2383     diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
2384     index f06eec145ca6..735f481b1870 100644
2385     --- a/drivers/net/ethernet/ibm/ibmvnic.h
2386     +++ b/drivers/net/ethernet/ibm/ibmvnic.h
2387     @@ -1068,7 +1068,7 @@ struct ibmvnic_adapter {
2388     struct tasklet_struct tasklet;
2389     enum vnic_state state;
2390     enum ibmvnic_reset_reason reset_reason;
2391     - struct mutex reset_lock, rwi_lock;
2392     + struct mutex rwi_lock;
2393     struct list_head rwi_list;
2394     struct work_struct ibmvnic_reset;
2395     bool resetting;
2396     diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
2397     index 4bdf25059542..21788d4f9881 100644
2398     --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
2399     +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
2400     @@ -337,7 +337,7 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
2401     static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
2402     int align, u32 skip_mask, u32 *puid)
2403     {
2404     - u32 uid;
2405     + u32 uid = 0;
2406     u32 res;
2407     struct mlx4_zone_allocator *zone_alloc = zone->allocator;
2408     struct mlx4_zone_entry *curr_node;
2409     diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
2410     index ebcd2778eeb3..23f1b5b512c2 100644
2411     --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
2412     +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
2413     @@ -540,8 +540,8 @@ struct slave_list {
2414     struct resource_allocator {
2415     spinlock_t alloc_lock; /* protect quotas */
2416     union {
2417     - int res_reserved;
2418     - int res_port_rsvd[MLX4_MAX_PORTS];
2419     + unsigned int res_reserved;
2420     + unsigned int res_port_rsvd[MLX4_MAX_PORTS];
2421     };
2422     union {
2423     int res_free;
2424     diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
2425     index 2e84f10f59ba..1a11bc0e1612 100644
2426     --- a/drivers/net/ethernet/mellanox/mlx4/mr.c
2427     +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
2428     @@ -363,6 +363,7 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
2429     container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
2430     buf);
2431    
2432     + (*mpt_entry)->lkey = 0;
2433     err = mlx4_SW2HW_MPT(dev, mailbox, key);
2434     }
2435    
2436     diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
2437     index f5459de6d60a..5900a506bf8d 100644
2438     --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
2439     +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
2440     @@ -191,7 +191,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
2441     static void
2442     qed_dcbx_set_params(struct qed_dcbx_results *p_data,
2443     struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2444     - bool enable, u8 prio, u8 tc,
2445     + bool app_tlv, bool enable, u8 prio, u8 tc,
2446     enum dcbx_protocol_type type,
2447     enum qed_pci_personality personality)
2448     {
2449     @@ -210,7 +210,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
2450     p_data->arr[type].dont_add_vlan0 = true;
2451    
2452     /* QM reconf data */
2453     - if (p_hwfn->hw_info.personality == personality)
2454     + if (app_tlv && p_hwfn->hw_info.personality == personality)
2455     qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
2456    
2457     /* Configure dcbx vlan priority in doorbell block for roce EDPM */
2458     @@ -225,7 +225,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
2459     static void
2460     qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
2461     struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2462     - bool enable, u8 prio, u8 tc,
2463     + bool app_tlv, bool enable, u8 prio, u8 tc,
2464     enum dcbx_protocol_type type)
2465     {
2466     enum qed_pci_personality personality;
2467     @@ -240,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
2468    
2469     personality = qed_dcbx_app_update[i].personality;
2470    
2471     - qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
2472     + qed_dcbx_set_params(p_data, p_hwfn, p_ptt, app_tlv, enable,
2473     prio, tc, type, personality);
2474     }
2475     }
2476     @@ -318,8 +318,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2477     enable = true;
2478     }
2479    
2480     - qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
2481     - priority, tc, type);
2482     + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, true,
2483     + enable, priority, tc, type);
2484     }
2485     }
2486    
2487     @@ -340,7 +340,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2488     continue;
2489    
2490     enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
2491     - qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
2492     + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false, enable,
2493     priority, tc, type);
2494     }
2495    
2496     diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
2497     index 97f073fd3725..2f69ee9221c6 100644
2498     --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
2499     +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
2500     @@ -179,6 +179,10 @@ void qed_resc_free(struct qed_dev *cdev)
2501     qed_iscsi_free(p_hwfn);
2502     qed_ooo_free(p_hwfn);
2503     }
2504     +
2505     + if (QED_IS_RDMA_PERSONALITY(p_hwfn))
2506     + qed_rdma_info_free(p_hwfn);
2507     +
2508     qed_iov_free(p_hwfn);
2509     qed_l2_free(p_hwfn);
2510     qed_dmae_info_free(p_hwfn);
2511     @@ -474,8 +478,16 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
2512     struct qed_qm_info *qm_info = &p_hwfn->qm_info;
2513    
2514     /* Can't have multiple flags set here */
2515     - if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1)
2516     + if (bitmap_weight((unsigned long *)&pq_flags,
2517     + sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
2518     + DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
2519     + goto err;
2520     + }
2521     +
2522     + if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
2523     + DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
2524     goto err;
2525     + }
2526    
2527     switch (pq_flags) {
2528     case PQ_FLAGS_RLS:
2529     @@ -499,8 +511,7 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
2530     }
2531    
2532     err:
2533     - DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
2534     - return NULL;
2535     + return &qm_info->start_pq;
2536     }
2537    
2538     /* save pq index in qm info */
2539     @@ -524,20 +535,32 @@ u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
2540     {
2541     u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
2542    
2543     + if (max_tc == 0) {
2544     + DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
2545     + PQ_FLAGS_MCOS);
2546     + return p_hwfn->qm_info.start_pq;
2547     + }
2548     +
2549     if (tc > max_tc)
2550     DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
2551    
2552     - return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
2553     + return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
2554     }
2555    
2556     u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
2557     {
2558     u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
2559    
2560     + if (max_vf == 0) {
2561     + DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
2562     + PQ_FLAGS_VFS);
2563     + return p_hwfn->qm_info.start_pq;
2564     + }
2565     +
2566     if (vf > max_vf)
2567     DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
2568    
2569     - return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
2570     + return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
2571     }
2572    
2573     u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)
2574     @@ -1074,6 +1097,12 @@ int qed_resc_alloc(struct qed_dev *cdev)
2575     goto alloc_err;
2576     }
2577    
2578     + if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
2579     + rc = qed_rdma_info_alloc(p_hwfn);
2580     + if (rc)
2581     + goto alloc_err;
2582     + }
2583     +
2584     /* DMA info initialization */
2585     rc = qed_dmae_info_alloc(p_hwfn);
2586     if (rc)
2587     @@ -2091,11 +2120,8 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
2588     if (!p_ptt)
2589     return -EAGAIN;
2590    
2591     - /* If roce info is allocated it means roce is initialized and should
2592     - * be enabled in searcher.
2593     - */
2594     if (p_hwfn->p_rdma_info &&
2595     - p_hwfn->b_rdma_enabled_in_prs)
2596     + p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs)
2597     qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
2598    
2599     /* Re-open incoming traffic */
2600     diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
2601     index 0f0aba793352..b22f464ea3fa 100644
2602     --- a/drivers/net/ethernet/qlogic/qed/qed_int.c
2603     +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
2604     @@ -992,6 +992,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
2605     */
2606     do {
2607     index = p_sb_attn->sb_index;
2608     + /* finish reading index before the loop condition */
2609     + dma_rmb();
2610     attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
2611     attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
2612     } while (index != p_sb_attn->sb_index);
2613     diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
2614     index 2094d86a7a08..cf3b0e3dc350 100644
2615     --- a/drivers/net/ethernet/qlogic/qed/qed_main.c
2616     +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
2617     @@ -1634,9 +1634,9 @@ static int qed_drain(struct qed_dev *cdev)
2618     return -EBUSY;
2619     }
2620     rc = qed_mcp_drain(hwfn, ptt);
2621     + qed_ptt_release(hwfn, ptt);
2622     if (rc)
2623     return rc;
2624     - qed_ptt_release(hwfn, ptt);
2625     }
2626    
2627     return 0;
2628     diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
2629     index 62113438c880..7873d6dfd91f 100644
2630     --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
2631     +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
2632     @@ -140,22 +140,34 @@ static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
2633     return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
2634     }
2635    
2636     -static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
2637     - struct qed_ptt *p_ptt,
2638     - struct qed_rdma_start_in_params *params)
2639     +int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
2640     {
2641     struct qed_rdma_info *p_rdma_info;
2642     - u32 num_cons, num_tasks;
2643     - int rc = -ENOMEM;
2644    
2645     - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
2646     -
2647     - /* Allocate a struct with current pf rdma info */
2648     p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
2649     if (!p_rdma_info)
2650     - return rc;
2651     + return -ENOMEM;
2652     +
2653     + spin_lock_init(&p_rdma_info->lock);
2654    
2655     p_hwfn->p_rdma_info = p_rdma_info;
2656     + return 0;
2657     +}
2658     +
2659     +void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
2660     +{
2661     + kfree(p_hwfn->p_rdma_info);
2662     + p_hwfn->p_rdma_info = NULL;
2663     +}
2664     +
2665     +static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
2666     +{
2667     + struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
2668     + u32 num_cons, num_tasks;
2669     + int rc = -ENOMEM;
2670     +
2671     + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
2672     +
2673     if (QED_IS_IWARP_PERSONALITY(p_hwfn))
2674     p_rdma_info->proto = PROTOCOLID_IWARP;
2675     else
2676     @@ -183,7 +195,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
2677     /* Allocate a struct with device params and fill it */
2678     p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
2679     if (!p_rdma_info->dev)
2680     - goto free_rdma_info;
2681     + return rc;
2682    
2683     /* Allocate a struct with port params and fill it */
2684     p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
2685     @@ -298,8 +310,6 @@ free_rdma_port:
2686     kfree(p_rdma_info->port);
2687     free_rdma_dev:
2688     kfree(p_rdma_info->dev);
2689     -free_rdma_info:
2690     - kfree(p_rdma_info);
2691    
2692     return rc;
2693     }
2694     @@ -370,8 +380,6 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
2695    
2696     kfree(p_rdma_info->port);
2697     kfree(p_rdma_info->dev);
2698     -
2699     - kfree(p_rdma_info);
2700     }
2701    
2702     static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
2703     @@ -679,8 +687,6 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
2704    
2705     DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
2706    
2707     - spin_lock_init(&p_hwfn->p_rdma_info->lock);
2708     -
2709     qed_rdma_init_devinfo(p_hwfn, params);
2710     qed_rdma_init_port(p_hwfn);
2711     qed_rdma_init_events(p_hwfn, params);
2712     @@ -727,7 +733,7 @@ static int qed_rdma_stop(void *rdma_cxt)
2713     /* Disable RoCE search */
2714     qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
2715     p_hwfn->b_rdma_enabled_in_prs = false;
2716     -
2717     + p_hwfn->p_rdma_info->active = 0;
2718     qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
2719    
2720     ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
2721     @@ -1236,7 +1242,8 @@ qed_rdma_create_qp(void *rdma_cxt,
2722     u8 max_stats_queues;
2723     int rc;
2724    
2725     - if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
2726     + if (!rdma_cxt || !in_params || !out_params ||
2727     + !p_hwfn->p_rdma_info->active) {
2728     DP_ERR(p_hwfn->cdev,
2729     "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
2730     rdma_cxt, in_params, out_params);
2731     @@ -1802,8 +1809,8 @@ bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
2732     {
2733     bool result;
2734    
2735     - /* if rdma info has not been allocated, naturally there are no qps */
2736     - if (!p_hwfn->p_rdma_info)
2737     + /* if rdma wasn't activated yet, naturally there are no qps */
2738     + if (!p_hwfn->p_rdma_info->active)
2739     return false;
2740    
2741     spin_lock_bh(&p_hwfn->p_rdma_info->lock);
2742     @@ -1849,7 +1856,7 @@ static int qed_rdma_start(void *rdma_cxt,
2743     if (!p_ptt)
2744     goto err;
2745    
2746     - rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
2747     + rc = qed_rdma_alloc(p_hwfn);
2748     if (rc)
2749     goto err1;
2750    
2751     @@ -1858,6 +1865,7 @@ static int qed_rdma_start(void *rdma_cxt,
2752     goto err2;
2753    
2754     qed_ptt_release(p_hwfn, p_ptt);
2755     + p_hwfn->p_rdma_info->active = 1;
2756    
2757     return rc;
2758    
2759     diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
2760     index 6f722ee8ee94..3689fe3e5935 100644
2761     --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
2762     +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
2763     @@ -102,6 +102,7 @@ struct qed_rdma_info {
2764     u16 max_queue_zones;
2765     enum protocol_type proto;
2766     struct qed_iwarp_info iwarp;
2767     + u8 active:1;
2768     };
2769    
2770     struct qed_rdma_qp {
2771     @@ -176,10 +177,14 @@ struct qed_rdma_qp {
2772     #if IS_ENABLED(CONFIG_QED_RDMA)
2773     void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
2774     void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
2775     +int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn);
2776     +void qed_rdma_info_free(struct qed_hwfn *p_hwfn);
2777     #else
2778     static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
2779     static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
2780     struct qed_ptt *p_ptt) {}
2781     +static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;}
2782     +static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {}
2783     #endif
2784    
2785     int
2786     diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
2787     index d887016e54b6..4b6572f0188a 100644
2788     --- a/drivers/net/team/team.c
2789     +++ b/drivers/net/team/team.c
2790     @@ -985,8 +985,6 @@ static void team_port_disable(struct team *team,
2791     team->en_port_count--;
2792     team_queue_override_port_del(team, port);
2793     team_adjust_ops(team);
2794     - team_notify_peers(team);
2795     - team_mcast_rejoin(team);
2796     team_lower_state_changed(port);
2797     }
2798    
2799     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
2800     index e7584b842dce..eb5db94f5745 100644
2801     --- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
2802     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
2803     @@ -193,6 +193,9 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
2804     }
2805     break;
2806     case BRCMU_CHSPEC_D11AC_BW_160:
2807     + ch->bw = BRCMU_CHAN_BW_160;
2808     + ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
2809     + BRCMU_CHSPEC_D11AC_SB_SHIFT);
2810     switch (ch->sb) {
2811     case BRCMU_CHAN_SB_LLL:
2812     ch->control_ch_num -= CH_70MHZ_APART;
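The 160 MHz case above now derives ch->bw and ch->sb from the chanspec before the inner switch runs; the sideband extraction is a plain mask-and-shift. A minimal sketch of that idiom as a standalone program; the field layout here is made up, not the real chanspec bits:

#include <stdint.h>
#include <stdio.h>

/* Extract a bit field: mask first, then shift down. */
static uint16_t maskget16(uint16_t value, uint16_t mask, uint8_t shift)
{
	return (uint16_t)((value & mask) >> shift);
}

int main(void)
{
	/* Illustrative layout: bits 8..10 hold a "sideband" field. */
	const uint16_t SB_MASK  = 0x0700;
	const uint8_t  SB_SHIFT = 8;
	uint16_t chspec = 0x0500;	/* sideband field = 5 */

	printf("sideband = %u\n", maskget16(chspec, SB_MASK, SB_SHIFT));
	return 0;
}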
2813     diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2814     index 07442ada6dd0..62ab42e94c9d 100644
2815     --- a/drivers/net/wireless/mac80211_hwsim.c
2816     +++ b/drivers/net/wireless/mac80211_hwsim.c
2817     @@ -2889,6 +2889,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2818    
2819     wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
2820    
2821     + tasklet_hrtimer_init(&data->beacon_timer,
2822     + mac80211_hwsim_beacon,
2823     + CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2824     +
2825     err = ieee80211_register_hw(hw);
2826     if (err < 0) {
2827     pr_debug("mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
2828     @@ -2913,10 +2917,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2829     data->debugfs,
2830     data, &hwsim_simulate_radar);
2831    
2832     - tasklet_hrtimer_init(&data->beacon_timer,
2833     - mac80211_hwsim_beacon,
2834     - CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2835     -
2836     spin_lock_bh(&hwsim_radio_lock);
2837     err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht,
2838     hwsim_rht_params);
2839     diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
2840     index b6c5f17dca30..27826217ff76 100644
2841     --- a/drivers/net/wireless/mediatek/mt76/Kconfig
2842     +++ b/drivers/net/wireless/mediatek/mt76/Kconfig
2843     @@ -1,6 +1,12 @@
2844     config MT76_CORE
2845     tristate
2846    
2847     +config MT76_LEDS
2848     + bool
2849     + depends on MT76_CORE
2850     + depends on LEDS_CLASS=y || MT76_CORE=LEDS_CLASS
2851     + default y
2852     +
2853     config MT76_USB
2854     tristate
2855     depends on MT76_CORE
2856     diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
2857     index 029d54bce9e8..ade4a2029a24 100644
2858     --- a/drivers/net/wireless/mediatek/mt76/mac80211.c
2859     +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
2860     @@ -342,9 +342,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
2861     mt76_check_sband(dev, NL80211_BAND_2GHZ);
2862     mt76_check_sband(dev, NL80211_BAND_5GHZ);
2863    
2864     - ret = mt76_led_init(dev);
2865     - if (ret)
2866     - return ret;
2867     + if (IS_ENABLED(CONFIG_MT76_LEDS)) {
2868     + ret = mt76_led_init(dev);
2869     + if (ret)
2870     + return ret;
2871     + }
2872    
2873     return ieee80211_register_hw(hw);
2874     }
2875     diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
2876     index b814391f79ac..03b103c45d69 100644
2877     --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
2878     +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
2879     @@ -581,8 +581,10 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
2880     mt76x2_dfs_init_detector(dev);
2881    
2882     /* init led callbacks */
2883     - dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
2884     - dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
2885     + if (IS_ENABLED(CONFIG_MT76_LEDS)) {
2886     + dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
2887     + dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
2888     + }
2889    
2890     ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
2891     ARRAY_SIZE(mt76x2_rates));
2892     diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
2893     index ac68072fb8cd..5ff254dc9b14 100644
2894     --- a/drivers/nvdimm/nd-core.h
2895     +++ b/drivers/nvdimm/nd-core.h
2896     @@ -112,6 +112,8 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
2897     struct nd_mapping *nd_mapping, resource_size_t *overlap);
2898     resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
2899     resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
2900     +int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
2901     + resource_size_t size);
2902     resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
2903     struct nd_label_id *label_id);
2904     int alias_dpa_busy(struct device *dev, void *data);
2905     diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
2906     index 3f7ad5bc443e..7fe84bfe0878 100644
2907     --- a/drivers/nvdimm/pfn_devs.c
2908     +++ b/drivers/nvdimm/pfn_devs.c
2909     @@ -590,14 +590,47 @@ static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
2910     ALIGN_DOWN(phys, nd_pfn->align));
2911     }
2912    
2913     +/*
2914     + * Check whether pmem collides with 'System RAM' or other regions when
2915     + * section aligned, and trim it accordingly.
2916     + */
2917     +static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
2918     +{
2919     + struct nd_namespace_common *ndns = nd_pfn->ndns;
2920     + struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
2921     + struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
2922     + const resource_size_t start = nsio->res.start;
2923     + const resource_size_t end = start + resource_size(&nsio->res);
2924     + resource_size_t adjust, size;
2925     +
2926     + *start_pad = 0;
2927     + *end_trunc = 0;
2928     +
2929     + adjust = start - PHYS_SECTION_ALIGN_DOWN(start);
2930     + size = resource_size(&nsio->res) + adjust;
2931     + if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM,
2932     + IORES_DESC_NONE) == REGION_MIXED
2933     + || nd_region_conflict(nd_region, start - adjust, size))
2934     + *start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
2935     +
2936     + /* Now check that end of the range does not collide. */
2937     + adjust = PHYS_SECTION_ALIGN_UP(end) - end;
2938     + size = resource_size(&nsio->res) + adjust;
2939     + if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
2940     + IORES_DESC_NONE) == REGION_MIXED
2941     + || !IS_ALIGNED(end, nd_pfn->align)
2942     + || nd_region_conflict(nd_region, start, size + adjust))
2943     + *end_trunc = end - phys_pmem_align_down(nd_pfn, end);
2944     +}
2945     +
2946     static int nd_pfn_init(struct nd_pfn *nd_pfn)
2947     {
2948     u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
2949     struct nd_namespace_common *ndns = nd_pfn->ndns;
2950     - u32 start_pad = 0, end_trunc = 0;
2951     + struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
2952     resource_size_t start, size;
2953     - struct nd_namespace_io *nsio;
2954     struct nd_region *nd_region;
2955     + u32 start_pad, end_trunc;
2956     struct nd_pfn_sb *pfn_sb;
2957     unsigned long npfns;
2958     phys_addr_t offset;
2959     @@ -629,30 +662,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
2960    
2961     memset(pfn_sb, 0, sizeof(*pfn_sb));
2962    
2963     - /*
2964     - * Check if pmem collides with 'System RAM' when section aligned and
2965     - * trim it accordingly
2966     - */
2967     - nsio = to_nd_namespace_io(&ndns->dev);
2968     - start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
2969     - size = resource_size(&nsio->res);
2970     - if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
2971     - IORES_DESC_NONE) == REGION_MIXED) {
2972     - start = nsio->res.start;
2973     - start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
2974     - }
2975     -
2976     - start = nsio->res.start;
2977     - size = PHYS_SECTION_ALIGN_UP(start + size) - start;
2978     - if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
2979     - IORES_DESC_NONE) == REGION_MIXED
2980     - || !IS_ALIGNED(start + resource_size(&nsio->res),
2981     - nd_pfn->align)) {
2982     - size = resource_size(&nsio->res);
2983     - end_trunc = start + size - phys_pmem_align_down(nd_pfn,
2984     - start + size);
2985     - }
2986     -
2987     + trim_pfn_device(nd_pfn, &start_pad, &end_trunc);
2988     if (start_pad + end_trunc)
2989     dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
2990     dev_name(&ndns->dev), start_pad + end_trunc);
2991     @@ -663,7 +673,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
2992     * implementation will limit the pfns advertised through
2993     * ->direct_access() to those that are included in the memmap.
2994     */
2995     - start += start_pad;
2996     + start = nsio->res.start + start_pad;
2997     size = resource_size(&nsio->res);
2998     npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
2999     / PAGE_SIZE);
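The new trim_pfn_device() computes how much to pad at the start and truncate at the end so the usable range stays section aligned when System RAM or a neighbouring region would otherwise intrude. A simplified userspace sketch of just the alignment arithmetic, assuming a 128 MiB section size and made-up addresses (the driver additionally honours nd_pfn->align and the region-conflict check):

#include <stdio.h>
#include <stdint.h>

#define SECTION_SIZE	(128ULL << 20)	/* illustrative section size */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN_UP(x, a)		ALIGN_DOWN((x) + (a) - 1, (a))

int main(void)
{
	/* Illustrative, unaligned namespace boundaries. */
	uint64_t start = 0x48000000ULL + 0x200000ULL;
	uint64_t end   = 0x80000000ULL - 0x100000ULL;

	/* Pad the start forward to the next section boundary... */
	uint64_t start_pad = ALIGN_UP(start, SECTION_SIZE) - start;
	/* ...and truncate the end back to the previous one. */
	uint64_t end_trunc = end - ALIGN_DOWN(end, SECTION_SIZE);

	printf("start_pad = %llu bytes, end_trunc = %llu bytes\n",
	       (unsigned long long)start_pad, (unsigned long long)end_trunc);
	return 0;
}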
3000     diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
3001     index 174a418cb171..e7377f1028ef 100644
3002     --- a/drivers/nvdimm/region_devs.c
3003     +++ b/drivers/nvdimm/region_devs.c
3004     @@ -1184,6 +1184,47 @@ int nvdimm_has_cache(struct nd_region *nd_region)
3005     }
3006     EXPORT_SYMBOL_GPL(nvdimm_has_cache);
3007    
3008     +struct conflict_context {
3009     + struct nd_region *nd_region;
3010     + resource_size_t start, size;
3011     +};
3012     +
3013     +static int region_conflict(struct device *dev, void *data)
3014     +{
3015     + struct nd_region *nd_region;
3016     + struct conflict_context *ctx = data;
3017     + resource_size_t res_end, region_end, region_start;
3018     +
3019     + if (!is_memory(dev))
3020     + return 0;
3021     +
3022     + nd_region = to_nd_region(dev);
3023     + if (nd_region == ctx->nd_region)
3024     + return 0;
3025     +
3026     + res_end = ctx->start + ctx->size;
3027     + region_start = nd_region->ndr_start;
3028     + region_end = region_start + nd_region->ndr_size;
3029     + if (ctx->start >= region_start && ctx->start < region_end)
3030     + return -EBUSY;
3031     + if (res_end > region_start && res_end <= region_end)
3032     + return -EBUSY;
3033     + return 0;
3034     +}
3035     +
3036     +int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
3037     + resource_size_t size)
3038     +{
3039     + struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
3040     + struct conflict_context ctx = {
3041     + .nd_region = nd_region,
3042     + .start = start,
3043     + .size = size,
3044     + };
3045     +
3046     + return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
3047     +}
3048     +
3049     void __exit nd_region_devs_exit(void)
3050     {
3051     ida_destroy(&region_ida);
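region_conflict() above reports -EBUSY when the candidate range starts or ends inside a sibling region. The same half-open interval test as a small standalone program, with made-up region boundaries:

#include <stdio.h>
#include <stdint.h>

/* Returns 1 if [start, start+size) starts or ends inside [r_start, r_end). */
static int range_conflicts(uint64_t start, uint64_t size,
			   uint64_t r_start, uint64_t r_end)
{
	uint64_t end = start + size;

	if (start >= r_start && start < r_end)
		return 1;
	if (end > r_start && end <= r_end)
		return 1;
	return 0;
}

int main(void)
{
	/* Illustrative sibling region: [0x1000, 0x2000). */
	printf("%d\n", range_conflicts(0x1800, 0x100, 0x1000, 0x2000)); /* 1 */
	printf("%d\n", range_conflicts(0x2000, 0x100, 0x1000, 0x2000)); /* 0 */
	printf("%d\n", range_conflicts(0x0f00, 0x200, 0x1000, 0x2000)); /* 1 */
	return 0;
}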
3052     diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
3053     index 611e70cae754..9375fa705d82 100644
3054     --- a/drivers/nvme/host/fc.c
3055     +++ b/drivers/nvme/host/fc.c
3056     @@ -144,6 +144,7 @@ struct nvme_fc_ctrl {
3057    
3058     bool ioq_live;
3059     bool assoc_active;
3060     + atomic_t err_work_active;
3061     u64 association_id;
3062    
3063     struct list_head ctrl_list; /* rport->ctrl_list */
3064     @@ -152,6 +153,7 @@ struct nvme_fc_ctrl {
3065     struct blk_mq_tag_set tag_set;
3066    
3067     struct delayed_work connect_work;
3068     + struct work_struct err_work;
3069    
3070     struct kref ref;
3071     u32 flags;
3072     @@ -1523,6 +1525,10 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
3073     struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
3074     int i;
3075    
3076     + /* ensure we've initialized the ops once */
3077     + if (!(aen_op->flags & FCOP_FLAGS_AEN))
3078     + return;
3079     +
3080     for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
3081     __nvme_fc_abort_op(ctrl, aen_op);
3082     }
3083     @@ -2036,7 +2042,25 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
3084     static void
3085     nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
3086     {
3087     - /* only proceed if in LIVE state - e.g. on first error */
3088     + int active;
3089     +
3090     + /*
3091     + * If an error (io timeout, etc.) occurs while (re)connecting,
3092     + * it is an error in creating the new association.
3093     + * Start the error recovery thread if it hasn't already
3094     + * been started. It is expected there could be multiple
3095     + * ios hitting this path before things are cleaned up.
3096     + */
3097     + if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
3098     + active = atomic_xchg(&ctrl->err_work_active, 1);
3099     + if (!active && !schedule_work(&ctrl->err_work)) {
3100     + atomic_set(&ctrl->err_work_active, 0);
3101     + WARN_ON(1);
3102     + }
3103     + return;
3104     + }
3105     +
3106     + /* Otherwise, only proceed if in LIVE state - e.g. on first error */
3107     if (ctrl->ctrl.state != NVME_CTRL_LIVE)
3108     return;
3109    
3110     @@ -2802,6 +2826,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
3111     {
3112     struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
3113    
3114     + cancel_work_sync(&ctrl->err_work);
3115     cancel_delayed_work_sync(&ctrl->connect_work);
3116     /*
3117     * kill the association on the link side. this will block
3118     @@ -2854,23 +2879,30 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
3119     }
3120    
3121     static void
3122     -nvme_fc_reset_ctrl_work(struct work_struct *work)
3123     +__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
3124     {
3125     - struct nvme_fc_ctrl *ctrl =
3126     - container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
3127     - int ret;
3128     -
3129     - nvme_stop_ctrl(&ctrl->ctrl);
3130     + nvme_stop_keep_alive(&ctrl->ctrl);
3131    
3132     /* will block will waiting for io to terminate */
3133     nvme_fc_delete_association(ctrl);
3134    
3135     - if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
3136     + if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
3137     + !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
3138     dev_err(ctrl->ctrl.device,
3139     "NVME-FC{%d}: error_recovery: Couldn't change state "
3140     "to CONNECTING\n", ctrl->cnum);
3141     - return;
3142     - }
3143     +}
3144     +
3145     +static void
3146     +nvme_fc_reset_ctrl_work(struct work_struct *work)
3147     +{
3148     + struct nvme_fc_ctrl *ctrl =
3149     + container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
3150     + int ret;
3151     +
3152     + __nvme_fc_terminate_io(ctrl);
3153     +
3154     + nvme_stop_ctrl(&ctrl->ctrl);
3155    
3156     if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
3157     ret = nvme_fc_create_association(ctrl);
3158     @@ -2885,6 +2917,24 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
3159     ctrl->cnum);
3160     }
3161    
3162     +static void
3163     +nvme_fc_connect_err_work(struct work_struct *work)
3164     +{
3165     + struct nvme_fc_ctrl *ctrl =
3166     + container_of(work, struct nvme_fc_ctrl, err_work);
3167     +
3168     + __nvme_fc_terminate_io(ctrl);
3169     +
3170     + atomic_set(&ctrl->err_work_active, 0);
3171     +
3172     + /*
3173     + * Rescheduling the connection after recovering
3174     + * from the io error is left to the reconnect work
3175     + * item, which is what should have stalled waiting on
3176     + * the io that had the error that scheduled this work.
3177     + */
3178     +}
3179     +
3180     static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
3181     .name = "fc",
3182     .module = THIS_MODULE,
3183     @@ -2995,6 +3045,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3184     ctrl->cnum = idx;
3185     ctrl->ioq_live = false;
3186     ctrl->assoc_active = false;
3187     + atomic_set(&ctrl->err_work_active, 0);
3188     init_waitqueue_head(&ctrl->ioabort_wait);
3189    
3190     get_device(ctrl->dev);
3191     @@ -3002,6 +3053,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3192    
3193     INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3194     INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3195     + INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
3196     spin_lock_init(&ctrl->lock);
3197    
3198     /* io queue count */
3199     @@ -3092,6 +3144,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3200     fail_ctrl:
3201     nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3202     cancel_work_sync(&ctrl->ctrl.reset_work);
3203     + cancel_work_sync(&ctrl->err_work);
3204     cancel_delayed_work_sync(&ctrl->connect_work);
3205    
3206     ctrl->ctrl.opts = NULL;
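The fc.c changes let errors that hit while the controller is CONNECTING schedule err_work at most once, using an atomic exchange on err_work_active as the gate. A minimal userspace sketch of that schedule-once idiom with C11 atomics; the names are illustrative and the work runs inline here instead of on a workqueue:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int err_work_active;

/* Stand-in for the work item; the driver queues this on a workqueue. */
static void err_work(void)
{
	puts("running error recovery");
	atomic_store(&err_work_active, 0);	/* re-arm once the work is done */
}

/* Called from possibly many error paths; only the first caller schedules. */
static void error_recovery(void)
{
	int already = atomic_exchange(&err_work_active, 1);

	if (already)
		return;		/* somebody else already scheduled the work */
	err_work();		/* the driver would queue this instead of calling it */
}

int main(void)
{
	error_recovery();	/* first caller wins and runs the work */
	error_recovery();	/* gate was re-armed after completion, so this runs too */
	return 0;
}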
3207     diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
3208     index c0631895154e..8684bcec8ff4 100644
3209     --- a/drivers/s390/net/ism_drv.c
3210     +++ b/drivers/s390/net/ism_drv.c
3211     @@ -415,9 +415,9 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
3212     break;
3213    
3214     clear_bit_inv(bit, bv);
3215     + ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
3216     barrier();
3217     smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
3218     - ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
3219     }
3220    
3221     if (ism->sba->e) {
3222     diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
3223     index 8f5c1d7f751a..b67dc4974f23 100644
3224     --- a/drivers/s390/virtio/virtio_ccw.c
3225     +++ b/drivers/s390/virtio/virtio_ccw.c
3226     @@ -56,6 +56,7 @@ struct virtio_ccw_device {
3227     unsigned int revision; /* Transport revision */
3228     wait_queue_head_t wait_q;
3229     spinlock_t lock;
3230     + struct mutex io_lock; /* Serializes I/O requests */
3231     struct list_head virtqueues;
3232     unsigned long indicators;
3233     unsigned long indicators2;
3234     @@ -296,6 +297,7 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
3235     unsigned long flags;
3236     int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
3237    
3238     + mutex_lock(&vcdev->io_lock);
3239     do {
3240     spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
3241     ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
3242     @@ -308,7 +310,9 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
3243     cpu_relax();
3244     } while (ret == -EBUSY);
3245     wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
3246     - return ret ? ret : vcdev->err;
3247     + ret = ret ? ret : vcdev->err;
3248     + mutex_unlock(&vcdev->io_lock);
3249     + return ret;
3250     }
3251    
3252     static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
3253     @@ -828,6 +832,7 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
3254     int ret;
3255     struct ccw1 *ccw;
3256     void *config_area;
3257     + unsigned long flags;
3258    
3259     ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
3260     if (!ccw)
3261     @@ -846,11 +851,13 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
3262     if (ret)
3263     goto out_free;
3264    
3265     + spin_lock_irqsave(&vcdev->lock, flags);
3266     memcpy(vcdev->config, config_area, offset + len);
3267     - if (buf)
3268     - memcpy(buf, &vcdev->config[offset], len);
3269     if (vcdev->config_ready < offset + len)
3270     vcdev->config_ready = offset + len;
3271     + spin_unlock_irqrestore(&vcdev->lock, flags);
3272     + if (buf)
3273     + memcpy(buf, config_area + offset, len);
3274    
3275     out_free:
3276     kfree(config_area);
3277     @@ -864,6 +871,7 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
3278     struct virtio_ccw_device *vcdev = to_vc_device(vdev);
3279     struct ccw1 *ccw;
3280     void *config_area;
3281     + unsigned long flags;
3282    
3283     ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
3284     if (!ccw)
3285     @@ -876,9 +884,11 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
3286     /* Make sure we don't overwrite fields. */
3287     if (vcdev->config_ready < offset)
3288     virtio_ccw_get_config(vdev, 0, NULL, offset);
3289     + spin_lock_irqsave(&vcdev->lock, flags);
3290     memcpy(&vcdev->config[offset], buf, len);
3291     /* Write the config area to the host. */
3292     memcpy(config_area, vcdev->config, sizeof(vcdev->config));
3293     + spin_unlock_irqrestore(&vcdev->lock, flags);
3294     ccw->cmd_code = CCW_CMD_WRITE_CONF;
3295     ccw->flags = 0;
3296     ccw->count = offset + len;
3297     @@ -1247,6 +1257,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
3298     init_waitqueue_head(&vcdev->wait_q);
3299     INIT_LIST_HEAD(&vcdev->virtqueues);
3300     spin_lock_init(&vcdev->lock);
3301     + mutex_init(&vcdev->io_lock);
3302    
3303     spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3304     dev_set_drvdata(&cdev->dev, vcdev);
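The virtio_ccw hunks serialise channel commands behind a new io_lock and take the per-device lock only around the cached config, copying to the caller's buffer from the bounce buffer outside the lock. A rough pthread sketch of that split; both locks are shown as mutexes and the sizes are made up:

#include <pthread.h>
#include <string.h>
#include <stdio.h>

#define CFG_SIZE 64

static pthread_mutex_t io_lock = PTHREAD_MUTEX_INITIALIZER;   /* serialises channel I/O */
static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;  /* protects the cached config */
static unsigned char cached_config[CFG_SIZE];

/* Stand-in for the channel command that reads the device config. */
static void channel_read_config(unsigned char *bounce, size_t len)
{
	pthread_mutex_lock(&io_lock);		/* one command in flight at a time */
	memset(bounce, 0xab, len);		/* pretend the device filled the buffer */
	pthread_mutex_unlock(&io_lock);
}

static void get_config(unsigned char *buf, size_t offset, size_t len)
{
	unsigned char bounce[CFG_SIZE];

	channel_read_config(bounce, sizeof(bounce));

	pthread_mutex_lock(&cfg_lock);		/* update the cache under the lock */
	memcpy(cached_config, bounce, offset + len);
	pthread_mutex_unlock(&cfg_lock);

	if (buf)				/* copy to the caller outside the lock */
		memcpy(buf, bounce + offset, len);
}

int main(void)
{
	unsigned char out[8];

	get_config(out, 4, sizeof(out));
	printf("config[4] = %#x\n", out[0]);
	return 0;
}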
3305     diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
3306     index 46df707e6f2c..452e19f8fb47 100644
3307     --- a/drivers/scsi/ufs/ufs-hisi.c
3308     +++ b/drivers/scsi/ufs/ufs-hisi.c
3309     @@ -20,6 +20,7 @@
3310     #include "unipro.h"
3311     #include "ufs-hisi.h"
3312     #include "ufshci.h"
3313     +#include "ufs_quirks.h"
3314    
3315     static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
3316     {
3317     @@ -390,6 +391,14 @@ static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param)
3318    
3319     static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
3320     {
3321     + if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
3322     + pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
3323     + /* VS_DebugSaveConfigTime */
3324     + ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10);
3325     + /* sync length */
3326     + ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48);
3327     + }
3328     +
3329     /* update */
3330     ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
3331     /* PA_TxSkip */
3332     diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
3333     index 71f73d1d1ad1..5d2dfdb41a6f 100644
3334     --- a/drivers/scsi/ufs/ufs_quirks.h
3335     +++ b/drivers/scsi/ufs/ufs_quirks.h
3336     @@ -131,4 +131,10 @@ struct ufs_dev_fix {
3337     */
3338     #define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
3339    
3340     +/*
3341     + * Some UFS devices require VS_DebugSaveConfigTime to be 0x10;
3342     + * enabling this quirk ensures that.
3343     + */
3344     +#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME (1 << 9)
3345     +
3346     #endif /* UFS_QUIRKS_H_ */
3347     diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
3348     index 54074dd483a7..0b81d9d03357 100644
3349     --- a/drivers/scsi/ufs/ufshcd.c
3350     +++ b/drivers/scsi/ufs/ufshcd.c
3351     @@ -230,6 +230,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
3352     UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
3353     UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
3354     UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
3355     + UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
3356     + UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
3357    
3358     END_FIX
3359     };
3360     diff --git a/drivers/staging/rtl8712/mlme_linux.c b/drivers/staging/rtl8712/mlme_linux.c
3361     index baaa52f04560..52095086574f 100644
3362     --- a/drivers/staging/rtl8712/mlme_linux.c
3363     +++ b/drivers/staging/rtl8712/mlme_linux.c
3364     @@ -158,7 +158,7 @@ void r8712_report_sec_ie(struct _adapter *adapter, u8 authmode, u8 *sec_ie)
3365     p = buff;
3366     p += sprintf(p, "ASSOCINFO(ReqIEs=");
3367     len = sec_ie[1] + 2;
3368     - len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX - 1;
3369     + len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
3370     for (i = 0; i < len; i++)
3371     p += sprintf(p, "%02x", sec_ie[i]);
3372     p += sprintf(p, ")");
3373     diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
3374     index ac547ddd72d1..d7e88d2a8b1b 100644
3375     --- a/drivers/staging/rtl8712/rtl871x_mlme.c
3376     +++ b/drivers/staging/rtl8712/rtl871x_mlme.c
3377     @@ -1358,7 +1358,7 @@ sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
3378     u8 *out_ie, uint in_len)
3379     {
3380     u8 authmode = 0, match;
3381     - u8 sec_ie[255], uncst_oui[4], bkup_ie[255];
3382     + u8 sec_ie[IW_CUSTOM_MAX], uncst_oui[4], bkup_ie[255];
3383     u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
3384     uint ielength, cnt, remove_cnt;
3385     int iEntry;
3386     diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
3387     index 0952d15f6d40..ca6f1fa3466a 100644
3388     --- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
3389     +++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
3390     @@ -1566,7 +1566,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
3391     if (pstat->aid > 0) {
3392     DBG_871X(" old AID %d\n", pstat->aid);
3393     } else {
3394     - for (pstat->aid = 1; pstat->aid < NUM_STA; pstat->aid++)
3395     + for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
3396     if (pstapriv->sta_aid[pstat->aid - 1] == NULL)
3397     break;
3398    
3399     diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
3400     index dd5e1cede2b5..c3f933d10295 100644
3401     --- a/drivers/tty/serial/8250/8250_mtk.c
3402     +++ b/drivers/tty/serial/8250/8250_mtk.c
3403     @@ -213,17 +213,17 @@ static int mtk8250_probe(struct platform_device *pdev)
3404    
3405     platform_set_drvdata(pdev, data);
3406    
3407     - pm_runtime_enable(&pdev->dev);
3408     - if (!pm_runtime_enabled(&pdev->dev)) {
3409     - err = mtk8250_runtime_resume(&pdev->dev);
3410     - if (err)
3411     - return err;
3412     - }
3413     + err = mtk8250_runtime_resume(&pdev->dev);
3414     + if (err)
3415     + return err;
3416    
3417     data->line = serial8250_register_8250_port(&uart);
3418     if (data->line < 0)
3419     return data->line;
3420    
3421     + pm_runtime_set_active(&pdev->dev);
3422     + pm_runtime_enable(&pdev->dev);
3423     +
3424     return 0;
3425     }
3426    
3427     @@ -234,13 +234,11 @@ static int mtk8250_remove(struct platform_device *pdev)
3428     pm_runtime_get_sync(&pdev->dev);
3429    
3430     serial8250_unregister_port(data->line);
3431     + mtk8250_runtime_suspend(&pdev->dev);
3432    
3433     pm_runtime_disable(&pdev->dev);
3434     pm_runtime_put_noidle(&pdev->dev);
3435    
3436     - if (!pm_runtime_status_suspended(&pdev->dev))
3437     - mtk8250_runtime_suspend(&pdev->dev);
3438     -
3439     return 0;
3440     }
3441    
3442     diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
3443     index 8a111ab33b50..93d3a0ec5e11 100644
3444     --- a/drivers/tty/serial/kgdboc.c
3445     +++ b/drivers/tty/serial/kgdboc.c
3446     @@ -230,7 +230,7 @@ static void kgdboc_put_char(u8 chr)
3447     static int param_set_kgdboc_var(const char *kmessage,
3448     const struct kernel_param *kp)
3449     {
3450     - int len = strlen(kmessage);
3451     + size_t len = strlen(kmessage);
3452    
3453     if (len >= MAX_CONFIG_LEN) {
3454     printk(KERN_ERR "kgdboc: config string too long\n");
3455     @@ -252,7 +252,7 @@ static int param_set_kgdboc_var(const char *kmessage,
3456    
3457     strcpy(config, kmessage);
3458     /* Chop out \n char as a result of echo */
3459     - if (config[len - 1] == '\n')
3460     + if (len && config[len - 1] == '\n')
3461     config[len - 1] = '\0';
3462    
3463     if (configured == 1)
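The kgdboc fix makes len a size_t and guards the trailing-newline chop so an empty config string never indexes config[-1]. The same safe chop as a standalone snippet:

#include <stdio.h>
#include <string.h>

/* Strip one trailing '\n' in place; safe for the empty string. */
static void chop_newline(char *s)
{
	size_t len = strlen(s);

	if (len && s[len - 1] == '\n')
		s[len - 1] = '\0';
}

int main(void)
{
	char a[] = "ttyS0,115200\n";
	char b[] = "";			/* the case the fix guards against */

	chop_newline(a);
	chop_newline(b);
	printf("[%s] [%s]\n", a, b);
	return 0;
}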
3464     diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
3465     index 252eef2c32f9..d6f42b528277 100644
3466     --- a/drivers/tty/tty_io.c
3467     +++ b/drivers/tty/tty_io.c
3468     @@ -1372,7 +1372,13 @@ err_release_lock:
3469     return ERR_PTR(retval);
3470     }
3471    
3472     -static void tty_free_termios(struct tty_struct *tty)
3473     +/**
3474     + * tty_save_termios() - save tty termios data in driver table
3475     + * @tty: tty whose termios data to save
3476     + *
3477     + * Locking: Caller guarantees serialisation with tty_init_termios().
3478     + */
3479     +void tty_save_termios(struct tty_struct *tty)
3480     {
3481     struct ktermios *tp;
3482     int idx = tty->index;
3483     @@ -1391,6 +1397,7 @@ static void tty_free_termios(struct tty_struct *tty)
3484     }
3485     *tp = tty->termios;
3486     }
3487     +EXPORT_SYMBOL_GPL(tty_save_termios);
3488    
3489     /**
3490     * tty_flush_works - flush all works of a tty/pty pair
3491     @@ -1490,7 +1497,7 @@ static void release_tty(struct tty_struct *tty, int idx)
3492     WARN_ON(!mutex_is_locked(&tty_mutex));
3493     if (tty->ops->shutdown)
3494     tty->ops->shutdown(tty);
3495     - tty_free_termios(tty);
3496     + tty_save_termios(tty);
3497     tty_driver_remove_tty(tty->driver, tty);
3498     tty->port->itty = NULL;
3499     if (tty->link)
3500     diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
3501     index 25d736880013..c699d41a2a48 100644
3502     --- a/drivers/tty/tty_port.c
3503     +++ b/drivers/tty/tty_port.c
3504     @@ -640,7 +640,8 @@ void tty_port_close(struct tty_port *port, struct tty_struct *tty,
3505     if (tty_port_close_start(port, tty, filp) == 0)
3506     return;
3507     tty_port_shutdown(port, tty);
3508     - set_bit(TTY_IO_ERROR, &tty->flags);
3509     + if (!port->console)
3510     + set_bit(TTY_IO_ERROR, &tty->flags);
3511     tty_port_close_end(port, tty);
3512     tty_port_tty_set(port, NULL);
3513     }
3514     diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
3515     index f79979ae482a..cc62707c0251 100644
3516     --- a/drivers/usb/core/hub.c
3517     +++ b/drivers/usb/core/hub.c
3518     @@ -2250,7 +2250,7 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
3519     /* descriptor may appear anywhere in config */
3520     err = __usb_get_extra_descriptor(udev->rawdescriptors[0],
3521     le16_to_cpu(udev->config[0].desc.wTotalLength),
3522     - USB_DT_OTG, (void **) &desc);
3523     + USB_DT_OTG, (void **) &desc, sizeof(*desc));
3524     if (err || !(desc->bmAttributes & USB_OTG_HNP))
3525     return 0;
3526    
3527     diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
3528     index 0690fcff0ea2..514c5214ddb2 100644
3529     --- a/drivers/usb/core/quirks.c
3530     +++ b/drivers/usb/core/quirks.c
3531     @@ -333,6 +333,10 @@ static const struct usb_device_id usb_quirk_list[] = {
3532     /* Midiman M-Audio Keystation 88es */
3533     { USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
3534    
3535     + /* SanDisk Ultra Fit and Ultra Flair */
3536     + { USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
3537     + { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
3538     +
3539     /* M-Systems Flash Disk Pioneers */
3540     { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
3541    
3542     diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
3543     index 79d8bd7a612e..4ebfbd737905 100644
3544     --- a/drivers/usb/core/usb.c
3545     +++ b/drivers/usb/core/usb.c
3546     @@ -832,14 +832,14 @@ EXPORT_SYMBOL_GPL(usb_get_current_frame_number);
3547     */
3548    
3549     int __usb_get_extra_descriptor(char *buffer, unsigned size,
3550     - unsigned char type, void **ptr)
3551     + unsigned char type, void **ptr, size_t minsize)
3552     {
3553     struct usb_descriptor_header *header;
3554    
3555     while (size >= sizeof(struct usb_descriptor_header)) {
3556     header = (struct usb_descriptor_header *)buffer;
3557    
3558     - if (header->bLength < 2) {
3559     + if (header->bLength < 2 || header->bLength > size) {
3560     printk(KERN_ERR
3561     "%s: bogus descriptor, type %d length %d\n",
3562     usbcore_name,
3563     @@ -848,7 +848,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size,
3564     return -1;
3565     }
3566    
3567     - if (header->bDescriptorType == type) {
3568     + if (header->bDescriptorType == type && header->bLength >= minsize) {
3569     *ptr = header;
3570     return 0;
3571     }
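__usb_get_extra_descriptor() now rejects descriptors whose bLength runs past the remaining buffer and only returns a match at least minsize bytes long, so callers can dereference fixed-size fields safely. A userspace sketch of walking length-prefixed records with those two checks; the record layout is a simplified stand-in, not the real USB descriptor format:

#include <stdio.h>
#include <stddef.h>

struct rec_header {
	unsigned char length;	/* total record length, header included */
	unsigned char type;
};

/* Find the first record of 'type' that is at least 'minsize' bytes long. */
static void *find_record(unsigned char *buf, size_t size,
			 unsigned char type, size_t minsize)
{
	while (size >= sizeof(struct rec_header)) {
		struct rec_header *h = (struct rec_header *)buf;

		if (h->length < 2 || h->length > size)
			return NULL;		/* bogus or truncated record */
		if (h->type == type && h->length >= minsize)
			return h;		/* big enough to dereference */
		buf += h->length;
		size -= h->length;
	}
	return NULL;
}

int main(void)
{
	/* Two records: type 1 (len 3) and type 2 (len 4). */
	unsigned char blob[] = { 3, 1, 0xaa, 4, 2, 0xbb, 0xcc };

	printf("type 2, minsize 4: %s\n",
	       find_record(blob, sizeof(blob), 2, 4) ? "found" : "not found");
	printf("type 1, minsize 4: %s\n",
	       find_record(blob, sizeof(blob), 1, 4) ? "found" : "not found");
	return 0;
}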
3572     diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
3573     index d257c541e51b..7afc10872f1f 100644
3574     --- a/drivers/usb/dwc2/pci.c
3575     +++ b/drivers/usb/dwc2/pci.c
3576     @@ -120,6 +120,7 @@ static int dwc2_pci_probe(struct pci_dev *pci,
3577     dwc2 = platform_device_alloc("dwc2", PLATFORM_DEVID_AUTO);
3578     if (!dwc2) {
3579     dev_err(dev, "couldn't allocate dwc2 device\n");
3580     + ret = -ENOMEM;
3581     goto err;
3582     }
3583    
3584     diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
3585     index 3ada83d81bda..31e8bf3578c8 100644
3586     --- a/drivers/usb/gadget/function/f_fs.c
3587     +++ b/drivers/usb/gadget/function/f_fs.c
3588     @@ -215,7 +215,6 @@ struct ffs_io_data {
3589    
3590     struct mm_struct *mm;
3591     struct work_struct work;
3592     - struct work_struct cancellation_work;
3593    
3594     struct usb_ep *ep;
3595     struct usb_request *req;
3596     @@ -1073,31 +1072,22 @@ ffs_epfile_open(struct inode *inode, struct file *file)
3597     return 0;
3598     }
3599    
3600     -static void ffs_aio_cancel_worker(struct work_struct *work)
3601     -{
3602     - struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
3603     - cancellation_work);
3604     -
3605     - ENTER();
3606     -
3607     - usb_ep_dequeue(io_data->ep, io_data->req);
3608     -}
3609     -
3610     static int ffs_aio_cancel(struct kiocb *kiocb)
3611     {
3612     struct ffs_io_data *io_data = kiocb->private;
3613     - struct ffs_data *ffs = io_data->ffs;
3614     + struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
3615     int value;
3616    
3617     ENTER();
3618    
3619     - if (likely(io_data && io_data->ep && io_data->req)) {
3620     - INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker);
3621     - queue_work(ffs->io_completion_wq, &io_data->cancellation_work);
3622     - value = -EINPROGRESS;
3623     - } else {
3624     + spin_lock_irq(&epfile->ffs->eps_lock);
3625     +
3626     + if (likely(io_data && io_data->ep && io_data->req))
3627     + value = usb_ep_dequeue(io_data->ep, io_data->req);
3628     + else
3629     value = -EINVAL;
3630     - }
3631     +
3632     + spin_unlock_irq(&epfile->ffs->eps_lock);
3633    
3634     return value;
3635     }
3636     diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
3637     index 684d6f074c3a..09a8ebd95588 100644
3638     --- a/drivers/usb/host/hwa-hc.c
3639     +++ b/drivers/usb/host/hwa-hc.c
3640     @@ -640,7 +640,7 @@ static int hwahc_security_create(struct hwahc *hwahc)
3641     top = itr + itr_size;
3642     result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
3643     le16_to_cpu(usb_dev->actconfig->desc.wTotalLength),
3644     - USB_DT_SECURITY, (void **) &secd);
3645     + USB_DT_SECURITY, (void **) &secd, sizeof(*secd));
3646     if (result == -1) {
3647     dev_warn(dev, "BUG? WUSB host has no security descriptors\n");
3648     return 0;
3649     diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3650     index beeda27b3789..09bf6b4b741b 100644
3651     --- a/drivers/usb/host/xhci-pci.c
3652     +++ b/drivers/usb/host/xhci-pci.c
3653     @@ -132,6 +132,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3654     pdev->device == 0x43bb))
3655     xhci->quirks |= XHCI_SUSPEND_DELAY;
3656    
3657     + if (pdev->vendor == PCI_VENDOR_ID_AMD &&
3658     + (pdev->device == 0x15e0 || pdev->device == 0x15e1))
3659     + xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
3660     +
3661     if (pdev->vendor == PCI_VENDOR_ID_AMD)
3662     xhci->quirks |= XHCI_TRUST_TX_LENGTH;
3663    
3664     diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3665     index c928dbbff881..dae3be1b9c8f 100644
3666     --- a/drivers/usb/host/xhci.c
3667     +++ b/drivers/usb/host/xhci.c
3668     @@ -968,6 +968,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
3669     unsigned int delay = XHCI_MAX_HALT_USEC;
3670     struct usb_hcd *hcd = xhci_to_hcd(xhci);
3671     u32 command;
3672     + u32 res;
3673    
3674     if (!hcd->state)
3675     return 0;
3676     @@ -1021,11 +1022,28 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
3677     command = readl(&xhci->op_regs->command);
3678     command |= CMD_CSS;
3679     writel(command, &xhci->op_regs->command);
3680     + xhci->broken_suspend = 0;
3681     if (xhci_handshake(&xhci->op_regs->status,
3682     STS_SAVE, 0, 10 * 1000)) {
3683     - xhci_warn(xhci, "WARN: xHC save state timeout\n");
3684     - spin_unlock_irq(&xhci->lock);
3685     - return -ETIMEDOUT;
3686     + /*
3687     + * AMD SNPS xHC 3.0 occasionally does not clear the
3688     + * SSS bit of USBSTS (BIT(8)), so when the driver polls
3689     + * for that bit to clear, it never does; the driver then
3690     + * assumes the controller is not responding and times
3691     + * out. To work around this, it is enough to check that
3692     + * the SRE and HCE bits are not set (as per xHCI
3693     + * Section 5.4.2) and bypass the timeout.
3694     + */
3695     + res = readl(&xhci->op_regs->status);
3696     + if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
3697     + (((res & STS_SRE) == 0) &&
3698     + ((res & STS_HCE) == 0))) {
3699     + xhci->broken_suspend = 1;
3700     + } else {
3701     + xhci_warn(xhci, "WARN: xHC save state timeout\n");
3702     + spin_unlock_irq(&xhci->lock);
3703     + return -ETIMEDOUT;
3704     + }
3705     }
3706     spin_unlock_irq(&xhci->lock);
3707    
3708     @@ -1078,7 +1096,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
3709     set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
3710    
3711     spin_lock_irq(&xhci->lock);
3712     - if (xhci->quirks & XHCI_RESET_ON_RESUME)
3713     + if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
3714     hibernated = true;
3715    
3716     if (!hibernated) {
3717     @@ -4496,6 +4514,14 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
3718     {
3719     unsigned long long timeout_ns;
3720    
3721     + /* Prevent U1 if service interval is shorter than U1 exit latency */
3722     + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
3723     + if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
3724     + dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
3725     + return USB3_LPM_DISABLED;
3726     + }
3727     + }
3728     +
3729     if (xhci->quirks & XHCI_INTEL_HOST)
3730     timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
3731     else
3732     @@ -4552,6 +4578,14 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
3733     {
3734     unsigned long long timeout_ns;
3735    
3736     + /* Prevent U2 if service interval is shorter than U2 exit latency */
3737     + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
3738     + if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
3739     + dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
3740     + return USB3_LPM_DISABLED;
3741     + }
3742     + }
3743     +
3744     if (xhci->quirks & XHCI_INTEL_HOST)
3745     timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
3746     else
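When the save-state handshake times out, the xhci_suspend() hunk reads USBSTS and, on controllers with the new SNPS quirk, treats "SSS stuck but no SRE/HCE error bits" as the known-broken case instead of failing the suspend. A tiny sketch of that decision; the bit positions follow the usual xHCI layout but should be treated as illustrative here:

#include <stdio.h>
#include <stdint.h>

/* Illustrative USBSTS bit positions (see xHCI spec section 5.4.2). */
#define STS_SSS	(1u << 8)	/* Save State Status */
#define STS_SRE	(1u << 10)	/* Save/Restore Error */
#define STS_HCE	(1u << 12)	/* Host Controller Error */

/* Decide what to do after the "wait for SSS to clear" handshake timed out. */
static int handle_save_timeout(uint32_t usbsts, int has_snps_quirk)
{
	if (has_snps_quirk && !(usbsts & STS_SRE) && !(usbsts & STS_HCE))
		return 1;	/* stuck SSS, no error bits: proceed, mark broken_suspend */
	return 0;		/* genuine timeout: fail the suspend */
}

int main(void)
{
	printf("%d\n", handle_save_timeout(STS_SSS, 1));		/* 1: tolerated */
	printf("%d\n", handle_save_timeout(STS_SSS | STS_HCE, 1));	/* 0: real error */
	printf("%d\n", handle_save_timeout(STS_SSS, 0));		/* 0: no quirk */
	return 0;
}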
3747     diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3748     index e936e4c8af98..c3ed7d1c9f65 100644
3749     --- a/drivers/usb/host/xhci.h
3750     +++ b/drivers/usb/host/xhci.h
3751     @@ -1847,6 +1847,7 @@ struct xhci_hcd {
3752     #define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
3753     #define XHCI_ZERO_64B_REGS BIT_ULL(32)
3754     #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
3755     +#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
3756    
3757     unsigned int num_active_eps;
3758     unsigned int limit_active_eps;
3759     @@ -1876,6 +1877,8 @@ struct xhci_hcd {
3760     void *dbc;
3761     /* platform-specific data -- must come last */
3762     unsigned long priv[0] __aligned(sizeof(s64));
3763     + /* Broken Suspend flag for SNPS Suspend resume issue */
3764     + u8 broken_suspend;
3765     };
3766    
3767     /* Platform specific overrides to generic XHCI hc_driver ops */
3768     diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
3769     index 6a0c60badfa0..1c6da8d6cccf 100644
3770     --- a/drivers/usb/misc/appledisplay.c
3771     +++ b/drivers/usb/misc/appledisplay.c
3772     @@ -51,6 +51,7 @@ static const struct usb_device_id appledisplay_table[] = {
3773     { APPLEDISPLAY_DEVICE(0x921c) },
3774     { APPLEDISPLAY_DEVICE(0x921d) },
3775     { APPLEDISPLAY_DEVICE(0x9222) },
3776     + { APPLEDISPLAY_DEVICE(0x9226) },
3777     { APPLEDISPLAY_DEVICE(0x9236) },
3778    
3779     /* Terminating entry */
3780     diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
3781     index 17940589c647..7d289302ff6c 100644
3782     --- a/drivers/usb/serial/console.c
3783     +++ b/drivers/usb/serial/console.c
3784     @@ -101,7 +101,6 @@ static int usb_console_setup(struct console *co, char *options)
3785     cflag |= PARENB;
3786     break;
3787     }
3788     - co->cflag = cflag;
3789    
3790     /*
3791     * no need to check the index here: if the index is wrong, console
3792     @@ -164,6 +163,7 @@ static int usb_console_setup(struct console *co, char *options)
3793     serial->type->set_termios(tty, port, &dummy);
3794    
3795     tty_port_tty_set(&port->port, NULL);
3796     + tty_save_termios(tty);
3797     tty_kref_put(tty);
3798     }
3799     tty_port_set_initialized(&port->port, 1);
3800     diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
3801     index 34bc3ab40c6d..51879ed18652 100644
3802     --- a/drivers/vhost/vsock.c
3803     +++ b/drivers/vhost/vsock.c
3804     @@ -15,6 +15,7 @@
3805     #include <net/sock.h>
3806     #include <linux/virtio_vsock.h>
3807     #include <linux/vhost.h>
3808     +#include <linux/hashtable.h>
3809    
3810     #include <net/af_vsock.h>
3811     #include "vhost.h"
3812     @@ -27,14 +28,14 @@ enum {
3813    
3814     /* Used to track all the vhost_vsock instances on the system. */
3815     static DEFINE_SPINLOCK(vhost_vsock_lock);
3816     -static LIST_HEAD(vhost_vsock_list);
3817     +static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
3818    
3819     struct vhost_vsock {
3820     struct vhost_dev dev;
3821     struct vhost_virtqueue vqs[2];
3822    
3823     - /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
3824     - struct list_head list;
3825     + /* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
3826     + struct hlist_node hash;
3827    
3828     struct vhost_work send_pkt_work;
3829     spinlock_t send_pkt_list_lock;
3830     @@ -50,11 +51,14 @@ static u32 vhost_transport_get_local_cid(void)
3831     return VHOST_VSOCK_DEFAULT_HOST_CID;
3832     }
3833    
3834     -static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
3835     +/* Callers that dereference the return value must hold vhost_vsock_lock or the
3836     + * RCU read lock.
3837     + */
3838     +static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
3839     {
3840     struct vhost_vsock *vsock;
3841    
3842     - list_for_each_entry(vsock, &vhost_vsock_list, list) {
3843     + hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
3844     u32 other_cid = vsock->guest_cid;
3845    
3846     /* Skip instances that have no CID yet */
3847     @@ -69,17 +73,6 @@ static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
3848     return NULL;
3849     }
3850    
3851     -static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
3852     -{
3853     - struct vhost_vsock *vsock;
3854     -
3855     - spin_lock_bh(&vhost_vsock_lock);
3856     - vsock = __vhost_vsock_get(guest_cid);
3857     - spin_unlock_bh(&vhost_vsock_lock);
3858     -
3859     - return vsock;
3860     -}
3861     -
3862     static void
3863     vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
3864     struct vhost_virtqueue *vq)
3865     @@ -210,9 +203,12 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
3866     struct vhost_vsock *vsock;
3867     int len = pkt->len;
3868    
3869     + rcu_read_lock();
3870     +
3871     /* Find the vhost_vsock according to guest context id */
3872     vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
3873     if (!vsock) {
3874     + rcu_read_unlock();
3875     virtio_transport_free_pkt(pkt);
3876     return -ENODEV;
3877     }
3878     @@ -225,6 +221,8 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
3879     spin_unlock_bh(&vsock->send_pkt_list_lock);
3880    
3881     vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
3882     +
3883     + rcu_read_unlock();
3884     return len;
3885     }
3886    
3887     @@ -234,12 +232,15 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
3888     struct vhost_vsock *vsock;
3889     struct virtio_vsock_pkt *pkt, *n;
3890     int cnt = 0;
3891     + int ret = -ENODEV;
3892     LIST_HEAD(freeme);
3893    
3894     + rcu_read_lock();
3895     +
3896     /* Find the vhost_vsock according to guest context id */
3897     vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
3898     if (!vsock)
3899     - return -ENODEV;
3900     + goto out;
3901    
3902     spin_lock_bh(&vsock->send_pkt_list_lock);
3903     list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
3904     @@ -265,7 +266,10 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
3905     vhost_poll_queue(&tx_vq->poll);
3906     }
3907    
3908     - return 0;
3909     + ret = 0;
3910     +out:
3911     + rcu_read_unlock();
3912     + return ret;
3913     }
3914    
3915     static struct virtio_vsock_pkt *
3916     @@ -533,10 +537,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
3917     spin_lock_init(&vsock->send_pkt_list_lock);
3918     INIT_LIST_HEAD(&vsock->send_pkt_list);
3919     vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
3920     -
3921     - spin_lock_bh(&vhost_vsock_lock);
3922     - list_add_tail(&vsock->list, &vhost_vsock_list);
3923     - spin_unlock_bh(&vhost_vsock_lock);
3924     return 0;
3925    
3926     out:
3927     @@ -577,9 +577,13 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
3928     struct vhost_vsock *vsock = file->private_data;
3929    
3930     spin_lock_bh(&vhost_vsock_lock);
3931     - list_del(&vsock->list);
3932     + if (vsock->guest_cid)
3933     + hash_del_rcu(&vsock->hash);
3934     spin_unlock_bh(&vhost_vsock_lock);
3935    
3936     + /* Wait for other CPUs to finish using vsock */
3937     + synchronize_rcu();
3938     +
3939     /* Iterating over all connections for all CIDs to find orphans is
3940     * inefficient. Room for improvement here. */
3941     vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
3942     @@ -620,12 +624,17 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
3943    
3944     /* Refuse if CID is already in use */
3945     spin_lock_bh(&vhost_vsock_lock);
3946     - other = __vhost_vsock_get(guest_cid);
3947     + other = vhost_vsock_get(guest_cid);
3948     if (other && other != vsock) {
3949     spin_unlock_bh(&vhost_vsock_lock);
3950     return -EADDRINUSE;
3951     }
3952     +
3953     + if (vsock->guest_cid)
3954     + hash_del_rcu(&vsock->hash);
3955     +
3956     vsock->guest_cid = guest_cid;
3957     + hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
3958     spin_unlock_bh(&vhost_vsock_lock);
3959    
3960     return 0;
3961     diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
3962     index 3713d22b95a7..907e85d65bb4 100644
3963     --- a/fs/cifs/dir.c
3964     +++ b/fs/cifs/dir.c
3965     @@ -174,7 +174,7 @@ cifs_bp_rename_retry:
3966    
3967     cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
3968     memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
3969     - full_path[dfsplen] = '\\';
3970     + full_path[dfsplen] = dirsep;
3971     for (i = 0; i < pplen-1; i++)
3972     if (full_path[dfsplen+1+i] == '/')
3973     full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
3974     diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
3975     index 7b861bbc0b43..315967354954 100644
3976     --- a/fs/nfs/callback_proc.c
3977     +++ b/fs/nfs/callback_proc.c
3978     @@ -686,20 +686,24 @@ __be32 nfs4_callback_offload(void *data, void *dummy,
3979     {
3980     struct cb_offloadargs *args = data;
3981     struct nfs_server *server;
3982     - struct nfs4_copy_state *copy;
3983     + struct nfs4_copy_state *copy, *tmp_copy;
3984     bool found = false;
3985    
3986     + copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
3987     + if (!copy)
3988     + return htonl(NFS4ERR_SERVERFAULT);
3989     +
3990     spin_lock(&cps->clp->cl_lock);
3991     rcu_read_lock();
3992     list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
3993     client_link) {
3994     - list_for_each_entry(copy, &server->ss_copies, copies) {
3995     + list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
3996     if (memcmp(args->coa_stateid.other,
3997     - copy->stateid.other,
3998     + tmp_copy->stateid.other,
3999     sizeof(args->coa_stateid.other)))
4000     continue;
4001     - nfs4_copy_cb_args(copy, args);
4002     - complete(&copy->completion);
4003     + nfs4_copy_cb_args(tmp_copy, args);
4004     + complete(&tmp_copy->completion);
4005     found = true;
4006     goto out;
4007     }
4008     @@ -707,15 +711,11 @@ __be32 nfs4_callback_offload(void *data, void *dummy,
4009     out:
4010     rcu_read_unlock();
4011     if (!found) {
4012     - copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
4013     - if (!copy) {
4014     - spin_unlock(&cps->clp->cl_lock);
4015     - return htonl(NFS4ERR_SERVERFAULT);
4016     - }
4017     memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
4018     nfs4_copy_cb_args(copy, args);
4019     list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
4020     - }
4021     + } else
4022     + kfree(copy);
4023     spin_unlock(&cps->clp->cl_lock);
4024    
4025     return 0;
4026     diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
4027     index cae43333ef16..86ac2c5b93fe 100644
4028     --- a/fs/nfs/flexfilelayout/flexfilelayout.c
4029     +++ b/fs/nfs/flexfilelayout/flexfilelayout.c
4030     @@ -1361,12 +1361,7 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
4031     task))
4032     return;
4033    
4034     - if (ff_layout_read_prepare_common(task, hdr))
4035     - return;
4036     -
4037     - if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4038     - hdr->args.lock_context, FMODE_READ) == -EIO)
4039     - rpc_exit(task, -EIO); /* lost lock, terminate I/O */
4040     + ff_layout_read_prepare_common(task, hdr);
4041     }
4042    
4043     static void ff_layout_read_call_done(struct rpc_task *task, void *data)
4044     @@ -1542,12 +1537,7 @@ static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
4045     task))
4046     return;
4047    
4048     - if (ff_layout_write_prepare_common(task, hdr))
4049     - return;
4050     -
4051     - if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4052     - hdr->args.lock_context, FMODE_WRITE) == -EIO)
4053     - rpc_exit(task, -EIO); /* lost lock, terminate I/O */
4054     + ff_layout_write_prepare_common(task, hdr);
4055     }
4056    
4057     static void ff_layout_write_call_done(struct rpc_task *task, void *data)
4058     @@ -1742,6 +1732,10 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
4059     fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
4060     if (fh)
4061     hdr->args.fh = fh;
4062     +
4063     + if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
4064     + goto out_failed;
4065     +
4066     /*
4067     * Note that if we ever decide to split across DSes,
4068     * then we may need to handle dense-like offsets.
4069     @@ -1804,6 +1798,9 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
4070     if (fh)
4071     hdr->args.fh = fh;
4072    
4073     + if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
4074     + goto out_failed;
4075     +
4076     /*
4077     * Note that if we ever decide to split across DSes,
4078     * then we may need to handle dense-like offsets.
4079     diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
4080     index 411798346e48..de50a342d5a5 100644
4081     --- a/fs/nfs/flexfilelayout/flexfilelayout.h
4082     +++ b/fs/nfs/flexfilelayout/flexfilelayout.h
4083     @@ -215,6 +215,10 @@ unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
4084     unsigned int maxnum);
4085     struct nfs_fh *
4086     nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx);
4087     +int
4088     +nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
4089     + u32 mirror_idx,
4090     + nfs4_stateid *stateid);
4091    
4092     struct nfs4_pnfs_ds *
4093     nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
4094     diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
4095     index 59aa04976331..a8df2f496898 100644
4096     --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
4097     +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
4098     @@ -370,6 +370,25 @@ out:
4099     return fh;
4100     }
4101    
4102     +int
4103     +nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
4104     + u32 mirror_idx,
4105     + nfs4_stateid *stateid)
4106     +{
4107     + struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
4108     +
4109     + if (!ff_layout_mirror_valid(lseg, mirror, false)) {
4110     + pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
4111     + __func__, mirror_idx);
4112     + goto out;
4113     + }
4114     +
4115     + nfs4_stateid_copy(stateid, &mirror->stateid);
4116     + return 1;
4117     +out:
4118     + return 0;
4119     +}
4120     +
4121     /**
4122     * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
4123     * @lseg: the layout segment we're operating on
4124     diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
4125     index ac5b784a1de0..fed06fd9998d 100644
4126     --- a/fs/nfs/nfs42proc.c
4127     +++ b/fs/nfs/nfs42proc.c
4128     @@ -137,31 +137,32 @@ static int handle_async_copy(struct nfs42_copy_res *res,
4129     struct file *dst,
4130     nfs4_stateid *src_stateid)
4131     {
4132     - struct nfs4_copy_state *copy;
4133     + struct nfs4_copy_state *copy, *tmp_copy;
4134     int status = NFS4_OK;
4135     bool found_pending = false;
4136     struct nfs_open_context *ctx = nfs_file_open_context(dst);
4137    
4138     + copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
4139     + if (!copy)
4140     + return -ENOMEM;
4141     +
4142     spin_lock(&server->nfs_client->cl_lock);
4143     - list_for_each_entry(copy, &server->nfs_client->pending_cb_stateids,
4144     + list_for_each_entry(tmp_copy, &server->nfs_client->pending_cb_stateids,
4145     copies) {
4146     - if (memcmp(&res->write_res.stateid, &copy->stateid,
4147     + if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
4148     NFS4_STATEID_SIZE))
4149     continue;
4150     found_pending = true;
4151     - list_del(&copy->copies);
4152     + list_del(&tmp_copy->copies);
4153     break;
4154     }
4155     if (found_pending) {
4156     spin_unlock(&server->nfs_client->cl_lock);
4157     + kfree(copy);
4158     + copy = tmp_copy;
4159     goto out;
4160     }
4161    
4162     - copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
4163     - if (!copy) {
4164     - spin_unlock(&server->nfs_client->cl_lock);
4165     - return -ENOMEM;
4166     - }
4167     memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
4168     init_completion(&copy->completion);
4169     copy->parent_state = ctx->state;
4170     diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
4171     index 3a6904173214..63287d911c08 100644
4172     --- a/fs/nfs/nfs4_fs.h
4173     +++ b/fs/nfs/nfs4_fs.h
4174     @@ -41,6 +41,8 @@ enum nfs4_client_state {
4175     NFS4CLNT_MOVED,
4176     NFS4CLNT_LEASE_MOVED,
4177     NFS4CLNT_DELEGATION_EXPIRED,
4178     + NFS4CLNT_RUN_MANAGER,
4179     + NFS4CLNT_DELEGRETURN_RUNNING,
4180     };
4181    
4182     #define NFS4_RENEW_TIMEOUT 0x01
4183     diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
4184     index 18920152da14..d2f645d34eb1 100644
4185     --- a/fs/nfs/nfs4state.c
4186     +++ b/fs/nfs/nfs4state.c
4187     @@ -1210,6 +1210,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
4188     struct task_struct *task;
4189     char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
4190    
4191     + set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
4192     if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
4193     return;
4194     __module_get(THIS_MODULE);
4195     @@ -2485,6 +2486,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
4196    
4197     /* Ensure exclusive access to NFSv4 state */
4198     do {
4199     + clear_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
4200     if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
4201     section = "purge state";
4202     status = nfs4_purge_lease(clp);
4203     @@ -2575,14 +2577,18 @@ static void nfs4_state_manager(struct nfs_client *clp)
4204     }
4205    
4206     nfs4_end_drain_session(clp);
4207     - if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
4208     - nfs_client_return_marked_delegations(clp);
4209     - continue;
4210     + nfs4_clear_state_manager_bit(clp);
4211     +
4212     + if (!test_and_set_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state)) {
4213     + if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
4214     + nfs_client_return_marked_delegations(clp);
4215     + set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
4216     + }
4217     + clear_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state);
4218     }
4219    
4220     - nfs4_clear_state_manager_bit(clp);
4221     /* Did we race with an attempt to give us more work? */
4222     - if (clp->cl_state == 0)
4223     + if (!test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state))
4224     return;
4225     if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
4226     return;
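
The NFS4CLNT_RUN_MANAGER bit above implements a classic "work was requested while I was running" flag: callers set it before trying to claim the RUNNING bit, the worker clears it at the top of every pass, and after dropping RUNNING the worker only exits if the flag is still clear. Below is a rough restatement of that pattern in portable C11 atomics; it is a sketch, not the kernel bitops, and schedule_manager/manager_loop/do_pending_work are hypothetical names.

#include <stdatomic.h>
#include <stdbool.h>

enum { RUN = 1u << 0, RUNNING = 1u << 1 };

static _Atomic unsigned state;

static void do_pending_work(void) { /* placeholder for one pass of work */ }

/* Caller side: always record that work is wanted, then try to claim RUNNING. */
static bool schedule_manager(void)
{
    atomic_fetch_or(&state, RUN);
    /* true if the caller should actually start the worker */
    return !(atomic_fetch_or(&state, RUNNING) & RUNNING);
}

/* Worker side: clear RUN at the top of each pass so a request that arrives
 * mid-pass is noticed; drop RUNNING and only exit if RUN is still clear. */
static void manager_loop(void)
{
    for (;;) {
        atomic_fetch_and(&state, ~RUN);
        do_pending_work();
        atomic_fetch_and(&state, ~RUNNING);
        if (!(atomic_load(&state) & RUN))
            return;                 /* no new request raced with us */
        if (atomic_fetch_or(&state, RUNNING) & RUNNING)
            return;                 /* another instance took over */
    }
}

Testing a dedicated "run" flag instead of "cl_state == 0" means states that legitimately stay set (such as DELEGRETURN deferred to another task) no longer force the manager to spin.
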
4227     diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
4228     index efda23cf32c7..5185a16b19ba 100644
4229     --- a/include/linux/hyperv.h
4230     +++ b/include/linux/hyperv.h
4231     @@ -904,6 +904,13 @@ struct vmbus_channel {
4232    
4233     bool probe_done;
4234    
4235     + /*
4236     + * We must offload the handling of the primary/sub channels
4237     + * from the single-threaded vmbus_connection.work_queue to
4238     + * two different workqueues, otherwise we can block
4239     + * vmbus_connection.work_queue and hang: see vmbus_process_offer().
4240     + */
4241     + struct work_struct add_channel_work;
4242     };
4243    
4244     static inline bool is_hvsock_channel(const struct vmbus_channel *c)
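
The comment added above carries the whole rationale: if slow per-channel setup runs inline on the single-threaded connection queue, every other offer stalls behind it. A crude userspace analog of "hand the slow part to its own worker" is sketched below with pthreads standing in for kernel workqueues; all names (channel, process_offer, add_channel_work) are made up for the example.

#include <pthread.h>
#include <stdio.h>

struct channel { int id; pthread_t add_worker; };

static void *add_channel_work(void *arg)
{
    struct channel *ch = arg;
    /* ... potentially slow setup that must not block the dispatcher ... */
    printf("channel %d set up\n", ch->id);
    return NULL;
}

/* Dispatcher: instead of doing the setup inline (and stalling every other
 * offer behind it), push the work to a separate context and return. */
static int process_offer(struct channel *ch)
{
    return pthread_create(&ch->add_worker, NULL, add_channel_work, ch);
}

int main(void)
{
    struct channel ch = { .id = 1 };
    if (process_offer(&ch) != 0)
        return 1;
    pthread_join(ch.add_worker, NULL);
    return 0;
}
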
4245     diff --git a/include/linux/tty.h b/include/linux/tty.h
4246     index c56e3978b00f..808fbfe86f85 100644
4247     --- a/include/linux/tty.h
4248     +++ b/include/linux/tty.h
4249     @@ -556,6 +556,7 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
4250     extern void tty_release_struct(struct tty_struct *tty, int idx);
4251     extern int tty_release(struct inode *inode, struct file *filp);
4252     extern void tty_init_termios(struct tty_struct *tty);
4253     +extern void tty_save_termios(struct tty_struct *tty);
4254     extern int tty_standard_install(struct tty_driver *driver,
4255     struct tty_struct *tty);
4256    
4257     diff --git a/include/linux/usb.h b/include/linux/usb.h
4258     index 4cdd515a4385..5e49e82c4368 100644
4259     --- a/include/linux/usb.h
4260     +++ b/include/linux/usb.h
4261     @@ -407,11 +407,11 @@ struct usb_host_bos {
4262     };
4263    
4264     int __usb_get_extra_descriptor(char *buffer, unsigned size,
4265     - unsigned char type, void **ptr);
4266     + unsigned char type, void **ptr, size_t min);
4267     #define usb_get_extra_descriptor(ifpoint, type, ptr) \
4268     __usb_get_extra_descriptor((ifpoint)->extra, \
4269     (ifpoint)->extralen, \
4270     - type, (void **)ptr)
4271     + type, (void **)ptr, sizeof(**(ptr)))
4272    
4273     /* ----------------------------------------------------------------------- */
4274    
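
The extra 'min' argument above lets the helper refuse to return a descriptor that is shorter than the structure the caller will cast it to, and the macro derives that minimum from sizeof(**(ptr)) automatically. As a sketch of the check (an editor's illustration, not the in-kernel implementation), a walk over a bLength/bDescriptorType-prefixed buffer might look like this:

#include <stddef.h>

struct desc_header { unsigned char bLength, bDescriptorType; };

static int find_descriptor(const unsigned char *buf, size_t size,
                           unsigned char type, size_t min, const void **out)
{
    size_t pos = 0;

    while (pos + sizeof(struct desc_header) <= size) {
        const struct desc_header *h = (const void *)(buf + pos);

        if (h->bLength < sizeof(*h) || pos + h->bLength > size)
            return -1;                    /* malformed buffer */
        if (h->bDescriptorType == type) {
            if (h->bLength < min)
                return -1;                /* too short to cast safely */
            *out = h;
            return 0;
        }
        pos += h->bLength;
    }
    return -1;
}

Without the minimum-length check, a device advertising a truncated descriptor could make the caller read past the end of the extra-descriptor buffer.
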
4275     diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
4276     index 2dd37cada7c0..888a833d3b00 100644
4277     --- a/include/sound/pcm_params.h
4278     +++ b/include/sound/pcm_params.h
4279     @@ -254,11 +254,13 @@ static inline int snd_interval_empty(const struct snd_interval *i)
4280     static inline int snd_interval_single(const struct snd_interval *i)
4281     {
4282     return (i->min == i->max ||
4283     - (i->min + 1 == i->max && i->openmax));
4284     + (i->min + 1 == i->max && (i->openmin || i->openmax)));
4285     }
4286    
4287     static inline int snd_interval_value(const struct snd_interval *i)
4288     {
4289     + if (i->openmin && !i->openmax)
4290     + return i->max;
4291     return i->min;
4292     }
4293    
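
The two helpers above deal with intervals whose endpoints may be open: a half-open interval such as (43, 44] contains exactly one integer, so it must count as "single", and that single value is the max, not the min. A self-contained restatement of the fixed logic with that worked case (struct and function names invented for the example):

#include <assert.h>

struct interval { unsigned min, max; unsigned openmin:1, openmax:1; };

static int interval_single(const struct interval *i)
{
    return i->min == i->max ||
           (i->min + 1 == i->max && (i->openmin || i->openmax));
}

static unsigned interval_value(const struct interval *i)
{
    if (i->openmin && !i->openmax)
        return i->max;     /* (min, max] -> the only admissible value is max */
    return i->min;
}

int main(void)
{
    struct interval i = { .min = 43, .max = 44, .openmin = 1, .openmax = 0 };
    assert(interval_single(&i));
    assert(interval_value(&i) == 44);
    return 0;
}
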
4294     diff --git a/lib/test_firmware.c b/lib/test_firmware.c
4295     index b984806d7d7b..7cab9a9869ac 100644
4296     --- a/lib/test_firmware.c
4297     +++ b/lib/test_firmware.c
4298     @@ -837,6 +837,7 @@ static ssize_t read_firmware_show(struct device *dev,
4299     if (req->fw->size > PAGE_SIZE) {
4300     pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
4301     rc = -EINVAL;
4302     + goto out;
4303     }
4304     memcpy(buf, req->fw->data, req->fw->size);
4305    
4306     diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
4307     index 9f481cfdf77d..e8090f099eb8 100644
4308     --- a/net/batman-adv/bat_v_elp.c
4309     +++ b/net/batman-adv/bat_v_elp.c
4310     @@ -352,19 +352,21 @@ out:
4311     */
4312     int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
4313     {
4314     + static const size_t tvlv_padding = sizeof(__be32);
4315     struct batadv_elp_packet *elp_packet;
4316     unsigned char *elp_buff;
4317     u32 random_seqno;
4318     size_t size;
4319     int res = -ENOMEM;
4320    
4321     - size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN;
4322     + size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN + tvlv_padding;
4323     hard_iface->bat_v.elp_skb = dev_alloc_skb(size);
4324     if (!hard_iface->bat_v.elp_skb)
4325     goto out;
4326    
4327     skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
4328     - elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
4329     + elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb,
4330     + BATADV_ELP_HLEN + tvlv_padding);
4331     elp_packet = (struct batadv_elp_packet *)elp_buff;
4332    
4333     elp_packet->packet_type = BATADV_ELP;
4334     diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
4335     index 0fddc17106bd..5b71a289d04f 100644
4336     --- a/net/batman-adv/fragmentation.c
4337     +++ b/net/batman-adv/fragmentation.c
4338     @@ -275,7 +275,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
4339     kfree(entry);
4340    
4341     packet = (struct batadv_frag_packet *)skb_out->data;
4342     - size = ntohs(packet->total_size);
4343     + size = ntohs(packet->total_size) + hdr_size;
4344    
4345     /* Make room for the rest of the fragments. */
4346     if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
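
The size fix above matters because skb_out->len already includes the encapsulating header while packet->total_size covers only the fragmented payload, so the target size for pskb_expand_head() must be total_size + hdr_size or the tail room falls short by exactly hdr_size bytes. A back-of-the-envelope check with made-up numbers:

#include <assert.h>

int main(void)
{
    unsigned hdr_size   = 20;              /* encapsulating header already in skb_out */
    unsigned total_size = 1400;            /* payload size carried in the frag header */
    unsigned current    = hdr_size + 500;  /* header + first fragment */

    unsigned needed = total_size + hdr_size;   /* what the hunk now computes */
    unsigned expand = needed - current;

    assert(expand == 900);     /* the old computation would have asked for 880 */
    return 0;
}
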
4347     diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
4348     index 5836ddeac9e3..5f3c81e705c7 100644
4349     --- a/net/mac80211/iface.c
4350     +++ b/net/mac80211/iface.c
4351     @@ -1015,6 +1015,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
4352     if (local->open_count == 0)
4353     ieee80211_clear_tx_pending(local);
4354    
4355     + sdata->vif.bss_conf.beacon_int = 0;
4356     +
4357     /*
4358     * If the interface goes down while suspended, presumably because
4359     * the device was unplugged and that happens before our resume,
4360     diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
4361     index 96611d5dfadb..5e2b4a41acf1 100644
4362     --- a/net/mac80211/rx.c
4363     +++ b/net/mac80211/rx.c
4364     @@ -1372,6 +1372,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
4365     return RX_CONTINUE;
4366    
4367     if (ieee80211_is_ctl(hdr->frame_control) ||
4368     + ieee80211_is_nullfunc(hdr->frame_control) ||
4369     ieee80211_is_qos_nullfunc(hdr->frame_control) ||
4370     is_multicast_ether_addr(hdr->addr1))
4371     return RX_CONTINUE;
4372     @@ -3029,7 +3030,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
4373     cfg80211_sta_opmode_change_notify(sdata->dev,
4374     rx->sta->addr,
4375     &sta_opmode,
4376     - GFP_KERNEL);
4377     + GFP_ATOMIC);
4378     goto handled;
4379     }
4380     case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
4381     @@ -3066,7 +3067,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
4382     cfg80211_sta_opmode_change_notify(sdata->dev,
4383     rx->sta->addr,
4384     &sta_opmode,
4385     - GFP_KERNEL);
4386     + GFP_ATOMIC);
4387     goto handled;
4388     }
4389     default:
4390     diff --git a/net/mac80211/status.c b/net/mac80211/status.c
4391     index 91d7c0cd1882..7fa10d06cc51 100644
4392     --- a/net/mac80211/status.c
4393     +++ b/net/mac80211/status.c
4394     @@ -964,6 +964,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
4395     /* Track when last TDLS packet was ACKed */
4396     if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
4397     sta->status_stats.last_tdls_pkt_time = jiffies;
4398     + } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
4399     + return;
4400     } else {
4401     ieee80211_lost_packet(sta, info);
4402     }
4403     diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
4404     index 25ba24bef8f5..995a491f73a9 100644
4405     --- a/net/mac80211/tx.c
4406     +++ b/net/mac80211/tx.c
4407     @@ -439,8 +439,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
4408     if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
4409     info->hw_queue = tx->sdata->vif.cab_queue;
4410    
4411     - /* no stations in PS mode */
4412     - if (!atomic_read(&ps->num_sta_ps))
4413     + /* no stations in PS mode and no buffered packets */
4414     + if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf))
4415     return TX_CONTINUE;
4416    
4417     info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
4418     diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
4419     index 21c0aa0a0d1d..8cb7d812ccb8 100644
4420     --- a/net/sunrpc/auth_gss/auth_gss.c
4421     +++ b/net/sunrpc/auth_gss/auth_gss.c
4422     @@ -1768,6 +1768,7 @@ priv_release_snd_buf(struct rpc_rqst *rqstp)
4423     for (i=0; i < rqstp->rq_enc_pages_num; i++)
4424     __free_page(rqstp->rq_enc_pages[i]);
4425     kfree(rqstp->rq_enc_pages);
4426     + rqstp->rq_release_snd_buf = NULL;
4427     }
4428    
4429     static int
4430     @@ -1776,6 +1777,9 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
4431     struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
4432     int first, last, i;
4433    
4434     + if (rqstp->rq_release_snd_buf)
4435     + rqstp->rq_release_snd_buf(rqstp);
4436     +
4437     if (snd_buf->page_len == 0) {
4438     rqstp->rq_enc_pages_num = 0;
4439     return 0;
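
Taken together, the two auth_gss hunks implement a release-hook pattern: before attaching fresh pages to a request (which happens again on retransmission), any previously registered release callback is invoked, and the callback clears itself after freeing so it can never run twice. A compact userspace sketch of that idea, with invented names (struct request, alloc_pages_for) and an assumed zero-initialized request:

#include <stdlib.h>

struct request {
    void **pages;
    int npages;
    void (*release)(struct request *);
};

static void release_pages(struct request *req)
{
    for (int i = 0; i < req->npages; i++)
        free(req->pages[i]);
    free(req->pages);
    req->pages = NULL;
    req->npages = 0;
    req->release = NULL;        /* mirrors "rq_release_snd_buf = NULL" above */
}

static int alloc_pages_for(struct request *req, int n)
{
    if (req->release)           /* drop what a previous attempt allocated */
        req->release(req);

    req->pages = calloc(n, sizeof(*req->pages));
    if (!req->pages)
        return -1;
    for (int i = 0; i < n; i++) {
        req->pages[i] = malloc(4096);
        if (!req->pages[i]) {
            req->npages = i;
            release_pages(req);
            return -1;
        }
    }
    req->npages = n;
    req->release = release_pages;
    return 0;
}
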
4440     diff --git a/net/wireless/util.c b/net/wireless/util.c
4441     index 959ed3acd240..aad1c8e858e5 100644
4442     --- a/net/wireless/util.c
4443     +++ b/net/wireless/util.c
4444     @@ -1418,6 +1418,8 @@ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
4445     ies[pos + ext],
4446     ext == 2))
4447     pos = skip_ie(ies, ielen, pos);
4448     + else
4449     + break;
4450     }
4451     } else {
4452     pos = skip_ie(ies, ielen, pos);
4453     diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
4454     index 66c90f486af9..818dff1de545 100644
4455     --- a/sound/core/pcm_native.c
4456     +++ b/sound/core/pcm_native.c
4457     @@ -36,6 +36,7 @@
4458     #include <sound/timer.h>
4459     #include <sound/minors.h>
4460     #include <linux/uio.h>
4461     +#include <linux/delay.h>
4462    
4463     #include "pcm_local.h"
4464    
4465     @@ -91,12 +92,12 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
4466     * and this may lead to a deadlock when the code path takes read sem
4467     * twice (e.g. one in snd_pcm_action_nonatomic() and another in
4468     * snd_pcm_stream_lock()). As a (suboptimal) workaround, let writer to
4469     - * spin until it gets the lock.
4470     + * sleep until all the readers have completed without the writer blocking them.
4471     */
4472     -static inline void down_write_nonblock(struct rw_semaphore *lock)
4473     +static inline void down_write_nonfifo(struct rw_semaphore *lock)
4474     {
4475     while (!down_write_trylock(lock))
4476     - cond_resched();
4477     + msleep(1);
4478     }
4479    
4480     #define PCM_LOCK_DEFAULT 0
4481     @@ -1967,7 +1968,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
4482     res = -ENOMEM;
4483     goto _nolock;
4484     }
4485     - down_write_nonblock(&snd_pcm_link_rwsem);
4486     + down_write_nonfifo(&snd_pcm_link_rwsem);
4487     write_lock_irq(&snd_pcm_link_rwlock);
4488     if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
4489     substream->runtime->status->state != substream1->runtime->status->state ||
4490     @@ -2014,7 +2015,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
4491     struct snd_pcm_substream *s;
4492     int res = 0;
4493    
4494     - down_write_nonblock(&snd_pcm_link_rwsem);
4495     + down_write_nonfifo(&snd_pcm_link_rwsem);
4496     write_lock_irq(&snd_pcm_link_rwlock);
4497     if (!snd_pcm_stream_linked(substream)) {
4498     res = -EALREADY;
4499     @@ -2369,7 +2370,8 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
4500    
4501     static void pcm_release_private(struct snd_pcm_substream *substream)
4502     {
4503     - snd_pcm_unlink(substream);
4504     + if (snd_pcm_stream_linked(substream))
4505     + snd_pcm_unlink(substream);
4506     }
4507    
4508     void snd_pcm_release_substream(struct snd_pcm_substream *substream)
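
The renamed helper above keeps the writer out of the rwsem's queue entirely: it polls with a trylock and sleeps between attempts, so readers that re-take the semaphore are never blocked behind a waiting writer, and the writer now yields the CPU instead of busy-spinning. A userspace analog using a POSIX rwlock (a sketch only; pthread_rwlock_t and nanosleep stand in for the kernel primitives):

#include <pthread.h>
#include <time.h>

static void down_write_polling(pthread_rwlock_t *lock)
{
    const struct timespec one_ms = { .tv_sec = 0, .tv_nsec = 1000000 };

    while (pthread_rwlock_trywrlock(lock) != 0)
        nanosleep(&one_ms, NULL);   /* back off instead of queueing as a writer */
}

The trade-off is explicit in the original comment: this is a suboptimal workaround that favours avoiding the nested-read deadlock over writer fairness.
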
4509     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4510     index 5810be2c6c34..1ddeebc373b3 100644
4511     --- a/sound/pci/hda/hda_intel.c
4512     +++ b/sound/pci/hda/hda_intel.c
4513     @@ -2585,6 +2585,10 @@ static const struct pci_device_id azx_ids[] = {
4514     /* AMD Hudson */
4515     { PCI_DEVICE(0x1022, 0x780d),
4516     .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
4517     + /* AMD Stoney */
4518     + { PCI_DEVICE(0x1022, 0x157a),
4519     + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
4520     + AZX_DCAPS_PM_RUNTIME },
4521     /* AMD Raven */
4522     { PCI_DEVICE(0x1022, 0x15e3),
4523     .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
4524     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4525     index cf5d26642bcd..22ca1f0a858f 100644
4526     --- a/sound/pci/hda/patch_realtek.c
4527     +++ b/sound/pci/hda/patch_realtek.c
4528     @@ -4988,9 +4988,18 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
4529     { 0x19, 0x21a11010 }, /* dock mic */
4530     { }
4531     };
4532     + /* Ensure the speaker pin is coupled with DAC NID 0x03; otherwise
4533     + * the speaker output becomes too low for some reason on Thinkpads with
4534     + * ALC298 codec
4535     + */
4536     + static hda_nid_t preferred_pairs[] = {
4537     + 0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
4538     + 0
4539     + };
4540     struct alc_spec *spec = codec->spec;
4541    
4542     if (action == HDA_FIXUP_ACT_PRE_PROBE) {
4543     + spec->gen.preferred_dacs = preferred_pairs;
4544     spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
4545     snd_hda_apply_pincfgs(codec, pincfgs);
4546     } else if (action == HDA_FIXUP_ACT_INIT) {
4547     @@ -5510,6 +5519,7 @@ enum {
4548     ALC221_FIXUP_HP_HEADSET_MIC,
4549     ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
4550     ALC295_FIXUP_HP_AUTO_MUTE,
4551     + ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
4552     };
4553    
4554     static const struct hda_fixup alc269_fixups[] = {
4555     @@ -6387,6 +6397,15 @@ static const struct hda_fixup alc269_fixups[] = {
4556     .type = HDA_FIXUP_FUNC,
4557     .v.func = alc_fixup_auto_mute_via_amp,
4558     },
4559     + [ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE] = {
4560     + .type = HDA_FIXUP_PINS,
4561     + .v.pins = (const struct hda_pintbl[]) {
4562     + { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
4563     + { }
4564     + },
4565     + .chained = true,
4566     + .chain_id = ALC269_FIXUP_HEADSET_MIC
4567     + },
4568     };
4569    
4570     static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4571     @@ -6401,7 +6420,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4572     SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
4573     SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
4574     SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
4575     + SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4576     SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
4577     + SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4578     + SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4579     + SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4580     SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
4581     SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
4582     SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
4583     @@ -7065,6 +7088,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4584     {0x14, 0x90170110},
4585     {0x19, 0x04a11040},
4586     {0x21, 0x04211020}),
4587     + SND_HDA_PIN_QUIRK(0x10ec0286, 0x1025, "Acer", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
4588     + {0x12, 0x90a60130},
4589     + {0x17, 0x90170110},
4590     + {0x21, 0x02211020}),
4591     SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
4592     {0x12, 0x90a60120},
4593     {0x14, 0x90170110},
4594     diff --git a/sound/usb/card.c b/sound/usb/card.c
4595     index 2bfe4e80a6b9..a105947eaf55 100644
4596     --- a/sound/usb/card.c
4597     +++ b/sound/usb/card.c
4598     @@ -682,9 +682,12 @@ static int usb_audio_probe(struct usb_interface *intf,
4599    
4600     __error:
4601     if (chip) {
4602     + /* chip->active is inside the chip->card object,
4603     + * decrement it before the memory is possibly returned.
4604     + */
4605     + atomic_dec(&chip->active);
4606     if (!chip->num_interfaces)
4607     snd_card_free(chip->card);
4608     - atomic_dec(&chip->active);
4609     }
4610     mutex_unlock(&register_mutex);
4611     return err;
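
The reordering above is a use-after-free fix: the counter being decremented lives inside the object that snd_card_free() may release, so it has to be touched before the free, never after. A tiny illustration of the same ordering rule with stand-in types (not the ALSA structures):

#include <stdlib.h>

struct card { int active; /* ... */ };

static void drop_card(struct card *card, int last_user)
{
    card->active--;        /* must happen while 'card' is still valid */
    if (last_user)
        free(card);        /* after this, any card->... access is a UAF */
}
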
4612     diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
4613     index 8a945ece9869..6623cafc94f2 100644
4614     --- a/sound/usb/quirks.c
4615     +++ b/sound/usb/quirks.c
4616     @@ -1373,6 +1373,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
4617     return SNDRV_PCM_FMTBIT_DSD_U32_BE;
4618     break;
4619    
4620     + case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
4621     case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
4622     case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
4623     case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */
4624     diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
4625     index 87a04a8a5945..7607ba3e3cbe 100755
4626     --- a/tools/testing/selftests/tc-testing/tdc.py
4627     +++ b/tools/testing/selftests/tc-testing/tdc.py
4628     @@ -134,9 +134,9 @@ def exec_cmd(args, pm, stage, command):
4629     (rawout, serr) = proc.communicate()
4630    
4631     if proc.returncode != 0 and len(serr) > 0:
4632     - foutput = serr.decode("utf-8")
4633     + foutput = serr.decode("utf-8", errors="ignore")
4634     else:
4635     - foutput = rawout.decode("utf-8")
4636     + foutput = rawout.decode("utf-8", errors="ignore")
4637    
4638     proc.stdout.close()
4639     proc.stderr.close()
4640     @@ -169,6 +169,8 @@ def prepare_env(args, pm, stage, prefix, cmdlist, output = None):
4641     file=sys.stderr)
4642     print("\n{} *** Error message: \"{}\"".format(prefix, foutput),
4643     file=sys.stderr)
4644     + print("returncode {}; expected {}".format(proc.returncode,
4645     + exit_codes))
4646     print("\n{} *** Aborting test run.".format(prefix), file=sys.stderr)
4647     print("\n\n{} *** stdout ***".format(proc.stdout), file=sys.stderr)
4648     print("\n\n{} *** stderr ***".format(proc.stderr), file=sys.stderr)
4649     @@ -195,12 +197,18 @@ def run_one_test(pm, args, index, tidx):
4650     print('-----> execute stage')
4651     pm.call_pre_execute()
4652     (p, procout) = exec_cmd(args, pm, 'execute', tidx["cmdUnderTest"])
4653     - exit_code = p.returncode
4654     + if p:
4655     + exit_code = p.returncode
4656     + else:
4657     + exit_code = None
4658     +
4659     pm.call_post_execute()
4660    
4661     - if (exit_code != int(tidx["expExitCode"])):
4662     + if (exit_code is None or exit_code != int(tidx["expExitCode"])):
4663     result = False
4664     - print("exit:", exit_code, int(tidx["expExitCode"]))
4665     + print("exit: {!r}".format(exit_code))
4666     + print("exit: {}".format(int(tidx["expExitCode"])))
4667     + #print("exit: {!r} {}".format(exit_code, int(tidx["expExitCode"])))
4668     print(procout)
4669     else:
4670     if args.verbose > 0: