Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.20/0106-4.20.7-all-fixes.patch

Revision 3283
Mon Mar 4 10:35:54 2019 UTC by niro
File size: 106201 bytes
linux-4.20.7
diff --git a/Makefile b/Makefile
index 523922ea9c97..efc46cce5919 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 20
-SUBLEVEL = 6
+SUBLEVEL = 7
EXTRAVERSION =
NAME = Shy Crocodile

diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 318394ed5c7a..5e11ad3164e0 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
} else /* remote PCI bus */
base = cnspci->cfg1_regs + ((busno & 0xf) << 20);

- return base + (where & 0xffc) + (devfn << 12);
+ return base + where + (devfn << 12);
}

static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 29cdc99688f3..9859e1178e6b 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -299,8 +299,10 @@ int swsusp_arch_suspend(void)
dcache_clean_range(__idmap_text_start, __idmap_text_end);

/* Clean kvm setup code to PoC? */
- if (el2_reset_needed())
+ if (el2_reset_needed()) {
dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
+ dcache_clean_range(__hyp_text_start, __hyp_text_end);
+ }

/* make the crash dump kernel image protected again */
crash_post_resume();
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index e1261fbaa374..17f325ba831e 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -28,6 +28,8 @@
#include <asm/virt.h>

.text
+ .pushsection .hyp.text, "ax"
+
.align 11

ENTRY(__hyp_stub_vectors)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index ba6b41790fcd..b09b6f75f759 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
* we end up running with module randomization disabled.
*/
module_alloc_base = (u64)_etext - MODULES_VSIZE;
+ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));

/*
* Try to map the FDT early. If this fails, we simply bail,
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 30695a868107..5c9073bace83 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len)
__clean_dcache_area_pou(kaddr, len);
__flush_icache_all();
} else {
- flush_icache_range(addr, addr + len);
+ /*
+ * Don't issue kick_all_cpus_sync() after I-cache invalidation
+ * for user mappings.
+ */
+ __flush_icache_range(addr, addr + len);
}
}

diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 6b11f1314248..7f9e0304b510 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
unsigned int nr, int value)
{
- if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT))
+ if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) {
+ altr_a10sr_gpio_set(gc, nr, value);
return 0;
+ }
return -EINVAL;
}

diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index e0d6a0a7bc69..e41223c05f6e 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset)

static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset)
{
- return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
+ struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
+
+ switch (sprd_eic->type) {
+ case SPRD_EIC_DEBOUNCE:
+ return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
+ case SPRD_EIC_ASYNC:
+ return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA);
+ case SPRD_EIC_SYNC:
+ return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA);
+ default:
+ return -ENOTSUPP;
+ }
}

static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_BOTH:
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index adf72dda25a2..68a35b65925a 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table);
*/
struct pcf857x {
struct gpio_chip chip;
+ struct irq_chip irqchip;
struct i2c_client *client;
struct mutex lock; /* protect 'out' */
unsigned out; /* software latch */
@@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data)
mutex_unlock(&gpio->lock);
}

-static struct irq_chip pcf857x_irq_chip = {
- .name = "pcf857x",
- .irq_enable = pcf857x_irq_enable,
- .irq_disable = pcf857x_irq_disable,
- .irq_ack = noop,
- .irq_mask = noop,
- .irq_unmask = noop,
- .irq_set_wake = pcf857x_irq_set_wake,
- .irq_bus_lock = pcf857x_irq_bus_lock,
- .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
-};
-
/*-------------------------------------------------------------------------*/

static int pcf857x_probe(struct i2c_client *client,
@@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client,

/* Enable irqchip if we have an interrupt */
if (client->irq) {
+ gpio->irqchip.name = "pcf857x",
+ gpio->irqchip.irq_enable = pcf857x_irq_enable,
+ gpio->irqchip.irq_disable = pcf857x_irq_disable,
+ gpio->irqchip.irq_ack = noop,
+ gpio->irqchip.irq_mask = noop,
+ gpio->irqchip.irq_unmask = noop,
+ gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake,
+ gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock,
+ gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
status = gpiochip_irqchip_add_nested(&gpio->chip,
- &pcf857x_irq_chip,
+ &gpio->irqchip,
0, handle_level_irq,
IRQ_TYPE_NONE);
if (status) {
@@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client,
if (status)
goto fail;

- gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip,
+ gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip,
client->irq);
gpio->irq_parent = client->irq;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a2cbb474901c..76913e23cf3a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -828,7 +828,14 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
/* Do not leak kernel stack to userspace */
memset(&ge, 0, sizeof(ge));

- ge.timestamp = le->timestamp;
+ /*
+ * We may be running from a nested threaded interrupt in which case
+ * we didn't get the timestamp from lineevent_irq_handler().
+ */
+ if (!le->timestamp)
+ ge.timestamp = ktime_get_real_ns();
+ else
+ ge.timestamp = le->timestamp;

if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
&& le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
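
The gpiolib hunk above falls back to sampling the clock at consumption time when the hard IRQ handler never ran (the nested threaded interrupt case) and the cached timestamp is still zero. A minimal userspace sketch of the same fallback pattern, with a hypothetical lineevent struct standing in for the kernel's; this is an illustration, not the kernel API:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

struct lineevent { uint64_t timestamp; };

static uint64_t event_timestamp(const struct lineevent *le)
{
	/* fall back to "now" when the handler never stamped the event */
	return le->timestamp ? le->timestamp : now_ns();
}

int main(void)
{
	struct lineevent stamped = { .timestamp = now_ns() };
	struct lineevent unstamped = { 0 };

	printf("stamped:   %llu\n", (unsigned long long)event_timestamp(&stamped));
	printf("unstamped: %llu\n", (unsigned long long)event_timestamp(&unstamped));
	return 0;
}
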
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index f82bac086666..5d500e58d5ce 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -63,7 +63,7 @@ struct msm_gpu_funcs {
struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu);
void (*destroy)(struct msm_gpu *gpu);
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
/* show GPU status in debugfs: */
void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
struct drm_printer *p);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 6d373f5515b7..50a97471a2be 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -262,6 +262,9 @@ void ib_uverbs_release_file(struct kref *ref)
if (atomic_dec_and_test(&file->device->refcount))
ib_uverbs_comp_dev(file->device);

+ if (file->async_file)
+ kref_put(&file->async_file->ref,
+ ib_uverbs_release_async_event_file);
put_device(&file->device->dev);
kfree(file);
}
@@ -997,11 +1000,19 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)

/* Get an arbitrary mm pointer that hasn't been cleaned yet */
mutex_lock(&ufile->umap_lock);
- if (!list_empty(&ufile->umaps)) {
- mm = list_first_entry(&ufile->umaps,
- struct rdma_umap_priv, list)
- ->vma->vm_mm;
- mmget(mm);
+ while (!list_empty(&ufile->umaps)) {
+ int ret;
+
+ priv = list_first_entry(&ufile->umaps,
+ struct rdma_umap_priv, list);
+ mm = priv->vma->vm_mm;
+ ret = mmget_not_zero(mm);
+ if (!ret) {
+ list_del_init(&priv->list);
+ mm = NULL;
+ continue;
+ }
+ break;
}
mutex_unlock(&ufile->umap_lock);
if (!mm)
@@ -1132,10 +1143,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
list_del_init(&file->list);
mutex_unlock(&file->device->lists_mutex);

- if (file->async_file)
- kref_put(&file->async_file->ref,
- ib_uverbs_release_async_event_file);
-
kref_put(&file->ref, ib_uverbs_release_file);

return 0;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index c22ebc774a6a..f9a7e9d29c8b 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
vmf = 1;
break;
case STATUS:
- if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
+ if (flags & VM_WRITE) {
ret = -EPERM;
goto done;
}
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 1735deb1a9d4..79ac9c3c0e5a 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -2903,6 +2903,8 @@ send:
goto op_err;
if (!ret)
goto rnr_nak;
+ if (wqe->length > qp->r_len)
+ goto inv_err;
break;

case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -3071,7 +3073,10 @@ op_err:
goto err;

inv_err:
- send_status = IB_WC_REM_INV_REQ_ERR;
+ send_status =
+ sqp->ibqp.qp_type == IB_QPT_RC ?
+ IB_WC_REM_INV_REQ_ERR :
+ IB_WC_SUCCESS;
wc.status = IB_WC_LOC_QP_OP_ERR;
goto err;

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f3afab82f3ee..7ec9c6739170 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -5204,7 +5204,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
struct iommu_resv_region *entry, *next;

list_for_each_entry_safe(entry, next, head, list) {
- if (entry->type == IOMMU_RESV_RESERVED)
+ if (entry->type == IOMMU_RESV_MSI)
kfree(entry);
}
}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index ec3a5ef7fee0..cbbe6b6535be 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1935,12 +1935,14 @@ out:
}

static struct stripe_head *
-r5c_recovery_alloc_stripe(struct r5conf *conf,
- sector_t stripe_sect)
+r5c_recovery_alloc_stripe(
+ struct r5conf *conf,
+ sector_t stripe_sect,
+ int noblock)
{
struct stripe_head *sh;

- sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+ sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
if (!sh)
return NULL; /* no more stripe available */

@@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
stripe_sect);

if (!sh) {
- sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
+ sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
/*
* cannot get stripe from raid5_get_active_stripe
* try replay some stripes
@@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
r5c_recovery_replay_stripes(
cached_stripe_list, ctx);
sh = r5c_recovery_alloc_stripe(
- conf, stripe_sect);
+ conf, stripe_sect, 1);
}
if (!sh) {
+ int new_size = conf->min_nr_stripes * 2;
pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
mdname(mddev),
- conf->min_nr_stripes * 2);
- raid5_set_cache_size(mddev,
- conf->min_nr_stripes * 2);
- sh = r5c_recovery_alloc_stripe(conf,
- stripe_sect);
+ new_size);
+ ret = raid5_set_cache_size(mddev, new_size);
+ if (conf->min_nr_stripes <= new_size / 2) {
+ pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
+ mdname(mddev),
+ ret,
+ new_size,
+ conf->min_nr_stripes,
+ conf->max_nr_stripes);
+ return -ENOMEM;
+ }
+ sh = r5c_recovery_alloc_stripe(
+ conf, stripe_sect, 0);
}
if (!sh) {
pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
- mdname(mddev));
+ mdname(mddev));
return -ENOMEM;
}
list_add_tail(&sh->lru, cached_stripe_list);
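
The raid5-cache change above escalates stripe allocation in three steps: try without blocking, replay cached stripes and retry without blocking, then grow the stripe cache and retry with blocking allowed, bailing out with -ENOMEM only when the cache could not actually be grown. A compact sketch of that escalation, with hypothetical try_alloc()/grow_pool() helpers standing in for raid5_get_active_stripe() and raid5_set_cache_size():

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical pool standing in for the stripe cache. */
static int pool_cap = 2, pool_used = 2;

static void *try_alloc(int noblock)
{
	(void)noblock;          /* a real allocator would sleep when !noblock */
	if (pool_used >= pool_cap)
		return NULL;
	pool_used++;
	return malloc(16);
}

static int grow_pool(int new_cap)
{
	if (new_cap > 8)
		return -1;      /* models grow_one_stripe() failing */
	pool_cap = new_cap;
	return 0;
}

int main(void)
{
	void *sh;

	sh = try_alloc(1);                      /* step 1: non-blocking try */
	if (!sh)                                /* step 2: reclaim, then retry */
		sh = try_alloc(1);
	if (!sh) {
		int new_size = pool_cap * 2;

		if (grow_pool(new_size) != 0) { /* mirrors the -ENOMEM bail-out */
			fprintf(stderr, "cannot grow pool\n");
			return 1;
		}
		sh = try_alloc(0);              /* step 3: grown pool, may block */
	}
	printf("allocation %s\n", sh ? "succeeded" : "failed");
	free(sh);
	return 0;
}
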
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4990f0319f6c..cecea901ab8c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6369,6 +6369,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
int
raid5_set_cache_size(struct mddev *mddev, int size)
{
+ int result = 0;
struct r5conf *conf = mddev->private;

if (size <= 16 || size > 32768)
@@ -6385,11 +6386,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)

mutex_lock(&conf->cache_size_mutex);
while (size > conf->max_nr_stripes)
- if (!grow_one_stripe(conf, GFP_KERNEL))
+ if (!grow_one_stripe(conf, GFP_KERNEL)) {
+ conf->min_nr_stripes = conf->max_nr_stripes;
+ result = -ENOMEM;
break;
+ }
mutex_unlock(&conf->cache_size_mutex);

- return 0;
+ return result;
}
EXPORT_SYMBOL(raid5_set_cache_size);

diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 768972af8b85..0d3b7473bc21 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1427,6 +1427,8 @@ static int bcm2835_probe(struct platform_device *pdev)

err:
dev_dbg(dev, "%s -> err %d\n", __func__, ret);
+ if (host->dma_chan_rxtx)
+ dma_release_channel(host->dma_chan_rxtx);
mmc_free_host(mmc);

return ret;
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 6334cc752d8b..4ff7646d0d29 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -846,7 +846,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)

if (timing == MMC_TIMING_MMC_HS400 &&
host->dev_comp->hs400_tune)
- sdr_set_field(host->base + PAD_CMD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs400_cmd_int_delay);
dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 32e02700feaa..91d7965b3dab 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1883,6 +1883,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
u16 i, j;
u8 __iomem *bd;

+ netdev_reset_queue(ugeth->ndev);
+
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;

diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index babcfd9c0571..75213046563c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -2064,9 +2064,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
{
struct mlx4_cmd_mailbox *mailbox;
__be32 *outbox;
+ u64 qword_field;
u32 dword_field;
- int err;
+ u16 word_field;
u8 byte_field;
+ int err;
static const u8 a0_dmfs_query_hw_steering[] = {
[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
@@ -2094,19 +2096,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,

/* QPC/EEC/CQC/EQC/RDMARC attributes */

- MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
- MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
- MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
- MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
- MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
- MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
- MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
- MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
- MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
- MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
- MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
- MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
- MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+ MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
+ param->qpc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
+ param->log_num_qps = byte_field & 0x1f;
+ MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+ param->srqc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+ param->log_num_srqs = byte_field & 0x1f;
+ MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
+ param->cqc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
+ param->log_num_cqs = byte_field & 0x1f;
+ MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+ param->altc_base = qword_field;
+ MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+ param->auxc_base = qword_field;
+ MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
+ param->eqc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
+ param->log_num_eqs = byte_field & 0x1f;
+ MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
+ param->num_sys_eqs = word_field & 0xfff;
+ MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+ param->rdmarc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
+ param->log_rd_per_qp = byte_field & 0x7;

MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
@@ -2125,22 +2140,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* steering attributes */
if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
- MLX4_GET(param->log_mc_entry_sz, outbox,
- INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
- MLX4_GET(param->log_mc_table_sz, outbox,
- INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
- MLX4_GET(byte_field, outbox,
- INIT_HCA_FS_A0_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+ param->log_mc_entry_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+ param->log_mc_table_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
param->dmfs_high_steer_mode =
a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
} else {
MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
- MLX4_GET(param->log_mc_entry_sz, outbox,
- INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
- MLX4_GET(param->log_mc_hash_sz, outbox,
- INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
- MLX4_GET(param->log_mc_table_sz, outbox,
- INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ param->log_mc_entry_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ param->log_mc_hash_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ param->log_mc_table_sz = byte_field & 0x1f;
}

/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
@@ -2164,15 +2178,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* TPT attributes */

MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
- MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
- MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
+ param->mw_enabled = byte_field >> 7;
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+ param->log_mpt_sz = byte_field & 0x3f;
MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);

/* UAR attributes */

MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
- MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ param->log_uar_sz = byte_field & 0xf;

/* phv_check enable */
MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
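
The mlx4 hunk replaces MLX4_GET() reads straight into narrow struct members with reads into a temporary of the on-wire width followed by an explicit mask, so reserved bits sharing the same byte, word, or qword never leak into the parsed value. A small sketch of the extract-then-mask idiom, assuming a raw firmware byte where only the low 5 bits encode the field (the 0x1f masks above):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Raw byte as a device might return it: reserved high bits set. */
	uint8_t raw = 0xf2;                     /* 0b11110010 */

	/* Read the whole byte first, then keep only the defined field,
	 * as the patched mlx4_QUERY_HCA() does. */
	uint8_t log_num_qps = raw & 0x1f;

	printf("raw=0x%02x log_num_qps=%u\n", raw, log_num_qps); /* prints 18 */
	return 0;
}
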
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index d004957328f9..3908ed554474 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1133,13 +1133,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
int err = 0;
u8 *smac_v;

- if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
- mlx5_core_warn(esw->dev,
- "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
- vport->vport);
- return -EPERM;
- }
-
esw_vport_cleanup_ingress_rules(esw, vport);

if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
@@ -1696,7 +1689,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
int vport_num;
int err;

- if (!MLX5_ESWITCH_MANAGER(dev))
+ if (!MLX5_VPORT_MANAGER(dev))
return 0;

esw_info(dev,
@@ -1765,7 +1758,7 @@ abort:

void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
- if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
+ if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
return;

esw_info(esw->dev, "cleanup\n");
@@ -1812,13 +1805,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];

- if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
+ if (evport->info.spoofchk && !is_valid_ether_addr(mac))
mlx5_core_warn(esw->dev,
- "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+ "Set invalid MAC while spoofchk is on, vport(%d)\n",
vport);
- err = -EPERM;
- goto unlock;
- }

err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
if (err) {
@@ -1964,6 +1954,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
evport = &esw->vports[vport];
pschk = evport->info.spoofchk;
evport->info.spoofchk = spoofchk;
+ if (pschk && !is_valid_ether_addr(evport->info.mac))
+ mlx5_core_warn(esw->dev,
+ "Spoofchk in set while MAC is invalid, vport(%d)\n",
+ evport->vport);
if (evport->enabled && esw->mode == SRIOV_LEGACY)
err = esw_vport_ingress_config(esw, evport);
if (err)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index defed0d0c51d..e7f8ab6e4391 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -350,7 +350,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
int i;

priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
- ETH_HLEN + VLAN_HLEN;
+ ETH_HLEN + VLAN_HLEN + sizeof(__sum16);

/* Allocate RX and TX skb rings */
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -533,13 +533,15 @@ static void ravb_rx_csum(struct sk_buff *skb)
{
u8 *hw_csum;

- /* The hardware checksum is 2 bytes appended to packet data */
- if (unlikely(skb->len < 2))
+ /* The hardware checksum is contained in sizeof(__sum16) (2) bytes
+ * appended to packet data
+ */
+ if (unlikely(skb->len < sizeof(__sum16)))
return;
- hw_csum = skb_tail_pointer(skb) - 2;
+ hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
skb->ip_summed = CHECKSUM_COMPLETE;
- skb_trim(skb, skb->len - 2);
+ skb_trim(skb, skb->len - sizeof(__sum16));
}

/* Packet receive function for Ethernet AVB */
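
ravb_rx_csum() consumes a 16-bit checksum that the MAC appends after the frame data: read the last sizeof(__sum16) bytes as an unaligned little-endian value, then trim them off; the ring-init hunk grows the RX buffer by the same two bytes so the descriptor has room for them. A freestanding sketch of that tail extraction, with a plain byte buffer standing in for the skb:

#include <stdint.h>
#include <stdio.h>

/* Unaligned little-endian 16-bit load, like get_unaligned_le16(). */
static uint16_t load_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	/* Four payload bytes followed by an appended checksum of 0xbeef,
	 * stored little-endian the way the ravb hardware appends it. */
	uint8_t frame[] = { 0x11, 0x22, 0x33, 0x44, 0xef, 0xbe };
	size_t len = sizeof(frame);

	if (len < sizeof(uint16_t))
		return 1;                       /* nothing to strip */

	uint16_t csum = load_le16(frame + len - sizeof(uint16_t));
	len -= sizeof(uint16_t);                /* "skb_trim" the tail */

	printf("payload len=%zu csum=0x%04x\n", len, csum);
	return 0;
}
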
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 4a949569ec4c..5fb541897863 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -97,12 +97,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
if (!err) {
mdev->l3mdev_ops = &ipvl_l3mdev_ops;
- mdev->priv_flags |= IFF_L3MDEV_MASTER;
+ mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
} else
goto fail;
} else if (port->mode == IPVLAN_MODE_L3S) {
/* Old mode was L3S */
- mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
+ mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
mdev->l3mdev_ops = NULL;
}
@@ -162,7 +162,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
struct sk_buff *skb;

if (port->mode == IPVLAN_MODE_L3S) {
- dev->priv_flags &= ~IFF_L3MDEV_MASTER;
+ dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
ipvlan_unregister_nf_hook(dev_net(dev));
dev->l3mdev_ops = NULL;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 6658658246d2..1e6f0da1fa8e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -862,8 +862,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
if (rtnl_dereference(tun->xdp_prog))
sock_set_flag(&tfile->sk, SOCK_XDP);

- tun_set_real_num_queues(tun);
-
/* device is allowed to go away first, so no need to hold extra
* refcnt.
*/
@@ -875,6 +873,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
rcu_assign_pointer(tfile->tun, tun);
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
tun->numqueues++;
+ tun_set_real_num_queues(tun);
out:
return err;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ea672145f6a6..da500c77c5b9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644);
#define VIRTIO_XDP_TX BIT(0)
#define VIRTIO_XDP_REDIR BIT(1)

+#define VIRTIO_XDP_FLAG BIT(0)
+
/* RX packet size EWMA. The average packet size is used to determine the packet
* buffer size when refilling RX rings. As the entire RX ring may be refilled
* at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -251,6 +253,21 @@ struct padded_vnet_hdr {
char padding[4];
};

+static bool is_xdp_frame(void *ptr)
+{
+ return (unsigned long)ptr & VIRTIO_XDP_FLAG;
+}
+
+static void *xdp_to_ptr(struct xdp_frame *ptr)
+{
+ return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
+}
+
+static struct xdp_frame *ptr_to_xdp(void *ptr)
+{
+ return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
+}
+
/* Converting between virtqueue no. and kernel tx/rx queue no.
* 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
*/
@@ -461,7 +478,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,

sg_init_one(sq->sg, xdpf->data, xdpf->len);

- err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
+ err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
+ GFP_ATOMIC);
if (unlikely(err))
return -ENOSPC; /* Caller handle free/refcnt */

@@ -481,36 +499,37 @@ static int virtnet_xdp_xmit(struct net_device *dev,
{
struct virtnet_info *vi = netdev_priv(dev);
struct receive_queue *rq = vi->rq;
- struct xdp_frame *xdpf_sent;
struct bpf_prog *xdp_prog;
struct send_queue *sq;
unsigned int len;
int drops = 0;
int kicks = 0;
int ret, err;
+ void *ptr;
int i;

- sq = virtnet_xdp_sq(vi);
-
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
- ret = -EINVAL;
- drops = n;
- goto out;
- }
-
/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
* indicate XDP resources have been successfully allocated.
*/
xdp_prog = rcu_dereference(rq->xdp_prog);
- if (!xdp_prog) {
- ret = -ENXIO;
+ if (!xdp_prog)
+ return -ENXIO;
+
+ sq = virtnet_xdp_sq(vi);
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
+ ret = -EINVAL;
drops = n;
goto out;
}

/* Free up any pending old buffers before queueing new ones. */
- while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
- xdp_return_frame(xdpf_sent);
+ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ if (likely(is_xdp_frame(ptr)))
+ xdp_return_frame(ptr_to_xdp(ptr));
+ else
+ napi_consume_skb(ptr, false);
+ }

for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
@@ -1329,20 +1348,28 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
return stats.packets;
}

-static void free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
- struct sk_buff *skb;
unsigned int len;
unsigned int packets = 0;
unsigned int bytes = 0;
+ void *ptr;

- while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- pr_debug("Sent skb %p\n", skb);
+ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ if (likely(!is_xdp_frame(ptr))) {
+ struct sk_buff *skb = ptr;

- bytes += skb->len;
- packets++;
+ pr_debug("Sent skb %p\n", skb);
+
+ bytes += skb->len;
+ napi_consume_skb(skb, in_napi);
+ } else {
+ struct xdp_frame *frame = ptr_to_xdp(ptr);

- dev_consume_skb_any(skb);
+ bytes += frame->len;
+ xdp_return_frame(frame);
+ }
+ packets++;
}

/* Avoid overhead when no packets have been processed
@@ -1357,6 +1384,16 @@ static void free_old_xmit_skbs(struct send_queue *sq)
u64_stats_update_end(&sq->stats.syncp);
}

+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+{
+ if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+ return false;
+ else if (q < vi->curr_queue_pairs)
+ return true;
+ else
+ return false;
+}
+
static void virtnet_poll_cleantx(struct receive_queue *rq)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
@@ -1364,11 +1401,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
struct send_queue *sq = &vi->sq[index];
struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

- if (!sq->napi.weight)
+ if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
return;

if (__netif_tx_trylock(txq)) {
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, true);
__netif_tx_unlock(txq);
}

@@ -1441,10 +1478,18 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
struct send_queue *sq = container_of(napi, struct send_queue, napi);
struct virtnet_info *vi = sq->vq->vdev->priv;
- struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
+ unsigned int index = vq2txq(sq->vq);
+ struct netdev_queue *txq;

+ if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
+ /* We don't need to enable cb for XDP */
+ napi_complete_done(napi, 0);
+ return 0;
+ }
+
+ txq = netdev_get_tx_queue(vi->dev, index);
__netif_tx_lock(txq, raw_smp_processor_id());
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, true);
__netif_tx_unlock(txq);

virtqueue_napi_complete(napi, sq->vq, 0);
@@ -1513,7 +1558,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
bool use_napi = sq->napi.weight;

/* Free up any pending old buffers before queueing new ones. */
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, false);

if (use_napi && kick)
virtqueue_enable_cb_delayed(sq->vq);
@@ -1556,7 +1601,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!use_napi &&
unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, false);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
virtqueue_disable_cb(sq->vq);
@@ -2394,6 +2439,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
return -ENOMEM;
}

+ old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
+ if (!prog && !old_prog)
+ return 0;
+
if (prog) {
prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
if (IS_ERR(prog))
@@ -2401,36 +2450,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}

/* Make sure NAPI is not using any XDP TX queues for RX. */
- if (netif_running(dev))
- for (i = 0; i < vi->max_queue_pairs; i++)
+ if (netif_running(dev)) {
+ for (i = 0; i < vi->max_queue_pairs; i++) {
napi_disable(&vi->rq[i].napi);
+ virtnet_napi_tx_disable(&vi->sq[i].napi);
+ }
+ }
+
+ if (!prog) {
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
+ if (i == 0)
+ virtnet_restore_guest_offloads(vi);
+ }
+ synchronize_net();
+ }

- netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
if (err)
goto err;
+ netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
vi->xdp_queue_pairs = xdp_qp;

- for (i = 0; i < vi->max_queue_pairs; i++) {
- old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
- rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
- if (i == 0) {
- if (!old_prog)
+ if (prog) {
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
+ if (i == 0 && !old_prog)
virtnet_clear_guest_offloads(vi);
- if (!prog)
- virtnet_restore_guest_offloads(vi);
}
+ }
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
if (old_prog)
bpf_prog_put(old_prog);
- if (netif_running(dev))
+ if (netif_running(dev)) {
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+ virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+ &vi->sq[i].napi);
+ }
}

return 0;

err:
- for (i = 0; i < vi->max_queue_pairs; i++)
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+ if (!prog) {
+ virtnet_clear_guest_offloads(vi);
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
+ }
+
+ if (netif_running(dev)) {
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+ virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+ &vi->sq[i].napi);
+ }
+ }
if (prog)
bpf_prog_sub(prog, vi->max_queue_pairs - 1);
return err;
@@ -2586,16 +2661,6 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}

-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-{
- if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
- return false;
- else if (q < vi->curr_queue_pairs)
- return true;
- else
- return false;
-}
-
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -2604,10 +2669,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (!is_xdp_raw_buffer_queue(vi, i))
+ if (!is_xdp_frame(buf))
dev_kfree_skb(buf);
else
- put_page(virt_to_head_page(buf));
+ xdp_return_frame(ptr_to_xdp(buf));
}
}

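
The virtio_net rework lets sk_buffs and xdp_frames share one TX virtqueue by tagging the token's least significant bit (VIRTIO_XDP_FLAG). That works because both objects are at least 2-byte aligned, so bit 0 of a genuine pointer is always clear. A standalone sketch of the low-bit tagging scheme, with toy skb/xdp_frame types; the assert marks the alignment assumption:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define XDP_FLAG 0x1UL /* low pointer bit used as the type tag */

struct skb { int len; };
struct xdp_frame { int len; };

static int is_xdp(void *ptr)
{
	return (uintptr_t)ptr & XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *f)
{
	assert(((uintptr_t)f & XDP_FLAG) == 0); /* needs >= 2-byte alignment */
	return (void *)((uintptr_t)f | XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((uintptr_t)ptr & ~XDP_FLAG);
}

int main(void)
{
	struct skb skb = { .len = 64 };
	struct xdp_frame frame = { .len = 128 };
	void *ring[2] = { &skb, xdp_to_ptr(&frame) }; /* mixed completion ring */

	for (int i = 0; i < 2; i++) {
		if (is_xdp(ring[i]))
			printf("xdp frame, len=%d\n", ptr_to_xdp(ring[i])->len);
		else
			printf("skb, len=%d\n", ((struct skb *)ring[i])->len);
	}
	return 0;
}
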
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index ecea92f68c87..45c0b1f4cb69 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -275,9 +275,6 @@ void __of_detach_node(struct device_node *np)

/**
* of_detach_node() - "Unplug" a node from the device tree.
- *
- * The caller must hold a reference to the node. The memory associated with
- * the node is not freed until its refcount goes to zero.
*/
int of_detach_node(struct device_node *np)
{
@@ -333,6 +330,25 @@ void of_node_release(struct kobject *kobj)
if (!of_node_check_flag(node, OF_DYNAMIC))
return;

+ if (of_node_check_flag(node, OF_OVERLAY)) {
+
+ if (!of_node_check_flag(node, OF_OVERLAY_FREE_CSET)) {
+ /* premature refcount of zero, do not free memory */
+ pr_err("ERROR: memory leak before free overlay changeset, %pOF\n",
+ node);
+ return;
+ }
+
+ /*
+ * If node->properties non-empty then properties were added
+ * to this node either by different overlay that has not
+ * yet been removed, or by a non-overlay mechanism.
+ */
+ if (node->properties)
+ pr_err("ERROR: %s(), unexpected properties in %pOF\n",
+ __func__, node);
+ }
+
property_list_free(node->properties);
property_list_free(node->deadprops);

@@ -437,6 +453,16 @@ struct device_node *__of_node_dup(const struct device_node *np,

static void __of_changeset_entry_destroy(struct of_changeset_entry *ce)
{
+ if (ce->action == OF_RECONFIG_ATTACH_NODE &&
+ of_node_check_flag(ce->np, OF_OVERLAY)) {
+ if (kref_read(&ce->np->kobj.kref) > 1) {
+ pr_err("ERROR: memory leak, expected refcount 1 instead of %d, of_node_get()/of_node_put() unbalanced - destroy cset entry: attach overlay node %pOF\n",
+ kref_read(&ce->np->kobj.kref), ce->np);
+ } else {
+ of_node_set_flag(ce->np, OF_OVERLAY_FREE_CSET);
+ }
+ }
+
of_node_put(ce->np);
list_del(&ce->node);
kfree(ce);
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
index 7a0a18980b98..c72eef988041 100644
--- a/drivers/of/kobj.c
+++ b/drivers/of/kobj.c
@@ -133,6 +133,9 @@ int __of_attach_node_sysfs(struct device_node *np)
}
if (!name)
return -ENOMEM;
+
+ of_node_get(np);
+
rc = kobject_add(&np->kobj, parent, "%s", name);
kfree(name);
if (rc)
@@ -159,6 +162,5 @@ void __of_detach_node_sysfs(struct device_node *np)
kobject_del(&np->kobj);
}

- /* finally remove the kobj_init ref */
of_node_put(np);
}
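
The two devicetree hunks above rebalance node refcounting (sysfs attach now takes its own reference) and add leak detection: when a changeset entry that attached an overlay node is destroyed, the node should be down to exactly one remaining reference. A toy refcount sketch of that invariant check, with hypothetical node_get()/node_put() helpers rather than the kernel's kref:

#include <stdio.h>

struct node { int refs; };

static void node_get(struct node *n) { n->refs++; }
static void node_put(struct node *n) { n->refs--; }

/* Mirrors the __of_changeset_entry_destroy() check: anything above the
 * one reference the changeset itself holds indicates a missing put. */
static void destroy_entry(struct node *n)
{
	if (n->refs > 1)
		fprintf(stderr, "leak: expected refcount 1, found %d\n", n->refs);
	node_put(n);                            /* drop the changeset's own ref */
}

int main(void)
{
	struct node balanced = { .refs = 1 };   /* only the changeset ref */
	struct node leaked = { .refs = 1 };

	node_get(&leaked);                      /* a get with no matching put */

	destroy_entry(&balanced);               /* silent */
	destroy_entry(&leaked);                 /* reports the imbalance */
	return 0;
}
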
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 1e058196f23f..9808aae4621a 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -23,6 +23,26 @@

#include "of_private.h"

+/**
+ * struct target - info about current target node as recursing through overlay
+ * @np: node where current level of overlay will be applied
+ * @in_livetree: @np is a node in the live devicetree
+ *
+ * Used in the algorithm to create the portion of a changeset that describes
+ * an overlay fragment, which is a devicetree subtree. Initially @np is a node
+ * in the live devicetree where the overlay subtree is targeted to be grafted
+ * into. When recursing to the next level of the overlay subtree, the target
+ * also recurses to the next level of the live devicetree, as long as overlay
+ * subtree node also exists in the live devicetree. When a node in the overlay
+ * subtree does not exist at the same level in the live devicetree, target->np
+ * points to a newly allocated node, and all subsequent targets in the subtree
+ * will be newly allocated nodes.
+ */
+struct target {
+ struct device_node *np;
+ bool in_livetree;
+};
+
/**
* struct fragment - info about fragment nodes in overlay expanded device tree
* @target: target of the overlay operation
@@ -72,8 +92,7 @@ static int devicetree_corrupt(void)
}

static int build_changeset_next_level(struct overlay_changeset *ovcs,
- struct device_node *target_node,
- const struct device_node *overlay_node);
+ struct target *target, const struct device_node *overlay_node);

/*
* of_resolve_phandles() finds the largest phandle in the live tree.
@@ -257,14 +276,17 @@ err_free_target_path:
/**
* add_changeset_property() - add @overlay_prop to overlay changeset
* @ovcs: overlay changeset
- * @target_node: where to place @overlay_prop in live tree
+ * @target: where @overlay_prop will be placed
* @overlay_prop: property to add or update, from overlay tree
* @is_symbols_prop: 1 if @overlay_prop is from node "/__symbols__"
*
- * If @overlay_prop does not already exist in @target_node, add changeset entry
- * to add @overlay_prop in @target_node, else add changeset entry to update
+ * If @overlay_prop does not already exist in live devicetree, add changeset
+ * entry to add @overlay_prop in @target, else add changeset entry to update
* value of @overlay_prop.
*
+ * @target may be either in the live devicetree or in a new subtree that
+ * is contained in the changeset.
+ *
* Some special properties are not updated (no error returned).
*
* Update of property in symbols node is not allowed.
@@ -273,20 +295,22 @@ err_free_target_path:
* invalid @overlay.
*/
static int add_changeset_property(struct overlay_changeset *ovcs,
- struct device_node *target_node,
- struct property *overlay_prop,
+ struct target *target, struct property *overlay_prop,
bool is_symbols_prop)
{
struct property *new_prop = NULL, *prop;
int ret = 0;

- prop = of_find_property(target_node, overlay_prop->name, NULL);
-
if (!of_prop_cmp(overlay_prop->name, "name") ||
!of_prop_cmp(overlay_prop->name, "phandle") ||
!of_prop_cmp(overlay_prop->name, "linux,phandle"))
return 0;

+ if (target->in_livetree)
+ prop = of_find_property(target->np, overlay_prop->name, NULL);
+ else
+ prop = NULL;
+
if (is_symbols_prop) {
if (prop)
return -EINVAL;
@@ -299,10 +323,10 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
return -ENOMEM;

if (!prop)
- ret = of_changeset_add_property(&ovcs->cset, target_node,
+ ret = of_changeset_add_property(&ovcs->cset, target->np,
new_prop);
else
- ret = of_changeset_update_property(&ovcs->cset, target_node,
+ ret = of_changeset_update_property(&ovcs->cset, target->np,
new_prop);

if (ret) {
@@ -315,14 +339,14 @@ static int add_changeset_property(struct overlay_changeset *ovcs,

/**
* add_changeset_node() - add @node (and children) to overlay changeset
- * @ovcs: overlay changeset
- * @target_node: where to place @node in live tree
- * @node: node from within overlay device tree fragment
+ * @ovcs: overlay changeset
+ * @target: where @node will be placed in live tree or changeset
+ * @node: node from within overlay device tree fragment
*
- * If @node does not already exist in @target_node, add changeset entry
- * to add @node in @target_node.
+ * If @node does not already exist in @target, add changeset entry
+ * to add @node in @target.
*
- * If @node already exists in @target_node, and the existing node has
+ * If @node already exists in @target, and the existing node has
* a phandle, the overlay node is not allowed to have a phandle.
*
* If @node has child nodes, add the children recursively via
@@ -355,38 +379,46 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
* invalid @overlay.
*/
static int add_changeset_node(struct overlay_changeset *ovcs,
- struct device_node *target_node, struct device_node *node)
+ struct target *target, struct device_node *node)
{
const char *node_kbasename;
struct device_node *tchild;
+ struct target target_child;
int ret = 0;

node_kbasename = kbasename(node->full_name);

- for_each_child_of_node(target_node, tchild)
+ for_each_child_of_node(target->np, tchild)
if (!of_node_cmp(node_kbasename, kbasename(tchild->full_name)))
break;

if (!tchild) {
- tchild = __of_node_dup(node, node_kbasename);
+ tchild = __of_node_dup(NULL, node_kbasename);
if (!tchild)
return -ENOMEM;

- tchild->parent = target_node;
+ tchild->parent = target->np;
+ of_node_set_flag(tchild, OF_OVERLAY);

ret = of_changeset_attach_node(&ovcs->cset, tchild);
if (ret)
return ret;

- ret = build_changeset_next_level(ovcs, tchild, node);
+ target_child.np = tchild;
+ target_child.in_livetree = false;
+
+ ret = build_changeset_next_level(ovcs, &target_child, node);
of_node_put(tchild);
return ret;
}

- if (node->phandle && tchild->phandle)
+ if (node->phandle && tchild->phandle) {
ret = -EINVAL;
- else
- ret = build_changeset_next_level(ovcs, tchild, node);
+ } else {
+ target_child.np = tchild;
+ target_child.in_livetree = target->in_livetree;
+ ret = build_changeset_next_level(ovcs, &target_child, node);
+ }
of_node_put(tchild);

return ret;
@@ -395,7 +427,7 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
/**
* build_changeset_next_level() - add level of overlay changeset
* @ovcs: overlay changeset
- * @target_node: where to place @overlay_node in live tree
+ * @target: where to place @overlay_node in live tree
* @overlay_node: node from within an overlay device tree fragment
*
* Add the properties (if any) and nodes (if any) from @overlay_node to the
@@ -408,27 +440,26 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
* invalid @overlay_node.
*/
static int build_changeset_next_level(struct overlay_changeset *ovcs,
- struct device_node *target_node,
- const struct device_node *overlay_node)
+ struct target *target, const struct device_node *overlay_node)
{
struct device_node *child;
struct property *prop;
int ret;

for_each_property_of_node(overlay_node, prop) {
- ret = add_changeset_property(ovcs, target_node, prop, 0);
+ ret = add_changeset_property(ovcs, target, prop, 0);
if (ret) {
pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
- target_node, prop->name, ret);
+ target->np, prop->name, ret);
return ret;
}
}

for_each_child_of_node(overlay_node, child) {
- ret = add_changeset_node(ovcs, target_node, child);
+ ret = add_changeset_node(ovcs, target, child);
if (ret) {
pr_debug("Failed to apply node @%pOF/%pOFn, err=%d\n",
- target_node, child, ret);
+ target->np, child, ret);
of_node_put(child);
return ret;
}
@@ -441,17 +472,17 @@ static int build_changeset_next_level(struct overlay_changeset *ovcs,
* Add the properties from __overlay__ node to the @ovcs->cset changeset.
*/
static int build_changeset_symbols_node(struct overlay_changeset *ovcs,
- struct device_node *target_node,
+ struct target *target,
const struct device_node *overlay_symbols_node)
{
struct property *prop;
int ret;

for_each_property_of_node(overlay_symbols_node, prop) {
- ret = add_changeset_property(ovcs, target_node, prop, 1);
+ ret = add_changeset_property(ovcs, target, prop, 1);
if (ret) {
pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
- target_node, prop->name, ret);
+ target->np, prop->name, ret);
return ret;
}
}
@@ -474,6 +505,7 @@ static int build_changeset_symbols_node(struct overlay_changeset *ovcs,
static int build_changeset(struct overlay_changeset *ovcs)
{
struct fragment *fragment;
+ struct target target;
int fragments_count, i, ret;

/*
@@ -488,7 +520,9 @@ static int build_changeset(struct overlay_changeset *ovcs)
for (i = 0; i < fragments_count; i++) {
fragment = &ovcs->fragments[i];

- ret = build_changeset_next_level(ovcs, fragment->target,
+ target.np = fragment->target;
+ target.in_livetree = true;
+ ret = build_changeset_next_level(ovcs, &target,
fragment->overlay);
if (ret) {
pr_debug("apply failed '%pOF'\n", fragment->target);
@@ -498,7 +532,10 @@ static int build_changeset(struct overlay_changeset *ovcs)

if (ovcs->symbols_fragment) {
fragment = &ovcs->fragments[ovcs->count - 1];
- ret = build_changeset_symbols_node(ovcs, fragment->target,
+
+ target.np = fragment->target;
+ target.in_livetree = true;
+ ret = build_changeset_symbols_node(ovcs, &target,
fragment->overlay);
if (ret) {
pr_debug("apply failed '%pOF'\n", fragment->target);
@@ -516,7 +553,7 @@ static int build_changeset(struct overlay_changeset *ovcs)
* 1) "target" property containing the phandle of the target
* 2) "target-path" property containing the path of the target
*/
-static struct device_node *find_target_node(struct device_node *info_node)
+static struct device_node *find_target(struct device_node *info_node)
{
struct device_node *node;
const char *path;
@@ -622,7 +659,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,

fragment = &fragments[cnt];
fragment->overlay = overlay_node;
- fragment->target = find_target_node(node);
+ fragment->target = find_target(node);
if (!fragment->target) {
of_node_put(fragment->overlay);
ret = -EINVAL;
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index db2af09067db..b6f2ff95c3ed 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -442,8 +442,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x32, { KEY_MUTE } },
- { KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
- { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
+ { KE_KEY, 0x35, { KEY_SCREENLOCK } },
{ KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
{ KE_KEY, 0x41, { KEY_NEXTSONG } },
{ KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 784df2b49628..4954718b2754 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1293,7 +1293,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
n->vqs[i].rx_ring = NULL;
vhost_net_buf_init(&n->vqs[i].rxq);
}
- vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
+ vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
+ UIO_MAXIOV + VHOST_NET_BATCH);

vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 50dffe83714c..73a4adeab096 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1628,7 +1628,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vqs[i] = &vs->vqs[i].vq;
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
- vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
+ vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);

vhost_scsi_init_inflight(vs, NULL);

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c66fc8308b5e..cf82e7266397 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -390,9 +390,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
vq->indirect = kmalloc_array(UIO_MAXIOV,
sizeof(*vq->indirect),
GFP_KERNEL);
- vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
1463     + vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
1464     GFP_KERNEL);
1465     - vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
1466     + vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
1467     GFP_KERNEL);
1468     if (!vq->indirect || !vq->log || !vq->heads)
1469     goto err_nomem;
1470     @@ -414,7 +414,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
1471     }
1472    
1473     void vhost_dev_init(struct vhost_dev *dev,
1474     - struct vhost_virtqueue **vqs, int nvqs)
1475     + struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
1476     {
1477     struct vhost_virtqueue *vq;
1478     int i;
1479     @@ -427,6 +427,7 @@ void vhost_dev_init(struct vhost_dev *dev,
1480     dev->iotlb = NULL;
1481     dev->mm = NULL;
1482     dev->worker = NULL;
1483     + dev->iov_limit = iov_limit;
1484     init_llist_head(&dev->work_list);
1485     init_waitqueue_head(&dev->wait);
1486     INIT_LIST_HEAD(&dev->read_list);
1487     diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
1488     index 1b675dad5e05..9490e7ddb340 100644
1489     --- a/drivers/vhost/vhost.h
1490     +++ b/drivers/vhost/vhost.h
1491     @@ -170,9 +170,11 @@ struct vhost_dev {
1492     struct list_head read_list;
1493     struct list_head pending_list;
1494     wait_queue_head_t wait;
1495     + int iov_limit;
1496     };
1497    
1498     -void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
1499     +void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
1500     + int nvqs, int iov_limit);
1501     long vhost_dev_set_owner(struct vhost_dev *dev);
1502     bool vhost_dev_has_owner(struct vhost_dev *dev);
1503     long vhost_dev_check_owner(struct vhost_dev *);
1504     diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
1505     index 98ed5be132c6..fa93f6711d8d 100644
1506     --- a/drivers/vhost/vsock.c
1507     +++ b/drivers/vhost/vsock.c
1508     @@ -531,7 +531,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
1509     vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
1510     vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
1511    
1512     - vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
1513     + vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
1514    
1515     file->private_data = vsock;
1516     spin_lock_init(&vsock->send_pkt_list_lock);
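
The vhost series above adds a per-device iov_limit to vhost_dev_init() and sizes vq->log and vq->heads from it instead of from the global UIO_MAXIOV: net passes UIO_MAXIOV + VHOST_NET_BATCH because its receive batching can queue more used-buffer entries than UIO_MAXIOV alone, while scsi and vsock keep the old cap. A minimal userspace sketch of the sizing pattern (hypothetical names, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

#define UIO_MAXIOV 1024
#define NET_BATCH  64

struct dev {
        int iov_limit;            /* max entries this backend may queue */
        unsigned long *heads;     /* sized from the per-device limit */
};

static int dev_init(struct dev *d, int iov_limit)
{
        d->iov_limit = iov_limit;
        d->heads = calloc(iov_limit, sizeof(*d->heads));
        return d->heads ? 0 : -1;
}

int main(void)
{
        struct dev net, scsi;

        /* net batches descriptors, so it asks for headroom beyond the cap */
        if (dev_init(&net, UIO_MAXIOV + NET_BATCH) ||
            dev_init(&scsi, UIO_MAXIOV))
                return 1;
        printf("net=%d scsi=%d\n", net.iov_limit, scsi.iov_limit);
        free(net.heads);
        free(scsi.heads);
        return 0;
}
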
1517     diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
1518     index 47f1183b3dbe..92b572eb6d30 100644
1519     --- a/fs/btrfs/ctree.c
1520     +++ b/fs/btrfs/ctree.c
1521     @@ -967,6 +967,48 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
1522     return 0;
1523     }
1524    
1525     +static struct extent_buffer *alloc_tree_block_no_bg_flush(
1526     + struct btrfs_trans_handle *trans,
1527     + struct btrfs_root *root,
1528     + u64 parent_start,
1529     + const struct btrfs_disk_key *disk_key,
1530     + int level,
1531     + u64 hint,
1532     + u64 empty_size)
1533     +{
1534     + struct btrfs_fs_info *fs_info = root->fs_info;
1535     + struct extent_buffer *ret;
1536     +
1537     + /*
1538     + * If we are COWing a node/leaf from the extent, chunk, device or free
1539     + * space trees, make sure that we do not finish block group creation of
1540     + * pending block groups. We do this to avoid a deadlock.
1541     + * COWing can result in allocation of a new chunk, and flushing pending
1542     + * block groups (btrfs_create_pending_block_groups()) can be triggered
1543     + * when finishing allocation of a new chunk. Creation of a pending block
1544     + * group modifies the extent, chunk, device and free space trees,
1545     + * therefore we could deadlock with ourselves since we are holding a
1546     + * lock on an extent buffer that btrfs_create_pending_block_groups() may
1547     + * try to COW later.
1548     + * For similar reasons, we also need to delay flushing pending block
1549     + * groups when splitting a leaf or node, from one of those trees, since
1550     + * we are holding a write lock on it and its parent or when inserting a
1551     + * new root node for one of those trees.
1552     + */
1553     + if (root == fs_info->extent_root ||
1554     + root == fs_info->chunk_root ||
1555     + root == fs_info->dev_root ||
1556     + root == fs_info->free_space_root)
1557     + trans->can_flush_pending_bgs = false;
1558     +
1559     + ret = btrfs_alloc_tree_block(trans, root, parent_start,
1560     + root->root_key.objectid, disk_key, level,
1561     + hint, empty_size);
1562     + trans->can_flush_pending_bgs = true;
1563     +
1564     + return ret;
1565     +}
1566     +
1567     /*
1568     * does the dirty work in cow of a single block. The parent block (if
1569     * supplied) is updated to point to the new cow copy. The new buffer is marked
1570     @@ -1014,28 +1056,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1571     if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
1572     parent_start = parent->start;
1573    
1574     - /*
1575     - * If we are COWing a node/leaf from the extent, chunk, device or free
1576     - * space trees, make sure that we do not finish block group creation of
1577     - * pending block groups. We do this to avoid a deadlock.
1578     - * COWing can result in allocation of a new chunk, and flushing pending
1579     - * block groups (btrfs_create_pending_block_groups()) can be triggered
1580     - * when finishing allocation of a new chunk. Creation of a pending block
1581     - * group modifies the extent, chunk, device and free space trees,
1582     - * therefore we could deadlock with ourselves since we are holding a
1583     - * lock on an extent buffer that btrfs_create_pending_block_groups() may
1584     - * try to COW later.
1585     - */
1586     - if (root == fs_info->extent_root ||
1587     - root == fs_info->chunk_root ||
1588     - root == fs_info->dev_root ||
1589     - root == fs_info->free_space_root)
1590     - trans->can_flush_pending_bgs = false;
1591     -
1592     - cow = btrfs_alloc_tree_block(trans, root, parent_start,
1593     - root->root_key.objectid, &disk_key, level,
1594     - search_start, empty_size);
1595     - trans->can_flush_pending_bgs = true;
1596     + cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
1597     + level, search_start, empty_size);
1598     if (IS_ERR(cow))
1599     return PTR_ERR(cow);
1600    
1601     @@ -3342,8 +3364,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
1602     else
1603     btrfs_node_key(lower, &lower_key, 0);
1604    
1605     - c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
1606     - &lower_key, level, root->node->start, 0);
1607     + c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
1608     + root->node->start, 0);
1609     if (IS_ERR(c))
1610     return PTR_ERR(c);
1611    
1612     @@ -3472,8 +3494,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
1613     mid = (c_nritems + 1) / 2;
1614     btrfs_node_key(c, &disk_key, mid);
1615    
1616     - split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
1617     - &disk_key, level, c->start, 0);
1618     + split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
1619     + c->start, 0);
1620     if (IS_ERR(split))
1621     return PTR_ERR(split);
1622    
1623     @@ -4257,8 +4279,8 @@ again:
1624     else
1625     btrfs_item_key(l, &disk_key, mid);
1626    
1627     - right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
1628     - &disk_key, 0, l->start, 0);
1629     + right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
1630     + l->start, 0);
1631     if (IS_ERR(right))
1632     return PTR_ERR(right);
1633    
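The ctree.c change factors the deadlock-avoidance sequence into alloc_tree_block_no_bg_flush() and applies it at the other tree-block allocation sites shown (insert_new_root, split_node, the leaf split), not only in __btrfs_cow_block(). The guard shape, reduced to a standalone C sketch with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct trans {
        bool can_flush;    /* may this transaction flush pending work? */
};

static int alloc_block(struct trans *t)
{
        /* a real allocator might flush pending block groups here */
        printf("alloc (flush %s)\n", t->can_flush ? "allowed" : "deferred");
        return 0;
}

static int alloc_block_no_flush(struct trans *t, bool tree_is_critical)
{
        int ret;

        if (tree_is_critical)
                t->can_flush = false;   /* flushing would re-enter this tree */
        ret = alloc_block(t);
        t->can_flush = true;            /* always restore on the way out */
        return ret;
}

int main(void)
{
        struct trans t = { .can_flush = true };

        return alloc_block_no_flush(&t, true);
}
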
1634     diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
1635     index 645fc81e2a94..38f6cb0bc5f6 100644
1636     --- a/fs/btrfs/super.c
1637     +++ b/fs/btrfs/super.c
1638     @@ -1677,6 +1677,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
1639     flags | SB_RDONLY, device_name, data);
1640     if (IS_ERR(mnt_root)) {
1641     root = ERR_CAST(mnt_root);
1642     + kfree(subvol_name);
1643     goto out;
1644     }
1645    
1646     @@ -1686,12 +1687,14 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
1647     if (error < 0) {
1648     root = ERR_PTR(error);
1649     mntput(mnt_root);
1650     + kfree(subvol_name);
1651     goto out;
1652     }
1653     }
1654     }
1655     if (IS_ERR(mnt_root)) {
1656     root = ERR_CAST(mnt_root);
1657     + kfree(subvol_name);
1658     goto out;
1659     }
1660    
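The btrfs_mount() hunks plug a memory leak: subvol_name was allocated earlier in the function but freed only on the success path, so each of the three error exits shown now releases it too. The ownership rule in miniature (illustrative userspace code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* takes ownership of subvol_name and must free it on every path */
static int mount_subvol(char *subvol_name, int fail)
{
        if (fail) {
                free(subvol_name);      /* the fix: release on error too */
                return -1;
        }
        printf("mounted %s\n", subvol_name);
        free(subvol_name);
        return 0;
}

int main(void)
{
        return mount_subvol(strdup("snap"), 1) == -1 ? 0 : 1;
}
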
1661     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
1662     index b83ab72cf855..b14e1abb891c 100644
1663     --- a/fs/cifs/connect.c
1664     +++ b/fs/cifs/connect.c
1665     @@ -50,6 +50,7 @@
1666     #include "cifs_unicode.h"
1667     #include "cifs_debug.h"
1668     #include "cifs_fs_sb.h"
1669     +#include "dns_resolve.h"
1670     #include "ntlmssp.h"
1671     #include "nterr.h"
1672     #include "rfc1002pdu.h"
1673     @@ -318,6 +319,53 @@ static void cifs_prune_tlinks(struct work_struct *work);
1674     static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
1675     const char *devname, bool is_smb3);
1676    
1677     +/*
1678     + * Resolve the hostname and set the IP address in the TCP session. Useful for
1679     + * hostnames whose IP addresses may change at some point.
1680     + *
1681     + * This should be called with server->srv_mutex held.
1682     + */
1683     +#ifdef CONFIG_CIFS_DFS_UPCALL
1684     +static int reconn_set_ipaddr(struct TCP_Server_Info *server)
1685     +{
1686     + int rc;
1687     + int len;
1688     + char *unc, *ipaddr = NULL;
1689     +
1690     + if (!server->hostname)
1691     + return -EINVAL;
1692     +
1693     + len = strlen(server->hostname) + 3;
1694     +
1695     + unc = kmalloc(len, GFP_KERNEL);
1696     + if (!unc) {
1697     + cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
1698     + return -ENOMEM;
1699     + }
1700     + snprintf(unc, len, "\\\\%s", server->hostname);
1701     +
1702     + rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
1703     + kfree(unc);
1704     +
1705     + if (rc < 0) {
1706     + cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
1707     + __func__, server->hostname, rc);
1708     + return rc;
1709     + }
1710     +
1711     + rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
1712     + strlen(ipaddr));
1713     + kfree(ipaddr);
1714     +
1715     + return !rc ? -1 : 0;
1716     +}
1717     +#else
1718     +static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
1719     +{
1720     + return 0;
1721     +}
1722     +#endif
1723     +
1724     /*
1725     * cifs tcp session reconnection
1726     *
1727     @@ -418,6 +466,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
1728     rc = generic_ip_connect(server);
1729     if (rc) {
1730     cifs_dbg(FYI, "reconnect error %d\n", rc);
1731     + rc = reconn_set_ipaddr(server);
1732     + if (rc) {
1733     + cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
1734     + __func__, rc);
1735     + }
1736     mutex_unlock(&server->srv_mutex);
1737     msleep(3000);
1738     } else {
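
The reconnect change re-resolves the server hostname (under CONFIG_CIFS_DFS_UPCALL) when generic_ip_connect() fails, so a server whose IP address has changed can still be reached on the next retry. A rough userspace analogue of re-resolution on connect failure, using getaddrinfo() with hypothetical helper names:

#include <netdb.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static int reresolve(const char *host, struct sockaddr_storage *out)
{
        struct addrinfo hints = { .ai_socktype = SOCK_STREAM };
        struct addrinfo *res;

        /* look the name up again; the cached address may be stale */
        if (getaddrinfo(host, NULL, &hints, &res))
                return -1;
        memcpy(out, res->ai_addr, res->ai_addrlen);
        freeaddrinfo(res);
        return 0;
}

int main(void)
{
        struct sockaddr_storage ss;

        return reresolve("localhost", &ss) ? 1 : 0;
}
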
1739     diff --git a/fs/cifs/file.c b/fs/cifs/file.c
1740     index d5c3e0725849..8431854b129f 100644
1741     --- a/fs/cifs/file.c
1742     +++ b/fs/cifs/file.c
1743     @@ -2670,6 +2670,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
1744    
1745     rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
1746     if (rc) {
1747     + kvfree(wdata->pages);
1748     kfree(wdata);
1749     add_credits_and_wake_if(server, credits, 0);
1750     break;
1751     @@ -2681,6 +2682,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
1752     if (rc) {
1753     for (i = 0; i < nr_pages; i++)
1754     put_page(wdata->pages[i]);
1755     + kvfree(wdata->pages);
1756     kfree(wdata);
1757     add_credits_and_wake_if(server, credits, 0);
1758     break;
1759     @@ -3360,8 +3362,12 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
1760     }
1761    
1762     rc = cifs_read_allocate_pages(rdata, npages);
1763     - if (rc)
1764     - goto error;
1765     + if (rc) {
1766     + kvfree(rdata->pages);
1767     + kfree(rdata);
1768     + add_credits_and_wake_if(server, credits, 0);
1769     + break;
1770     + }
1771    
1772     rdata->tailsz = PAGE_SIZE;
1773     }
1774     @@ -3381,7 +3387,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
1775     if (!rdata->cfile->invalidHandle ||
1776     !(rc = cifs_reopen_file(rdata->cfile, true)))
1777     rc = server->ops->async_readv(rdata);
1778     -error:
1779     if (rc) {
1780     add_credits_and_wake_if(server, rdata->credits, 0);
1781     kref_put(&rdata->refcount,
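
Both cifs hunks above complete the error-path cleanup: wdata->pages and rdata->pages are allocated separately from their containers, so on failure the page array itself must be kvfree()d, and any pages already taken must be dropped, before the container is freed. The cleanup shape, as an illustrative sketch:

#include <stdlib.h>

struct wdata {
        void **pages;
};

static struct wdata *alloc_wdata(int nr, int fail_at)
{
        struct wdata *w = calloc(1, sizeof(*w));
        int i;

        if (!w)
                return NULL;
        w->pages = calloc(nr, sizeof(*w->pages));
        if (!w->pages)
                goto err;
        for (i = 0; i < nr; i++) {
                /* fail_at simulates an allocation failure mid-loop */
                w->pages[i] = (i == fail_at) ? NULL : malloc(4096);
                if (!w->pages[i]) {
                        while (i--)
                                free(w->pages[i]);   /* drop taken pages */
                        free(w->pages);              /* the leaked array */
                        goto err;
                }
        }
        return w;
err:
        free(w);
        return NULL;
}

int main(void)
{
        return alloc_wdata(8, 3) == NULL ? 0 : 1;
}
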
1782     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
1783     index 836c59fca049..c393ac255af7 100644
1784     --- a/fs/cifs/smb2pdu.c
1785     +++ b/fs/cifs/smb2pdu.c
1786     @@ -3139,8 +3139,17 @@ smb2_readv_callback(struct mid_q_entry *mid)
1787     rdata->mr = NULL;
1788     }
1789     #endif
1790     - if (rdata->result)
1791     + if (rdata->result && rdata->result != -ENODATA) {
1792     cifs_stats_fail_inc(tcon, SMB2_READ_HE);
1793     + trace_smb3_read_err(0 /* xid */,
1794     + rdata->cfile->fid.persistent_fid,
1795     + tcon->tid, tcon->ses->Suid, rdata->offset,
1796     + rdata->bytes, rdata->result);
1797     + } else
1798     + trace_smb3_read_done(0 /* xid */,
1799     + rdata->cfile->fid.persistent_fid,
1800     + tcon->tid, tcon->ses->Suid,
1801     + rdata->offset, rdata->got_bytes);
1802    
1803     queue_work(cifsiod_wq, &rdata->work);
1804     DeleteMidQEntry(mid);
1805     @@ -3215,13 +3224,11 @@ smb2_async_readv(struct cifs_readdata *rdata)
1806     if (rc) {
1807     kref_put(&rdata->refcount, cifs_readdata_release);
1808     cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
1809     - trace_smb3_read_err(rc, 0 /* xid */, io_parms.persistent_fid,
1810     - io_parms.tcon->tid, io_parms.tcon->ses->Suid,
1811     - io_parms.offset, io_parms.length);
1812     - } else
1813     - trace_smb3_read_done(0 /* xid */, io_parms.persistent_fid,
1814     - io_parms.tcon->tid, io_parms.tcon->ses->Suid,
1815     - io_parms.offset, io_parms.length);
1816     + trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
1817     + io_parms.tcon->tid,
1818     + io_parms.tcon->ses->Suid,
1819     + io_parms.offset, io_parms.length, rc);
1820     + }
1821    
1822     cifs_small_buf_release(buf);
1823     return rc;
1824     @@ -3265,10 +3272,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
1825     if (rc != -ENODATA) {
1826     cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
1827     cifs_dbg(VFS, "Send error in read = %d\n", rc);
1828     + trace_smb3_read_err(xid, req->PersistentFileId,
1829     + io_parms->tcon->tid, ses->Suid,
1830     + io_parms->offset, io_parms->length,
1831     + rc);
1832     }
1833     - trace_smb3_read_err(rc, xid, req->PersistentFileId,
1834     - io_parms->tcon->tid, ses->Suid,
1835     - io_parms->offset, io_parms->length);
1836     free_rsp_buf(resp_buftype, rsp_iov.iov_base);
1837     return rc == -ENODATA ? 0 : rc;
1838     } else
1839     @@ -3354,8 +3362,17 @@ smb2_writev_callback(struct mid_q_entry *mid)
1840     wdata->mr = NULL;
1841     }
1842     #endif
1843     - if (wdata->result)
1844     + if (wdata->result) {
1845     cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1846     + trace_smb3_write_err(0 /* no xid */,
1847     + wdata->cfile->fid.persistent_fid,
1848     + tcon->tid, tcon->ses->Suid, wdata->offset,
1849     + wdata->bytes, wdata->result);
1850     + } else
1851     + trace_smb3_write_done(0 /* no xid */,
1852     + wdata->cfile->fid.persistent_fid,
1853     + tcon->tid, tcon->ses->Suid,
1854     + wdata->offset, wdata->bytes);
1855    
1856     queue_work(cifsiod_wq, &wdata->work);
1857     DeleteMidQEntry(mid);
1858     @@ -3497,10 +3514,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
1859     wdata->bytes, rc);
1860     kref_put(&wdata->refcount, release);
1861     cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1862     - } else
1863     - trace_smb3_write_done(0 /* no xid */, req->PersistentFileId,
1864     - tcon->tid, tcon->ses->Suid, wdata->offset,
1865     - wdata->bytes);
1866     + }
1867    
1868     async_writev_out:
1869     cifs_small_buf_release(req);
1870     @@ -3726,8 +3740,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
1871     rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
1872     srch_inf->endOfSearch = true;
1873     rc = 0;
1874     - }
1875     - cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
1876     + } else
1877     + cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
1878     goto qdir_exit;
1879     }
1880    
1881     @@ -4325,8 +4339,8 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
1882     rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
1883     cifs_small_buf_release(req);
1884    
1885     - please_key_low = (__u64 *)req->LeaseKey;
1886     - please_key_high = (__u64 *)(req->LeaseKey+8);
1887     + please_key_low = (__u64 *)lease_key;
1888     + please_key_high = (__u64 *)(lease_key+8);
1889     if (rc) {
1890     cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
1891     trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
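
The SMB2_lease_break() hunk is a use-after-free fix: cifs_send_recv() is immediately followed by cifs_small_buf_release(req), so the tracing code must read the key from the caller-supplied lease_key rather than from the freed request buffer. The bug class in miniature (illustrative userspace code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct req {
        uint8_t lease_key[16];
};

int main(void)
{
        uint8_t lease_key[16] = { 1, 2, 3 };
        struct req *req = malloc(sizeof(*req));
        uint64_t lo;

        if (!req)
                return 1;
        memcpy(req->lease_key, lease_key, sizeof(req->lease_key));
        free(req);                           /* request buffer is gone... */
        memcpy(&lo, lease_key, sizeof(lo));  /* ...so read our own copy */
        printf("key low bits: %llu\n", (unsigned long long)lo);
        return 0;
}
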
1892     diff --git a/fs/dcache.c b/fs/dcache.c
1893     index 2593153471cf..44e5652b2664 100644
1894     --- a/fs/dcache.c
1895     +++ b/fs/dcache.c
1896     @@ -1188,15 +1188,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1897     */
1898     void shrink_dcache_sb(struct super_block *sb)
1899     {
1900     - long freed;
1901     -
1902     do {
1903     LIST_HEAD(dispose);
1904    
1905     - freed = list_lru_walk(&sb->s_dentry_lru,
1906     + list_lru_walk(&sb->s_dentry_lru,
1907     dentry_lru_isolate_shrink, &dispose, 1024);
1908     -
1909     - this_cpu_sub(nr_dentry_unused, freed);
1910     shrink_dentry_list(&dispose);
1911     } while (list_lru_count(&sb->s_dentry_lru) > 0);
1912     }
1913     diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
1914     index 8d7916570362..b08a530433ad 100644
1915     --- a/fs/gfs2/rgrp.c
1916     +++ b/fs/gfs2/rgrp.c
1917     @@ -1780,9 +1780,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
1918     goto next_iter;
1919     }
1920     if (ret == -E2BIG) {
1921     - n += rbm->bii - initial_bii;
1922     rbm->bii = 0;
1923     rbm->offset = 0;
1924     + n += (rbm->bii - initial_bii);
1925     goto res_covered_end_of_rgrp;
1926     }
1927     return ret;
1928     diff --git a/fs/nfs/write.c b/fs/nfs/write.c
1929     index 586726a590d8..d790faff8e47 100644
1930     --- a/fs/nfs/write.c
1931     +++ b/fs/nfs/write.c
1932     @@ -621,11 +621,12 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
1933     nfs_set_page_writeback(page);
1934     WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
1935    
1936     - ret = 0;
1937     + ret = req->wb_context->error;
1938     /* If there is a fatal error that covers this write, just exit */
1939     - if (nfs_error_is_fatal_on_server(req->wb_context->error))
1940     + if (nfs_error_is_fatal_on_server(ret))
1941     goto out_launder;
1942    
1943     + ret = 0;
1944     if (!nfs_pageio_add_request(pgio, req)) {
1945     ret = pgio->pg_error;
1946     /*
1947     @@ -635,9 +636,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
1948     nfs_context_set_write_error(req->wb_context, ret);
1949     if (nfs_error_is_fatal_on_server(ret))
1950     goto out_launder;
1951     - }
1952     + } else
1953     + ret = -EAGAIN;
1954     nfs_redirty_request(req);
1955     - ret = -EAGAIN;
1956     } else
1957     nfs_add_stats(page_file_mapping(page)->host,
1958     NFSIOS_WRITEPAGES, 1);
1959     diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
1960     index 857f8abf7b91..d5e38eddfb49 100644
1961     --- a/include/linux/netdevice.h
1962     +++ b/include/linux/netdevice.h
1963     @@ -1487,6 +1487,7 @@ struct net_device_ops {
1964     * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
1965     * @IFF_FAILOVER: device is a failover master device
1966     * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
1967     + * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
1968     */
1969     enum netdev_priv_flags {
1970     IFF_802_1Q_VLAN = 1<<0,
1971     @@ -1518,6 +1519,7 @@ enum netdev_priv_flags {
1972     IFF_NO_RX_HANDLER = 1<<26,
1973     IFF_FAILOVER = 1<<27,
1974     IFF_FAILOVER_SLAVE = 1<<28,
1975     + IFF_L3MDEV_RX_HANDLER = 1<<29,
1976     };
1977    
1978     #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1979     @@ -1548,6 +1550,7 @@ enum netdev_priv_flags {
1980     #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
1981     #define IFF_FAILOVER IFF_FAILOVER
1982     #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
1983     +#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
1984    
1985     /**
1986     * struct net_device - The DEVICE structure.
1987     @@ -4523,6 +4526,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
1988     return dev->priv_flags & IFF_SUPP_NOFCS;
1989     }
1990    
1991     +static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
1992     +{
1993     + return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
1994     +}
1995     +
1996     static inline bool netif_is_l3_master(const struct net_device *dev)
1997     {
1998     return dev->priv_flags & IFF_L3MDEV_MASTER;
1999     diff --git a/include/linux/of.h b/include/linux/of.h
2000     index a5aee3c438ad..664cd5573ae2 100644
2001     --- a/include/linux/of.h
2002     +++ b/include/linux/of.h
2003     @@ -138,11 +138,16 @@ extern struct device_node *of_aliases;
2004     extern struct device_node *of_stdout;
2005     extern raw_spinlock_t devtree_lock;
2006    
2007     -/* flag descriptions (need to be visible even when !CONFIG_OF) */
2008     -#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
2009     -#define OF_DETACHED 2 /* node has been detached from the device tree */
2010     -#define OF_POPULATED 3 /* device already created for the node */
2011     -#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */
2012     +/*
2013     + * struct device_node flag descriptions
2014     + * (need to be visible even when !CONFIG_OF)
2015     + */
2016     +#define OF_DYNAMIC 1 /* (and properties) allocated via kmalloc */
2017     +#define OF_DETACHED 2 /* detached from the device tree */
2018     +#define OF_POPULATED 3 /* device already created */
2019     +#define OF_POPULATED_BUS 4 /* platform bus created for children */
2020     +#define OF_OVERLAY 5 /* allocated for an overlay */
2021     +#define OF_OVERLAY_FREE_CSET 6 /* in overlay cset being freed */
2022    
2023     #define OF_BAD_ADDR ((u64)-1)
2024    
2025     diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
2026     index ec912d01126f..ecdc6542070f 100644
2027     --- a/include/linux/sched/coredump.h
2028     +++ b/include/linux/sched/coredump.h
2029     @@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm)
2030     #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
2031     #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
2032     #define MMF_OOM_VICTIM 25 /* mm is the oom victim */
2033     +#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
2034     #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
2035    
2036     #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
2037     diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
2038     index 3832099289c5..128487658ff7 100644
2039     --- a/include/net/l3mdev.h
2040     +++ b/include/net/l3mdev.h
2041     @@ -142,7 +142,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
2042    
2043     if (netif_is_l3_slave(skb->dev))
2044     master = netdev_master_upper_dev_get_rcu(skb->dev);
2045     - else if (netif_is_l3_master(skb->dev))
2046     + else if (netif_is_l3_master(skb->dev) ||
2047     + netif_has_l3_rx_handler(skb->dev))
2048     master = skb->dev;
2049    
2050     if (master && master->l3mdev_ops->l3mdev_l3_rcv)
2051     diff --git a/include/net/tls.h b/include/net/tls.h
2052     index 3cbcd12303fd..9f167e77d8ca 100644
2053     --- a/include/net/tls.h
2054     +++ b/include/net/tls.h
2055     @@ -120,6 +120,8 @@ struct tls_rec {
2056     struct scatterlist sg_aead_out[2];
2057    
2058     char aad_space[TLS_AAD_SPACE_SIZE];
2059     + u8 iv_data[TLS_CIPHER_AES_GCM_128_IV_SIZE +
2060     + TLS_CIPHER_AES_GCM_128_SALT_SIZE];
2061     struct aead_request aead_req;
2062     u8 aead_req_ctx[];
2063     };
2064     diff --git a/kernel/exit.c b/kernel/exit.c
2065     index 0e21e6d21f35..55b4fa6d01eb 100644
2066     --- a/kernel/exit.c
2067     +++ b/kernel/exit.c
2068     @@ -558,12 +558,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p)
2069     return NULL;
2070     }
2071    
2072     -static struct task_struct *find_child_reaper(struct task_struct *father)
2073     +static struct task_struct *find_child_reaper(struct task_struct *father,
2074     + struct list_head *dead)
2075     __releases(&tasklist_lock)
2076     __acquires(&tasklist_lock)
2077     {
2078     struct pid_namespace *pid_ns = task_active_pid_ns(father);
2079     struct task_struct *reaper = pid_ns->child_reaper;
2080     + struct task_struct *p, *n;
2081    
2082     if (likely(reaper != father))
2083     return reaper;
2084     @@ -579,6 +581,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father)
2085     panic("Attempted to kill init! exitcode=0x%08x\n",
2086     father->signal->group_exit_code ?: father->exit_code);
2087     }
2088     +
2089     + list_for_each_entry_safe(p, n, dead, ptrace_entry) {
2090     + list_del_init(&p->ptrace_entry);
2091     + release_task(p);
2092     + }
2093     +
2094     zap_pid_ns_processes(pid_ns);
2095     write_lock_irq(&tasklist_lock);
2096    
2097     @@ -668,7 +676,7 @@ static void forget_original_parent(struct task_struct *father,
2098     exit_ptrace(father, dead);
2099    
2100     /* Can drop and reacquire tasklist_lock */
2101     - reaper = find_child_reaper(father);
2102     + reaper = find_child_reaper(father, dead);
2103     if (list_empty(&father->children))
2104     return;
2105    
2106     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2107     index a80832487981..c5c708c83af0 100644
2108     --- a/mm/hugetlb.c
2109     +++ b/mm/hugetlb.c
2110     @@ -4270,7 +4270,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2111     break;
2112     }
2113     if (ret & VM_FAULT_RETRY) {
2114     - if (nonblocking)
2115     + if (nonblocking &&
2116     + !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
2117     *nonblocking = 0;
2118     *nr_pages = 0;
2119     /*
2120     diff --git a/mm/memory-failure.c b/mm/memory-failure.c
2121     index 7c72f2a95785..831be5ff5f4d 100644
2122     --- a/mm/memory-failure.c
2123     +++ b/mm/memory-failure.c
2124     @@ -372,7 +372,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
2125     if (fail || tk->addr_valid == 0) {
2126     pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
2127     pfn, tk->tsk->comm, tk->tsk->pid);
2128     - force_sig(SIGKILL, tk->tsk);
2129     + do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
2130     + tk->tsk, PIDTYPE_PID);
2131     }
2132    
2133     /*
2134     diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
2135     index cea0880eadfb..21d94b5677e8 100644
2136     --- a/mm/memory_hotplug.c
2137     +++ b/mm/memory_hotplug.c
2138     @@ -1302,23 +1302,27 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
2139     static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
2140     {
2141     unsigned long pfn;
2142     - struct page *page;
2143     +
2144     for (pfn = start; pfn < end; pfn++) {
2145     - if (pfn_valid(pfn)) {
2146     - page = pfn_to_page(pfn);
2147     - if (PageLRU(page))
2148     - return pfn;
2149     - if (__PageMovable(page))
2150     - return pfn;
2151     - if (PageHuge(page)) {
2152     - if (hugepage_migration_supported(page_hstate(page)) &&
2153     - page_huge_active(page))
2154     - return pfn;
2155     - else
2156     - pfn = round_up(pfn + 1,
2157     - 1 << compound_order(page)) - 1;
2158     - }
2159     - }
2160     + struct page *page, *head;
2161     + unsigned long skip;
2162     +
2163     + if (!pfn_valid(pfn))
2164     + continue;
2165     + page = pfn_to_page(pfn);
2166     + if (PageLRU(page))
2167     + return pfn;
2168     + if (__PageMovable(page))
2169     + return pfn;
2170     +
2171     + if (!PageHuge(page))
2172     + continue;
2173     + head = compound_head(page);
2174     + if (hugepage_migration_supported(page_hstate(head)) &&
2175     + page_huge_active(head))
2176     + return pfn;
2177     + skip = (1 << compound_order(head)) - (page - head);
2178     + pfn += skip - 1;
2179     }
2180     return 0;
2181     }
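
Besides restructuring the loop, the scan_movable_pages() change fixes the skip over huge pages: the old code evaluated page_hstate() and compound_order() on what could be a tail page, while the new code works from compound_head() and advances by exactly the tail pages that remain. The arithmetic, checked in a standalone sketch:

#include <stdio.h>

int main(void)
{
        unsigned long head_pfn = 512;   /* first pfn of a 2MB huge page */
        unsigned int order = 9;         /* 512 base pages per huge page */
        unsigned long pfn = 700;        /* scan landed mid-compound */
        unsigned long skip = (1UL << order) - (pfn - head_pfn);

        /* after pfn += skip - 1 and the loop's pfn++, the scan resumes
         * at the first pfn past the huge page */
        printf("resume at pfn %lu\n", pfn + skip);   /* prints 1024 */
        return 0;
}
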
2182     diff --git a/mm/migrate.c b/mm/migrate.c
2183     index f7e4bfdc13b7..9638cd59fef1 100644
2184     --- a/mm/migrate.c
2185     +++ b/mm/migrate.c
2186     @@ -1108,10 +1108,13 @@ out:
2187     * If migration is successful, decrease refcount of the newpage
2188     * which will not free the page because new page owner increased
2189     * refcounter. As well, if it is LRU page, add the page to LRU
2190     - * list in here.
2191     + * list in here. Use the old state of the isolated source page to
2192     + * determine if we migrated a LRU page. newpage was already unlocked
2193     + * and possibly modified by its owner - don't rely on the page
2194     + * state.
2195     */
2196     if (rc == MIGRATEPAGE_SUCCESS) {
2197     - if (unlikely(__PageMovable(newpage)))
2198     + if (unlikely(!is_lru))
2199     put_page(newpage);
2200     else
2201     putback_lru_page(newpage);
2202     diff --git a/mm/oom_kill.c b/mm/oom_kill.c
2203     index 6589f60d5018..cc317efdcb43 100644
2204     --- a/mm/oom_kill.c
2205     +++ b/mm/oom_kill.c
2206     @@ -634,8 +634,8 @@ static int oom_reaper(void *unused)
2207    
2208     static void wake_oom_reaper(struct task_struct *tsk)
2209     {
2210     - /* tsk is already queued? */
2211     - if (tsk == oom_reaper_list || tsk->oom_reaper_list)
2212     + /* mm is already queued? */
2213     + if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
2214     return;
2215    
2216     get_task_struct(tsk);
2217     @@ -962,6 +962,13 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
2218     * still freeing memory.
2219     */
2220     read_lock(&tasklist_lock);
2221     +
2222     + /*
2223     + * The task 'p' might have already exited before reaching here. The
2224     + * put_task_struct() would free task_struct 'p' while the loop still tries
2225     + * to access fields of 'p', so take an extra reference.
2226     + */
2227     + get_task_struct(p);
2228     for_each_thread(p, t) {
2229     list_for_each_entry(child, &t->children, sibling) {
2230     unsigned int child_points;
2231     @@ -981,6 +988,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
2232     }
2233     }
2234     }
2235     + put_task_struct(p);
2236     read_unlock(&tasklist_lock);
2237    
2238     /*
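
Two independent oom fixes above: wake_oom_reaper() now records "already queued" with an atomic test-and-set on the victim mm's flags (MMF_OOM_REAP_QUEUED) instead of inspecting the task's list pointers, and oom_kill_process() pins 'p' with get_task_struct()/put_task_struct() across the thread walk. The test-and-set idiom in a runnable sketch:

#include <stdatomic.h>
#include <stdio.h>

struct mm {
        atomic_flag reap_queued;   /* set once, by whoever queues first */
};

static int queue_for_reaping(struct mm *mm)
{
        /* returns 0 if someone already queued this mm */
        if (atomic_flag_test_and_set(&mm->reap_queued))
                return 0;
        printf("queued\n");
        return 1;
}

int main(void)
{
        struct mm mm = { ATOMIC_FLAG_INIT };

        queue_for_reaping(&mm);   /* queued */
        queue_for_reaping(&mm);   /* no-op: already queued */
        return 0;
}
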
2239     diff --git a/net/core/dev.c b/net/core/dev.c
2240     index 722d50dbf8a4..43f8a4fd4968 100644
2241     --- a/net/core/dev.c
2242     +++ b/net/core/dev.c
2243     @@ -8624,6 +8624,9 @@ int init_dummy_netdev(struct net_device *dev)
2244     set_bit(__LINK_STATE_PRESENT, &dev->state);
2245     set_bit(__LINK_STATE_START, &dev->state);
2246    
2247     + /* napi_busy_loop stats accounting wants this */
2248     + dev_net_set(dev, &init_net);
2249     +
2250     /* Note : We dont allocate pcpu_refcnt for dummy devices,
2251     * because users of this 'device' dont need to change
2252     * its refcount.
2253     diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
2254     index 7efe740c06eb..511b32ea2533 100644
2255     --- a/net/ipv4/gre_demux.c
2256     +++ b/net/ipv4/gre_demux.c
2257     @@ -25,6 +25,7 @@
2258     #include <linux/spinlock.h>
2259     #include <net/protocol.h>
2260     #include <net/gre.h>
2261     +#include <net/erspan.h>
2262    
2263     #include <net/icmp.h>
2264     #include <net/route.h>
2265     @@ -119,6 +120,22 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2266     hdr_len += 4;
2267     }
2268     tpi->hdr_len = hdr_len;
2269     +
2270     + /* ERSPAN ver 1 and 2 protocol sets GRE key field
2271     + * to 0 and sets the configured key in the
2272     + * inner erspan header field
2273     + */
2274     + if (greh->protocol == htons(ETH_P_ERSPAN) ||
2275     + greh->protocol == htons(ETH_P_ERSPAN2)) {
2276     + struct erspan_base_hdr *ershdr;
2277     +
2278     + if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
2279     + return -EINVAL;
2280     +
2281     + ershdr = (struct erspan_base_hdr *)options;
2282     + tpi->key = cpu_to_be32(get_session_id(ershdr));
2283     + }
2284     +
2285     return hdr_len;
2286     }
2287     EXPORT_SYMBOL(gre_parse_header);
2288     diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
2289     index 3407a82d4549..b5488c5197bc 100644
2290     --- a/net/ipv4/ip_gre.c
2291     +++ b/net/ipv4/ip_gre.c
2292     @@ -266,20 +266,11 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2293     int len;
2294    
2295     itn = net_generic(net, erspan_net_id);
2296     - len = gre_hdr_len + sizeof(*ershdr);
2297     -
2298     - /* Check based hdr len */
2299     - if (unlikely(!pskb_may_pull(skb, len)))
2300     - return PACKET_REJECT;
2301    
2302     iph = ip_hdr(skb);
2303     ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
2304     ver = ershdr->ver;
2305    
2306     - /* The original GRE header does not have key field,
2307     - * Use ERSPAN 10-bit session ID as key.
2308     - */
2309     - tpi->key = cpu_to_be32(get_session_id(ershdr));
2310     tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
2311     tpi->flags | TUNNEL_KEY,
2312     iph->saddr, iph->daddr, tpi->key);
2313     @@ -1468,12 +1459,17 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2314     {
2315     struct ip_tunnel *t = netdev_priv(dev);
2316     struct ip_tunnel_parm *p = &t->parms;
2317     + __be16 o_flags = p->o_flags;
2318     +
2319     + if ((t->erspan_ver == 1 || t->erspan_ver == 2) &&
2320     + !t->collect_md)
2321     + o_flags |= TUNNEL_KEY;
2322    
2323     if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
2324     nla_put_be16(skb, IFLA_GRE_IFLAGS,
2325     gre_tnl_flags_to_gre_flags(p->i_flags)) ||
2326     nla_put_be16(skb, IFLA_GRE_OFLAGS,
2327     - gre_tnl_flags_to_gre_flags(p->o_flags)) ||
2328     + gre_tnl_flags_to_gre_flags(o_flags)) ||
2329     nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
2330     nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
2331     nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
2332     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2333     index e3cb53b0ef67..d2b597674d60 100644
2334     --- a/net/ipv6/addrconf.c
2335     +++ b/net/ipv6/addrconf.c
2336     @@ -5120,6 +5120,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
2337     if (idev) {
2338     err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
2339     &fillargs);
2340     + if (err > 0)
2341     + err = 0;
2342     }
2343     goto put_tgt_net;
2344     }
2345     diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
2346     index 4dc935838184..d99753b5e39b 100644
2347     --- a/net/ipv6/af_inet6.c
2348     +++ b/net/ipv6/af_inet6.c
2349     @@ -362,6 +362,9 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
2350     err = -EINVAL;
2351     goto out_unlock;
2352     }
2353     + }
2354     +
2355     + if (sk->sk_bound_dev_if) {
2356     dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
2357     if (!dev) {
2358     err = -ENODEV;
2359     diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
2360     index b529a79ac222..94903061f324 100644
2361     --- a/net/ipv6/ip6_gre.c
2362     +++ b/net/ipv6/ip6_gre.c
2363     @@ -532,13 +532,9 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
2364     struct ip6_tnl *tunnel;
2365     u8 ver;
2366    
2367     - if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
2368     - return PACKET_REJECT;
2369     -
2370     ipv6h = ipv6_hdr(skb);
2371     ershdr = (struct erspan_base_hdr *)skb->data;
2372     ver = ershdr->ver;
2373     - tpi->key = cpu_to_be32(get_session_id(ershdr));
2374    
2375     tunnel = ip6gre_tunnel_lookup(skb->dev,
2376     &ipv6h->saddr, &ipv6h->daddr, tpi->key,
2377     @@ -2106,12 +2102,17 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2378     {
2379     struct ip6_tnl *t = netdev_priv(dev);
2380     struct __ip6_tnl_parm *p = &t->parms;
2381     + __be16 o_flags = p->o_flags;
2382     +
2383     + if ((p->erspan_ver == 1 || p->erspan_ver == 2) &&
2384     + !p->collect_md)
2385     + o_flags |= TUNNEL_KEY;
2386    
2387     if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
2388     nla_put_be16(skb, IFLA_GRE_IFLAGS,
2389     gre_tnl_flags_to_gre_flags(p->i_flags)) ||
2390     nla_put_be16(skb, IFLA_GRE_OFLAGS,
2391     - gre_tnl_flags_to_gre_flags(p->o_flags)) ||
2392     + gre_tnl_flags_to_gre_flags(o_flags)) ||
2393     nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
2394     nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
2395     nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
2396     diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
2397     index eb3220812b56..edda3f9daab9 100644
2398     --- a/net/ipv6/ip6mr.c
2399     +++ b/net/ipv6/ip6mr.c
2400     @@ -1516,6 +1516,9 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
2401     continue;
2402     rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
2403     list_del_rcu(&c->list);
2404     + call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
2405     + FIB_EVENT_ENTRY_DEL,
2406     + (struct mfc6_cache *)c, mrt->id);
2407     mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
2408     mr_cache_put(c);
2409     }
2410     @@ -1524,10 +1527,6 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
2411     spin_lock_bh(&mfc_unres_lock);
2412     list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
2413     list_del(&c->list);
2414     - call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
2415     - FIB_EVENT_ENTRY_DEL,
2416     - (struct mfc6_cache *)c,
2417     - mrt->id);
2418     mr6_netlink_event(mrt, (struct mfc6_cache *)c,
2419     RTM_DELROUTE);
2420     ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
2421     diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
2422     index 8181ee7e1e27..ee5403cbe655 100644
2423     --- a/net/ipv6/seg6_iptunnel.c
2424     +++ b/net/ipv6/seg6_iptunnel.c
2425     @@ -146,6 +146,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
2426     } else {
2427     ip6_flow_hdr(hdr, 0, flowlabel);
2428     hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
2429     +
2430     + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
2431     }
2432    
2433     hdr->nexthdr = NEXTHDR_ROUTING;
2434     diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
2435     index 26f1d435696a..fed6becc5daf 100644
2436     --- a/net/l2tp/l2tp_core.c
2437     +++ b/net/l2tp/l2tp_core.c
2438     @@ -83,8 +83,7 @@
2439     #define L2TP_SLFLAG_S 0x40000000
2440     #define L2TP_SL_SEQ_MASK 0x00ffffff
2441    
2442     -#define L2TP_HDR_SIZE_SEQ 10
2443     -#define L2TP_HDR_SIZE_NOSEQ 6
2444     +#define L2TP_HDR_SIZE_MAX 14
2445    
2446     /* Default trace flags */
2447     #define L2TP_DEFAULT_DEBUG_FLAGS 0
2448     @@ -808,7 +807,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
2449     __skb_pull(skb, sizeof(struct udphdr));
2450    
2451     /* Short packet? */
2452     - if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
2453     + if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
2454     l2tp_info(tunnel, L2TP_MSG_DATA,
2455     "%s: recv short packet (len=%d)\n",
2456     tunnel->name, skb->len);
2457     @@ -884,6 +883,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
2458     goto error;
2459     }
2460    
2461     + if (tunnel->version == L2TP_HDR_VER_3 &&
2462     + l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
2463     + goto error;
2464     +
2465     l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
2466     l2tp_session_dec_refcount(session);
2467    
2468     diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
2469     index 9c9afe94d389..b2ce90260c35 100644
2470     --- a/net/l2tp/l2tp_core.h
2471     +++ b/net/l2tp/l2tp_core.h
2472     @@ -301,6 +301,26 @@ static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel)
2473     }
2474     #endif
2475    
2476     +static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
2477     + unsigned char **ptr, unsigned char **optr)
2478     +{
2479     + int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
2480     +
2481     + if (opt_len > 0) {
2482     + int off = *ptr - *optr;
2483     +
2484     + if (!pskb_may_pull(skb, off + opt_len))
2485     + return -1;
2486     +
2487     + if (skb->data != *optr) {
2488     + *optr = skb->data;
2489     + *ptr = skb->data + off;
2490     + }
2491     + }
2492     +
2493     + return 0;
2494     +}
2495     +
2496     #define l2tp_printk(ptr, type, func, fmt, ...) \
2497     do { \
2498     if (((ptr)->debug) & (type)) \
2499     diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
2500     index 35f6f86d4dcc..d4c60523c549 100644
2501     --- a/net/l2tp/l2tp_ip.c
2502     +++ b/net/l2tp/l2tp_ip.c
2503     @@ -165,6 +165,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
2504     print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
2505     }
2506    
2507     + if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
2508     + goto discard_sess;
2509     +
2510     l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
2511     l2tp_session_dec_refcount(session);
2512    
2513     diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
2514     index 237f1a4a0b0c..0ae6899edac0 100644
2515     --- a/net/l2tp/l2tp_ip6.c
2516     +++ b/net/l2tp/l2tp_ip6.c
2517     @@ -178,6 +178,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
2518     print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
2519     }
2520    
2521     + if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
2522     + goto discard_sess;
2523     +
2524     l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
2525     l2tp_session_dec_refcount(session);
2526    
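The l2tp hunks pull the whole option area into the linear part of the skb and, because pskb_may_pull() may reallocate the skb head, re-derive the cached ptr/optr pointers from their offsets afterwards. The same invalidation hazard exists in plain C whenever a buffer can move; a userspace analogue with hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int ensure_len(unsigned char **buf, size_t have, size_t need,
                      unsigned char **ptr)
{
        size_t off = *ptr - *buf;       /* remember position as an offset */
        unsigned char *nbuf;

        if (need <= have)
                return 0;
        nbuf = realloc(*buf, need);     /* may move the data */
        if (!nbuf)
                return -1;
        memset(nbuf + have, 0, need - have);
        *buf = nbuf;
        *ptr = nbuf + off;              /* re-derive the cached pointer */
        return 0;
}

int main(void)
{
        unsigned char *buf = malloc(8);
        unsigned char *ptr;

        if (!buf)
                return 1;
        ptr = buf + 6;
        if (ensure_len(&buf, 8, 64, &ptr))
                return 1;
        printf("offset still %td\n", ptr - buf);   /* prints 6 */
        free(buf);
        return 0;
}
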
2527     diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
2528     index cbd51ed5a2d7..908e53ab47a4 100644
2529     --- a/net/netrom/nr_timer.c
2530     +++ b/net/netrom/nr_timer.c
2531     @@ -52,21 +52,21 @@ void nr_start_t1timer(struct sock *sk)
2532     {
2533     struct nr_sock *nr = nr_sk(sk);
2534    
2535     - mod_timer(&nr->t1timer, jiffies + nr->t1);
2536     + sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
2537     }
2538    
2539     void nr_start_t2timer(struct sock *sk)
2540     {
2541     struct nr_sock *nr = nr_sk(sk);
2542    
2543     - mod_timer(&nr->t2timer, jiffies + nr->t2);
2544     + sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
2545     }
2546    
2547     void nr_start_t4timer(struct sock *sk)
2548     {
2549     struct nr_sock *nr = nr_sk(sk);
2550    
2551     - mod_timer(&nr->t4timer, jiffies + nr->t4);
2552     + sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
2553     }
2554    
2555     void nr_start_idletimer(struct sock *sk)
2556     @@ -74,37 +74,37 @@ void nr_start_idletimer(struct sock *sk)
2557     struct nr_sock *nr = nr_sk(sk);
2558    
2559     if (nr->idle > 0)
2560     - mod_timer(&nr->idletimer, jiffies + nr->idle);
2561     + sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
2562     }
2563    
2564     void nr_start_heartbeat(struct sock *sk)
2565     {
2566     - mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
2567     + sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
2568     }
2569    
2570     void nr_stop_t1timer(struct sock *sk)
2571     {
2572     - del_timer(&nr_sk(sk)->t1timer);
2573     + sk_stop_timer(sk, &nr_sk(sk)->t1timer);
2574     }
2575    
2576     void nr_stop_t2timer(struct sock *sk)
2577     {
2578     - del_timer(&nr_sk(sk)->t2timer);
2579     + sk_stop_timer(sk, &nr_sk(sk)->t2timer);
2580     }
2581    
2582     void nr_stop_t4timer(struct sock *sk)
2583     {
2584     - del_timer(&nr_sk(sk)->t4timer);
2585     + sk_stop_timer(sk, &nr_sk(sk)->t4timer);
2586     }
2587    
2588     void nr_stop_idletimer(struct sock *sk)
2589     {
2590     - del_timer(&nr_sk(sk)->idletimer);
2591     + sk_stop_timer(sk, &nr_sk(sk)->idletimer);
2592     }
2593    
2594     void nr_stop_heartbeat(struct sock *sk)
2595     {
2596     - del_timer(&sk->sk_timer);
2597     + sk_stop_timer(sk, &sk->sk_timer);
2598     }
2599    
2600     int nr_t1timer_running(struct sock *sk)
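
The netrom conversion from mod_timer()/del_timer() to sk_reset_timer()/sk_stop_timer() is a lifetime fix: the sk_* helpers take a reference on the socket when a timer is armed and drop it when the timer is stopped, so the socket cannot be freed while a callback may still fire. A single-threaded model of the refcount flow (illustrative, not the kernel implementation):

#include <stdio.h>

struct sock {
        int refcnt;
        int timer_armed;
};

static void sock_hold(struct sock *sk) { sk->refcnt++; }

static void sock_put(struct sock *sk)
{
        if (--sk->refcnt == 0)
                printf("socket freed\n");
}

static void sk_reset_timer(struct sock *sk)
{
        if (!sk->timer_armed) {
                sock_hold(sk);          /* the timer now pins the socket */
                sk->timer_armed = 1;
        }
}

static void sk_stop_timer(struct sock *sk)
{
        if (sk->timer_armed) {
                sk->timer_armed = 0;
                sock_put(sk);           /* release the timer's reference */
        }
}

int main(void)
{
        struct sock sk = { .refcnt = 1 };

        sk_reset_timer(&sk);
        sock_put(&sk);      /* owner goes away; the timer ref keeps sk alive */
        sk_stop_timer(&sk); /* last reference dropped here */
        return 0;
}
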
2601     diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
2602     index 77e9f85a2c92..f2ff21d7df08 100644
2603     --- a/net/rose/rose_route.c
2604     +++ b/net/rose/rose_route.c
2605     @@ -850,6 +850,7 @@ void rose_link_device_down(struct net_device *dev)
2606    
2607     /*
2608     * Route a frame to an appropriate AX.25 connection.
2609     + * A NULL ax25_cb indicates an internally generated frame.
2610     */
2611     int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
2612     {
2613     @@ -867,6 +868,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
2614    
2615     if (skb->len < ROSE_MIN_LEN)
2616     return res;
2617     +
2618     + if (!ax25)
2619     + return rose_loopback_queue(skb, NULL);
2620     +
2621     frametype = skb->data[2];
2622     lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
2623     if (frametype == ROSE_CALL_REQUEST &&
2624     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
2625     index 0bae07e9c9e7..4fede55b9010 100644
2626     --- a/net/sctp/ipv6.c
2627     +++ b/net/sctp/ipv6.c
2628     @@ -277,7 +277,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
2629    
2630     if (saddr) {
2631     fl6->saddr = saddr->v6.sin6_addr;
2632     - fl6->fl6_sport = saddr->v6.sin6_port;
2633     + if (!fl6->fl6_sport)
2634     + fl6->fl6_sport = saddr->v6.sin6_port;
2635    
2636     pr_debug("src=%pI6 - ", &fl6->saddr);
2637     }
2638     diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
2639     index 85af878f5668..8410ccc57c40 100644
2640     --- a/net/sctp/protocol.c
2641     +++ b/net/sctp/protocol.c
2642     @@ -440,7 +440,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
2643     }
2644     if (saddr) {
2645     fl4->saddr = saddr->v4.sin_addr.s_addr;
2646     - fl4->fl4_sport = saddr->v4.sin_port;
2647     + if (!fl4->fl4_sport)
2648     + fl4->fl4_sport = saddr->v4.sin_port;
2649     }
2650    
2651     pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr,
2652     diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
2653     index f4ac6c592e13..d05c57664e36 100644
2654     --- a/net/sctp/sm_make_chunk.c
2655     +++ b/net/sctp/sm_make_chunk.c
2656     @@ -495,7 +495,10 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
2657     *
2658     * [INIT ACK back to where the INIT came from.]
2659     */
2660     - retval->transport = chunk->transport;
2661     + if (chunk->transport)
2662     + retval->transport =
2663     + sctp_assoc_lookup_paddr(asoc,
2664     + &chunk->transport->ipaddr);
2665    
2666     retval->subh.init_hdr =
2667     sctp_addto_chunk(retval, sizeof(initack), &initack);
2668     @@ -642,8 +645,10 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
2669     *
2670     * [COOKIE ACK back to where the COOKIE ECHO came from.]
2671     */
2672     - if (retval && chunk)
2673     - retval->transport = chunk->transport;
2674     + if (retval && chunk && chunk->transport)
2675     + retval->transport =
2676     + sctp_assoc_lookup_paddr(asoc,
2677     + &chunk->transport->ipaddr);
2678    
2679     return retval;
2680     }
2681     diff --git a/net/sctp/stream.c b/net/sctp/stream.c
2682     index 3892e7630f3a..80e0ae5534ec 100644
2683     --- a/net/sctp/stream.c
2684     +++ b/net/sctp/stream.c
2685     @@ -585,9 +585,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
2686     struct sctp_strreset_outreq *outreq = param.v;
2687     struct sctp_stream *stream = &asoc->stream;
2688     __u32 result = SCTP_STRRESET_DENIED;
2689     - __u16 i, nums, flags = 0;
2690     __be16 *str_p = NULL;
2691     __u32 request_seq;
2692     + __u16 i, nums;
2693    
2694     request_seq = ntohl(outreq->request_seq);
2695    
2696     @@ -615,6 +615,15 @@ struct sctp_chunk *sctp_process_strreset_outreq(
2697     if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
2698     goto out;
2699    
2700     + nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
2701     + str_p = outreq->list_of_streams;
2702     + for (i = 0; i < nums; i++) {
2703     + if (ntohs(str_p[i]) >= stream->incnt) {
2704     + result = SCTP_STRRESET_ERR_WRONG_SSN;
2705     + goto out;
2706     + }
2707     + }
2708     +
2709     if (asoc->strreset_chunk) {
2710     if (!sctp_chunk_lookup_strreset_param(
2711     asoc, outreq->response_seq,
2712     @@ -637,32 +646,19 @@ struct sctp_chunk *sctp_process_strreset_outreq(
2713     sctp_chunk_put(asoc->strreset_chunk);
2714     asoc->strreset_chunk = NULL;
2715     }
2716     -
2717     - flags = SCTP_STREAM_RESET_INCOMING_SSN;
2718     }
2719    
2720     - nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
2721     - if (nums) {
2722     - str_p = outreq->list_of_streams;
2723     - for (i = 0; i < nums; i++) {
2724     - if (ntohs(str_p[i]) >= stream->incnt) {
2725     - result = SCTP_STRRESET_ERR_WRONG_SSN;
2726     - goto out;
2727     - }
2728     - }
2729     -
2730     + if (nums)
2731     for (i = 0; i < nums; i++)
2732     SCTP_SI(stream, ntohs(str_p[i]))->mid = 0;
2733     - } else {
2734     + else
2735     for (i = 0; i < stream->incnt; i++)
2736     SCTP_SI(stream, i)->mid = 0;
2737     - }
2738    
2739     result = SCTP_STRRESET_PERFORMED;
2740    
2741     *evp = sctp_ulpevent_make_stream_reset_event(asoc,
2742     - flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p,
2743     - GFP_ATOMIC);
2744     + SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
2745    
2746     out:
2747     sctp_update_strreset_result(asoc, result);
2748     @@ -738,9 +734,6 @@ struct sctp_chunk *sctp_process_strreset_inreq(
2749    
2750     result = SCTP_STRRESET_PERFORMED;
2751    
2752     - *evp = sctp_ulpevent_make_stream_reset_event(asoc,
2753     - SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
2754     -
2755     out:
2756     sctp_update_strreset_result(asoc, result);
2757     err:
2758     @@ -873,6 +866,14 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
2759     if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
2760     goto out;
2761    
2762     + in = ntohs(addstrm->number_of_streams);
2763     + incnt = stream->incnt + in;
2764     + if (!in || incnt > SCTP_MAX_STREAM)
2765     + goto out;
2766     +
2767     + if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
2768     + goto out;
2769     +
2770     if (asoc->strreset_chunk) {
2771     if (!sctp_chunk_lookup_strreset_param(
2772     asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
2773     @@ -896,14 +897,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
2774     }
2775     }
2776    
2777     - in = ntohs(addstrm->number_of_streams);
2778     - incnt = stream->incnt + in;
2779     - if (!in || incnt > SCTP_MAX_STREAM)
2780     - goto out;
2781     -
2782     - if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
2783     - goto out;
2784     -
2785     stream->incnt = incnt;
2786    
2787     result = SCTP_STRRESET_PERFORMED;
2788     @@ -973,9 +966,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_in(
2789    
2790     result = SCTP_STRRESET_PERFORMED;
2791    
2792     - *evp = sctp_ulpevent_make_stream_change_event(asoc,
2793     - 0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC);
2794     -
2795     out:
2796     sctp_update_strreset_result(asoc, result);
2797     err:
2798     @@ -1036,10 +1026,10 @@ struct sctp_chunk *sctp_process_strreset_resp(
2799     sout->mid_uo = 0;
2800     }
2801     }
2802     -
2803     - flags = SCTP_STREAM_RESET_OUTGOING_SSN;
2804     }
2805    
2806     + flags |= SCTP_STREAM_RESET_OUTGOING_SSN;
2807     +
2808     for (i = 0; i < stream->outcnt; i++)
2809     SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
2810    
2811     @@ -1058,6 +1048,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
2812     nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
2813     sizeof(__u16);
2814    
2815     + flags |= SCTP_STREAM_RESET_INCOMING_SSN;
2816     +
2817     *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
2818     nums, str_p, GFP_ATOMIC);
2819     } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
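
The net/sctp/stream.c hunks above share one pattern, validate-then-commit: every stream id carried in a reset request is range-checked before any per-stream state is cleared, so an out-of-range id can no longer abort the loop halfway and leave the streams partially reset, and the ULP events are raised only for the side whose SSNs were actually reset. A minimal sketch of that ordering, with a hypothetical stream_table standing in for the SCTP internals:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for the per-association stream table. */
struct stream_table {
	uint16_t incnt;    /* number of incoming streams */
	uint32_t mid[64];  /* per-stream message id */
};

/* Validate the whole request first; mutate only if every id is sane. */
static bool reset_incoming(struct stream_table *t,
			   const uint16_t *ids, size_t nums)
{
	size_t i;

	for (i = 0; i < nums; i++)
		if (ids[i] >= t->incnt)
			return false;	/* WRONG_SSN: nothing was touched */

	if (nums)
		for (i = 0; i < nums; i++)
			t->mid[ids[i]] = 0;
	else
		for (i = 0; i < t->incnt; i++)	/* empty list = all streams */
			t->mid[i] = 0;
	return true;
}
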
2820     diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
2821     index 29b27858fff1..419314ba94ec 100644
2822     --- a/net/tls/tls_sw.c
2823     +++ b/net/tls/tls_sw.c
2824     @@ -439,6 +439,8 @@ static int tls_do_encryption(struct sock *sk,
2825     struct scatterlist *sge = sk_msg_elem(msg_en, start);
2826     int rc;
2827    
2828     + memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data));
2829     +
2830     sge->offset += tls_ctx->tx.prepend_size;
2831     sge->length -= tls_ctx->tx.prepend_size;
2832    
2833     @@ -448,7 +450,7 @@ static int tls_do_encryption(struct sock *sk,
2834     aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
2835     aead_request_set_crypt(aead_req, rec->sg_aead_in,
2836     rec->sg_aead_out,
2837     - data_len, tls_ctx->tx.iv);
2838     + data_len, rec->iv_data);
2839    
2840     aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2841     tls_encrypt_done, sk);
2842     @@ -1768,7 +1770,9 @@ void tls_sw_free_resources_tx(struct sock *sk)
2843     if (atomic_read(&ctx->encrypt_pending))
2844     crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2845    
2846     + release_sock(sk);
2847     cancel_delayed_work_sync(&ctx->tx_work.work);
2848     + lock_sock(sk);
2849    
2850     /* Tx whatever records we can transmit and abandon the rest */
2851     tls_tx_records(sk, -1);
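
The net/tls/tls_sw.c hunks carry two independent fixes: tls_do_encryption() snapshots the IV into the per-record iv_data so an async AEAD request still in flight cannot see a later rewrite of the shared tls_ctx->tx.iv, and tls_sw_free_resources_tx() drops the socket lock around cancel_delayed_work_sync(), since the tx work handler takes that same lock and waiting for it with the lock held can deadlock. A sketch of the lock-drop pattern under those assumptions (my_tx_ctx and my_free_tx are illustrative names, not the driver's):

#include <linux/workqueue.h>
#include <net/sock.h>

struct my_tx_ctx {
	struct delayed_work tx_work;	/* handler calls lock_sock(sk) */
};

static void my_free_tx(struct sock *sk, struct my_tx_ctx *ctx)
{
	/*
	 * cancel_delayed_work_sync() waits for a running handler to
	 * finish; if that handler is blocked in lock_sock(sk), waiting
	 * here while still holding the lock deadlocks both sides.
	 */
	release_sock(sk);
	cancel_delayed_work_sync(&ctx->tx_work);
	lock_sock(sk);
	/* ... tear down the remaining tx records under the lock ... */
}
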
2852     diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
2853     index 40013b26f671..6c99fa8ac5fa 100644
2854     --- a/sound/core/pcm_lib.c
2855     +++ b/sound/core/pcm_lib.c
2856     @@ -2112,6 +2112,13 @@ int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2857     return 0;
2858     }
2859    
2860     +/* allow waiting for a capture stream that hasn't been started */
2861     +#if IS_ENABLED(CONFIG_SND_PCM_OSS)
2862     +#define wait_capture_start(substream) ((substream)->oss.oss)
2863     +#else
2864     +#define wait_capture_start(substream) false
2865     +#endif
2866     +
2867     /* the common loop for read/write data */
2868     snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2869     void *data, bool interleaved,
2870     @@ -2182,7 +2189,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2871     err = snd_pcm_start(substream);
2872     if (err < 0)
2873     goto _end_unlock;
2874     - } else {
2875     + } else if (!wait_capture_start(substream)) {
2876     /* nothing to do */
2877     err = 0;
2878     goto _end_unlock;
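
The sound/core/pcm_lib.c change lets an OSS-emulated capture stream block in __snd_pcm_lib_xfer() even though snd_pcm_start() has not been called yet, instead of returning immediately with no data; when CONFIG_SND_PCM_OSS is off, wait_capture_start() is the constant false and the extra branch compiles away. The shape of that config-gated predicate in a self-contained form, with hypothetical names:

#include <stdbool.h>

/* Hypothetical config gate standing in for IS_ENABLED(CONFIG_SND_PCM_OSS). */
#ifdef HAVE_OSS_EMULATION
#define wants_wait(s)	((s)->oss_client)	/* only OSS clients wait */
#else
#define wants_wait(s)	false			/* constant-folds away */
#endif

struct stream {
	bool running;
	bool oss_client;
};

static int xfer(struct stream *s)
{
	if (!s->running) {
		if (!wants_wait(s))
			return 0;	/* native stream: nothing to do */
		/* OSS stream: fall through and wait for data instead */
	}
	/* ... transfer frames ... */
	return 1;
}
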
2879     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2880     index 8ddd016c04d0..0bfd78b7e0ca 100644
2881     --- a/sound/pci/hda/patch_realtek.c
2882     +++ b/sound/pci/hda/patch_realtek.c
2883     @@ -117,6 +117,7 @@ struct alc_spec {
2884     int codec_variant; /* flag for other variants */
2885     unsigned int has_alc5505_dsp:1;
2886     unsigned int no_depop_delay:1;
2887     + unsigned int done_hp_init:1;
2888    
2889     /* for PLL fix */
2890     hda_nid_t pll_nid;
2891     @@ -3372,6 +3373,48 @@ static void alc_default_shutup(struct hda_codec *codec)
2892     snd_hda_shutup_pins(codec);
2893     }
2894    
2895     +static void alc294_hp_init(struct hda_codec *codec)
2896     +{
2897     + struct alc_spec *spec = codec->spec;
2898     + hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
2899     + int i, val;
2900     +
2901     + if (!hp_pin)
2902     + return;
2903     +
2904     + snd_hda_codec_write(codec, hp_pin, 0,
2905     + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
2906     +
2907     + msleep(100);
2908     +
2909     + snd_hda_codec_write(codec, hp_pin, 0,
2910     + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
2911     +
2912     + alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
2913     + alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
2914     +
2915     + /* Wait for depop procedure finish */
2916     + val = alc_read_coefex_idx(codec, 0x58, 0x01);
2917     + for (i = 0; i < 20 && val & 0x0080; i++) {
2918     + msleep(50);
2919     + val = alc_read_coefex_idx(codec, 0x58, 0x01);
2920     + }
2921     + /* Set HP depop to auto mode */
2922     + alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
2923     + msleep(50);
2924     +}
2925     +
2926     +static void alc294_init(struct hda_codec *codec)
2927     +{
2928     + struct alc_spec *spec = codec->spec;
2929     +
2930     + if (!spec->done_hp_init) {
2931     + alc294_hp_init(codec);
2932     + spec->done_hp_init = true;
2933     + }
2934     + alc_default_init(codec);
2935     +}
2936     +
2937     static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
2938     unsigned int val)
2939     {
2940     @@ -7288,37 +7331,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
2941     alc_update_coef_idx(codec, 0x4, 0, 1<<11);
2942     }
2943    
2944     -static void alc294_hp_init(struct hda_codec *codec)
2945     -{
2946     - struct alc_spec *spec = codec->spec;
2947     - hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
2948     - int i, val;
2949     -
2950     - if (!hp_pin)
2951     - return;
2952     -
2953     - snd_hda_codec_write(codec, hp_pin, 0,
2954     - AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
2955     -
2956     - msleep(100);
2957     -
2958     - snd_hda_codec_write(codec, hp_pin, 0,
2959     - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
2960     -
2961     - alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
2962     - alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
2963     -
2964     - /* Wait for depop procedure finish */
2965     - val = alc_read_coefex_idx(codec, 0x58, 0x01);
2966     - for (i = 0; i < 20 && val & 0x0080; i++) {
2967     - msleep(50);
2968     - val = alc_read_coefex_idx(codec, 0x58, 0x01);
2969     - }
2970     - /* Set HP depop to auto mode */
2971     - alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
2972     - msleep(50);
2973     -}
2974     -
2975     /*
2976     */
2977     static int patch_alc269(struct hda_codec *codec)
2978     @@ -7444,7 +7456,7 @@ static int patch_alc269(struct hda_codec *codec)
2979     spec->codec_variant = ALC269_TYPE_ALC294;
2980     spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
2981     alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
2982     - alc294_hp_init(codec);
2983     + spec->init_hook = alc294_init;
2984     break;
2985     case 0x10ec0300:
2986     spec->codec_variant = ALC269_TYPE_ALC300;
2987     @@ -7456,7 +7468,7 @@ static int patch_alc269(struct hda_codec *codec)
2988     spec->codec_variant = ALC269_TYPE_ALC700;
2989     spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
2990     alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
2991     - alc294_hp_init(codec);
2992     + spec->init_hook = alc294_init;
2993     break;
2994    
2995     }
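
The patch_realtek.c hunks move alc294_hp_init() out of patch_alc269() and into an init hook shared by the ALC294 and ALC700 paths; the new done_hp_init flag keeps the headphone depop sequence (potentially more than a second of msleep() waits) to a single execution, skipping it on subsequent init calls such as resume. The run-once hook pattern in isolation, with illustrative names:

#include <stdbool.h>

struct codec_state {
	unsigned int done_hp_init:1;	/* set after the first full init */
};

/* Illustrative stand-ins for the expensive and cheap init paths. */
static void hp_depop_sequence(struct codec_state *st) { (void)st; }
static void default_init(struct codec_state *st)      { (void)st; }

/* Installed as the init hook: called at probe time and on every resume. */
static void codec_init(struct codec_state *st)
{
	if (!st->done_hp_init) {
		hp_depop_sequence(st);	/* long msleep() chain, run once */
		st->done_hp_init = true;
	}
	default_init(st);		/* cheap part, runs every time */
}
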
2996     diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
2997     index 6623cafc94f2..7e93686a430a 100644
2998     --- a/sound/usb/quirks.c
2999     +++ b/sound/usb/quirks.c
3000     @@ -1373,6 +1373,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
3001     return SNDRV_PCM_FMTBIT_DSD_U32_BE;
3002     break;
3003    
3004     + case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
3005     case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
3006     case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
3007     case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
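
The sound/usb/quirks.c one-liner adds the Bit Opus #3 to the fall-through list of devices whose firmware exposes raw DSD; USB_ID() packs the 16-bit vendor and product ids into a single 32-bit key so all entries can be matched in one switch. The lookup shape in a self-contained form (the ids below are made up, not real devices):

#include <stdint.h>

#define USB_ID(vendor, product)  (((uint32_t)(vendor) << 16) | (product))

/* Stand-in format bit; the real code returns SNDRV_PCM_FMTBIT_* values. */
#define FMTBIT_DSD_U32_BE  (1ull << 0)

static uint64_t dsd_format_quirk(uint32_t id)
{
	switch (id) {
	case USB_ID(0x1111, 0x0001):	/* hypothetical DAC A */
	case USB_ID(0x1111, 0x0002):	/* hypothetical DAC B */
		return FMTBIT_DSD_U32_BE;	/* device does raw DSD */
	}
	return 0;	/* no quirk: stick to the descriptor's formats */
}
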
3008     diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
3009     index c9a2abf8be1b..0692d5dab729 100644
3010     --- a/tools/testing/selftests/seccomp/seccomp_bpf.c
3011     +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
3012     @@ -1563,7 +1563,16 @@ TEST_F(TRACE_poke, getpid_runs_normally)
3013     #ifdef SYSCALL_NUM_RET_SHARE_REG
3014     # define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action)
3015     #else
3016     -# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(val, action)
3017     +# define EXPECT_SYSCALL_RETURN(val, action) \
3018     + do { \
3019     + errno = 0; \
3020     + if (val < 0) { \
3021     + EXPECT_EQ(-1, action); \
3022     + EXPECT_EQ(-(val), errno); \
3023     + } else { \
3024     + EXPECT_EQ(val, action); \
3025     + } \
3026     + } while (0)
3027     #endif
3028    
3029     /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
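
The rewritten EXPECT_SYSCALL_RETURN() above encodes the convention the reworked tests depend on: a negative expected value means the skipped syscall must surface as return -1 with errno set to its magnitude (the new *_errno tests and -ESRCH), while a non-negative value must come back verbatim (the *_faked tests and the fake tid 45000). The same check as a plain function, assuming ordinary libc syscall() error semantics:

#include <errno.h>
#include <stdbool.h>

/* expected < 0: skipped with an error (e.g. -ESRCH -> ret -1, errno ESRCH).
 * expected >= 0: faked success value returned as-is (e.g. 45000).
 */
static bool syscall_return_ok(long expected, long actual)
{
	if (expected < 0)
		return actual == -1 && errno == -expected;
	return actual == expected;
}
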
3030     @@ -1602,7 +1611,7 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
3031    
3032     /* Architecture-specific syscall changing routine. */
3033     void change_syscall(struct __test_metadata *_metadata,
3034     - pid_t tracee, int syscall)
3035     + pid_t tracee, int syscall, int result)
3036     {
3037     int ret;
3038     ARCH_REGS regs;
3039     @@ -1661,7 +1670,7 @@ void change_syscall(struct __test_metadata *_metadata,
3040     #ifdef SYSCALL_NUM_RET_SHARE_REG
3041     TH_LOG("Can't modify syscall return on this architecture");
3042     #else
3043     - regs.SYSCALL_RET = EPERM;
3044     + regs.SYSCALL_RET = result;
3045     #endif
3046    
3047     #ifdef HAVE_GETREGS
3048     @@ -1689,14 +1698,19 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
3049     case 0x1002:
3050     /* change getpid to getppid. */
3051     EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
3052     - change_syscall(_metadata, tracee, __NR_getppid);
3053     + change_syscall(_metadata, tracee, __NR_getppid, 0);
3054     break;
3055     case 0x1003:
3056     - /* skip gettid. */
3057     + /* skip gettid with a valid return code. */
3058     EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
3059     - change_syscall(_metadata, tracee, -1);
3060     + change_syscall(_metadata, tracee, -1, 45000);
3061     break;
3062     case 0x1004:
3063     + /* skip openat with an error. */
3064     + EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
3065     + change_syscall(_metadata, tracee, -1, -ESRCH);
3066     + break;
3067     + case 0x1005:
3068     /* do nothing (allow getppid) */
3069     EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
3070     break;
3071     @@ -1729,9 +1743,11 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
3072     nr = get_syscall(_metadata, tracee);
3073    
3074     if (nr == __NR_getpid)
3075     - change_syscall(_metadata, tracee, __NR_getppid);
3076     + change_syscall(_metadata, tracee, __NR_getppid, 0);
3077     + if (nr == __NR_gettid)
3078     + change_syscall(_metadata, tracee, -1, 45000);
3079     if (nr == __NR_openat)
3080     - change_syscall(_metadata, tracee, -1);
3081     + change_syscall(_metadata, tracee, -1, -ESRCH);
3082     }
3083    
3084     FIXTURE_DATA(TRACE_syscall) {
3085     @@ -1748,8 +1764,10 @@ FIXTURE_SETUP(TRACE_syscall)
3086     BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
3087     BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
3088     BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
3089     - BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
3090     + BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
3091     BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
3092     + BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
3093     + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
3094     BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
3095     };
3096    
3097     @@ -1797,15 +1815,26 @@ TEST_F(TRACE_syscall, ptrace_syscall_redirected)
3098     EXPECT_NE(self->mypid, syscall(__NR_getpid));
3099     }
3100    
3101     -TEST_F(TRACE_syscall, ptrace_syscall_dropped)
3102     +TEST_F(TRACE_syscall, ptrace_syscall_errno)
3103     +{
3104     + /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
3105     + teardown_trace_fixture(_metadata, self->tracer);
3106     + self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
3107     + true);
3108     +
3109     + /* Tracer should skip the open syscall, resulting in ESRCH. */
3110     + EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
3111     +}
3112     +
3113     +TEST_F(TRACE_syscall, ptrace_syscall_faked)
3114     {
3115     /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
3116     teardown_trace_fixture(_metadata, self->tracer);
3117     self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
3118     true);
3119    
3120     - /* Tracer should skip the open syscall, resulting in EPERM. */
3121     - EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat));
3122     + /* Tracer should skip the gettid syscall, resulting in a fake pid. */
3123     + EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
3124     }
3125    
3126     TEST_F(TRACE_syscall, syscall_allowed)
3127     @@ -1838,7 +1867,21 @@ TEST_F(TRACE_syscall, syscall_redirected)
3128     EXPECT_NE(self->mypid, syscall(__NR_getpid));
3129     }
3130    
3131     -TEST_F(TRACE_syscall, syscall_dropped)
3132     +TEST_F(TRACE_syscall, syscall_errno)
3133     +{
3134     + long ret;
3135     +
3136     + ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3137     + ASSERT_EQ(0, ret);
3138     +
3139     + ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
3140     + ASSERT_EQ(0, ret);
3141     +
3142     + /* openat has been skipped and an errno returned. */
3143     + EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
3144     +}
3145     +
3146     +TEST_F(TRACE_syscall, syscall_faked)
3147     {
3148     long ret;
3149    
3150     @@ -1849,8 +1892,7 @@ TEST_F(TRACE_syscall, syscall_dropped)
3151     ASSERT_EQ(0, ret);
3152    
3153     /* gettid has been skipped and an altered return value stored. */
3154     - EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid));
3155     - EXPECT_NE(self->mytid, syscall(__NR_gettid));
3156     + EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
3157     }
3158    
3159     TEST_F(TRACE_syscall, skip_after_RET_TRACE)