Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0115-5.4.16-all-fixes.patch



Revision 3496
Mon May 11 14:36:18 2020 UTC by niro
File size: 188872 bytes
-linux-5.4.16
diff --git a/Makefile b/Makefile
index 30600e309c73..e16d2e58ed4b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
-SUBLEVEL = 15
+SUBLEVEL = 16
EXTRAVERSION =
NAME = Kleptomaniac Octopus

diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 15b75005bc34..3fa1b962dc27 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -600,8 +600,11 @@ extern void slb_set_size(u16 size);
*
*/
#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 2)
+
+// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
#define MIN_USER_CONTEXT (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
- MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT)
+ MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)
+
/*
* For platforms that support on 65bit VA we limit the context bits
*/
diff --git a/arch/powerpc/include/asm/xive-regs.h b/arch/powerpc/include/asm/xive-regs.h
index f2dfcd50a2d3..33aee7490cbb 100644
--- a/arch/powerpc/include/asm/xive-regs.h
+++ b/arch/powerpc/include/asm/xive-regs.h
@@ -39,6 +39,7 @@

#define XIVE_ESB_VAL_P 0x2
#define XIVE_ESB_VAL_Q 0x1
+#define XIVE_ESB_INVALID 0xFF

/*
* Thread Management (aka "TM") registers
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index f5fadbd2533a..9651ca061828 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -972,12 +972,21 @@ static int xive_get_irqchip_state(struct irq_data *data,
enum irqchip_irq_state which, bool *state)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
+ u8 pq;

switch (which) {
case IRQCHIP_STATE_ACTIVE:
- *state = !xd->stale_p &&
- (xd->saved_p ||
- !!(xive_esb_read(xd, XIVE_ESB_GET) & XIVE_ESB_VAL_P));
+ pq = xive_esb_read(xd, XIVE_ESB_GET);
+
+ /*
+ * The esb value being all 1's means we couldn't get
+ * the PQ state of the interrupt through mmio. It may
+ * happen, for example when querying a PHB interrupt
+ * while the PHB is in an error state. We consider the
+ * interrupt to be inactive in that case.
+ */
+ *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
+ (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
return 0;
default:
return -EINVAL;
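
The hunk above treats an all-1s ESB read as "interrupt inactive". As a standalone illustration of the new predicate (plain user-space C, constants copied from the hunk; the all-1s convention for a failed MMIO read is taken from the comment in the patch):

#include <stdio.h>
#include <stdint.h>

#define XIVE_ESB_VAL_P   0x2
#define XIVE_ESB_VAL_Q   0x1
#define XIVE_ESB_INVALID 0xFF

/* Mirrors the fixed IRQCHIP_STATE_ACTIVE computation. */
static int irq_active(uint8_t pq, int stale_p, int saved_p)
{
	return pq != XIVE_ESB_INVALID && !stale_p &&
	       (saved_p || (pq & XIVE_ESB_VAL_P));
}

int main(void)
{
	printf("%d\n", irq_active(0xFF, 0, 1));           /* 0: ESB read failed */
	printf("%d\n", irq_active(XIVE_ESB_VAL_P, 0, 0)); /* 1: P bit set */
	return 0;
}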
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 2bbab0230aeb..d287837ed755 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -912,6 +912,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
}
if (!to) {
printk ("No more free channels for FS50..\n");
+ kfree(vcc);
return -EBUSY;
}
vcc->channo = dev->channo;
@@ -922,6 +923,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
printk ("Channel is in use for FS155.\n");
+ kfree(vcc);
return -EBUSY;
}
}
@@ -935,6 +937,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
tc, sizeof (struct fs_transmit_config));
if (!tc) {
fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
+ kfree(vcc);
return -ENOMEM;
}

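The three firestream hunks all plug the same leak: fs_open() allocates vcc and then returns early without freeing it. A minimal sketch of the rule the fix restores (hypothetical names, not driver code):

#include <stdlib.h>

struct vcc { int channo; };

/* Every early error return after the allocation must free it. */
static int open_channel(int have_free_channel)
{
	struct vcc *vcc = malloc(sizeof(*vcc));
	if (!vcc)
		return -1;         /* -ENOMEM */
	if (!have_free_channel) {
		free(vcc);         /* the kfree(vcc) the patch adds */
		return -2;         /* -EBUSY */
	}
	/* ... channel registered; vcc freed later at close time ... */
	free(vcc);
	return 0;
}

int main(void)
{
	return open_channel(1);
}
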
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 3d4f5775a4ba..25235ef630c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -9,16 +9,16 @@
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"

-static __always_inline u32 __busy_read_flag(u8 id)
+static __always_inline u32 __busy_read_flag(u16 id)
{
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ if (id == (u16)I915_ENGINE_CLASS_INVALID)
return 0xffff0000u;

GEM_BUG_ON(id >= 16);
return 0x10000u << id;
}

-static __always_inline u32 __busy_write_id(u8 id)
+static __always_inline u32 __busy_write_id(u16 id)
{
/*
* The uABI guarantees an active writer is also amongst the read
@@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id)
* last_read - hence we always set both read and write busy for
* last_write.
*/
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ if (id == (u16)I915_ENGINE_CLASS_INVALID)
return 0xffffffffu;

return (id + 1) | __busy_read_flag(id);
}

static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
{
const struct i915_request *rq;

@@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
return 0;

/* Beware type-expansion follies! */
- BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+ BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
return flag(rq->engine->uabi_class);
}

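Why the busy-ioctl casts had to grow with the field: assuming I915_ENGINE_CLASS_INVALID is -1, as in the DRM uAPI headers, a u16 uabi_class stores it as 0xFFFF, which an 8-bit cast can never match. Standalone illustration:

#include <stdio.h>
#include <stdint.h>

#define I915_ENGINE_CLASS_INVALID -1

int main(void)
{
	uint16_t uabi_class = (uint16_t)I915_ENGINE_CLASS_INVALID; /* 0xFFFF */

	/* old check: compares 0xFFFF against 0xFF, never true */
	printf("u8 cast matches:  %d\n", uabi_class == (uint8_t)I915_ENGINE_CLASS_INVALID);
	/* fixed check: compares 0xFFFF against 0xFFFF */
	printf("u16 cast matches: %d\n", uabi_class == (uint16_t)I915_ENGINE_CLASS_INVALID);
	return 0;
}
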
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index abfbac49b8e8..968d9b2705d0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -427,7 +427,7 @@ struct get_pages_work {

static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
- struct page **pvec, int num_pages)
+ struct page **pvec, unsigned long num_pages)
{
unsigned int max_segment = i915_sg_segment_size();
struct sg_table *st;
@@ -473,9 +473,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
struct get_pages_work *work = container_of(_work, typeof(*work), work);
struct drm_i915_gem_object *obj = work->obj;
- const int npages = obj->base.size >> PAGE_SHIFT;
+ const unsigned long npages = obj->base.size >> PAGE_SHIFT;
+ unsigned long pinned;
struct page **pvec;
- int pinned, ret;
+ int ret;

ret = -ENOMEM;
pinned = 0;
@@ -578,7 +579,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)

static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
- const int num_pages = obj->base.size >> PAGE_SHIFT;
+ const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
struct mm_struct *mm = obj->userptr.mm->mm;
struct page **pvec;
struct sg_table *pages;
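
The userptr hunks widen page counts from int to unsigned long. With 4 KiB pages (PAGE_SHIFT == 12, assumed here), a signed int overflows once an object reaches 2^31 pages, i.e. 8 TiB; a sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long size = 1UL << 43;  /* hypothetical 8 TiB object */
	int narrow = size >> 12;         /* overflows: implementation-defined result */
	unsigned long wide = size >> 12; /* what the patch stores instead */

	printf("int: %d, unsigned long: %lu\n", narrow, wide);
	return 0;
}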
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 9dd8c299cb2d..798e1b024406 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -300,8 +300,8 @@ struct intel_engine_cs {
u8 class;
u8 instance;

- u8 uabi_class;
- u8 uabi_instance;
+ u16 uabi_class;
+ u16 uabi_instance;

u32 context_size;
u32 mmio_base;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b1a7a8b9b46a..f614646ed3f9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1178,6 +1178,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
do {
+ GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

iter->dma += I915_GTT_PAGE_SIZE;
@@ -1657,6 +1658,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,

vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
do {
+ GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);

iter.dma += I915_GTT_PAGE_SIZE;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 1c67ac434e10..5906c80c4b2c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct panfrost_file_priv *priv = file->driver_priv;
struct panfrost_gem_object *bo;
struct drm_panfrost_create_bo *args = data;
+ struct panfrost_gem_mapping *mapping;

if (!args->size || args->pad ||
(args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
@@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
if (IS_ERR(bo))
return PTR_ERR(bo);

- args->offset = bo->node.start << PAGE_SHIFT;
+ mapping = panfrost_gem_mapping_get(bo, priv);
+ if (!mapping) {
+ drm_gem_object_put_unlocked(&bo->base.base);
+ return -EINVAL;
+ }
+
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);

return 0;
}
@@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev,
struct drm_panfrost_submit *args,
struct panfrost_job *job)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_object *bo;
+ unsigned int i;
+ int ret;
+
job->bo_count = args->bo_handle_count;

if (!job->bo_count)
@@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *dev,
if (!job->implicit_fences)
return -ENOMEM;

- return drm_gem_objects_lookup(file_priv,
- (void __user *)(uintptr_t)args->bo_handles,
- job->bo_count, &job->bos);
+ ret = drm_gem_objects_lookup(file_priv,
+ (void __user *)(uintptr_t)args->bo_handles,
+ job->bo_count, &job->bos);
+ if (ret)
+ return ret;
+
+ job->mappings = kvmalloc_array(job->bo_count,
+ sizeof(struct panfrost_gem_mapping *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job->mappings)
+ return -ENOMEM;
+
+ for (i = 0; i < job->bo_count; i++) {
+ struct panfrost_gem_mapping *mapping;
+
+ bo = to_panfrost_bo(job->bos[i]);
+ mapping = panfrost_gem_mapping_get(bo, priv);
+ if (!mapping) {
+ ret = -EINVAL;
+ break;
+ }
+
+ job->mappings[i] = mapping;
+ }
+
+ return ret;
}

/**
@@ -320,7 +357,9 @@ out:
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_get_bo_offset *args = data;
+ struct panfrost_gem_mapping *mapping;
struct drm_gem_object *gem_obj;
struct panfrost_gem_object *bo;

@@ -331,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
}
bo = to_panfrost_bo(gem_obj);

- args->offset = bo->node.start << PAGE_SHIFT;
-
+ mapping = panfrost_gem_mapping_get(bo, priv);
drm_gem_object_put_unlocked(gem_obj);
+
+ if (!mapping)
+ return -EINVAL;
+
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);
return 0;
}

static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_madvise *args = data;
struct panfrost_device *pfdev = dev->dev_private;
struct drm_gem_object *gem_obj;
+ struct panfrost_gem_object *bo;
+ int ret = 0;

gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
@@ -350,22 +397,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
return -ENOENT;
}

+ bo = to_panfrost_bo(gem_obj);
+
mutex_lock(&pfdev->shrinker_lock);
+ mutex_lock(&bo->mappings.lock);
+ if (args->madv == PANFROST_MADV_DONTNEED) {
+ struct panfrost_gem_mapping *first;
+
+ first = list_first_entry(&bo->mappings.list,
+ struct panfrost_gem_mapping,
+ node);
+
+ /*
+ * If we want to mark the BO purgeable, there must be only one
+ * user: the caller FD.
+ * We could do something smarter and mark the BO purgeable only
+ * when all its users have marked it purgeable, but globally
+ * visible/shared BOs are likely to never be marked purgeable
+ * anyway, so let's not bother.
+ */
+ if (!list_is_singular(&bo->mappings.list) ||
+ WARN_ON_ONCE(first->mmu != &priv->mmu)) {
+ ret = -EINVAL;
+ goto out_unlock_mappings;
+ }
+ }
+
args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);

if (args->retained) {
- struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
-
if (args->madv == PANFROST_MADV_DONTNEED)
list_add_tail(&bo->base.madv_list,
&pfdev->shrinker_list);
else if (args->madv == PANFROST_MADV_WILLNEED)
list_del_init(&bo->base.madv_list);
}
+
+out_unlock_mappings:
+ mutex_unlock(&bo->mappings.lock);
mutex_unlock(&pfdev->shrinker_lock);

drm_gem_object_put_unlocked(gem_obj);
- return 0;
+ return ret;
}

int panfrost_unstable_ioctl_check(void)
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 92a95210a899..77c3a3855c68 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -29,6 +29,12 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
list_del_init(&bo->base.madv_list);
mutex_unlock(&pfdev->shrinker_lock);

+ /*
+ * If we still have mappings attached to the BO, there's a problem in
+ * our refcounting.
+ */
+ WARN_ON_ONCE(!list_empty(&bo->mappings.list));
+
if (bo->sgts) {
int i;
int n_sgt = bo->base.base.size / SZ_2M;
@@ -46,6 +52,69 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
drm_gem_shmem_free_object(obj);
}

+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+ struct panfrost_file_priv *priv)
+{
+ struct panfrost_gem_mapping *iter, *mapping = NULL;
+
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(iter, &bo->mappings.list, node) {
+ if (iter->mmu == &priv->mmu) {
+ kref_get(&iter->refcount);
+ mapping = iter;
+ break;
+ }
+ }
+ mutex_unlock(&bo->mappings.lock);
+
+ return mapping;
+}
+
+static void
+panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
+{
+ struct panfrost_file_priv *priv;
+
+ if (mapping->active)
+ panfrost_mmu_unmap(mapping);
+
+ priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
+ spin_lock(&priv->mm_lock);
+ if (drm_mm_node_allocated(&mapping->mmnode))
+ drm_mm_remove_node(&mapping->mmnode);
+ spin_unlock(&priv->mm_lock);
+}
+
+static void panfrost_gem_mapping_release(struct kref *kref)
+{
+ struct panfrost_gem_mapping *mapping;
+
+ mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
+
+ panfrost_gem_teardown_mapping(mapping);
+ drm_gem_object_put_unlocked(&mapping->obj->base.base);
+ kfree(mapping);
+}
+
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
+{
+ if (!mapping)
+ return;
+
+ kref_put(&mapping->refcount, panfrost_gem_mapping_release);
+}
+
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+{
+ struct panfrost_gem_mapping *mapping;
+
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(mapping, &bo->mappings.list, node)
+ panfrost_gem_teardown_mapping(mapping);
+ mutex_unlock(&bo->mappings.lock);
+}
+
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
int ret;
@@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_mapping *mapping;
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mapping->node);
+ kref_init(&mapping->refcount);
+ drm_gem_object_get(obj);
+ mapping->obj = bo;

/*
* Executable buffers cannot cross a 16MB boundary as the program
@@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
else
align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

- bo->mmu = &priv->mmu;
+ mapping->mmu = &priv->mmu;
spin_lock(&priv->mm_lock);
- ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
+ ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
size >> PAGE_SHIFT, align, color, 0);
spin_unlock(&priv->mm_lock);
if (ret)
- return ret;
+ goto err;

if (!bo->is_heap) {
- ret = panfrost_mmu_map(bo);
- if (ret) {
- spin_lock(&priv->mm_lock);
- drm_mm_remove_node(&bo->node);
- spin_unlock(&priv->mm_lock);
- }
+ ret = panfrost_mmu_map(mapping);
+ if (ret)
+ goto err;
}
+
+ mutex_lock(&bo->mappings.lock);
+ WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
+ list_add_tail(&mapping->node, &bo->mappings.list);
+ mutex_unlock(&bo->mappings.lock);
+
+err:
+ if (ret)
+ panfrost_gem_mapping_put(mapping);
return ret;
}

void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
- struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ struct panfrost_gem_mapping *mapping = NULL, *iter;

- if (bo->is_mapped)
- panfrost_mmu_unmap(bo);
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(iter, &bo->mappings.list, node) {
+ if (iter->mmu == &priv->mmu) {
+ mapping = iter;
+ list_del(&iter->node);
+ break;
+ }
+ }
+ mutex_unlock(&bo->mappings.lock);

- spin_lock(&priv->mm_lock);
- if (drm_mm_node_allocated(&bo->node))
- drm_mm_remove_node(&bo->node);
- spin_unlock(&priv->mm_lock);
+ panfrost_gem_mapping_put(mapping);
}

static int panfrost_gem_pin(struct drm_gem_object *obj)
@@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
if (!obj)
return NULL;

+ INIT_LIST_HEAD(&obj->mappings.list);
+ mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;

return &obj->base.base;
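
The panfrost rework above replaces the single per-BO drm_mm node with per-FD panfrost_gem_mapping objects whose lifetime is kref-managed: lookups take a reference under the list lock, and the last put tears the mapping down. A user-space approximation of that get/put discipline (illustration only; the kernel uses struct kref):

#include <stdio.h>
#include <stdlib.h>

struct mapping {
	int refcount;
};

static struct mapping *mapping_get(struct mapping *m)
{
	m->refcount++;            /* kref_get() under bo->mappings.lock */
	return m;
}

static void mapping_put(struct mapping *m)
{
	if (!m)
		return;           /* panfrost_gem_mapping_put(NULL) is a no-op */
	if (--m->refcount == 0) { /* kref_put() invokes the release callback */
		puts("teardown: unmap, remove node, drop BO ref");
		free(m);
	}
}

int main(void)
{
	struct mapping *m = calloc(1, sizeof(*m));
	if (!m)
		return 1;

	m->refcount = 1;          /* kref_init() in panfrost_gem_open() */
	mapping_get(m);           /* e.g. a submitted job holds it */
	mapping_put(m);           /* job cleanup */
	mapping_put(m);           /* close: last reference, frees */
	return 0;
}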
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 4b17e7308764..ca1bc9019600 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -13,23 +13,46 @@ struct panfrost_gem_object {
struct drm_gem_shmem_object base;
struct sg_table *sgts;

- struct panfrost_mmu *mmu;
- struct drm_mm_node node;
- bool is_mapped :1;
+ /*
+ * Use a list for now. If searching a mapping ever becomes the
+ * bottleneck, we should consider using an RB-tree, or even better,
+ * let the core store drm_gem_object_mapping entries (where we
+ * could place driver specific data) instead of drm_gem_object ones
+ * in its drm_file->object_idr table.
+ *
+ * struct drm_gem_object_mapping {
+ * struct drm_gem_object *obj;
+ * void *driver_priv;
+ * };
+ */
+ struct {
+ struct list_head list;
+ struct mutex lock;
+ } mappings;
+
bool noexec :1;
bool is_heap :1;
};

+struct panfrost_gem_mapping {
+ struct list_head node;
+ struct kref refcount;
+ struct panfrost_gem_object *obj;
+ struct drm_mm_node mmnode;
+ struct panfrost_mmu *mmu;
+ bool active :1;
+};
+
static inline
struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
{
return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
}

-static inline
-struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+static inline struct panfrost_gem_mapping *
+drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
{
- return container_of(node, struct panfrost_gem_object, node);
+ return container_of(node, struct panfrost_gem_mapping, mmnode);
}

struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
@@ -49,6 +72,12 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
void panfrost_gem_close(struct drm_gem_object *obj,
struct drm_file *file_priv);

+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+ struct panfrost_file_priv *priv);
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+
void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);

diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 458f0fa68111..f5dd7b29bc95 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc
static bool panfrost_gem_purge(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);

if (!mutex_trylock(&shmem->pages_lock))
return false;

- panfrost_mmu_unmap(to_panfrost_bo(obj));
+ panfrost_gem_teardown_mappings(bo);
drm_gem_shmem_purge_locked(obj);

mutex_unlock(&shmem->pages_lock);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 21f34d44aac2..bbb0c5e3ca6f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -269,9 +269,20 @@ static void panfrost_job_cleanup(struct kref *ref)
dma_fence_put(job->done_fence);
dma_fence_put(job->render_done_fence);

- if (job->bos) {
+ if (job->mappings) {
for (i = 0; i < job->bo_count; i++)
+ panfrost_gem_mapping_put(job->mappings[i]);
+ kvfree(job->mappings);
+ }
+
+ if (job->bos) {
+ struct panfrost_gem_object *bo;
+
+ for (i = 0; i < job->bo_count; i++) {
+ bo = to_panfrost_bo(job->bos[i]);
drm_gem_object_put_unlocked(job->bos[i]);
+ }
+
kvfree(job->bos);
}

diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 62454128a792..bbd3ba97ff67 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -32,6 +32,7 @@ struct panfrost_job {

/* Exclusive fences we have taken from the BOs to wait for */
struct dma_fence **implicit_fences;
+ struct panfrost_gem_mapping **mappings;
struct drm_gem_object **bos;
u32 bo_count;

diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index a3ed64a1f15e..763cfca886a7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
return 0;
}

-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
+ struct panfrost_gem_object *bo = mapping->obj;
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct sg_table *sgt;
int prot = IOMMU_READ | IOMMU_WRITE;

- if (WARN_ON(bo->is_mapped))
+ if (WARN_ON(mapping->active))
return 0;

if (bo->noexec)
@@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
if (WARN_ON(IS_ERR(sgt)))
return PTR_ERR(sgt);

- mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
- bo->is_mapped = true;
+ mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+ prot, sgt);
+ mapping->active = true;

return 0;
}

-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
+ struct panfrost_gem_object *bo = mapping->obj;
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
- struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
- u64 iova = bo->node.start << PAGE_SHIFT;
- size_t len = bo->node.size << PAGE_SHIFT;
+ struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
+ u64 iova = mapping->mmnode.start << PAGE_SHIFT;
+ size_t len = mapping->mmnode.size << PAGE_SHIFT;
size_t unmapped_len = 0;

- if (WARN_ON(!bo->is_mapped))
+ if (WARN_ON(!mapping->active))
return;

- dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
+ dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
+ mapping->mmu->as, iova, len);

while (unmapped_len < len) {
size_t unmapped_page;
@@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
unmapped_len += pgsize;
}

- panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
- bo->is_mapped = false;
+ panfrost_mmu_flush_range(pfdev, mapping->mmu,
+ mapping->mmnode.start << PAGE_SHIFT, len);
+ mapping->active = false;
}

static void mmu_tlb_inv_context_s1(void *cookie)
@@ -394,10 -399,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
free_io_pgtable_ops(mmu->pgtbl_ops);
}

-static struct panfrost_gem_object *
-addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+static struct panfrost_gem_mapping *
+addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
- struct panfrost_gem_object *bo = NULL;
+ struct panfrost_gem_mapping *mapping = NULL;
struct panfrost_file_priv *priv;
struct drm_mm_node *node;
u64 offset = addr >> PAGE_SHIFT;
@@ -418,8 +423,9 @@ found_mmu:
drm_mm_for_each_node(node, &priv->mm) {
if (offset >= node->start &&
offset < (node->start + node->size)) {
- bo = drm_mm_node_to_panfrost_bo(node);
- drm_gem_object_get(&bo->base.base);
+ mapping = drm_mm_node_to_panfrost_mapping(node);
+
+ kref_get(&mapping->refcount);
break;
}
}
@@ -427,7 +433,7 @@ found_mmu:
spin_unlock(&priv->mm_lock);
out:
spin_unlock(&pfdev->as_lock);
- return bo;
+ return mapping;
}

#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
@@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
u64 addr)
{
int ret, i;
+ struct panfrost_gem_mapping *bomapping;
struct panfrost_gem_object *bo;
struct address_space *mapping;
pgoff_t page_offset;
struct sg_table *sgt;
struct page **pages;

- bo = addr_to_drm_mm_node(pfdev, as, addr);
- if (!bo)
+ bomapping = addr_to_mapping(pfdev, as, addr);
+ if (!bomapping)
return -ENOENT;

+ bo = bomapping->obj;
if (!bo->is_heap) {
dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
- bo->node.start << PAGE_SHIFT);
+ bomapping->mmnode.start << PAGE_SHIFT);
ret = -EINVAL;
goto err_bo;
}
- WARN_ON(bo->mmu->as != as);
+ WARN_ON(bomapping->mmu->as != as);

/* Assume 2MB alignment and size multiple */
addr &= ~((u64)SZ_2M - 1);
page_offset = addr >> PAGE_SHIFT;
- page_offset -= bo->node.start;
+ page_offset -= bomapping->mmnode.start;

mutex_lock(&bo->base.pages_lock);

@@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_map;
}

- mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+ mmu_map_sg(pfdev, bomapping->mmu, addr,
+ IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);

- bo->is_mapped = true;
+ bomapping->active = true;

dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);

- drm_gem_object_put_unlocked(&bo->base.base);
+ panfrost_gem_mapping_put(bomapping);

return 0;

diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index 7c5b6775ae23..44fc2edf63ce 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -4,12 +4,12 @@
#ifndef __PANFROST_MMU_H__
#define __PANFROST_MMU_H__

-struct panfrost_gem_object;
+struct panfrost_gem_mapping;
struct panfrost_file_priv;
struct panfrost_mmu;

-int panfrost_mmu_map(struct panfrost_gem_object *bo);
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);

int panfrost_mmu_init(struct panfrost_device *pfdev);
void panfrost_mmu_fini(struct panfrost_device *pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 2c04e858c50a..684820448be3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -25,7 +25,7 @@
#define V4_SHADERS_PER_COREGROUP 4

struct panfrost_perfcnt {
- struct panfrost_gem_object *bo;
+ struct panfrost_gem_mapping *mapping;
size_t bosize;
void *buf;
struct panfrost_file_priv *user;
@@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
int ret;

reinit_completion(&pfdev->perfcnt->dump_comp);
- gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
+ gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
gpu_write(pfdev, GPU_INT_CLEAR,
@@ -89,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
if (IS_ERR(bo))
return PTR_ERR(bo);

- perfcnt->bo = to_panfrost_bo(&bo->base);
-
/* Map the perfcnt buf in the address space attached to file_priv. */
- ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
+ ret = panfrost_gem_open(&bo->base, file_priv);
if (ret)
goto err_put_bo;

+ perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
+ user);
+ if (!perfcnt->mapping) {
+ ret = -EINVAL;
+ goto err_close_bo;
+ }
+
perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
if (IS_ERR(perfcnt->buf)) {
ret = PTR_ERR(perfcnt->buf);
- goto err_close_bo;
+ goto err_put_mapping;
}

/*
@@ -154,12 +159,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);

+ /* The BO ref is retained by the mapping. */
+ drm_gem_object_put_unlocked(&bo->base);
+
return 0;

err_vunmap:
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+ drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
+err_put_mapping:
+ panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
+ panfrost_gem_close(&bo->base, file_priv);
err_put_bo:
drm_gem_object_put_unlocked(&bo->base);
return ret;
@@ -182,11 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));

perfcnt->user = NULL;
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+ drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
perfcnt->buf = NULL;
- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
- drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
- perfcnt->bo = NULL;
+ panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+ panfrost_gem_mapping_put(perfcnt->mapping);
+ perfcnt->mapping = NULL;
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);

diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 6c64d50c9aae..01c2eeb02aa9 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -294,9 +294,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
long reg;

if (bypass_attn & (1 << channel))
- reg = (volt * 1024) / 2250;
+ reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
else
- reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
+ reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
+ (r[0] + r[1]) * 2250);
return clamp_val(reg, 0, 1023) & (0xff << 2);
}

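What DIV_ROUND_CLOSEST buys over plain division here: C integer division truncates toward zero, so small voltages always round down. A standalone example using the positive-operand form of the macro (the kernel version also handles negative dividends):

#include <stdio.h>

/* Positive-operand simplification of the kernel's DIV_ROUND_CLOSEST(). */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	long volt = 2; /* mV, bypass-attenuator case from volt2reg() */

	printf("truncated: %ld\n", (volt * 1024) / 2250);                 /* 0 */
	printf("closest:   %ld\n", DIV_ROUND_CLOSEST(volt * 1024, 2250)); /* 1 */
	return 0;
}
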
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 1f3b30b085b9..d018b20089ec 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -51,6 +51,7 @@ struct hwmon_device_attribute {

#define to_hwmon_attr(d) \
container_of(d, struct hwmon_device_attribute, dev_attr)
+#define to_dev_attr(a) container_of(a, struct device_attribute, attr)

/*
* Thermal zone information
@@ -58,7 +59,7 @@ struct hwmon_device_attribute {
* also provides the sensor index.
*/
struct hwmon_thermal_data {
- struct hwmon_device *hwdev; /* Reference to hwmon device */
+ struct device *dev; /* Reference to hwmon device */
int index; /* sensor index */
};

@@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = {
NULL
};

+static void hwmon_free_attrs(struct attribute **attrs)
+{
+ int i;
+
+ for (i = 0; attrs[i]; i++) {
+ struct device_attribute *dattr = to_dev_attr(attrs[i]);
+ struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
+
+ kfree(hattr);
+ }
+ kfree(attrs);
+}
+
static void hwmon_dev_release(struct device *dev)
{
- kfree(to_hwmon_device(dev));
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+
+ if (hwdev->group.attrs)
+ hwmon_free_attrs(hwdev->group.attrs);
+ kfree(hwdev->groups);
+ kfree(hwdev);
}

static struct class hwmon_class = {
@@ -119,11 +138,11 @@ static DEFINE_IDA(hwmon_ida);
static int hwmon_thermal_get_temp(void *data, int *temp)
{
struct hwmon_thermal_data *tdata = data;
- struct hwmon_device *hwdev = tdata->hwdev;
+ struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
int ret;
long t;

- ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
+ ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
tdata->index, &t);
if (ret < 0)
return ret;
@@ -137,8 +156,7 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
.get_temp = hwmon_thermal_get_temp,
};

-static int hwmon_thermal_add_sensor(struct device *dev,
- struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
struct hwmon_thermal_data *tdata;
struct thermal_zone_device *tzd;
@@ -147,10 +165,10 @@ static int hwmon_thermal_add_sensor(struct device *dev,
if (!tdata)
return -ENOMEM;

- tdata->hwdev = hwdev;
+ tdata->dev = dev;
tdata->index = index;

- tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
+ tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
&hwmon_thermal_ops);
/*
* If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
@@ -162,8 +180,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
return 0;
}
#else
-static int hwmon_thermal_add_sensor(struct device *dev,
- struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
return 0;
}
@@ -250,8 +267,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
(type == hwmon_fan && attr == hwmon_fan_label);
}

-static struct attribute *hwmon_genattr(struct device *dev,
- const void *drvdata,
+static struct attribute *hwmon_genattr(const void *drvdata,
enum hwmon_sensor_types type,
u32 attr,
int index,
@@ -279,7 +295,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
if ((mode & 0222) && !ops->write)
return ERR_PTR(-EINVAL);

- hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
+ hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
if (!hattr)
return ERR_PTR(-ENOMEM);

@@ -492,8 +508,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
return n;
}

-static int hwmon_genattrs(struct device *dev,
- const void *drvdata,
+static int hwmon_genattrs(const void *drvdata,
struct attribute **attrs,
const struct hwmon_ops *ops,
const struct hwmon_channel_info *info)
@@ -519,7 +534,7 @@ static int hwmon_genattrs(struct device *dev,
attr_mask &= ~BIT(attr);
if (attr >= template_size)
return -EINVAL;
- a = hwmon_genattr(dev, drvdata, info->type, attr, i,
+ a = hwmon_genattr(drvdata, info->type, attr, i,
templates[attr], ops);
if (IS_ERR(a)) {
if (PTR_ERR(a) != -ENOENT)
@@ -533,8 +548,7 @@ static int hwmon_genattrs(struct device *dev,
}

static struct attribute **
-__hwmon_create_attrs(struct device *dev, const void *drvdata,
- const struct hwmon_chip_info *chip)
+__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
{
int ret, i, aindex = 0, nattrs = 0;
struct attribute **attrs;
@@ -545,15 +559,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata,
if (nattrs == 0)
return ERR_PTR(-EINVAL);

- attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
+ attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return ERR_PTR(-ENOMEM);

for (i = 0; chip->info[i]; i++) {
- ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
+ ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
chip->info[i]);
- if (ret < 0)
+ if (ret < 0) {
+ hwmon_free_attrs(attrs);
return ERR_PTR(ret);
+ }
aindex += ret;
}

@@ -595,14 +611,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
for (i = 0; groups[i]; i++)
ngroups++;

- hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
- GFP_KERNEL);
+ hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
if (!hwdev->groups) {
err = -ENOMEM;
goto free_hwmon;
}

- attrs = __hwmon_create_attrs(dev, drvdata, chip);
+ attrs = __hwmon_create_attrs(drvdata, chip);
if (IS_ERR(attrs)) {
err = PTR_ERR(attrs);
goto free_hwmon;
@@ -647,8 +662,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hwmon_temp_input, j))
continue;
if (info[i]->config[j] & HWMON_T_INPUT) {
- err = hwmon_thermal_add_sensor(dev,
- hwdev, j);
+ err = hwmon_thermal_add_sensor(hdev, j);
if (err) {
device_unregister(hdev);
/*
@@ -667,7 +681,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
return hdev;

free_hwmon:
- kfree(hwdev);
+ hwmon_dev_release(hdev);
ida_remove:
ida_simple_remove(&hwmon_ida, id);
return ERR_PTR(err);
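
The hwmon conversion from devm_* to plain allocations follows from an ownership rule: memory reachable from the hwmon device must be freed by that device's release callback, not tied to the parent's devres list, which can be torn down while the hwmon device is still referenced. A user-space sketch of the release-owns-the-memory pattern (hypothetical names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_dev {
	char **attrs; /* owned by this device, like hwdev->group.attrs */
};

/* Runs only when the last reference to the device is dropped. */
static void fake_dev_release(struct fake_dev *d)
{
	if (d->attrs) {
		for (int i = 0; d->attrs[i]; i++)
			free(d->attrs[i]); /* hwmon_free_attrs() equivalent */
		free(d->attrs);
	}
	puts("device released");
	free(d);
}

int main(void)
{
	struct fake_dev *d = calloc(1, sizeof(*d));
	if (!d)
		return 1;
	d->attrs = calloc(2, sizeof(char *));
	if (d->attrs)
		d->attrs[0] = strdup("temp1_input");
	fake_dev_release(d);
	return 0;
}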
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index f3dd2a17bd42..2e97e56c72c7 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -23,8 +23,8 @@
static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };

static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
- { 0x40, 0x00, 0x42, 0x44, 0x46 },
- { 0x3f, 0x00, 0x41, 0x43, 0x45 },
+ { 0x46, 0x00, 0x40, 0x42, 0x44 },
+ { 0x45, 0x00, 0x3f, 0x41, 0x43 },
};

static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };
@@ -58,6 +58,8 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = {
struct nct7802_data {
struct regmap *regmap;
struct mutex access_lock; /* for multi-byte read and write operations */
+ u8 in_status;
+ struct mutex in_alarm_lock;
};

static ssize_t temp_type_show(struct device *dev,
@@ -368,6 +370,66 @@ static ssize_t in_store(struct device *dev, struct device_attribute *attr,
return err ? : count;
}

+static ssize_t in_alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ struct nct7802_data *data = dev_get_drvdata(dev);
+ int volt, min, max, ret;
+ unsigned int val;
+
+ mutex_lock(&data->in_alarm_lock);
+
+ /*
+ * The SMI Voltage status register is the only register giving a status
+ * for voltages. A bit is set for each input crossing a threshold, in
+ * both direction, but the "inside" or "outside" limits info is not
+ * available. Also this register is cleared on read.
+ * Note: this is not explicitly spelled out in the datasheet, but
+ * from experiment.
+ * To deal with this we use a status cache with one validity bit and
+ * one status bit for each input. Validity is cleared at startup and
+ * each time the register reports a change, and the status is processed
+ * by software based on current input value and limits.
+ */
+ ret = regmap_read(data->regmap, 0x1e, &val); /* SMI Voltage status */
+ if (ret < 0)
+ goto abort;
+
+ /* invalidate cached status for all inputs crossing a threshold */
+ data->in_status &= ~((val & 0x0f) << 4);
+
+ /* if cached status for requested input is invalid, update it */
+ if (!(data->in_status & (0x10 << sattr->index))) {
+ ret = nct7802_read_voltage(data, sattr->nr, 0);
+ if (ret < 0)
+ goto abort;
+ volt = ret;
+
+ ret = nct7802_read_voltage(data, sattr->nr, 1);
+ if (ret < 0)
+ goto abort;
+ min = ret;
+
+ ret = nct7802_read_voltage(data, sattr->nr, 2);
+ if (ret < 0)
+ goto abort;
+ max = ret;
+
+ if (volt < min || volt > max)
+ data->in_status |= (1 << sattr->index);
+ else
+ data->in_status &= ~(1 << sattr->index);
+
+ data->in_status |= 0x10 << sattr->index;
+ }
+
+ ret = sprintf(buf, "%u\n", !!(data->in_status & (1 << sattr->index)));
+abort:
+ mutex_unlock(&data->in_alarm_lock);
+ return ret;
+}
+
static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -660,7 +722,7 @@ static const struct attribute_group nct7802_temp_group = {
static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, 0, 0);
static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, 0, 1);
static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, 0, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, alarm, 0x1e, 3);
+static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, in_alarm, 0, 3);
static SENSOR_DEVICE_ATTR_2_RW(in0_beep, beep, 0x5a, 3);

static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
@@ -668,19 +730,19 @@ static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, 2, 0);
static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, 2, 1);
static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, 2, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, alarm, 0x1e, 0);
+static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, in_alarm, 2, 0);
static SENSOR_DEVICE_ATTR_2_RW(in2_beep, beep, 0x5a, 0);

static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, 3, 0);
static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, 3, 1);
static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, 3, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, alarm, 0x1e, 1);
+static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, in_alarm, 3, 1);
static SENSOR_DEVICE_ATTR_2_RW(in3_beep, beep, 0x5a, 1);

static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, 4, 0);
static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, 4, 1);
static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, 4, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, alarm, 0x1e, 2);
+static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, in_alarm, 4, 2);
static SENSOR_DEVICE_ATTR_2_RW(in4_beep, beep, 0x5a, 2);

static struct attribute *nct7802_in_attrs[] = {
@@ -1011,6 +1073,7 @@ static int nct7802_probe(struct i2c_client *client,
return PTR_ERR(data->regmap);

mutex_init(&data->access_lock);
+ mutex_init(&data->in_alarm_lock);

ret = nct7802_init_chip(data);
if (ret < 0)
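
The nct7802 alarm rework works around a clear-on-read status register with a software cache: the high nibble of in_status holds per-input validity bits, the low nibble the last computed alarm state. A user-space sketch of the same scheme (logic transcribed from in_alarm_show(); the limits and readings are made up):

#include <stdio.h>
#include <stdint.h>

static uint8_t in_status; /* bits 4-7: valid, bits 0-3: alarm state */

/* A read of the SMI Voltage status register (0x1e) reporting crossings. */
static void smi_event(uint8_t crossed_mask)
{
	in_status &= ~((crossed_mask & 0x0f) << 4); /* invalidate those inputs */
}

static int in_alarm(int index, int volt, int min, int max)
{
	if (!(in_status & (0x10 << index))) { /* cache invalid: recompute */
		if (volt < min || volt > max)
			in_status |= 1 << index;
		else
			in_status &= ~(1 << index);
		in_status |= 0x10 << index;   /* mark valid */
	}
	return !!(in_status & (1 << index));
}

int main(void)
{
	printf("%d\n", in_alarm(0, 900, 800, 1000));  /* 0: within limits */
	smi_event(0x01);                              /* input 0 crossed a limit */
	printf("%d\n", in_alarm(0, 1100, 800, 1000)); /* 1: recomputed, out of range */
	return 0;
}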
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index a1a035270cab..b273e421e910 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2575,17 +2575,6 @@ isert_wait4logout(struct isert_conn *isert_conn)
}
}

-static void
-isert_wait4cmds(struct iscsi_conn *conn)
-{
- isert_info("iscsi_conn %p\n", conn);
-
- if (conn->sess) {
- target_sess_cmd_list_set_waiting(conn->sess->se_sess);
- target_wait_for_sess_cmds(conn->sess->se_sess);
- }
-}
-
/**
* isert_put_unsol_pending_cmds() - Drop commands waiting for
* unsolicitate dataout
@@ -2633,7 +2622,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)

ib_drain_qp(isert_conn->qp);
isert_put_unsol_pending_cmds(conn);
- isert_wait4cmds(conn);
isert_wait4logout(isert_conn);

queue_work(isert_release_wq, &isert_conn->release_work);
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index 83368f1e7c4e..4650f4a94989 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -336,7 +336,8 @@ static int keyspan_setup(struct usb_device* dev)
int retval = 0;

retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
+ 0x11, 0x40, 0x5601, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
__func__, retval);
@@ -344,7 +345,8 @@ static int keyspan_setup(struct usb_device* dev)
}

retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
+ 0x44, 0x40, 0x0, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
__func__, retval);
@@ -352,7 +354,8 @@ static int keyspan_setup(struct usb_device* dev)
}

retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
+ 0x22, 0x40, 0x0, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
__func__, retval);
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index ecd762f93732..53ad25eaf1a2 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -90,7 +90,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)

if (regs->enable_mask)
rc = regmap_update_bits(vib->regmap, regs->enable_addr,
- on ? regs->enable_mask : 0, val);
+ regs->enable_mask, on ? ~0 : 0);

return rc;
}
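
regmap_update_bits(map, reg, mask, val) clears the mask bits and ORs in val & mask, so the old pm8xxx call, which passed a zero mask when turning the vibrator off, could never clear the enable bit. A sketch of the semantics (update_bits() mimics the regmap behaviour; register contents are made up):

#include <stdio.h>
#include <stdint.h>

static uint32_t reg;

static void update_bits(uint32_t mask, uint32_t val)
{
	reg = (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t enable_mask = 0x80;

	update_bits(enable_mask, ~0u);  /* on: sets the enable bit */
	printf("on:  0x%02x\n", (unsigned)reg);
	update_bits(0, 0);              /* old off path: mask 0 changes nothing */
	printf("off: 0x%02x (bit stuck)\n", (unsigned)reg);
	update_bits(enable_mask, 0);    /* fixed off path: mask set, val 0 */
	printf("off: 0x%02x\n", (unsigned)reg);
	return 0;
}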
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
index b313c579914f..2407ea43de59 100644
--- a/drivers/input/rmi4/rmi_smbus.c
+++ b/drivers/input/rmi4/rmi_smbus.c
@@ -163,6 +163,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
/* prepare to write next block of bytes */
cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT;
+ rmiaddr += SMB_MAX_COUNT;
}
exit:
mutex_unlock(&rmi_smb->page_mutex);
@@ -214,6 +215,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
/* prepare to read next block of bytes */
cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT;
+ rmiaddr += SMB_MAX_COUNT;
}

retval = 0;
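
The rmi_smbus fix makes the multi-chunk loop advance the register address together with the buffer pointer; before it, every chunk after the first was written to (or read from) the starting address again. A sketch of the corrected loop, assuming a 32-byte chunk limit like the driver's SMB_MAX_COUNT:

#include <stdio.h>
#include <string.h>

#define SMB_MAX_COUNT 32

static void write_block(unsigned addr, const unsigned char *buf, size_t len)
{
	printf("write %zu bytes at 0x%04x\n", len, addr);
}

static void write_all(unsigned rmiaddr, const unsigned char *databuff, size_t len)
{
	while (len > SMB_MAX_COUNT) {
		write_block(rmiaddr, databuff, SMB_MAX_COUNT);
		len      -= SMB_MAX_COUNT;
		databuff += SMB_MAX_COUNT;
		rmiaddr  += SMB_MAX_COUNT; /* the line the patch adds */
	}
	write_block(rmiaddr, databuff, len);
}

int main(void)
{
	unsigned char payload[70];

	memset(payload, 0xAB, sizeof(payload));
	write_all(0x0100, payload, sizeof(payload));
	return 0;
}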
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 2ca586fb914f..06d0ffef4a17 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1802,14 +1802,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);

/* Verify that a device really has an endpoint */
- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&intf->dev,
"interface has %d endpoints, but must have minimum 1\n",
- intf->altsetting[0].desc.bNumEndpoints);
+ intf->cur_altsetting->desc.bNumEndpoints);
err = -EINVAL;
goto fail3;
}
- endpoint = &intf->altsetting[0].endpoint[0].desc;
+ endpoint = &intf->cur_altsetting->endpoint[0].desc;

/* Go set up our URB, which is called when the tablet receives
* input.
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 35031228a6d0..799c94dda651 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
}

/* Sanity check that a device has an endpoint */
- if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+ if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&usbinterface->dev,
"Invalid number of endpoints\n");
error = -EINVAL;
goto err_free_urb;
}

- /*
- * The endpoint is always altsetting 0, we know this since we know
- * this device only has one interrupt endpoint
- */
- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
+ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;

/* Some debug */
dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting);
@@ -973,7 +969,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
input_dev->dev.parent = &usbinterface->dev;

/* Setup the URB, it will be posted later on open of input device */
- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
+ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;

usb_fill_int_urb(gtco->urbinfo,
udev,
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
index a1f3a0cb197e..38f087404f7a 100644
--- a/drivers/input/tablet/pegasus_notetaker.c
+++ b/drivers/input/tablet/pegasus_notetaker.c
@@ -275,7 +275,7 @@ static int pegasus_probe(struct usb_interface *intf,
return -ENODEV;

/* Sanity check that the device has an endpoint */
- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&intf->dev, "Invalid number of endpoints\n");
return -EINVAL;
}
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index 0af0fe8c40d7..742a7e96c1b5 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -237,6 +237,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device *hwmon;
+ struct thermal_zone_device *thermal;
int error;
u32 reg;
bool ts_attached;
@@ -355,7 +356,10 @@ static int sun4i_ts_probe(struct platform_device *pdev)
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);

- devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
+ thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
+ &sun4i_ts_tz_ops);
+ if (IS_ERR(thermal))
+ return PTR_ERR(thermal);

writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);

diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 3fd3e862269b..2e2ea5719c90 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
1503     @@ -653,7 +653,7 @@ static int sur40_probe(struct usb_interface *interface,
1504     int error;
1505    
1506     /* Check if we really have the right interface. */
1507     - iface_desc = &interface->altsetting[0];
1508     + iface_desc = interface->cur_altsetting;
1509     if (iface_desc->desc.bInterfaceClass != 0xFF)
1510     return -ENODEV;
1511    
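For context: all five input drivers above shared the same assumption. intf->altsetting[] is only the array of alternate settings in descriptor order, while intf->cur_altsetting points at the setting actually installed; a nonconforming or malicious device can make the two differ, so an endpoint check against altsetting[0] proves nothing about the endpoints in use. A hedged sketch of the probe-time check the hunks converge on:

#include <linux/usb.h>

static int validate_first_endpoint(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;

	if (alt->desc.bNumEndpoints < 1)
		return -EINVAL;

	/* only now is endpoint[0] known to exist on the live setting */
	if (!usb_endpoint_is_int_in(&alt->endpoint[0].desc))
		return -ENODEV;

	return 0;
}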
1512     diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
1513     index 568c52317757..483f7bc379fa 100644
1514     --- a/drivers/iommu/amd_iommu_init.c
1515     +++ b/drivers/iommu/amd_iommu_init.c
1516     @@ -1655,27 +1655,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
1517     static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1518     {
1519     struct pci_dev *pdev = iommu->dev;
1520     - u64 val = 0xabcd, val2 = 0;
1521     + u64 val = 0xabcd, val2 = 0, save_reg = 0;
1522    
1523     if (!iommu_feature(iommu, FEATURE_PC))
1524     return;
1525    
1526     amd_iommu_pc_present = true;
1527    
1528     + /* save the value to restore, if writable */
1529     + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
1530     + goto pc_false;
1531     +
1532     /* Check if the performance counters can be written to */
1533     if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
1534     (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
1535     - (val != val2)) {
1536     - pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
1537     - amd_iommu_pc_present = false;
1538     - return;
1539     - }
1540     + (val != val2))
1541     + goto pc_false;
1542     +
1543     + /* restore */
1544     + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
1545     + goto pc_false;
1546    
1547     pci_info(pdev, "IOMMU performance counters supported\n");
1548    
1549     val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1550     iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1551     iommu->max_counters = (u8) ((val >> 7) & 0xf);
1552     +
1553     + return;
1554     +
1555     +pc_false:
1556     + pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
1557     + amd_iommu_pc_present = false;
1558     + return;
1559     }
1560    
1561     static ssize_t amd_iommu_show_cap(struct device *dev,
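For context: the reworked init_iommu_perf_ctr() still probes writability by writing 0xabcd and reading it back, but now brackets the probe with a save and a restore so bank 0 / counter 0 is not left holding the test pattern. The shape, reduced to its essentials (reg_read()/reg_write() are hypothetical stand-ins for iommu_pc_get_set_reg() in its read and write modes):

int reg_read(u64 *val);		/* hypothetical accessors */
int reg_write(u64 val);

static bool counter_is_writable(void)
{
	u64 saved, probe = 0xabcd, check = 0;

	if (reg_read(&saved))
		return false;
	if (reg_write(probe) || reg_read(&check) || check != probe)
		return false;
	/* restore before reporting success: the probe must be
	 * free of side effects
	 */
	return reg_write(saved) == 0;
}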
1562     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1563     index e84c5dfe146f..dd5db856dcaf 100644
1564     --- a/drivers/iommu/intel-iommu.c
1565     +++ b/drivers/iommu/intel-iommu.c
1566     @@ -5132,7 +5132,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
1567    
1568     spin_lock_irqsave(&device_domain_lock, flags);
1569     info = dev->archdata.iommu;
1570     - if (info)
1571     + if (info && info != DEFER_DEVICE_DOMAIN_INFO
1572     + && info != DUMMY_DEVICE_DOMAIN_INFO)
1573     __dmar_remove_one_dev_info(info);
1574     spin_unlock_irqrestore(&device_domain_lock, flags);
1575     }
1576     diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
1577     index a5c73f3d5f79..2bf74595610f 100644
1578     --- a/drivers/leds/leds-gpio.c
1579     +++ b/drivers/leds/leds-gpio.c
1580     @@ -151,9 +151,14 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
1581     struct gpio_led led = {};
1582     const char *state = NULL;
1583    
1584     + /*
1585     + * Acquire gpiod from DT with uninitialized label, which
1586     + * will be updated after the LED class device is registered.
1587     + * Only then is the final LED name known.
1588     + */
1589     led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child,
1590     GPIOD_ASIS,
1591     - led.name);
1592     + NULL);
1593     if (IS_ERR(led.gpiod)) {
1594     fwnode_handle_put(child);
1595     return ERR_CAST(led.gpiod);
1596     @@ -186,6 +191,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
1597     fwnode_handle_put(child);
1598     return ERR_PTR(ret);
1599     }
1600     + /* Set gpiod label to match the corresponding LED name. */
1601     + gpiod_set_consumer_name(led_dat->gpiod,
1602     + led_dat->cdev.dev->kobj.name);
1603     priv->num_leds++;
1604     }
1605    
1606     diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
1607     index 21bb96ce4cd6..58868d7129eb 100644
1608     --- a/drivers/media/v4l2-core/v4l2-ioctl.c
1609     +++ b/drivers/media/v4l2-core/v4l2-ioctl.c
1610     @@ -1605,12 +1605,12 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
1611     case V4L2_BUF_TYPE_VBI_CAPTURE:
1612     if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
1613     break;
1614     - CLEAR_AFTER_FIELD(p, fmt.vbi);
1615     + CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
1616     return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
1617     case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
1618     if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
1619     break;
1620     - CLEAR_AFTER_FIELD(p, fmt.sliced);
1621     + CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
1622     return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
1623     case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1624     if (unlikely(!ops->vidioc_s_fmt_vid_out))
1625     @@ -1636,22 +1636,22 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
1626     case V4L2_BUF_TYPE_VBI_OUTPUT:
1627     if (unlikely(!ops->vidioc_s_fmt_vbi_out))
1628     break;
1629     - CLEAR_AFTER_FIELD(p, fmt.vbi);
1630     + CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
1631     return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
1632     case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
1633     if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
1634     break;
1635     - CLEAR_AFTER_FIELD(p, fmt.sliced);
1636     + CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
1637     return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
1638     case V4L2_BUF_TYPE_SDR_CAPTURE:
1639     if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
1640     break;
1641     - CLEAR_AFTER_FIELD(p, fmt.sdr);
1642     + CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
1643     return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
1644     case V4L2_BUF_TYPE_SDR_OUTPUT:
1645     if (unlikely(!ops->vidioc_s_fmt_sdr_out))
1646     break;
1647     - CLEAR_AFTER_FIELD(p, fmt.sdr);
1648     + CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
1649     return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
1650     case V4L2_BUF_TYPE_META_CAPTURE:
1651     if (unlikely(!ops->vidioc_s_fmt_meta_cap))
1652     @@ -1707,12 +1707,12 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
1653     case V4L2_BUF_TYPE_VBI_CAPTURE:
1654     if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
1655     break;
1656     - CLEAR_AFTER_FIELD(p, fmt.vbi);
1657     + CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
1658     return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
1659     case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
1660     if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
1661     break;
1662     - CLEAR_AFTER_FIELD(p, fmt.sliced);
1663     + CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
1664     return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
1665     case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1666     if (unlikely(!ops->vidioc_try_fmt_vid_out))
1667     @@ -1738,22 +1738,22 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
1668     case V4L2_BUF_TYPE_VBI_OUTPUT:
1669     if (unlikely(!ops->vidioc_try_fmt_vbi_out))
1670     break;
1671     - CLEAR_AFTER_FIELD(p, fmt.vbi);
1672     + CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
1673     return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
1674     case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
1675     if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
1676     break;
1677     - CLEAR_AFTER_FIELD(p, fmt.sliced);
1678     + CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
1679     return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
1680     case V4L2_BUF_TYPE_SDR_CAPTURE:
1681     if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
1682     break;
1683     - CLEAR_AFTER_FIELD(p, fmt.sdr);
1684     + CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
1685     return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
1686     case V4L2_BUF_TYPE_SDR_OUTPUT:
1687     if (unlikely(!ops->vidioc_try_fmt_sdr_out))
1688     break;
1689     - CLEAR_AFTER_FIELD(p, fmt.sdr);
1690     + CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
1691     return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
1692     case V4L2_BUF_TYPE_META_CAPTURE:
1693     if (unlikely(!ops->vidioc_try_fmt_meta_cap))
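For context: CLEAR_AFTER_FIELD() in v4l2-ioctl.c zeroes every byte of the ioctl argument that follows the named member; its definition reads roughly as follows (sketched from the same file, modulo line wrapping):

#define CLEAR_AFTER_FIELD(p, field)					\
	memset((u8 *)(p) + offsetof(typeof(*(p)), field) +		\
	       sizeof((p)->field), 0,					\
	       sizeof(*(p)) - offsetof(typeof(*(p)), field) -		\
	       sizeof((p)->field))

Naming the last meaningful field (fmt.vbi.flags, fmt.sliced.io_size, fmt.sdr.buffersize) instead of the whole union member therefore extends the cleared span to cover each struct's trailing reserved[] words, so drivers and userspace see them zeroed rather than carrying stale data.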
1694     diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
1695     index 7bc950520fd9..403ac44a7378 100644
1696     --- a/drivers/mmc/host/sdhci-tegra.c
1697     +++ b/drivers/mmc/host/sdhci-tegra.c
1698     @@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
1699     misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
1700     if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
1701     misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
1702     - if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
1703     + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
1704     clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
1705     }
1706    
1707     diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
1708     index 5f9df2dbde06..4478b94d4791 100644
1709     --- a/drivers/mmc/host/sdhci.c
1710     +++ b/drivers/mmc/host/sdhci.c
1711     @@ -3902,11 +3902,13 @@ int sdhci_setup_host(struct sdhci_host *host)
1712     if (host->ops->get_min_clock)
1713     mmc->f_min = host->ops->get_min_clock(host);
1714     else if (host->version >= SDHCI_SPEC_300) {
1715     - if (host->clk_mul) {
1716     - mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
1717     + if (host->clk_mul)
1718     max_clk = host->max_clk * host->clk_mul;
1719     - } else
1720     - mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
1721     + /*
1722     + * Divided Clock Mode minimum clock rate is always less than
1723     + * Programmable Clock Mode minimum clock rate.
1724     + */
1725     + mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
1726     } else
1727     mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
1728    
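For context, the new comment's claim is easy to check numerically (assuming SDHCI_MAX_DIV_SPEC_300 == 2046, its value in sdhci.h):

/* With max_clk = 200 MHz:
 *   divided-clock floor:            200 MHz / 2046      ~=  97.8 kHz
 *   programmable floor, clk_mul=1:  200 MHz * 1 / 1024  ~= 195.3 kHz
 * clk_mul >= 1 whenever Programmable Clock Mode exists, so the
 * divided-clock floor is always the lower of the two; reporting it
 * unconditionally can only widen, never narrow, the usable range.
 */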
1729     diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
1730     index bb90757ecace..4cbb764c9822 100644
1731     --- a/drivers/mmc/host/sdhci_am654.c
1732     +++ b/drivers/mmc/host/sdhci_am654.c
1733     @@ -236,6 +236,22 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
1734     writeb(val, host->ioaddr + reg);
1735     }
1736    
1737     +static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
1738     +{
1739     + struct sdhci_host *host = mmc_priv(mmc);
1740     + int err = sdhci_execute_tuning(mmc, opcode);
1741     +
1742     + if (err)
1743     + return err;
1744     + /*
1745     + * Tuning data remains in the buffer after tuning.
1746     + * Do a command and data reset to get rid of it
1747     + */
1748     + sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1749     +
1750     + return 0;
1751     +}
1752     +
1753     static struct sdhci_ops sdhci_am654_ops = {
1754     .get_max_clock = sdhci_pltfm_clk_get_max_clock,
1755     .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
1756     @@ -249,8 +265,7 @@ static struct sdhci_ops sdhci_am654_ops = {
1757    
1758     static const struct sdhci_pltfm_data sdhci_am654_pdata = {
1759     .ops = &sdhci_am654_ops,
1760     - .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
1761     - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1762     + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1763     .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1764     };
1765    
1766     @@ -272,8 +287,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
1767    
1768     static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
1769     .ops = &sdhci_j721e_8bit_ops,
1770     - .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
1771     - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1772     + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1773     .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1774     };
1775    
1776     @@ -295,8 +309,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
1777    
1778     static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
1779     .ops = &sdhci_j721e_4bit_ops,
1780     - .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
1781     - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1782     + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1783     .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1784     };
1785    
1786     @@ -480,6 +493,8 @@ static int sdhci_am654_probe(struct platform_device *pdev)
1787     goto pm_runtime_put;
1788     }
1789    
1790     + host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
1791     +
1792     ret = sdhci_am654_init(host);
1793     if (ret)
1794     goto pm_runtime_put;
1795     diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
1796     index 2e57122f02fb..2f5c287eac95 100644
1797     --- a/drivers/net/can/slcan.c
1798     +++ b/drivers/net/can/slcan.c
1799     @@ -344,9 +344,16 @@ static void slcan_transmit(struct work_struct *work)
1800     */
1801     static void slcan_write_wakeup(struct tty_struct *tty)
1802     {
1803     - struct slcan *sl = tty->disc_data;
1804     + struct slcan *sl;
1805     +
1806     + rcu_read_lock();
1807     + sl = rcu_dereference(tty->disc_data);
1808     + if (!sl)
1809     + goto out;
1810    
1811     schedule_work(&sl->tx_work);
1812     +out:
1813     + rcu_read_unlock();
1814     }
1815    
1816     /* Send a can_frame to a TTY queue. */
1817     @@ -644,10 +651,11 @@ static void slcan_close(struct tty_struct *tty)
1818     return;
1819    
1820     spin_lock_bh(&sl->lock);
1821     - tty->disc_data = NULL;
1822     + rcu_assign_pointer(tty->disc_data, NULL);
1823     sl->tty = NULL;
1824     spin_unlock_bh(&sl->lock);
1825    
1826     + synchronize_rcu();
1827     flush_work(&sl->tx_work);
1828    
1829     /* Flush network side */
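For context: the TTY wakeup callback can race with ldisc close, so tty->disc_data is now treated as an RCU-protected pointer. The pairing, reduced to its two halves (a hedged sketch, not the full driver):

/* reader side -- may fire at any time from the TTY layer */
rcu_read_lock();
sl = rcu_dereference(tty->disc_data);
if (sl)
	schedule_work(&sl->tx_work);
rcu_read_unlock();

/* writer side -- teardown in slcan_close() */
rcu_assign_pointer(tty->disc_data, NULL);
synchronize_rcu();		/* every reader has seen NULL or finished */
flush_work(&sl->tx_work);	/* safe: no new work can be queued now */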
1830     diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1831     index 1de51811fcb4..8f909d57501f 100644
1832     --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1833     +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1834     @@ -2164,8 +2164,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1835     DMA_END_ADDR);
1836    
1837     /* Initialize Tx NAPI */
1838     - netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
1839     - NAPI_POLL_WEIGHT);
1840     + netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
1841     + NAPI_POLL_WEIGHT);
1842     }
1843    
1844     /* Initialize a RDMA ring */
1845     diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1846     index 58f89f6a040f..97ff8608f0ab 100644
1847     --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1848     +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1849     @@ -2448,6 +2448,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1850    
1851     if (!is_offload(adapter))
1852     return -EOPNOTSUPP;
1853     + if (!capable(CAP_NET_ADMIN))
1854     + return -EPERM;
1855     if (!(adapter->flags & FULL_INIT_DONE))
1856     return -EIO; /* need the memory controllers */
1857     if (copy_from_user(&t, useraddr, sizeof(t)))
1858     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
1859     index 778dab1af8fc..f260dd96873b 100644
1860     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
1861     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
1862     @@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
1863    
1864     struct tx_sync_info {
1865     u64 rcd_sn;
1866     - s32 sync_len;
1867     + u32 sync_len;
1868     int nr_frags;
1869     skb_frag_t frags[MAX_SKB_FRAGS];
1870     };
1871     @@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval {
1872    
1873     static enum mlx5e_ktls_sync_retval
1874     tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
1875     - u32 tcp_seq, struct tx_sync_info *info)
1876     + u32 tcp_seq, int datalen, struct tx_sync_info *info)
1877     {
1878     struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
1879     enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
1880     struct tls_record_info *record;
1881     int remaining, i = 0;
1882     unsigned long flags;
1883     + bool ends_before;
1884    
1885     spin_lock_irqsave(&tx_ctx->lock, flags);
1886     record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
1887     @@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
1888     goto out;
1889     }
1890    
1891     - if (unlikely(tcp_seq < tls_record_start_seq(record))) {
1892     - ret = tls_record_is_start_marker(record) ?
1893     - MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
1894     + /* There are the following cases:
1895     + * 1. packet ends before start marker: bypass offload.
1896     + * 2. packet starts before start marker and ends after it: drop,
1897     + * not supported, breaks contract with kernel.
1898     + * 3. packet ends before tls record info starts: drop,
1899     + * this packet was already acknowledged and its record info
1900     + * was released.
1901     + */
1902     + ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
1903     +
1904     + if (unlikely(tls_record_is_start_marker(record))) {
1905     + ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
1906     + goto out;
1907     + } else if (ends_before) {
1908     + ret = MLX5E_KTLS_SYNC_FAIL;
1909     goto out;
1910     }
1911    
1912     @@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
1913     u8 num_wqebbs;
1914     int i = 0;
1915    
1916     - ret = tx_sync_info_get(priv_tx, seq, &info);
1917     + ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
1918     if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
1919     if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
1920     stats->tls_skip_no_sync_data++;
1921     @@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
1922     goto err_out;
1923     }
1924    
1925     - if (unlikely(info.sync_len < 0)) {
1926     - if (likely(datalen <= -info.sync_len))
1927     - return MLX5E_KTLS_SYNC_DONE;
1928     -
1929     - stats->tls_drop_bypass_req++;
1930     - goto err_out;
1931     - }
1932     -
1933     stats->tls_ooo++;
1934    
1935     tx_post_resync_params(sq, priv_tx, info.rcd_sn);
1936     @@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
1937     if (unlikely(contig_wqebbs_room < num_wqebbs))
1938     mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
1939    
1940     - tx_post_resync_params(sq, priv_tx, info.rcd_sn);
1941     -
1942     for (; i < info.nr_frags; i++) {
1943     unsigned int orig_fsz, frag_offset = 0, n = 0;
1944     skb_frag_t *f = &info.frags[i];
1945     @@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
1946     enum mlx5e_ktls_sync_retval ret =
1947     mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
1948    
1949     - if (likely(ret == MLX5E_KTLS_SYNC_DONE))
1950     + switch (ret) {
1951     + case MLX5E_KTLS_SYNC_DONE:
1952     *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
1953     - else if (ret == MLX5E_KTLS_SYNC_FAIL)
1954     + break;
1955     + case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
1956     + if (likely(!skb->decrypted))
1957     + goto out;
1958     + WARN_ON_ONCE(1);
1959     + /* fall-through */
1960     + default: /* MLX5E_KTLS_SYNC_FAIL */
1961     goto err_out;
1962     - else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
1963     - goto out;
1964     + }
1965     }
1966    
1967     priv_tx->expected_seq = seq + datalen;
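For context: the new ends_before test leans on before() from <net/tcp.h>, which compares 32-bit TCP sequence numbers modulo 2^32 so the three-way classification above stays correct across sequence wraparound. Its definition, for reference:

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1 - seq2) < 0;
}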
1968     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1969     index 96711e34d248..1f9107d83848 100644
1970     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1971     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1972     @@ -3951,6 +3951,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
1973     u32 rate_mbps;
1974     int err;
1975    
1976     + vport_num = rpriv->rep->vport;
1977     + if (vport_num >= MLX5_VPORT_ECPF) {
1978     + NL_SET_ERR_MSG_MOD(extack,
1979     + "Ingress rate limit is supported only for Eswitch ports connected to VFs");
1980     + return -EOPNOTSUPP;
1981     + }
1982     +
1983     esw = priv->mdev->priv.eswitch;
1984     /* rate is given in bytes/sec.
1985     * First convert to bits/sec and then round to the nearest mbit/secs.
1986     @@ -3959,8 +3966,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
1987     * 1 mbit/sec.
1988     */
1989     rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
1990     - vport_num = rpriv->rep->vport;
1991     -
1992     err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
1993     if (err)
1994     NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
1995     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1996     index 9004a07e457a..5acfdea3a75a 100644
1997     --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1998     +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1999     @@ -858,7 +858,7 @@ out:
2000     */
2001     #define ESW_SIZE (16 * 1024 * 1024)
2002     const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
2003     - 64 * 1024, 4 * 1024 };
2004     + 64 * 1024, 128 };
2005    
2006     static int
2007     get_sz_from_pool(struct mlx5_eswitch *esw)
2008     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2009     index 051ab845b501..c96a0e501007 100644
2010     --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
2011     +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2012     @@ -1569,6 +1569,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
2013     { PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */
2014     { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
2015     { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
2016     + { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
2017     { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
2018     { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
2019     { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
2020     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
2021     index 51803eef13dd..c7f10d4f8f8d 100644
2022     --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
2023     +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
2024     @@ -1,6 +1,7 @@
2025     // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2026     /* Copyright (c) 2019 Mellanox Technologies. */
2027    
2028     +#include <linux/smp.h>
2029     #include "dr_types.h"
2030    
2031     #define QUEUE_SIZE 128
2032     @@ -729,7 +730,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
2033     if (!in)
2034     goto err_cqwq;
2035    
2036     - vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
2037     + vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
2038     err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
2039     if (err) {
2040     kvfree(in);
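For context: dr_create_cq() can run in preemptible context, where smp_processor_id() trips a "using smp_processor_id() in preemptible" splat under CONFIG_DEBUG_PREEMPT because the task may migrate right after the read. raw_smp_processor_id() skips that check and is appropriate exactly when a stale CPU number is harmless, as it is here:

/* acceptable: if we migrate right after reading the CPU number, the
 * worst case is a slightly less even spread of CQs over completion
 * vectors -- correctness does not depend on the value staying fresh
 */
vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);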
2041     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
2042     index 3d587d0bdbbe..1e32e2443f73 100644
2043     --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
2044     +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
2045     @@ -352,26 +352,16 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
2046     if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
2047     list_for_each_entry(dst, &fte->node.children, node.list) {
2048     enum mlx5_flow_destination_type type = dst->dest_attr.type;
2049     - u32 id;
2050    
2051     if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
2052     err = -ENOSPC;
2053     goto free_actions;
2054     }
2055    
2056     - switch (type) {
2057     - case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
2058     - id = dst->dest_attr.counter_id;
2059     + if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
2060     + continue;
2061    
2062     - tmp_action =
2063     - mlx5dr_action_create_flow_counter(id);
2064     - if (!tmp_action) {
2065     - err = -ENOMEM;
2066     - goto free_actions;
2067     - }
2068     - fs_dr_actions[fs_dr_num_actions++] = tmp_action;
2069     - actions[num_actions++] = tmp_action;
2070     - break;
2071     + switch (type) {
2072     case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
2073     tmp_action = create_ft_action(dev, dst);
2074     if (!tmp_action) {
2075     @@ -397,6 +387,32 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
2076     }
2077     }
2078    
2079     + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
2080     + list_for_each_entry(dst, &fte->node.children, node.list) {
2081     + u32 id;
2082     +
2083     + if (dst->dest_attr.type !=
2084     + MLX5_FLOW_DESTINATION_TYPE_COUNTER)
2085     + continue;
2086     +
2087     + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
2088     + err = -ENOSPC;
2089     + goto free_actions;
2090     + }
2091     +
2092     + id = dst->dest_attr.counter_id;
2093     + tmp_action =
2094     + mlx5dr_action_create_flow_counter(id);
2095     + if (!tmp_action) {
2096     + err = -ENOMEM;
2097     + goto free_actions;
2098     + }
2099     +
2100     + fs_dr_actions[fs_dr_num_actions++] = tmp_action;
2101     + actions[num_actions++] = tmp_action;
2102     + }
2103     + }
2104     +
2105     params.match_sz = match_sz;
2106     params.match_buf = (u64 *)fte->val;
2107    
2108     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
2109     index 150b3a144b83..3d3cca596116 100644
2110     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
2111     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
2112     @@ -8,6 +8,7 @@
2113     #include <linux/string.h>
2114     #include <linux/rhashtable.h>
2115     #include <linux/netdevice.h>
2116     +#include <linux/mutex.h>
2117     #include <net/net_namespace.h>
2118     #include <net/tc_act/tc_vlan.h>
2119    
2120     @@ -25,6 +26,7 @@ struct mlxsw_sp_acl {
2121     struct mlxsw_sp_fid *dummy_fid;
2122     struct rhashtable ruleset_ht;
2123     struct list_head rules;
2124     + struct mutex rules_lock; /* Protects rules list */
2125     struct {
2126     struct delayed_work dw;
2127     unsigned long interval; /* ms */
2128     @@ -701,7 +703,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
2129     goto err_ruleset_block_bind;
2130     }
2131    
2132     + mutex_lock(&mlxsw_sp->acl->rules_lock);
2133     list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
2134     + mutex_unlock(&mlxsw_sp->acl->rules_lock);
2135     block->rule_count++;
2136     block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
2137     return 0;
2138     @@ -723,7 +727,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
2139    
2140     block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
2141     ruleset->ht_key.block->rule_count--;
2142     + mutex_lock(&mlxsw_sp->acl->rules_lock);
2143     list_del(&rule->list);
2144     + mutex_unlock(&mlxsw_sp->acl->rules_lock);
2145     if (!ruleset->ht_key.chain_index &&
2146     mlxsw_sp_acl_ruleset_is_singular(ruleset))
2147     mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
2148     @@ -783,19 +789,18 @@ static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
2149     struct mlxsw_sp_acl_rule *rule;
2150     int err;
2151    
2152     - /* Protect internal structures from changes */
2153     - rtnl_lock();
2154     + mutex_lock(&acl->rules_lock);
2155     list_for_each_entry(rule, &acl->rules, list) {
2156     err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
2157     rule);
2158     if (err)
2159     goto err_rule_update;
2160     }
2161     - rtnl_unlock();
2162     + mutex_unlock(&acl->rules_lock);
2163     return 0;
2164    
2165     err_rule_update:
2166     - rtnl_unlock();
2167     + mutex_unlock(&acl->rules_lock);
2168     return err;
2169     }
2170    
2171     @@ -880,6 +885,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
2172     acl->dummy_fid = fid;
2173    
2174     INIT_LIST_HEAD(&acl->rules);
2175     + mutex_init(&acl->rules_lock);
2176     err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
2177     if (err)
2178     goto err_acl_ops_init;
2179     @@ -892,6 +898,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
2180     return 0;
2181    
2182     err_acl_ops_init:
2183     + mutex_destroy(&acl->rules_lock);
2184     mlxsw_sp_fid_put(fid);
2185     err_fid_get:
2186     rhashtable_destroy(&acl->ruleset_ht);
2187     @@ -908,6 +915,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
2188    
2189     cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
2190     mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
2191     + mutex_destroy(&acl->rules_lock);
2192     WARN_ON(!list_empty(&acl->rules));
2193     mlxsw_sp_fid_put(acl->dummy_fid);
2194     rhashtable_destroy(&acl->ruleset_ht);
2195     diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
2196     index 1c14c051ee52..63e7a058b7c6 100644
2197     --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
2198     +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
2199     @@ -299,22 +299,17 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
2200     u64 len;
2201     int err;
2202    
2203     + if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
2204     + this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
2205     + dev_kfree_skb_any(skb);
2206     + return NETDEV_TX_OK;
2207     + }
2208     +
2209     memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
2210    
2211     if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
2212     return NETDEV_TX_BUSY;
2213    
2214     - if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
2215     - struct sk_buff *skb_orig = skb;
2216     -
2217     - skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
2218     - if (!skb) {
2219     - this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
2220     - dev_kfree_skb_any(skb_orig);
2221     - return NETDEV_TX_OK;
2222     - }
2223     - dev_consume_skb_any(skb_orig);
2224     - }
2225     mlxsw_sx_txhdr_construct(skb, &tx_info);
2226     /* TX header is consumed by HW on the way so we shouldn't count its
2227     * bytes as being sent.
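For context: skb_cow_head(skb, headroom) returns 0 once the skb has at least that much writable private headroom, expanding the existing buffer in place (and un-sharing clones) only when necessary, which retires the open-coded allocate-copy-free dance via skb_realloc_headroom(). A hedged sketch of the resulting xmit prologue (hdr_len stands in for MLXSW_TXHDR_LEN):

static netdev_tx_t xmit_prologue(struct sk_buff *skb,
				 struct net_device *dev,
				 unsigned int hdr_len)
{
	if (skb_cow_head(skb, hdr_len)) {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;	/* skb consumed even on failure */
	}

	skb_push(skb, hdr_len);		/* headroom is now guaranteed */
	/* ... build the hardware header and hand off ... */
	return NETDEV_TX_OK;
}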
2228     diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
2229     index b339125b2f09..05e760444a92 100644
2230     --- a/drivers/net/ethernet/natsemi/sonic.c
2231     +++ b/drivers/net/ethernet/natsemi/sonic.c
2232     @@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev)
2233    
2234     netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
2235    
2236     + spin_lock_init(&lp->lock);
2237     +
2238     for (i = 0; i < SONIC_NUM_RRS; i++) {
2239     struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
2240     if (skb == NULL) {
2241     @@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev)
2242     return 0;
2243     }
2244    
2245     +/* Wait for the SONIC to become idle. */
2246     +static void sonic_quiesce(struct net_device *dev, u16 mask)
2247     +{
2248     + struct sonic_local * __maybe_unused lp = netdev_priv(dev);
2249     + int i;
2250     + u16 bits;
2251     +
2252     + for (i = 0; i < 1000; ++i) {
2253     + bits = SONIC_READ(SONIC_CMD) & mask;
2254     + if (!bits)
2255     + return;
2256     + if (irqs_disabled() || in_interrupt())
2257     + udelay(20);
2258     + else
2259     + usleep_range(100, 200);
2260     + }
2261     + WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
2262     +}
2263    
2264     /*
2265     * Close the SONIC device
2266     @@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev)
2267     /*
2268     * stop the SONIC, disable interrupts
2269     */
2270     + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
2271     + sonic_quiesce(dev, SONIC_CR_ALL);
2272     +
2273     SONIC_WRITE(SONIC_IMR, 0);
2274     SONIC_WRITE(SONIC_ISR, 0x7fff);
2275     SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
2276     @@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev)
2277     * put the Sonic into software-reset mode and
2278     * disable all interrupts before releasing DMA buffers
2279     */
2280     + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
2281     + sonic_quiesce(dev, SONIC_CR_ALL);
2282     +
2283     SONIC_WRITE(SONIC_IMR, 0);
2284     SONIC_WRITE(SONIC_ISR, 0x7fff);
2285     SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
2286     @@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev)
2287     * wake the tx queue
2288     * Concurrently with all of this, the SONIC is potentially writing to
2289     * the status flags of the TDs.
2290     - * Until some mutual exclusion is added, this code will not work with SMP. However,
2291     - * MIPS Jazz machines and m68k Macs were all uni-processor machines.
2292     */
2293    
2294     static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
2295     @@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
2296     struct sonic_local *lp = netdev_priv(dev);
2297     dma_addr_t laddr;
2298     int length;
2299     - int entry = lp->next_tx;
2300     + int entry;
2301     + unsigned long flags;
2302    
2303     netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
2304    
2305     @@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
2306     return NETDEV_TX_OK;
2307     }
2308    
2309     + spin_lock_irqsave(&lp->lock, flags);
2310     +
2311     + entry = lp->next_tx;
2312     +
2313     sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
2314     sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
2315     sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
2316     @@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
2317     sonic_tda_put(dev, entry, SONIC_TD_LINK,
2318     sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
2319    
2320     - /*
2321     - * Must set tx_skb[entry] only after clearing status, and
2322     - * before clearing EOL and before stopping queue
2323     - */
2324     wmb();
2325     lp->tx_len[entry] = length;
2326     lp->tx_laddr[entry] = laddr;
2327     @@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
2328    
2329     SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
2330    
2331     + spin_unlock_irqrestore(&lp->lock, flags);
2332     +
2333     return NETDEV_TX_OK;
2334     }
2335    
2336     @@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2337     struct net_device *dev = dev_id;
2338     struct sonic_local *lp = netdev_priv(dev);
2339     int status;
2340     + unsigned long flags;
2341     +
2342     + /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
2343     + * with sonic_send_packet() so that the two functions can share state.
2344     + * Secondly, it makes sonic_interrupt() re-entrant, as that is required
2345     + * by macsonic which must use two IRQs with different priority levels.
2346     + */
2347     + spin_lock_irqsave(&lp->lock, flags);
2348     +
2349     + status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
2350     + if (!status) {
2351     + spin_unlock_irqrestore(&lp->lock, flags);
2352    
2353     - if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
2354     return IRQ_NONE;
2355     + }
2356    
2357     do {
2358     + SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */
2359     +
2360     if (status & SONIC_INT_PKTRX) {
2361     netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
2362     sonic_rx(dev); /* got packet(s) */
2363     - SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
2364     }
2365    
2366     if (status & SONIC_INT_TXDN) {
2367     @@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2368     int td_status;
2369     int freed_some = 0;
2370    
2371     - /* At this point, cur_tx is the index of a TD that is one of:
2372     - * unallocated/freed (status set & tx_skb[entry] clear)
2373     - * allocated and sent (status set & tx_skb[entry] set )
2374     - * allocated and not yet sent (status clear & tx_skb[entry] set )
2375     - * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
2376     + /* The state of a Transmit Descriptor may be inferred
2377     + * from { tx_skb[entry], td_status } as follows.
2378     + * { clear, clear } => the TD has never been used
2379     + * { set, clear } => the TD was handed to SONIC
2380     + * { set, set } => the TD was handed back
2381     + * { clear, set } => the TD is available for re-use
2382     */
2383    
2384     netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
2385     @@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2386     if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
2387     break;
2388    
2389     - if (td_status & 0x0001) {
2390     + if (td_status & SONIC_TCR_PTX) {
2391     lp->stats.tx_packets++;
2392     lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
2393     } else {
2394     - lp->stats.tx_errors++;
2395     - if (td_status & 0x0642)
2396     + if (td_status & (SONIC_TCR_EXD |
2397     + SONIC_TCR_EXC | SONIC_TCR_BCM))
2398     lp->stats.tx_aborted_errors++;
2399     - if (td_status & 0x0180)
2400     + if (td_status &
2401     + (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
2402     lp->stats.tx_carrier_errors++;
2403     - if (td_status & 0x0020)
2404     + if (td_status & SONIC_TCR_OWC)
2405     lp->stats.tx_window_errors++;
2406     - if (td_status & 0x0004)
2407     + if (td_status & SONIC_TCR_FU)
2408     lp->stats.tx_fifo_errors++;
2409     }
2410    
2411     @@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2412     if (freed_some || lp->tx_skb[entry] == NULL)
2413     netif_wake_queue(dev); /* The ring is no longer full */
2414     lp->cur_tx = entry;
2415     - SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
2416     }
2417    
2418     /*
2419     @@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2420     if (status & SONIC_INT_RFO) {
2421     netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
2422     __func__);
2423     - lp->stats.rx_fifo_errors++;
2424     - SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
2425     }
2426     if (status & SONIC_INT_RDE) {
2427     netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
2428     __func__);
2429     - lp->stats.rx_dropped++;
2430     - SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
2431     }
2432     if (status & SONIC_INT_RBAE) {
2433     netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
2434     __func__);
2435     - lp->stats.rx_dropped++;
2436     - SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
2437     }
2438    
2439     /* counter overruns; all counters are 16bit wide */
2440     - if (status & SONIC_INT_FAE) {
2441     + if (status & SONIC_INT_FAE)
2442     lp->stats.rx_frame_errors += 65536;
2443     - SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
2444     - }
2445     - if (status & SONIC_INT_CRC) {
2446     + if (status & SONIC_INT_CRC)
2447     lp->stats.rx_crc_errors += 65536;
2448     - SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
2449     - }
2450     - if (status & SONIC_INT_MP) {
2451     + if (status & SONIC_INT_MP)
2452     lp->stats.rx_missed_errors += 65536;
2453     - SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
2454     - }
2455    
2456     /* transmit error */
2457     if (status & SONIC_INT_TXER) {
2458     - if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
2459     - netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
2460     - __func__);
2461     - SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
2462     + u16 tcr = SONIC_READ(SONIC_TCR);
2463     +
2464     + netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
2465     + __func__, tcr);
2466     +
2467     + if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
2468     + SONIC_TCR_FU | SONIC_TCR_BCM)) {
2469     + /* Aborted transmission. Try again. */
2470     + netif_stop_queue(dev);
2471     + SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
2472     + }
2473     }
2474    
2475     /* bus retry */
2476     @@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2477     /* ... to help debug DMA problems causing endless interrupts. */
2478     /* Bounce the eth interface to turn on the interrupt again. */
2479     SONIC_WRITE(SONIC_IMR, 0);
2480     - SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
2481     }
2482    
2483     - /* load CAM done */
2484     - if (status & SONIC_INT_LCD)
2485     - SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
2486     - } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
2487     + status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
2488     + } while (status);
2489     +
2490     + spin_unlock_irqrestore(&lp->lock, flags);
2491     +
2492     return IRQ_HANDLED;
2493     }
2494    
2495     +/* Return the array index corresponding to a given Receive Buffer pointer. */
2496     +static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
2497     + unsigned int last)
2498     +{
2499     + unsigned int i = last;
2500     +
2501     + do {
2502     + i = (i + 1) & SONIC_RRS_MASK;
2503     + if (addr == lp->rx_laddr[i])
2504     + return i;
2505     + } while (i != last);
2506     +
2507     + return -ENOENT;
2508     +}
2509     +
2510     +/* Allocate and map a new skb to be used as a receive buffer. */
2511     +static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
2512     + struct sk_buff **new_skb, dma_addr_t *new_addr)
2513     +{
2514     + *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
2515     + if (!*new_skb)
2516     + return false;
2517     +
2518     + if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
2519     + skb_reserve(*new_skb, 2);
2520     +
2521     + *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
2522     + SONIC_RBSIZE, DMA_FROM_DEVICE);
2523     + if (!*new_addr) {
2524     + dev_kfree_skb(*new_skb);
2525     + *new_skb = NULL;
2526     + return false;
2527     + }
2528     +
2529     + return true;
2530     +}
2531     +
2532     +/* Place a new receive resource in the Receive Resource Area and update RWP. */
2533     +static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
2534     + dma_addr_t old_addr, dma_addr_t new_addr)
2535     +{
2536     + unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
2537     + unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
2538     + u32 buf;
2539     +
2540     + /* The resources in the range [RRP, RWP) belong to the SONIC. This loop
2541     + * scans the other resources in the RRA, those in the range [RWP, RRP).
2542     + */
2543     + do {
2544     + buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
2545     + sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);
2546     +
2547     + if (buf == old_addr)
2548     + break;
2549     +
2550     + entry = (entry + 1) & SONIC_RRS_MASK;
2551     + } while (entry != end);
2552     +
2553     + WARN_ONCE(buf != old_addr, "failed to find resource!\n");
2554     +
2555     + sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
2556     + sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);
2557     +
2558     + entry = (entry + 1) & SONIC_RRS_MASK;
2559     +
2560     + SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
2561     +}
2562     +
2563     /*
2564     * We have a good packet(s), pass it/them up the network stack.
2565     */
2566     static void sonic_rx(struct net_device *dev)
2567     {
2568     struct sonic_local *lp = netdev_priv(dev);
2569     - int status;
2570     int entry = lp->cur_rx;
2571     + int prev_entry = lp->eol_rx;
2572     + bool rbe = false;
2573    
2574     while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
2575     - struct sk_buff *used_skb;
2576     - struct sk_buff *new_skb;
2577     - dma_addr_t new_laddr;
2578     - u16 bufadr_l;
2579     - u16 bufadr_h;
2580     - int pkt_len;
2581     -
2582     - status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
2583     - if (status & SONIC_RCR_PRX) {
2584     - /* Malloc up new buffer. */
2585     - new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
2586     - if (new_skb == NULL) {
2587     - lp->stats.rx_dropped++;
2588     + u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
2589     +
2590     + /* If the RD has LPKT set, the chip has finished with the RB */
2591     + if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
2592     + struct sk_buff *new_skb;
2593     + dma_addr_t new_laddr;
2594     + u32 addr = (sonic_rda_get(dev, entry,
2595     + SONIC_RD_PKTPTR_H) << 16) |
2596     + sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
2597     + int i = index_from_addr(lp, addr, entry);
2598     +
2599     + if (i < 0) {
2600     + WARN_ONCE(1, "failed to find buffer!\n");
2601     break;
2602     }
2603     - /* provide 16 byte IP header alignment unless DMA requires otherwise */
2604     - if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
2605     - skb_reserve(new_skb, 2);
2606     -
2607     - new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
2608     - SONIC_RBSIZE, DMA_FROM_DEVICE);
2609     - if (!new_laddr) {
2610     - dev_kfree_skb(new_skb);
2611     - printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
2612     +
2613     + if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
2614     + struct sk_buff *used_skb = lp->rx_skb[i];
2615     + int pkt_len;
2616     +
2617     + /* Pass the used buffer up the stack */
2618     + dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
2619     + DMA_FROM_DEVICE);
2620     +
2621     + pkt_len = sonic_rda_get(dev, entry,
2622     + SONIC_RD_PKTLEN);
2623     + skb_trim(used_skb, pkt_len);
2624     + used_skb->protocol = eth_type_trans(used_skb,
2625     + dev);
2626     + netif_rx(used_skb);
2627     + lp->stats.rx_packets++;
2628     + lp->stats.rx_bytes += pkt_len;
2629     +
2630     + lp->rx_skb[i] = new_skb;
2631     + lp->rx_laddr[i] = new_laddr;
2632     + } else {
2633     + /* Failed to obtain a new buffer so re-use it */
2634     + new_laddr = addr;
2635     lp->stats.rx_dropped++;
2636     - break;
2637     }
2638     -
2639     - /* now we have a new skb to replace it, pass the used one up the stack */
2640     - dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
2641     - used_skb = lp->rx_skb[entry];
2642     - pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
2643     - skb_trim(used_skb, pkt_len);
2644     - used_skb->protocol = eth_type_trans(used_skb, dev);
2645     - netif_rx(used_skb);
2646     - lp->stats.rx_packets++;
2647     - lp->stats.rx_bytes += pkt_len;
2648     -
2649     - /* and insert the new skb */
2650     - lp->rx_laddr[entry] = new_laddr;
2651     - lp->rx_skb[entry] = new_skb;
2652     -
2653     - bufadr_l = (unsigned long)new_laddr & 0xffff;
2654     - bufadr_h = (unsigned long)new_laddr >> 16;
2655     - sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
2656     - sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
2657     - } else {
2658     - /* This should only happen, if we enable accepting broken packets. */
2659     - lp->stats.rx_errors++;
2660     - if (status & SONIC_RCR_FAER)
2661     - lp->stats.rx_frame_errors++;
2662     - if (status & SONIC_RCR_CRCR)
2663     - lp->stats.rx_crc_errors++;
2664     - }
2665     - if (status & SONIC_RCR_LPKT) {
2666     - /*
2667     - * this was the last packet out of the current receive buffer
2668     - * give the buffer back to the SONIC
2669     + /* If RBE is already asserted when RWP advances then
2670     + * it's safe to clear RBE after processing this packet.
2671     */
2672     - lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
2673     - if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
2674     - SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
2675     - if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
2676     - netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
2677     - __func__);
2678     - SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
2679     - }
2680     - } else
2681     - printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
2682     - dev->name);
2683     + rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
2684     + sonic_update_rra(dev, lp, addr, new_laddr);
2685     + }
2686     /*
2687     * give back the descriptor
2688     */
2689     - sonic_rda_put(dev, entry, SONIC_RD_LINK,
2690     - sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
2691     + sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
2692     sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
2693     - sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
2694     - sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
2695     - lp->eol_rx = entry;
2696     - lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
2697     +
2698     + prev_entry = entry;
2699     + entry = (entry + 1) & SONIC_RDS_MASK;
2700     + }
2701     +
2702     + lp->cur_rx = entry;
2703     +
2704     + if (prev_entry != lp->eol_rx) {
2705     + /* Advance the EOL flag to put descriptors back into service */
2706     + sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
2707     + sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
2708     + sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
2709     + sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
2710     + lp->eol_rx = prev_entry;
2711     }
2712     +
2713     + if (rbe)
2714     + SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
2715     /*
2716     * If any worth-while packets have been received, netif_rx()
2717     * has done a mark_bh(NET_BH) for us and will work on them
2718     @@ -550,6 +643,8 @@ static void sonic_multicast_list(struct net_device *dev)
2719     (netdev_mc_count(dev) > 15)) {
2720     rcr |= SONIC_RCR_AMC;
2721     } else {
2722     + unsigned long flags;
2723     +
2724     netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
2725     netdev_mc_count(dev));
2726     sonic_set_cam_enable(dev, 1); /* always enable our own address */
2727     @@ -563,9 +658,14 @@ static void sonic_multicast_list(struct net_device *dev)
2728     i++;
2729     }
2730     SONIC_WRITE(SONIC_CDC, 16);
2731     - /* issue Load CAM command */
2732     SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
2733     +
2734     + /* LCAM and TXP commands can't be used simultaneously */
2735     + spin_lock_irqsave(&lp->lock, flags);
2736     + sonic_quiesce(dev, SONIC_CR_TXP);
2737     SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
2738     + sonic_quiesce(dev, SONIC_CR_LCAM);
2739     + spin_unlock_irqrestore(&lp->lock, flags);
2740     }
2741     }
2742    
2743     @@ -580,7 +680,6 @@ static void sonic_multicast_list(struct net_device *dev)
2744     */
2745     static int sonic_init(struct net_device *dev)
2746     {
2747     - unsigned int cmd;
2748     struct sonic_local *lp = netdev_priv(dev);
2749     int i;
2750    
2751     @@ -592,12 +691,16 @@ static int sonic_init(struct net_device *dev)
2752     SONIC_WRITE(SONIC_ISR, 0x7fff);
2753     SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
2754    
2755     + /* While in reset mode, clear CAM Enable register */
2756     + SONIC_WRITE(SONIC_CE, 0);
2757     +
2758     /*
2759     * clear software reset flag, disable receiver, clear and
2760     * enable interrupts, then completely initialize the SONIC
2761     */
2762     SONIC_WRITE(SONIC_CMD, 0);
2763     - SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
2764     + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
2765     + sonic_quiesce(dev, SONIC_CR_ALL);
2766    
2767     /*
2768     * initialize the receive resource area
2769     @@ -615,15 +718,10 @@ static int sonic_init(struct net_device *dev)
2770     }
2771    
2772     /* initialize all RRA registers */
2773     - lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
2774     - SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
2775     - lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
2776     - SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
2777     -
2778     - SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
2779     - SONIC_WRITE(SONIC_REA, lp->rra_end);
2780     - SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
2781     - SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
2782     + SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
2783     + SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
2784     + SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
2785     + SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
2786     SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
2787     SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
2788    
2789     @@ -631,14 +729,7 @@ static int sonic_init(struct net_device *dev)
2790     netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
2791    
2792     SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
2793     - i = 0;
2794     - while (i++ < 100) {
2795     - if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
2796     - break;
2797     - }
2798     -
2799     - netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
2800     - SONIC_READ(SONIC_CMD), i);
2801     + sonic_quiesce(dev, SONIC_CR_RRRA);
2802    
2803     /*
2804     * Initialize the receive descriptors so that they
2805     @@ -713,28 +804,17 @@ static int sonic_init(struct net_device *dev)
2806     * load the CAM
2807     */
2808     SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
2809     -
2810     - i = 0;
2811     - while (i++ < 100) {
2812     - if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
2813     - break;
2814     - }
2815     - netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
2816     - SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
2817     + sonic_quiesce(dev, SONIC_CR_LCAM);
2818    
2819     /*
2820     * enable receiver, disable loopback
2821     * and enable all interrupts
2822     */
2823     - SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
2824     SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
2825     SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
2826     SONIC_WRITE(SONIC_ISR, 0x7fff);
2827     SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
2828     -
2829     - cmd = SONIC_READ(SONIC_CMD);
2830     - if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
2831     - printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
2832     + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);
2833    
2834     netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
2835     SONIC_READ(SONIC_CMD));
2836     diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
2837     index 2b27f7049acb..1df6d2f06cc4 100644
2838     --- a/drivers/net/ethernet/natsemi/sonic.h
2839     +++ b/drivers/net/ethernet/natsemi/sonic.h
2840     @@ -110,6 +110,9 @@
2841     #define SONIC_CR_TXP 0x0002
2842     #define SONIC_CR_HTX 0x0001
2843    
2844     +#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \
2845     + SONIC_CR_RXEN | SONIC_CR_TXP)
2846     +
2847     /*
2848     * SONIC data configuration bits
2849     */
2850     @@ -175,6 +178,7 @@
2851     #define SONIC_TCR_NCRS 0x0100
2852     #define SONIC_TCR_CRLS 0x0080
2853     #define SONIC_TCR_EXC 0x0040
2854     +#define SONIC_TCR_OWC 0x0020
2855     #define SONIC_TCR_PMB 0x0008
2856     #define SONIC_TCR_FU 0x0004
2857     #define SONIC_TCR_BCM 0x0002
2858     @@ -274,8 +278,9 @@
2859     #define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
2860     #define SONIC_NUM_TDS 16 /* number of transmit descriptors */
2861    
2862     -#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
2863     -#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
2864     +#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1)
2865     +#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1)
2866     +#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1)
2867    
2868     #define SONIC_RBSIZE 1520 /* size of one resource buffer */
2869    
2870     @@ -312,8 +317,6 @@ struct sonic_local {
2871     u32 rda_laddr; /* logical DMA address of RDA */
2872     dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
2873     dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
2874     - unsigned int rra_end;
2875     - unsigned int cur_rwp;
2876     unsigned int cur_rx;
2877     unsigned int cur_tx; /* first unacked transmit packet */
2878     unsigned int eol_rx;
2879     @@ -322,6 +325,7 @@ struct sonic_local {
2880     int msg_enable;
2881     struct device *device; /* generic device */
2882     struct net_device_stats stats;
2883     + spinlock_t lock;
2884     };
2885    
2886     #define TX_TIMEOUT (3 * HZ)
2887     @@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev);
2888     as far as we can tell. */
2889     /* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put()
2890     is a much better name. */
2891     -static inline void sonic_buf_put(void* base, int bitmode,
2892     +static inline void sonic_buf_put(u16 *base, int bitmode,
2893     int offset, __u16 val)
2894     {
2895     if (bitmode)
2896     #ifdef __BIG_ENDIAN
2897     - ((__u16 *) base + (offset*2))[1] = val;
2898     + __raw_writew(val, base + (offset * 2) + 1);
2899     #else
2900     - ((__u16 *) base + (offset*2))[0] = val;
2901     + __raw_writew(val, base + (offset * 2) + 0);
2902     #endif
2903     else
2904     - ((__u16 *) base)[offset] = val;
2905     + __raw_writew(val, base + (offset * 1) + 0);
2906     }
2907    
2908     -static inline __u16 sonic_buf_get(void* base, int bitmode,
2909     +static inline __u16 sonic_buf_get(u16 *base, int bitmode,
2910     int offset)
2911     {
2912     if (bitmode)
2913     #ifdef __BIG_ENDIAN
2914     - return ((volatile __u16 *) base + (offset*2))[1];
2915     + return __raw_readw(base + (offset * 2) + 1);
2916     #else
2917     - return ((volatile __u16 *) base + (offset*2))[0];
2918     + return __raw_readw(base + (offset * 2) + 0);
2919     #endif
2920     else
2921     - return ((volatile __u16 *) base)[offset];
2922     + return __raw_readw(base + (offset * 1) + 0);
2923     }
2924    
2925     /* Inlines that you should actually use for reading/writing DMA buffers */
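
Two things change in the accessors above: the prototypes take u16 * instead of void * (dropping the repeated casts), and every access goes through __raw_writew()/__raw_readw(). That gives the descriptor reads and writes volatile semantics — the compiler may neither elide, duplicate, nor reorder them — which matters because the SONIC reads the same memory by DMA. A rough userspace equivalent, assuming ordinary cache-coherent memory, forces each access through a volatile-qualified pointer:

#include <stdint.h>

/* Userspace stand-ins for __raw_writew()/__raw_readw(): each access is
 * a single, un-elidable 16-bit load or store at the given address. */
static inline void raw_writew(uint16_t val, uint16_t *addr)
{
	*(volatile uint16_t *)addr = val;
}

static inline uint16_t raw_readw(const uint16_t *addr)
{
	return *(const volatile uint16_t *)addr;
}

Like their kernel counterparts, these carry no memory barriers; a caller that needs ordering against the device must still add it explicitly.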
2926     @@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
2927     (entry * SIZEOF_SONIC_RR) + offset);
2928     }
2929    
2930     +static inline u16 sonic_rr_addr(struct net_device *dev, int entry)
2931     +{
2932     + struct sonic_local *lp = netdev_priv(dev);
2933     +
2934     + return lp->rra_laddr +
2935     + entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
2936     +}
2937     +
2938     +static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr)
2939     +{
2940     + struct sonic_local *lp = netdev_priv(dev);
2941     +
2942     + return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR *
2943     + SONIC_BUS_SCALE(lp->dma_bitmode));
2944     +}
2945     +
2946     static const char version[] =
2947     "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
2948    
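
sonic_rr_addr() and sonic_rr_entry() replace the cached rra_end/cur_rwp fields with pure arithmetic between a ring index and the low 16 bits of its DMA address. A self-contained check of that round trip, assuming the same scaling scheme as the driver (a per-entry size in words times a 1x/2x bus-width factor; the constants here are illustrative):

#include <assert.h>
#include <stdint.h>

#define SIZEOF_RR  4                            /* words per resource entry */
#define BUS_SCALE(bitmode) ((bitmode) ? 4 : 2)  /* bytes per word */

static uint16_t rr_addr(uint32_t rra_laddr, int bitmode, int entry)
{
	return rra_laddr + entry * SIZEOF_RR * BUS_SCALE(bitmode);
}

static int rr_entry(uint32_t rra_laddr, int bitmode, uint16_t addr)
{
	return (addr - (uint16_t)rra_laddr) /
	       (SIZEOF_RR * BUS_SCALE(bitmode));
}

int main(void)
{
	uint32_t base = 0x00ab8000;     /* arbitrary example base address */

	for (int e = 0; e < 16; e++)
		assert(rr_entry(base, 1, rr_addr(base, 1, e)) == e);
	return 0;
}

The deliberate truncation to u16 matches the hardware: the SONIC's RRA pointer registers only hold the low halfword, with URRA supplying the upper half.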
2949     diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
2950     index f6222ada6818..9b3ba98726d7 100644
2951     --- a/drivers/net/gtp.c
2952     +++ b/drivers/net/gtp.c
2953     @@ -804,19 +804,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
2954     return NULL;
2955     }
2956    
2957     - if (sock->sk->sk_protocol != IPPROTO_UDP) {
2958     + sk = sock->sk;
2959     + if (sk->sk_protocol != IPPROTO_UDP ||
2960     + sk->sk_type != SOCK_DGRAM ||
2961     + (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
2962     pr_debug("socket fd=%d not UDP\n", fd);
2963     sk = ERR_PTR(-EINVAL);
2964     goto out_sock;
2965     }
2966    
2967     - lock_sock(sock->sk);
2968     - if (sock->sk->sk_user_data) {
2969     + lock_sock(sk);
2970     + if (sk->sk_user_data) {
2971     sk = ERR_PTR(-EBUSY);
2972     goto out_rel_sock;
2973     }
2974    
2975     - sk = sock->sk;
2976     sock_hold(sk);
2977    
2978     tuncfg.sk_user_data = gtp;
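
The gtp fix tightens gtp_encap_enable_socket() to reject any fd that is not an inet UDP datagram socket before it is ever treated as one, and to route all further accesses through the saved sk pointer. The same validation can be expressed from userspace with getsockopt(); this sketch, assuming Linux's SO_DOMAIN/SO_TYPE/SO_PROTOCOL options, checks a descriptor the way the kernel side now does:

#include <errno.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Return 0 if fd is an AF_INET/AF_INET6 UDP datagram socket, else <0. */
static int check_udp_socket(int fd)
{
	int domain, type, proto;
	socklen_t len;

	len = sizeof(domain);
	if (getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len) < 0)
		return -errno;
	len = sizeof(type);
	if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &len) < 0)
		return -errno;
	len = sizeof(proto);
	if (getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &proto, &len) < 0)
		return -errno;

	if (proto != IPPROTO_UDP || type != SOCK_DGRAM ||
	    (domain != AF_INET && domain != AF_INET6))
		return -EINVAL;
	return 0;
}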
2979     diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
2980     index 2a91c192659f..61d7e0d1d77d 100644
2981     --- a/drivers/net/slip/slip.c
2982     +++ b/drivers/net/slip/slip.c
2983     @@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work)
2984     */
2985     static void slip_write_wakeup(struct tty_struct *tty)
2986     {
2987     - struct slip *sl = tty->disc_data;
2988     + struct slip *sl;
2989     +
2990     + rcu_read_lock();
2991     + sl = rcu_dereference(tty->disc_data);
2992     + if (!sl)
2993     + goto out;
2994    
2995     schedule_work(&sl->tx_work);
2996     +out:
2997     + rcu_read_unlock();
2998     }
2999    
3000     static void sl_tx_timeout(struct net_device *dev)
3001     @@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty)
3002     return;
3003    
3004     spin_lock_bh(&sl->lock);
3005     - tty->disc_data = NULL;
3006     + rcu_assign_pointer(tty->disc_data, NULL);
3007     sl->tty = NULL;
3008     spin_unlock_bh(&sl->lock);
3009    
3010     + synchronize_rcu();
3011     flush_work(&sl->tx_work);
3012    
3013     /* VSV = very important to remove timers */
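
The slip race is a classic teardown hazard: slip_close() NULLs tty->disc_data and frees the work item while a concurrent slip_write_wakeup() may already have loaded the old pointer. Converting the field to RCU closes the window — the reader dereferences under rcu_read_lock(), and the writer's synchronize_rcu() guarantees every such reader has finished before flush_work() and teardown proceed. Condensed to its essentials (a kernel-style sketch of the pattern above, not a standalone module):

/* Reader: runs in atomic context, must never block. */
static void sl_wakeup(struct tty_struct *tty)
{
	struct slip *sl;

	rcu_read_lock();
	sl = rcu_dereference(tty->disc_data);
	if (sl)                        /* still published? */
		schedule_work(&sl->tx_work);
	rcu_read_unlock();
}

/* Writer: unpublish, wait out readers, then tear down. */
static void sl_teardown(struct tty_struct *tty, struct slip *sl)
{
	rcu_assign_pointer(tty->disc_data, NULL);
	synchronize_rcu();             /* no reader can still hold 'sl' */
	flush_work(&sl->tx_work);      /* safe: nothing can requeue it  */
}

The ordering is the whole point: synchronize_rcu() must come before flush_work(), otherwise a late reader could requeue the work after the flush and the use-after-free returns.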
3014     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
3015     index 16564ebcde50..69f553a028ee 100644
3016     --- a/drivers/net/tun.c
3017     +++ b/drivers/net/tun.c
3018     @@ -1936,6 +1936,10 @@ drop:
3019     if (ret != XDP_PASS) {
3020     rcu_read_unlock();
3021     local_bh_enable();
3022     + if (frags) {
3023     + tfile->napi.skb = NULL;
3024     + mutex_unlock(&tfile->napi_mutex);
3025     + }
3026     return total_len;
3027     }
3028     }
3029     diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
3030     index c232f1612083..0170a441208a 100644
3031     --- a/drivers/net/usb/lan78xx.c
3032     +++ b/drivers/net/usb/lan78xx.c
3033     @@ -20,6 +20,7 @@
3034     #include <linux/mdio.h>
3035     #include <linux/phy.h>
3036     #include <net/ip6_checksum.h>
3037     +#include <net/vxlan.h>
3038     #include <linux/interrupt.h>
3039     #include <linux/irqdomain.h>
3040     #include <linux/irq.h>
3041     @@ -3668,6 +3669,19 @@ static void lan78xx_tx_timeout(struct net_device *net)
3042     tasklet_schedule(&dev->bh);
3043     }
3044    
3045     +static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3046     + struct net_device *netdev,
3047     + netdev_features_t features)
3048     +{
3049     + if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3050     + features &= ~NETIF_F_GSO_MASK;
3051     +
3052     + features = vlan_features_check(skb, features);
3053     + features = vxlan_features_check(skb, features);
3054     +
3055     + return features;
3056     +}
3057     +
3058     static const struct net_device_ops lan78xx_netdev_ops = {
3059     .ndo_open = lan78xx_open,
3060     .ndo_stop = lan78xx_stop,
3061     @@ -3681,6 +3695,7 @@ static const struct net_device_ops lan78xx_netdev_ops = {
3062     .ndo_set_features = lan78xx_set_features,
3063     .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3064     .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3065     + .ndo_features_check = lan78xx_features_check,
3066     };
3067    
3068     static void lan78xx_stat_monitor(struct timer_list *t)
3069     diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
3070     index f43c06569ea1..c4c8f1b62e1e 100644
3071     --- a/drivers/net/wireless/cisco/airo.c
3072     +++ b/drivers/net/wireless/cisco/airo.c
3073     @@ -7790,16 +7790,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
3074     case AIROGVLIST: ridcode = RID_APLIST; break;
3075     case AIROGDRVNAM: ridcode = RID_DRVNAME; break;
3076     case AIROGEHTENC: ridcode = RID_ETHERENCAP; break;
3077     - case AIROGWEPKTMP: ridcode = RID_WEP_TEMP;
3078     - /* Only super-user can read WEP keys */
3079     - if (!capable(CAP_NET_ADMIN))
3080     - return -EPERM;
3081     - break;
3082     - case AIROGWEPKNV: ridcode = RID_WEP_PERM;
3083     - /* Only super-user can read WEP keys */
3084     - if (!capable(CAP_NET_ADMIN))
3085     - return -EPERM;
3086     - break;
3087     + case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; break;
3088     + case AIROGWEPKNV: ridcode = RID_WEP_PERM; break;
3089     case AIROGSTAT: ridcode = RID_STATUS; break;
3090     case AIROGSTATSD32: ridcode = RID_STATSDELTA; break;
3091     case AIROGSTATSC32: ridcode = RID_STATS; break;
3092     @@ -7813,7 +7805,13 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
3093     return -EINVAL;
3094     }
3095    
3096     - if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
3097     + if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) {
3098     + /* Only super-user can read WEP keys */
3099     + if (!capable(CAP_NET_ADMIN))
3100     + return -EPERM;
3101     + }
3102     +
3103     + if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
3104     return -ENOMEM;
3105    
3106     PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
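
Two changes land in readrids(): the CAP_NET_ADMIN test is hoisted so both WEP RID cases share it, and the RID buffer moves from kmalloc() to kzalloc(). The zeroing matters because the device read may fill fewer than RIDSIZE bytes while the full buffer is later copied to userspace; with kmalloc(), the tail would expose stale kernel heap. A userspace demonstration of the same leak pattern, using malloc/calloc as stand-ins:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RIDSIZE 2048

/* Stand-in for a device read that fills only part of the buffer. */
static void device_read(unsigned char *buf)
{
	memset(buf, 0xAB, 100);    /* only 100 of RIDSIZE bytes written */
}

int main(void)
{
	unsigned char *leaky = malloc(RIDSIZE);     /* like kmalloc() */
	unsigned char *safe  = calloc(1, RIDSIZE);  /* like kzalloc() */

	if (!leaky || !safe)
		return 1;
	device_read(leaky);
	device_read(safe);
	/* Copying all RIDSIZE bytes of 'leaky' out would expose whatever
	 * the allocator left at offsets 100..RIDSIZE-1; 'safe' exposes
	 * only zeros. */
	printf("leaky[100]=%02x (indeterminate), safe[100]=%02x\n",
	       leaky[100], safe[100]);
	free(leaky);
	free(safe);
	return 0;
}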
3107     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
3108     index 60aff2ecec12..58df25e2fb32 100644
3109     --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
3110     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
3111     @@ -154,5 +154,6 @@
3112     #define IWL_MVM_D3_DEBUG false
3113     #define IWL_MVM_USE_TWT false
3114     #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
3115     +#define IWL_MVM_USE_NSSN_SYNC 0
3116    
3117     #endif /* __MVM_CONSTANTS_H */
3118     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3119     index d31f96c3f925..49aeab7c27a2 100644
3120     --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3121     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3122     @@ -742,6 +742,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
3123     return ret;
3124     }
3125    
3126     +static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
3127     + struct ieee80211_sta *sta)
3128     +{
3129     + if (likely(sta)) {
3130     + if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0))
3131     + return;
3132     + } else {
3133     + if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0))
3134     + return;
3135     + }
3136     +
3137     + ieee80211_free_txskb(mvm->hw, skb);
3138     +}
3139     +
3140     static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
3141     struct ieee80211_tx_control *control,
3142     struct sk_buff *skb)
3143     @@ -785,14 +799,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
3144     }
3145     }
3146    
3147     - if (sta) {
3148     - if (iwl_mvm_tx_skb(mvm, skb, sta))
3149     - goto drop;
3150     - return;
3151     - }
3152     -
3153     - if (iwl_mvm_tx_skb_non_sta(mvm, skb))
3154     - goto drop;
3155     + iwl_mvm_tx_skb(mvm, skb, sta);
3156     return;
3157     drop:
3158     ieee80211_free_txskb(hw, skb);
3159     @@ -842,10 +849,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
3160     break;
3161     }
3162    
3163     - if (!txq->sta)
3164     - iwl_mvm_tx_skb_non_sta(mvm, skb);
3165     - else
3166     - iwl_mvm_tx_skb(mvm, skb, txq->sta);
3167     + iwl_mvm_tx_skb(mvm, skb, txq->sta);
3168     }
3169     } while (atomic_dec_return(&mvmtxq->tx_request));
3170     rcu_read_unlock();
3171     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
3172     index 5ca50f39a023..5f1ecbb6fb71 100644
3173     --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
3174     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
3175     @@ -1508,8 +1508,8 @@ int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
3176     int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
3177     u16 len, const void *data,
3178     u32 *status);
3179     -int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
3180     - struct ieee80211_sta *sta);
3181     +int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
3182     + struct ieee80211_sta *sta);
3183     int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
3184     void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
3185     struct iwl_tx_cmd *tx_cmd,
3186     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
3187     index 77b03b757193..a6e2a30eb310 100644
3188     --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
3189     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
3190     @@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
3191    
3192     static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
3193     {
3194     - struct iwl_mvm_rss_sync_notif notif = {
3195     - .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
3196     - .metadata.sync = 0,
3197     - .nssn_sync.baid = baid,
3198     - .nssn_sync.nssn = nssn,
3199     - };
3200     -
3201     - iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
3202     + if (IWL_MVM_USE_NSSN_SYNC) {
3203     + struct iwl_mvm_rss_sync_notif notif = {
3204     + .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
3205     + .metadata.sync = 0,
3206     + .nssn_sync.baid = baid,
3207     + .nssn_sync.nssn = nssn,
3208     + };
3209     +
3210     + iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif,
3211     + sizeof(notif));
3212     + }
3213     }
3214    
3215     #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
3216     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
3217     index fcafa22ec6ce..8aa567d7912c 100644
3218     --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
3219     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
3220     @@ -1220,7 +1220,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
3221     cmd_size = sizeof(struct iwl_scan_config_v2);
3222     else
3223     cmd_size = sizeof(struct iwl_scan_config_v1);
3224     - cmd_size += num_channels;
3225     + cmd_size += mvm->fw->ucode_capa.n_scan_channels;
3226    
3227     cfg = kzalloc(cmd_size, GFP_KERNEL);
3228     if (!cfg)
3229     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
3230     index e3b2a2bf3863..d9d82f6b5e87 100644
3231     --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
3232     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
3233     @@ -1151,7 +1151,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
3234     if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
3235     iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
3236     spin_unlock(&mvmsta->lock);
3237     - return 0;
3238     + return -1;
3239     }
3240    
3241     if (!iwl_mvm_has_new_tx_api(mvm)) {
3242     @@ -1203,8 +1203,8 @@ drop:
3243     return -1;
3244     }
3245    
3246     -int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
3247     - struct ieee80211_sta *sta)
3248     +int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
3249     + struct ieee80211_sta *sta)
3250     {
3251     struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3252     struct ieee80211_tx_info info;
3253     diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
3254     index 041dd75ac72b..64c74acadb99 100644
3255     --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
3256     +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
3257     @@ -1537,13 +1537,13 @@ out:
3258    
3259     napi = &rxq->napi;
3260     if (napi->poll) {
3261     + napi_gro_flush(napi, false);
3262     +
3263     if (napi->rx_count) {
3264     netif_receive_skb_list(&napi->rx_list);
3265     INIT_LIST_HEAD(&napi->rx_list);
3266     napi->rx_count = 0;
3267     }
3268     -
3269     - napi_gro_flush(napi, false);
3270     }
3271    
3272     iwl_pcie_rxq_restock(trans, rxq);
3273     diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
3274     index 57edfada0665..c9401c121a14 100644
3275     --- a/drivers/net/wireless/marvell/libertas/cfg.c
3276     +++ b/drivers/net/wireless/marvell/libertas/cfg.c
3277     @@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates)
3278     int hw, ap, ap_max = ie[1];
3279     u8 hw_rate;
3280    
3281     + if (ap_max > MAX_RATES) {
3282     + lbs_deb_assoc("invalid rates\n");
3283     + return tlv;
3284     + }
3285     /* Advance past IE header */
3286     ie += 2;
3287    
3288     @@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
3289     struct cmd_ds_802_11_ad_hoc_join cmd;
3290     u8 preamble = RADIO_PREAMBLE_SHORT;
3291     int ret = 0;
3292     + int hw, i;
3293     + u8 rates_max;
3294     + u8 *rates;
3295    
3296     /* TODO: set preamble based on scan result */
3297     ret = lbs_set_radio(priv, preamble, 1);
3298     @@ -1775,9 +1782,12 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
3299     if (!rates_eid) {
3300     lbs_add_rates(cmd.bss.rates);
3301     } else {
3302     - int hw, i;
3303     - u8 rates_max = rates_eid[1];
3304     - u8 *rates = cmd.bss.rates;
3305     + rates_max = rates_eid[1];
3306     + if (rates_max > MAX_RATES) {
3307     + lbs_deb_join("invalid rates");
3308     + goto out;
3309     + }
3310     + rates = cmd.bss.rates;
3311     for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
3312     u8 hw_rate = lbs_rates[hw].bitrate / 5;
3313     for (i = 0; i < rates_max; i++) {
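
Both libertas hunks guard a length byte taken straight from an over-the-air information element (ie[1] and rates_eid[1]) before using it as a loop bound: a malicious peer controls that byte, and the destination arrays hold only MAX_RATES entries. A minimal sketch of the same defensive parse, assuming MAX_RATES and a raw IE laid out as {id, len, payload...}:

#include <stdio.h>
#include <string.h>

#define MAX_RATES 14

/* Copy the rates payload of a {id, len, data...} IE into 'out',
 * refusing lengths the sender could use to overrun the buffer. */
static int parse_rates_ie(const unsigned char *ie, unsigned char *out)
{
	unsigned char len = ie[1];            /* attacker-controlled */

	if (len > MAX_RATES)
		return -1;                    /* invalid rates */
	memcpy(out, ie + 2, len);
	return len;
}

int main(void)
{
	unsigned char good[] = { 0x01, 3, 0x82, 0x84, 0x8b };
	unsigned char evil[] = { 0x01, 0xff };  /* lies about its length */
	unsigned char rates[MAX_RATES];

	printf("good: %d\n", parse_rates_ie(good, rates));   /* 3  */
	printf("evil: %d\n", parse_rates_ie(evil, rates));   /* -1 */
	return 0;
}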
3314     diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
3315     index 308f744393eb..1593b8494ebb 100644
3316     --- a/drivers/pci/quirks.c
3317     +++ b/drivers/pci/quirks.c
3318     @@ -5021,18 +5021,25 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
3319    
3320     #ifdef CONFIG_PCI_ATS
3321     /*
3322     - * Some devices have a broken ATS implementation causing IOMMU stalls.
3323     - * Don't use ATS for those devices.
3324     + * Some devices require additional driver setup to enable ATS. Don't use
3325     + * ATS for those devices as ATS will be enabled before the driver has had a
3326     + * chance to load and configure the device.
3327     */
3328     -static void quirk_no_ats(struct pci_dev *pdev)
3329     +static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
3330     {
3331     - pci_info(pdev, "disabling ATS (broken on this device)\n");
3332     + if (pdev->device == 0x7340 && pdev->revision != 0xc5)
3333     + return;
3334     +
3335     + pci_info(pdev, "disabling ATS\n");
3336     pdev->ats_cap = 0;
3337     }
3338    
3339     /* AMD Stoney platform GPU */
3340     -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
3341     -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
3342     +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
3343     +/* AMD Iceland dGPU */
3344     +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
3345     +/* AMD Navi14 dGPU */
3346     +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
3347     #endif /* CONFIG_PCI_ATS */
3348    
3349     /* Freescale PCIe doesn't support MSI in RC mode */
3350     diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
3351     index 44d7f50bbc82..d936e7aa74c4 100644
3352     --- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
3353     +++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
3354     @@ -49,6 +49,7 @@
3355     .padown_offset = SPT_PAD_OWN, \
3356     .padcfglock_offset = SPT_PADCFGLOCK, \
3357     .hostown_offset = SPT_HOSTSW_OWN, \
3358     + .is_offset = SPT_GPI_IS, \
3359     .ie_offset = SPT_GPI_IE, \
3360     .pin_base = (s), \
3361     .npins = ((e) - (s) + 1), \
3362     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
3363     index f194ffc4699e..c070cb2a6a5b 100644
3364     --- a/drivers/target/iscsi/iscsi_target.c
3365     +++ b/drivers/target/iscsi/iscsi_target.c
3366     @@ -4151,9 +4151,6 @@ int iscsit_close_connection(
3367     iscsit_stop_nopin_response_timer(conn);
3368     iscsit_stop_nopin_timer(conn);
3369    
3370     - if (conn->conn_transport->iscsit_wait_conn)
3371     - conn->conn_transport->iscsit_wait_conn(conn);
3372     -
3373     /*
3374     * During Connection recovery drop unacknowledged out of order
3375     * commands for this connection, and prepare the other commands
3376     @@ -4239,6 +4236,9 @@ int iscsit_close_connection(
3377     target_sess_cmd_list_set_waiting(sess->se_sess);
3378     target_wait_for_sess_cmds(sess->se_sess);
3379    
3380     + if (conn->conn_transport->iscsit_wait_conn)
3381     + conn->conn_transport->iscsit_wait_conn(conn);
3382     +
3383     ahash_request_free(conn->conn_tx_hash);
3384     if (conn->conn_rx_hash) {
3385     struct crypto_ahash *tfm;
3386     diff --git a/fs/afs/cell.c b/fs/afs/cell.c
3387     index fd5133e26a38..78ba5f932287 100644
3388     --- a/fs/afs/cell.c
3389     +++ b/fs/afs/cell.c
3390     @@ -134,8 +134,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
3391     _leave(" = -ENAMETOOLONG");
3392     return ERR_PTR(-ENAMETOOLONG);
3393     }
3394     - if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
3395     +
3396     + /* Prohibit cell names that contain unprintable chars, '/' and '@' or
3397     + * that begin with a dot. This also precludes "@cell".
3398     + */
3399     + if (name[0] == '.')
3400     return ERR_PTR(-EINVAL);
3401     + for (i = 0; i < namelen; i++) {
3402     + char ch = name[i];
3403     + if (!isprint(ch) || ch == '/' || ch == '@')
3404     + return ERR_PTR(-EINVAL);
3405     + }
3406    
3407     _enter("%*.*s,%s", namelen, namelen, name, addresses);
3408    
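
Rather than special-casing the literal "@cell", the afs change defines the valid alphabet instead: no leading dot, and no unprintable characters, '/' or '@' anywhere. That rejects "@cell" as a side effect and keeps cell names safe to embed in paths. The same validator in plain C (userspace isprint() is locale-dependent, but for 7-bit input it behaves like the kernel's linux/ctype.h version):

#include <ctype.h>
#include <errno.h>
#include <stddef.h>

static int validate_cell_name(const char *name, size_t namelen)
{
	size_t i;

	if (namelen == 0 || name[0] == '.')
		return -EINVAL;
	for (i = 0; i < namelen; i++) {
		unsigned char ch = name[i];

		if (!isprint(ch) || ch == '/' || ch == '@')
			return -EINVAL;
	}
	return 0;
}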
3409     diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
3410     index a5163296d9d9..ee02a742fff5 100644
3411     --- a/fs/ceph/mds_client.c
3412     +++ b/fs/ceph/mds_client.c
3413     @@ -708,8 +708,10 @@ void ceph_mdsc_release_request(struct kref *kref)
3414     /* avoid calling iput_final() in mds dispatch threads */
3415     ceph_async_iput(req->r_inode);
3416     }
3417     - if (req->r_parent)
3418     + if (req->r_parent) {
3419     ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
3420     + ceph_async_iput(req->r_parent);
3421     + }
3422     ceph_async_iput(req->r_target_inode);
3423     if (req->r_dentry)
3424     dput(req->r_dentry);
3425     @@ -2670,8 +2672,10 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
3426     /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
3427     if (req->r_inode)
3428     ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
3429     - if (req->r_parent)
3430     + if (req->r_parent) {
3431     ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
3432     + ihold(req->r_parent);
3433     + }
3434     if (req->r_old_dentry_dir)
3435     ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
3436     CEPH_CAP_PIN);
3437     diff --git a/fs/io_uring.c b/fs/io_uring.c
3438     index b1c9ad1fb9e1..709671faaed6 100644
3439     --- a/fs/io_uring.c
3440     +++ b/fs/io_uring.c
3441     @@ -3716,12 +3716,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3442     wake_up(&ctx->sqo_wait);
3443     submitted = to_submit;
3444     } else if (to_submit) {
3445     - if (current->mm != ctx->sqo_mm ||
3446     - current_cred() != ctx->creds) {
3447     - ret = -EPERM;
3448     - goto out;
3449     - }
3450     -
3451     to_submit = min(to_submit, ctx->sq_entries);
3452    
3453     mutex_lock(&ctx->uring_lock);
3454     diff --git a/fs/namei.c b/fs/namei.c
3455     index 671c3c1a3425..e81521c87f98 100644
3456     --- a/fs/namei.c
3457     +++ b/fs/namei.c
3458     @@ -1001,7 +1001,8 @@ static int may_linkat(struct path *link)
3459     * may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
3460     * should be allowed, or not, on files that already
3461     * exist.
3462     - * @dir: the sticky parent directory
3463     + * @dir_mode: mode bits of directory
3464     + * @dir_uid: owner of directory
3465     * @inode: the inode of the file to open
3466     *
3467     * Block an O_CREAT open of a FIFO (or a regular file) when:
3468     @@ -1017,18 +1018,18 @@ static int may_linkat(struct path *link)
3469     *
3470     * Returns 0 if the open is allowed, -ve on error.
3471     */
3472     -static int may_create_in_sticky(struct dentry * const dir,
3473     +static int may_create_in_sticky(umode_t dir_mode, kuid_t dir_uid,
3474     struct inode * const inode)
3475     {
3476     if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
3477     (!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
3478     - likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
3479     - uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
3480     + likely(!(dir_mode & S_ISVTX)) ||
3481     + uid_eq(inode->i_uid, dir_uid) ||
3482     uid_eq(current_fsuid(), inode->i_uid))
3483     return 0;
3484    
3485     - if (likely(dir->d_inode->i_mode & 0002) ||
3486     - (dir->d_inode->i_mode & 0020 &&
3487     + if (likely(dir_mode & 0002) ||
3488     + (dir_mode & 0020 &&
3489     ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
3490     (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
3491     return -EACCES;
3492     @@ -3248,6 +3249,8 @@ static int do_last(struct nameidata *nd,
3493     struct file *file, const struct open_flags *op)
3494     {
3495     struct dentry *dir = nd->path.dentry;
3496     + kuid_t dir_uid = dir->d_inode->i_uid;
3497     + umode_t dir_mode = dir->d_inode->i_mode;
3498     int open_flag = op->open_flag;
3499     bool will_truncate = (open_flag & O_TRUNC) != 0;
3500     bool got_write = false;
3501     @@ -3383,7 +3386,7 @@ finish_open:
3502     error = -EISDIR;
3503     if (d_is_dir(nd->path.dentry))
3504     goto out;
3505     - error = may_create_in_sticky(dir,
3506     + error = may_create_in_sticky(dir_mode, dir_uid,
3507     d_backing_inode(nd->path.dentry));
3508     if (unlikely(error))
3509     goto out;
3510     diff --git a/fs/readdir.c b/fs/readdir.c
3511     index d26d5ea4de7b..de2eceffdee8 100644
3512     --- a/fs/readdir.c
3513     +++ b/fs/readdir.c
3514     @@ -102,10 +102,14 @@ EXPORT_SYMBOL(iterate_dir);
3515     * filename length, and the above "soft error" worry means
3516     * that it's probably better left alone until we have that
3517     * issue clarified.
3518     + *
3519     + * Note the PATH_MAX check - it's arbitrary but the real
3520     + * kernel limit on a possible path component, not NAME_MAX,
3521     + * which is the technical standard limit.
3522     */
3523     static int verify_dirent_name(const char *name, int len)
3524     {
3525     - if (!len)
3526     + if (len <= 0 || len >= PATH_MAX)
3527     return -EIO;
3528     if (memchr(name, '/', len))
3529     return -EIO;
3530     @@ -206,7 +210,7 @@ struct linux_dirent {
3531     struct getdents_callback {
3532     struct dir_context ctx;
3533     struct linux_dirent __user * current_dir;
3534     - struct linux_dirent __user * previous;
3535     + int prev_reclen;
3536     int count;
3537     int error;
3538     };
3539     @@ -214,12 +218,13 @@ struct getdents_callback {
3540     static int filldir(struct dir_context *ctx, const char *name, int namlen,
3541     loff_t offset, u64 ino, unsigned int d_type)
3542     {
3543     - struct linux_dirent __user * dirent;
3544     + struct linux_dirent __user *dirent, *prev;
3545     struct getdents_callback *buf =
3546     container_of(ctx, struct getdents_callback, ctx);
3547     unsigned long d_ino;
3548     int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
3549     sizeof(long));
3550     + int prev_reclen;
3551    
3552     buf->error = verify_dirent_name(name, namlen);
3553     if (unlikely(buf->error))
3554     @@ -232,28 +237,24 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
3555     buf->error = -EOVERFLOW;
3556     return -EOVERFLOW;
3557     }
3558     - dirent = buf->previous;
3559     - if (dirent && signal_pending(current))
3560     + prev_reclen = buf->prev_reclen;
3561     + if (prev_reclen && signal_pending(current))
3562     return -EINTR;
3563     -
3564     - /*
3565     - * Note! This range-checks 'previous' (which may be NULL).
3566     - * The real range was checked in getdents
3567     - */
3568     - if (!user_access_begin(dirent, sizeof(*dirent)))
3569     - goto efault;
3570     - if (dirent)
3571     - unsafe_put_user(offset, &dirent->d_off, efault_end);
3572     dirent = buf->current_dir;
3573     + prev = (void __user *) dirent - prev_reclen;
3574     + if (!user_access_begin(prev, reclen + prev_reclen))
3575     + goto efault;
3576     +
3577     + /* This might be 'dirent->d_off', but if so it will get overwritten */
3578     + unsafe_put_user(offset, &prev->d_off, efault_end);
3579     unsafe_put_user(d_ino, &dirent->d_ino, efault_end);
3580     unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
3581     unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end);
3582     unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
3583     user_access_end();
3584    
3585     - buf->previous = dirent;
3586     - dirent = (void __user *)dirent + reclen;
3587     - buf->current_dir = dirent;
3588     + buf->current_dir = (void __user *)dirent + reclen;
3589     + buf->prev_reclen = reclen;
3590     buf->count -= reclen;
3591     return 0;
3592     efault_end:
3593     @@ -267,7 +268,6 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
3594     struct linux_dirent __user *, dirent, unsigned int, count)
3595     {
3596     struct fd f;
3597     - struct linux_dirent __user * lastdirent;
3598     struct getdents_callback buf = {
3599     .ctx.actor = filldir,
3600     .count = count,
3601     @@ -285,8 +285,10 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
3602     error = iterate_dir(f.file, &buf.ctx);
3603     if (error >= 0)
3604     error = buf.error;
3605     - lastdirent = buf.previous;
3606     - if (lastdirent) {
3607     + if (buf.prev_reclen) {
3608     + struct linux_dirent __user * lastdirent;
3609     + lastdirent = (void __user *)buf.current_dir - buf.prev_reclen;
3610     +
3611     if (put_user(buf.ctx.pos, &lastdirent->d_off))
3612     error = -EFAULT;
3613     else
3614     @@ -299,7 +301,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
3615     struct getdents_callback64 {
3616     struct dir_context ctx;
3617     struct linux_dirent64 __user * current_dir;
3618     - struct linux_dirent64 __user * previous;
3619     + int prev_reclen;
3620     int count;
3621     int error;
3622     };
3623     @@ -307,11 +309,12 @@ struct getdents_callback64 {
3624     static int filldir64(struct dir_context *ctx, const char *name, int namlen,
3625     loff_t offset, u64 ino, unsigned int d_type)
3626     {
3627     - struct linux_dirent64 __user *dirent;
3628     + struct linux_dirent64 __user *dirent, *prev;
3629     struct getdents_callback64 *buf =
3630     container_of(ctx, struct getdents_callback64, ctx);
3631     int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
3632     sizeof(u64));
3633     + int prev_reclen;
3634    
3635     buf->error = verify_dirent_name(name, namlen);
3636     if (unlikely(buf->error))
3637     @@ -319,30 +322,27 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
3638     buf->error = -EINVAL; /* only used if we fail.. */
3639     if (reclen > buf->count)
3640     return -EINVAL;
3641     - dirent = buf->previous;
3642     - if (dirent && signal_pending(current))
3643     + prev_reclen = buf->prev_reclen;
3644     + if (prev_reclen && signal_pending(current))
3645     return -EINTR;
3646     -
3647     - /*
3648     - * Note! This range-checks 'previous' (which may be NULL).
3649     - * The real range was checked in getdents
3650     - */
3651     - if (!user_access_begin(dirent, sizeof(*dirent)))
3652     - goto efault;
3653     - if (dirent)
3654     - unsafe_put_user(offset, &dirent->d_off, efault_end);
3655     dirent = buf->current_dir;
3656     + prev = (void __user *)dirent - prev_reclen;
3657     + if (!user_access_begin(prev, reclen + prev_reclen))
3658     + goto efault;
3659     +
3660     + /* This might be 'dirent->d_off', but if so it will get overwritten */
3661     + unsafe_put_user(offset, &prev->d_off, efault_end);
3662     unsafe_put_user(ino, &dirent->d_ino, efault_end);
3663     unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
3664     unsafe_put_user(d_type, &dirent->d_type, efault_end);
3665     unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
3666     user_access_end();
3667    
3668     - buf->previous = dirent;
3669     - dirent = (void __user *)dirent + reclen;
3670     - buf->current_dir = dirent;
3671     + buf->prev_reclen = reclen;
3672     + buf->current_dir = (void __user *)dirent + reclen;
3673     buf->count -= reclen;
3674     return 0;
3675     +
3676     efault_end:
3677     user_access_end();
3678     efault:
3679     @@ -354,7 +354,6 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
3680     unsigned int count)
3681     {
3682     struct fd f;
3683     - struct linux_dirent64 __user * lastdirent;
3684     struct getdents_callback64 buf = {
3685     .ctx.actor = filldir64,
3686     .count = count,
3687     @@ -372,9 +371,11 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
3688     error = iterate_dir(f.file, &buf.ctx);
3689     if (error >= 0)
3690     error = buf.error;
3691     - lastdirent = buf.previous;
3692     - if (lastdirent) {
3693     + if (buf.prev_reclen) {
3694     + struct linux_dirent64 __user * lastdirent;
3695     typeof(lastdirent->d_off) d_off = buf.ctx.pos;
3696     +
3697     + lastdirent = (void __user *) buf.current_dir - buf.prev_reclen;
3698     if (__put_user(d_off, &lastdirent->d_off))
3699     error = -EFAULT;
3700     else
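
The filldir()/filldir64() rework replaces the saved user pointer 'previous' with the integer 'prev_reclen'. Since records are laid out back to back, the previous entry is always current_dir minus prev_reclen, so a single user_access_begin() spanning reclen + prev_reclen bytes covers both the back-patch of the previous record's d_off and all writes of the new record — one user-access window instead of two, and no user pointer left lingering in the callback state. The layout trick in a self-contained userspace model (a simplified linux_dirent64; the kernel's real record also carries d_type):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct dirent_rec {
	uint64_t d_ino;
	int64_t  d_off;        /* offset of the *next* record */
	uint16_t d_reclen;
	char     d_name[];
};

static char buf[4096];

/* Append one record; back-patch the previous one via prev_reclen. */
static size_t emit(size_t pos, size_t prev_reclen,
		   uint64_t ino, int64_t off, const char *name)
{
	struct dirent_rec *cur = (struct dirent_rec *)(buf + pos);
	size_t reclen = (offsetof(struct dirent_rec, d_name) +
			 strlen(name) + 1 + 7) & ~(size_t)7;

	if (prev_reclen) {
		struct dirent_rec *prev =
			(struct dirent_rec *)(buf + pos - prev_reclen);
		prev->d_off = off;  /* now we know where 'prev' ends */
	}
	cur->d_ino = ino;
	cur->d_reclen = (uint16_t)reclen;
	strcpy(cur->d_name, name);
	return reclen;
}

int main(void)
{
	size_t pos = 0, prev = 0, n;

	n = emit(pos, prev, 1, 100, "first");  pos += n; prev = n;
	n = emit(pos, prev, 2, 200, "second"); pos += n; prev = n;
	/* the very last d_off is patched afterwards, as getdents does */
	assert(((struct dirent_rec *)buf)->d_off == 200);
	return 0;
}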
3701     diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
3702     index 13f09706033a..f8fde9fa479c 100644
3703     --- a/include/linux/netdevice.h
3704     +++ b/include/linux/netdevice.h
3705     @@ -3666,6 +3666,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
3706     int dev_get_alias(const struct net_device *, char *, size_t);
3707     int dev_change_net_namespace(struct net_device *, struct net *, const char *);
3708     int __dev_set_mtu(struct net_device *, int);
3709     +int dev_validate_mtu(struct net_device *dev, int mtu,
3710     + struct netlink_ext_ack *extack);
3711     int dev_set_mtu_ext(struct net_device *dev, int mtu,
3712     struct netlink_ext_ack *extack);
3713     int dev_set_mtu(struct net_device *, int);
3714     diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
3715     index 9bc255a8461b..77336f4c4b1c 100644
3716     --- a/include/linux/netfilter/ipset/ip_set.h
3717     +++ b/include/linux/netfilter/ipset/ip_set.h
3718     @@ -445,13 +445,6 @@ ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
3719     sizeof(*addr));
3720     }
3721    
3722     -/* Calculate the bytes required to store the inclusive range of a-b */
3723     -static inline int
3724     -bitmap_bytes(u32 a, u32 b)
3725     -{
3726     - return 4 * ((((b - a + 8) / 8) + 3) / 4);
3727     -}
3728     -
3729     /* How often should the gc be run by default */
3730     #define IPSET_GC_TIME (3 * 60)
3731    
3732     diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
3733     index cf09ab37b45b..851425c3178f 100644
3734     --- a/include/linux/netfilter/nfnetlink.h
3735     +++ b/include/linux/netfilter/nfnetlink.h
3736     @@ -31,7 +31,7 @@ struct nfnetlink_subsystem {
3737     const struct nfnl_callback *cb; /* callback for individual types */
3738     struct module *owner;
3739     int (*commit)(struct net *net, struct sk_buff *skb);
3740     - int (*abort)(struct net *net, struct sk_buff *skb);
3741     + int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
3742     void (*cleanup)(struct net *net);
3743     bool (*valid_genid)(struct net *net, u32 genid);
3744     };
3745     diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
3746     index 286fd960896f..a1a8d45adb42 100644
3747     --- a/include/net/netns/nftables.h
3748     +++ b/include/net/netns/nftables.h
3749     @@ -7,6 +7,7 @@
3750     struct netns_nftables {
3751     struct list_head tables;
3752     struct list_head commit_list;
3753     + struct list_head module_list;
3754     struct mutex commit_mutex;
3755     unsigned int base_seq;
3756     u8 gencursor;
3757     diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
3758     index 9a0e8af21310..a5ccfa67bc5c 100644
3759     --- a/include/trace/events/xen.h
3760     +++ b/include/trace/events/xen.h
3761     @@ -66,7 +66,11 @@ TRACE_EVENT(xen_mc_callback,
3762     TP_PROTO(xen_mc_callback_fn_t fn, void *data),
3763     TP_ARGS(fn, data),
3764     TP_STRUCT__entry(
3765     - __field(xen_mc_callback_fn_t, fn)
3766     + /*
3767     + * Use field_struct to avoid is_signed_type()
3768     + * comparison of a function pointer.
3769     + */
3770     + __field_struct(xen_mc_callback_fn_t, fn)
3771     __field(void *, data)
3772     ),
3773     TP_fast_assign(
3774     diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
3775     index 26b9168321e7..d65f2d5ab694 100644
3776     --- a/kernel/power/snapshot.c
3777     +++ b/kernel/power/snapshot.c
3778     @@ -1147,24 +1147,24 @@ void free_basic_memory_bitmaps(void)
3779    
3780     void clear_free_pages(void)
3781     {
3782     -#ifdef CONFIG_PAGE_POISONING_ZERO
3783     struct memory_bitmap *bm = free_pages_map;
3784     unsigned long pfn;
3785    
3786     if (WARN_ON(!(free_pages_map)))
3787     return;
3788    
3789     - memory_bm_position_reset(bm);
3790     - pfn = memory_bm_next_pfn(bm);
3791     - while (pfn != BM_END_OF_MAP) {
3792     - if (pfn_valid(pfn))
3793     - clear_highpage(pfn_to_page(pfn));
3794     -
3795     + if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) {
3796     + memory_bm_position_reset(bm);
3797     pfn = memory_bm_next_pfn(bm);
3798     + while (pfn != BM_END_OF_MAP) {
3799     + if (pfn_valid(pfn))
3800     + clear_highpage(pfn_to_page(pfn));
3801     +
3802     + pfn = memory_bm_next_pfn(bm);
3803     + }
3804     + memory_bm_position_reset(bm);
3805     + pr_info("free pages cleared after restore\n");
3806     }
3807     - memory_bm_position_reset(bm);
3808     - pr_info("free pages cleared after restore\n");
3809     -#endif /* PAGE_POISONING_ZERO */
3810     }
3811    
3812     /**
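
Replacing the #ifdef CONFIG_PAGE_POISONING_ZERO block with an IS_ENABLED() test buys two things: the code is now always compiled (so it cannot silently rot in untested configs), and the condition can be OR-ed with a runtime predicate, want_init_on_free(), which no preprocessor conditional could express. IS_ENABLED() is just a constant-folding macro; a simplified stand-in shows the idea (the real kernel macro also handles =m/=y tristates):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in: in the kernel, IS_ENABLED(CONFIG_X) expands to
 * 1 or 0 at preprocessing time based on the build configuration. */
#define CONFIG_PAGE_POISONING_ZERO 0
#define IS_ENABLED(option) (option)

static bool want_init_on_free(void) { return true; }  /* runtime knob */

int main(void)
{
	/* Both halves are compiled; the constant half folds away. */
	if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free())
		puts("clearing free pages after restore");
	return 0;
}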
3813     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
3814     index bcb72f102613..341aab32c946 100644
3815     --- a/kernel/trace/trace.c
3816     +++ b/kernel/trace/trace.c
3817     @@ -9270,6 +9270,11 @@ __init static int tracing_set_default_clock(void)
3818     {
3819     /* sched_clock_stable() is determined in late_initcall */
3820     if (!trace_boot_clock && !sched_clock_stable()) {
3821     + if (security_locked_down(LOCKDOWN_TRACEFS)) {
3822     + pr_warn("Can not set tracing clock due to lockdown\n");
3823     + return -EPERM;
3824     + }
3825     +
3826     printk(KERN_WARNING
3827     "Unstable clock detected, switching default tracing clock to \"global\"\n"
3828     "If you want to keep using the local clock, then add:\n"
3829     diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
3830     index c2783915600c..205692181e7b 100644
3831     --- a/kernel/trace/trace_events_hist.c
3832     +++ b/kernel/trace/trace_events_hist.c
3833     @@ -116,6 +116,7 @@ struct hist_field {
3834     struct ftrace_event_field *field;
3835     unsigned long flags;
3836     hist_field_fn_t fn;
3837     + unsigned int ref;
3838     unsigned int size;
3839     unsigned int offset;
3840     unsigned int is_signed;
3841     @@ -1766,11 +1767,13 @@ static struct hist_field *find_var(struct hist_trigger_data *hist_data,
3842     struct event_trigger_data *test;
3843     struct hist_field *hist_field;
3844    
3845     + lockdep_assert_held(&event_mutex);
3846     +
3847     hist_field = find_var_field(hist_data, var_name);
3848     if (hist_field)
3849     return hist_field;
3850    
3851     - list_for_each_entry_rcu(test, &file->triggers, list) {
3852     + list_for_each_entry(test, &file->triggers, list) {
3853     if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3854     test_data = test->private_data;
3855     hist_field = find_var_field(test_data, var_name);
3856     @@ -1820,7 +1823,9 @@ static struct hist_field *find_file_var(struct trace_event_file *file,
3857     struct event_trigger_data *test;
3858     struct hist_field *hist_field;
3859    
3860     - list_for_each_entry_rcu(test, &file->triggers, list) {
3861     + lockdep_assert_held(&event_mutex);
3862     +
3863     + list_for_each_entry(test, &file->triggers, list) {
3864     if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3865     test_data = test->private_data;
3866     hist_field = find_var_field(test_data, var_name);
3867     @@ -2423,8 +2428,16 @@ static int contains_operator(char *str)
3868     return field_op;
3869     }
3870    
3871     +static void get_hist_field(struct hist_field *hist_field)
3872     +{
3873     + hist_field->ref++;
3874     +}
3875     +
3876     static void __destroy_hist_field(struct hist_field *hist_field)
3877     {
3878     + if (--hist_field->ref > 1)
3879     + return;
3880     +
3881     kfree(hist_field->var.name);
3882     kfree(hist_field->name);
3883     kfree(hist_field->type);
3884     @@ -2466,6 +2479,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
3885     if (!hist_field)
3886     return NULL;
3887    
3888     + hist_field->ref = 1;
3889     +
3890     hist_field->hist_data = hist_data;
3891    
3892     if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
3893     @@ -2661,6 +2676,17 @@ static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
3894     {
3895     unsigned long flags = HIST_FIELD_FL_VAR_REF;
3896     struct hist_field *ref_field;
3897     + int i;
3898     +
3899     + /* Check if the variable already exists */
3900     + for (i = 0; i < hist_data->n_var_refs; i++) {
3901     + ref_field = hist_data->var_refs[i];
3902     + if (ref_field->var.idx == var_field->var.idx &&
3903     + ref_field->var.hist_data == var_field->hist_data) {
3904     + get_hist_field(ref_field);
3905     + return ref_field;
3906     + }
3907     + }
3908    
3909     ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
3910     if (ref_field) {
3911     @@ -3115,7 +3141,9 @@ static char *find_trigger_filter(struct hist_trigger_data *hist_data,
3912     {
3913     struct event_trigger_data *test;
3914    
3915     - list_for_each_entry_rcu(test, &file->triggers, list) {
3916     + lockdep_assert_held(&event_mutex);
3917     +
3918     + list_for_each_entry(test, &file->triggers, list) {
3919     if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3920     if (test->private_data == hist_data)
3921     return test->filter_str;
3922     @@ -3166,9 +3194,11 @@ find_compatible_hist(struct hist_trigger_data *target_hist_data,
3923     struct event_trigger_data *test;
3924     unsigned int n_keys;
3925    
3926     + lockdep_assert_held(&event_mutex);
3927     +
3928     n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
3929    
3930     - list_for_each_entry_rcu(test, &file->triggers, list) {
3931     + list_for_each_entry(test, &file->triggers, list) {
3932     if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3933     hist_data = test->private_data;
3934    
3935     @@ -5528,7 +5558,7 @@ static int hist_show(struct seq_file *m, void *v)
3936     goto out_unlock;
3937     }
3938    
3939     - list_for_each_entry_rcu(data, &event_file->triggers, list) {
3940     + list_for_each_entry(data, &event_file->triggers, list) {
3941     if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
3942     hist_trigger_show(m, data, n++);
3943     }
3944     @@ -5921,7 +5951,9 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
3945     if (hist_data->attrs->name && !named_data)
3946     goto new;
3947    
3948     - list_for_each_entry_rcu(test, &file->triggers, list) {
3949     + lockdep_assert_held(&event_mutex);
3950     +
3951     + list_for_each_entry(test, &file->triggers, list) {
3952     if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3953     if (!hist_trigger_match(data, test, named_data, false))
3954     continue;
3955     @@ -6005,10 +6037,12 @@ static bool have_hist_trigger_match(struct event_trigger_data *data,
3956     struct event_trigger_data *test, *named_data = NULL;
3957     bool match = false;
3958    
3959     + lockdep_assert_held(&event_mutex);
3960     +
3961     if (hist_data->attrs->name)
3962     named_data = find_named_trigger(hist_data->attrs->name);
3963    
3964     - list_for_each_entry_rcu(test, &file->triggers, list) {
3965     + list_for_each_entry(test, &file->triggers, list) {
3966     if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3967     if (hist_trigger_match(data, test, named_data, false)) {
3968     match = true;
3969     @@ -6026,10 +6060,12 @@ static bool hist_trigger_check_refs(struct event_trigger_data *data,
3970     struct hist_trigger_data *hist_data = data->private_data;
3971     struct event_trigger_data *test, *named_data = NULL;
3972    
3973     + lockdep_assert_held(&event_mutex);
3974     +
3975     if (hist_data->attrs->name)
3976     named_data = find_named_trigger(hist_data->attrs->name);
3977    
3978     - list_for_each_entry_rcu(test, &file->triggers, list) {
3979     + list_for_each_entry(test, &file->triggers, list) {
3980     if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3981     if (!hist_trigger_match(data, test, named_data, false))
3982     continue;
3983     @@ -6051,10 +6087,12 @@ static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
3984     struct event_trigger_data *test, *named_data = NULL;
3985     bool unregistered = false;
3986    
3987     + lockdep_assert_held(&event_mutex);
3988     +
3989     if (hist_data->attrs->name)
3990     named_data = find_named_trigger(hist_data->attrs->name);
3991    
3992     - list_for_each_entry_rcu(test, &file->triggers, list) {
3993     + list_for_each_entry(test, &file->triggers, list) {
3994     if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3995     if (!hist_trigger_match(data, test, named_data, false))
3996     continue;
3997     @@ -6080,7 +6118,9 @@ static bool hist_file_check_refs(struct trace_event_file *file)
3998     struct hist_trigger_data *hist_data;
3999     struct event_trigger_data *test;
4000    
4001     - list_for_each_entry_rcu(test, &file->triggers, list) {
4002     + lockdep_assert_held(&event_mutex);
4003     +
4004     + list_for_each_entry(test, &file->triggers, list) {
4005     if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
4006     hist_data = test->private_data;
4007     if (check_var_refs(hist_data))
4008     @@ -6323,7 +6363,8 @@ hist_enable_trigger(struct event_trigger_data *data, void *rec,
4009     struct enable_trigger_data *enable_data = data->private_data;
4010     struct event_trigger_data *test;
4011    
4012     - list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
4013     + list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
4014     + lockdep_is_held(&event_mutex)) {
4015     if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
4016     if (enable_data->enable)
4017     test->paused = false;
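
Nearly every hunk in this file (and in trace_events_trigger.c below) makes the same substitution: walks of file->triggers that only ever happen with event_mutex held stop pretending to be RCU readers. Plain list_for_each_entry() is correct under the mutex, and lockdep_assert_held() turns the locking assumption into something lockdep can verify; the one genuinely dual-context walk, hist_enable_trigger(), instead passes a lockdep condition to list_for_each_entry_rcu() so it is valid from either side. (Alongside that, hist_field variable references gain a ref count so create_var_ref() can hand back an existing reference instead of allocating a duplicate.) A condensed kernel-style sketch of the iteration pattern — handle() and the list head are placeholders:

/* Mutex-side iteration: assert the lock, iterate plainly. */
static void walk_under_mutex(struct list_head *triggers)
{
	struct event_trigger_data *t;

	lockdep_assert_held(&event_mutex);
	list_for_each_entry(t, triggers, list)
		handle(t);              /* placeholder per-entry work */
}

/* Call site reachable both under the mutex and from RCU readers:
 * the extra argument tells lockdep when the non-RCU path is legal. */
static void walk_either_context(struct list_head *triggers)
{
	struct event_trigger_data *t;

	list_for_each_entry_rcu(t, triggers, list,
				lockdep_is_held(&event_mutex))
		handle(t);
}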
4018     diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
4019     index 2cd53ca21b51..40106fff06a4 100644
4020     --- a/kernel/trace/trace_events_trigger.c
4021     +++ b/kernel/trace/trace_events_trigger.c
4022     @@ -501,7 +501,9 @@ void update_cond_flag(struct trace_event_file *file)
4023     struct event_trigger_data *data;
4024     bool set_cond = false;
4025    
4026     - list_for_each_entry_rcu(data, &file->triggers, list) {
4027     + lockdep_assert_held(&event_mutex);
4028     +
4029     + list_for_each_entry(data, &file->triggers, list) {
4030     if (data->filter || event_command_post_trigger(data->cmd_ops) ||
4031     event_command_needs_rec(data->cmd_ops)) {
4032     set_cond = true;
4033     @@ -536,7 +538,9 @@ static int register_trigger(char *glob, struct event_trigger_ops *ops,
4034     struct event_trigger_data *test;
4035     int ret = 0;
4036    
4037     - list_for_each_entry_rcu(test, &file->triggers, list) {
4038     + lockdep_assert_held(&event_mutex);
4039     +
4040     + list_for_each_entry(test, &file->triggers, list) {
4041     if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
4042     ret = -EEXIST;
4043     goto out;
4044     @@ -581,7 +585,9 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
4045     struct event_trigger_data *data;
4046     bool unregistered = false;
4047    
4048     - list_for_each_entry_rcu(data, &file->triggers, list) {
4049     + lockdep_assert_held(&event_mutex);
4050     +
4051     + list_for_each_entry(data, &file->triggers, list) {
4052     if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
4053     unregistered = true;
4054     list_del_rcu(&data->list);
4055     @@ -1497,7 +1503,9 @@ int event_enable_register_trigger(char *glob,
4056     struct event_trigger_data *test;
4057     int ret = 0;
4058    
4059     - list_for_each_entry_rcu(test, &file->triggers, list) {
4060     + lockdep_assert_held(&event_mutex);
4061     +
4062     + list_for_each_entry(test, &file->triggers, list) {
4063     test_enable_data = test->private_data;
4064     if (test_enable_data &&
4065     (test->cmd_ops->trigger_type ==
4066     @@ -1537,7 +1545,9 @@ void event_enable_unregister_trigger(char *glob,
4067     struct event_trigger_data *data;
4068     bool unregistered = false;
4069    
4070     - list_for_each_entry_rcu(data, &file->triggers, list) {
4071     + lockdep_assert_held(&event_mutex);
4072     +
4073     + list_for_each_entry(data, &file->triggers, list) {
4074     enable_data = data->private_data;
4075     if (enable_data &&
4076     (data->cmd_ops->trigger_type ==
4077     diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
4078     index 7f890262c8a3..3e5f9c7d939c 100644
4079     --- a/kernel/trace/trace_kprobe.c
4080     +++ b/kernel/trace/trace_kprobe.c
4081     @@ -290,7 +290,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
4082     INIT_HLIST_NODE(&tk->rp.kp.hlist);
4083     INIT_LIST_HEAD(&tk->rp.kp.list);
4084    
4085     - ret = trace_probe_init(&tk->tp, event, group);
4086     + ret = trace_probe_init(&tk->tp, event, group, 0);
4087     if (ret < 0)
4088     goto error;
4089    
4090     diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
4091     index 905b10af5d5c..bba18cf44a30 100644
4092     --- a/kernel/trace/trace_probe.c
4093     +++ b/kernel/trace/trace_probe.c
4094     @@ -984,7 +984,7 @@ void trace_probe_cleanup(struct trace_probe *tp)
4095     }
4096    
4097     int trace_probe_init(struct trace_probe *tp, const char *event,
4098     - const char *group)
4099     + const char *group, size_t event_data_size)
4100     {
4101     struct trace_event_call *call;
4102     int ret = 0;
4103     @@ -992,7 +992,8 @@ int trace_probe_init(struct trace_probe *tp, const char *event,
4104     if (!event || !group)
4105     return -EINVAL;
4106    
4107     - tp->event = kzalloc(sizeof(struct trace_probe_event), GFP_KERNEL);
4108     + tp->event = kzalloc(sizeof(struct trace_probe_event) + event_data_size,
4109     + GFP_KERNEL);
4110     if (!tp->event)
4111     return -ENOMEM;
4112    
4113     diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
4114     index 4ee703728aec..03e4e180058d 100644
4115     --- a/kernel/trace/trace_probe.h
4116     +++ b/kernel/trace/trace_probe.h
4117     @@ -230,6 +230,7 @@ struct trace_probe_event {
4118     struct trace_event_call call;
4119     struct list_head files;
4120     struct list_head probes;
4121     + char data[0];
4122     };
4123    
4124     struct trace_probe {
4125     @@ -322,7 +323,7 @@ static inline bool trace_probe_has_single_file(struct trace_probe *tp)
4126     }
4127    
4128     int trace_probe_init(struct trace_probe *tp, const char *event,
4129     - const char *group);
4130     + const char *group, size_t event_data_size);
4131     void trace_probe_cleanup(struct trace_probe *tp);
4132     int trace_probe_append(struct trace_probe *tp, struct trace_probe *to);
4133     void trace_probe_unlink(struct trace_probe *tp);
4134     diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
4135     index 352073d36585..f66e202fec13 100644
4136     --- a/kernel/trace/trace_uprobe.c
4137     +++ b/kernel/trace/trace_uprobe.c
4138     @@ -60,7 +60,6 @@ static struct dyn_event_operations trace_uprobe_ops = {
4139     */
4140     struct trace_uprobe {
4141     struct dyn_event devent;
4142     - struct trace_uprobe_filter filter;
4143     struct uprobe_consumer consumer;
4144     struct path path;
4145     struct inode *inode;
4146     @@ -264,6 +263,14 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
4147     }
4148     NOKPROBE_SYMBOL(process_fetch_insn)
4149    
4150     +static struct trace_uprobe_filter *
4151     +trace_uprobe_get_filter(struct trace_uprobe *tu)
4152     +{
4153     + struct trace_probe_event *event = tu->tp.event;
4154     +
4155     + return (struct trace_uprobe_filter *)&event->data[0];
4156     +}
4157     +
4158     static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
4159     {
4160     rwlock_init(&filter->rwlock);
4161     @@ -351,7 +358,8 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
4162     if (!tu)
4163     return ERR_PTR(-ENOMEM);
4164    
4165     - ret = trace_probe_init(&tu->tp, event, group);
4166     + ret = trace_probe_init(&tu->tp, event, group,
4167     + sizeof(struct trace_uprobe_filter));
4168     if (ret < 0)
4169     goto error;
4170    
4171     @@ -359,7 +367,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
4172     tu->consumer.handler = uprobe_dispatcher;
4173     if (is_ret)
4174     tu->consumer.ret_handler = uretprobe_dispatcher;
4175     - init_trace_uprobe_filter(&tu->filter);
4176     + init_trace_uprobe_filter(trace_uprobe_get_filter(tu));
4177     return tu;
4178    
4179     error:
4180     @@ -1067,13 +1075,14 @@ static void __probe_event_disable(struct trace_probe *tp)
4181     struct trace_probe *pos;
4182     struct trace_uprobe *tu;
4183    
4184     + tu = container_of(tp, struct trace_uprobe, tp);
4185     + WARN_ON(!uprobe_filter_is_empty(trace_uprobe_get_filter(tu)));
4186     +
4187     list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
4188     tu = container_of(pos, struct trace_uprobe, tp);
4189     if (!tu->inode)
4190     continue;
4191    
4192     - WARN_ON(!uprobe_filter_is_empty(&tu->filter));
4193     -
4194     uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
4195     tu->inode = NULL;
4196     }
4197     @@ -1108,7 +1117,7 @@ static int probe_event_enable(struct trace_event_call *call,
4198     }
4199    
4200     tu = container_of(tp, struct trace_uprobe, tp);
4201     - WARN_ON(!uprobe_filter_is_empty(&tu->filter));
4202     + WARN_ON(!uprobe_filter_is_empty(trace_uprobe_get_filter(tu)));
4203    
4204     if (enabled)
4205     return 0;
4206     @@ -1205,39 +1214,39 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
4207     }
4208    
4209     static inline bool
4210     -uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
4211     +trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
4212     + struct perf_event *event)
4213     {
4214     - return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
4215     + return __uprobe_perf_filter(filter, event->hw.target->mm);
4216     }
4217    
4218     -static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
4219     +static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
4220     + struct perf_event *event)
4221     {
4222     bool done;
4223    
4224     - write_lock(&tu->filter.rwlock);
4225     + write_lock(&filter->rwlock);
4226     if (event->hw.target) {
4227     list_del(&event->hw.tp_list);
4228     - done = tu->filter.nr_systemwide ||
4229     + done = filter->nr_systemwide ||
4230     (event->hw.target->flags & PF_EXITING) ||
4231     - uprobe_filter_event(tu, event);
4232     + trace_uprobe_filter_event(filter, event);
4233     } else {
4234     - tu->filter.nr_systemwide--;
4235     - done = tu->filter.nr_systemwide;
4236     + filter->nr_systemwide--;
4237     + done = filter->nr_systemwide;
4238     }
4239     - write_unlock(&tu->filter.rwlock);
4240     -
4241     - if (!done)
4242     - return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
4243     + write_unlock(&filter->rwlock);
4244    
4245     - return 0;
4246     + return done;
4247     }
4248    
4249     -static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
4250     +/* This returns true if the filter always covers target mm */
4251     +static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
4252     + struct perf_event *event)
4253     {
4254     bool done;
4255     - int err;
4256    
4257     - write_lock(&tu->filter.rwlock);
4258     + write_lock(&filter->rwlock);
4259     if (event->hw.target) {
4260     /*
4261     * event->parent != NULL means copy_process(), we can avoid
4262     @@ -1247,28 +1256,21 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
4263     * attr.enable_on_exec means that exec/mmap will install the
4264     * breakpoints we need.
4265     */
4266     - done = tu->filter.nr_systemwide ||
4267     + done = filter->nr_systemwide ||
4268     event->parent || event->attr.enable_on_exec ||
4269     - uprobe_filter_event(tu, event);
4270     - list_add(&event->hw.tp_list, &tu->filter.perf_events);
4271     + trace_uprobe_filter_event(filter, event);
4272     + list_add(&event->hw.tp_list, &filter->perf_events);
4273     } else {
4274     - done = tu->filter.nr_systemwide;
4275     - tu->filter.nr_systemwide++;
4276     + done = filter->nr_systemwide;
4277     + filter->nr_systemwide++;
4278     }
4279     - write_unlock(&tu->filter.rwlock);
4280     + write_unlock(&filter->rwlock);
4281    
4282     - err = 0;
4283     - if (!done) {
4284     - err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
4285     - if (err)
4286     - uprobe_perf_close(tu, event);
4287     - }
4288     - return err;
4289     + return done;
4290     }
4291    
4292     -static int uprobe_perf_multi_call(struct trace_event_call *call,
4293     - struct perf_event *event,
4294     - int (*op)(struct trace_uprobe *tu, struct perf_event *event))
4295     +static int uprobe_perf_close(struct trace_event_call *call,
4296     + struct perf_event *event)
4297     {
4298     struct trace_probe *pos, *tp;
4299     struct trace_uprobe *tu;
4300     @@ -1278,25 +1280,59 @@ static int uprobe_perf_multi_call(struct trace_event_call *call,
4301     if (WARN_ON_ONCE(!tp))
4302     return -ENODEV;
4303    
4304     + tu = container_of(tp, struct trace_uprobe, tp);
4305     + if (trace_uprobe_filter_remove(trace_uprobe_get_filter(tu), event))
4306     + return 0;
4307     +
4308     list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
4309     tu = container_of(pos, struct trace_uprobe, tp);
4310     - ret = op(tu, event);
4311     + ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
4312     if (ret)
4313     break;
4314     }
4315    
4316     return ret;
4317     }
4318     +
4319     +static int uprobe_perf_open(struct trace_event_call *call,
4320     + struct perf_event *event)
4321     +{
4322     + struct trace_probe *pos, *tp;
4323     + struct trace_uprobe *tu;
4324     + int err = 0;
4325     +
4326     + tp = trace_probe_primary_from_call(call);
4327     + if (WARN_ON_ONCE(!tp))
4328     + return -ENODEV;
4329     +
4330     + tu = container_of(tp, struct trace_uprobe, tp);
4331     + if (trace_uprobe_filter_add(trace_uprobe_get_filter(tu), event))
4332     + return 0;
4333     +
4334     + list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
4335     + err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
4336     + if (err) {
4337     + uprobe_perf_close(call, event);
4338     + break;
4339     + }
4340     + }
4341     +
4342     + return err;
4343     +}
4344     +
4345     static bool uprobe_perf_filter(struct uprobe_consumer *uc,
4346     enum uprobe_filter_ctx ctx, struct mm_struct *mm)
4347     {
4348     + struct trace_uprobe_filter *filter;
4349     struct trace_uprobe *tu;
4350     int ret;
4351    
4352     tu = container_of(uc, struct trace_uprobe, consumer);
4353     - read_lock(&tu->filter.rwlock);
4354     - ret = __uprobe_perf_filter(&tu->filter, mm);
4355     - read_unlock(&tu->filter.rwlock);
4356     + filter = trace_uprobe_get_filter(tu);
4357     +
4358     + read_lock(&filter->rwlock);
4359     + ret = __uprobe_perf_filter(filter, mm);
4360     + read_unlock(&filter->rwlock);
4361    
4362     return ret;
4363     }
4364     @@ -1419,10 +1455,10 @@ trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
4365     return 0;
4366    
4367     case TRACE_REG_PERF_OPEN:
4368     - return uprobe_perf_multi_call(event, data, uprobe_perf_open);
4369     + return uprobe_perf_open(event, data);
4370    
4371     case TRACE_REG_PERF_CLOSE:
4372     - return uprobe_perf_multi_call(event, data, uprobe_perf_close);
4373     + return uprobe_perf_close(event, data);
4374    
4375     #endif
4376     default:
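The kprobe/uprobe hunks above are one change: trace_probe_init() grows an event_data_size argument, struct trace_probe_event gains a trailing data[0] array, and the per-uprobe filter moves into that tail so every probe attached to the same event shares a single filter. That sharing is what lets uprobe_perf_open()/uprobe_perf_close() consult the filter once before looping over the probe list. Here is the allocation pattern in isolation as a runnable sketch; struct probe_event, event_alloc() and event_filter() are illustrative names only:

#include <stdio.h>
#include <stdlib.h>

struct uprobe_filter { int nr_systemwide; };

struct probe_event {
	char name[32];
	char data[];		/* modern spelling of the patch's data[0] */
};

/* Mirrors trace_probe_init(): size the shared event for its payload. */
static struct probe_event *event_alloc(const char *name, size_t extra)
{
	struct probe_event *ev = calloc(1, sizeof(*ev) + extra);

	if (!ev)
		return NULL;
	snprintf(ev->name, sizeof(ev->name), "%s", name);
	return ev;
}

/* Mirrors trace_uprobe_get_filter(): the payload lives in the tail. */
static struct uprobe_filter *event_filter(struct probe_event *ev)
{
	return (struct uprobe_filter *)&ev->data[0];
}

int main(void)
{
	struct probe_event *ev =
		event_alloc("demo", sizeof(struct uprobe_filter));

	if (!ev)
		return 1;
	event_filter(ev)->nr_systemwide = 1;
	printf("%s: systemwide=%d\n", ev->name,
	       event_filter(ev)->nr_systemwide);
	free(ev);
	return 0;
}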
4377     diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
4378     index dccb95af6003..706020b06617 100644
4379     --- a/lib/strncpy_from_user.c
4380     +++ b/lib/strncpy_from_user.c
4381     @@ -30,13 +30,6 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src,
4382     const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
4383     unsigned long res = 0;
4384    
4385     - /*
4386     - * Truncate 'max' to the user-specified limit, so that
4387     - * we only have one limit we need to check in the loop
4388     - */
4389     - if (max > count)
4390     - max = count;
4391     -
4392     if (IS_UNALIGNED(src, dst))
4393     goto byte_at_a_time;
4394    
4395     @@ -114,6 +107,13 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
4396     unsigned long max = max_addr - src_addr;
4397     long retval;
4398    
4399     + /*
4400     + * Truncate 'max' to the user-specified limit, so that
4401     + * we only have one limit we need to check in the loop
4402     + */
4403     + if (max > count)
4404     + max = count;
4405     +
4406     kasan_check_write(dst, count);
4407     check_object_size(dst, count, false);
4408     if (user_access_begin(src, max)) {
4409     diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
4410     index 6c0005d5dd5c..41670d4a5816 100644
4411     --- a/lib/strnlen_user.c
4412     +++ b/lib/strnlen_user.c
4413     @@ -26,13 +26,6 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
4414     unsigned long align, res = 0;
4415     unsigned long c;
4416    
4417     - /*
4418     - * Truncate 'max' to the user-specified limit, so that
4419     - * we only have one limit we need to check in the loop
4420     - */
4421     - if (max > count)
4422     - max = count;
4423     -
4424     /*
4425     * Do everything aligned. But that means that we
4426     * need to also expand the maximum..
4427     @@ -109,6 +102,13 @@ long strnlen_user(const char __user *str, long count)
4428     unsigned long max = max_addr - src_addr;
4429     long retval;
4430    
4431     + /*
4432     + * Truncate 'max' to the user-specified limit, so that
4433     + * we only have one limit we need to check in the loop
4434     + */
4435     + if (max > count)
4436     + max = count;
4437     +
4438     if (user_access_begin(str, max)) {
4439     retval = do_strnlen_user(str, count, max);
4440     user_access_end();
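Both lib/ fixes above are the same movement: the clamp of max to the caller's count used to live inside the do_* helpers, i.e. after user_access_begin() had already opened the user-access window for the unclamped max. Hoisting the clamp into strncpy_from_user()/strnlen_user() keeps the window no wider than what the caller asked for. A userspace model of that invariant follows; copy_bounded() and its parameters are illustrative, not the kernel helpers:

#include <stdio.h>

/* 'max' models the distance to the end of the accessible range,
 * 'count' the caller's limit. The clamp must happen before anything
 * sizes an access window from 'max'. */
static long copy_bounded(char *dst, const char *src, unsigned long max,
			 long count)
{
	unsigned long i;

	if (count >= 0 && max > (unsigned long)count)
		max = count;
	/* everything below may now rely only on the clamped 'max' */
	for (i = 0; i < max && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';
	return (long)i;
}

int main(void)
{
	char buf[8];
	long n = copy_bounded(buf, "hello world", 64, 5);

	printf("copied %ld bytes: \"%s\"\n", n, buf);
	return 0;
}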
4441     diff --git a/lib/test_xarray.c b/lib/test_xarray.c
4442     index 7df4f7f395bf..03c3f42966ce 100644
4443     --- a/lib/test_xarray.c
4444     +++ b/lib/test_xarray.c
4445     @@ -2,6 +2,7 @@
4446     /*
4447     * test_xarray.c: Test the XArray API
4448     * Copyright (c) 2017-2018 Microsoft Corporation
4449     + * Copyright (c) 2019-2020 Oracle
4450     * Author: Matthew Wilcox <willy@infradead.org>
4451     */
4452    
4453     @@ -902,28 +903,34 @@ static noinline void check_store_iter(struct xarray *xa)
4454     XA_BUG_ON(xa, !xa_empty(xa));
4455     }
4456    
4457     -static noinline void check_multi_find(struct xarray *xa)
4458     +static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
4459     {
4460     #ifdef CONFIG_XARRAY_MULTI
4461     + unsigned long multi = 3 << order;
4462     + unsigned long next = 4 << order;
4463     unsigned long index;
4464    
4465     - xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
4466     - XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);
4467     + xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
4468     + XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
4469     + XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);
4470    
4471     index = 0;
4472     XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
4473     - xa_mk_value(12));
4474     - XA_BUG_ON(xa, index != 12);
4475     - index = 13;
4476     + xa_mk_value(multi));
4477     + XA_BUG_ON(xa, index != multi);
4478     + index = multi + 1;
4479     XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
4480     - xa_mk_value(12));
4481     - XA_BUG_ON(xa, (index < 12) || (index >= 16));
4482     + xa_mk_value(multi));
4483     + XA_BUG_ON(xa, (index < multi) || (index >= next));
4484     XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
4485     - xa_mk_value(16));
4486     - XA_BUG_ON(xa, index != 16);
4487     -
4488     - xa_erase_index(xa, 12);
4489     - xa_erase_index(xa, 16);
4490     + xa_mk_value(next));
4491     + XA_BUG_ON(xa, index != next);
4492     + XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
4493     + XA_BUG_ON(xa, index != next);
4494     +
4495     + xa_erase_index(xa, multi);
4496     + xa_erase_index(xa, next);
4497     + xa_erase_index(xa, next + 1);
4498     XA_BUG_ON(xa, !xa_empty(xa));
4499     #endif
4500     }
4501     @@ -1046,12 +1053,33 @@ static noinline void check_find_3(struct xarray *xa)
4502     xa_destroy(xa);
4503     }
4504    
4505     +static noinline void check_find_4(struct xarray *xa)
4506     +{
4507     + unsigned long index = 0;
4508     + void *entry;
4509     +
4510     + xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
4511     +
4512     + entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
4513     + XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
4514     +
4515     + entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
4516     + XA_BUG_ON(xa, entry);
4517     +
4518     + xa_erase_index(xa, ULONG_MAX);
4519     +}
4520     +
4521     static noinline void check_find(struct xarray *xa)
4522     {
4523     + unsigned i;
4524     +
4525     check_find_1(xa);
4526     check_find_2(xa);
4527     check_find_3(xa);
4528     - check_multi_find(xa);
4529     + check_find_4(xa);
4530     +
4531     + for (i = 2; i < 10; i++)
4532     + check_multi_find_1(xa, i);
4533     check_multi_find_2(xa);
4534     }
4535    
4536     diff --git a/lib/xarray.c b/lib/xarray.c
4537     index 1237c213f52b..47e17d46e5f8 100644
4538     --- a/lib/xarray.c
4539     +++ b/lib/xarray.c
4540     @@ -1,7 +1,8 @@
4541     // SPDX-License-Identifier: GPL-2.0+
4542     /*
4543     * XArray implementation
4544     - * Copyright (c) 2017 Microsoft Corporation
4545     + * Copyright (c) 2017-2018 Microsoft Corporation
4546     + * Copyright (c) 2018-2020 Oracle
4547     * Author: Matthew Wilcox <willy@infradead.org>
4548     */
4549    
4550     @@ -1081,6 +1082,8 @@ void *xas_find(struct xa_state *xas, unsigned long max)
4551    
4552     if (xas_error(xas))
4553     return NULL;
4554     + if (xas->xa_index > max)
4555     + return set_bounds(xas);
4556    
4557     if (!xas->xa_node) {
4558     xas->xa_index = 1;
4559     @@ -1150,6 +1153,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
4560    
4561     if (xas_error(xas))
4562     return NULL;
4563     + if (xas->xa_index > max)
4564     + goto max;
4565    
4566     if (!xas->xa_node) {
4567     xas->xa_index = 1;
4568     @@ -1824,6 +1829,17 @@ void *xa_find(struct xarray *xa, unsigned long *indexp,
4569     }
4570     EXPORT_SYMBOL(xa_find);
4571    
4572     +static bool xas_sibling(struct xa_state *xas)
4573     +{
4574     + struct xa_node *node = xas->xa_node;
4575     + unsigned long mask;
4576     +
4577     + if (!node)
4578     + return false;
4579     + mask = (XA_CHUNK_SIZE << node->shift) - 1;
4580     + return (xas->xa_index & mask) > (xas->xa_offset << node->shift);
4581     +}
4582     +
4583     /**
4584     * xa_find_after() - Search the XArray for a present entry.
4585     * @xa: XArray.
4586     @@ -1847,21 +1863,20 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp,
4587     XA_STATE(xas, xa, *indexp + 1);
4588     void *entry;
4589    
4590     + if (xas.xa_index == 0)
4591     + return NULL;
4592     +
4593     rcu_read_lock();
4594     for (;;) {
4595     if ((__force unsigned int)filter < XA_MAX_MARKS)
4596     entry = xas_find_marked(&xas, max, filter);
4597     else
4598     entry = xas_find(&xas, max);
4599     - if (xas.xa_node == XAS_BOUNDS)
4600     +
4601     + if (xas_invalid(&xas))
4602     break;
4603     - if (xas.xa_shift) {
4604     - if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
4605     - continue;
4606     - } else {
4607     - if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
4608     - continue;
4609     - }
4610     + if (xas_sibling(&xas))
4611     + continue;
4612     if (!xas_retry(&xas, entry))
4613     break;
4614     }
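The heart of the xa_find_after() rewrite is xas_sibling(): one mask derived from the node's shift replaces two shift-dependent special cases, deciding whether the current index landed on a sibling slot of a multi-order entry, in which case the search must continue rather than report the same entry twice. The arithmetic extracted into a standalone program, with XA_CHUNK_SIZE fixed at 64 as on a default kernel build; is_sibling() is an illustrative name:

#include <stdio.h>

#define XA_CHUNK_SHIFT	6
#define XA_CHUNK_SIZE	(1UL << XA_CHUNK_SHIFT)	/* 64 slots per node */

/* index:  the lookup index being examined
 * offset: slot offset of the entry's head within its node
 * shift:  node->shift (0 at the bottom level) */
static int is_sibling(unsigned long index, unsigned int offset,
		      unsigned int shift)
{
	unsigned long mask = (XA_CHUNK_SIZE << shift) - 1;

	return (index & mask) > ((unsigned long)offset << shift);
}

int main(void)
{
	/* An order-2 entry occupies indices 12..15 of a bottom-level
	 * node: slot 12 is the head, 13..15 are sibling slots. */
	for (unsigned long idx = 12; idx <= 15; idx++)
		printf("index %lu: sibling=%d\n", idx,
		       is_sibling(idx, 12, 0));
	return 0;
}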
4615     diff --git a/net/core/dev.c b/net/core/dev.c
4616     index 3098c90d60e2..82325d3d1371 100644
4617     --- a/net/core/dev.c
4618     +++ b/net/core/dev.c
4619     @@ -5270,9 +5270,29 @@ static void flush_all_backlogs(void)
4620     put_online_cpus();
4621     }
4622    
4623     +/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
4624     +static void gro_normal_list(struct napi_struct *napi)
4625     +{
4626     + if (!napi->rx_count)
4627     + return;
4628     + netif_receive_skb_list_internal(&napi->rx_list);
4629     + INIT_LIST_HEAD(&napi->rx_list);
4630     + napi->rx_count = 0;
4631     +}
4632     +
4633     +/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
4634     + * pass the whole batch up to the stack.
4635     + */
4636     +static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
4637     +{
4638     + list_add_tail(&skb->list, &napi->rx_list);
4639     + if (++napi->rx_count >= gro_normal_batch)
4640     + gro_normal_list(napi);
4641     +}
4642     +
4643     INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
4644     INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
4645     -static int napi_gro_complete(struct sk_buff *skb)
4646     +static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
4647     {
4648     struct packet_offload *ptype;
4649     __be16 type = skb->protocol;
4650     @@ -5305,7 +5325,8 @@ static int napi_gro_complete(struct sk_buff *skb)
4651     }
4652    
4653     out:
4654     - return netif_receive_skb_internal(skb);
4655     + gro_normal_one(napi, skb);
4656     + return NET_RX_SUCCESS;
4657     }
4658    
4659     static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
4660     @@ -5318,7 +5339,7 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
4661     if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4662     return;
4663     skb_list_del_init(skb);
4664     - napi_gro_complete(skb);
4665     + napi_gro_complete(napi, skb);
4666     napi->gro_hash[index].count--;
4667     }
4668    
4669     @@ -5421,7 +5442,7 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4670     }
4671     }
4672    
4673     -static void gro_flush_oldest(struct list_head *head)
4674     +static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
4675     {
4676     struct sk_buff *oldest;
4677    
4678     @@ -5437,7 +5458,7 @@ static void gro_flush_oldest(struct list_head *head)
4679     * SKB to the chain.
4680     */
4681     skb_list_del_init(oldest);
4682     - napi_gro_complete(oldest);
4683     + napi_gro_complete(napi, oldest);
4684     }
4685    
4686     INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
4687     @@ -5513,7 +5534,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
4688    
4689     if (pp) {
4690     skb_list_del_init(pp);
4691     - napi_gro_complete(pp);
4692     + napi_gro_complete(napi, pp);
4693     napi->gro_hash[hash].count--;
4694     }
4695    
4696     @@ -5524,7 +5545,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
4697     goto normal;
4698    
4699     if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
4700     - gro_flush_oldest(gro_head);
4701     + gro_flush_oldest(napi, gro_head);
4702     } else {
4703     napi->gro_hash[hash].count++;
4704     }
4705     @@ -5672,26 +5693,6 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
4706     }
4707     EXPORT_SYMBOL(napi_get_frags);
4708    
4709     -/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
4710     -static void gro_normal_list(struct napi_struct *napi)
4711     -{
4712     - if (!napi->rx_count)
4713     - return;
4714     - netif_receive_skb_list_internal(&napi->rx_list);
4715     - INIT_LIST_HEAD(&napi->rx_list);
4716     - napi->rx_count = 0;
4717     -}
4718     -
4719     -/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
4720     - * pass the whole batch up to the stack.
4721     - */
4722     -static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
4723     -{
4724     - list_add_tail(&skb->list, &napi->rx_list);
4725     - if (++napi->rx_count >= gro_normal_batch)
4726     - gro_normal_list(napi);
4727     -}
4728     -
4729     static gro_result_t napi_frags_finish(struct napi_struct *napi,
4730     struct sk_buff *skb,
4731     gro_result_t ret)
4732     @@ -5979,8 +5980,6 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
4733     NAPIF_STATE_IN_BUSY_POLL)))
4734     return false;
4735    
4736     - gro_normal_list(n);
4737     -
4738     if (n->gro_bitmask) {
4739     unsigned long timeout = 0;
4740    
4741     @@ -5996,6 +5995,9 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
4742     hrtimer_start(&n->timer, ns_to_ktime(timeout),
4743     HRTIMER_MODE_REL_PINNED);
4744     }
4745     +
4746     + gro_normal_list(n);
4747     +
4748     if (unlikely(!list_empty(&n->poll_list))) {
4749     /* If n->poll_list is not empty, we need to mask irqs */
4750     local_irq_save(flags);
4751     @@ -6327,8 +6329,6 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4752     goto out_unlock;
4753     }
4754    
4755     - gro_normal_list(n);
4756     -
4757     if (n->gro_bitmask) {
4758     /* flush too old packets
4759     * If HZ < 1000, flush all packets.
4760     @@ -6336,6 +6336,8 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4761     napi_gro_flush(n, HZ >= 1000);
4762     }
4763    
4764     + gro_normal_list(n);
4765     +
4766     /* Some drivers may have called napi_schedule
4767     * prior to exhausting their budget.
4768     */
4769     @@ -7973,6 +7975,22 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu)
4770     }
4771     EXPORT_SYMBOL(__dev_set_mtu);
4772    
4773     +int dev_validate_mtu(struct net_device *dev, int new_mtu,
4774     + struct netlink_ext_ack *extack)
4775     +{
4776     + /* MTU must be positive, and in range */
4777     + if (new_mtu < 0 || new_mtu < dev->min_mtu) {
4778     + NL_SET_ERR_MSG(extack, "mtu less than device minimum");
4779     + return -EINVAL;
4780     + }
4781     +
4782     + if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
4783     + NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
4784     + return -EINVAL;
4785     + }
4786     + return 0;
4787     +}
4788     +
4789     /**
4790     * dev_set_mtu_ext - Change maximum transfer unit
4791     * @dev: device
4792     @@ -7989,16 +8007,9 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
4793     if (new_mtu == dev->mtu)
4794     return 0;
4795    
4796     - /* MTU must be positive, and in range */
4797     - if (new_mtu < 0 || new_mtu < dev->min_mtu) {
4798     - NL_SET_ERR_MSG(extack, "mtu less than device minimum");
4799     - return -EINVAL;
4800     - }
4801     -
4802     - if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
4803     - NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
4804     - return -EINVAL;
4805     - }
4806     + err = dev_validate_mtu(dev, new_mtu, extack);
4807     + if (err)
4808     + return err;
4809    
4810     if (!netif_device_present(dev))
4811     return -ENODEV;
4812     @@ -9073,8 +9084,10 @@ int register_netdevice(struct net_device *dev)
4813     goto err_uninit;
4814    
4815     ret = netdev_register_kobject(dev);
4816     - if (ret)
4817     + if (ret) {
4818     + dev->reg_state = NETREG_UNREGISTERED;
4819     goto err_uninit;
4820     + }
4821     dev->reg_state = NETREG_REGISTERED;
4822    
4823     __netdev_update_features(dev);
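Several of the dev.c changes above work together: gro_normal_list()/gro_normal_one() move above napi_gro_complete() so that completed GRO super-packets are queued on the per-NAPI rx_list instead of being delivered one at a time, and napi_complete_done()/napi_poll() now flush that list after napi_gro_flush(), so packets flushed out of GRO ride the same batched list pass. The batching scheme reduced to a toy; BATCH and struct rx_batch are illustrative:

#include <stdio.h>

#define BATCH 8		/* stands in for gro_normal_batch */

struct rx_batch { int pkts[BATCH]; int count; };

/* gro_normal_list(): hand the whole batch to the stack at once. */
static void batch_flush(struct rx_batch *b)
{
	if (!b->count)
		return;
	printf("delivering %d packets in one list pass\n", b->count);
	b->count = 0;
}

/* gro_normal_one(): queue one packet, flush when the batch fills. */
static void batch_one(struct rx_batch *b, int pkt)
{
	b->pkts[b->count] = pkt;
	if (++b->count >= BATCH)
		batch_flush(b);
}

int main(void)
{
	struct rx_batch b = { .count = 0 };

	for (int i = 0; i < 20; i++)
		batch_one(&b, i);
	batch_flush(&b);	/* end-of-poll flush, after the GRO flush */
	return 0;
}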
4824     diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
4825     index e4ec575c1fba..944acb1a9f29 100644
4826     --- a/net/core/rtnetlink.c
4827     +++ b/net/core/rtnetlink.c
4828     @@ -2959,8 +2959,17 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
4829     dev->rtnl_link_ops = ops;
4830     dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
4831    
4832     - if (tb[IFLA_MTU])
4833     - dev->mtu = nla_get_u32(tb[IFLA_MTU]);
4834     + if (tb[IFLA_MTU]) {
4835     + u32 mtu = nla_get_u32(tb[IFLA_MTU]);
4836     + int err;
4837     +
4838     + err = dev_validate_mtu(dev, mtu, extack);
4839     + if (err) {
4840     + free_netdev(dev);
4841     + return ERR_PTR(err);
4842     + }
4843     + dev->mtu = mtu;
4844     + }
4845     if (tb[IFLA_ADDRESS]) {
4846     memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
4847     nla_len(tb[IFLA_ADDRESS]));
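Factoring the range check out as dev_validate_mtu() lets rtnl_create_link() reject a bad IFLA_MTU before the device ever registers; previously the value was stored unchecked there and only later changes went through dev_set_mtu_ext(). The check itself, lifted into a self-contained sketch with an illustrative struct net_dev:

#include <errno.h>
#include <stdio.h>

struct net_dev { int min_mtu; int max_mtu; };

/* Same two-sided range check as the new dev_validate_mtu(). */
static int validate_mtu(const struct net_dev *dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu < dev->min_mtu)
		return -EINVAL;		/* below the device minimum */
	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu)
		return -EINVAL;		/* above the device maximum */
	return 0;
}

int main(void)
{
	struct net_dev dev = { .min_mtu = 68, .max_mtu = 9000 };

	printf("1500 -> %d, 40 -> %d, 65536 -> %d\n",
	       validate_mtu(&dev, 1500), validate_mtu(&dev, 40),
	       validate_mtu(&dev, 65536));
	return 0;
}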
4848     diff --git a/net/core/skmsg.c b/net/core/skmsg.c
4849     index 3866d7e20c07..ded2d5227678 100644
4850     --- a/net/core/skmsg.c
4851     +++ b/net/core/skmsg.c
4852     @@ -594,8 +594,6 @@ EXPORT_SYMBOL_GPL(sk_psock_destroy);
4853    
4854     void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
4855     {
4856     - sock_owned_by_me(sk);
4857     -
4858     sk_psock_cork_free(psock);
4859     sk_psock_zap_ingress(psock);
4860    
4861     diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
4862     index d40de84a637f..754d84b217f0 100644
4863     --- a/net/hsr/hsr_main.h
4864     +++ b/net/hsr/hsr_main.h
4865     @@ -191,7 +191,7 @@ void hsr_debugfs_term(struct hsr_priv *priv);
4866     void hsr_debugfs_create_root(void);
4867     void hsr_debugfs_remove_root(void);
4868     #else
4869     -static inline void void hsr_debugfs_rename(struct net_device *dev)
4870     +static inline void hsr_debugfs_rename(struct net_device *dev)
4871     {
4872     }
4873     static inline void hsr_debugfs_init(struct hsr_priv *priv,
4874     diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
4875     index 0e4a7cf6bc87..e2e219c7854a 100644
4876     --- a/net/ipv4/esp4_offload.c
4877     +++ b/net/ipv4/esp4_offload.c
4878     @@ -57,6 +57,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
4879     if (!x)
4880     goto out_reset;
4881    
4882     + skb->mark = xfrm_smark_get(skb->mark, x);
4883     +
4884     sp->xvec[sp->len++] = x;
4885     sp->olen++;
4886    
4887     diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
4888     index 1ab2fb6bb37d..f12fa8da6127 100644
4889     --- a/net/ipv4/fib_trie.c
4890     +++ b/net/ipv4/fib_trie.c
4891     @@ -2175,6 +2175,12 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
4892     int count = cb->args[2];
4893     t_key key = cb->args[3];
4894    
4895     + /* First time here, count and key are both always 0. Count > 0
4896     + * and key == 0 means the dump has wrapped around and we are done.
4897     + */
4898     + if (count && !key)
4899     + return skb->len;
4900     +
4901     while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
4902     int err;
4903    
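fib_table_dump() resumes across netlink dump rounds through cb->args[]: count is how many leaves have been emitted and key is where to resume. Because the key space wraps, key can only be 0 again after a full pass, so "count > 0 and key == 0" means the dump already wrapped and the round must return immediately instead of starting over. The cursor logic as a toy; struct cursor and dump_round() are illustrative:

#include <stdio.h>

struct cursor { unsigned int count; unsigned int key; };

/* One netlink-style dump round over a keyspace of 'nkeys' entries,
 * emitting at most 'budget' entries; returns 0 when the dump is done. */
static int dump_round(struct cursor *c, unsigned int nkeys, int budget)
{
	if (c->count && !c->key)	/* wrapped on an earlier round */
		return 0;
	while (budget-- > 0) {
		printf("emit key %u\n", c->key);
		c->count++;
		c->key = (c->key + 1) % nkeys;	/* wraps back to 0 */
		if (!c->key)
			return 0;	/* just wrapped: done */
	}
	return 1;			/* budget exhausted: resume later */
}

int main(void)
{
	struct cursor c = { 0, 0 };
	int resumes = 0;

	while (dump_round(&c, 4, 3))
		resumes++;
	printf("finished after %d resumes\n", resumes);
	return 0;
}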
4904     diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
4905     index 30fa771d382a..dcc79ff54b41 100644
4906     --- a/net/ipv4/fou.c
4907     +++ b/net/ipv4/fou.c
4908     @@ -662,8 +662,8 @@ static const struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
4909     [FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
4910     [FOU_ATTR_LOCAL_V4] = { .type = NLA_U32, },
4911     [FOU_ATTR_PEER_V4] = { .type = NLA_U32, },
4912     - [FOU_ATTR_LOCAL_V6] = { .type = sizeof(struct in6_addr), },
4913     - [FOU_ATTR_PEER_V6] = { .type = sizeof(struct in6_addr), },
4914     + [FOU_ATTR_LOCAL_V6] = { .len = sizeof(struct in6_addr), },
4915     + [FOU_ATTR_PEER_V6] = { .len = sizeof(struct in6_addr), },
4916     [FOU_ATTR_PEER_PORT] = { .type = NLA_U16, },
4917     [FOU_ATTR_IFINDEX] = { .type = NLA_S32, },
4918     };
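The fou hunk is a one-word class of bug worth noting: in a designated-initializer policy table, ".type = sizeof(struct in6_addr)" compiles fine but stores 16 into the attribute-type field, a nonsensical NLA_* value, whereas ".len = sizeof(struct in6_addr)" is what asks the netlink parser to enforce the payload length. A stripped-down illustration with a simplified struct, not the real nla_policy:

#include <stdio.h>

struct policy { int type; int len; };	/* simplified stand-in */

int main(void)
{
	struct policy wrong = { .type = 16 };	/* bogus type id, no check */
	struct policy right = { .len = 16 };	/* exact-length validation */

	printf("wrong: type=%d len=%d\n", wrong.type, wrong.len);
	printf("right: type=%d len=%d\n", right.type, right.len);
	return 0;
}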
4919     diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
4920     index 0fe2a5d3e258..74e1d964a615 100644
4921     --- a/net/ipv4/ip_tunnel.c
4922     +++ b/net/ipv4/ip_tunnel.c
4923     @@ -1236,10 +1236,8 @@ int ip_tunnel_init(struct net_device *dev)
4924     iph->version = 4;
4925     iph->ihl = 5;
4926    
4927     - if (tunnel->collect_md) {
4928     - dev->features |= NETIF_F_NETNS_LOCAL;
4929     + if (tunnel->collect_md)
4930     netif_keep_dst(dev);
4931     - }
4932     return 0;
4933     }
4934     EXPORT_SYMBOL_GPL(ip_tunnel_init);
4935     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
4936     index 84115577d3dc..3640e8563a10 100644
4937     --- a/net/ipv4/tcp.c
4938     +++ b/net/ipv4/tcp.c
4939     @@ -2520,6 +2520,7 @@ static void tcp_rtx_queue_purge(struct sock *sk)
4940     {
4941     struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
4942    
4943     + tcp_sk(sk)->highest_sack = NULL;
4944     while (p) {
4945     struct sk_buff *skb = rb_to_skb(p);
4946    
4947     diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
4948     index a6545ef0d27b..6c4d79baff26 100644
4949     --- a/net/ipv4/tcp_bbr.c
4950     +++ b/net/ipv4/tcp_bbr.c
4951     @@ -779,8 +779,7 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
4952     * bandwidth sample. Delivered is in packets and interval_us in uS and
4953     * ratio will be <<1 for most connections. So delivered is first scaled.
4954     */
4955     - bw = (u64)rs->delivered * BW_UNIT;
4956     - do_div(bw, rs->interval_us);
4957     + bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);
4958    
4959     /* If this sample is application-limited, it is likely to have a very
4960     * low delivered count that represents application behavior rather than
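The BBR change swaps do_div(), whose divisor is only 32 bits wide, for div64_long(), a full-width division, since rs->interval_us is a long. The demonstration below uses a deliberately oversized divisor purely to make the truncation visible; real interval values are far smaller, so treat this as an illustration of the arithmetic hazard, not of observed BBR behaviour:

#include <stdint.h>
#include <stdio.h>

#define BW_UNIT (1 << 24)	/* scaling constant, as in tcp_bbr.c */

int main(void)
{
	uint64_t delivered = 1000;
	long interval_us = 0x100000001L;	/* truncates to 1 as u32 */

	uint64_t full  = (delivered * (uint64_t)BW_UNIT) /
			 (uint64_t)interval_us;
	uint64_t trunc = (delivered * (uint64_t)BW_UNIT) /
			 (uint32_t)interval_us;

	printf("64-by-64 divide: %llu\n", (unsigned long long)full);
	printf("64-by-32 divide (truncated divisor): %llu\n",
	       (unsigned long long)trunc);
	return 0;
}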
4961     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
4962     index 59b78ce2ce2e..6f7155d91313 100644
4963     --- a/net/ipv4/tcp_input.c
4964     +++ b/net/ipv4/tcp_input.c
4965     @@ -3164,6 +3164,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
4966     tp->retransmit_skb_hint = NULL;
4967     if (unlikely(skb == tp->lost_skb_hint))
4968     tp->lost_skb_hint = NULL;
4969     + tcp_highest_sack_replace(sk, skb, next);
4970     tcp_rtx_queue_unlink_and_free(skb, sk);
4971     }
4972    
4973     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
4974     index e4ba915c4bb5..660b24fe041e 100644
4975     --- a/net/ipv4/tcp_output.c
4976     +++ b/net/ipv4/tcp_output.c
4977     @@ -3231,6 +3231,7 @@ int tcp_send_synack(struct sock *sk)
4978     if (!nskb)
4979     return -ENOMEM;
4980     INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
4981     + tcp_highest_sack_replace(sk, skb, nskb);
4982     tcp_rtx_queue_unlink_and_free(skb, sk);
4983     __skb_header_release(nskb);
4984     tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
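The three TCP hunks (tcp_rtx_queue_purge(), tcp_clean_rtx_queue(), tcp_send_synack()) close the same hole: tp->highest_sack can cache a pointer to an skb that is about to be unlinked and freed, so the hint must be cleared or re-pointed via tcp_highest_sack_replace() first, or later SACK processing chases a dangling pointer. The discipline in generic form; struct queue, queue_free_head() and the hint field are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };
struct queue { struct node *head; struct node *hint; };

/* Unlink and free the head, fixing up the cached hint first, in the
 * spirit of tcp_highest_sack_replace(). */
static void queue_free_head(struct queue *q)
{
	struct node *victim = q->head;

	if (!victim)
		return;
	if (q->hint == victim)
		q->hint = victim->next;	/* never leave it dangling */
	q->head = victim->next;
	free(victim);
}

int main(void)
{
	struct queue q = { NULL, NULL };

	for (int i = 2; i >= 0; i--) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = q.head;
		q.head = n;
	}
	q.hint = q.head;		/* hint caches the head node */
	queue_free_head(&q);
	printf("hint now -> %d\n", q.hint ? q.hint->id : -1);
	while (q.head)
		queue_free_head(&q);
	return 0;
}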
4985     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
4986     index 7aa4e77161f6..7ae7065758bd 100644
4987     --- a/net/ipv4/udp.c
4988     +++ b/net/ipv4/udp.c
4989     @@ -1368,7 +1368,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
4990     if (likely(partial)) {
4991     up->forward_deficit += size;
4992     size = up->forward_deficit;
4993     - if (size < (sk->sk_rcvbuf >> 2))
4994     + if (size < (sk->sk_rcvbuf >> 2) &&
4995     + !skb_queue_empty(&up->reader_queue))
4996     return;
4997     } else {
4998     size += up->forward_deficit;
4999     diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
5000     index e31626ffccd1..fd535053245b 100644
5001     --- a/net/ipv6/esp6_offload.c
5002     +++ b/net/ipv6/esp6_offload.c
5003     @@ -79,6 +79,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
5004     if (!x)
5005     goto out_reset;
5006    
5007     + skb->mark = xfrm_smark_get(skb->mark, x);
5008     +
5009     sp->xvec[sp->len++] = x;
5010     sp->olen++;
5011    
5012     diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
5013     index 189de56f5e36..9ec05a1df5e1 100644
5014     --- a/net/ipv6/ip6_gre.c
5015     +++ b/net/ipv6/ip6_gre.c
5016     @@ -1466,7 +1466,6 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
5017     dev->mtu -= 8;
5018    
5019     if (tunnel->parms.collect_md) {
5020     - dev->features |= NETIF_F_NETNS_LOCAL;
5021     netif_keep_dst(dev);
5022     }
5023     ip6gre_tnl_init_features(dev);
5024     @@ -1894,7 +1893,6 @@ static void ip6gre_tap_setup(struct net_device *dev)
5025     dev->needs_free_netdev = true;
5026     dev->priv_destructor = ip6gre_dev_free;
5027    
5028     - dev->features |= NETIF_F_NETNS_LOCAL;
5029     dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5030     dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5031     netif_keep_dst(dev);
5032     @@ -2197,7 +2195,6 @@ static void ip6erspan_tap_setup(struct net_device *dev)
5033     dev->needs_free_netdev = true;
5034     dev->priv_destructor = ip6gre_dev_free;
5035    
5036     - dev->features |= NETIF_F_NETNS_LOCAL;
5037     dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5038     dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5039     netif_keep_dst(dev);
5040     diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
5041     index 2f376dbc37d5..b5dd20c4599b 100644
5042     --- a/net/ipv6/ip6_tunnel.c
5043     +++ b/net/ipv6/ip6_tunnel.c
5044     @@ -1877,10 +1877,8 @@ static int ip6_tnl_dev_init(struct net_device *dev)
5045     if (err)
5046     return err;
5047     ip6_tnl_link_config(t);
5048     - if (t->parms.collect_md) {
5049     - dev->features |= NETIF_F_NETNS_LOCAL;
5050     + if (t->parms.collect_md)
5051     netif_keep_dst(dev);
5052     - }
5053     return 0;
5054     }
5055    
5056     diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
5057     index e70567446f28..802eebf8ac4b 100644
5058     --- a/net/ipv6/seg6_local.c
5059     +++ b/net/ipv6/seg6_local.c
5060     @@ -23,6 +23,7 @@
5061     #include <net/addrconf.h>
5062     #include <net/ip6_route.h>
5063     #include <net/dst_cache.h>
5064     +#include <net/ip_tunnels.h>
5065     #ifdef CONFIG_IPV6_SEG6_HMAC
5066     #include <net/seg6_hmac.h>
5067     #endif
5068     @@ -135,7 +136,8 @@ static bool decap_and_validate(struct sk_buff *skb, int proto)
5069    
5070     skb_reset_network_header(skb);
5071     skb_reset_transport_header(skb);
5072     - skb->encapsulation = 0;
5073     + if (iptunnel_pull_offloads(skb))
5074     + return false;
5075    
5076     return true;
5077     }
5078     diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
5079     index e1f271a1b2c1..bfd4b42ba305 100644
5080     --- a/net/netfilter/ipset/ip_set_bitmap_gen.h
5081     +++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
5082     @@ -75,7 +75,7 @@ mtype_flush(struct ip_set *set)
5083    
5084     if (set->extensions & IPSET_EXT_DESTROY)
5085     mtype_ext_cleanup(set);
5086     - memset(map->members, 0, map->memsize);
5087     + bitmap_zero(map->members, map->elements);
5088     set->elements = 0;
5089     set->ext_size = 0;
5090     }
5091     diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
5092     index 11ff9d4a7006..d934384f31ad 100644
5093     --- a/net/netfilter/ipset/ip_set_bitmap_ip.c
5094     +++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
5095     @@ -37,7 +37,7 @@ MODULE_ALIAS("ip_set_bitmap:ip");
5096    
5097     /* Type structure */
5098     struct bitmap_ip {
5099     - void *members; /* the set members */
5100     + unsigned long *members; /* the set members */
5101     u32 first_ip; /* host byte order, included in range */
5102     u32 last_ip; /* host byte order, included in range */
5103     u32 elements; /* number of max elements in the set */
5104     @@ -220,7 +220,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
5105     u32 first_ip, u32 last_ip,
5106     u32 elements, u32 hosts, u8 netmask)
5107     {
5108     - map->members = ip_set_alloc(map->memsize);
5109     + map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
5110     if (!map->members)
5111     return false;
5112     map->first_ip = first_ip;
5113     @@ -310,7 +310,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
5114     if (!map)
5115     return -ENOMEM;
5116    
5117     - map->memsize = bitmap_bytes(0, elements - 1);
5118     + map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
5119     set->variant = &bitmap_ip;
5120     if (!init_map_ip(set, map, first_ip, last_ip,
5121     elements, hosts, netmask)) {
5122     diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
5123     index 1d4e63326e68..e8532783b43a 100644
5124     --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
5125     +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
5126     @@ -42,7 +42,7 @@ enum {
5127    
5128     /* Type structure */
5129     struct bitmap_ipmac {
5130     - void *members; /* the set members */
5131     + unsigned long *members; /* the set members */
5132     u32 first_ip; /* host byte order, included in range */
5133     u32 last_ip; /* host byte order, included in range */
5134     u32 elements; /* number of max elements in the set */
5135     @@ -299,7 +299,7 @@ static bool
5136     init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
5137     u32 first_ip, u32 last_ip, u32 elements)
5138     {
5139     - map->members = ip_set_alloc(map->memsize);
5140     + map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
5141     if (!map->members)
5142     return false;
5143     map->first_ip = first_ip;
5144     @@ -360,7 +360,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
5145     if (!map)
5146     return -ENOMEM;
5147    
5148     - map->memsize = bitmap_bytes(0, elements - 1);
5149     + map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
5150     set->variant = &bitmap_ipmac;
5151     if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
5152     kfree(map);
5153     diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
5154     index 704a0dda1609..e3ac914fff1a 100644
5155     --- a/net/netfilter/ipset/ip_set_bitmap_port.c
5156     +++ b/net/netfilter/ipset/ip_set_bitmap_port.c
5157     @@ -30,7 +30,7 @@ MODULE_ALIAS("ip_set_bitmap:port");
5158    
5159     /* Type structure */
5160     struct bitmap_port {
5161     - void *members; /* the set members */
5162     + unsigned long *members; /* the set members */
5163     u16 first_port; /* host byte order, included in range */
5164     u16 last_port; /* host byte order, included in range */
5165     u32 elements; /* number of max elements in the set */
5166     @@ -204,7 +204,7 @@ static bool
5167     init_map_port(struct ip_set *set, struct bitmap_port *map,
5168     u16 first_port, u16 last_port)
5169     {
5170     - map->members = ip_set_alloc(map->memsize);
5171     + map->members = bitmap_zalloc(map->elements, GFP_KERNEL | __GFP_NOWARN);
5172     if (!map->members)
5173     return false;
5174     map->first_port = first_port;
5175     @@ -244,7 +244,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
5176     return -ENOMEM;
5177    
5178     map->elements = elements;
5179     - map->memsize = bitmap_bytes(0, map->elements);
5180     + map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
5181     set->variant = &bitmap_port;
5182     if (!init_map_port(set, map, first_port, last_port)) {
5183     kfree(map);
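All four ipset hunks retype members from void * to unsigned long * and size it with BITS_TO_LONGS() so the bitmap_*() helpers operate on whole machine words; the old bitmap_bytes() byte count could leave a partial trailing word. The sizing arithmetic in portable C, with calloc()/memset() standing in for the kernel's bitmap_zalloc()/bitmap_zero():

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int elements = 1000;
	size_t memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
	unsigned long *members = calloc(1, memsize); /* bitmap_zalloc() */

	if (!members)
		return 1;
	/* set bit 5, word-based like the kernel bitmap helpers */
	members[5 / BITS_PER_LONG] |= 1UL << (5 % BITS_PER_LONG);
	memset(members, 0, memsize);		/* bitmap_zero() analogue */
	printf("%u elements -> %zu bytes (%zu longs)\n",
	       elements, memsize, memsize / sizeof(unsigned long));
	free(members);
	return 0;
}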
5184     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
5185     index 9fefd0150091..23544842b692 100644
5186     --- a/net/netfilter/nf_tables_api.c
5187     +++ b/net/netfilter/nf_tables_api.c
5188     @@ -488,48 +488,71 @@ static inline u64 nf_tables_alloc_handle(struct nft_table *table)
5189    
5190     static const struct nft_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX];
5191    
5192     +static const struct nft_chain_type *
5193     +__nft_chain_type_get(u8 family, enum nft_chain_types type)
5194     +{
5195     + if (family >= NFPROTO_NUMPROTO ||
5196     + type >= NFT_CHAIN_T_MAX)
5197     + return NULL;
5198     +
5199     + return chain_type[family][type];
5200     +}
5201     +
5202     static const struct nft_chain_type *
5203     __nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family)
5204     {
5205     + const struct nft_chain_type *type;
5206     int i;
5207    
5208     for (i = 0; i < NFT_CHAIN_T_MAX; i++) {
5209     - if (chain_type[family][i] != NULL &&
5210     - !nla_strcmp(nla, chain_type[family][i]->name))
5211     - return chain_type[family][i];
5212     + type = __nft_chain_type_get(family, i);
5213     + if (!type)
5214     + continue;
5215     + if (!nla_strcmp(nla, type->name))
5216     + return type;
5217     }
5218     return NULL;
5219     }
5220    
5221     -/*
5222     - * Loading a module requires dropping mutex that guards the transaction.
5223     - * A different client might race to start a new transaction meanwhile. Zap the
5224     - * list of pending transaction and then restore it once the mutex is grabbed
5225     - * again. Users of this function return EAGAIN which implicitly triggers the
5226     - * transaction abort path to clean up the list of pending transactions.
5227     - */
5228     +struct nft_module_request {
5229     + struct list_head list;
5230     + char module[MODULE_NAME_LEN];
5231     + bool done;
5232     +};
5233     +
5234     #ifdef CONFIG_MODULES
5235     -static void nft_request_module(struct net *net, const char *fmt, ...)
5236     +static int nft_request_module(struct net *net, const char *fmt, ...)
5237     {
5238     char module_name[MODULE_NAME_LEN];
5239     - LIST_HEAD(commit_list);
5240     + struct nft_module_request *req;
5241     va_list args;
5242     int ret;
5243    
5244     - list_splice_init(&net->nft.commit_list, &commit_list);
5245     -
5246     va_start(args, fmt);
5247     ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
5248     va_end(args);
5249     if (ret >= MODULE_NAME_LEN)
5250     - return;
5251     + return 0;
5252    
5253     - mutex_unlock(&net->nft.commit_mutex);
5254     - request_module("%s", module_name);
5255     - mutex_lock(&net->nft.commit_mutex);
5256     + list_for_each_entry(req, &net->nft.module_list, list) {
5257     + if (!strcmp(req->module, module_name)) {
5258     + if (req->done)
5259     + return 0;
5260    
5261     - WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
5262     - list_splice(&commit_list, &net->nft.commit_list);
5263     + /* A request to load this module already exists. */
5264     + return -EAGAIN;
5265     + }
5266     + }
5267     +
5268     + req = kmalloc(sizeof(*req), GFP_KERNEL);
5269     + if (!req)
5270     + return -ENOMEM;
5271     +
5272     + req->done = false;
5273     + strlcpy(req->module, module_name, MODULE_NAME_LEN);
5274     + list_add_tail(&req->list, &net->nft.module_list);
5275     +
5276     + return -EAGAIN;
5277     }
5278     #endif
5279    
5280     @@ -553,10 +576,9 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla,
5281     lockdep_nfnl_nft_mutex_not_held();
5282     #ifdef CONFIG_MODULES
5283     if (autoload) {
5284     - nft_request_module(net, "nft-chain-%u-%.*s", family,
5285     - nla_len(nla), (const char *)nla_data(nla));
5286     - type = __nf_tables_chain_type_lookup(nla, family);
5287     - if (type != NULL)
5288     + if (nft_request_module(net, "nft-chain-%u-%.*s", family,
5289     + nla_len(nla),
5290     + (const char *)nla_data(nla)) == -EAGAIN)
5291     return ERR_PTR(-EAGAIN);
5292     }
5293     #endif
5294     @@ -1095,11 +1117,8 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx)
5295    
5296     void nft_register_chain_type(const struct nft_chain_type *ctype)
5297     {
5298     - if (WARN_ON(ctype->family >= NFPROTO_NUMPROTO))
5299     - return;
5300     -
5301     nfnl_lock(NFNL_SUBSYS_NFTABLES);
5302     - if (WARN_ON(chain_type[ctype->family][ctype->type] != NULL)) {
5303     + if (WARN_ON(__nft_chain_type_get(ctype->family, ctype->type))) {
5304     nfnl_unlock(NFNL_SUBSYS_NFTABLES);
5305     return;
5306     }
5307     @@ -1551,7 +1570,10 @@ static int nft_chain_parse_hook(struct net *net,
5308     hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
5309     hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
5310    
5311     - type = chain_type[family][NFT_CHAIN_T_DEFAULT];
5312     + type = __nft_chain_type_get(family, NFT_CHAIN_T_DEFAULT);
5313     + if (!type)
5314     + return -EOPNOTSUPP;
5315     +
5316     if (nla[NFTA_CHAIN_TYPE]) {
5317     type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE],
5318     family, autoload);
5319     @@ -2060,9 +2082,8 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
5320     static int nft_expr_type_request_module(struct net *net, u8 family,
5321     struct nlattr *nla)
5322     {
5323     - nft_request_module(net, "nft-expr-%u-%.*s", family,
5324     - nla_len(nla), (char *)nla_data(nla));
5325     - if (__nft_expr_type_get(family, nla))
5326     + if (nft_request_module(net, "nft-expr-%u-%.*s", family,
5327     + nla_len(nla), (char *)nla_data(nla)) == -EAGAIN)
5328     return -EAGAIN;
5329    
5330     return 0;
5331     @@ -2088,9 +2109,9 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
5332     if (nft_expr_type_request_module(net, family, nla) == -EAGAIN)
5333     return ERR_PTR(-EAGAIN);
5334    
5335     - nft_request_module(net, "nft-expr-%.*s",
5336     - nla_len(nla), (char *)nla_data(nla));
5337     - if (__nft_expr_type_get(family, nla))
5338     + if (nft_request_module(net, "nft-expr-%.*s",
5339     + nla_len(nla),
5340     + (char *)nla_data(nla)) == -EAGAIN)
5341     return ERR_PTR(-EAGAIN);
5342     }
5343     #endif
5344     @@ -2181,9 +2202,10 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
5345     err = PTR_ERR(ops);
5346     #ifdef CONFIG_MODULES
5347     if (err == -EAGAIN)
5348     - nft_expr_type_request_module(ctx->net,
5349     - ctx->family,
5350     - tb[NFTA_EXPR_NAME]);
5351     + if (nft_expr_type_request_module(ctx->net,
5352     + ctx->family,
5353     + tb[NFTA_EXPR_NAME]) != -EAGAIN)
5354     + err = -ENOENT;
5355     #endif
5356     goto err1;
5357     }
5358     @@ -3020,8 +3042,7 @@ nft_select_set_ops(const struct nft_ctx *ctx,
5359     lockdep_nfnl_nft_mutex_not_held();
5360     #ifdef CONFIG_MODULES
5361     if (list_empty(&nf_tables_set_types)) {
5362     - nft_request_module(ctx->net, "nft-set");
5363     - if (!list_empty(&nf_tables_set_types))
5364     + if (nft_request_module(ctx->net, "nft-set") == -EAGAIN)
5365     return ERR_PTR(-EAGAIN);
5366     }
5367     #endif
5368     @@ -5147,8 +5168,7 @@ nft_obj_type_get(struct net *net, u32 objtype)
5369     lockdep_nfnl_nft_mutex_not_held();
5370     #ifdef CONFIG_MODULES
5371     if (type == NULL) {
5372     - nft_request_module(net, "nft-obj-%u", objtype);
5373     - if (__nft_obj_type_get(objtype))
5374     + if (nft_request_module(net, "nft-obj-%u", objtype) == -EAGAIN)
5375     return ERR_PTR(-EAGAIN);
5376     }
5377     #endif
5378     @@ -5764,8 +5784,7 @@ nft_flowtable_type_get(struct net *net, u8 family)
5379     lockdep_nfnl_nft_mutex_not_held();
5380     #ifdef CONFIG_MODULES
5381     if (type == NULL) {
5382     - nft_request_module(net, "nf-flowtable-%u", family);
5383     - if (__nft_flowtable_type_get(family))
5384     + if (nft_request_module(net, "nf-flowtable-%u", family) == -EAGAIN)
5385     return ERR_PTR(-EAGAIN);
5386     }
5387     #endif
5388     @@ -6712,6 +6731,18 @@ static void nft_chain_del(struct nft_chain *chain)
5389     list_del_rcu(&chain->list);
5390     }
5391    
5392     +static void nf_tables_module_autoload_cleanup(struct net *net)
5393     +{
5394     + struct nft_module_request *req, *next;
5395     +
5396     + WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
5397     + list_for_each_entry_safe(req, next, &net->nft.module_list, list) {
5398     + WARN_ON_ONCE(!req->done);
5399     + list_del(&req->list);
5400     + kfree(req);
5401     + }
5402     +}
5403     +
5404     static void nf_tables_commit_release(struct net *net)
5405     {
5406     struct nft_trans *trans;
5407     @@ -6724,6 +6755,7 @@ static void nf_tables_commit_release(struct net *net)
5408     * to prevent expensive synchronize_rcu() in commit phase.
5409     */
5410     if (list_empty(&net->nft.commit_list)) {
5411     + nf_tables_module_autoload_cleanup(net);
5412     mutex_unlock(&net->nft.commit_mutex);
5413     return;
5414     }
5415     @@ -6738,6 +6770,7 @@ static void nf_tables_commit_release(struct net *net)
5416     list_splice_tail_init(&net->nft.commit_list, &nf_tables_destroy_list);
5417     spin_unlock(&nf_tables_destroy_list_lock);
5418    
5419     + nf_tables_module_autoload_cleanup(net);
5420     mutex_unlock(&net->nft.commit_mutex);
5421    
5422     schedule_work(&trans_destroy_work);
5423     @@ -6929,6 +6962,26 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
5424     return 0;
5425     }
5426    
5427     +static void nf_tables_module_autoload(struct net *net)
5428     +{
5429     + struct nft_module_request *req, *next;
5430     + LIST_HEAD(module_list);
5431     +
5432     + list_splice_init(&net->nft.module_list, &module_list);
5433     + mutex_unlock(&net->nft.commit_mutex);
5434     + list_for_each_entry_safe(req, next, &module_list, list) {
5435     + if (req->done) {
5436     + list_del(&req->list);
5437     + kfree(req);
5438     + } else {
5439     + request_module("%s", req->module);
5440     + req->done = true;
5441     + }
5442     + }
5443     + mutex_lock(&net->nft.commit_mutex);
5444     + list_splice(&module_list, &net->nft.module_list);
5445     +}
5446     +
5447     static void nf_tables_abort_release(struct nft_trans *trans)
5448     {
5449     switch (trans->msg_type) {
5450     @@ -6958,7 +7011,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
5451     kfree(trans);
5452     }
5453    
5454     -static int __nf_tables_abort(struct net *net)
5455     +static int __nf_tables_abort(struct net *net, bool autoload)
5456     {
5457     struct nft_trans *trans, *next;
5458     struct nft_trans_elem *te;
5459     @@ -7080,6 +7133,11 @@ static int __nf_tables_abort(struct net *net)
5460     nf_tables_abort_release(trans);
5461     }
5462    
5463     + if (autoload)
5464     + nf_tables_module_autoload(net);
5465     + else
5466     + nf_tables_module_autoload_cleanup(net);
5467     +
5468     return 0;
5469     }
5470    
5471     @@ -7088,9 +7146,9 @@ static void nf_tables_cleanup(struct net *net)
5472     nft_validate_state_update(net, NFT_VALIDATE_SKIP);
5473     }
5474    
5475     -static int nf_tables_abort(struct net *net, struct sk_buff *skb)
5476     +static int nf_tables_abort(struct net *net, struct sk_buff *skb, bool autoload)
5477     {
5478     - int ret = __nf_tables_abort(net);
5479     + int ret = __nf_tables_abort(net, autoload);
5480    
5481     mutex_unlock(&net->nft.commit_mutex);
5482    
5483     @@ -7685,6 +7743,7 @@ static int __net_init nf_tables_init_net(struct net *net)
5484     {
5485     INIT_LIST_HEAD(&net->nft.tables);
5486     INIT_LIST_HEAD(&net->nft.commit_list);
5487     + INIT_LIST_HEAD(&net->nft.module_list);
5488     mutex_init(&net->nft.commit_mutex);
5489     net->nft.base_seq = 1;
5490     net->nft.validate_state = NFT_VALIDATE_SKIP;
5491     @@ -7696,7 +7755,7 @@ static void __net_exit nf_tables_exit_net(struct net *net)
5492     {
5493     mutex_lock(&net->nft.commit_mutex);
5494     if (!list_empty(&net->nft.commit_list))
5495     - __nf_tables_abort(net);
5496     + __nf_tables_abort(net, false);
5497     __nft_release_tables(net);
5498     mutex_unlock(&net->nft.commit_mutex);
5499     WARN_ON_ONCE(!list_empty(&net->nft.tables));
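The largest change above replaces the old "drop commit_mutex, request_module(), re-take the mutex" dance, which let a concurrent client race the transaction, with a request queue: nft_request_module() records the module name on net->nft.module_list and returns -EAGAIN, the batch aborts, __nf_tables_abort(net, true) loads the queued modules with the mutex safely dropped, and the replayed batch then finds req->done set and proceeds. A userspace model of the queue's dedup-and-replay behaviour; RETRY stands in for -EAGAIN and all names are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RETRY 1		/* stands in for -EAGAIN: abort and replay */

struct mod_req {
	struct mod_req *next;
	char name[64];
	int done;
};

static struct mod_req *reqs;

/* nft_request_module() analogue: queue once, dedup on replay. */
static int request_module_deferred(const char *name)
{
	for (struct mod_req *r = reqs; r; r = r->next)
		if (!strcmp(r->name, name))
			return r->done ? 0 : RETRY;

	struct mod_req *r = calloc(1, sizeof(*r));
	if (!r)
		return -1;
	snprintf(r->name, sizeof(r->name), "%s", name);
	r->next = reqs;
	reqs = r;
	return RETRY;
}

/* nf_tables_module_autoload() analogue: runs with the mutex dropped. */
static void autoload_pending(void)
{
	for (struct mod_req *r = reqs; r; r = r->next)
		if (!r->done) {
			printf("request_module(%s)\n", r->name);
			r->done = 1;
		}
}

int main(void)
{
	printf("first pass: %d\n",
	       request_module_deferred("nft-chain-2-nat"));
	autoload_pending();
	printf("replay:     %d\n",
	       request_module_deferred("nft-chain-2-nat"));
	return 0;
}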
5500     diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
5501     index 4abbb452cf6c..99127e2d95a8 100644
5502     --- a/net/netfilter/nfnetlink.c
5503     +++ b/net/netfilter/nfnetlink.c
5504     @@ -476,7 +476,7 @@ ack:
5505     }
5506     done:
5507     if (status & NFNL_BATCH_REPLAY) {
5508     - ss->abort(net, oskb);
5509     + ss->abort(net, oskb, true);
5510     nfnl_err_reset(&err_list);
5511     kfree_skb(skb);
5512     module_put(ss->owner);
5513     @@ -487,11 +487,11 @@ done:
5514     status |= NFNL_BATCH_REPLAY;
5515     goto done;
5516     } else if (err) {
5517     - ss->abort(net, oskb);
5518     + ss->abort(net, oskb, false);
5519     netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
5520     }
5521     } else {
5522     - ss->abort(net, oskb);
5523     + ss->abort(net, oskb, false);
5524     }
5525     if (ss->cleanup)
5526     ss->cleanup(net);
5527     diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
5528     index f54d6ae15bb1..b42247aa48a9 100644
5529     --- a/net/netfilter/nft_osf.c
5530     +++ b/net/netfilter/nft_osf.c
5531     @@ -61,6 +61,9 @@ static int nft_osf_init(const struct nft_ctx *ctx,
5532     int err;
5533     u8 ttl;
5534    
5535     + if (!tb[NFTA_OSF_DREG])
5536     + return -EINVAL;
5537     +
5538     if (tb[NFTA_OSF_TTL]) {
5539     ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
5540     if (ttl > 2)
5541     diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
5542     index 76e0d122616a..c2cdd0fc2e70 100644
5543     --- a/net/sched/cls_api.c
5544     +++ b/net/sched/cls_api.c
5545     @@ -2055,9 +2055,8 @@ replay:
5546     &chain_info));
5547    
5548     mutex_unlock(&chain->filter_chain_lock);
5549     - tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
5550     - protocol, prio, chain, rtnl_held,
5551     - extack);
5552     + tp_new = tcf_proto_create(name, protocol, prio, chain,
5553     + rtnl_held, extack);
5554     if (IS_ERR(tp_new)) {
5555     err = PTR_ERR(tp_new);
5556     goto errout_tp;
5557     diff --git a/net/sched/ematch.c b/net/sched/ematch.c
5558     index 8f2ad706784d..d0140a92694a 100644
5559     --- a/net/sched/ematch.c
5560     +++ b/net/sched/ematch.c
5561     @@ -263,12 +263,12 @@ static int tcf_em_validate(struct tcf_proto *tp,
5562     }
5563     em->data = (unsigned long) v;
5564     }
5565     + em->datalen = data_len;
5566     }
5567     }
5568    
5569     em->matchid = em_hdr->matchid;
5570     em->flags = em_hdr->flags;
5571     - em->datalen = data_len;
5572     em->net = net;
5573    
5574     err = 0;
5575     diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
5576     index a80920f261ca..41e9c2932b34 100644
5577     --- a/net/tls/tls_sw.c
5578     +++ b/net/tls/tls_sw.c
5579     @@ -793,7 +793,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
5580     psock = sk_psock_get(sk);
5581     if (!psock || !policy) {
5582     err = tls_push_record(sk, flags, record_type);
5583     - if (err) {
5584     + if (err && err != -EINPROGRESS) {
5585     *copied -= sk_msg_free(sk, msg);
5586     tls_free_open_rec(sk);
5587     }
5588     @@ -819,7 +819,7 @@ more_data:
5589     switch (psock->eval) {
5590     case __SK_PASS:
5591     err = tls_push_record(sk, flags, record_type);
5592     - if (err < 0) {
5593     + if (err && err != -EINPROGRESS) {
5594     *copied -= sk_msg_free(sk, msg);
5595     tls_free_open_rec(sk);
5596     goto out_err;
5597     diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
5598     index 6aee9f5e8e71..256f3e97d1f3 100644
5599     --- a/net/x25/af_x25.c
5600     +++ b/net/x25/af_x25.c
5601     @@ -760,6 +760,10 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
5602     if (sk->sk_state == TCP_ESTABLISHED)
5603     goto out;
5604    
5605     + rc = -EALREADY; /* Do nothing if call is already in progress */
5606     + if (sk->sk_state == TCP_SYN_SENT)
5607     + goto out;
5608     +
5609     sk->sk_state = TCP_CLOSE;
5610     sock->state = SS_UNCONNECTED;
5611    
5612     @@ -806,7 +810,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
5613     /* Now the loop */
5614     rc = -EINPROGRESS;
5615     if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
5616     - goto out_put_neigh;
5617     + goto out;
5618    
5619     rc = x25_wait_for_connection_establishment(sk);
5620     if (rc)
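The x25_connect() fix adds the missing in-progress state: a second connect() while the call request is outstanding now returns -EALREADY instead of resetting the session, and a non-blocking connect returns -EINPROGRESS without jumping to out_put_neigh, which dropped the neighbour reference out from under the pending call. A condensed state-machine model; do_connect() and enum state are illustrative:

#include <errno.h>
#include <stdio.h>

enum state { CLOSED, SYN_SENT, ESTABLISHED };

static int do_connect(enum state *st, int nonblock)
{
	if (*st == ESTABLISHED)
		return -EISCONN;
	if (*st == SYN_SENT)
		return -EALREADY;	/* call already in progress */
	*st = SYN_SENT;
	if (nonblock)
		return -EINPROGRESS;	/* keep resources, report pending */
	*st = ESTABLISHED;		/* pretend the call completed */
	return 0;
}

int main(void)
{
	enum state st = CLOSED;

	printf("%d\n", do_connect(&st, 1));	/* -EINPROGRESS */
	printf("%d\n", do_connect(&st, 1));	/* -EALREADY */
	return 0;
}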
5621     diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
5622     index 612268eabef4..7225107a9aaf 100644
5623     --- a/scripts/recordmcount.c
5624     +++ b/scripts/recordmcount.c
5625     @@ -38,6 +38,10 @@
5626     #define R_AARCH64_ABS64 257
5627     #endif
5628    
5629     +#define R_ARM_PC24 1
5630     +#define R_ARM_THM_CALL 10
5631     +#define R_ARM_CALL 28
5632     +
5633     static int fd_map; /* File descriptor for file being modified. */
5634     static int mmap_failed; /* Boolean flag. */
5635     static char gpfx; /* prefix for global symbol name (sometimes '_') */
5636     @@ -418,6 +422,18 @@ static char const *already_has_rel_mcount = "success"; /* our work here is done!
5637     #define RECORD_MCOUNT_64
5638     #include "recordmcount.h"
5639    
5640     +static int arm_is_fake_mcount(Elf32_Rel const *rp)
5641     +{
5642     + switch (ELF32_R_TYPE(w(rp->r_info))) {
5643     + case R_ARM_THM_CALL:
5644     + case R_ARM_CALL:
5645     + case R_ARM_PC24:
5646     + return 0;
5647     + }
5648     +
5649     + return 1;
5650     +}
5651     +
5652     /* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
5653     * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
5654     * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
5655     @@ -523,6 +539,7 @@ static int do_file(char const *const fname)
5656     altmcount = "__gnu_mcount_nc";
5657     make_nop = make_nop_arm;
5658     rel_type_nop = R_ARM_NONE;
5659     + is_fake_mcount32 = arm_is_fake_mcount;
5660     gpfx = 0;
5661     break;
5662     case EM_AARCH64: