Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.19/0149-4.19.50-all-fixes.patch



Revision 3428
Fri Aug 2 11:47:56 2019 UTC by niro
File size: 66914 bytes
-linux-4.19.50
1 niro 3428 diff --git a/Makefile b/Makefile
2     index e84966c49117..f7e7e365e2ff 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 19
9     -SUBLEVEL = 49
10     +SUBLEVEL = 50
11     EXTRAVERSION =
12     NAME = "People's Front"
13    
14     diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
15     index 7c0b2e6cdfbd..4c7a93f4039a 100644
16     --- a/arch/mips/ath79/setup.c
17     +++ b/arch/mips/ath79/setup.c
18     @@ -211,6 +211,12 @@ const char *get_system_type(void)
19     return ath79_sys_type;
20     }
21    
22     +int get_c0_perfcount_int(void)
23     +{
24     + return ATH79_MISC_IRQ(5);
25     +}
26     +EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
27     +
28     unsigned int get_c0_compare_int(void)
29     {
30     return CP0_LEGACY_COMPARE_IRQ;
31     diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
32     index 2f616ebeb7e0..7755a1fad05a 100644
33     --- a/arch/mips/mm/mmap.c
34     +++ b/arch/mips/mm/mmap.c
35     @@ -203,6 +203,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
36    
37     int __virt_addr_valid(const volatile void *kaddr)
38     {
39     + unsigned long vaddr = (unsigned long)vaddr;
40     +
41     + if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
42     + return 0;
43     +
44     return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
45     }
46     EXPORT_SYMBOL_GPL(__virt_addr_valid);
47     diff --git a/arch/mips/pistachio/Platform b/arch/mips/pistachio/Platform
48     index d80cd612df1f..c3592b374ad2 100644
49     --- a/arch/mips/pistachio/Platform
50     +++ b/arch/mips/pistachio/Platform
51     @@ -6,3 +6,4 @@ cflags-$(CONFIG_MACH_PISTACHIO) += \
52     -I$(srctree)/arch/mips/include/asm/mach-pistachio
53     load-$(CONFIG_MACH_PISTACHIO) += 0xffffffff80400000
54     zload-$(CONFIG_MACH_PISTACHIO) += 0xffffffff81000000
55     +all-$(CONFIG_MACH_PISTACHIO) := uImage.gz
56     diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
57     index 22e9d281324d..e7d4ce6964ae 100644
58     --- a/arch/powerpc/kernel/nvram_64.c
59     +++ b/arch/powerpc/kernel/nvram_64.c
60     @@ -563,8 +563,6 @@ static int nvram_pstore_init(void)
61     nvram_pstore_info.buf = oops_data;
62     nvram_pstore_info.bufsize = oops_data_sz;
63    
64     - spin_lock_init(&nvram_pstore_info.buf_lock);
65     -
66     rc = pstore_register(&nvram_pstore_info);
67     if (rc && (rc != -EPERM))
68     /* Print error only when pstore.backend == nvram */
69     diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
70     index 72af23bacbb5..a6e3c7022245 100644
71     --- a/arch/s390/mm/fault.c
72     +++ b/arch/s390/mm/fault.c
73     @@ -107,7 +107,6 @@ void bust_spinlocks(int yes)
74    
75     /*
76     * Find out which address space caused the exception.
77     - * Access register mode is impossible, ignore space == 3.
78     */
79     static inline enum fault_type get_fault_type(struct pt_regs *regs)
80     {
81     @@ -132,6 +131,10 @@ static inline enum fault_type get_fault_type(struct pt_regs *regs)
82     }
83     return VDSO_FAULT;
84     }
85     + if (trans_exc_code == 1) {
86     + /* access register mode, not used in the kernel */
87     + return USER_FAULT;
88     + }
89     /* home space exception -> access via kernel ASCE */
90     return KERNEL_FAULT;
91     }
92     diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
93     index 9119d8e41f1f..87dcba101e56 100644
94     --- a/arch/x86/lib/insn-eval.c
95     +++ b/arch/x86/lib/insn-eval.c
96     @@ -555,7 +555,8 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
97     }
98    
99     /**
100     - * get_desc() - Obtain pointer to a segment descriptor
101     + * get_desc() - Obtain contents of a segment descriptor
102     + * @out: Segment descriptor contents on success
103     * @sel: Segment selector
104     *
105     * Given a segment selector, obtain a pointer to the segment descriptor.
106     @@ -563,18 +564,18 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
107     *
108     * Returns:
109     *
110     - * Pointer to segment descriptor on success.
111     + * True on success, false on failure.
112     *
113     * NULL on error.
114     */
115     -static struct desc_struct *get_desc(unsigned short sel)
116     +static bool get_desc(struct desc_struct *out, unsigned short sel)
117     {
118     struct desc_ptr gdt_desc = {0, 0};
119     unsigned long desc_base;
120    
121     #ifdef CONFIG_MODIFY_LDT_SYSCALL
122     if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) {
123     - struct desc_struct *desc = NULL;
124     + bool success = false;
125     struct ldt_struct *ldt;
126    
127     /* Bits [15:3] contain the index of the desired entry. */
128     @@ -582,12 +583,14 @@ static struct desc_struct *get_desc(unsigned short sel)
129    
130     mutex_lock(&current->active_mm->context.lock);
131     ldt = current->active_mm->context.ldt;
132     - if (ldt && sel < ldt->nr_entries)
133     - desc = &ldt->entries[sel];
134     + if (ldt && sel < ldt->nr_entries) {
135     + *out = ldt->entries[sel];
136     + success = true;
137     + }
138    
139     mutex_unlock(&current->active_mm->context.lock);
140    
141     - return desc;
142     + return success;
143     }
144     #endif
145     native_store_gdt(&gdt_desc);
146     @@ -602,9 +605,10 @@ static struct desc_struct *get_desc(unsigned short sel)
147     desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK);
148    
149     if (desc_base > gdt_desc.size)
150     - return NULL;
151     + return false;
152    
153     - return (struct desc_struct *)(gdt_desc.address + desc_base);
154     + *out = *(struct desc_struct *)(gdt_desc.address + desc_base);
155     + return true;
156     }
157    
158     /**
159     @@ -626,7 +630,7 @@ static struct desc_struct *get_desc(unsigned short sel)
160     */
161     unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
162     {
163     - struct desc_struct *desc;
164     + struct desc_struct desc;
165     short sel;
166    
167     sel = get_segment_selector(regs, seg_reg_idx);
168     @@ -664,11 +668,10 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
169     if (!sel)
170     return -1L;
171    
172     - desc = get_desc(sel);
173     - if (!desc)
174     + if (!get_desc(&desc, sel))
175     return -1L;
176    
177     - return get_desc_base(desc);
178     + return get_desc_base(&desc);
179     }
180    
181     /**
182     @@ -690,7 +693,7 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
183     */
184     static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
185     {
186     - struct desc_struct *desc;
187     + struct desc_struct desc;
188     unsigned long limit;
189     short sel;
190    
191     @@ -704,8 +707,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
192     if (!sel)
193     return 0;
194    
195     - desc = get_desc(sel);
196     - if (!desc)
197     + if (!get_desc(&desc, sel))
198     return 0;
199    
200     /*
201     @@ -714,8 +716,8 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
202     * not tested when checking the segment limits. In practice,
203     * this means that the segment ends in (limit << 12) + 0xfff.
204     */
205     - limit = get_desc_limit(desc);
206     - if (desc->g)
207     + limit = get_desc_limit(&desc);
208     + if (desc.g)
209     limit = (limit << 12) + 0xfff;
210    
211     return limit;
212     @@ -739,7 +741,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
213     */
214     int insn_get_code_seg_params(struct pt_regs *regs)
215     {
216     - struct desc_struct *desc;
217     + struct desc_struct desc;
218     short sel;
219    
220     if (v8086_mode(regs))
221     @@ -750,8 +752,7 @@ int insn_get_code_seg_params(struct pt_regs *regs)
222     if (sel < 0)
223     return sel;
224    
225     - desc = get_desc(sel);
226     - if (!desc)
227     + if (!get_desc(&desc, sel))
228     return -EINVAL;
229    
230     /*
231     @@ -759,10 +760,10 @@ int insn_get_code_seg_params(struct pt_regs *regs)
232     * determines whether a segment contains data or code. If this is a data
233     * segment, return error.
234     */
235     - if (!(desc->type & BIT(3)))
236     + if (!(desc.type & BIT(3)))
237     return -EINVAL;
238    
239     - switch ((desc->l << 1) | desc->d) {
240     + switch ((desc.l << 1) | desc.d) {
241     case 0: /*
242     * Legacy mode. CS.L=0, CS.D=0. Address and operand size are
243     * both 16-bit.
244     diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
245     index a7d966964c6f..513ce09e9950 100644
246     --- a/arch/x86/power/cpu.c
247     +++ b/arch/x86/power/cpu.c
248     @@ -299,7 +299,17 @@ int hibernate_resume_nonboot_cpu_disable(void)
249     * address in its instruction pointer may not be possible to resolve
250     * any more at that point (the page tables used by it previously may
251     * have been overwritten by hibernate image data).
252     + *
253     + * First, make sure that we wake up all the potentially disabled SMT
254     + * threads which have been initially brought up and then put into
255     + * mwait/cpuidle sleep.
256     + * Those will be put to proper (not interfering with hibernation
257     + * resume) sleep afterwards, and the resumed kernel will decide itself
258     + * what to do with them.
259     */
260     + ret = cpuhp_smt_enable();
261     + if (ret)
262     + return ret;
263     smp_ops.play_dead = resume_play_dead;
264     ret = disable_nonboot_cpus();
265     smp_ops.play_dead = play_dead;
266     diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
267     index f8e3b668d20b..c9986041a5e1 100644
268     --- a/arch/x86/power/hibernate_64.c
269     +++ b/arch/x86/power/hibernate_64.c
270     @@ -13,6 +13,7 @@
271     #include <linux/suspend.h>
272     #include <linux/scatterlist.h>
273     #include <linux/kdebug.h>
274     +#include <linux/cpu.h>
275    
276     #include <crypto/hash.h>
277    
278     @@ -363,3 +364,35 @@ int arch_hibernation_header_restore(void *addr)
279    
280     return 0;
281     }
282     +
283     +int arch_resume_nosmt(void)
284     +{
285     + int ret = 0;
286     + /*
287     + * We reached this while coming out of hibernation. This means
288     + * that SMT siblings are sleeping in hlt, as mwait is not safe
289     + * against control transition during resume (see comment in
290     + * hibernate_resume_nonboot_cpu_disable()).
291     + *
292     + * If the resumed kernel has SMT disabled, we have to take all the
293     + * SMT siblings out of hlt, and offline them again so that they
294     + * end up in mwait proper.
295     + *
296     + * Called with hotplug disabled.
297     + */
298     + cpu_hotplug_enable();
299     + if (cpu_smt_control == CPU_SMT_DISABLED ||
300     + cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
301     + enum cpuhp_smt_control old = cpu_smt_control;
302     +
303     + ret = cpuhp_smt_enable();
304     + if (ret)
305     + goto out;
306     + ret = cpuhp_smt_disable(old);
307     + if (ret)
308     + goto out;
309     + }
310     +out:
311     + cpu_hotplug_disable();
312     + return ret;
313     +}
314     diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
315     index 3c5ea7cb693e..ab8faa6d6616 100644
316     --- a/drivers/acpi/apei/erst.c
317     +++ b/drivers/acpi/apei/erst.c
318     @@ -1176,7 +1176,6 @@ static int __init erst_init(void)
319     "Error Record Serialization Table (ERST) support is initialized.\n");
320    
321     buf = kmalloc(erst_erange.size, GFP_KERNEL);
322     - spin_lock_init(&erst_info.buf_lock);
323     if (buf) {
324     erst_info.buf = buf + sizeof(struct cper_pstore_record);
325     erst_info.bufsize = erst_erange.size -
326     diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
327     index 3e905da33bcb..5830d9417886 100644
328     --- a/drivers/block/xen-blkfront.c
329     +++ b/drivers/block/xen-blkfront.c
330     @@ -1310,11 +1310,11 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
331     }
332    
333     free_shadow:
334     - kfree(rinfo->shadow[i].grants_used);
335     + kvfree(rinfo->shadow[i].grants_used);
336     rinfo->shadow[i].grants_used = NULL;
337     - kfree(rinfo->shadow[i].indirect_grants);
338     + kvfree(rinfo->shadow[i].indirect_grants);
339     rinfo->shadow[i].indirect_grants = NULL;
340     - kfree(rinfo->shadow[i].sg);
341     + kvfree(rinfo->shadow[i].sg);
342     rinfo->shadow[i].sg = NULL;
343     }
344    
345     @@ -1353,7 +1353,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
346     for (i = 0; i < info->nr_rings; i++)
347     blkif_free_ring(&info->rinfo[i]);
348    
349     - kfree(info->rinfo);
350     + kvfree(info->rinfo);
351     info->rinfo = NULL;
352     info->nr_rings = 0;
353     }
354     @@ -1914,9 +1914,9 @@ static int negotiate_mq(struct blkfront_info *info)
355     if (!info->nr_rings)
356     info->nr_rings = 1;
357    
358     - info->rinfo = kcalloc(info->nr_rings,
359     - sizeof(struct blkfront_ring_info),
360     - GFP_KERNEL);
361     + info->rinfo = kvcalloc(info->nr_rings,
362     + sizeof(struct blkfront_ring_info),
363     + GFP_KERNEL);
364     if (!info->rinfo) {
365     xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
366     info->nr_rings = 0;
367     @@ -2232,17 +2232,17 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
368    
369     for (i = 0; i < BLK_RING_SIZE(info); i++) {
370     rinfo->shadow[i].grants_used =
371     - kcalloc(grants,
372     - sizeof(rinfo->shadow[i].grants_used[0]),
373     - GFP_NOIO);
374     - rinfo->shadow[i].sg = kcalloc(psegs,
375     - sizeof(rinfo->shadow[i].sg[0]),
376     - GFP_NOIO);
377     + kvcalloc(grants,
378     + sizeof(rinfo->shadow[i].grants_used[0]),
379     + GFP_NOIO);
380     + rinfo->shadow[i].sg = kvcalloc(psegs,
381     + sizeof(rinfo->shadow[i].sg[0]),
382     + GFP_NOIO);
383     if (info->max_indirect_segments)
384     rinfo->shadow[i].indirect_grants =
385     - kcalloc(INDIRECT_GREFS(grants),
386     - sizeof(rinfo->shadow[i].indirect_grants[0]),
387     - GFP_NOIO);
388     + kvcalloc(INDIRECT_GREFS(grants),
389     + sizeof(rinfo->shadow[i].indirect_grants[0]),
390     + GFP_NOIO);
391     if ((rinfo->shadow[i].grants_used == NULL) ||
392     (rinfo->shadow[i].sg == NULL) ||
393     (info->max_indirect_segments &&
394     @@ -2256,11 +2256,11 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
395    
396     out_of_memory:
397     for (i = 0; i < BLK_RING_SIZE(info); i++) {
398     - kfree(rinfo->shadow[i].grants_used);
399     + kvfree(rinfo->shadow[i].grants_used);
400     rinfo->shadow[i].grants_used = NULL;
401     - kfree(rinfo->shadow[i].sg);
402     + kvfree(rinfo->shadow[i].sg);
403     rinfo->shadow[i].sg = NULL;
404     - kfree(rinfo->shadow[i].indirect_grants);
405     + kvfree(rinfo->shadow[i].indirect_grants);
406     rinfo->shadow[i].indirect_grants = NULL;
407     }
408     if (!list_empty(&rinfo->indirect_pages)) {
409     diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
410     index cfe87b465819..0f7d97917197 100644
411     --- a/drivers/firmware/efi/efi-pstore.c
412     +++ b/drivers/firmware/efi/efi-pstore.c
413     @@ -259,8 +259,7 @@ static int efi_pstore_write(struct pstore_record *record)
414     efi_name[i] = name[i];
415    
416     ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
417     - !pstore_cannot_block_path(record->reason),
418     - record->size, record->psi->buf);
419     + preemptible(), record->size, record->psi->buf);
420    
421     if (record->reason == KMSG_DUMP_OOPS)
422     efivar_run_worker();
423     @@ -369,7 +368,6 @@ static __init int efivars_pstore_init(void)
424     return -ENOMEM;
425    
426     efi_pstore_info.bufsize = 1024;
427     - spin_lock_init(&efi_pstore_info.buf_lock);
428    
429     if (pstore_register(&efi_pstore_info)) {
430     kfree(efi_pstore_info.buf);
431     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
432     index f008804f0b97..bbd927e800af 100644
433     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
434     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
435     @@ -416,8 +416,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
436     }
437     }
438     if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
439     - if ((adev->flags & AMD_IS_PX) &&
440     - amdgpu_atpx_dgpu_req_power_for_displays()) {
441     + if (adev->flags & AMD_IS_PX) {
442     pm_runtime_get_sync(adev->ddev->dev);
443     /* Just fire off a uevent and let userspace tell us what to do */
444     drm_helper_hpd_irq_event(adev->ddev);
445     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
446     index 5b39d1399630..5be82e4fd1da 100644
447     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
448     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
449     @@ -37,18 +37,10 @@ static void psp_set_funcs(struct amdgpu_device *adev);
450     static int psp_early_init(void *handle)
451     {
452     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
453     + struct psp_context *psp = &adev->psp;
454    
455     psp_set_funcs(adev);
456    
457     - return 0;
458     -}
459     -
460     -static int psp_sw_init(void *handle)
461     -{
462     - struct amdgpu_device *adev = (struct amdgpu_device *)handle;
463     - struct psp_context *psp = &adev->psp;
464     - int ret;
465     -
466     switch (adev->asic_type) {
467     case CHIP_VEGA10:
468     case CHIP_VEGA12:
469     @@ -67,6 +59,15 @@ static int psp_sw_init(void *handle)
470     if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
471     return 0;
472    
473     + return 0;
474     +}
475     +
476     +static int psp_sw_init(void *handle)
477     +{
478     + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
479     + struct psp_context *psp = &adev->psp;
480     + int ret;
481     +
482     ret = psp_init_microcode(psp);
483     if (ret) {
484     DRM_ERROR("Failed to load psp firmware!\n");
485     diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
486     index 94f5c3646cb7..c22062cc9992 100644
487     --- a/drivers/gpu/drm/drm_atomic_helper.c
488     +++ b/drivers/gpu/drm/drm_atomic_helper.c
489     @@ -1573,15 +1573,6 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
490     if (old_plane_state->fb != new_plane_state->fb)
491     return -EINVAL;
492    
493     - /*
494     - * FIXME: Since prepare_fb and cleanup_fb are always called on
495     - * the new_plane_state for async updates we need to block framebuffer
496     - * changes. This prevents use of a fb that's been cleaned up and
497     - * double cleanups from occuring.
498     - */
499     - if (old_plane_state->fb != new_plane_state->fb)
500     - return -EINVAL;
501     -
502     funcs = plane->helper_private;
503     if (!funcs->atomic_async_update)
504     return -EINVAL;
505     @@ -1612,6 +1603,8 @@ EXPORT_SYMBOL(drm_atomic_helper_async_check);
506     * drm_atomic_async_check() succeeds. Async commits are not supposed to swap
507     * the states like normal sync commits, but just do in-place changes on the
508     * current state.
509     + *
510     + * TODO: Implement full swap instead of doing in-place changes.
511     */
512     void drm_atomic_helper_async_commit(struct drm_device *dev,
513     struct drm_atomic_state *state)
514     @@ -1622,6 +1615,9 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
515     int i;
516    
517     for_each_new_plane_in_state(state, plane, plane_state, i) {
518     + struct drm_framebuffer *new_fb = plane_state->fb;
519     + struct drm_framebuffer *old_fb = plane->state->fb;
520     +
521     funcs = plane->helper_private;
522     funcs->atomic_async_update(plane, plane_state);
523    
524     @@ -1630,11 +1626,17 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
525     * plane->state in-place, make sure at least common
526     * properties have been properly updated.
527     */
528     - WARN_ON_ONCE(plane->state->fb != plane_state->fb);
529     + WARN_ON_ONCE(plane->state->fb != new_fb);
530     WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
531     WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
532     WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
533     WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
534     +
535     + /*
536     + * Make sure the FBs have been swapped so that cleanups in the
537     + * new_state performs a cleanup in the old FB.
538     + */
539     + WARN_ON_ONCE(plane_state->fb != old_fb);
540     }
541     }
542     EXPORT_SYMBOL(drm_atomic_helper_async_commit);
543     diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
544     index b506e3622b08..7c581f4c2b94 100644
545     --- a/drivers/gpu/drm/drm_edid.c
546     +++ b/drivers/gpu/drm/drm_edid.c
547     @@ -172,6 +172,25 @@ static const struct edid_quirk {
548     /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
549     { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
550    
551     + /* Valve Index Headset */
552     + { "VLV", 0x91a8, EDID_QUIRK_NON_DESKTOP },
553     + { "VLV", 0x91b0, EDID_QUIRK_NON_DESKTOP },
554     + { "VLV", 0x91b1, EDID_QUIRK_NON_DESKTOP },
555     + { "VLV", 0x91b2, EDID_QUIRK_NON_DESKTOP },
556     + { "VLV", 0x91b3, EDID_QUIRK_NON_DESKTOP },
557     + { "VLV", 0x91b4, EDID_QUIRK_NON_DESKTOP },
558     + { "VLV", 0x91b5, EDID_QUIRK_NON_DESKTOP },
559     + { "VLV", 0x91b6, EDID_QUIRK_NON_DESKTOP },
560     + { "VLV", 0x91b7, EDID_QUIRK_NON_DESKTOP },
561     + { "VLV", 0x91b8, EDID_QUIRK_NON_DESKTOP },
562     + { "VLV", 0x91b9, EDID_QUIRK_NON_DESKTOP },
563     + { "VLV", 0x91ba, EDID_QUIRK_NON_DESKTOP },
564     + { "VLV", 0x91bb, EDID_QUIRK_NON_DESKTOP },
565     + { "VLV", 0x91bc, EDID_QUIRK_NON_DESKTOP },
566     + { "VLV", 0x91bd, EDID_QUIRK_NON_DESKTOP },
567     + { "VLV", 0x91be, EDID_QUIRK_NON_DESKTOP },
568     + { "VLV", 0x91bf, EDID_QUIRK_NON_DESKTOP },
569     +
570     /* HTC Vive and Vive Pro VR Headsets */
571     { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
572     { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
573     @@ -193,6 +212,12 @@ static const struct edid_quirk {
574    
575     /* Sony PlayStation VR Headset */
576     { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
577     +
578     + /* Sensics VR Headsets */
579     + { "SEN", 0x1019, EDID_QUIRK_NON_DESKTOP },
580     +
581     + /* OSVR HDK and HDK2 VR Headsets */
582     + { "SVR", 0x1019, EDID_QUIRK_NON_DESKTOP },
583     };
584    
585     /*
586     diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
587     index de9531caaca0..9c8446184b17 100644
588     --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
589     +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
590     @@ -594,6 +594,9 @@ void cdv_intel_lvds_init(struct drm_device *dev,
591     int pipe;
592     u8 pin;
593    
594     + if (!dev_priv->lvds_enabled_in_vbt)
595     + return;
596     +
597     pin = GMBUS_PORT_PANEL;
598     if (!lvds_is_present_in_vbt(dev, &pin)) {
599     DRM_DEBUG_KMS("LVDS is not present in VBT\n");
600     diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
601     index 63bde4e86c6a..e019ea271ffc 100644
602     --- a/drivers/gpu/drm/gma500/intel_bios.c
603     +++ b/drivers/gpu/drm/gma500/intel_bios.c
604     @@ -436,6 +436,9 @@ parse_driver_features(struct drm_psb_private *dev_priv,
605     if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
606     dev_priv->edp.support = 1;
607    
608     + dev_priv->lvds_enabled_in_vbt = driver->lvds_config != 0;
609     + DRM_DEBUG_KMS("LVDS VBT config bits: 0x%x\n", driver->lvds_config);
610     +
611     /* This bit means to use 96Mhz for DPLL_A or not */
612     if (driver->primary_lfp_id)
613     dev_priv->dplla_96mhz = true;
614     diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
615     index 93d2f4000d2f..be3cf9b348bd 100644
616     --- a/drivers/gpu/drm/gma500/psb_drv.h
617     +++ b/drivers/gpu/drm/gma500/psb_drv.h
618     @@ -538,6 +538,7 @@ struct drm_psb_private {
619     int lvds_ssc_freq;
620     bool is_lvds_on;
621     bool is_mipi_on;
622     + bool lvds_enabled_in_vbt;
623     u32 mipi_ctrl_display;
624    
625     unsigned int core_freq;
626     diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
627     index 542f31ce108f..40b32b4d1d98 100644
628     --- a/drivers/gpu/drm/i915/gvt/gtt.c
629     +++ b/drivers/gpu/drm/i915/gvt/gtt.c
630     @@ -2161,7 +2161,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
631     struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
632     unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
633     unsigned long gma, gfn;
634     - struct intel_gvt_gtt_entry e, m;
635     + struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
636     + struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
637     dma_addr_t dma_addr;
638     int ret;
639    
640     @@ -2237,7 +2238,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
641    
642     if (ops->test_present(&e)) {
643     gfn = ops->get_pfn(&e);
644     - m = e;
645     + m.val64 = e.val64;
646     + m.type = e.type;
647    
648     /* one PTE update may be issued in multiple writes and the
649     * first write may not construct a valid gfn
650     diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
651     index 5b544cb38148..16f5d2d93801 100644
652     --- a/drivers/gpu/drm/i915/i915_reg.h
653     +++ b/drivers/gpu/drm/i915/i915_reg.h
654     @@ -32,7 +32,7 @@
655     * macros. Do **not** mass change existing definitions just to update the style.
656     *
657     * Layout
658     - * ''''''
659     + * ~~~~~~
660     *
661     * Keep helper macros near the top. For example, _PIPE() and friends.
662     *
663     @@ -78,7 +78,7 @@
664     * style. Use lower case in hexadecimal values.
665     *
666     * Naming
667     - * ''''''
668     + * ~~~~~~
669     *
670     * Try to name registers according to the specs. If the register name changes in
671     * the specs from platform to another, stick to the original name.
672     @@ -96,7 +96,7 @@
673     * suffix to the name. For example, ``_SKL`` or ``_GEN8``.
674     *
675     * Examples
676     - * ''''''''
677     + * ~~~~~~~~
678     *
679     * (Note that the values in the example are indented using spaces instead of
680     * TABs to avoid misalignment in generated documentation. Use TABs in the
681     diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
682     index 01d1d2088f04..728a20e1f638 100644
683     --- a/drivers/gpu/drm/i915/intel_fbc.c
684     +++ b/drivers/gpu/drm/i915/intel_fbc.c
685     @@ -1267,6 +1267,10 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
686     if (!HAS_FBC(dev_priv))
687     return 0;
688    
689     + /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
690     + if (IS_GEMINILAKE(dev_priv))
691     + return 0;
692     +
693     if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
694     return 1;
695    
696     diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
697     index 4bcdeaf8d98f..c44bb37e434c 100644
698     --- a/drivers/gpu/drm/i915/intel_workarounds.c
699     +++ b/drivers/gpu/drm/i915/intel_workarounds.c
700     @@ -37,7 +37,7 @@
701     * costly and simplifies things. We can revisit this in the future.
702     *
703     * Layout
704     - * ''''''
705     + * ~~~~~~
706     *
707     * Keep things in this file ordered by WA type, as per the above (context, GT,
708     * display, register whitelist, batchbuffer). Then, inside each type, keep the
709     diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
710     index 273cbbe27c2e..1ddf07514de6 100644
711     --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
712     +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
713     @@ -503,6 +503,8 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
714     static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
715     struct drm_plane_state *new_state)
716     {
717     + struct drm_framebuffer *old_fb = plane->state->fb;
718     +
719     plane->state->src_x = new_state->src_x;
720     plane->state->src_y = new_state->src_y;
721     plane->state->crtc_x = new_state->crtc_x;
722     @@ -525,6 +527,8 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
723    
724     *to_mdp5_plane_state(plane->state) =
725     *to_mdp5_plane_state(new_state);
726     +
727     + new_state->fb = old_fb;
728     }
729    
730     static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
731     diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
732     index 4b75ad40dd80..00d9d77f583a 100644
733     --- a/drivers/gpu/drm/nouveau/Kconfig
734     +++ b/drivers/gpu/drm/nouveau/Kconfig
735     @@ -16,10 +16,21 @@ config DRM_NOUVEAU
736     select INPUT if ACPI && X86
737     select THERMAL if ACPI && X86
738     select ACPI_VIDEO if ACPI && X86
739     - select DRM_VM
740     help
741     Choose this option for open-source NVIDIA support.
742    
743     +config NOUVEAU_LEGACY_CTX_SUPPORT
744     + bool "Nouveau legacy context support"
745     + depends on DRM_NOUVEAU
746     + select DRM_VM
747     + default y
748     + help
749     + There was a version of the nouveau DDX that relied on legacy
750     + ctx ioctls not erroring out. But that was back in time a long
751     + ways, so offer a way to disable it now. For uapi compat with
752     + old nouveau ddx this should be on by default, but modern distros
753     + should consider turning it off.
754     +
755     config NOUVEAU_PLATFORM_DRIVER
756     bool "Nouveau (NVIDIA) SoC GPUs"
757     depends on DRM_NOUVEAU && ARCH_TEGRA
758     diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
759     index 74d2283f2c28..2b7a54cc3c9e 100644
760     --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
761     +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
762     @@ -1015,8 +1015,11 @@ nouveau_driver_fops = {
763     static struct drm_driver
764     driver_stub = {
765     .driver_features =
766     - DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
767     - DRIVER_KMS_LEGACY_CONTEXT,
768     + DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER
769     +#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
770     + | DRIVER_KMS_LEGACY_CONTEXT
771     +#endif
772     + ,
773    
774     .load = nouveau_drm_load,
775     .unload = nouveau_drm_unload,
776     diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
777     index 9d3ac8b981da..d8e2d7b3b836 100644
778     --- a/drivers/gpu/drm/radeon/radeon_display.c
779     +++ b/drivers/gpu/drm/radeon/radeon_display.c
780     @@ -921,12 +921,12 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
781     ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
782    
783     /* get matching reference and feedback divider */
784     - *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
785     + *ref_div = min(max(den/post_div, 1u), ref_div_max);
786     *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
787    
788     /* limit fb divider to its maximum */
789     if (*fb_div > fb_div_max) {
790     - *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
791     + *ref_div = (*ref_div * fb_div_max)/(*fb_div);
792     *fb_div = fb_div_max;
793     }
794     }
795     diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
796     index 0c51c0ffdda9..8d6b6eeef71c 100644
797     --- a/drivers/i2c/busses/i2c-xiic.c
798     +++ b/drivers/i2c/busses/i2c-xiic.c
799     @@ -718,11 +718,16 @@ static const struct i2c_algorithm xiic_algorithm = {
800     .functionality = xiic_func,
801     };
802    
803     +static const struct i2c_adapter_quirks xiic_quirks = {
804     + .max_read_len = 255,
805     +};
806     +
807     static const struct i2c_adapter xiic_adapter = {
808     .owner = THIS_MODULE,
809     .name = DRIVER_NAME,
810     .class = I2C_CLASS_DEPRECATED,
811     .algo = &xiic_algorithm,
812     + .quirks = &xiic_quirks,
813     };
814    
815    
816     diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c
817     index 0390603170b4..aa7290784636 100644
818     --- a/drivers/irqchip/irq-ath79-misc.c
819     +++ b/drivers/irqchip/irq-ath79-misc.c
820     @@ -22,15 +22,6 @@
821     #define AR71XX_RESET_REG_MISC_INT_ENABLE 4
822    
823     #define ATH79_MISC_IRQ_COUNT 32
824     -#define ATH79_MISC_PERF_IRQ 5
825     -
826     -static int ath79_perfcount_irq;
827     -
828     -int get_c0_perfcount_int(void)
829     -{
830     - return ath79_perfcount_irq;
831     -}
832     -EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
833    
834     static void ath79_misc_irq_handler(struct irq_desc *desc)
835     {
836     @@ -122,8 +113,6 @@ static void __init ath79_misc_intc_domain_init(
837     {
838     void __iomem *base = domain->host_data;
839    
840     - ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ);
841     -
842     /* Disable and clear all interrupts */
843     __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
844     __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
845     diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
846     index 8c1b63a4337b..d2098b4d2945 100644
847     --- a/drivers/misc/genwqe/card_dev.c
848     +++ b/drivers/misc/genwqe/card_dev.c
849     @@ -780,6 +780,8 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
850    
851     if ((m->addr == 0x0) || (m->size == 0))
852     return -EINVAL;
853     + if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK))
854     + return -EINVAL;
855    
856     map_addr = (m->addr & PAGE_MASK);
857     map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
858     diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
859     index f4f8ab602442..f68435df76d4 100644
860     --- a/drivers/misc/genwqe/card_utils.c
861     +++ b/drivers/misc/genwqe/card_utils.c
862     @@ -587,6 +587,10 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
863     /* determine space needed for page_list. */
864     data = (unsigned long)uaddr;
865     offs = offset_in_page(data);
866     + if (size > ULONG_MAX - PAGE_SIZE - offs) {
867     + m->size = 0; /* mark unused and not added */
868     + return -EINVAL;
869     + }
870     m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);
871    
872     m->page_list = kcalloc(m->nr_pages,
873     diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
874     index 98f6b9c4b684..d16b57081c95 100644
875     --- a/drivers/mtd/nand/spi/macronix.c
876     +++ b/drivers/mtd/nand/spi/macronix.c
877     @@ -10,6 +10,7 @@
878     #include <linux/mtd/spinand.h>
879    
880     #define SPINAND_MFR_MACRONIX 0xC2
881     +#define MACRONIX_ECCSR_MASK 0x0F
882    
883     static SPINAND_OP_VARIANTS(read_cache_variants,
884     SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
885     @@ -55,7 +56,12 @@ static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
886     SPI_MEM_OP_DUMMY(1, 1),
887     SPI_MEM_OP_DATA_IN(1, eccsr, 1));
888    
889     - return spi_mem_exec_op(spinand->spimem, &op);
890     + int ret = spi_mem_exec_op(spinand->spimem, &op);
891     + if (ret)
892     + return ret;
893     +
894     + *eccsr &= MACRONIX_ECCSR_MASK;
895     + return 0;
896     }
897    
898     static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
899     diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
900     index 59212d3d9587..df5b74f289e1 100644
901     --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
902     +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
903     @@ -1310,8 +1310,8 @@ static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
904     int i;
905    
906     for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
907     - memcpy(data + i * ETH_GSTRING_LEN,
908     - &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
909     + strscpy(data + i * ETH_GSTRING_LEN,
910     + mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
911     }
912     }
913    
914     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
915     index d290f0787dfb..94c59939a8cf 100644
916     --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
917     +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
918     @@ -2010,6 +2010,8 @@ static int mlx4_en_set_tunable(struct net_device *dev,
919     return ret;
920     }
921    
922     +#define MLX4_EEPROM_PAGE_LEN 256
923     +
924     static int mlx4_en_get_module_info(struct net_device *dev,
925     struct ethtool_modinfo *modinfo)
926     {
927     @@ -2044,7 +2046,7 @@ static int mlx4_en_get_module_info(struct net_device *dev,
928     break;
929     case MLX4_MODULE_ID_SFP:
930     modinfo->type = ETH_MODULE_SFF_8472;
931     - modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
932     + modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN;
933     break;
934     default:
935     return -EINVAL;
936     diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
937     index 10fcc22f4590..ba6ac31a339d 100644
938     --- a/drivers/net/ethernet/mellanox/mlx4/port.c
939     +++ b/drivers/net/ethernet/mellanox/mlx4/port.c
940     @@ -2077,11 +2077,6 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
941     size -= offset + size - I2C_PAGE_SIZE;
942    
943     i2c_addr = I2C_ADDR_LOW;
944     - if (offset >= I2C_PAGE_SIZE) {
945     - /* Reset offset to high page */
946     - i2c_addr = I2C_ADDR_HIGH;
947     - offset -= I2C_PAGE_SIZE;
948     - }
949    
950     cable_info = (struct mlx4_cable_info *)inmad->data;
951     cable_info->dev_mem_address = cpu_to_be16(offset);
952     diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
953     index 832bce07c385..1afed85550c0 100644
954     --- a/drivers/net/ethernet/ti/cpsw.c
955     +++ b/drivers/net/ethernet/ti/cpsw.c
956     @@ -2978,7 +2978,7 @@ static void cpsw_get_ringparam(struct net_device *ndev,
957     struct cpsw_common *cpsw = priv->cpsw;
958    
959     /* not supported */
960     - ering->tx_max_pending = 0;
961     + ering->tx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
962     ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
963     ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
964     ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
965     diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
966     index 68c8fbf099f8..8807a806cc47 100644
967     --- a/drivers/net/phy/sfp.c
968     +++ b/drivers/net/phy/sfp.c
969     @@ -280,6 +280,7 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
970     {
971     struct i2c_msg msgs[2];
972     u8 bus_addr = a2 ? 0x51 : 0x50;
973     + size_t this_len;
974     int ret;
975    
976     msgs[0].addr = bus_addr;
977     @@ -291,11 +292,26 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
978     msgs[1].len = len;
979     msgs[1].buf = buf;
980    
981     - ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs));
982     - if (ret < 0)
983     - return ret;
984     + while (len) {
985     + this_len = len;
986     + if (this_len > 16)
987     + this_len = 16;
988    
989     - return ret == ARRAY_SIZE(msgs) ? len : 0;
990     + msgs[1].len = this_len;
991     +
992     + ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs));
993     + if (ret < 0)
994     + return ret;
995     +
996     + if (ret != ARRAY_SIZE(msgs))
997     + break;
998     +
999     + msgs[1].buf += this_len;
1000     + dev_addr += this_len;
1001     + len -= this_len;
1002     + }
1003     +
1004     + return msgs[1].buf - (u8 *)buf;
1005     }
1006    
1007     static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
1008     diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
1009     index 614823617b8b..b7b2e811d547 100644
1010     --- a/drivers/parisc/ccio-dma.c
1011     +++ b/drivers/parisc/ccio-dma.c
1012     @@ -565,8 +565,6 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
1013     /* We currently only support kernel addresses */
1014     BUG_ON(sid != KERNEL_SPACE);
1015    
1016     - mtsp(sid,1);
1017     -
1018     /*
1019     ** WORD 1 - low order word
1020     ** "hints" parm includes the VALID bit!
1021     @@ -597,7 +595,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
1022     ** Grab virtual index [0:11]
1023     ** Deposit virt_idx bits into I/O PDIR word
1024     */
1025     - asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
1026     + asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
1027     asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
1028     asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
1029    
1030     diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
1031     index 11de0eccf968..6dd1780a5885 100644
1032     --- a/drivers/parisc/sba_iommu.c
1033     +++ b/drivers/parisc/sba_iommu.c
1034     @@ -575,8 +575,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
1035     pa = virt_to_phys(vba);
1036     pa &= IOVP_MASK;
1037    
1038     - mtsp(sid,1);
1039     - asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
1040     + asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
1041     pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
1042    
1043     pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
1044     diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
1045     index f0b354b65a0e..8dbeb14a1e3a 100644
1046     --- a/drivers/tty/serial/serial_core.c
1047     +++ b/drivers/tty/serial/serial_core.c
1048     @@ -130,9 +130,6 @@ static void uart_start(struct tty_struct *tty)
1049     struct uart_port *port;
1050     unsigned long flags;
1051    
1052     - if (!state)
1053     - return;
1054     -
1055     port = uart_port_lock(state, flags);
1056     __uart_start(tty);
1057     uart_port_unlock(port, flags);
1058     @@ -730,9 +727,6 @@ static void uart_unthrottle(struct tty_struct *tty)
1059     upstat_t mask = UPSTAT_SYNC_FIFO;
1060     struct uart_port *port;
1061    
1062     - if (!state)
1063     - return;
1064     -
1065     port = uart_port_ref(state);
1066     if (!port)
1067     return;
1068     @@ -1708,6 +1702,16 @@ static void uart_dtr_rts(struct tty_port *port, int raise)
1069     uart_port_deref(uport);
1070     }
1071    
1072     +static int uart_install(struct tty_driver *driver, struct tty_struct *tty)
1073     +{
1074     + struct uart_driver *drv = driver->driver_state;
1075     + struct uart_state *state = drv->state + tty->index;
1076     +
1077     + tty->driver_data = state;
1078     +
1079     + return tty_standard_install(driver, tty);
1080     +}
1081     +
1082     /*
1083     * Calls to uart_open are serialised by the tty_lock in
1084     * drivers/tty/tty_io.c:tty_open()
1085     @@ -1720,11 +1724,8 @@ static void uart_dtr_rts(struct tty_port *port, int raise)
1086     */
1087     static int uart_open(struct tty_struct *tty, struct file *filp)
1088     {
1089     - struct uart_driver *drv = tty->driver->driver_state;
1090     - int retval, line = tty->index;
1091     - struct uart_state *state = drv->state + line;
1092     -
1093     - tty->driver_data = state;
1094     + struct uart_state *state = tty->driver_data;
1095     + int retval;
1096    
1097     retval = tty_port_open(&state->port, tty, filp);
1098     if (retval > 0)
1099     @@ -2409,6 +2410,7 @@ static void uart_poll_put_char(struct tty_driver *driver, int line, char ch)
1100     #endif
1101    
1102     static const struct tty_operations uart_ops = {
1103     + .install = uart_install,
1104     .open = uart_open,
1105     .close = uart_close,
1106     .write = uart_write,
1107     diff --git a/fs/fuse/file.c b/fs/fuse/file.c
1108     index 59e8bb72dc14..9a22aa580fe7 100644
1109     --- a/fs/fuse/file.c
1110     +++ b/fs/fuse/file.c
1111     @@ -2981,7 +2981,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
1112     offset + length > i_size_read(inode)) {
1113     err = inode_newsize_ok(inode, offset + length);
1114     if (err)
1115     - return err;
1116     + goto out;
1117     }
1118    
1119     if (!(mode & FALLOC_FL_KEEP_SIZE))
1120     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
1121     index 580e37bc3fe2..53cf8599a46e 100644
1122     --- a/fs/nfs/nfs4proc.c
1123     +++ b/fs/nfs/nfs4proc.c
1124     @@ -6850,7 +6850,6 @@ struct nfs4_lock_waiter {
1125     struct task_struct *task;
1126     struct inode *inode;
1127     struct nfs_lowner *owner;
1128     - bool notified;
1129     };
1130    
1131     static int
1132     @@ -6872,13 +6871,13 @@ nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, vo
1133     /* Make sure it's for the right inode */
1134     if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
1135     return 0;
1136     -
1137     - waiter->notified = true;
1138     }
1139    
1140     /* override "private" so we can use default_wake_function */
1141     wait->private = waiter->task;
1142     - ret = autoremove_wake_function(wait, mode, flags, key);
1143     + ret = woken_wake_function(wait, mode, flags, key);
1144     + if (ret)
1145     + list_del_init(&wait->entry);
1146     wait->private = waiter;
1147     return ret;
1148     }
1149     @@ -6887,7 +6886,6 @@ static int
1150     nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
1151     {
1152     int status = -ERESTARTSYS;
1153     - unsigned long flags;
1154     struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
1155     struct nfs_server *server = NFS_SERVER(state->inode);
1156     struct nfs_client *clp = server->nfs_client;
1157     @@ -6897,8 +6895,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
1158     .s_dev = server->s_dev };
1159     struct nfs4_lock_waiter waiter = { .task = current,
1160     .inode = state->inode,
1161     - .owner = &owner,
1162     - .notified = false };
1163     + .owner = &owner};
1164     wait_queue_entry_t wait;
1165    
1166     /* Don't bother with waitqueue if we don't expect a callback */
1167     @@ -6908,27 +6905,22 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
1168     init_wait(&wait);
1169     wait.private = &waiter;
1170     wait.func = nfs4_wake_lock_waiter;
1171     - add_wait_queue(q, &wait);
1172    
1173     while(!signalled()) {
1174     - waiter.notified = false;
1175     + add_wait_queue(q, &wait);
1176     status = nfs4_proc_setlk(state, cmd, request);
1177     - if ((status != -EAGAIN) || IS_SETLK(cmd))
1178     + if ((status != -EAGAIN) || IS_SETLK(cmd)) {
1179     + finish_wait(q, &wait);
1180     break;
1181     -
1182     - status = -ERESTARTSYS;
1183     - spin_lock_irqsave(&q->lock, flags);
1184     - if (waiter.notified) {
1185     - spin_unlock_irqrestore(&q->lock, flags);
1186     - continue;
1187     }
1188     - set_current_state(TASK_INTERRUPTIBLE);
1189     - spin_unlock_irqrestore(&q->lock, flags);
1190    
1191     - freezable_schedule_timeout(NFS4_LOCK_MAXTIMEOUT);
1192     + status = -ERESTARTSYS;
1193     + freezer_do_not_count();
1194     + wait_woken(&wait, TASK_INTERRUPTIBLE, NFS4_LOCK_MAXTIMEOUT);
1195     + freezer_count();
1196     + finish_wait(q, &wait);
1197     }
1198    
1199     - finish_wait(q, &wait);
1200     return status;
1201     }
1202     #else /* !CONFIG_NFS_V4_1 */
1203     diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
1204     index b821054ca3ed..4bae3f4fe829 100644
1205     --- a/fs/pstore/platform.c
1206     +++ b/fs/pstore/platform.c
1207     @@ -124,26 +124,27 @@ static const char *get_reason_str(enum kmsg_dump_reason reason)
1208     }
1209     }
1210    
1211     -bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
1212     +/*
1213     + * Should pstore_dump() wait for a concurrent pstore_dump()? If
1214     + * not, the current pstore_dump() will report a failure to dump
1215     + * and return.
1216     + */
1217     +static bool pstore_cannot_wait(enum kmsg_dump_reason reason)
1218     {
1219     - /*
1220     - * In case of NMI path, pstore shouldn't be blocked
1221     - * regardless of reason.
1222     - */
1223     + /* In NMI path, pstore shouldn't block regardless of reason. */
1224     if (in_nmi())
1225     return true;
1226    
1227     switch (reason) {
1228     /* In panic case, other cpus are stopped by smp_send_stop(). */
1229     case KMSG_DUMP_PANIC:
1230     - /* Emergency restart shouldn't be blocked by spin lock. */
1231     + /* Emergency restart shouldn't be blocked. */
1232     case KMSG_DUMP_EMERG:
1233     return true;
1234     default:
1235     return false;
1236     }
1237     }
1238     -EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
1239    
1240     #if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
1241     static int zbufsize_deflate(size_t size)
1242     @@ -323,8 +324,10 @@ static void allocate_buf_for_compression(void)
1243    
1244     static void free_buf_for_compression(void)
1245     {
1246     - if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm)
1247     + if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) {
1248     crypto_free_comp(tfm);
1249     + tfm = NULL;
1250     + }
1251     kfree(big_oops_buf);
1252     big_oops_buf = NULL;
1253     big_oops_buf_sz = 0;
1254     @@ -378,23 +381,23 @@ static void pstore_dump(struct kmsg_dumper *dumper,
1255     unsigned long total = 0;
1256     const char *why;
1257     unsigned int part = 1;
1258     - unsigned long flags = 0;
1259     - int is_locked;
1260     int ret;
1261    
1262     why = get_reason_str(reason);
1263    
1264     - if (pstore_cannot_block_path(reason)) {
1265     - is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
1266     - if (!is_locked) {
1267     - pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
1268     - , in_nmi() ? "NMI" : why);
1269     + if (down_trylock(&psinfo->buf_lock)) {
1270     + /* Failed to acquire lock: give up if we cannot wait. */
1271     + if (pstore_cannot_wait(reason)) {
1272     + pr_err("dump skipped in %s path: may corrupt error record\n",
1273     + in_nmi() ? "NMI" : why);
1274     + return;
1275     + }
1276     + if (down_interruptible(&psinfo->buf_lock)) {
1277     + pr_err("could not grab semaphore?!\n");
1278     return;
1279     }
1280     - } else {
1281     - spin_lock_irqsave(&psinfo->buf_lock, flags);
1282     - is_locked = 1;
1283     }
1284     +
1285     oopscount++;
1286     while (total < kmsg_bytes) {
1287     char *dst;
1288     @@ -411,7 +414,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
1289     record.part = part;
1290     record.buf = psinfo->buf;
1291    
1292     - if (big_oops_buf && is_locked) {
1293     + if (big_oops_buf) {
1294     dst = big_oops_buf;
1295     dst_size = big_oops_buf_sz;
1296     } else {
1297     @@ -429,7 +432,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
1298     dst_size, &dump_size))
1299     break;
1300    
1301     - if (big_oops_buf && is_locked) {
1302     + if (big_oops_buf) {
1303     zipped_len = pstore_compress(dst, psinfo->buf,
1304     header_size + dump_size,
1305     psinfo->bufsize);
1306     @@ -452,8 +455,8 @@ static void pstore_dump(struct kmsg_dumper *dumper,
1307     total += record.size;
1308     part++;
1309     }
1310     - if (is_locked)
1311     - spin_unlock_irqrestore(&psinfo->buf_lock, flags);
1312     +
1313     + up(&psinfo->buf_lock);
1314     }
1315    
1316     static struct kmsg_dumper pstore_dumper = {
1317     @@ -476,31 +479,14 @@ static void pstore_unregister_kmsg(void)
1318     #ifdef CONFIG_PSTORE_CONSOLE
1319     static void pstore_console_write(struct console *con, const char *s, unsigned c)
1320     {
1321     - const char *e = s + c;
1322     + struct pstore_record record;
1323    
1324     - while (s < e) {
1325     - struct pstore_record record;
1326     - unsigned long flags;
1327     -
1328     - pstore_record_init(&record, psinfo);
1329     - record.type = PSTORE_TYPE_CONSOLE;
1330     + pstore_record_init(&record, psinfo);
1331     + record.type = PSTORE_TYPE_CONSOLE;
1332    
1333     - if (c > psinfo->bufsize)
1334     - c = psinfo->bufsize;
1335     -
1336     - if (oops_in_progress) {
1337     - if (!spin_trylock_irqsave(&psinfo->buf_lock, flags))
1338     - break;
1339     - } else {
1340     - spin_lock_irqsave(&psinfo->buf_lock, flags);
1341     - }
1342     - record.buf = (char *)s;
1343     - record.size = c;
1344     - psinfo->write(&record);
1345     - spin_unlock_irqrestore(&psinfo->buf_lock, flags);
1346     - s += c;
1347     - c = e - s;
1348     - }
1349     + record.buf = (char *)s;
1350     + record.size = c;
1351     + psinfo->write(&record);
1352     }
1353    
1354     static struct console pstore_console = {
1355     @@ -589,6 +575,7 @@ int pstore_register(struct pstore_info *psi)
1356     psi->write_user = pstore_write_user_compat;
1357     psinfo = psi;
1358     mutex_init(&psinfo->read_mutex);
1359     + sema_init(&psinfo->buf_lock, 1);
1360     spin_unlock(&pstore_lock);
1361    
1362     if (owner && !try_module_get(owner)) {
1363     @@ -596,7 +583,8 @@ int pstore_register(struct pstore_info *psi)
1364     return -EINVAL;
1365     }
1366    
1367     - allocate_buf_for_compression();
1368     + if (psi->flags & PSTORE_FLAGS_DMESG)
1369     + allocate_buf_for_compression();
1370    
1371     if (pstore_is_mounted())
1372     pstore_get_records(0);
1373     diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
1374     index 44ed6b193d2e..316c16463b20 100644
1375     --- a/fs/pstore/ram.c
1376     +++ b/fs/pstore/ram.c
1377     @@ -803,27 +803,36 @@ static int ramoops_probe(struct platform_device *pdev)
1378    
1379     cxt->pstore.data = cxt;
1380     /*
1381     - * Since bufsize is only used for dmesg crash dumps, it
1382     - * must match the size of the dprz record (after PRZ header
1383     - * and ECC bytes have been accounted for).
1384     + * Prepare frontend flags based on which areas are initialized.
1385     + * For ramoops_init_przs() cases, the "max count" variable tells
1386     + * if there are regions present. For ramoops_init_prz() cases,
1387     + * the single region size is how to check.
1388     */
1389     - cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
1390     - cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
1391     - if (!cxt->pstore.buf) {
1392     - pr_err("cannot allocate pstore crash dump buffer\n");
1393     - err = -ENOMEM;
1394     - goto fail_clear;
1395     - }
1396     - spin_lock_init(&cxt->pstore.buf_lock);
1397     -
1398     - cxt->pstore.flags = PSTORE_FLAGS_DMESG;
1399     + cxt->pstore.flags = 0;
1400     + if (cxt->max_dump_cnt)
1401     + cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
1402     if (cxt->console_size)
1403     cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
1404     - if (cxt->ftrace_size)
1405     + if (cxt->max_ftrace_cnt)
1406     cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
1407     if (cxt->pmsg_size)
1408     cxt->pstore.flags |= PSTORE_FLAGS_PMSG;
1409    
1410     + /*
1411     + * Since bufsize is only used for dmesg crash dumps, it
1412     + * must match the size of the dprz record (after PRZ header
1413     + * and ECC bytes have been accounted for).
1414     + */
1415     + if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) {
1416     + cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
1417     + cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
1418     + if (!cxt->pstore.buf) {
1419     + pr_err("cannot allocate pstore crash dump buffer\n");
1420     + err = -ENOMEM;
1421     + goto fail_clear;
1422     + }
1423     + }
1424     +
1425     err = pstore_register(&cxt->pstore);
1426     if (err) {
1427     pr_err("registering with pstore failed\n");
1428     diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
1429     index 61142aa0ab23..0eb3372d0311 100644
1430     --- a/include/drm/drm_modeset_helper_vtables.h
1431     +++ b/include/drm/drm_modeset_helper_vtables.h
1432     @@ -1174,6 +1174,14 @@ struct drm_plane_helper_funcs {
1433     * current one with the new plane configurations in the new
1434     * plane_state.
1435     *
1436     + * Drivers should also swap the framebuffers between current plane
1437     + * state (&drm_plane.state) and new_state.
1438     + * This is required since cleanup for async commits is performed on
1439     + * the new state, rather than old state like for traditional commits.
1440     + * Since we want to give up the reference on the current (old) fb
1441     + * instead of our brand new one, swap them in the driver during the
1442     + * async commit.
1443     + *
1444     * FIXME:
1445     * - It only works for single plane updates
1446     * - Async Pageflips are not supported yet
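
(The documentation paragraph added above explains that async plane updates must swap the framebuffer pointers between the current plane state and new_state, because cleanup runs on the new state and should therefore drop the reference on the framebuffer being replaced. Below is a driver-agnostic sketch of that ownership swap, with plain malloc/free standing in for drm_framebuffer references; all names are invented for the example and error handling is omitted.)

#include <stdio.h>
#include <stdlib.h>

struct fb    { const char *name; };
struct state { struct fb *fb; };

/* Async update: the plane keeps scanning out the new framebuffer, and the
 * pointers are swapped so that the state later handed to cleanup carries
 * the framebuffer that is actually being replaced. */
static void async_update(struct state *current_state, struct state *new_state)
{
	struct fb *tmp = current_state->fb;

	current_state->fb = new_state->fb;
	new_state->fb = tmp;
}

static void cleanup_state(struct state *s)
{
	printf("releasing %s\n", s->fb->name);
	free(s->fb);
	free(s);
}

int main(void)
{
	struct state *cur = malloc(sizeof(*cur));
	struct state *pending = malloc(sizeof(*pending));
	struct fb *old_fb = malloc(sizeof(*old_fb));
	struct fb *new_fb = malloc(sizeof(*new_fb));

	old_fb->name = "old fb";
	new_fb->name = "new fb";
	cur->fb = old_fb;
	pending->fb = new_fb;

	async_update(cur, pending);
	cleanup_state(pending);		/* frees "old fb", as intended */

	printf("still displaying %s\n", cur->fb->name);
	free(cur->fb);
	free(cur);
	return 0;
}
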
1447     diff --git a/include/linux/cpu.h b/include/linux/cpu.h
1448     index 57ae83c4d5f4..006f69f9277b 100644
1449     --- a/include/linux/cpu.h
1450     +++ b/include/linux/cpu.h
1451     @@ -183,10 +183,14 @@ enum cpuhp_smt_control {
1452     extern enum cpuhp_smt_control cpu_smt_control;
1453     extern void cpu_smt_disable(bool force);
1454     extern void cpu_smt_check_topology(void);
1455     +extern int cpuhp_smt_enable(void);
1456     +extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
1457     #else
1458     # define cpu_smt_control (CPU_SMT_ENABLED)
1459     static inline void cpu_smt_disable(bool force) { }
1460     static inline void cpu_smt_check_topology(void) { }
1461     +static inline int cpuhp_smt_enable(void) { return 0; }
1462     +static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
1463     #endif
1464    
1465     /*
1466     diff --git a/include/linux/pstore.h b/include/linux/pstore.h
1467     index 30fcec375a3a..de9093d6e660 100644
1468     --- a/include/linux/pstore.h
1469     +++ b/include/linux/pstore.h
1470     @@ -26,7 +26,7 @@
1471     #include <linux/errno.h>
1472     #include <linux/kmsg_dump.h>
1473     #include <linux/mutex.h>
1474     -#include <linux/spinlock.h>
1475     +#include <linux/semaphore.h>
1476     #include <linux/time.h>
1477     #include <linux/types.h>
1478    
1479     @@ -88,7 +88,7 @@ struct pstore_record {
1480     * @owner: module which is repsonsible for this backend driver
1481     * @name: name of the backend driver
1482     *
1483     - * @buf_lock: spinlock to serialize access to @buf
1484     + * @buf_lock: semaphore to serialize access to @buf
1485     * @buf: preallocated crash dump buffer
1486     * @bufsize: size of @buf available for crash dump bytes (must match
1487     * smallest number of bytes available for writing to a
1488     @@ -173,7 +173,7 @@ struct pstore_info {
1489     struct module *owner;
1490     char *name;
1491    
1492     - spinlock_t buf_lock;
1493     + struct semaphore buf_lock;
1494     char *buf;
1495     size_t bufsize;
1496    
1497     @@ -199,7 +199,6 @@ struct pstore_info {
1498    
1499     extern int pstore_register(struct pstore_info *);
1500     extern void pstore_unregister(struct pstore_info *);
1501     -extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
1502    
1503     struct pstore_ftrace_record {
1504     unsigned long ip;
1505     diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
1506     index 75e5b393cf44..e102c5bccbb9 100644
1507     --- a/include/linux/rcupdate.h
1508     +++ b/include/linux/rcupdate.h
1509     @@ -78,14 +78,12 @@ void synchronize_rcu(void);
1510    
1511     static inline void __rcu_read_lock(void)
1512     {
1513     - if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
1514     - preempt_disable();
1515     + preempt_disable();
1516     }
1517    
1518     static inline void __rcu_read_unlock(void)
1519     {
1520     - if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
1521     - preempt_enable();
1522     + preempt_enable();
1523     }
1524    
1525     static inline void synchronize_rcu(void)
1526     diff --git a/include/net/arp.h b/include/net/arp.h
1527     index 977aabfcdc03..c8f580a0e6b1 100644
1528     --- a/include/net/arp.h
1529     +++ b/include/net/arp.h
1530     @@ -18,6 +18,7 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
1531     return val * hash_rnd[0];
1532     }
1533    
1534     +#ifdef CONFIG_INET
1535     static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
1536     {
1537     if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
1538     @@ -25,6 +26,13 @@ static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev
1539    
1540     return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
1541     }
1542     +#else
1543     +static inline
1544     +struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
1545     +{
1546     + return NULL;
1547     +}
1548     +#endif
1549    
1550     static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
1551     {
1552     diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
1553     index 983f7a1a3f1d..62c936230cc8 100644
1554     --- a/include/net/ip6_fib.h
1555     +++ b/include/net/ip6_fib.h
1556     @@ -260,8 +260,7 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
1557     rcu_read_lock();
1558    
1559     from = rcu_dereference(rt->from);
1560     - if (from && (rt->rt6i_flags & RTF_PCPU ||
1561     - unlikely(!list_empty(&rt->rt6i_uncached))))
1562     + if (from)
1563     fib6_get_cookie_safe(from, &cookie);
1564    
1565     rcu_read_unlock();
1566     diff --git a/include/net/tls.h b/include/net/tls.h
1567     index c423b7d0b6ab..954110575891 100644
1568     --- a/include/net/tls.h
1569     +++ b/include/net/tls.h
1570     @@ -161,6 +161,10 @@ enum {
1571     TLS_PENDING_CLOSED_RECORD
1572     };
1573    
1574     +enum tls_context_flags {
1575     + TLS_RX_SYNC_RUNNING = 0,
1576     +};
1577     +
1578     struct cipher_context {
1579     u16 prepend_size;
1580     u16 tag_size;
1581     diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
1582     index 7f5634ce8e88..4671c9150d4d 100644
1583     --- a/include/uapi/drm/i915_drm.h
1584     +++ b/include/uapi/drm/i915_drm.h
1585     @@ -942,7 +942,7 @@ struct drm_i915_gem_execbuffer2 {
1586     * struct drm_i915_gem_exec_fence *fences.
1587     */
1588     __u64 cliprects_ptr;
1589     -#define I915_EXEC_RING_MASK (7<<0)
1590     +#define I915_EXEC_RING_MASK (0x3f)
1591     #define I915_EXEC_DEFAULT (0<<0)
1592     #define I915_EXEC_RENDER (1<<0)
1593     #define I915_EXEC_BSD (2<<0)
1594     diff --git a/kernel/cpu.c b/kernel/cpu.c
1595     index bc6c880a093f..5d65eae893bd 100644
1596     --- a/kernel/cpu.c
1597     +++ b/kernel/cpu.c
1598     @@ -2035,7 +2035,7 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
1599     kobject_uevent(&dev->kobj, KOBJ_ONLINE);
1600     }
1601    
1602     -static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
1603     +int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
1604     {
1605     int cpu, ret = 0;
1606    
1607     @@ -2069,7 +2069,7 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
1608     return ret;
1609     }
1610    
1611     -static int cpuhp_smt_enable(void)
1612     +int cpuhp_smt_enable(void)
1613     {
1614     int cpu, ret = 0;
1615    
1616     diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
1617     index abef759de7c8..f5ce9f7ec132 100644
1618     --- a/kernel/power/hibernate.c
1619     +++ b/kernel/power/hibernate.c
1620     @@ -258,6 +258,11 @@ void swsusp_show_speed(ktime_t start, ktime_t stop,
1621     (kps % 1000) / 10);
1622     }
1623    
1624     +__weak int arch_resume_nosmt(void)
1625     +{
1626     + return 0;
1627     +}
1628     +
1629     /**
1630     * create_image - Create a hibernation image.
1631     * @platform_mode: Whether or not to use the platform driver.
1632     @@ -325,6 +330,10 @@ static int create_image(int platform_mode)
1633     Enable_cpus:
1634     enable_nonboot_cpus();
1635    
1636     + /* Allow architectures to do nosmt-specific post-resume dances */
1637     + if (!in_suspend)
1638     + error = arch_resume_nosmt();
1639     +
1640     Platform_finish:
1641     platform_finish(platform_mode);
1642    
1643     diff --git a/lib/test_firmware.c b/lib/test_firmware.c
1644     index 7cab9a9869ac..fd48a15a0710 100644
1645     --- a/lib/test_firmware.c
1646     +++ b/lib/test_firmware.c
1647     @@ -223,30 +223,30 @@ static ssize_t config_show(struct device *dev,
1648    
1649     mutex_lock(&test_fw_mutex);
1650    
1651     - len += snprintf(buf, PAGE_SIZE,
1652     + len += scnprintf(buf, PAGE_SIZE - len,
1653     "Custom trigger configuration for: %s\n",
1654     dev_name(dev));
1655    
1656     if (test_fw_config->name)
1657     - len += snprintf(buf+len, PAGE_SIZE,
1658     + len += scnprintf(buf+len, PAGE_SIZE - len,
1659     "name:\t%s\n",
1660     test_fw_config->name);
1661     else
1662     - len += snprintf(buf+len, PAGE_SIZE,
1663     + len += scnprintf(buf+len, PAGE_SIZE - len,
1664     "name:\tEMTPY\n");
1665    
1666     - len += snprintf(buf+len, PAGE_SIZE,
1667     + len += scnprintf(buf+len, PAGE_SIZE - len,
1668     "num_requests:\t%u\n", test_fw_config->num_requests);
1669    
1670     - len += snprintf(buf+len, PAGE_SIZE,
1671     + len += scnprintf(buf+len, PAGE_SIZE - len,
1672     "send_uevent:\t\t%s\n",
1673     test_fw_config->send_uevent ?
1674     "FW_ACTION_HOTPLUG" :
1675     "FW_ACTION_NOHOTPLUG");
1676     - len += snprintf(buf+len, PAGE_SIZE,
1677     + len += scnprintf(buf+len, PAGE_SIZE - len,
1678     "sync_direct:\t\t%s\n",
1679     test_fw_config->sync_direct ? "true" : "false");
1680     - len += snprintf(buf+len, PAGE_SIZE,
1681     + len += scnprintf(buf+len, PAGE_SIZE - len,
1682     "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
1683    
1684     mutex_unlock(&test_fw_mutex);
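
(Background for the test_firmware conversion above: like C99 snprintf(), the kernel's snprintf() returns the length the output would have had without truncation, so accumulating its return value as a buffer offset can walk past the end of the page; scnprintf() returns the number of bytes actually stored, which makes the "len +=" pattern safe, and the size argument is corrected to the remaining space PAGE_SIZE - len. The userspace sketch below shows the difference; my_scnprintf() is a stand-in written for the illustration, not the kernel implementation.)

#include <stdarg.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's scnprintf(): returns the number of
 * characters actually placed in buf (excluding the trailing NUL), never more
 * than size - 1, so the return value can be accumulated safely as an offset. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int would_be;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	would_be = vsnprintf(buf, size, fmt, args);
	va_end(args);
	if (would_be < 0)
		return 0;
	return (size_t)would_be >= size ? (int)(size - 1) : would_be;
}

int main(void)
{
	char page[16];
	int len = 0;

	/* snprintf() reports the full untruncated length, here larger than
	 * the buffer itself, so "len += snprintf(...)" can overshoot. */
	printf("snprintf returned %d for a %zu-byte buffer\n",
	       snprintf(page, sizeof(page), "%s", "0123456789abcdefXYZ"),
	       sizeof(page));

	/* Accumulating the scnprintf-style return value keeps len in bounds. */
	len += my_scnprintf(page + len, sizeof(page) - len, "name:\t%s\n", "foo");
	len += my_scnprintf(page + len, sizeof(page) - len, "num:\t%u\n", 42u);
	printf("len = %d (buffer is %zu bytes)\n", len, sizeof(page));
	return 0;
}
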
1685     diff --git a/net/core/ethtool.c b/net/core/ethtool.c
1686     index 7cc97f43f138..996813f345d5 100644
1687     --- a/net/core/ethtool.c
1688     +++ b/net/core/ethtool.c
1689     @@ -880,8 +880,13 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
1690     if (rc >= 0)
1691     info.n_priv_flags = rc;
1692     }
1693     - if (ops->get_regs_len)
1694     - info.regdump_len = ops->get_regs_len(dev);
1695     + if (ops->get_regs_len) {
1696     + int ret = ops->get_regs_len(dev);
1697     +
1698     + if (ret > 0)
1699     + info.regdump_len = ret;
1700     + }
1701     +
1702     if (ops->get_eeprom_len)
1703     info.eedump_len = ops->get_eeprom_len(dev);
1704    
1705     @@ -1424,6 +1429,9 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
1706     return -EFAULT;
1707    
1708     reglen = ops->get_regs_len(dev);
1709     + if (reglen <= 0)
1710     + return reglen;
1711     +
1712     if (regs.len > reglen)
1713     regs.len = reglen;
1714    
1715     @@ -1434,13 +1442,16 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
1716     return -ENOMEM;
1717     }
1718    
1719     + if (regs.len < reglen)
1720     + reglen = regs.len;
1721     +
1722     ops->get_regs(dev, &regs, regbuf);
1723    
1724     ret = -EFAULT;
1725     if (copy_to_user(useraddr, &regs, sizeof(regs)))
1726     goto out;
1727     useraddr += offsetof(struct ethtool_regs, data);
1728     - if (regbuf && copy_to_user(useraddr, regbuf, regs.len))
1729     + if (copy_to_user(useraddr, regbuf, reglen))
1730     goto out;
1731     ret = 0;
1732    
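
(The ethtool hunks above stop trusting ops->get_regs_len() blindly: its int return value may be zero or a negative errno, and the user-requested regs.len is clamped so that only the agreed length is generated and copied back. A minimal userspace sketch of that defensive pattern follows; driver_regs_len() and fill_regs() are hypothetical stand-ins for the driver ops.)

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical driver callback: may return a length, 0, or a negative errno. */
static int driver_regs_len(void)
{
	return 32;
}

static void fill_regs(unsigned char *regbuf, size_t len)
{
	memset(regbuf, 0xab, len);	/* pretend register contents */
}

/* Copy at most min(requested, reported) register bytes into out. */
static int dump_regs(unsigned char *out, size_t requested, size_t *copied)
{
	int reglen = driver_regs_len();
	unsigned char *regbuf;

	if (reglen <= 0)		/* error or "no registers": nothing to copy */
		return reglen;
	if ((size_t)reglen > requested)	/* never copy more than the caller asked for */
		reglen = (int)requested;

	regbuf = calloc(1, (size_t)reglen + 1);
	if (!regbuf)
		return -ENOMEM;
	fill_regs(regbuf, (size_t)reglen);
	memcpy(out, regbuf, (size_t)reglen);
	*copied = (size_t)reglen;
	free(regbuf);
	return 0;
}

int main(void)
{
	unsigned char out[16];
	size_t copied = 0;

	if (dump_regs(out, sizeof(out), &copied) == 0)
		printf("copied %zu register bytes\n", copied);
	return 0;
}
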
1733     diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
1734     index 338147b14d0e..0ff3953f64aa 100644
1735     --- a/net/core/fib_rules.c
1736     +++ b/net/core/fib_rules.c
1737     @@ -756,9 +756,9 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
1738     if (err)
1739     goto errout;
1740    
1741     - if (rule_exists(ops, frh, tb, rule)) {
1742     - if (nlh->nlmsg_flags & NLM_F_EXCL)
1743     - err = -EEXIST;
1744     + if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
1745     + rule_exists(ops, frh, tb, rule)) {
1746     + err = -EEXIST;
1747     goto errout_free;
1748     }
1749    
1750     diff --git a/net/core/neighbour.c b/net/core/neighbour.c
1751     index 4e07824eec5e..4e4ac77c6816 100644
1752     --- a/net/core/neighbour.c
1753     +++ b/net/core/neighbour.c
1754     @@ -30,6 +30,7 @@
1755     #include <linux/times.h>
1756     #include <net/net_namespace.h>
1757     #include <net/neighbour.h>
1758     +#include <net/arp.h>
1759     #include <net/dst.h>
1760     #include <net/sock.h>
1761     #include <net/netevent.h>
1762     @@ -2536,7 +2537,13 @@ int neigh_xmit(int index, struct net_device *dev,
1763     if (!tbl)
1764     goto out;
1765     rcu_read_lock_bh();
1766     - neigh = __neigh_lookup_noref(tbl, addr, dev);
1767     + if (index == NEIGH_ARP_TABLE) {
1768     + u32 key = *((u32 *)addr);
1769     +
1770     + neigh = __ipv4_neigh_lookup_noref(dev, key);
1771     + } else {
1772     + neigh = __neigh_lookup_noref(tbl, addr, dev);
1773     + }
1774     if (!neigh)
1775     neigh = __neigh_create(tbl, addr, dev, false);
1776     err = PTR_ERR(neigh);
1777     diff --git a/net/core/pktgen.c b/net/core/pktgen.c
1778     index 7f6938405fa1..092fa3d75b32 100644
1779     --- a/net/core/pktgen.c
1780     +++ b/net/core/pktgen.c
1781     @@ -3065,7 +3065,13 @@ static int pktgen_wait_thread_run(struct pktgen_thread *t)
1782     {
1783     while (thread_is_running(t)) {
1784    
1785     + /* note: 't' will still be around even after the unlock/lock
1786     + * cycle because pktgen_thread threads are only cleared at
1787     + * net exit
1788     + */
1789     + mutex_unlock(&pktgen_thread_lock);
1790     msleep_interruptible(100);
1791     + mutex_lock(&pktgen_thread_lock);
1792    
1793     if (signal_pending(current))
1794     goto signal;
1795     @@ -3080,6 +3086,10 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
1796     struct pktgen_thread *t;
1797     int sig = 1;
1798    
1799     + /* prevent from racing with rmmod */
1800     + if (!try_module_get(THIS_MODULE))
1801     + return sig;
1802     +
1803     mutex_lock(&pktgen_thread_lock);
1804    
1805     list_for_each_entry(t, &pn->pktgen_threads, th_list) {
1806     @@ -3093,6 +3103,7 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
1807     t->control |= (T_STOP);
1808    
1809     mutex_unlock(&pktgen_thread_lock);
1810     + module_put(THIS_MODULE);
1811     return sig;
1812     }
1813    
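
(Both pktgen hunks above are about what a sleeping waiter may hold: the polling loop now drops pktgen_thread_lock around msleep_interruptible() so other users of the mutex are not stalled for the whole interval, and the caller pins the module with try_module_get() so the thread list cannot disappear under it. Below is a userspace sketch of the first idea only, dropping a mutex around the sleep inside a polling loop; busy() is a stand-in invented for the example.)

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int remaining = 3;	/* pretend work guarded by the lock */

static bool busy(void)		/* stand-in for thread_is_running(t) */
{
	return remaining-- > 0;
}

static void wait_until_idle(void)
{
	pthread_mutex_lock(&lock);
	while (busy()) {
		/* Drop the lock while sleeping so other threads that need it
		 * (for example to stop or reconfigure the worker) are not
		 * stalled for the whole polling interval. */
		pthread_mutex_unlock(&lock);
		usleep(100 * 1000);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	wait_until_idle();
	printf("worker finished\n");
	return 0;
}
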
1814     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1815     index 40bf19f7ae1a..232581c140a0 100644
1816     --- a/net/ipv4/route.c
1817     +++ b/net/ipv4/route.c
1818     @@ -1960,7 +1960,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1819     u32 itag = 0;
1820     struct rtable *rth;
1821     struct flowi4 fl4;
1822     - bool do_cache;
1823     + bool do_cache = true;
1824    
1825     /* IP on this device is disabled. */
1826    
1827     @@ -2037,6 +2037,9 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1828     if (res->type == RTN_BROADCAST) {
1829     if (IN_DEV_BFORWARD(in_dev))
1830     goto make_route;
1831     + /* not do cache if bc_forwarding is enabled */
1832     + if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
1833     + do_cache = false;
1834     goto brd_input;
1835     }
1836    
1837     @@ -2074,16 +2077,13 @@ brd_input:
1838     RT_CACHE_STAT_INC(in_brd);
1839    
1840     local_input:
1841     - do_cache = false;
1842     - if (res->fi) {
1843     - if (!itag) {
1844     - rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1845     - if (rt_cache_valid(rth)) {
1846     - skb_dst_set_noref(skb, &rth->dst);
1847     - err = 0;
1848     - goto out;
1849     - }
1850     - do_cache = true;
1851     + do_cache &= res->fi && !itag;
1852     + if (do_cache) {
1853     + rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1854     + if (rt_cache_valid(rth)) {
1855     + skb_dst_set_noref(skb, &rth->dst);
1856     + err = 0;
1857     + goto out;
1858     }
1859     }
1860    
1861     diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
1862     index 5833d4af7311..4856d9320b28 100644
1863     --- a/net/ipv6/raw.c
1864     +++ b/net/ipv6/raw.c
1865     @@ -782,6 +782,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1866     struct flowi6 fl6;
1867     struct ipcm6_cookie ipc6;
1868     int addr_len = msg->msg_namelen;
1869     + int hdrincl;
1870     u16 proto;
1871     int err;
1872    
1873     @@ -795,6 +796,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1874     if (msg->msg_flags & MSG_OOB)
1875     return -EOPNOTSUPP;
1876    
1877     + /* hdrincl should be READ_ONCE(inet->hdrincl)
1878     + * but READ_ONCE() doesn't work with bit fields.
1879     + * Doing this indirectly yields the same result.
1880     + */
1881     + hdrincl = inet->hdrincl;
1882     + hdrincl = READ_ONCE(hdrincl);
1883     +
1884     /*
1885     * Get and verify the address.
1886     */
1887     @@ -886,11 +894,14 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1888     opt = ipv6_fixup_options(&opt_space, opt);
1889    
1890     fl6.flowi6_proto = proto;
1891     - rfv.msg = msg;
1892     - rfv.hlen = 0;
1893     - err = rawv6_probe_proto_opt(&rfv, &fl6);
1894     - if (err)
1895     - goto out;
1896     +
1897     + if (!hdrincl) {
1898     + rfv.msg = msg;
1899     + rfv.hlen = 0;
1900     + err = rawv6_probe_proto_opt(&rfv, &fl6);
1901     + if (err)
1902     + goto out;
1903     + }
1904    
1905     if (!ipv6_addr_any(daddr))
1906     fl6.daddr = *daddr;
1907     @@ -907,7 +918,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1908     fl6.flowi6_oif = np->ucast_oif;
1909     security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
1910    
1911     - if (inet->hdrincl)
1912     + if (hdrincl)
1913     fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
1914    
1915     if (ipc6.tclass < 0)
1916     @@ -930,7 +941,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1917     goto do_confirm;
1918    
1919     back_from_confirm:
1920     - if (inet->hdrincl)
1921     + if (hdrincl)
1922     err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst,
1923     msg->msg_flags, &ipc6.sockc);
1924     else {
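
(The rawv6_sendmsg() hunks read inet->hdrincl once into a local variable and branch only on that snapshot, so a concurrent update of the flag cannot make the header-inclusion checks disagree with each other; because hdrincl is a bit-field, the patch copies it to an int first and applies READ_ONCE() to the copy. The sketch below shows the "snapshot once, decide consistently" shape of the fix in plain C; the structure and helpers are invented for the illustration and there is no real concurrency in it.)

#include <stdbool.h>
#include <stdio.h>

struct fake_sock {
	unsigned int hdrincl : 1;	/* bit-field flag that may change under us */
};

static void build_kernel_header(void) { puts("kernel-built header and options"); }
static void use_caller_header(void)   { puts("caller-supplied header used as-is"); }

static void send_one(struct fake_sock *sk)
{
	/* Take one snapshot up front and branch on it everywhere below,
	 * instead of re-reading sk->hdrincl at every decision point. */
	const bool hdrincl = sk->hdrincl;

	if (!hdrincl)
		build_kernel_header();
	if (hdrincl)
		use_caller_header();
}

int main(void)
{
	struct fake_sock sk = { .hdrincl = 1 };

	send_one(&sk);
	return 0;
}
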
1925     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1926     index 18df3bce73da..d98fcf926166 100644
1927     --- a/net/packet/af_packet.c
1928     +++ b/net/packet/af_packet.c
1929     @@ -3017,8 +3017,8 @@ static int packet_release(struct socket *sock)
1930    
1931     synchronize_net();
1932    
1933     + kfree(po->rollover);
1934     if (f) {
1935     - kfree(po->rollover);
1936     fanout_release_data(f);
1937     kfree(f);
1938     }
1939     diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
1940     index d664e9ade74d..0b347f46b2f4 100644
1941     --- a/net/rds/ib_rdma.c
1942     +++ b/net/rds/ib_rdma.c
1943     @@ -428,12 +428,14 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
1944     wait_clean_list_grace();
1945    
1946     list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
1947     - if (ibmr_ret)
1948     + if (ibmr_ret) {
1949     *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
1950     -
1951     + clean_nodes = clean_nodes->next;
1952     + }
1953     /* more than one entry in llist nodes */
1954     - if (clean_nodes->next)
1955     - llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
1956     + if (clean_nodes)
1957     + llist_add_batch(clean_nodes, clean_tail,
1958     + &pool->clean_list);
1959    
1960     }
1961    
1962     diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
1963     index d05c57664e36..ae65a1cfa596 100644
1964     --- a/net/sctp/sm_make_chunk.c
1965     +++ b/net/sctp/sm_make_chunk.c
1966     @@ -2329,7 +2329,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
1967     union sctp_addr addr;
1968     struct sctp_af *af;
1969     int src_match = 0;
1970     - char *cookie;
1971    
1972     /* We must include the address that the INIT packet came from.
1973     * This is the only address that matters for an INIT packet.
1974     @@ -2433,14 +2432,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
1975     /* Peer Rwnd : Current calculated value of the peer's rwnd. */
1976     asoc->peer.rwnd = asoc->peer.i.a_rwnd;
1977    
1978     - /* Copy cookie in case we need to resend COOKIE-ECHO. */
1979     - cookie = asoc->peer.cookie;
1980     - if (cookie) {
1981     - asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp);
1982     - if (!asoc->peer.cookie)
1983     - goto clean_up;
1984     - }
1985     -
1986     /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily
1987     * high (for example, implementations MAY use the size of the receiver
1988     * advertised window).
1989     @@ -2609,7 +2600,9 @@ do_addr_param:
1990     case SCTP_PARAM_STATE_COOKIE:
1991     asoc->peer.cookie_len =
1992     ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
1993     - asoc->peer.cookie = param.cookie->body;
1994     + asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
1995     + if (!asoc->peer.cookie)
1996     + retval = 0;
1997     break;
1998    
1999     case SCTP_PARAM_HEARTBEAT_INFO:
2000     diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
2001     index 48fe8f01265f..3131b4154c74 100644
2002     --- a/net/sctp/sm_sideeffect.c
2003     +++ b/net/sctp/sm_sideeffect.c
2004     @@ -898,6 +898,11 @@ static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
2005     asoc->rto_initial;
2006     }
2007    
2008     + if (sctp_state(asoc, ESTABLISHED)) {
2009     + kfree(asoc->peer.cookie);
2010     + asoc->peer.cookie = NULL;
2011     + }
2012     +
2013     if (sctp_state(asoc, ESTABLISHED) ||
2014     sctp_state(asoc, CLOSED) ||
2015     sctp_state(asoc, SHUTDOWN_RECEIVED)) {
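
(Taken together, the two SCTP hunks above make the association own its copy of the peer's state cookie: the cookie is duplicated with kmemdup() while the INIT parameters are parsed, instead of keeping a pointer into the received chunk, and the copy is freed once the association reaches ESTABLISHED. A small self-contained sketch of that own-your-copy pattern, with malloc/free standing in for kmemdup()/kfree() and all names invented for the example:)

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct peer {
	char *cookie;		/* owned copy, not a pointer into the packet */
	size_t cookie_len;
};

/* Duplicate the cookie out of the short-lived packet buffer (kmemdup() analogue). */
static int peer_set_cookie(struct peer *p, const char *pkt, size_t len)
{
	p->cookie = malloc(len);
	if (!p->cookie)
		return -1;
	memcpy(p->cookie, pkt, len);
	p->cookie_len = len;
	return 0;
}

/* Once the handshake completes, the cookie is no longer needed. */
static void peer_established(struct peer *p)
{
	free(p->cookie);
	p->cookie = NULL;
	p->cookie_len = 0;
}

int main(void)
{
	struct peer p = { 0 };
	char *packet = strdup("opaque-state-cookie");

	if (!packet || peer_set_cookie(&p, packet, strlen(packet) + 1))
		return 1;
	free(packet);				/* the original chunk goes away here ... */
	printf("cookie survives: %s\n", p.cookie);	/* ... but the copy is still valid */
	peer_established(&p);
	return 0;
}
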
2016     diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
2017     index 8035bf495eb2..ead29c2aefa7 100644
2018     --- a/net/tls/tls_device.c
2019     +++ b/net/tls/tls_device.c
2020     @@ -545,10 +545,22 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
2021     return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
2022     }
2023    
2024     +static void tls_device_resync_rx(struct tls_context *tls_ctx,
2025     + struct sock *sk, u32 seq, u64 rcd_sn)
2026     +{
2027     + struct net_device *netdev;
2028     +
2029     + if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
2030     + return;
2031     + netdev = READ_ONCE(tls_ctx->netdev);
2032     + if (netdev)
2033     + netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
2034     + clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
2035     +}
2036     +
2037     void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
2038     {
2039     struct tls_context *tls_ctx = tls_get_ctx(sk);
2040     - struct net_device *netdev = tls_ctx->netdev;
2041     struct tls_offload_context_rx *rx_ctx;
2042     u32 is_req_pending;
2043     s64 resync_req;
2044     @@ -563,10 +575,10 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
2045     is_req_pending = resync_req;
2046    
2047     if (unlikely(is_req_pending) && req_seq == seq &&
2048     - atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
2049     - netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk,
2050     - seq + TLS_HEADER_SIZE - 1,
2051     - rcd_sn);
2052     + atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) {
2053     + seq += TLS_HEADER_SIZE - 1;
2054     + tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
2055     + }
2056     }
2057    
2058     static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
2059     @@ -954,7 +966,10 @@ static int tls_device_down(struct net_device *netdev)
2060     if (ctx->rx_conf == TLS_HW)
2061     netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
2062     TLS_OFFLOAD_CTX_DIR_RX);
2063     - ctx->netdev = NULL;
2064     + WRITE_ONCE(ctx->netdev, NULL);
2065     + smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
2066     + while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
2067     + usleep_range(10, 200);
2068     dev_put(netdev);
2069     list_del_init(&ctx->list);
2070