Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0263-5.4.164-all-fixes.patch



Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 73354 byte(s)
-add missing
1 niro 3637 diff --git a/Makefile b/Makefile
2     index 91d77df0128b4..a87162756d61d 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 5
8     PATCHLEVEL = 4
9     -SUBLEVEL = 163
10     +SUBLEVEL = 164
11     EXTRAVERSION =
12     NAME = Kleptomaniac Octopus
13    
14     diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
15     index d250f4b2bfedb..bf443ca1fcf11 100644
16     --- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
17     +++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
18     @@ -71,6 +71,7 @@
19     tx-fault-gpio = <&cp1_gpio1 26 GPIO_ACTIVE_HIGH>;
20     pinctrl-names = "default";
21     pinctrl-0 = <&cp1_sfpp0_pins>;
22     + maximum-power-milliwatt = <2000>;
23     };
24    
25     sfp_eth1: sfp-eth1 {
26     @@ -83,6 +84,7 @@
27     tx-fault-gpio = <&cp0_gpio2 30 GPIO_ACTIVE_HIGH>;
28     pinctrl-names = "default";
29     pinctrl-0 = <&cp1_sfpp1_pins &cp0_sfpp1_pins>;
30     + maximum-power-milliwatt = <2000>;
31     };
32    
33     sfp_eth3: sfp-eth3 {
34     @@ -95,6 +97,7 @@
35     tx-fault-gpio = <&cp0_gpio2 19 GPIO_ACTIVE_HIGH>;
36     pinctrl-names = "default";
37     pinctrl-0 = <&cp0_sfp_1g_pins &cp1_sfp_1g_pins>;
38     + maximum-power-milliwatt = <2000>;
39     };
40     };
41    
42     diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
43     index 53f974817affb..caa90ae0b4acc 100644
44     --- a/arch/parisc/Makefile
45     +++ b/arch/parisc/Makefile
46     @@ -17,7 +17,12 @@
47     # Mike Shaver, Helge Deller and Martin K. Petersen
48     #
49    
50     +ifdef CONFIG_PARISC_SELF_EXTRACT
51     +boot := arch/parisc/boot
52     +KBUILD_IMAGE := $(boot)/bzImage
53     +else
54     KBUILD_IMAGE := vmlinuz
55     +endif
56    
57     NM = sh $(srctree)/arch/parisc/nm
58     CHECKFLAGS += -D__hppa__=1
59     diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh
60     index 6f68784fea25f..a8c49815f58c8 100644
61     --- a/arch/parisc/install.sh
62     +++ b/arch/parisc/install.sh
63     @@ -39,6 +39,7 @@ verify "$3"
64     if [ -n "${INSTALLKERNEL}" ]; then
65     if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
66     if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
67     + if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi
68     fi
69    
70     # Default install
71     diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
72     index 04508158815c1..9c1ae9d242ca9 100644
73     --- a/arch/parisc/kernel/time.c
74     +++ b/arch/parisc/kernel/time.c
75     @@ -245,27 +245,13 @@ void __init time_init(void)
76     static int __init init_cr16_clocksource(void)
77     {
78     /*
79     - * The cr16 interval timers are not syncronized across CPUs on
80     - * different sockets, so mark them unstable and lower rating on
81     - * multi-socket SMP systems.
82     + * The cr16 interval timers are not syncronized across CPUs, even if
83     + * they share the same socket.
84     */
85     if (num_online_cpus() > 1 && !running_on_qemu) {
86     - int cpu;
87     - unsigned long cpu0_loc;
88     - cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
89     -
90     - for_each_online_cpu(cpu) {
91     - if (cpu == 0)
92     - continue;
93     - if ((cpu0_loc != 0) &&
94     - (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
95     - continue;
96     -
97     - clocksource_cr16.name = "cr16_unstable";
98     - clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
99     - clocksource_cr16.rating = 0;
100     - break;
101     - }
102     + clocksource_cr16.name = "cr16_unstable";
103     + clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
104     + clocksource_cr16.rating = 0;
105     }
106    
107     /* XXX: We may want to mark sched_clock stable here if cr16 clocks are
108     diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
109     index e4dc64cc9c555..287bb88f76986 100644
110     --- a/arch/s390/include/asm/pci_io.h
111     +++ b/arch/s390/include/asm/pci_io.h
112     @@ -14,12 +14,13 @@
113    
114     /* I/O Map */
115     #define ZPCI_IOMAP_SHIFT 48
116     -#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000UL
117     +#define ZPCI_IOMAP_ADDR_SHIFT 62
118     +#define ZPCI_IOMAP_ADDR_BASE (1UL << ZPCI_IOMAP_ADDR_SHIFT)
119     #define ZPCI_IOMAP_ADDR_OFF_MASK ((1UL << ZPCI_IOMAP_SHIFT) - 1)
120     #define ZPCI_IOMAP_MAX_ENTRIES \
121     - ((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
122     + (1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))
123     #define ZPCI_IOMAP_ADDR_IDX_MASK \
124     - (~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
125     + ((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK)
126    
127     struct zpci_iomap_entry {
128     u32 fh;
129     diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
130     index f661f176966f5..9a0316a067a11 100644
131     --- a/arch/s390/kernel/setup.c
132     +++ b/arch/s390/kernel/setup.c
133     @@ -841,9 +841,6 @@ static void __init setup_memory(void)
134     storage_key_init_range(reg->base, reg->base + reg->size);
135     }
136     psw_set_key(PAGE_DEFAULT_KEY);
137     -
138     - /* Only cosmetics */
139     - memblock_enforce_memory_limit(memblock_end_of_DRAM());
140     }
141    
142     /*
143     diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
144     index 7e322e2daaf59..fe4200b895829 100644
145     --- a/arch/x86/kernel/tsc.c
146     +++ b/arch/x86/kernel/tsc.c
147     @@ -1162,6 +1162,12 @@ void mark_tsc_unstable(char *reason)
148    
149     EXPORT_SYMBOL_GPL(mark_tsc_unstable);
150    
151     +static void __init tsc_disable_clocksource_watchdog(void)
152     +{
153     + clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
154     + clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
155     +}
156     +
157     static void __init check_system_tsc_reliable(void)
158     {
159     #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
160     @@ -1178,6 +1184,23 @@ static void __init check_system_tsc_reliable(void)
161     #endif
162     if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
163     tsc_clocksource_reliable = 1;
164     +
165     + /*
166     + * Disable the clocksource watchdog when the system has:
167     + * - TSC running at constant frequency
168     + * - TSC which does not stop in C-States
169     + * - the TSC_ADJUST register which allows to detect even minimal
170     + * modifications
171     + * - not more than two sockets. As the number of sockets cannot be
172     + * evaluated at the early boot stage where this has to be
173     + * invoked, check the number of online memory nodes as a
174     + * fallback solution which is an reasonable estimate.
175     + */
176     + if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
177     + boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
178     + boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
179     + nr_online_nodes <= 2)
180     + tsc_disable_clocksource_watchdog();
181     }
182    
183     /*
184     @@ -1369,9 +1392,6 @@ static int __init init_tsc_clocksource(void)
185     if (tsc_unstable)
186     goto unreg;
187    
188     - if (tsc_clocksource_reliable || no_tsc_watchdog)
189     - clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
190     -
191     if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
192     clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
193    
194     @@ -1506,7 +1526,7 @@ void __init tsc_init(void)
195     }
196    
197     if (tsc_clocksource_reliable || no_tsc_watchdog)
198     - clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
199     + tsc_disable_clocksource_watchdog();
200    
201     clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
202     detect_art();
203     diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
204     index ec534f978867d..59b1143063003 100644
205     --- a/arch/x86/kernel/tsc_sync.c
206     +++ b/arch/x86/kernel/tsc_sync.c
207     @@ -30,6 +30,7 @@ struct tsc_adjust {
208     };
209    
210     static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
211     +static struct timer_list tsc_sync_check_timer;
212    
213     /*
214     * TSC's on different sockets may be reset asynchronously.
215     @@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
216     }
217     }
218    
219     +/*
220     + * Normally the tsc_sync will be checked every time system enters idle
221     + * state, but there is still caveat that a system won't enter idle,
222     + * either because it's too busy or configured purposely to not enter
223     + * idle.
224     + *
225     + * So setup a periodic timer (every 10 minutes) to make sure the check
226     + * is always on.
227     + */
228     +
229     +#define SYNC_CHECK_INTERVAL (HZ * 600)
230     +
231     +static void tsc_sync_check_timer_fn(struct timer_list *unused)
232     +{
233     + int next_cpu;
234     +
235     + tsc_verify_tsc_adjust(false);
236     +
237     + /* Run the check for all onlined CPUs in turn */
238     + next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
239     + if (next_cpu >= nr_cpu_ids)
240     + next_cpu = cpumask_first(cpu_online_mask);
241     +
242     + tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
243     + add_timer_on(&tsc_sync_check_timer, next_cpu);
244     +}
245     +
246     +static int __init start_sync_check_timer(void)
247     +{
248     + if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
249     + return 0;
250     +
251     + timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
252     + tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
253     + add_timer(&tsc_sync_check_timer);
254     +
255     + return 0;
256     +}
257     +late_initcall(start_sync_check_timer);
258     +
259     static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
260     unsigned int cpu, bool bootcpu)
261     {
262     diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
263     index c8388389a3b05..d9990951fd0ac 100644
264     --- a/arch/x86/kvm/pmu_amd.c
265     +++ b/arch/x86/kvm/pmu_amd.c
266     @@ -266,7 +266,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
267     pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
268    
269     pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
270     - pmu->reserved_bits = 0xffffffff00200000ull;
271     + pmu->reserved_bits = 0xfffffff000280000ull;
272     pmu->version = 1;
273     /* not applicable to AMD; but clean them to prevent any fall out */
274     pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
275     diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
276     index 262f83cad3551..de371e52cfa85 100644
277     --- a/arch/x86/realmode/init.c
278     +++ b/arch/x86/realmode/init.c
279     @@ -50,6 +50,7 @@ static void __init setup_real_mode(void)
280     #ifdef CONFIG_X86_64
281     u64 *trampoline_pgd;
282     u64 efer;
283     + int i;
284     #endif
285    
286     base = (unsigned char *)real_mode_header;
287     @@ -108,8 +109,17 @@ static void __init setup_real_mode(void)
288     trampoline_header->flags |= TH_FLAGS_SME_ACTIVE;
289    
290     trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
291     +
292     + /* Map the real mode stub as virtual == physical */
293     trampoline_pgd[0] = trampoline_pgd_entry.pgd;
294     - trampoline_pgd[511] = init_top_pgt[511].pgd;
295     +
296     + /*
297     + * Include the entirety of the kernel mapping into the trampoline
298     + * PGD. This way, all mappings present in the normal kernel page
299     + * tables are usable while running on trampoline_pgd.
300     + */
301     + for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
302     + trampoline_pgd[i] = init_top_pgt[i].pgd;
303     #endif
304     }
305    
306     diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
307     index 8beb418ce167b..6f572967b5552 100644
308     --- a/drivers/ata/ahci.c
309     +++ b/drivers/ata/ahci.c
310     @@ -420,6 +420,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
311     /* AMD */
312     { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
313     { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
314     + { PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */
315     /* AMD is using RAID class only for ahci controllers */
316     { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
317     PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
318     diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
319     index d55ee244d6931..0ba231e80b191 100644
320     --- a/drivers/ata/sata_fsl.c
321     +++ b/drivers/ata/sata_fsl.c
322     @@ -1394,6 +1394,14 @@ static int sata_fsl_init_controller(struct ata_host *host)
323     return 0;
324     }
325    
326     +static void sata_fsl_host_stop(struct ata_host *host)
327     +{
328     + struct sata_fsl_host_priv *host_priv = host->private_data;
329     +
330     + iounmap(host_priv->hcr_base);
331     + kfree(host_priv);
332     +}
333     +
334     /*
335     * scsi mid-layer and libata interface structures
336     */
337     @@ -1426,6 +1434,8 @@ static struct ata_port_operations sata_fsl_ops = {
338     .port_start = sata_fsl_port_start,
339     .port_stop = sata_fsl_port_stop,
340    
341     + .host_stop = sata_fsl_host_stop,
342     +
343     .pmp_attach = sata_fsl_pmp_attach,
344     .pmp_detach = sata_fsl_pmp_detach,
345     };
346     @@ -1480,9 +1490,9 @@ static int sata_fsl_probe(struct platform_device *ofdev)
347     host_priv->ssr_base = ssr_base;
348     host_priv->csr_base = csr_base;
349    
350     - irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
351     - if (!irq) {
352     - dev_err(&ofdev->dev, "invalid irq from platform\n");
353     + irq = platform_get_irq(ofdev, 0);
354     + if (irq < 0) {
355     + retval = irq;
356     goto error_exit_with_cleanup;
357     }
358     host_priv->irq = irq;
359     @@ -1557,10 +1567,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)
360    
361     ata_host_detach(host);
362    
363     - irq_dispose_mapping(host_priv->irq);
364     - iounmap(host_priv->hcr_base);
365     - kfree(host_priv);
366     -
367     return 0;
368     }
369    
370     diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
371     index bd3c9fb029fa5..8a93d5a494fff 100644
372     --- a/drivers/char/ipmi/ipmi_msghandler.c
373     +++ b/drivers/char/ipmi/ipmi_msghandler.c
374     @@ -220,6 +220,8 @@ struct ipmi_user {
375     struct work_struct remove_work;
376     };
377    
378     +static struct workqueue_struct *remove_work_wq;
379     +
380     static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
381     __acquires(user->release_barrier)
382     {
383     @@ -1286,7 +1288,7 @@ static void free_user(struct kref *ref)
384     struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
385    
386     /* SRCU cleanup must happen in task context. */
387     - schedule_work(&user->remove_work);
388     + queue_work(remove_work_wq, &user->remove_work);
389     }
390    
391     static void _ipmi_destroy_user(struct ipmi_user *user)
392     @@ -5161,6 +5163,13 @@ static int ipmi_init_msghandler(void)
393    
394     atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
395    
396     + remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
397     + if (!remove_work_wq) {
398     + pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
399     + rv = -ENOMEM;
400     + goto out;
401     + }
402     +
403     initialized = true;
404    
405     out:
406     @@ -5186,6 +5195,8 @@ static void __exit cleanup_ipmi(void)
407     int count;
408    
409     if (initialized) {
410     + destroy_workqueue(remove_work_wq);
411     +
412     atomic_notifier_chain_unregister(&panic_notifier_list,
413     &panic_block);
414    
415     diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
416     index c4e928375c40d..cb7949a2ac0ca 100644
417     --- a/drivers/cpufreq/cpufreq.c
418     +++ b/drivers/cpufreq/cpufreq.c
419     @@ -995,10 +995,9 @@ static struct kobj_type ktype_cpufreq = {
420     .release = cpufreq_sysfs_release,
421     };
422    
423     -static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
424     +static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
425     + struct device *dev)
426     {
427     - struct device *dev = get_cpu_device(cpu);
428     -
429     if (unlikely(!dev))
430     return;
431    
432     @@ -1384,7 +1383,7 @@ static int cpufreq_online(unsigned int cpu)
433     if (new_policy) {
434     for_each_cpu(j, policy->related_cpus) {
435     per_cpu(cpufreq_cpu_data, j) = policy;
436     - add_cpu_dev_symlink(policy, j);
437     + add_cpu_dev_symlink(policy, j, get_cpu_device(j));
438     }
439    
440     policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
441     @@ -1547,7 +1546,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
442     /* Create sysfs link on CPU registration */
443     policy = per_cpu(cpufreq_cpu_data, cpu);
444     if (policy)
445     - add_cpu_dev_symlink(policy, cpu);
446     + add_cpu_dev_symlink(policy, cpu, dev);
447    
448     return 0;
449     }
450     diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
451     index 1c74381a4fc9d..08995e981808a 100644
452     --- a/drivers/gpu/drm/msm/msm_debugfs.c
453     +++ b/drivers/gpu/drm/msm/msm_debugfs.c
454     @@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
455     goto free_priv;
456    
457     pm_runtime_get_sync(&gpu->pdev->dev);
458     + msm_gpu_hw_init(gpu);
459     show_priv->state = gpu->funcs->gpu_state_get(gpu);
460     pm_runtime_put_sync(&gpu->pdev->dev);
461    
462     diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
463     index 37e90e42943f6..0e2d304f0d83f 100644
464     --- a/drivers/gpu/drm/sun4i/Kconfig
465     +++ b/drivers/gpu/drm/sun4i/Kconfig
466     @@ -46,6 +46,7 @@ config DRM_SUN6I_DSI
467     default MACH_SUN8I
468     select CRC_CCITT
469     select DRM_MIPI_DSI
470     + select RESET_CONTROLLER
471     select PHY_SUN6I_MIPI_DPHY
472     help
473     Choose this option if you want have an Allwinner SoC with
474     diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
475     index 72df563477b1c..f8639a4457d23 100644
476     --- a/drivers/i2c/busses/i2c-cbus-gpio.c
477     +++ b/drivers/i2c/busses/i2c-cbus-gpio.c
478     @@ -195,8 +195,9 @@ static u32 cbus_i2c_func(struct i2c_adapter *adapter)
479     }
480    
481     static const struct i2c_algorithm cbus_i2c_algo = {
482     - .smbus_xfer = cbus_i2c_smbus_xfer,
483     - .functionality = cbus_i2c_func,
484     + .smbus_xfer = cbus_i2c_smbus_xfer,
485     + .smbus_xfer_atomic = cbus_i2c_smbus_xfer,
486     + .functionality = cbus_i2c_func,
487     };
488    
489     static int cbus_i2c_remove(struct platform_device *pdev)
490     diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
491     index a7977eef2ead5..0f0c41174dddc 100644
492     --- a/drivers/i2c/busses/i2c-stm32f7.c
493     +++ b/drivers/i2c/busses/i2c-stm32f7.c
494     @@ -1394,6 +1394,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
495     {
496     struct stm32f7_i2c_dev *i2c_dev = data;
497     struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
498     + struct stm32_i2c_dma *dma = i2c_dev->dma;
499     void __iomem *base = i2c_dev->base;
500     u32 status, mask;
501     int ret = IRQ_HANDLED;
502     @@ -1418,6 +1419,10 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
503     if (status & STM32F7_I2C_ISR_NACKF) {
504     dev_dbg(i2c_dev->dev, "<%s>: Receive NACK\n", __func__);
505     writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR);
506     + if (i2c_dev->use_dma) {
507     + stm32f7_i2c_disable_dma_req(i2c_dev);
508     + dmaengine_terminate_all(dma->chan_using);
509     + }
510     f7_msg->result = -ENXIO;
511     }
512    
513     @@ -1433,7 +1438,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
514     /* Clear STOP flag */
515     writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);
516    
517     - if (i2c_dev->use_dma) {
518     + if (i2c_dev->use_dma && !f7_msg->result) {
519     ret = IRQ_WAKE_THREAD;
520     } else {
521     i2c_dev->master_mode = false;
522     @@ -1446,7 +1451,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
523     if (f7_msg->stop) {
524     mask = STM32F7_I2C_CR2_STOP;
525     stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask);
526     - } else if (i2c_dev->use_dma) {
527     + } else if (i2c_dev->use_dma && !f7_msg->result) {
528     ret = IRQ_WAKE_THREAD;
529     } else if (f7_msg->smbus) {
530     stm32f7_i2c_smbus_rep_start(i2c_dev);
531     @@ -1586,12 +1591,23 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
532     time_left = wait_for_completion_timeout(&i2c_dev->complete,
533     i2c_dev->adap.timeout);
534     ret = f7_msg->result;
535     + if (ret) {
536     + /*
537     + * It is possible that some unsent data have already been
538     + * written into TXDR. To avoid sending old data in a
539     + * further transfer, flush TXDR in case of any error
540     + */
541     + writel_relaxed(STM32F7_I2C_ISR_TXE,
542     + i2c_dev->base + STM32F7_I2C_ISR);
543     + goto pm_free;
544     + }
545    
546     if (!time_left) {
547     dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n",
548     i2c_dev->msg->addr);
549     if (i2c_dev->use_dma)
550     dmaengine_terminate_all(dma->chan_using);
551     + stm32f7_i2c_wait_free_bus(i2c_dev);
552     ret = -ETIMEDOUT;
553     }
554    
555     @@ -1634,13 +1650,22 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
556     timeout = wait_for_completion_timeout(&i2c_dev->complete,
557     i2c_dev->adap.timeout);
558     ret = f7_msg->result;
559     - if (ret)
560     + if (ret) {
561     + /*
562     + * It is possible that some unsent data have already been
563     + * written into TXDR. To avoid sending old data in a
564     + * further transfer, flush TXDR in case of any error
565     + */
566     + writel_relaxed(STM32F7_I2C_ISR_TXE,
567     + i2c_dev->base + STM32F7_I2C_ISR);
568     goto pm_free;
569     + }
570    
571     if (!timeout) {
572     dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr);
573     if (i2c_dev->use_dma)
574     dmaengine_terminate_all(dma->chan_using);
575     + stm32f7_i2c_wait_free_bus(i2c_dev);
576     ret = -ETIMEDOUT;
577     goto pm_free;
578     }
579     diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
580     index 873f9865f0d15..b7d70c33459fd 100644
581     --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
582     +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
583     @@ -481,6 +481,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
584     goto err_exit;
585    
586     if (fw.len == 0xFFFFU) {
587     + if (sw.len > sizeof(self->rpc)) {
588     + printk(KERN_INFO "Invalid sw len: %x\n", sw.len);
589     + err = -EINVAL;
590     + goto err_exit;
591     + }
592     err = hw_atl_utils_fw_rpc_call(self, sw.len);
593     if (err < 0)
594     goto err_exit;
595     @@ -489,6 +494,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
596    
597     if (rpc) {
598     if (fw.len) {
599     + if (fw.len > sizeof(self->rpc)) {
600     + printk(KERN_INFO "Invalid fw len: %x\n", fw.len);
601     + err = -EINVAL;
602     + goto err_exit;
603     + }
604     err =
605     hw_atl_utils_fw_downld_dwords(self,
606     self->rpc_addr,
607     diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
608     index c813e6f2b371e..c97fc0e384ca6 100644
609     --- a/drivers/net/ethernet/dec/tulip/de4x5.c
610     +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
611     @@ -4708,6 +4708,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p)
612     lp->ibn = 3;
613     lp->active = *p++;
614     if (MOTO_SROM_BUG) lp->active = 0;
615     + /* if (MOTO_SROM_BUG) statement indicates lp->active could
616     + * be 8 (i.e. the size of array lp->phy) */
617     + if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy)))
618     + return -EINVAL;
619     lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
620     lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
621     lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
622     @@ -4999,19 +5003,23 @@ mii_get_phy(struct net_device *dev)
623     }
624     if ((j == limit) && (i < DE4X5_MAX_MII)) {
625     for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
626     - lp->phy[k].addr = i;
627     - lp->phy[k].id = id;
628     - lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
629     - lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
630     - lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
631     - lp->mii_cnt++;
632     - lp->active++;
633     - printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
634     - j = de4x5_debug;
635     - de4x5_debug |= DEBUG_MII;
636     - de4x5_dbg_mii(dev, k);
637     - de4x5_debug = j;
638     - printk("\n");
639     + if (k < DE4X5_MAX_PHY) {
640     + lp->phy[k].addr = i;
641     + lp->phy[k].id = id;
642     + lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
643     + lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
644     + lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
645     + lp->mii_cnt++;
646     + lp->active++;
647     + printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
648     + j = de4x5_debug;
649     + de4x5_debug |= DEBUG_MII;
650     + de4x5_dbg_mii(dev, k);
651     + de4x5_debug = j;
652     + printk("\n");
653     + } else {
654     + goto purgatory;
655     + }
656     }
657     }
658     purgatory:
659     diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
660     index ed3829ae4ef1b..580199fdd0c22 100644
661     --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
662     +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
663     @@ -398,6 +398,10 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
664     return;
665    
666     if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
667     + /* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8.
668     + We need check to prevent array overflow */
669     + if (port >= DSAF_MAX_PORT_NUM)
670     + return;
671     reg_val_1 = 0x1 << port;
672     port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
673     /* there is difference between V1 and V2 in register.*/
674     diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
675     index 16a390c77d198..d700f1b5a4bf7 100644
676     --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
677     +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
678     @@ -5773,7 +5773,7 @@ static int mvpp2_probe(struct platform_device *pdev)
679    
680     shared = num_present_cpus() - priv->nthreads;
681     if (shared > 0)
682     - bitmap_fill(&priv->lock_map,
683     + bitmap_set(&priv->lock_map, 0,
684     min_t(int, shared, MVPP2_MAX_THREADS));
685    
686     for (i = 0; i < MVPP2_MAX_THREADS; i++) {
687     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
688     index d029179a4804c..91334229c1205 100644
689     --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
690     +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
691     @@ -2281,9 +2281,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
692     bool carry_xdp_prog)
693     {
694     struct bpf_prog *xdp_prog;
695     - int i, t;
696     + int i, t, ret;
697    
698     - mlx4_en_copy_priv(tmp, priv, prof);
699     + ret = mlx4_en_copy_priv(tmp, priv, prof);
700     + if (ret) {
701     + en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n",
702     + __func__);
703     + return ret;
704     + }
705    
706     if (mlx4_en_alloc_resources(tmp)) {
707     en_warn(priv,
708     diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
709     index 44171d7bb434c..5117864738a7e 100644
710     --- a/drivers/net/ethernet/natsemi/xtsonic.c
711     +++ b/drivers/net/ethernet/natsemi/xtsonic.c
712     @@ -120,7 +120,7 @@ static const struct net_device_ops xtsonic_netdev_ops = {
713     .ndo_set_mac_address = eth_mac_addr,
714     };
715    
716     -static int __init sonic_probe1(struct net_device *dev)
717     +static int sonic_probe1(struct net_device *dev)
718     {
719     unsigned int silicon_revision;
720     struct sonic_local *lp = netdev_priv(dev);
721     diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
722     index f2014c10f7c97..df43d364e6a41 100644
723     --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
724     +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
725     @@ -1079,8 +1079,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
726     sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
727     context_id = recv_ctx->context_id;
728     num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
729     - ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
730     - QLCNIC_CMD_ADD_RCV_RINGS);
731     + err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
732     + QLCNIC_CMD_ADD_RCV_RINGS);
733     + if (err) {
734     + dev_err(&adapter->pdev->dev,
735     + "Failed to alloc mbx args %d\n", err);
736     + return err;
737     + }
738     +
739     cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
740    
741     /* set up status rings, mbx 2-81 */
742     diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
743     index fe830b72c3b0f..2d98373f7a71d 100644
744     --- a/drivers/net/usb/lan78xx.c
745     +++ b/drivers/net/usb/lan78xx.c
746     @@ -2136,7 +2136,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
747     if (dev->domain_data.phyirq > 0)
748     phydev->irq = dev->domain_data.phyirq;
749     else
750     - phydev->irq = 0;
751     + phydev->irq = PHY_POLL;
752     netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
753    
754     /* set to AUTOMDIX */
755     diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
756     index f436b8c130611..be5bd2f637d80 100644
757     --- a/drivers/net/vrf.c
758     +++ b/drivers/net/vrf.c
759     @@ -221,6 +221,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
760     /* strip the ethernet header added for pass through VRF device */
761     __skb_pull(skb, skb_network_offset(skb));
762    
763     + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
764     ret = vrf_ip6_local_out(net, skb->sk, skb);
765     if (unlikely(net_xmit_eval(ret)))
766     dev->stats.tx_errors++;
767     @@ -304,6 +305,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
768     RT_SCOPE_LINK);
769     }
770    
771     + memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
772     ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
773     if (unlikely(net_xmit_eval(ret)))
774     vrf_dev->stats.tx_errors++;
775     diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
776     index ff0519ea00a5f..e68366f248fe3 100644
777     --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
778     +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
779     @@ -1276,23 +1276,31 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
780     const struct iwl_op_mode_ops *ops = op->ops;
781     struct dentry *dbgfs_dir = NULL;
782     struct iwl_op_mode *op_mode = NULL;
783     + int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
784     +
785     + for (retry = 0; retry <= max_retry; retry++) {
786    
787     #ifdef CONFIG_IWLWIFI_DEBUGFS
788     - drv->dbgfs_op_mode = debugfs_create_dir(op->name,
789     - drv->dbgfs_drv);
790     - dbgfs_dir = drv->dbgfs_op_mode;
791     + drv->dbgfs_op_mode = debugfs_create_dir(op->name,
792     + drv->dbgfs_drv);
793     + dbgfs_dir = drv->dbgfs_op_mode;
794     #endif
795    
796     - op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir);
797     + op_mode = ops->start(drv->trans, drv->trans->cfg,
798     + &drv->fw, dbgfs_dir);
799     +
800     + if (op_mode)
801     + return op_mode;
802     +
803     + IWL_ERR(drv, "retry init count %d\n", retry);
804    
805     #ifdef CONFIG_IWLWIFI_DEBUGFS
806     - if (!op_mode) {
807     debugfs_remove_recursive(drv->dbgfs_op_mode);
808     drv->dbgfs_op_mode = NULL;
809     - }
810     #endif
811     + }
812    
813     - return op_mode;
814     + return NULL;
815     }
816    
817     static void _iwl_op_mode_stop(struct iwl_drv *drv)
818     diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
819     index 2be30af7bdc30..9663db12b6f32 100644
820     --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
821     +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
822     @@ -145,4 +145,7 @@ void iwl_drv_stop(struct iwl_drv *drv);
823     #define IWL_EXPORT_SYMBOL(sym)
824     #endif
825    
826     +/* max retry for init flow */
827     +#define IWL_MAX_INIT_RETRY 2
828     +
829     #endif /* __iwl_drv_h__ */
830     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
831     index 081cbc9ec7368..c942255aa1dbc 100644
832     --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
833     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
834     @@ -71,6 +71,7 @@
835     #include <net/ieee80211_radiotap.h>
836     #include <net/tcp.h>
837    
838     +#include "iwl-drv.h"
839     #include "iwl-op-mode.h"
840     #include "iwl-io.h"
841     #include "mvm.h"
842     @@ -1129,9 +1130,30 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
843     {
844     struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
845     int ret;
846     + int retry, max_retry = 0;
847    
848     mutex_lock(&mvm->mutex);
849     - ret = __iwl_mvm_mac_start(mvm);
850     +
851     + /* we are starting the mac not in error flow, and restart is enabled */
852     + if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
853     + iwlwifi_mod_params.fw_restart) {
854     + max_retry = IWL_MAX_INIT_RETRY;
855     + /*
856     + * This will prevent mac80211 recovery flows to trigger during
857     + * init failures
858     + */
859     + set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
860     + }
861     +
862     + for (retry = 0; retry <= max_retry; retry++) {
863     + ret = __iwl_mvm_mac_start(mvm);
864     + if (!ret)
865     + break;
866     +
867     + IWL_ERR(mvm, "mac start retry %d\n", retry);
868     + }
869     + clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
870     +
871     mutex_unlock(&mvm->mutex);
872    
873     return ret;
874     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
875     index 5f1ecbb6fb71d..b06a9da753ff7 100644
876     --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
877     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
878     @@ -1167,6 +1167,8 @@ struct iwl_mvm {
879     * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
880     * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
881     * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
882     + * @IWL_MVM_STATUS_STARTING: starting mac,
883     + * used to disable restart flow while in STARTING state
884     */
885     enum iwl_mvm_status {
886     IWL_MVM_STATUS_HW_RFKILL,
887     @@ -1177,6 +1179,7 @@ enum iwl_mvm_status {
888     IWL_MVM_STATUS_ROC_AUX_RUNNING,
889     IWL_MVM_STATUS_FIRMWARE_RUNNING,
890     IWL_MVM_STATUS_NEED_FLUSH_P2P,
891     + IWL_MVM_STATUS_STARTING,
892     };
893    
894     /* Keep track of completed init configuration */
895     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
896     index a9aab6c690e85..5973eecbc0378 100644
897     --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
898     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
899     @@ -1288,6 +1288,9 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
900     */
901     if (!mvm->fw_restart && fw_error) {
902     iwl_fw_error_collect(&mvm->fwrt);
903     + } else if (test_bit(IWL_MVM_STATUS_STARTING,
904     + &mvm->status)) {
905     + IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
906     } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
907     struct iwl_mvm_reprobe *reprobe;
908    
909     diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
910     index bc2dfef0de22b..24af321f625f2 100644
911     --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
912     +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
913     @@ -25,6 +25,9 @@ static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status)
914     if (status == -ENODEV || status == -ENOENT)
915     return true;
916    
917     + if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
918     + return false;
919     +
920     if (status == -EPROTO || status == -ETIMEDOUT)
921     rt2x00dev->num_proto_errs++;
922     else
923     diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
924     index 3028d9f1ac59c..5d114088c88fb 100644
925     --- a/drivers/platform/x86/thinkpad_acpi.c
926     +++ b/drivers/platform/x86/thinkpad_acpi.c
927     @@ -1188,15 +1188,6 @@ static int tpacpi_rfk_update_swstate(const struct tpacpi_rfk *tp_rfk)
928     return status;
929     }
930    
931     -/* Query FW and update rfkill sw state for all rfkill switches */
932     -static void tpacpi_rfk_update_swstate_all(void)
933     -{
934     - unsigned int i;
935     -
936     - for (i = 0; i < TPACPI_RFK_SW_MAX; i++)
937     - tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[i]);
938     -}
939     -
940     /*
941     * Sync the HW-blocking state of all rfkill switches,
942     * do notice it causes the rfkill core to schedule uevents
943     @@ -3135,9 +3126,6 @@ static void tpacpi_send_radiosw_update(void)
944     if (wlsw == TPACPI_RFK_RADIO_OFF)
945     tpacpi_rfk_update_hwblock_state(true);
946    
947     - /* Sync sw blocking state */
948     - tpacpi_rfk_update_swstate_all();
949     -
950     /* Sync hw blocking state last if it is hw-unblocked */
951     if (wlsw == TPACPI_RFK_RADIO_ON)
952     tpacpi_rfk_update_hwblock_state(false);
953     diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
954     index 6f21cb75d95fd..f6cce0befa7de 100644
955     --- a/drivers/scsi/scsi_transport_iscsi.c
956     +++ b/drivers/scsi/scsi_transport_iscsi.c
957     @@ -1894,12 +1894,12 @@ static void session_recovery_timedout(struct work_struct *work)
958     }
959     spin_unlock_irqrestore(&session->lock, flags);
960    
961     - if (session->transport->session_recovery_timedout)
962     - session->transport->session_recovery_timedout(session);
963     -
964     ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
965     scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
966     ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
967     +
968     + if (session->transport->session_recovery_timedout)
969     + session->transport->session_recovery_timedout(session);
970     }
971    
972     static void __iscsi_unblock_session(struct work_struct *work)
973     diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
974     index 20eab56b02cb9..f4490b8120176 100644
975     --- a/drivers/thermal/thermal_core.c
976     +++ b/drivers/thermal/thermal_core.c
977     @@ -460,6 +460,8 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz)
978     {
979     struct thermal_instance *pos;
980     tz->temperature = THERMAL_TEMP_INVALID;
981     + tz->prev_low_trip = -INT_MAX;
982     + tz->prev_high_trip = INT_MAX;
983     list_for_each_entry(pos, &tz->thermal_instances, tz_node)
984     pos->initialized = false;
985     }
986     diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
987     index a2bb103f22fc6..c82c7181348de 100644
988     --- a/drivers/tty/serial/8250/8250_pci.c
989     +++ b/drivers/tty/serial/8250/8250_pci.c
990     @@ -1351,29 +1351,33 @@ pericom_do_set_divisor(struct uart_port *port, unsigned int baud,
991     {
992     int scr;
993     int lcr;
994     - int actual_baud;
995     - int tolerance;
996    
997     - for (scr = 5 ; scr <= 15 ; scr++) {
998     - actual_baud = 921600 * 16 / scr;
999     - tolerance = actual_baud / 50;
1000     + for (scr = 16; scr > 4; scr--) {
1001     + unsigned int maxrate = port->uartclk / scr;
1002     + unsigned int divisor = max(maxrate / baud, 1U);
1003     + int delta = maxrate / divisor - baud;
1004    
1005     - if ((baud < actual_baud + tolerance) &&
1006     - (baud > actual_baud - tolerance)) {
1007     + if (baud > maxrate + baud / 50)
1008     + continue;
1009    
1010     + if (delta > baud / 50)
1011     + divisor++;
1012     +
1013     + if (divisor > 0xffff)
1014     + continue;
1015     +
1016     + /* Update delta due to possible divisor change */
1017     + delta = maxrate / divisor - baud;
1018     + if (abs(delta) < baud / 50) {
1019     lcr = serial_port_in(port, UART_LCR);
1020     serial_port_out(port, UART_LCR, lcr | 0x80);
1021     -
1022     - serial_port_out(port, UART_DLL, 1);
1023     - serial_port_out(port, UART_DLM, 0);
1024     + serial_port_out(port, UART_DLL, divisor & 0xff);
1025     + serial_port_out(port, UART_DLM, divisor >> 8 & 0xff);
1026     serial_port_out(port, 2, 16 - scr);
1027     serial_port_out(port, UART_LCR, lcr);
1028     return;
1029     - } else if (baud > actual_baud) {
1030     - break;
1031     }
1032     }
1033     - serial8250_do_set_divisor(port, baud, quot, quot_frac);
1034     }
1035     static int pci_pericom_setup(struct serial_private *priv,
1036     const struct pciserial_board *board,
1037     @@ -2285,12 +2289,19 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
1038     .setup = pci_pericom_setup_four_at_eight,
1039     },
1040     {
1041     - .vendor = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
1042     + .vendor = PCI_VENDOR_ID_ACCESIO,
1043     .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
1044     .subvendor = PCI_ANY_ID,
1045     .subdevice = PCI_ANY_ID,
1046     .setup = pci_pericom_setup_four_at_eight,
1047     },
1048     + {
1049     + .vendor = PCI_VENDOR_ID_ACCESIO,
1050     + .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
1051     + .subvendor = PCI_ANY_ID,
1052     + .subdevice = PCI_ANY_ID,
1053     + .setup = pci_pericom_setup_four_at_eight,
1054     + },
1055     {
1056     .vendor = PCI_VENDOR_ID_ACCESIO,
1057     .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
1058     diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
1059     index 16720c97a4dde..6741d0f3daf94 100644
1060     --- a/drivers/tty/serial/amba-pl011.c
1061     +++ b/drivers/tty/serial/amba-pl011.c
1062     @@ -2770,6 +2770,7 @@ MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
1063    
1064     static const struct acpi_device_id sbsa_uart_acpi_match[] = {
1065     { "ARMH0011", 0 },
1066     + { "ARMHB000", 0 },
1067     {},
1068     };
1069     MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
1070     diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
1071     index e0718ee5d42a7..5129c2dfbe079 100644
1072     --- a/drivers/tty/serial/msm_serial.c
1073     +++ b/drivers/tty/serial/msm_serial.c
1074     @@ -603,6 +603,9 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
1075     u32 val;
1076     int ret;
1077    
1078     + if (IS_ENABLED(CONFIG_CONSOLE_POLL))
1079     + return;
1080     +
1081     if (!dma->chan)
1082     return;
1083    
1084     diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
1085     index 38ee13bbcab81..aad640b9e3f4b 100644
1086     --- a/drivers/tty/serial/serial_core.c
1087     +++ b/drivers/tty/serial/serial_core.c
1088     @@ -1573,6 +1573,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
1089     {
1090     struct uart_state *state = container_of(port, struct uart_state, port);
1091     struct uart_port *uport = uart_port_check(state);
1092     + char *buf;
1093    
1094     /*
1095     * At this point, we stop accepting input. To do this, we
1096     @@ -1594,8 +1595,18 @@ static void uart_tty_port_shutdown(struct tty_port *port)
1097     */
1098     tty_port_set_suspended(port, 0);
1099    
1100     - uart_change_pm(state, UART_PM_STATE_OFF);
1101     + /*
1102     + * Free the transmit buffer.
1103     + */
1104     + spin_lock_irq(&uport->lock);
1105     + buf = state->xmit.buf;
1106     + state->xmit.buf = NULL;
1107     + spin_unlock_irq(&uport->lock);
1108    
1109     + if (buf)
1110     + free_page((unsigned long)buf);
1111     +
1112     + uart_change_pm(state, UART_PM_STATE_OFF);
1113     }
1114    
1115     static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
1116     diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1117     index d97544fd339b1..e170c5b4d6f0c 100644
1118     --- a/drivers/usb/core/quirks.c
1119     +++ b/drivers/usb/core/quirks.c
1120     @@ -435,6 +435,9 @@ static const struct usb_device_id usb_quirk_list[] = {
1121     { USB_DEVICE(0x1532, 0x0116), .driver_info =
1122     USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
1123    
1124     + /* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */
1125     + { USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM },
1126     +
1127     /* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
1128     { USB_DEVICE(0x17ef, 0xa012), .driver_info =
1129     USB_QUIRK_DISCONNECT_SUSPEND },
1130     diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
1131     index 2c92fd19e7822..5ec31d2d32e05 100644
1132     --- a/drivers/usb/host/xhci-ring.c
1133     +++ b/drivers/usb/host/xhci-ring.c
1134     @@ -339,7 +339,9 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
1135     /* Must be called with xhci->lock held, releases and aquires lock back */
1136     static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
1137     {
1138     - u32 temp_32;
1139     + struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
1140     + union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
1141     + u64 crcr;
1142     int ret;
1143    
1144     xhci_dbg(xhci, "Abort command ring\n");
1145     @@ -348,13 +350,18 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
1146    
1147     /*
1148     * The control bits like command stop, abort are located in lower
1149     - * dword of the command ring control register. Limit the write
1150     - * to the lower dword to avoid corrupting the command ring pointer
1151     - * in case if the command ring is stopped by the time upper dword
1152     - * is written.
1153     + * dword of the command ring control register.
1154     + * Some controllers require all 64 bits to be written to abort the ring.
1155     + * Make sure the upper dword is valid, pointing to the next command,
1156     + * avoiding corrupting the command ring pointer in case the command ring
1157     + * is stopped by the time the upper dword is written.
1158     */
1159     - temp_32 = readl(&xhci->op_regs->cmd_ring);
1160     - writel(temp_32 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
1161     + next_trb(xhci, NULL, &new_seg, &new_deq);
1162     + if (trb_is_link(new_deq))
1163     + next_trb(xhci, NULL, &new_seg, &new_deq);
1164     +
1165     + crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
1166     + xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
1167    
1168     /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
1169     * completion of the Command Abort operation. If CRR is not negated in 5
1170     diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
1171     index 89391939630bd..1316464cf2933 100644
1172     --- a/drivers/usb/typec/tcpm/tcpm.c
1173     +++ b/drivers/usb/typec/tcpm/tcpm.c
1174     @@ -3118,11 +3118,7 @@ static void run_state_machine(struct tcpm_port *port)
1175     tcpm_try_src(port) ? SRC_TRY
1176     : SNK_ATTACHED,
1177     0);
1178     - else
1179     - /* Wait for VBUS, but not forever */
1180     - tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON);
1181     break;
1182     -
1183     case SRC_TRY:
1184     port->try_src_count++;
1185     tcpm_set_cc(port, tcpm_rp_cc(port));
1186     diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
1187     index a992d922b3a71..23f15f42e5cb6 100644
1188     --- a/drivers/video/console/vgacon.c
1189     +++ b/drivers/video/console/vgacon.c
1190     @@ -370,11 +370,17 @@ static void vgacon_init(struct vc_data *c, int init)
1191     struct uni_pagedir *p;
1192    
1193     /*
1194     - * We cannot be loaded as a module, therefore init is always 1,
1195     - * but vgacon_init can be called more than once, and init will
1196     - * not be 1.
1197     + * We cannot be loaded as a module, therefore init will be 1
1198     + * if we are the default console, however if we are a fallback
1199     + * console, for example if fbcon has failed registration, then
1200     + * init will be 0, so we need to make sure our boot parameters
1201     + * have been copied to the console structure for vgacon_resize
1202     + * ultimately called by vc_resize. Any subsequent calls to
1203     + * vgacon_init init will have init set to 0 too.
1204     */
1205     c->vc_can_do_color = vga_can_do_color;
1206     + c->vc_scan_lines = vga_scan_lines;
1207     + c->vc_font.height = c->vc_cell_height = vga_video_font_height;
1208    
1209     /* set dimensions manually if init != 0 since vc_resize() will fail */
1210     if (init) {
1211     @@ -383,8 +389,6 @@ static void vgacon_init(struct vc_data *c, int init)
1212     } else
1213     vc_resize(c, vga_video_num_columns, vga_video_num_lines);
1214    
1215     - c->vc_scan_lines = vga_scan_lines;
1216     - c->vc_font.height = c->vc_cell_height = vga_video_font_height;
1217     c->vc_complement_mask = 0x7700;
1218     if (vga_512_chars)
1219     c->vc_hi_font_mask = 0x0800;
1220     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
1221     index 1499531bc1511..f18c6d97932ed 100644
1222     --- a/fs/btrfs/disk-io.c
1223     +++ b/fs/btrfs/disk-io.c
1224     @@ -3636,11 +3636,23 @@ static void btrfs_end_empty_barrier(struct bio *bio)
1225     */
1226     static void write_dev_flush(struct btrfs_device *device)
1227     {
1228     - struct request_queue *q = bdev_get_queue(device->bdev);
1229     struct bio *bio = device->flush_bio;
1230    
1231     +#ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
1232     + /*
1233     + * When a disk has write caching disabled, we skip submission of a bio
1234     + * with flush and sync requests before writing the superblock, since
1235     + * it's not needed. However when the integrity checker is enabled, this
1236     + * results in reports that there are metadata blocks referred by a
1237     + * superblock that were not properly flushed. So don't skip the bio
1238     + * submission only when the integrity checker is enabled for the sake
1239     + * of simplicity, since this is a debug tool and not meant for use in
1240     + * non-debug builds.
1241     + */
1242     + struct request_queue *q = bdev_get_queue(device->bdev);
1243     if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
1244     return;
1245     +#endif
1246    
1247     bio_reset(bio);
1248     bio->bi_end_io = btrfs_end_empty_barrier;
1249     diff --git a/fs/file.c b/fs/file.c
1250     index e5d328335f88c..09cefc944f86d 100644
1251     --- a/fs/file.c
1252     +++ b/fs/file.c
1253     @@ -723,6 +723,10 @@ loop:
1254     file = NULL;
1255     else if (!get_file_rcu_many(file, refs))
1256     goto loop;
1257     + else if (__fcheck_files(files, fd) != file) {
1258     + fput_many(file, refs);
1259     + goto loop;
1260     + }
1261     }
1262     rcu_read_unlock();
1263    
1264     diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
1265     index aaec3c5b02028..dec5285a02e9d 100644
1266     --- a/fs/gfs2/bmap.c
1267     +++ b/fs/gfs2/bmap.c
1268     @@ -940,7 +940,7 @@ do_alloc:
1269     else if (height == ip->i_height)
1270     ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
1271     else
1272     - iomap->length = size - pos;
1273     + iomap->length = size - iomap->offset;
1274     } else if (flags & IOMAP_WRITE) {
1275     u64 alloc_size;
1276    
1277     diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
1278     index 6b7c926824ae0..504812ea4bc29 100644
1279     --- a/fs/nfs/nfs42proc.c
1280     +++ b/fs/nfs/nfs42proc.c
1281     @@ -295,8 +295,9 @@ static ssize_t _nfs42_proc_copy(struct file *src,
1282     goto out;
1283     }
1284    
1285     - truncate_pagecache_range(dst_inode, pos_dst,
1286     - pos_dst + res->write_res.count);
1287     + WARN_ON_ONCE(invalidate_inode_pages2_range(dst_inode->i_mapping,
1288     + pos_dst >> PAGE_SHIFT,
1289     + (pos_dst + res->write_res.count - 1) >> PAGE_SHIFT));
1290    
1291     status = res->write_res.count;
1292     out:
1293     diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
1294     index a121fd8e7c3a0..c7764d9e6f397 100644
1295     --- a/include/linux/kprobes.h
1296     +++ b/include/linux/kprobes.h
1297     @@ -155,6 +155,8 @@ struct kretprobe {
1298     raw_spinlock_t lock;
1299     };
1300    
1301     +#define KRETPROBE_MAX_DATA_SIZE 4096
1302     +
1303     struct kretprobe_instance {
1304     struct hlist_node hlist;
1305     struct kretprobe *rp;
1306     diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
1307     index ddc66ab8a1def..4860944e936db 100644
1308     --- a/include/linux/netdevice.h
1309     +++ b/include/linux/netdevice.h
1310     @@ -3943,7 +3943,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
1311     static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
1312     {
1313     spin_lock(&txq->_xmit_lock);
1314     - txq->xmit_lock_owner = cpu;
1315     + /* Pairs with READ_ONCE() in __dev_queue_xmit() */
1316     + WRITE_ONCE(txq->xmit_lock_owner, cpu);
1317     }
1318    
1319     static inline bool __netif_tx_acquire(struct netdev_queue *txq)
1320     @@ -3960,26 +3961,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
1321     static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
1322     {
1323     spin_lock_bh(&txq->_xmit_lock);
1324     - txq->xmit_lock_owner = smp_processor_id();
1325     + /* Pairs with READ_ONCE() in __dev_queue_xmit() */
1326     + WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
1327     }
1328    
1329     static inline bool __netif_tx_trylock(struct netdev_queue *txq)
1330     {
1331     bool ok = spin_trylock(&txq->_xmit_lock);
1332     - if (likely(ok))
1333     - txq->xmit_lock_owner = smp_processor_id();
1334     +
1335     + if (likely(ok)) {
1336     + /* Pairs with READ_ONCE() in __dev_queue_xmit() */
1337     + WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
1338     + }
1339     return ok;
1340     }
1341    
1342     static inline void __netif_tx_unlock(struct netdev_queue *txq)
1343     {
1344     - txq->xmit_lock_owner = -1;
1345     + /* Pairs with READ_ONCE() in __dev_queue_xmit() */
1346     + WRITE_ONCE(txq->xmit_lock_owner, -1);
1347     spin_unlock(&txq->_xmit_lock);
1348     }
1349    
1350     static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
1351     {
1352     - txq->xmit_lock_owner = -1;
1353     + /* Pairs with READ_ONCE() in __dev_queue_xmit() */
1354     + WRITE_ONCE(txq->xmit_lock_owner, -1);
1355     spin_unlock_bh(&txq->_xmit_lock);
1356     }
1357    
1358     diff --git a/include/linux/of_clk.h b/include/linux/of_clk.h
1359     index b27da9f164cbd..c86fcad23fc21 100644
1360     --- a/include/linux/of_clk.h
1361     +++ b/include/linux/of_clk.h
1362     @@ -6,6 +6,9 @@
1363     #ifndef __LINUX_OF_CLK_H
1364     #define __LINUX_OF_CLK_H
1365    
1366     +struct device_node;
1367     +struct of_device_id;
1368     +
1369     #if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF)
1370    
1371     unsigned int of_clk_get_parent_count(struct device_node *np);
1372     diff --git a/include/linux/siphash.h b/include/linux/siphash.h
1373     index bf21591a9e5e6..0cda61855d907 100644
1374     --- a/include/linux/siphash.h
1375     +++ b/include/linux/siphash.h
1376     @@ -27,9 +27,7 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key)
1377     }
1378    
1379     u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
1380     -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1381     u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
1382     -#endif
1383    
1384     u64 siphash_1u64(const u64 a, const siphash_key_t *key);
1385     u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
1386     @@ -82,10 +80,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
1387     static inline u64 siphash(const void *data, size_t len,
1388     const siphash_key_t *key)
1389     {
1390     -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1391     - if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
1392     + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
1393     + !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
1394     return __siphash_unaligned(data, len, key);
1395     -#endif
1396     return ___siphash_aligned(data, len, key);
1397     }
1398    
1399     @@ -96,10 +93,8 @@ typedef struct {
1400    
1401     u32 __hsiphash_aligned(const void *data, size_t len,
1402     const hsiphash_key_t *key);
1403     -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1404     u32 __hsiphash_unaligned(const void *data, size_t len,
1405     const hsiphash_key_t *key);
1406     -#endif
1407    
1408     u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
1409     u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
1410     @@ -135,10 +130,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
1411     static inline u32 hsiphash(const void *data, size_t len,
1412     const hsiphash_key_t *key)
1413     {
1414     -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1415     - if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
1416     + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
1417     + !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
1418     return __hsiphash_unaligned(data, len, key);
1419     -#endif
1420     return ___hsiphash_aligned(data, len, key);
1421     }
1422    
1423     diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
1424     index 7fed3193f81d4..25eae5c67387e 100644
1425     --- a/include/net/fib_rules.h
1426     +++ b/include/net/fib_rules.h
1427     @@ -68,7 +68,7 @@ struct fib_rules_ops {
1428     int (*action)(struct fib_rule *,
1429     struct flowi *, int,
1430     struct fib_lookup_arg *);
1431     - bool (*suppress)(struct fib_rule *,
1432     + bool (*suppress)(struct fib_rule *, int,
1433     struct fib_lookup_arg *);
1434     int (*match)(struct fib_rule *,
1435     struct flowi *, int);
1436     diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
1437     index cb6c125628990..3bf9ecb6b0d39 100644
1438     --- a/include/net/ip_fib.h
1439     +++ b/include/net/ip_fib.h
1440     @@ -412,7 +412,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
1441     #ifdef CONFIG_IP_ROUTE_CLASSID
1442     static inline int fib_num_tclassid_users(struct net *net)
1443     {
1444     - return net->ipv4.fib_num_tclassid_users;
1445     + return atomic_read(&net->ipv4.fib_num_tclassid_users);
1446     }
1447     #else
1448     static inline int fib_num_tclassid_users(struct net *net)
1449     diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
1450     index c0c0791b19123..da171544a33c7 100644
1451     --- a/include/net/netns/ipv4.h
1452     +++ b/include/net/netns/ipv4.h
1453     @@ -61,7 +61,7 @@ struct netns_ipv4 {
1454     #endif
1455     bool fib_has_custom_local_routes;
1456     #ifdef CONFIG_IP_ROUTE_CLASSID
1457     - int fib_num_tclassid_users;
1458     + atomic_t fib_num_tclassid_users;
1459     #endif
1460     struct hlist_head *fib_table_hash;
1461     bool fib_offload_disabled;
1462     diff --git a/kernel/kprobes.c b/kernel/kprobes.c
1463     index 1668439b269d3..c93340bae3ac2 100644
1464     --- a/kernel/kprobes.c
1465     +++ b/kernel/kprobes.c
1466     @@ -2003,6 +2003,9 @@ int register_kretprobe(struct kretprobe *rp)
1467     }
1468     }
1469    
1470     + if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
1471     + return -E2BIG;
1472     +
1473     rp->kp.pre_handler = pre_handler_kretprobe;
1474     rp->kp.post_handler = NULL;
1475     rp->kp.fault_handler = NULL;
1476     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
1477     index f8ca0738d729e..5befdecefe947 100644
1478     --- a/kernel/sched/core.c
1479     +++ b/kernel/sched/core.c
1480     @@ -1337,7 +1337,7 @@ static void __init init_uclamp_rq(struct rq *rq)
1481     };
1482     }
1483    
1484     - rq->uclamp_flags = 0;
1485     + rq->uclamp_flags = UCLAMP_FLAG_IDLE;
1486     }
1487    
1488     static void __init init_uclamp(void)
1489     diff --git a/lib/siphash.c b/lib/siphash.c
1490     index c47bb6ff21499..025f0cbf6d7a7 100644
1491     --- a/lib/siphash.c
1492     +++ b/lib/siphash.c
1493     @@ -49,6 +49,7 @@
1494     SIPROUND; \
1495     return (v0 ^ v1) ^ (v2 ^ v3);
1496    
1497     +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1498     u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
1499     {
1500     const u8 *end = data + len - (len % sizeof(u64));
1501     @@ -80,8 +81,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
1502     POSTAMBLE
1503     }
1504     EXPORT_SYMBOL(__siphash_aligned);
1505     +#endif
1506    
1507     -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1508     u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
1509     {
1510     const u8 *end = data + len - (len % sizeof(u64));
1511     @@ -113,7 +114,6 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
1512     POSTAMBLE
1513     }
1514     EXPORT_SYMBOL(__siphash_unaligned);
1515     -#endif
1516    
1517     /**
1518     * siphash_1u64 - compute 64-bit siphash PRF value of a u64
1519     @@ -250,6 +250,7 @@ EXPORT_SYMBOL(siphash_3u32);
1520     HSIPROUND; \
1521     return (v0 ^ v1) ^ (v2 ^ v3);
1522    
1523     +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1524     u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
1525     {
1526     const u8 *end = data + len - (len % sizeof(u64));
1527     @@ -280,8 +281,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
1528     HPOSTAMBLE
1529     }
1530     EXPORT_SYMBOL(__hsiphash_aligned);
1531     +#endif
1532    
1533     -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1534     u32 __hsiphash_unaligned(const void *data, size_t len,
1535     const hsiphash_key_t *key)
1536     {
1537     @@ -313,7 +314,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
1538     HPOSTAMBLE
1539     }
1540     EXPORT_SYMBOL(__hsiphash_unaligned);
1541     -#endif
1542    
1543     /**
1544     * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32
1545     @@ -418,6 +418,7 @@ EXPORT_SYMBOL(hsiphash_4u32);
1546     HSIPROUND; \
1547     return v1 ^ v3;
1548    
1549     +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1550     u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
1551     {
1552     const u8 *end = data + len - (len % sizeof(u32));
1553     @@ -438,8 +439,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
1554     HPOSTAMBLE
1555     }
1556     EXPORT_SYMBOL(__hsiphash_aligned);
1557     +#endif
1558    
1559     -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1560     u32 __hsiphash_unaligned(const void *data, size_t len,
1561     const hsiphash_key_t *key)
1562     {
1563     @@ -461,7 +462,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
1564     HPOSTAMBLE
1565     }
1566     EXPORT_SYMBOL(__hsiphash_unaligned);
1567     -#endif
1568    
1569     /**
1570     * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
1571     diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
1572     index 811682e06951b..22f4b798d385b 100644
1573     --- a/net/can/j1939/transport.c
1574     +++ b/net/can/j1939/transport.c
1575     @@ -2004,6 +2004,12 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
1576     extd = J1939_ETP;
1577     /* fall through */
1578     case J1939_TP_CMD_BAM: /* fall through */
1579     + if (cmd == J1939_TP_CMD_BAM && !j1939_cb_is_broadcast(skcb)) {
1580     + netdev_err_once(priv->ndev, "%s: BAM to unicast (%02x), ignoring!\n",
1581     + __func__, skcb->addr.sa);
1582     + return;
1583     + }
1584     + fallthrough;
1585     case J1939_TP_CMD_RTS: /* fall through */
1586     if (skcb->addr.type != extd)
1587     return;
1588     diff --git a/net/core/dev.c b/net/core/dev.c
1589     index ff336417c9b90..a03036456221b 100644
1590     --- a/net/core/dev.c
1591     +++ b/net/core/dev.c
1592     @@ -3766,7 +3766,10 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
1593     if (dev->flags & IFF_UP) {
1594     int cpu = smp_processor_id(); /* ok because BHs are off */
1595    
1596     - if (txq->xmit_lock_owner != cpu) {
1597     + /* Other cpus might concurrently change txq->xmit_lock_owner
1598     + * to -1 or to their cpu id, but not to our id.
1599     + */
1600     + if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
1601     if (dev_xmit_recursion())
1602     goto recursion_alert;
1603    
1604     diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
1605     index 675f27ef6872f..83299a85480aa 100644
1606     --- a/net/core/fib_rules.c
1607     +++ b/net/core/fib_rules.c
1608     @@ -300,7 +300,7 @@ jumped:
1609     else
1610     err = ops->action(rule, fl, flags, arg);
1611    
1612     - if (!err && ops->suppress && ops->suppress(rule, arg))
1613     + if (!err && ops->suppress && ops->suppress(rule, flags, arg))
1614     continue;
1615    
1616     if (err != -EAGAIN) {
1617     diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
1618     index 603a3495afa62..4a8ad46397c0e 100644
1619     --- a/net/ipv4/devinet.c
1620     +++ b/net/ipv4/devinet.c
1621     @@ -2585,7 +2585,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
1622     free:
1623     kfree(t);
1624     out:
1625     - return -ENOBUFS;
1626     + return -ENOMEM;
1627     }
1628    
1629     static void __devinet_sysctl_unregister(struct net *net,
1630     diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
1631     index b875b98820ede..a95102fe66371 100644
1632     --- a/net/ipv4/fib_frontend.c
1633     +++ b/net/ipv4/fib_frontend.c
1634     @@ -1588,7 +1588,7 @@ static int __net_init fib_net_init(struct net *net)
1635     int error;
1636    
1637     #ifdef CONFIG_IP_ROUTE_CLASSID
1638     - net->ipv4.fib_num_tclassid_users = 0;
1639     + atomic_set(&net->ipv4.fib_num_tclassid_users, 0);
1640     #endif
1641     error = ip_fib_net_init(net);
1642     if (error < 0)
1643     diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
1644     index b43a7ba5c6a42..e9a3cc9e98dfa 100644
1645     --- a/net/ipv4/fib_rules.c
1646     +++ b/net/ipv4/fib_rules.c
1647     @@ -137,7 +137,7 @@ static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
1648     return err;
1649     }
1650    
1651     -static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
1652     +static bool fib4_rule_suppress(struct fib_rule *rule, int flags, struct fib_lookup_arg *arg)
1653     {
1654     struct fib_result *result = (struct fib_result *) arg->result;
1655     struct net_device *dev = NULL;
1656     @@ -258,7 +258,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
1657     if (tb[FRA_FLOW]) {
1658     rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
1659     if (rule4->tclassid)
1660     - net->ipv4.fib_num_tclassid_users++;
1661     + atomic_inc(&net->ipv4.fib_num_tclassid_users);
1662     }
1663     #endif
1664    
1665     @@ -290,7 +290,7 @@ static int fib4_rule_delete(struct fib_rule *rule)
1666    
1667     #ifdef CONFIG_IP_ROUTE_CLASSID
1668     if (((struct fib4_rule *)rule)->tclassid)
1669     - net->ipv4.fib_num_tclassid_users--;
1670     + atomic_dec(&net->ipv4.fib_num_tclassid_users);
1671     #endif
1672     net->ipv4.fib_has_custom_rules = true;
1673    
1674     diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
1675     index dce85a9c20c60..c0b8154205237 100644
1676     --- a/net/ipv4/fib_semantics.c
1677     +++ b/net/ipv4/fib_semantics.c
1678     @@ -222,7 +222,7 @@ void fib_nh_release(struct net *net, struct fib_nh *fib_nh)
1679     {
1680     #ifdef CONFIG_IP_ROUTE_CLASSID
1681     if (fib_nh->nh_tclassid)
1682     - net->ipv4.fib_num_tclassid_users--;
1683     + atomic_dec(&net->ipv4.fib_num_tclassid_users);
1684     #endif
1685     fib_nh_common_release(&fib_nh->nh_common);
1686     }
1687     @@ -624,7 +624,7 @@ int fib_nh_init(struct net *net, struct fib_nh *nh,
1688     #ifdef CONFIG_IP_ROUTE_CLASSID
1689     nh->nh_tclassid = cfg->fc_flow;
1690     if (nh->nh_tclassid)
1691     - net->ipv4.fib_num_tclassid_users++;
1692     + atomic_inc(&net->ipv4.fib_num_tclassid_users);
1693     #endif
1694     #ifdef CONFIG_IP_ROUTE_MULTIPATH
1695     nh->fib_nh_weight = nh_weight;
1696     diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
1697     index f9e8fe3ff0c5b..1727269396523 100644
1698     --- a/net/ipv6/fib6_rules.c
1699     +++ b/net/ipv6/fib6_rules.c
1700     @@ -260,7 +260,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
1701     return __fib6_rule_action(rule, flp, flags, arg);
1702     }
1703    
1704     -static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
1705     +static bool fib6_rule_suppress(struct fib_rule *rule, int flags, struct fib_lookup_arg *arg)
1706     {
1707     struct fib6_result *res = arg->result;
1708     struct rt6_info *rt = res->rt6;
1709     @@ -287,8 +287,7 @@ static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg
1710     return false;
1711    
1712     suppress_route:
1713     - if (!(arg->flags & FIB_LOOKUP_NOREF))
1714     - ip6_rt_put(rt);
1715     + ip6_rt_put_flags(rt, flags);
1716     return true;
1717     }
1718    
1719     diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
1720     index c7e6bf7c22c78..282bf336b15a4 100644
1721     --- a/net/mac80211/rx.c
1722     +++ b/net/mac80211/rx.c
1723     @@ -1918,7 +1918,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1724     int keyid = rx->sta->ptk_idx;
1725     sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
1726    
1727     - if (ieee80211_has_protected(fc)) {
1728     + if (ieee80211_has_protected(fc) &&
1729     + !(status->flag & RX_FLAG_IV_STRIPPED)) {
1730     cs = rx->sta->cipher_scheme;
1731     keyid = ieee80211_get_keyid(rx->skb, cs);
1732    
1733     diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
1734     index 4701edffb1f7d..d5e3656fc67ca 100644
1735     --- a/net/mpls/af_mpls.c
1736     +++ b/net/mpls/af_mpls.c
1737     @@ -1491,22 +1491,52 @@ static void mpls_dev_destroy_rcu(struct rcu_head *head)
1738     kfree(mdev);
1739     }
1740    
1741     -static void mpls_ifdown(struct net_device *dev, int event)
1742     +static int mpls_ifdown(struct net_device *dev, int event)
1743     {
1744     struct mpls_route __rcu **platform_label;
1745     struct net *net = dev_net(dev);
1746     - u8 alive, deleted;
1747     unsigned index;
1748    
1749     platform_label = rtnl_dereference(net->mpls.platform_label);
1750     for (index = 0; index < net->mpls.platform_labels; index++) {
1751     struct mpls_route *rt = rtnl_dereference(platform_label[index]);
1752     + bool nh_del = false;
1753     + u8 alive = 0;
1754    
1755     if (!rt)
1756     continue;
1757    
1758     - alive = 0;
1759     - deleted = 0;
1760     + if (event == NETDEV_UNREGISTER) {
1761     + u8 deleted = 0;
1762     +
1763     + for_nexthops(rt) {
1764     + struct net_device *nh_dev =
1765     + rtnl_dereference(nh->nh_dev);
1766     +
1767     + if (!nh_dev || nh_dev == dev)
1768     + deleted++;
1769     + if (nh_dev == dev)
1770     + nh_del = true;
1771     + } endfor_nexthops(rt);
1772     +
1773     + /* if there are no more nexthops, delete the route */
1774     + if (deleted == rt->rt_nhn) {
1775     + mpls_route_update(net, index, NULL, NULL);
1776     + continue;
1777     + }
1778     +
1779     + if (nh_del) {
1780     + size_t size = sizeof(*rt) + rt->rt_nhn *
1781     + rt->rt_nh_size;
1782     + struct mpls_route *orig = rt;
1783     +
1784     + rt = kmalloc(size, GFP_KERNEL);
1785     + if (!rt)
1786     + return -ENOMEM;
1787     + memcpy(rt, orig, size);
1788     + }
1789     + }
1790     +
1791     change_nexthops(rt) {
1792     unsigned int nh_flags = nh->nh_flags;
1793    
1794     @@ -1530,16 +1560,15 @@ static void mpls_ifdown(struct net_device *dev, int event)
1795     next:
1796     if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
1797     alive++;
1798     - if (!rtnl_dereference(nh->nh_dev))
1799     - deleted++;
1800     } endfor_nexthops(rt);
1801    
1802     WRITE_ONCE(rt->rt_nhn_alive, alive);
1803    
1804     - /* if there are no more nexthops, delete the route */
1805     - if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn)
1806     - mpls_route_update(net, index, NULL, NULL);
1807     + if (nh_del)
1808     + mpls_route_update(net, index, rt, NULL);
1809     }
1810     +
1811     + return 0;
1812     }
1813    
1814     static void mpls_ifup(struct net_device *dev, unsigned int flags)
1815     @@ -1607,8 +1636,12 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
1816     return NOTIFY_OK;
1817    
1818     switch (event) {
1819     + int err;
1820     +
1821     case NETDEV_DOWN:
1822     - mpls_ifdown(dev, event);
1823     + err = mpls_ifdown(dev, event);
1824     + if (err)
1825     + return notifier_from_errno(err);
1826     break;
1827     case NETDEV_UP:
1828     flags = dev_get_flags(dev);
1829     @@ -1619,13 +1652,18 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
1830     break;
1831     case NETDEV_CHANGE:
1832     flags = dev_get_flags(dev);
1833     - if (flags & (IFF_RUNNING | IFF_LOWER_UP))
1834     + if (flags & (IFF_RUNNING | IFF_LOWER_UP)) {
1835     mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
1836     - else
1837     - mpls_ifdown(dev, event);
1838     + } else {
1839     + err = mpls_ifdown(dev, event);
1840     + if (err)
1841     + return notifier_from_errno(err);
1842     + }
1843     break;
1844     case NETDEV_UNREGISTER:
1845     - mpls_ifdown(dev, event);
1846     + err = mpls_ifdown(dev, event);
1847     + if (err)
1848     + return notifier_from_errno(err);
1849     mdev = mpls_dev_get(dev);
1850     if (mdev) {
1851     mpls_dev_sysctl_unregister(dev, mdev);
1852     @@ -1636,8 +1674,6 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
1853     case NETDEV_CHANGENAME:
1854     mdev = mpls_dev_get(dev);
1855     if (mdev) {
1856     - int err;
1857     -
1858     mpls_dev_sysctl_unregister(dev, mdev);
1859     err = mpls_dev_sysctl_register(dev, mdev);
1860     if (err)
1861     diff --git a/net/rds/tcp.c b/net/rds/tcp.c
1862     index 1402e9166a7eb..d55d81b01d372 100644
1863     --- a/net/rds/tcp.c
1864     +++ b/net/rds/tcp.c
1865     @@ -510,7 +510,7 @@ void rds_tcp_tune(struct socket *sock)
1866     sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1867     }
1868     if (rtn->rcvbuf_size > 0) {
1869     - sk->sk_sndbuf = rtn->rcvbuf_size;
1870     + sk->sk_rcvbuf = rtn->rcvbuf_size;
1871     sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1872     }
1873     release_sock(sk);
1874     diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
1875     index e011594adcd13..23d0bc4ca3196 100644
1876     --- a/net/rxrpc/peer_object.c
1877     +++ b/net/rxrpc/peer_object.c
1878     @@ -297,6 +297,12 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
1879     return peer;
1880     }
1881    
1882     +static void rxrpc_free_peer(struct rxrpc_peer *peer)
1883     +{
1884     + rxrpc_put_local(peer->local);
1885     + kfree_rcu(peer, rcu);
1886     +}
1887     +
1888     /*
1889     * Set up a new incoming peer. There shouldn't be any other matching peers
1890     * since we've already done a search in the list from the non-reentrant context
1891     @@ -363,7 +369,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
1892     spin_unlock_bh(&rxnet->peer_hash_lock);
1893    
1894     if (peer)
1895     - kfree(candidate);
1896     + rxrpc_free_peer(candidate);
1897     else
1898     peer = candidate;
1899     }
1900     @@ -418,8 +424,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
1901     list_del_init(&peer->keepalive_link);
1902     spin_unlock_bh(&rxnet->peer_hash_lock);
1903    
1904     - rxrpc_put_local(peer->local);
1905     - kfree_rcu(peer, rcu);
1906     + rxrpc_free_peer(peer);
1907     }
1908    
1909     /*
1910     @@ -455,8 +460,7 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
1911     if (n == 0) {
1912     hash_del_rcu(&peer->hash_link);
1913     list_del_init(&peer->keepalive_link);
1914     - rxrpc_put_local(peer->local);
1915     - kfree_rcu(peer, rcu);
1916     + rxrpc_free_peer(peer);
1917     }
1918     }
1919    
1920     diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
1921     index 5e1493f8deba7..fa3b20e5f4608 100644
1922     --- a/net/smc/af_smc.c
1923     +++ b/net/smc/af_smc.c
1924     @@ -467,12 +467,26 @@ static void smc_link_save_peer_info(struct smc_link *link,
1925    
1926     static void smc_switch_to_fallback(struct smc_sock *smc)
1927     {
1928     + wait_queue_head_t *smc_wait = sk_sleep(&smc->sk);
1929     + wait_queue_head_t *clc_wait = sk_sleep(smc->clcsock->sk);
1930     + unsigned long flags;
1931     +
1932     smc->use_fallback = true;
1933     if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
1934     smc->clcsock->file = smc->sk.sk_socket->file;
1935     smc->clcsock->file->private_data = smc->clcsock;
1936     smc->clcsock->wq.fasync_list =
1937     smc->sk.sk_socket->wq.fasync_list;
1938     +
1939     + /* There may be some entries remaining in
1940     + * smc socket->wq, which should be removed
1941     + * to clcsocket->wq during the fallback.
1942     + */
1943     + spin_lock_irqsave(&smc_wait->lock, flags);
1944     + spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING);
1945     + list_splice_init(&smc_wait->head, &clc_wait->head);
1946     + spin_unlock(&clc_wait->lock);
1947     + spin_unlock_irqrestore(&smc_wait->lock, flags);
1948     }
1949     }
1950    
1951     diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
1952     index 2eabf39dee74d..543948d970c52 100644
1953     --- a/net/smc/smc_close.c
1954     +++ b/net/smc/smc_close.c
1955     @@ -183,6 +183,7 @@ int smc_close_active(struct smc_sock *smc)
1956     int old_state;
1957     long timeout;
1958     int rc = 0;
1959     + int rc1 = 0;
1960    
1961     timeout = current->flags & PF_EXITING ?
1962     0 : sock_flag(sk, SOCK_LINGER) ?
1963     @@ -222,8 +223,11 @@ again:
1964     /* actively shutdown clcsock before peer close it,
1965     * prevent peer from entering TIME_WAIT state.
1966     */
1967     - if (smc->clcsock && smc->clcsock->sk)
1968     - rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
1969     + if (smc->clcsock && smc->clcsock->sk) {
1970     + rc1 = kernel_sock_shutdown(smc->clcsock,
1971     + SHUT_RDWR);
1972     + rc = rc ? rc : rc1;
1973     + }
1974     } else {
1975     /* peer event has changed the state */
1976     goto again;
1977     diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
1978     index 02821b9140546..1436a36c1934a 100644
1979     --- a/net/tls/tls_sw.c
1980     +++ b/net/tls/tls_sw.c
1981     @@ -512,7 +512,7 @@ static int tls_do_encryption(struct sock *sk,
1982     memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
1983     prot->iv_size + prot->salt_size);
1984    
1985     - xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);
1986     + xor_iv_with_seq(prot->version, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
1987    
1988     sge->offset += prot->prepend_size;
1989     sge->length -= prot->prepend_size;
1990     @@ -1483,7 +1483,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1991     else
1992     memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
1993    
1994     - xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);
1995     + xor_iv_with_seq(prot->version, iv + iv_offset, tls_ctx->rx.rec_seq);
1996    
1997     /* Prepare AAD */
1998     tls_make_aad(aad, rxm->full_len - prot->overhead_size +
1999     diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
2000     index d3c0b04e2e22b..dc228bdf2bbc2 100644
2001     --- a/tools/perf/builtin-report.c
2002     +++ b/tools/perf/builtin-report.c
2003     @@ -569,14 +569,17 @@ static int report__browse_hists(struct report *rep)
2004     int ret;
2005     struct perf_session *session = rep->session;
2006     struct evlist *evlist = session->evlist;
2007     - const char *help = perf_tip(system_path(TIPDIR));
2008     + char *help = NULL, *path = NULL;
2009    
2010     - if (help == NULL) {
2011     + path = system_path(TIPDIR);
2012     + if (perf_tip(&help, path) || help == NULL) {
2013     /* fallback for people who don't install perf ;-) */
2014     - help = perf_tip(DOCDIR);
2015     - if (help == NULL)
2016     - help = "Cannot load tips.txt file, please install perf!";
2017     + free(path);
2018     + path = system_path(DOCDIR);
2019     + if (perf_tip(&help, path) || help == NULL)
2020     + help = strdup("Cannot load tips.txt file, please install perf!");
2021     }
2022     + free(path);
2023    
2024     switch (use_browser) {
2025     case 1:
2026     @@ -598,7 +601,7 @@ static int report__browse_hists(struct report *rep)
2027     ret = perf_evlist__tty_browse_hists(evlist, rep, help);
2028     break;
2029     }
2030     -
2031     + free(help);
2032     return ret;
2033     }
2034    
2035     diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
2036     index f736755000616..9ae316445f04b 100644
2037     --- a/tools/perf/ui/hist.c
2038     +++ b/tools/perf/ui/hist.c
2039     @@ -472,6 +472,18 @@ struct perf_hpp_list perf_hpp_list = {
2040     #undef __HPP_SORT_ACC_FN
2041     #undef __HPP_SORT_RAW_FN
2042    
2043     +static void fmt_free(struct perf_hpp_fmt *fmt)
2044     +{
2045     + /*
2046     + * At this point fmt should be completely
2047     + * unhooked, if not it's a bug.
2048     + */
2049     + BUG_ON(!list_empty(&fmt->list));
2050     + BUG_ON(!list_empty(&fmt->sort_list));
2051     +
2052     + if (fmt->free)
2053     + fmt->free(fmt);
2054     +}
2055    
2056     void perf_hpp__init(void)
2057     {
2058     @@ -535,9 +547,10 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
2059     list_add(&format->sort_list, &list->sorts);
2060     }
2061    
2062     -void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
2063     +static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
2064     {
2065     list_del_init(&format->list);
2066     + fmt_free(format);
2067     }
2068    
2069     void perf_hpp__cancel_cumulate(void)
2070     @@ -609,19 +622,6 @@ next:
2071     }
2072    
2073    
2074     -static void fmt_free(struct perf_hpp_fmt *fmt)
2075     -{
2076     - /*
2077     - * At this point fmt should be completely
2078     - * unhooked, if not it's a bug.
2079     - */
2080     - BUG_ON(!list_empty(&fmt->list));
2081     - BUG_ON(!list_empty(&fmt->sort_list));
2082     -
2083     - if (fmt->free)
2084     - fmt->free(fmt);
2085     -}
2086     -
2087     void perf_hpp__reset_output_field(struct perf_hpp_list *list)
2088     {
2089     struct perf_hpp_fmt *fmt, *tmp;
2090     diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
2091     index 4792731307947..ecce30f086de7 100644
2092     --- a/tools/perf/util/hist.h
2093     +++ b/tools/perf/util/hist.h
2094     @@ -361,7 +361,6 @@ enum {
2095     };
2096    
2097     void perf_hpp__init(void);
2098     -void perf_hpp__column_unregister(struct perf_hpp_fmt *format);
2099     void perf_hpp__cancel_cumulate(void);
2100     void perf_hpp__setup_output_field(struct perf_hpp_list *list);
2101     void perf_hpp__reset_output_field(struct perf_hpp_list *list);
2102     diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
2103     index ae56c766eda16..b3c1ae288b478 100644
2104     --- a/tools/perf/util/util.c
2105     +++ b/tools/perf/util/util.c
2106     @@ -343,32 +343,32 @@ fetch_kernel_version(unsigned int *puint, char *str,
2107     return 0;
2108     }
2109    
2110     -const char *perf_tip(const char *dirpath)
2111     +int perf_tip(char **strp, const char *dirpath)
2112     {
2113     struct strlist *tips;
2114     struct str_node *node;
2115     - char *tip = NULL;
2116     struct strlist_config conf = {
2117     .dirname = dirpath,
2118     .file_only = true,
2119     };
2120     + int ret = 0;
2121    
2122     + *strp = NULL;
2123     tips = strlist__new("tips.txt", &conf);
2124     if (tips == NULL)
2125     - return errno == ENOENT ? NULL :
2126     - "Tip: check path of tips.txt or get more memory! ;-p";
2127     + return -errno;
2128    
2129     if (strlist__nr_entries(tips) == 0)
2130     goto out;
2131    
2132     node = strlist__entry(tips, random() % strlist__nr_entries(tips));
2133     - if (asprintf(&tip, "Tip: %s", node->s) < 0)
2134     - tip = (char *)"Tip: get more memory! ;-)";
2135     + if (asprintf(strp, "Tip: %s", node->s) < 0)
2136     + ret = -ENOMEM;
2137    
2138     out:
2139     strlist__delete(tips);
2140    
2141     - return tip;
2142     + return ret;
2143     }
2144    
2145     char *perf_exe(char *buf, int len)
2146     diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
2147     index 9969b8b46f7c3..e4a7e1cafc70a 100644
2148     --- a/tools/perf/util/util.h
2149     +++ b/tools/perf/util/util.h
2150     @@ -37,7 +37,7 @@ int fetch_kernel_version(unsigned int *puint,
2151     #define KVER_FMT "%d.%d.%d"
2152     #define KVER_PARAM(x) KVER_VERSION(x), KVER_PATCHLEVEL(x), KVER_SUBLEVEL(x)
2153    
2154     -const char *perf_tip(const char *dirpath);
2155     +int perf_tip(char **strp, const char *dirpath);
2156    
2157     #ifndef HAVE_SCHED_GETCPU_SUPPORT
2158     int sched_getcpu(void);
2159     diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
2160     index 38133da2973d4..782a8da5d9500 100755
2161     --- a/tools/testing/selftests/net/fcnal-test.sh
2162     +++ b/tools/testing/selftests/net/fcnal-test.sh
2163     @@ -3450,8 +3450,8 @@ EOF
2164     ################################################################################
2165     # main
2166    
2167     -TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_addr_bind ipv4_runtime ipv4_netfilter"
2168     -TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_addr_bind ipv6_runtime ipv6_netfilter"
2169     +TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_bind ipv4_runtime ipv4_netfilter"
2170     +TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_bind ipv6_runtime ipv6_netfilter"
2171     TESTS_OTHER="use_cases"
2172    
2173     PAUSE_ON_FAIL=no