Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0114-5.4.15-all-fixes.patch



Revision 3495
Mon May 11 14:36:17 2020 UTC by niro
File size: 147775 bytes
-linux-5.4.15
1 niro 3495 diff --git a/Documentation/devicetree/bindings/rng/omap3_rom_rng.txt b/Documentation/devicetree/bindings/rng/omap3_rom_rng.txt
2     new file mode 100644
3     index 000000000000..f315c9723bd2
4     --- /dev/null
5     +++ b/Documentation/devicetree/bindings/rng/omap3_rom_rng.txt
6     @@ -0,0 +1,27 @@
7     +OMAP ROM RNG driver binding
8     +
9     +Secure SoCs may provide RNG via secure ROM calls like Nokia N900 does. The
10     +implementation can depend on the SoC secure ROM used.
11     +
12     +- compatible:
13     + Usage: required
14     + Value type: <string>
15     + Definition: must be "nokia,n900-rom-rng"
16     +
17     +- clocks:
18     + Usage: required
19     + Value type: <prop-encoded-array>
20     + Definition: reference to the the RNG interface clock
21     +
22     +- clock-names:
23     + Usage: required
24     + Value type: <stringlist>
25     + Definition: must be "ick"
26     +
27     +Example:
28     +
29     + rom_rng: rng {
30     + compatible = "nokia,n900-rom-rng";
31     + clocks = <&rng_ick>;
32     + clock-names = "ick";
33     + };
34     diff --git a/Makefile b/Makefile
35     index 2b2080d08bb2..30600e309c73 100644
36     --- a/Makefile
37     +++ b/Makefile
38     @@ -1,7 +1,7 @@
39     # SPDX-License-Identifier: GPL-2.0
40     VERSION = 5
41     PATCHLEVEL = 4
42     -SUBLEVEL = 14
43     +SUBLEVEL = 15
44     EXTRAVERSION =
45     NAME = Kleptomaniac Octopus
46    
47     diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit-28.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit-28.dts
48     index 07ac99b9cda6..cdb89b3e2a9b 100644
49     --- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit-28.dts
50     +++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit-28.dts
51     @@ -11,22 +11,6 @@
52     #include "logicpd-torpedo-37xx-devkit.dts"
53    
54     &lcd0 {
55     -
56     - label = "28";
57     -
58     - panel-timing {
59     - clock-frequency = <9000000>;
60     - hactive = <480>;
61     - vactive = <272>;
62     - hfront-porch = <3>;
63     - hback-porch = <2>;
64     - hsync-len = <42>;
65     - vback-porch = <3>;
66     - vfront-porch = <2>;
67     - vsync-len = <11>;
68     - hsync-active = <1>;
69     - vsync-active = <1>;
70     - de-active = <1>;
71     - pixelclk-active = <0>;
72     - };
73     + /* To make it work, set CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4 */
74     + compatible = "logicpd,type28";
75     };
76     diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
77     index 84a5ade1e865..63659880eeb3 100644
78     --- a/arch/arm/boot/dts/omap3-n900.dts
79     +++ b/arch/arm/boot/dts/omap3-n900.dts
80     @@ -155,6 +155,12 @@
81     pwms = <&pwm9 0 26316 0>; /* 38000 Hz */
82     };
83    
84     + rom_rng: rng {
85     + compatible = "nokia,n900-rom-rng";
86     + clocks = <&rng_ick>;
87     + clock-names = "ick";
88     + };
89     +
90     /* controlled (enabled/disabled) directly by bcm2048 and wl1251 */
91     vctcxo: vctcxo {
92     compatible = "fixed-clock";
93     diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
94     index 439e143cad7b..46012ca812f4 100644
95     --- a/arch/arm/mach-omap2/display.c
96     +++ b/arch/arm/mach-omap2/display.c
97     @@ -265,6 +265,7 @@ static int __init omapdss_init_of(void)
98     r = of_platform_populate(node, NULL, NULL, &pdev->dev);
99     if (r) {
100     pr_err("Unable to populate DSS submodule devices\n");
101     + put_device(&pdev->dev);
102     return r;
103     }
104    
105     diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
106     index 33688e1d9acf..247e3f8acffe 100644
107     --- a/arch/arm/mach-omap2/pdata-quirks.c
108     +++ b/arch/arm/mach-omap2/pdata-quirks.c
109     @@ -268,14 +268,6 @@ static void __init am3517_evm_legacy_init(void)
110     am35xx_emac_reset();
111     }
112    
113     -static struct platform_device omap3_rom_rng_device = {
114     - .name = "omap3-rom-rng",
115     - .id = -1,
116     - .dev = {
117     - .platform_data = rx51_secure_rng_call,
118     - },
119     -};
120     -
121     static void __init nokia_n900_legacy_init(void)
122     {
123     hsmmc2_internal_input_clk();
124     @@ -291,9 +283,6 @@ static void __init nokia_n900_legacy_init(void)
125     pr_warn("RX-51: Not enabling ARM errata 430973 workaround\n");
126     pr_warn("Thumb binaries may crash randomly without this workaround\n");
127     }
128     -
129     - pr_info("RX-51: Registering OMAP3 HWRNG device\n");
130     - platform_device_register(&omap3_rom_rng_device);
131     }
132     }
133    
134     @@ -538,6 +527,7 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
135     OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL),
136     OF_DEV_AUXDATA("ti,am3517-emac", 0x5c000000, "davinci_emac.0",
137     &am35xx_emac_pdata),
138     + OF_DEV_AUXDATA("nokia,n900-rom-rng", 0, NULL, rx51_secure_rng_call),
139     /* McBSP modules with sidetone core */
140     #if IS_ENABLED(CONFIG_SND_SOC_OMAP_MCBSP)
141     OF_DEV_AUXDATA("ti,omap3-mcbsp", 0x49022000, "49022000.mcbsp", &mcbsp_pdata),
142     diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
143     index 9c63b596e6ce..a09595f00cab 100644
144     --- a/arch/powerpc/include/asm/archrandom.h
145     +++ b/arch/powerpc/include/asm/archrandom.h
146     @@ -28,7 +28,7 @@ static inline int arch_get_random_seed_int(unsigned int *v)
147     unsigned long val;
148     int rc;
149    
150     - rc = arch_get_random_long(&val);
151     + rc = arch_get_random_seed_long(&val);
152     if (rc)
153     *v = val;
154    
155     diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
156     index ccf44c135389..7c05e95a5c44 100644
157     --- a/arch/powerpc/include/asm/security_features.h
158     +++ b/arch/powerpc/include/asm/security_features.h
159     @@ -9,7 +9,7 @@
160     #define _ASM_POWERPC_SECURITY_FEATURES_H
161    
162    
163     -extern unsigned long powerpc_security_features;
164     +extern u64 powerpc_security_features;
165     extern bool rfi_flush;
166    
167     /* These are bit flags */
168     @@ -24,17 +24,17 @@ void setup_stf_barrier(void);
169     void do_stf_barrier_fixups(enum stf_barrier_type types);
170     void setup_count_cache_flush(void);
171    
172     -static inline void security_ftr_set(unsigned long feature)
173     +static inline void security_ftr_set(u64 feature)
174     {
175     powerpc_security_features |= feature;
176     }
177    
178     -static inline void security_ftr_clear(unsigned long feature)
179     +static inline void security_ftr_clear(u64 feature)
180     {
181     powerpc_security_features &= ~feature;
182     }
183    
184     -static inline bool security_ftr_enabled(unsigned long feature)
185     +static inline bool security_ftr_enabled(u64 feature)
186     {
187     return !!(powerpc_security_features & feature);
188     }
189     diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
190     index adf0505dbe02..519d49547e2f 100644
191     --- a/arch/powerpc/kernel/head_fsl_booke.S
192     +++ b/arch/powerpc/kernel/head_fsl_booke.S
193     @@ -238,6 +238,9 @@ set_ivor:
194    
195     bl early_init
196    
197     +#ifdef CONFIG_KASAN
198     + bl kasan_early_init
199     +#endif
200     #ifdef CONFIG_RELOCATABLE
201     mr r3,r30
202     mr r4,r31
203     @@ -264,9 +267,6 @@ set_ivor:
204     /*
205     * Decide what sort of machine this is and initialize the MMU.
206     */
207     -#ifdef CONFIG_KASAN
208     - bl kasan_early_init
209     -#endif
210     mr r3,r30
211     mr r4,r31
212     bl machine_init
213     diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
214     index 100f1b57ec2f..eba9d4ee4baf 100644
215     --- a/arch/powerpc/kernel/prom_init.c
216     +++ b/arch/powerpc/kernel/prom_init.c
217     @@ -1053,7 +1053,7 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
218     .reserved2 = 0,
219     .reserved3 = 0,
220     .subprocessors = 1,
221     - .byte22 = OV5_FEAT(OV5_DRMEM_V2),
222     + .byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
223     .intarch = 0,
224     .mmu = 0,
225     .hash_ext = 0,
226     diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
227     index d341b464f23c..1740a66cea84 100644
228     --- a/arch/powerpc/kernel/security.c
229     +++ b/arch/powerpc/kernel/security.c
230     @@ -16,7 +16,7 @@
231     #include <asm/setup.h>
232    
233    
234     -unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
235     +u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
236    
237     enum count_cache_flush_type {
238     COUNT_CACHE_FLUSH_NONE = 0x1,
239     @@ -109,7 +109,7 @@ device_initcall(barrier_nospec_debugfs_init);
240     static __init int security_feature_debugfs_init(void)
241     {
242     debugfs_create_x64("security_features", 0400, powerpc_debugfs_root,
243     - (u64 *)&powerpc_security_features);
244     + &powerpc_security_features);
245     return 0;
246     }
247     device_initcall(security_feature_debugfs_init);
248     diff --git a/block/bio-integrity.c b/block/bio-integrity.c
249     index fb95dbb21dd8..bf62c25cde8f 100644
250     --- a/block/bio-integrity.c
251     +++ b/block/bio-integrity.c
252     @@ -87,7 +87,7 @@ EXPORT_SYMBOL(bio_integrity_alloc);
253     * Description: Used to free the integrity portion of a bio. Usually
254     * called from bio_free().
255     */
256     -static void bio_integrity_free(struct bio *bio)
257     +void bio_integrity_free(struct bio *bio)
258     {
259     struct bio_integrity_payload *bip = bio_integrity(bio);
260     struct bio_set *bs = bio->bi_pool;
261     diff --git a/block/bio.c b/block/bio.c
262     index 906da3581a3e..94d697217887 100644
263     --- a/block/bio.c
264     +++ b/block/bio.c
265     @@ -233,6 +233,9 @@ fallback:
266     void bio_uninit(struct bio *bio)
267     {
268     bio_disassociate_blkg(bio);
269     +
270     + if (bio_integrity(bio))
271     + bio_integrity_free(bio);
272     }
273     EXPORT_SYMBOL(bio_uninit);
274    
275     diff --git a/block/blk.h b/block/blk.h
276     index ffea1691470e..ee3d5664d962 100644
277     --- a/block/blk.h
278     +++ b/block/blk.h
279     @@ -122,6 +122,7 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
280     #ifdef CONFIG_BLK_DEV_INTEGRITY
281     void blk_flush_integrity(void);
282     bool __bio_integrity_endio(struct bio *);
283     +void bio_integrity_free(struct bio *bio);
284     static inline bool bio_integrity_endio(struct bio *bio)
285     {
286     if (bio_integrity(bio))
287     @@ -167,6 +168,9 @@ static inline bool bio_integrity_endio(struct bio *bio)
288     {
289     return true;
290     }
291     +static inline void bio_integrity_free(struct bio *bio)
292     +{
293     +}
294     #endif /* CONFIG_BLK_DEV_INTEGRITY */
295    
296     unsigned long blk_rq_timeout(unsigned long timeout);
297     diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
298     index 00ec4f2bf015..c05050f474cd 100644
299     --- a/drivers/acpi/acpi_platform.c
300     +++ b/drivers/acpi/acpi_platform.c
301     @@ -31,6 +31,44 @@ static const struct acpi_device_id forbidden_id_list[] = {
302     {"", 0},
303     };
304    
305     +static struct platform_device *acpi_platform_device_find_by_companion(struct acpi_device *adev)
306     +{
307     + struct device *dev;
308     +
309     + dev = bus_find_device_by_acpi_dev(&platform_bus_type, adev);
310     + return dev ? to_platform_device(dev) : NULL;
311     +}
312     +
313     +static int acpi_platform_device_remove_notify(struct notifier_block *nb,
314     + unsigned long value, void *arg)
315     +{
316     + struct acpi_device *adev = arg;
317     + struct platform_device *pdev;
318     +
319     + switch (value) {
320     + case ACPI_RECONFIG_DEVICE_ADD:
321     + /* Nothing to do here */
322     + break;
323     + case ACPI_RECONFIG_DEVICE_REMOVE:
324     + if (!acpi_device_enumerated(adev))
325     + break;
326     +
327     + pdev = acpi_platform_device_find_by_companion(adev);
328     + if (!pdev)
329     + break;
330     +
331     + platform_device_unregister(pdev);
332     + put_device(&pdev->dev);
333     + break;
334     + }
335     +
336     + return NOTIFY_OK;
337     +}
338     +
339     +static struct notifier_block acpi_platform_notifier = {
340     + .notifier_call = acpi_platform_device_remove_notify,
341     +};
342     +
343     static void acpi_platform_fill_resource(struct acpi_device *adev,
344     const struct resource *src, struct resource *dest)
345     {
346     @@ -130,3 +168,8 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
347     return pdev;
348     }
349     EXPORT_SYMBOL_GPL(acpi_create_platform_device);
350     +
351     +void __init acpi_platform_init(void)
352     +{
353     + acpi_reconfig_notifier_register(&acpi_platform_notifier);
354     +}
355     diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
356     index aad6be5c0af0..915650bf519f 100644
357     --- a/drivers/acpi/scan.c
358     +++ b/drivers/acpi/scan.c
359     @@ -2174,6 +2174,7 @@ int __init acpi_scan_init(void)
360     acpi_pci_root_init();
361     acpi_pci_link_init();
362     acpi_processor_init();
363     + acpi_platform_init();
364     acpi_lpss_init();
365     acpi_apd_init();
366     acpi_cmos_rtc_init();
367     diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
368     index a1f3f0994f9f..d5b4905e2adb 100644
369     --- a/drivers/base/swnode.c
370     +++ b/drivers/base/swnode.c
371     @@ -520,7 +520,10 @@ software_node_get_parent(const struct fwnode_handle *fwnode)
372     {
373     struct swnode *swnode = to_swnode(fwnode);
374    
375     - return swnode ? (swnode->parent ? &swnode->parent->fwnode : NULL) : NULL;
376     + if (!swnode || !swnode->parent)
377     + return NULL;
378     +
379     + return fwnode_handle_get(&swnode->parent->fwnode);
380     }
381    
382     static struct fwnode_handle *
383     diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
384     index 648e39ce6bd9..8df3cad7c97a 100644
385     --- a/drivers/char/hw_random/omap3-rom-rng.c
386     +++ b/drivers/char/hw_random/omap3-rom-rng.c
387     @@ -20,6 +20,8 @@
388     #include <linux/workqueue.h>
389     #include <linux/clk.h>
390     #include <linux/err.h>
391     +#include <linux/of.h>
392     +#include <linux/of_device.h>
393     #include <linux/platform_device.h>
394    
395     #define RNG_RESET 0x01
396     @@ -86,14 +88,18 @@ static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
397    
398     static struct hwrng omap3_rom_rng_ops = {
399     .name = "omap3-rom",
400     - .read = omap3_rom_rng_read,
401     };
402    
403     static int omap3_rom_rng_probe(struct platform_device *pdev)
404     {
405     int ret = 0;
406    
407     - pr_info("initializing\n");
408     + omap3_rom_rng_ops.read = of_device_get_match_data(&pdev->dev);
409     + if (!omap3_rom_rng_ops.read) {
410     + dev_err(&pdev->dev, "missing rom code handler\n");
411     +
412     + return -ENODEV;
413     + }
414    
415     omap3_rom_rng_call = pdev->dev.platform_data;
416     if (!omap3_rom_rng_call) {
417     @@ -126,9 +132,16 @@ static int omap3_rom_rng_remove(struct platform_device *pdev)
418     return 0;
419     }
420    
421     +static const struct of_device_id omap_rom_rng_match[] = {
422     + { .compatible = "nokia,n900-rom-rng", .data = omap3_rom_rng_read, },
423     + { /* sentinel */ },
424     +};
425     +MODULE_DEVICE_TABLE(of, omap_rom_rng_match);
426     +
427     static struct platform_driver omap3_rom_rng_driver = {
428     .driver = {
429     .name = "omap3-rom-rng",
430     + .of_match_table = omap_rom_rng_match,
431     },
432     .probe = omap3_rom_rng_probe,
433     .remove = omap3_rom_rng_remove,
434     diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
435     index 3c8a559506e8..0b6e7f8d9729 100644
436     --- a/drivers/char/ipmi/ipmi_msghandler.c
437     +++ b/drivers/char/ipmi/ipmi_msghandler.c
438     @@ -3039,8 +3039,11 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
439     bmc->pdev.name = "ipmi_bmc";
440    
441     rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
442     - if (rv < 0)
443     + if (rv < 0) {
444     + kfree(bmc);
445     goto out;
446     + }
447     +
448     bmc->pdev.dev.driver = &ipmidriver.driver;
449     bmc->pdev.id = rv;
450     bmc->pdev.dev.release = release_bmc_device;
451     diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
452     index 8eabf7b20101..7316312935c8 100644
453     --- a/drivers/crypto/Kconfig
454     +++ b/drivers/crypto/Kconfig
455     @@ -333,6 +333,7 @@ config CRYPTO_DEV_PPC4XX
456     depends on PPC && 4xx
457     select CRYPTO_HASH
458     select CRYPTO_AEAD
459     + select CRYPTO_AES
460     select CRYPTO_LIB_AES
461     select CRYPTO_CCM
462     select CRYPTO_CTR
463     diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
464     index 1369c5fa3087..07df012893bb 100644
465     --- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
466     +++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
467     @@ -175,7 +175,7 @@ static int sun4i_hash(struct ahash_request *areq)
468     */
469     unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo;
470     unsigned int in_i = 0;
471     - u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, wb = 0, v, ivmode = 0;
472     + u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0;
473     struct sun4i_req_ctx *op = ahash_request_ctx(areq);
474     struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
475     struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
476     @@ -184,6 +184,7 @@ static int sun4i_hash(struct ahash_request *areq)
477     struct sg_mapping_iter mi;
478     int in_r, err = 0;
479     size_t copied = 0;
480     + __le32 wb = 0;
481    
482     dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
483     __func__, crypto_tfm_alg_name(areq->base.tfm),
484     @@ -395,7 +396,7 @@ hash_final:
485    
486     nbw = op->len - 4 * nwait;
487     if (nbw) {
488     - wb = *(u32 *)(op->buf + nwait * 4);
489     + wb = cpu_to_le32(*(u32 *)(op->buf + nwait * 4));
490     wb &= GENMASK((nbw * 8) - 1, 0);
491    
492     op->byte_count += nbw;
493     @@ -404,7 +405,7 @@ hash_final:
494    
495     /* write the remaining bytes of the nbw buffer */
496     wb |= ((1 << 7) << (nbw * 8));
497     - bf[j++] = wb;
498     + bf[j++] = le32_to_cpu(wb);
499    
500     /*
501     * number of space to pad to obtain 64o minus 8(size) minus 4 (final 1)
502     @@ -423,13 +424,13 @@ hash_final:
503    
504     /* write the length of data */
505     if (op->mode == SS_OP_SHA1) {
506     - __be64 bits = cpu_to_be64(op->byte_count << 3);
507     - bf[j++] = lower_32_bits(bits);
508     - bf[j++] = upper_32_bits(bits);
509     + __be64 *bits = (__be64 *)&bf[j];
510     + *bits = cpu_to_be64(op->byte_count << 3);
511     + j += 2;
512     } else {
513     - __le64 bits = op->byte_count << 3;
514     - bf[j++] = lower_32_bits(bits);
515     - bf[j++] = upper_32_bits(bits);
516     + __le64 *bits = (__le64 *)&bf[j];
517     + *bits = cpu_to_le64(op->byte_count << 3);
518     + j += 2;
519     }
520     writesl(ss->base + SS_RXFIFO, bf, j);
521    
522     @@ -471,7 +472,7 @@ hash_final:
523     }
524     } else {
525     for (i = 0; i < 4; i++) {
526     - v = readl(ss->base + SS_MD0 + i * 4);
527     + v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
528     memcpy(areq->result + i * 4, &v, 4);
529     }
530     }
531     diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
532     index ba7c4f07fcd6..80b780e49971 100644
533     --- a/drivers/dma/ti/edma.c
534     +++ b/drivers/dma/ti/edma.c
535     @@ -2403,8 +2403,10 @@ static int edma_probe(struct platform_device *pdev)
536    
537     ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
538     sizeof(*ecc->tc_list), GFP_KERNEL);
539     - if (!ecc->tc_list)
540     - return -ENOMEM;
541     + if (!ecc->tc_list) {
542     + ret = -ENOMEM;
543     + goto err_reg1;
544     + }
545    
546     for (i = 0;; i++) {
547     ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
548     diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
549     index 4a8012e3cb8c..601af4edad5e 100644
550     --- a/drivers/firmware/arm_scmi/perf.c
551     +++ b/drivers/firmware/arm_scmi/perf.c
552     @@ -323,7 +323,7 @@ static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
553    
554     if (db->mask)
555     val = ioread64_hi_lo(db->addr) & db->mask;
556     - iowrite64_hi_lo(db->set, db->addr);
557     + iowrite64_hi_lo(db->set | val, db->addr);
558     }
559     #endif
560     }
561     diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
562     index a43d2db5cbdb..4265e9dbed84 100644
563     --- a/drivers/firmware/imx/imx-dsp.c
564     +++ b/drivers/firmware/imx/imx-dsp.c
565     @@ -114,7 +114,7 @@ static int imx_dsp_probe(struct platform_device *pdev)
566    
567     dev_info(dev, "NXP i.MX DSP IPC initialized\n");
568    
569     - return devm_of_platform_populate(dev);
570     + return 0;
571     out:
572     kfree(chan_name);
573     for (j = 0; j < i; j++) {
574     diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
575     index 43ffec3a6fbb..7ee5b7f53aeb 100644
576     --- a/drivers/gpio/gpiolib-of.c
577     +++ b/drivers/gpio/gpiolib-of.c
578     @@ -909,16 +909,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
579     of_node_get(chip->of_node);
580    
581     ret = of_gpiochip_scan_gpios(chip);
582     - if (ret) {
583     + if (ret)
584     of_node_put(chip->of_node);
585     - gpiochip_remove_pin_ranges(chip);
586     - }
587    
588     return ret;
589     }
590    
591     void of_gpiochip_remove(struct gpio_chip *chip)
592     {
593     - gpiochip_remove_pin_ranges(chip);
594     of_node_put(chip->of_node);
595     }
596     diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
597     index 3d9524a2abc4..2476306e7030 100644
598     --- a/drivers/gpio/gpiolib.c
599     +++ b/drivers/gpio/gpiolib.c
600     @@ -1452,6 +1452,7 @@ err_remove_of_chip:
601     gpiochip_free_hogs(chip);
602     of_gpiochip_remove(chip);
603     err_free_gpiochip_mask:
604     + gpiochip_remove_pin_ranges(chip);
605     gpiochip_free_valid_mask(chip);
606     err_remove_from_list:
607     spin_lock_irqsave(&gpio_lock, flags);
608     @@ -1507,8 +1508,8 @@ void gpiochip_remove(struct gpio_chip *chip)
609     gdev->chip = NULL;
610     gpiochip_irqchip_remove(chip);
611     acpi_gpiochip_remove(chip);
612     - gpiochip_remove_pin_ranges(chip);
613     of_gpiochip_remove(chip);
614     + gpiochip_remove_pin_ranges(chip);
615     gpiochip_free_valid_mask(chip);
616     /*
617     * We accept no more calls into the driver from this point, so
618     diff --git a/drivers/gpio/sgpio-aspeed.c b/drivers/gpio/sgpio-aspeed.c
619     index 7e99860ca447..8319812593e3 100644
620     --- a/drivers/gpio/sgpio-aspeed.c
621     +++ b/drivers/gpio/sgpio-aspeed.c
622     @@ -107,7 +107,7 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio,
623     return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS;
624     default:
625     /* acturally if code runs to here, it's an error case */
626     - BUG_ON(1);
627     + BUG();
628     }
629     }
630    
631     diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
632     index 395c2259f979..9d778a0b2c5e 100644
633     --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
634     +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
635     @@ -423,7 +423,6 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
636     * vcn_v2_5_disable_clock_gating - disable VCN clock gating
637     *
638     * @adev: amdgpu_device pointer
639     - * @sw: enable SW clock gating
640     *
641     * Disable clock gating for VCN block
642     */
643     @@ -542,7 +541,6 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
644     * vcn_v2_5_enable_clock_gating - enable VCN clock gating
645     *
646     * @adev: amdgpu_device pointer
647     - * @sw: enable SW clock gating
648     *
649     * Enable clock gating for VCN block
650     */
651     diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
652     index fa8ad7db2b3a..d306cc711997 100644
653     --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
654     +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
655     @@ -1421,6 +1421,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
656     {
657     struct pp_hwmgr *hwmgr = handle;
658    
659     + *cap = false;
660     if (!hwmgr)
661     return -EINVAL;
662    
663     diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
664     index e41fd94ae5a9..b3d2b91575cb 100644
665     --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
666     +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
667     @@ -2094,8 +2094,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
668     ext_data.fpriv = file->driver_priv;
669     if (client_is_banned(ext_data.fpriv)) {
670     DRM_DEBUG("client %s[%d] banned from creating ctx\n",
671     - current->comm,
672     - pid_nr(get_task_pid(current, PIDTYPE_PID)));
673     + current->comm, task_pid_nr(current));
674     return -EIO;
675     }
676    
677     diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
678     index ad47cc95459e..bf5fcc3e5379 100644
679     --- a/drivers/gpu/drm/panel/panel-lvds.c
680     +++ b/drivers/gpu/drm/panel/panel-lvds.c
681     @@ -197,7 +197,6 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
682     static int panel_lvds_probe(struct platform_device *pdev)
683     {
684     struct panel_lvds *lvds;
685     - struct device_node *np;
686     int ret;
687    
688     lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
689     @@ -243,14 +242,9 @@ static int panel_lvds_probe(struct platform_device *pdev)
690     return ret;
691     }
692    
693     - np = of_parse_phandle(lvds->dev->of_node, "backlight", 0);
694     - if (np) {
695     - lvds->backlight = of_find_backlight_by_node(np);
696     - of_node_put(np);
697     -
698     - if (!lvds->backlight)
699     - return -EPROBE_DEFER;
700     - }
701     + lvds->backlight = devm_of_find_backlight(lvds->dev);
702     + if (IS_ERR(lvds->backlight))
703     + return PTR_ERR(lvds->backlight);
704    
705     /*
706     * TODO: Handle all power supplies specified in the DT node in a generic
707     @@ -266,14 +260,10 @@ static int panel_lvds_probe(struct platform_device *pdev)
708    
709     ret = drm_panel_add(&lvds->panel);
710     if (ret < 0)
711     - goto error;
712     + return ret;
713    
714     dev_set_drvdata(lvds->dev, lvds);
715     return 0;
716     -
717     -error:
718     - put_device(&lvds->backlight->dev);
719     - return ret;
720     }
721    
722     static int panel_lvds_remove(struct platform_device *pdev)
723     @@ -284,9 +274,6 @@ static int panel_lvds_remove(struct platform_device *pdev)
724    
725     panel_lvds_disable(&lvds->panel);
726    
727     - if (lvds->backlight)
728     - put_device(&lvds->backlight->dev);
729     -
730     return 0;
731     }
732    
733     diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
734     index 12ff77dacc95..c1eb8cfe6aeb 100644
735     --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
736     +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
737     @@ -53,8 +53,10 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
738     if (err) {
739     dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
740     err);
741     - regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
742     - pfdev->devfreq.cur_volt);
743     + if (pfdev->regulator)
744     + regulator_set_voltage(pfdev->regulator,
745     + pfdev->devfreq.cur_volt,
746     + pfdev->devfreq.cur_volt);
747     return err;
748     }
749    
750     diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
751     index 62eab82a64f9..897442754fd0 100644
752     --- a/drivers/gpu/drm/radeon/cik.c
753     +++ b/drivers/gpu/drm/radeon/cik.c
754     @@ -6969,8 +6969,8 @@ static int cik_irq_init(struct radeon_device *rdev)
755     }
756    
757     /* setup interrupt control */
758     - /* XXX this should actually be a bus address, not an MC address. same on older asics */
759     - WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
760     + /* set dummy read address to dummy page address */
761     + WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
762     interrupt_cntl = RREG32(INTERRUPT_CNTL);
763     /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
764     * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
765     diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
766     index e937cc01910d..033bc466a862 100644
767     --- a/drivers/gpu/drm/radeon/r600.c
768     +++ b/drivers/gpu/drm/radeon/r600.c
769     @@ -3696,8 +3696,8 @@ int r600_irq_init(struct radeon_device *rdev)
770     }
771    
772     /* setup interrupt control */
773     - /* set dummy read address to ring address */
774     - WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
775     + /* set dummy read address to dummy page address */
776     + WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
777     interrupt_cntl = RREG32(INTERRUPT_CNTL);
778     /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
779     * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
780     diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
781     index 05894d198a79..1d8efb0eefdb 100644
782     --- a/drivers/gpu/drm/radeon/si.c
783     +++ b/drivers/gpu/drm/radeon/si.c
784     @@ -5997,8 +5997,8 @@ static int si_irq_init(struct radeon_device *rdev)
785     }
786    
787     /* setup interrupt control */
788     - /* set dummy read address to ring address */
789     - WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
790     + /* set dummy read address to dummy page address */
791     + WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
792     interrupt_cntl = RREG32(INTERRUPT_CNTL);
793     /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
794     * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
795     diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
796     index 3fc7e6899cab..50c11a7f0467 100644
797     --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
798     +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
799     @@ -16,6 +16,7 @@
800     #include <linux/of_graph.h>
801     #include <linux/platform_device.h>
802     #include <linux/slab.h>
803     +#include <linux/sys_soc.h>
804    
805     #include <drm/drm_atomic.h>
806     #include <drm/drm_atomic_helper.h>
807     @@ -842,8 +843,23 @@ static int rcar_lvds_get_clocks(struct rcar_lvds *lvds)
808     return 0;
809     }
810    
811     +static const struct rcar_lvds_device_info rcar_lvds_r8a7790es1_info = {
812     + .gen = 2,
813     + .quirks = RCAR_LVDS_QUIRK_LANES,
814     + .pll_setup = rcar_lvds_pll_setup_gen2,
815     +};
816     +
817     +static const struct soc_device_attribute lvds_quirk_matches[] = {
818     + {
819     + .soc_id = "r8a7790", .revision = "ES1.*",
820     + .data = &rcar_lvds_r8a7790es1_info,
821     + },
822     + { /* sentinel */ }
823     +};
824     +
825     static int rcar_lvds_probe(struct platform_device *pdev)
826     {
827     + const struct soc_device_attribute *attr;
828     struct rcar_lvds *lvds;
829     struct resource *mem;
830     int ret;
831     @@ -857,6 +873,10 @@ static int rcar_lvds_probe(struct platform_device *pdev)
832     lvds->dev = &pdev->dev;
833     lvds->info = of_device_get_match_data(&pdev->dev);
834    
835     + attr = soc_device_match(lvds_quirk_matches);
836     + if (attr)
837     + lvds->info = attr->data;
838     +
839     ret = rcar_lvds_parse_dt(lvds);
840     if (ret < 0)
841     return ret;
842     @@ -893,12 +913,6 @@ static const struct rcar_lvds_device_info rcar_lvds_gen2_info = {
843     .pll_setup = rcar_lvds_pll_setup_gen2,
844     };
845    
846     -static const struct rcar_lvds_device_info rcar_lvds_r8a7790_info = {
847     - .gen = 2,
848     - .quirks = RCAR_LVDS_QUIRK_LANES,
849     - .pll_setup = rcar_lvds_pll_setup_gen2,
850     -};
851     -
852     static const struct rcar_lvds_device_info rcar_lvds_gen3_info = {
853     .gen = 3,
854     .quirks = RCAR_LVDS_QUIRK_PWD,
855     @@ -930,7 +944,7 @@ static const struct of_device_id rcar_lvds_of_table[] = {
856     { .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info },
857     { .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info },
858     { .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info },
859     - { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info },
860     + { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_gen2_info },
861     { .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info },
862     { .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
863     { .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info },
864     diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
865     index 613404f86668..84e3decb17b1 100644
866     --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
867     +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
868     @@ -1040,10 +1040,41 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
869     struct drm_display_mode *adjusted_mode)
870     {
871     struct vop *vop = to_vop(crtc);
872     + unsigned long rate;
873    
874     - adjusted_mode->clock =
875     - DIV_ROUND_UP(clk_round_rate(vop->dclk,
876     - adjusted_mode->clock * 1000), 1000);
877     + /*
878     + * Clock craziness.
879     + *
880     + * Key points:
881     + *
882     + * - DRM works in in kHz.
883     + * - Clock framework works in Hz.
884     + * - Rockchip's clock driver picks the clock rate that is the
885     + * same _OR LOWER_ than the one requested.
886     + *
887     + * Action plan:
888     + *
889     + * 1. When DRM gives us a mode, we should add 999 Hz to it. That way
890     + * if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to
891     + * make 60000 kHz then the clock framework will actually give us
892     + * the right clock.
893     + *
894     + * NOTE: if the PLL (maybe through a divider) could actually make
895     + * a clock rate 999 Hz higher instead of the one we want then this
896     + * could be a problem. Unfortunately there's not much we can do
897     + * since it's baked into DRM to use kHz. It shouldn't matter in
898     + * practice since Rockchip PLLs are controlled by tables and
899     + * even if there is a divider in the middle I wouldn't expect PLL
900     + * rates in the table that are just a few kHz different.
901     + *
902     + * 2. Get the clock framework to round the rate for us to tell us
903     + * what it will actually make.
904     + *
905     + * 3. Store the rounded up rate so that we don't need to worry about
906     + * this in the actual clk_set_rate().
907     + */
908     + rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999);
909     + adjusted_mode->clock = DIV_ROUND_UP(rate, 1000);
910    
911     return true;
912     }
913     diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c
914     index 07d5dfce68d4..1da347e6a358 100644
915     --- a/drivers/i2c/busses/i2c-stm32.c
916     +++ b/drivers/i2c/busses/i2c-stm32.c
917     @@ -20,13 +20,13 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
918    
919     dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
920     if (!dma)
921     - return NULL;
922     + return ERR_PTR(-ENOMEM);
923    
924     /* Request and configure I2C TX dma channel */
925     - dma->chan_tx = dma_request_slave_channel(dev, "tx");
926     - if (!dma->chan_tx) {
927     + dma->chan_tx = dma_request_chan(dev, "tx");
928     + if (IS_ERR(dma->chan_tx)) {
929     dev_dbg(dev, "can't request DMA tx channel\n");
930     - ret = -EINVAL;
931     + ret = PTR_ERR(dma->chan_tx);
932     goto fail_al;
933     }
934    
935     @@ -42,10 +42,10 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
936     }
937    
938     /* Request and configure I2C RX dma channel */
939     - dma->chan_rx = dma_request_slave_channel(dev, "rx");
940     - if (!dma->chan_rx) {
941     + dma->chan_rx = dma_request_chan(dev, "rx");
942     + if (IS_ERR(dma->chan_rx)) {
943     dev_err(dev, "can't request DMA rx channel\n");
944     - ret = -EINVAL;
945     + ret = PTR_ERR(dma->chan_rx);
946     goto fail_tx;
947     }
948    
949     @@ -75,7 +75,7 @@ fail_al:
950     devm_kfree(dev, dma);
951     dev_info(dev, "can't use DMA\n");
952    
953     - return NULL;
954     + return ERR_PTR(ret);
955     }
956    
957     void stm32_i2c_dma_free(struct stm32_i2c_dma *dma)
958     diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
959     index 84cfed17ff4f..b2634afe066d 100644
960     --- a/drivers/i2c/busses/i2c-stm32f7.c
961     +++ b/drivers/i2c/busses/i2c-stm32f7.c
962     @@ -1267,8 +1267,8 @@ static int stm32f7_i2c_get_free_slave_id(struct stm32f7_i2c_dev *i2c_dev,
963     * slave[0] supports 7-bit and 10-bit slave address
964     * slave[1] supports 7-bit slave address only
965     */
966     - for (i = 0; i < STM32F7_I2C_MAX_SLAVE; i++) {
967     - if (i == 1 && (slave->flags & I2C_CLIENT_PEC))
968     + for (i = STM32F7_I2C_MAX_SLAVE - 1; i >= 0; i--) {
969     + if (i == 1 && (slave->flags & I2C_CLIENT_TEN))
970     continue;
971     if (!i2c_dev->slave[i]) {
972     *id = i;
973     @@ -1955,6 +1955,15 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
974     i2c_dev->dma = stm32_i2c_dma_request(i2c_dev->dev, phy_addr,
975     STM32F7_I2C_TXDR,
976     STM32F7_I2C_RXDR);
977     + if (PTR_ERR(i2c_dev->dma) == -ENODEV)
978     + i2c_dev->dma = NULL;
979     + else if (IS_ERR(i2c_dev->dma)) {
980     + ret = PTR_ERR(i2c_dev->dma);
981     + if (ret != -EPROBE_DEFER)
982     + dev_err(&pdev->dev,
983     + "Failed to request dma error %i\n", ret);
984     + goto clk_free;
985     + }
986    
987     platform_set_drvdata(pdev, i2c_dev);
988    
989     diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
990     index 59ff088c7d75..9feaec3c8329 100644
991     --- a/drivers/leds/leds-tlc591xx.c
992     +++ b/drivers/leds/leds-tlc591xx.c
993     @@ -13,6 +13,7 @@
994     #include <linux/slab.h>
995    
996     #define TLC591XX_MAX_LEDS 16
997     +#define TLC591XX_MAX_BRIGHTNESS 256
998    
999     #define TLC591XX_REG_MODE1 0x00
1000     #define MODE1_RESPON_ADDR_MASK 0xF0
1001     @@ -112,11 +113,11 @@ tlc591xx_brightness_set(struct led_classdev *led_cdev,
1002     struct tlc591xx_priv *priv = led->priv;
1003     int err;
1004    
1005     - switch (brightness) {
1006     + switch ((int)brightness) {
1007     case 0:
1008     err = tlc591xx_set_ledout(priv, led, LEDOUT_OFF);
1009     break;
1010     - case LED_FULL:
1011     + case TLC591XX_MAX_BRIGHTNESS:
1012     err = tlc591xx_set_ledout(priv, led, LEDOUT_ON);
1013     break;
1014     default:
1015     @@ -157,7 +158,7 @@ tlc591xx_configure(struct device *dev,
1016     led->priv = priv;
1017     led->led_no = i;
1018     led->ldev.brightness_set_blocking = tlc591xx_brightness_set;
1019     - led->ldev.max_brightness = LED_FULL;
1020     + led->ldev.max_brightness = TLC591XX_MAX_BRIGHTNESS;
1021     err = led_classdev_register(dev, &led->ldev);
1022     if (err < 0) {
1023     dev_err(dev, "couldn't register LED %s\n",
1024     diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
1025     index 9355db29d2f9..1767f30a1676 100644
1026     --- a/drivers/mfd/intel-lpss-pci.c
1027     +++ b/drivers/mfd/intel-lpss-pci.c
1028     @@ -122,6 +122,18 @@ static const struct intel_lpss_platform_info apl_i2c_info = {
1029     .properties = apl_i2c_properties,
1030     };
1031    
1032     +static struct property_entry glk_i2c_properties[] = {
1033     + PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 313),
1034     + PROPERTY_ENTRY_U32("i2c-sda-falling-time-ns", 171),
1035     + PROPERTY_ENTRY_U32("i2c-scl-falling-time-ns", 290),
1036     + { },
1037     +};
1038     +
1039     +static const struct intel_lpss_platform_info glk_i2c_info = {
1040     + .clk_rate = 133000000,
1041     + .properties = glk_i2c_properties,
1042     +};
1043     +
1044     static const struct intel_lpss_platform_info cnl_i2c_info = {
1045     .clk_rate = 216000000,
1046     .properties = spt_i2c_properties,
1047     @@ -174,14 +186,14 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
1048     { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info },
1049     { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
1050     /* GLK */
1051     - { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&bxt_i2c_info },
1052     - { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&bxt_i2c_info },
1053     - { PCI_VDEVICE(INTEL, 0x31b0), (kernel_ulong_t)&bxt_i2c_info },
1054     - { PCI_VDEVICE(INTEL, 0x31b2), (kernel_ulong_t)&bxt_i2c_info },
1055     - { PCI_VDEVICE(INTEL, 0x31b4), (kernel_ulong_t)&bxt_i2c_info },
1056     - { PCI_VDEVICE(INTEL, 0x31b6), (kernel_ulong_t)&bxt_i2c_info },
1057     - { PCI_VDEVICE(INTEL, 0x31b8), (kernel_ulong_t)&bxt_i2c_info },
1058     - { PCI_VDEVICE(INTEL, 0x31ba), (kernel_ulong_t)&bxt_i2c_info },
1059     + { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info },
1060     + { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info },
1061     + { PCI_VDEVICE(INTEL, 0x31b0), (kernel_ulong_t)&glk_i2c_info },
1062     + { PCI_VDEVICE(INTEL, 0x31b2), (kernel_ulong_t)&glk_i2c_info },
1063     + { PCI_VDEVICE(INTEL, 0x31b4), (kernel_ulong_t)&glk_i2c_info },
1064     + { PCI_VDEVICE(INTEL, 0x31b6), (kernel_ulong_t)&glk_i2c_info },
1065     + { PCI_VDEVICE(INTEL, 0x31b8), (kernel_ulong_t)&glk_i2c_info },
1066     + { PCI_VDEVICE(INTEL, 0x31ba), (kernel_ulong_t)&glk_i2c_info },
1067     { PCI_VDEVICE(INTEL, 0x31bc), (kernel_ulong_t)&bxt_uart_info },
1068     { PCI_VDEVICE(INTEL, 0x31be), (kernel_ulong_t)&bxt_uart_info },
1069     { PCI_VDEVICE(INTEL, 0x31c0), (kernel_ulong_t)&bxt_uart_info },
1070     diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
1071     index 2d2d9ea8be4f..3dba15bccce2 100644
1072     --- a/drivers/mmc/core/quirks.h
1073     +++ b/drivers/mmc/core/quirks.h
1074     @@ -119,7 +119,14 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = {
1075     END_FIXUP
1076     };
1077    
1078     +
1079     static const struct mmc_fixup sdio_fixup_methods[] = {
1080     + SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
1081     + add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
1082     +
1083     + SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
1084     + add_quirk, MMC_QUIRK_DISABLE_CD),
1085     +
1086     SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
1087     add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
1088    
1089     diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1090     index b4b82b9c5cd6..fcbe01f61aa4 100644
1091     --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1092     +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1093     @@ -1600,13 +1600,15 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
1094     * Skb freeing is not handled here.
1095     *
1096     * This function may be called on error paths in the Tx function, so guard
1097     - * against cases when not all fd relevant fields were filled in.
1098     + * against cases when not all fd relevant fields were filled in. To avoid
1099     + * reading the invalid transmission timestamp for the error paths set ts to
1100     + * false.
1101     *
1102     * Return the skb backpointer, since for S/G frames the buffer containing it
1103     * gets freed here.
1104     */
1105     static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
1106     - const struct qm_fd *fd)
1107     + const struct qm_fd *fd, bool ts)
1108     {
1109     const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1110     struct device *dev = priv->net_dev->dev.parent;
1111     @@ -1620,18 +1622,6 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
1112     skbh = (struct sk_buff **)phys_to_virt(addr);
1113     skb = *skbh;
1114    
1115     - if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1116     - memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1117     -
1118     - if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
1119     - &ns)) {
1120     - shhwtstamps.hwtstamp = ns_to_ktime(ns);
1121     - skb_tstamp_tx(skb, &shhwtstamps);
1122     - } else {
1123     - dev_warn(dev, "fman_port_get_tstamp failed!\n");
1124     - }
1125     - }
1126     -
1127     if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
1128     nr_frags = skb_shinfo(skb)->nr_frags;
1129     dma_unmap_single(dev, addr,
1130     @@ -1654,14 +1644,29 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
1131     dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
1132     qm_sg_entry_get_len(&sgt[i]), dma_dir);
1133     }
1134     -
1135     - /* Free the page frag that we allocated on Tx */
1136     - skb_free_frag(phys_to_virt(addr));
1137     } else {
1138     dma_unmap_single(dev, addr,
1139     skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
1140     }
1141    
1142     + /* DMA unmapping is required before accessing the HW provided info */
1143     + if (ts && priv->tx_tstamp &&
1144     + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1145     + memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1146     +
1147     + if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
1148     + &ns)) {
1149     + shhwtstamps.hwtstamp = ns_to_ktime(ns);
1150     + skb_tstamp_tx(skb, &shhwtstamps);
1151     + } else {
1152     + dev_warn(dev, "fman_port_get_tstamp failed!\n");
1153     + }
1154     + }
1155     +
1156     + if (qm_fd_get_format(fd) == qm_fd_sg)
1157     + /* Free the page frag that we allocated on Tx */
1158     + skb_free_frag(phys_to_virt(addr));
1159     +
1160     return skb;
1161     }
1162    
1163     @@ -2114,7 +2119,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1164     if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
1165     return NETDEV_TX_OK;
1166    
1167     - dpaa_cleanup_tx_fd(priv, &fd);
1168     + dpaa_cleanup_tx_fd(priv, &fd, false);
1169     skb_to_fd_failed:
1170     enomem:
1171     percpu_stats->tx_errors++;
1172     @@ -2160,7 +2165,7 @@ static void dpaa_tx_error(struct net_device *net_dev,
1173    
1174     percpu_priv->stats.tx_errors++;
1175    
1176     - skb = dpaa_cleanup_tx_fd(priv, fd);
1177     + skb = dpaa_cleanup_tx_fd(priv, fd, false);
1178     dev_kfree_skb(skb);
1179     }
1180    
1181     @@ -2200,7 +2205,7 @@ static void dpaa_tx_conf(struct net_device *net_dev,
1182    
1183     percpu_priv->tx_confirm++;
1184    
1185     - skb = dpaa_cleanup_tx_fd(priv, fd);
1186     + skb = dpaa_cleanup_tx_fd(priv, fd, true);
1187    
1188     consume_skb(skb);
1189     }
1190     @@ -2430,7 +2435,7 @@ static void egress_ern(struct qman_portal *portal,
1191     percpu_priv->stats.tx_fifo_errors++;
1192     count_ern(percpu_priv, msg);
1193    
1194     - skb = dpaa_cleanup_tx_fd(priv, fd);
1195     + skb = dpaa_cleanup_tx_fd(priv, fd, false);
1196     dev_kfree_skb_any(skb);
1197     }
1198    
1199     diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
1200     index 0aa1c34019bb..dc9a6c36cac0 100644
1201     --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
1202     +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
1203     @@ -216,7 +216,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
1204     if (err == -EINVAL)
1205     /* Older firmware versions don't support all pages */
1206     memset(&dpni_stats, 0, sizeof(dpni_stats));
1207     - else
1208     + else if (err)
1209     netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
1210    
1211     num_cnt = dpni_stats_page_size[j] / sizeof(u64);
1212     diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
1213     index c0637a0cbfe8..e92a00a61755 100644
1214     --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
1215     +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
1216     @@ -1873,8 +1873,8 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
1217     enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1218     struct virtchnl_queue_select *vqs =
1219     (struct virtchnl_queue_select *)msg;
1220     + struct ice_eth_stats stats = { 0 };
1221     struct ice_pf *pf = vf->pf;
1222     - struct ice_eth_stats stats;
1223     struct ice_vsi *vsi;
1224    
1225     if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1226     @@ -1893,7 +1893,6 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
1227     goto error_param;
1228     }
1229    
1230     - memset(&stats, 0, sizeof(struct ice_eth_stats));
1231     ice_update_eth_stats(vsi);
1232    
1233     stats = vsi->eth_stats;
1234     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1235     index 947122c68493..96711e34d248 100644
1236     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1237     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1238     @@ -1615,8 +1615,11 @@ static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
1239    
1240     flow_flag_clear(flow, DUP);
1241    
1242     - mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
1243     - kfree(flow->peer_flow);
1244     + if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
1245     + mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
1246     + kfree(flow->peer_flow);
1247     + }
1248     +
1249     flow->peer_flow = NULL;
1250     }
1251    
1252     diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
1253     index f9e6744d8fd6..41ddd8fff2a7 100644
1254     --- a/drivers/net/ethernet/socionext/netsec.c
1255     +++ b/drivers/net/ethernet/socionext/netsec.c
1256     @@ -847,8 +847,8 @@ static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
1257     enum dma_data_direction dma_dir =
1258     page_pool_get_dma_dir(rx_ring->page_pool);
1259    
1260     - dma_handle = page_pool_get_dma_addr(page) +
1261     - NETSEC_RXBUF_HEADROOM;
1262     + dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom +
1263     + sizeof(*xdpf);
1264     dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
1265     dma_dir);
1266     tx_desc.buf_type = TYPE_NETSEC_XDP_TX;
1267     diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
1268     index 676006f32f91..479325eeaf8a 100644
1269     --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
1270     +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
1271     @@ -1790,10 +1790,6 @@ static int axienet_probe(struct platform_device *pdev)
1272     /* Check for these resources directly on the Ethernet node. */
1273     struct resource *res = platform_get_resource(pdev,
1274     IORESOURCE_MEM, 1);
1275     - if (!res) {
1276     - dev_err(&pdev->dev, "unable to get DMA memory resource\n");
1277     - goto free_netdev;
1278     - }
1279     lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
1280     lp->rx_irq = platform_get_irq(pdev, 1);
1281     lp->tx_irq = platform_get_irq(pdev, 0);
1282     diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
1283     index 78e3e689a733..0dee358864f3 100644
1284     --- a/drivers/net/hyperv/netvsc_drv.c
1285     +++ b/drivers/net/hyperv/netvsc_drv.c
1286     @@ -285,9 +285,9 @@ static inline u32 netvsc_get_hash(
1287     else if (flow.basic.n_proto == htons(ETH_P_IPV6))
1288     hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
1289     else
1290     - hash = 0;
1291     + return 0;
1292    
1293     - skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
1294     + __skb_set_sw_hash(skb, hash, false);
1295     }
1296    
1297     return hash;
1298     @@ -795,8 +795,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
1299     skb->protocol == htons(ETH_P_IP))
1300     netvsc_comp_ipcsum(skb);
1301    
1302     - /* Do L4 checksum offload if enabled and present.
1303     - */
1304     + /* Do L4 checksum offload if enabled and present. */
1305     if (csum_info && (net->features & NETIF_F_RXCSUM)) {
1306     if (csum_info->receive.tcp_checksum_succeeded ||
1307     csum_info->receive.udp_checksum_succeeded)
1308     diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
1309     index 937d0059e8ac..5e956089bf52 100644
1310     --- a/drivers/net/phy/broadcom.c
1311     +++ b/drivers/net/phy/broadcom.c
1312     @@ -26,18 +26,13 @@ MODULE_DESCRIPTION("Broadcom PHY driver");
1313     MODULE_AUTHOR("Maciej W. Rozycki");
1314     MODULE_LICENSE("GPL");
1315    
1316     +static int bcm54xx_config_clock_delay(struct phy_device *phydev);
1317     +
1318     static int bcm54210e_config_init(struct phy_device *phydev)
1319     {
1320     int val;
1321    
1322     - val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
1323     - val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
1324     - val |= MII_BCM54XX_AUXCTL_MISC_WREN;
1325     - bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, val);
1326     -
1327     - val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
1328     - val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
1329     - bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
1330     + bcm54xx_config_clock_delay(phydev);
1331    
1332     if (phydev->dev_flags & PHY_BRCM_EN_MASTER_MODE) {
1333     val = phy_read(phydev, MII_CTRL1000);
1334     diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
1335     index 8aec7ccf2d79..502814c26b33 100644
1336     --- a/drivers/net/wireless/mediatek/mt76/mt76.h
1337     +++ b/drivers/net/wireless/mediatek/mt76/mt76.h
1338     @@ -367,8 +367,8 @@ enum mt76u_in_ep {
1339    
1340     enum mt76u_out_ep {
1341     MT_EP_OUT_INBAND_CMD,
1342     - MT_EP_OUT_AC_BK,
1343     MT_EP_OUT_AC_BE,
1344     + MT_EP_OUT_AC_BK,
1345     MT_EP_OUT_AC_VI,
1346     MT_EP_OUT_AC_VO,
1347     MT_EP_OUT_HCCA,
1348     @@ -799,7 +799,8 @@ static inline int
1349     mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
1350     int timeout)
1351     {
1352     - struct usb_device *udev = to_usb_device(dev->dev);
1353     + struct usb_interface *uintf = to_usb_interface(dev->dev);
1354     + struct usb_device *udev = interface_to_usbdev(uintf);
1355     struct mt76_usb *usb = &dev->usb;
1356     unsigned int pipe;
1357    
1358     diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
1359     index 00a445d27599..65d404e61404 100644
1360     --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
1361     +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
1362     @@ -226,7 +226,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
1363     u32 mac_rev;
1364     int ret;
1365    
1366     - mdev = mt76_alloc_device(&usb_dev->dev, sizeof(*dev), &mt76x0u_ops,
1367     + mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
1368     &drv_ops);
1369     if (!mdev)
1370     return -ENOMEM;
1371     diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
1372     index da5e0f9a8bae..8b26c6108186 100644
1373     --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
1374     +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
1375     @@ -39,7 +39,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
1376     struct mt76_dev *mdev;
1377     int err;
1378    
1379     - mdev = mt76_alloc_device(&udev->dev, sizeof(*dev), &mt76x2u_ops,
1380     + mdev = mt76_alloc_device(&intf->dev, sizeof(*dev), &mt76x2u_ops,
1381     &drv_ops);
1382     if (!mdev)
1383     return -ENOMEM;
1384     diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
1385     index 20c6fe510e9d..05aa42bd9808 100644
1386     --- a/drivers/net/wireless/mediatek/mt76/usb.c
1387     +++ b/drivers/net/wireless/mediatek/mt76/usb.c
1388     @@ -20,7 +20,8 @@ static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
1389     u8 req_type, u16 val, u16 offset,
1390     void *buf, size_t len)
1391     {
1392     - struct usb_device *udev = to_usb_device(dev->dev);
1393     + struct usb_interface *uintf = to_usb_interface(dev->dev);
1394     + struct usb_device *udev = interface_to_usbdev(uintf);
1395     unsigned int pipe;
1396     int i, ret;
1397    
1398     @@ -235,7 +236,8 @@ mt76u_rd_rp(struct mt76_dev *dev, u32 base,
1399    
1400     static bool mt76u_check_sg(struct mt76_dev *dev)
1401     {
1402     - struct usb_device *udev = to_usb_device(dev->dev);
1403     + struct usb_interface *uintf = to_usb_interface(dev->dev);
1404     + struct usb_device *udev = interface_to_usbdev(uintf);
1405    
1406     return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
1407     (udev->bus->no_sg_constraint ||
1408     @@ -370,7 +372,8 @@ mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
1409     struct urb *urb, usb_complete_t complete_fn,
1410     void *context)
1411     {
1412     - struct usb_device *udev = to_usb_device(dev->dev);
1413     + struct usb_interface *uintf = to_usb_interface(dev->dev);
1414     + struct usb_device *udev = interface_to_usbdev(uintf);
1415     unsigned int pipe;
1416    
1417     if (dir == USB_DIR_IN)
1418     @@ -952,6 +955,7 @@ int mt76u_init(struct mt76_dev *dev,
1419     .rd_rp = mt76u_rd_rp,
1420     .type = MT76_BUS_USB,
1421     };
1422     + struct usb_device *udev = interface_to_usbdev(intf);
1423     struct mt76_usb *usb = &dev->usb;
1424    
1425     tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
1426     @@ -965,6 +969,8 @@ int mt76u_init(struct mt76_dev *dev,
1427     dev->bus = &mt76u_ops;
1428     dev->queue_ops = &usb_queue_ops;
1429    
1430     + dev_set_drvdata(&udev->dev, dev);
1431     +
1432     usb->sg_en = mt76u_check_sg(dev);
1433    
1434     return mt76u_set_endpoints(intf, usb);
1435     diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
1436     index 06f5702ab4bd..d863ab4a66c9 100644
1437     --- a/drivers/net/wireless/mediatek/mt7601u/phy.c
1438     +++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
1439     @@ -213,7 +213,7 @@ int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev)
1440    
1441     do {
1442     val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION);
1443     - if (val && ~val)
1444     + if (val && val != 0xff)
1445     break;
1446     } while (--i);
1447    
1448     diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
1449     index f1cdcd61c54a..c99f1912e266 100644
1450     --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
1451     +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
1452     @@ -5839,8 +5839,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1453     rt2800_register_write(rt2x00dev, TX_TXBF_CFG_0, 0x8000fc21);
1454     rt2800_register_write(rt2x00dev, TX_TXBF_CFG_3, 0x00009c40);
1455     } else if (rt2x00_rt(rt2x00dev, RT5390) ||
1456     - rt2x00_rt(rt2x00dev, RT5392) ||
1457     - rt2x00_rt(rt2x00dev, RT6352)) {
1458     + rt2x00_rt(rt2x00dev, RT5392)) {
1459     rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
1460     rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
1461     rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
1462     @@ -5854,8 +5853,6 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1463     rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401);
1464     rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000);
1465     rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
1466     - rt2800_register_write(rt2x00dev, MIMO_PS_CFG, 0x00000002);
1467     - rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x00150F0F);
1468     rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000);
1469     rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0);
1470     rt2800_register_write(rt2x00dev, TX1_BB_GAIN_ATTEN, 0x0);
1471     diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
1472     index b082e2cc95f5..35dbdb3c4f1e 100644
1473     --- a/drivers/net/wireless/realtek/rtw88/fw.c
1474     +++ b/drivers/net/wireless/realtek/rtw88/fw.c
1475     @@ -498,9 +498,6 @@ static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
1476     {
1477     struct sk_buff *skb = rsvd_pkt->skb;
1478    
1479     - if (rsvd_pkt->add_txdesc)
1480     - rtw_fill_rsvd_page_desc(rtwdev, skb);
1481     -
1482     if (page >= 1)
1483     memcpy(buf + page_margin + page_size * (page - 1),
1484     skb->data, skb->len);
1485     @@ -625,16 +622,37 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
1486     list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
1487     iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt->type);
1488     if (!iter) {
1489     - rtw_err(rtwdev, "fail to build rsvd packet\n");
1490     + rtw_err(rtwdev, "failed to build rsvd packet\n");
1491     goto release_skb;
1492     }
1493     +
1494     + /* Fill the tx_desc for the rsvd pkt that requires one.
1495     + * Note that iter->len will be increased by tx_desc_sz.
1496     + */
1497     + if (rsvd_pkt->add_txdesc)
1498     + rtw_fill_rsvd_page_desc(rtwdev, iter);
1499     +
1500     rsvd_pkt->skb = iter;
1501     rsvd_pkt->page = total_page;
1502     - if (rsvd_pkt->add_txdesc)
1503     +
1504     + /* Reserved page is downloaded via TX path, and TX path will
1505     + * generate a tx_desc at the header to describe length of
1506     + * the buffer. If we do not count the size of the tx_desc
1507     + * added to the first rsvd_pkt (usually a beacon; firmware by
1508     + * default refers to the first page as the beacon content)
1509     + * when computing page numbers, we could generate a buffer
1510     + * whose size is smaller than the actual size of the rsvd_page.
1511     + */
1512     + if (total_page == 0) {
1513     + if (rsvd_pkt->type != RSVD_BEACON) {
1514     + rtw_err(rtwdev, "first page should be a beacon\n");
1515     + goto release_skb;
1516     + }
1517     total_page += rtw_len_to_page(iter->len + tx_desc_sz,
1518     page_size);
1519     - else
1520     + } else {
1521     total_page += rtw_len_to_page(iter->len, page_size);
1522     + }
1523     }
1524    
1525     if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
1526     @@ -647,13 +665,24 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
1527     if (!buf)
1528     goto release_skb;
1529    
1530     + /* Copy the content of each rsvd_pkt to the buf, and they should
1531     + * be aligned to the pages.
1532     + *
1533     + * Note that the first rsvd_pkt is a beacon no matter what vif->type.
1534     + * And that rsvd_pkt does not require tx_desc because when it goes
1535     + * through TX path, the TX path will generate one for it.
1536     + */
1537     list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
1538     rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
1539     page, buf, rsvd_pkt);
1540     - page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
1541     - }
1542     - list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
1543     + if (page == 0)
1544     + page += rtw_len_to_page(rsvd_pkt->skb->len +
1545     + tx_desc_sz, page_size);
1546     + else
1547     + page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
1548     +
1549     kfree_skb(rsvd_pkt->skb);
1550     + }
1551    
1552     return buf;
1553    
1554     @@ -706,6 +735,11 @@ int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
1555     goto free;
1556     }
1557    
1558     + /* The last step is to download *only* the beacon again, because
1559     + * the previous tx_desc describes the total rsvd page. Downloading
1560     + * the beacon again replaces the TX desc header, so we end up with
1561     + * a correct tx_desc for the beacon in the rsvd page.
1562     + */
1563     ret = rtw_download_beacon(rtwdev, vif);
1564     if (ret) {
1565     rtw_err(rtwdev, "failed to download beacon\n");
1566     diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
1567     index 7a3a4911bde2..806af37192bc 100644
1568     --- a/drivers/net/wireless/realtek/rtw88/main.c
1569     +++ b/drivers/net/wireless/realtek/rtw88/main.c
1570     @@ -1048,19 +1048,19 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
1571     /* power on mac to read efuse */
1572     ret = rtw_chip_efuse_enable(rtwdev);
1573     if (ret)
1574     - goto out;
1575     + goto out_unlock;
1576    
1577     ret = rtw_parse_efuse_map(rtwdev);
1578     if (ret)
1579     - goto out;
1580     + goto out_disable;
1581    
1582     ret = rtw_dump_hw_feature(rtwdev);
1583     if (ret)
1584     - goto out;
1585     + goto out_disable;
1586    
1587     ret = rtw_check_supported_rfe(rtwdev);
1588     if (ret)
1589     - goto out;
1590     + goto out_disable;
1591    
1592     if (efuse->crystal_cap == 0xff)
1593     efuse->crystal_cap = 0;
1594     @@ -1087,9 +1087,10 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
1595     efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0;
1596     efuse->ext_lna_2g = efuse->lna_type_5g & BIT(3) ? 1 : 0;
1597    
1598     +out_disable:
1599     rtw_chip_efuse_disable(rtwdev);
1600    
1601     -out:
1602     +out_unlock:
1603     mutex_unlock(&rtwdev->mutex);
1604     return ret;
1605     }
1606     diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
1607     index a45a6447b01d..32f37d08d5bc 100644
1608     --- a/drivers/pci/controller/pcie-mobiveil.c
1609     +++ b/drivers/pci/controller/pcie-mobiveil.c
1610     @@ -235,7 +235,7 @@ static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
1611     return PCIBIOS_SUCCESSFUL;
1612     }
1613    
1614     -static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
1615     +static u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
1616     {
1617     void *addr;
1618     u32 val;
1619     @@ -250,7 +250,8 @@ static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
1620     return val;
1621     }
1622    
1623     -static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
1624     +static void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
1625     + size_t size)
1626     {
1627     void *addr;
1628     int ret;
1629     @@ -262,19 +263,19 @@ static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
1630     dev_err(&pcie->pdev->dev, "write CSR address failed\n");
1631     }
1632    
1633     -static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
1634     +static u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off)
1635     {
1636     - return csr_read(pcie, off, 0x4);
1637     + return mobiveil_csr_read(pcie, off, 0x4);
1638     }
1639    
1640     -static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
1641     +static void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
1642     {
1643     - csr_write(pcie, val, off, 0x4);
1644     + mobiveil_csr_write(pcie, val, off, 0x4);
1645     }
1646    
1647     static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
1648     {
1649     - return (csr_readl(pcie, LTSSM_STATUS) &
1650     + return (mobiveil_csr_readl(pcie, LTSSM_STATUS) &
1651     LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
1652     }
1653    
1654     @@ -323,7 +324,7 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
1655     PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
1656     PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
1657    
1658     - csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
1659     + mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
1660    
1661     return pcie->config_axi_slave_base + where;
1662     }
1663     @@ -353,13 +354,14 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
1664     chained_irq_enter(chip, desc);
1665    
1666     /* read INTx status */
1667     - val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
1668     - mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
1669     + val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
1670     + mask = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
1671     intr_status = val & mask;
1672    
1673     /* Handle INTx */
1674     if (intr_status & PAB_INTP_INTX_MASK) {
1675     - shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
1676     + shifted_status = mobiveil_csr_readl(pcie,
1677     + PAB_INTP_AMBA_MISC_STAT);
1678     shifted_status &= PAB_INTP_INTX_MASK;
1679     shifted_status >>= PAB_INTX_START;
1680     do {
1681     @@ -373,12 +375,13 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
1682     bit);
1683    
1684     /* clear interrupt handled */
1685     - csr_writel(pcie, 1 << (PAB_INTX_START + bit),
1686     - PAB_INTP_AMBA_MISC_STAT);
1687     + mobiveil_csr_writel(pcie,
1688     + 1 << (PAB_INTX_START + bit),
1689     + PAB_INTP_AMBA_MISC_STAT);
1690     }
1691    
1692     - shifted_status = csr_readl(pcie,
1693     - PAB_INTP_AMBA_MISC_STAT);
1694     + shifted_status = mobiveil_csr_readl(pcie,
1695     + PAB_INTP_AMBA_MISC_STAT);
1696     shifted_status &= PAB_INTP_INTX_MASK;
1697     shifted_status >>= PAB_INTX_START;
1698     } while (shifted_status != 0);
1699     @@ -413,7 +416,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
1700     }
1701    
1702     /* Clear the interrupt status */
1703     - csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
1704     + mobiveil_csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
1705     chained_irq_exit(chip, desc);
1706     }
1707    
1708     @@ -474,24 +477,24 @@ static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
1709     return;
1710     }
1711    
1712     - value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
1713     + value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
1714     value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
1715     value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
1716     (lower_32_bits(size64) & WIN_SIZE_MASK);
1717     - csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
1718     + mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
1719    
1720     - csr_writel(pcie, upper_32_bits(size64),
1721     - PAB_EXT_PEX_AMAP_SIZEN(win_num));
1722     + mobiveil_csr_writel(pcie, upper_32_bits(size64),
1723     + PAB_EXT_PEX_AMAP_SIZEN(win_num));
1724    
1725     - csr_writel(pcie, lower_32_bits(cpu_addr),
1726     - PAB_PEX_AMAP_AXI_WIN(win_num));
1727     - csr_writel(pcie, upper_32_bits(cpu_addr),
1728     - PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
1729     + mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr),
1730     + PAB_PEX_AMAP_AXI_WIN(win_num));
1731     + mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
1732     + PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
1733    
1734     - csr_writel(pcie, lower_32_bits(pci_addr),
1735     - PAB_PEX_AMAP_PEX_WIN_L(win_num));
1736     - csr_writel(pcie, upper_32_bits(pci_addr),
1737     - PAB_PEX_AMAP_PEX_WIN_H(win_num));
1738     + mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
1739     + PAB_PEX_AMAP_PEX_WIN_L(win_num));
1740     + mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
1741     + PAB_PEX_AMAP_PEX_WIN_H(win_num));
1742    
1743     pcie->ib_wins_configured++;
1744     }
1745     @@ -515,27 +518,29 @@ static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
1746     * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
1747     * to 4 KB in PAB_AXI_AMAP_CTRL register
1748     */
1749     - value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
1750     + value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
1751     value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
1752     value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
1753     (lower_32_bits(size64) & WIN_SIZE_MASK);
1754     - csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
1755     + mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
1756    
1757     - csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
1758     + mobiveil_csr_writel(pcie, upper_32_bits(size64),
1759     + PAB_EXT_AXI_AMAP_SIZE(win_num));
1760    
1761     /*
1762     * program AXI window base with appropriate value in
1763     * PAB_AXI_AMAP_AXI_WIN0 register
1764     */
1765     - csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
1766     - PAB_AXI_AMAP_AXI_WIN(win_num));
1767     - csr_writel(pcie, upper_32_bits(cpu_addr),
1768     - PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
1769     + mobiveil_csr_writel(pcie,
1770     + lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
1771     + PAB_AXI_AMAP_AXI_WIN(win_num));
1772     + mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
1773     + PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
1774    
1775     - csr_writel(pcie, lower_32_bits(pci_addr),
1776     - PAB_AXI_AMAP_PEX_WIN_L(win_num));
1777     - csr_writel(pcie, upper_32_bits(pci_addr),
1778     - PAB_AXI_AMAP_PEX_WIN_H(win_num));
1779     + mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
1780     + PAB_AXI_AMAP_PEX_WIN_L(win_num));
1781     + mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
1782     + PAB_AXI_AMAP_PEX_WIN_H(win_num));
1783    
1784     pcie->ob_wins_configured++;
1785     }
1786     @@ -579,42 +584,42 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
1787     struct resource_entry *win;
1788    
1789     /* setup bus numbers */
1790     - value = csr_readl(pcie, PCI_PRIMARY_BUS);
1791     + value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
1792     value &= 0xff000000;
1793     value |= 0x00ff0100;
1794     - csr_writel(pcie, value, PCI_PRIMARY_BUS);
1795     + mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
1796    
1797     /*
1798     * program Bus Master Enable Bit in Command Register in PAB Config
1799     * Space
1800     */
1801     - value = csr_readl(pcie, PCI_COMMAND);
1802     + value = mobiveil_csr_readl(pcie, PCI_COMMAND);
1803     value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
1804     - csr_writel(pcie, value, PCI_COMMAND);
1805     + mobiveil_csr_writel(pcie, value, PCI_COMMAND);
1806    
1807     /*
1808     * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
1809     * register
1810     */
1811     - pab_ctrl = csr_readl(pcie, PAB_CTRL);
1812     + pab_ctrl = mobiveil_csr_readl(pcie, PAB_CTRL);
1813     pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
1814     - csr_writel(pcie, pab_ctrl, PAB_CTRL);
1815     + mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL);
1816    
1817     - csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
1818     - PAB_INTP_AMBA_MISC_ENB);
1819     + mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
1820     + PAB_INTP_AMBA_MISC_ENB);
1821    
1822     /*
1823     * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
1824     * PAB_AXI_PIO_CTRL Register
1825     */
1826     - value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
1827     + value = mobiveil_csr_readl(pcie, PAB_AXI_PIO_CTRL);
1828     value |= APIO_EN_MASK;
1829     - csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
1830     + mobiveil_csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
1831    
1832     /* Enable PCIe PIO master */
1833     - value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
1834     + value = mobiveil_csr_readl(pcie, PAB_PEX_PIO_CTRL);
1835     value |= 1 << PIO_ENABLE_SHIFT;
1836     - csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
1837     + mobiveil_csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
1838    
1839     /*
1840     * we'll program one outbound window for config reads and
1841     @@ -647,10 +652,10 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
1842     }
1843    
1844     /* fixup for PCIe class register */
1845     - value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
1846     + value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
1847     value &= 0xff;
1848     value |= (PCI_CLASS_BRIDGE_PCI << 16);
1849     - csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
1850     + mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
1851    
1852     /* setup MSI hardware registers */
1853     mobiveil_pcie_enable_msi(pcie);
1854     @@ -668,9 +673,9 @@ static void mobiveil_mask_intx_irq(struct irq_data *data)
1855     pcie = irq_desc_get_chip_data(desc);
1856     mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
1857     raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
1858     - shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
1859     + shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
1860     shifted_val &= ~mask;
1861     - csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
1862     + mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
1863     raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
1864     }
1865    
1866     @@ -684,9 +689,9 @@ static void mobiveil_unmask_intx_irq(struct irq_data *data)
1867     pcie = irq_desc_get_chip_data(desc);
1868     mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
1869     raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
1870     - shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
1871     + shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
1872     shifted_val |= mask;
1873     - csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
1874     + mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
1875     raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
1876     }
1877    
1878     diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
1879     index 544d64a84cc0..6e457967653e 100644
1880     --- a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
1881     +++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
1882     @@ -323,7 +323,8 @@ static int ltq_vrx200_pcie_phy_power_on(struct phy *phy)
1883     goto err_disable_pdi_clk;
1884    
1885     /* Check if we are in "startup ready" status */
1886     - if (ltq_vrx200_pcie_phy_wait_for_pll(phy) != 0)
1887     + ret = ltq_vrx200_pcie_phy_wait_for_pll(phy);
1888     + if (ret)
1889     goto err_disable_phy_clk;
1890    
1891     ltq_vrx200_pcie_phy_apply_workarounds(phy);
1892     diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
1893     index 2b97fb1185a0..9ca20c947283 100644
1894     --- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
1895     +++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
1896     @@ -603,6 +603,8 @@ static long inno_hdmi_phy_rk3228_clk_round_rate(struct clk_hw *hw,
1897     {
1898     const struct pre_pll_config *cfg = pre_pll_cfg_table;
1899    
1900     + rate = (rate / 1000) * 1000;
1901     +
1902     for (; cfg->pixclock != 0; cfg++)
1903     if (cfg->pixclock == rate && !cfg->fracdiv)
1904     break;
1905     @@ -755,6 +757,8 @@ static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw,
1906     {
1907     const struct pre_pll_config *cfg = pre_pll_cfg_table;
1908    
1909     + rate = (rate / 1000) * 1000;
1910     +
1911     for (; cfg->pixclock != 0; cfg++)
1912     if (cfg->pixclock == rate)
1913     break;
1914     diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
1915     index a52c5bb35033..a28bd15297f5 100644
1916     --- a/drivers/phy/ti/phy-gmii-sel.c
1917     +++ b/drivers/phy/ti/phy-gmii-sel.c
1918     @@ -69,11 +69,11 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
1919     break;
1920    
1921     case PHY_INTERFACE_MODE_RGMII:
1922     + case PHY_INTERFACE_MODE_RGMII_RXID:
1923     gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII;
1924     break;
1925    
1926     case PHY_INTERFACE_MODE_RGMII_ID:
1927     - case PHY_INTERFACE_MODE_RGMII_RXID:
1928     case PHY_INTERFACE_MODE_RGMII_TXID:
1929     gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII;
1930     rgmii_id = 1;
1931     diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c
1932     index b9d03c33d8dc..1176d543191a 100644
1933     --- a/drivers/platform/chrome/wilco_ec/telemetry.c
1934     +++ b/drivers/platform/chrome/wilco_ec/telemetry.c
1935     @@ -406,8 +406,8 @@ static int telem_device_remove(struct platform_device *pdev)
1936     struct telem_device_data *dev_data = platform_get_drvdata(pdev);
1937    
1938     cdev_device_del(&dev_data->cdev, &dev_data->dev);
1939     - put_device(&dev_data->dev);
1940     ida_simple_remove(&telem_ida, MINOR(dev_data->dev.devt));
1941     + put_device(&dev_data->dev);
1942    
1943     return 0;
1944     }
1945     diff --git a/drivers/power/supply/bd70528-charger.c b/drivers/power/supply/bd70528-charger.c
1946     index 1bb32b7226d7..b8e1ec106627 100644
1947     --- a/drivers/power/supply/bd70528-charger.c
1948     +++ b/drivers/power/supply/bd70528-charger.c
1949     @@ -741,3 +741,4 @@ module_platform_driver(bd70528_power);
1950     MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
1951     MODULE_DESCRIPTION("BD70528 power-supply driver");
1952     MODULE_LICENSE("GPL");
1953     +MODULE_ALIAS("platform:bd70528-power");
1954     diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
1955     index 6f5840a1a82d..05273725a9ff 100644
1956     --- a/drivers/pwm/pwm-sun4i.c
1957     +++ b/drivers/pwm/pwm-sun4i.c
1958     @@ -137,10 +137,10 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
1959    
1960     val = sun4i_pwm_readl(sun4i_pwm, PWM_CH_PRD(pwm->hwpwm));
1961    
1962     - tmp = prescaler * NSEC_PER_SEC * PWM_REG_DTY(val);
1963     + tmp = (u64)prescaler * NSEC_PER_SEC * PWM_REG_DTY(val);
1964     state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
1965    
1966     - tmp = prescaler * NSEC_PER_SEC * PWM_REG_PRD(val);
1967     + tmp = (u64)prescaler * NSEC_PER_SEC * PWM_REG_PRD(val);
1968     state->period = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
1969     }
1970    
1971     diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c
1972     index 6041839ec38c..5bf8a2dc5fe7 100644
1973     --- a/drivers/regulator/bd70528-regulator.c
1974     +++ b/drivers/regulator/bd70528-regulator.c
1975     @@ -285,3 +285,4 @@ module_platform_driver(bd70528_regulator);
1976     MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
1977     MODULE_DESCRIPTION("BD70528 voltage regulator driver");
1978     MODULE_LICENSE("GPL");
1979     +MODULE_ALIAS("platform:bd70528-pmic");
1980     diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c
1981     index ddfef4d43bab..627037aa66a8 100644
1982     --- a/drivers/rtc/rtc-bd70528.c
1983     +++ b/drivers/rtc/rtc-bd70528.c
1984     @@ -491,4 +491,4 @@ module_platform_driver(bd70528_rtc);
1985     MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
1986     MODULE_DESCRIPTION("BD70528 RTC driver");
1987     MODULE_LICENSE("GPL");
1988     -MODULE_ALIAS("platofrm:bd70528-rtc");
1989     +MODULE_ALIAS("platform:bd70528-rtc");
1990     diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
1991     index 9de3d46b3253..e17fac20127e 100644
1992     --- a/drivers/s390/crypto/pkey_api.c
1993     +++ b/drivers/s390/crypto/pkey_api.c
1994     @@ -740,8 +740,10 @@ static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
1995     kapqns = kmalloc(nbytes, GFP_KERNEL);
1996     if (!kapqns)
1997     return ERR_PTR(-ENOMEM);
1998     - if (copy_from_user(kapqns, uapqns, nbytes))
1999     + if (copy_from_user(kapqns, uapqns, nbytes)) {
2000     + kfree(kapqns);
2001     return ERR_PTR(-EFAULT);
2002     + }
2003     }
2004    
2005     return kapqns;
2006     diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
2007     index 65e31df37b1f..820f2c29376c 100644
2008     --- a/drivers/s390/net/qeth_core.h
2009     +++ b/drivers/s390/net/qeth_core.h
2010     @@ -620,6 +620,7 @@ struct qeth_ipato {
2011    
2012     struct qeth_channel {
2013     struct ccw_device *ccwdev;
2014     + struct qeth_cmd_buffer *active_cmd;
2015     enum qeth_channel_states state;
2016     atomic_t irq_pending;
2017     };
2018     @@ -1024,6 +1025,8 @@ int qeth_do_run_thread(struct qeth_card *, unsigned long);
2019     void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
2020     void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
2021     int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
2022     +int qeth_stop_channel(struct qeth_channel *channel);
2023     +
2024     void qeth_print_status_message(struct qeth_card *);
2025     int qeth_init_qdio_queues(struct qeth_card *);
2026     int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
2027     diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
2028     index 5be4d800e4ba..23852888eb2c 100644
2029     --- a/drivers/s390/net/qeth_core_main.c
2030     +++ b/drivers/s390/net/qeth_core_main.c
2031     @@ -515,7 +515,9 @@ static int __qeth_issue_next_read(struct qeth_card *card)
2032    
2033     QETH_CARD_TEXT(card, 6, "noirqpnd");
2034     rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
2035     - if (rc) {
2036     + if (!rc) {
2037     + channel->active_cmd = iob;
2038     + } else {
2039     QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
2040     rc, CARD_DEVID(card));
2041     atomic_set(&channel->irq_pending, 0);
2042     @@ -986,8 +988,21 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
2043     QETH_CARD_TEXT(card, 5, "data");
2044     }
2045    
2046     - if (qeth_intparm_is_iob(intparm))
2047     - iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
2048     + if (intparm == 0) {
2049     + QETH_CARD_TEXT(card, 5, "irqunsol");
2050     + } else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
2051     + QETH_CARD_TEXT(card, 5, "irqunexp");
2052     +
2053     + dev_err(&cdev->dev,
2054     + "Received IRQ with intparm %lx, expected %px\n",
2055     + intparm, channel->active_cmd);
2056     + if (channel->active_cmd)
2057     + qeth_cancel_cmd(channel->active_cmd, -EIO);
2058     + } else {
2059     + iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
2060     + }
2061     +
2062     + channel->active_cmd = NULL;
2063    
2064     rc = qeth_check_irb_error(card, cdev, irb);
2065     if (rc) {
2066     @@ -1007,15 +1022,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
2067     if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
2068     channel->state = CH_STATE_HALTED;
2069    
2070     - if (intparm == QETH_CLEAR_CHANNEL_PARM) {
2071     - QETH_CARD_TEXT(card, 6, "clrchpar");
2072     - /* we don't have to handle this further */
2073     - intparm = 0;
2074     - }
2075     - if (intparm == QETH_HALT_CHANNEL_PARM) {
2076     - QETH_CARD_TEXT(card, 6, "hltchpar");
2077     - /* we don't have to handle this further */
2078     - intparm = 0;
2079     + if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
2080     + SCSW_FCTL_HALT_FUNC))) {
2081     + qeth_cancel_cmd(iob, -ECANCELED);
2082     + iob = NULL;
2083     }
2084    
2085     cstat = irb->scsw.cmd.cstat;
2086     @@ -1408,7 +1418,7 @@ static int qeth_clear_channel(struct qeth_card *card,
2087    
2088     QETH_CARD_TEXT(card, 3, "clearch");
2089     spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
2090     - rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
2091     + rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
2092     spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
2093    
2094     if (rc)
2095     @@ -1430,7 +1440,7 @@ static int qeth_halt_channel(struct qeth_card *card,
2096    
2097     QETH_CARD_TEXT(card, 3, "haltch");
2098     spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
2099     - rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
2100     + rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
2101     spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
2102    
2103     if (rc)
2104     @@ -1444,6 +1454,25 @@ static int qeth_halt_channel(struct qeth_card *card,
2105     return 0;
2106     }
2107    
2108     +int qeth_stop_channel(struct qeth_channel *channel)
2109     +{
2110     + struct ccw_device *cdev = channel->ccwdev;
2111     + int rc;
2112     +
2113     + rc = ccw_device_set_offline(cdev);
2114     +
2115     + spin_lock_irq(get_ccwdev_lock(cdev));
2116     + if (channel->active_cmd) {
2117     + dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
2118     + channel->active_cmd);
2119     + channel->active_cmd = NULL;
2120     + }
2121     + spin_unlock_irq(get_ccwdev_lock(cdev));
2122     +
2123     + return rc;
2124     +}
2125     +EXPORT_SYMBOL_GPL(qeth_stop_channel);
2126     +
2127     static int qeth_halt_channels(struct qeth_card *card)
2128     {
2129     int rc1 = 0, rc2 = 0, rc3 = 0;
2130     @@ -1747,6 +1776,8 @@ static int qeth_send_control_data(struct qeth_card *card,
2131     spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
2132     rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
2133     (addr_t) iob, 0, 0, timeout);
2134     + if (!rc)
2135     + channel->active_cmd = iob;
2136     spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
2137     if (rc) {
2138     QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
2139     @@ -4625,12 +4656,12 @@ EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
2140    
2141     static void qeth_determine_capabilities(struct qeth_card *card)
2142     {
2143     + struct qeth_channel *channel = &card->data;
2144     + struct ccw_device *ddev = channel->ccwdev;
2145     int rc;
2146     - struct ccw_device *ddev;
2147     int ddev_offline = 0;
2148    
2149     QETH_CARD_TEXT(card, 2, "detcapab");
2150     - ddev = CARD_DDEV(card);
2151     if (!ddev->online) {
2152     ddev_offline = 1;
2153     rc = ccw_device_set_online(ddev);
2154     @@ -4669,7 +4700,7 @@ static void qeth_determine_capabilities(struct qeth_card *card)
2155    
2156     out_offline:
2157     if (ddev_offline == 1)
2158     - ccw_device_set_offline(ddev);
2159     + qeth_stop_channel(channel);
2160     out:
2161     return;
2162     }
2163     @@ -4870,9 +4901,9 @@ retry:
2164     QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
2165     CARD_DEVID(card));
2166     rc = qeth_qdio_clear_card(card, !IS_IQD(card));
2167     - ccw_device_set_offline(CARD_DDEV(card));
2168     - ccw_device_set_offline(CARD_WDEV(card));
2169     - ccw_device_set_offline(CARD_RDEV(card));
2170     + qeth_stop_channel(&card->data);
2171     + qeth_stop_channel(&card->write);
2172     + qeth_stop_channel(&card->read);
2173     qdio_free(CARD_DDEV(card));
2174     rc = ccw_device_set_online(CARD_RDEV(card));
2175     if (rc)
2176     diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
2177     index b7c17b5c823b..65038539b324 100644
2178     --- a/drivers/s390/net/qeth_core_mpc.h
2179     +++ b/drivers/s390/net/qeth_core_mpc.h
2180     @@ -28,20 +28,6 @@ extern unsigned char IPA_PDU_HEADER[];
2181     #define QETH_TIMEOUT (10 * HZ)
2182     #define QETH_IPA_TIMEOUT (45 * HZ)
2183    
2184     -#define QETH_CLEAR_CHANNEL_PARM -10
2185     -#define QETH_HALT_CHANNEL_PARM -11
2186     -
2187     -static inline bool qeth_intparm_is_iob(unsigned long intparm)
2188     -{
2189     - switch (intparm) {
2190     - case QETH_CLEAR_CHANNEL_PARM:
2191     - case QETH_HALT_CHANNEL_PARM:
2192     - case 0:
2193     - return false;
2194     - }
2195     - return true;
2196     -}
2197     -
2198     /*****************************************************************************/
2199     /* IP Assist related definitions */
2200     /*****************************************************************************/
2201     diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
2202     index 11e3292c0adf..59e220749ad1 100644
2203     --- a/drivers/s390/net/qeth_l2_main.c
2204     +++ b/drivers/s390/net/qeth_l2_main.c
2205     @@ -877,9 +877,9 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
2206    
2207     out_remove:
2208     qeth_l2_stop_card(card);
2209     - ccw_device_set_offline(CARD_DDEV(card));
2210     - ccw_device_set_offline(CARD_WDEV(card));
2211     - ccw_device_set_offline(CARD_RDEV(card));
2212     + qeth_stop_channel(&card->data);
2213     + qeth_stop_channel(&card->write);
2214     + qeth_stop_channel(&card->read);
2215     qdio_free(CARD_DDEV(card));
2216    
2217     mutex_unlock(&card->conf_mutex);
2218     @@ -910,9 +910,9 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
2219     rtnl_unlock();
2220    
2221     qeth_l2_stop_card(card);
2222     - rc = ccw_device_set_offline(CARD_DDEV(card));
2223     - rc2 = ccw_device_set_offline(CARD_WDEV(card));
2224     - rc3 = ccw_device_set_offline(CARD_RDEV(card));
2225     + rc = qeth_stop_channel(&card->data);
2226     + rc2 = qeth_stop_channel(&card->write);
2227     + rc3 = qeth_stop_channel(&card->read);
2228     if (!rc)
2229     rc = (rc2) ? rc2 : rc3;
2230     if (rc)
2231     diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
2232     index 5152970a9aa4..a1c23e998f97 100644
2233     --- a/drivers/s390/net/qeth_l3_main.c
2234     +++ b/drivers/s390/net/qeth_l3_main.c
2235     @@ -2383,9 +2383,9 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
2236     return 0;
2237     out_remove:
2238     qeth_l3_stop_card(card);
2239     - ccw_device_set_offline(CARD_DDEV(card));
2240     - ccw_device_set_offline(CARD_WDEV(card));
2241     - ccw_device_set_offline(CARD_RDEV(card));
2242     + qeth_stop_channel(&card->data);
2243     + qeth_stop_channel(&card->write);
2244     + qeth_stop_channel(&card->read);
2245     qdio_free(CARD_DDEV(card));
2246    
2247     mutex_unlock(&card->conf_mutex);
2248     @@ -2421,9 +2421,10 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
2249     call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
2250     rtnl_unlock();
2251     }
2252     - rc = ccw_device_set_offline(CARD_DDEV(card));
2253     - rc2 = ccw_device_set_offline(CARD_WDEV(card));
2254     - rc3 = ccw_device_set_offline(CARD_RDEV(card));
2255     +
2256     + rc = qeth_stop_channel(&card->data);
2257     + rc2 = qeth_stop_channel(&card->write);
2258     + rc3 = qeth_stop_channel(&card->read);
2259     if (!rc)
2260     rc = (rc2) ? rc2 : rc3;
2261     if (rc)
2262     diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
2263     index 25a6a25b17a2..1e38bb967871 100644
2264     --- a/drivers/scsi/ufs/ufshcd.c
2265     +++ b/drivers/scsi/ufs/ufshcd.c
2266     @@ -6779,23 +6779,13 @@ static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
2267     &hba->desc_size.geom_desc);
2268     if (err)
2269     hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
2270     +
2271     err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
2272     &hba->desc_size.hlth_desc);
2273     if (err)
2274     hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
2275     }
2276    
2277     -static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
2278     -{
2279     - hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
2280     - hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
2281     - hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
2282     - hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
2283     - hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
2284     - hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
2285     - hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
2286     -}
2287     -
2288     static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
2289     {19200000, REF_CLK_FREQ_19_2_MHZ},
2290     {26000000, REF_CLK_FREQ_26_MHZ},
2291     @@ -8283,9 +8273,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
2292     hba->mmio_base = mmio_base;
2293     hba->irq = irq;
2294    
2295     - /* Set descriptor lengths to specification defaults */
2296     - ufshcd_def_desc_sizes(hba);
2297     -
2298     err = ufshcd_hba_init(hba);
2299     if (err)
2300     goto out_error;
2301     diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
2302     index 48f7ac238861..f3d8d53ab84d 100644
2303     --- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
2304     +++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
2305     @@ -97,13 +97,13 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
2306     return ret ? ret : copied;
2307     }
2308    
2309     -static unsigned int snoop_file_poll(struct file *file,
2310     +static __poll_t snoop_file_poll(struct file *file,
2311     struct poll_table_struct *pt)
2312     {
2313     struct aspeed_lpc_snoop_channel *chan = snoop_file_to_chan(file);
2314    
2315     poll_wait(file, &chan->wq, pt);
2316     - return !kfifo_is_empty(&chan->fifo) ? POLLIN : 0;
2317     + return !kfifo_is_empty(&chan->fifo) ? EPOLLIN : 0;
2318     }
2319    
2320     static const struct file_operations snoop_fops = {
2321     diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
2322     index 9090ea12eaf3..4a6111635f82 100644
2323     --- a/drivers/soc/qcom/llcc-slice.c
2324     +++ b/drivers/soc/qcom/llcc-slice.c
2325     @@ -48,7 +48,7 @@
2326    
2327     static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
2328    
2329     -static const struct regmap_config llcc_regmap_config = {
2330     +static struct regmap_config llcc_regmap_config = {
2331     .reg_bits = 32,
2332     .reg_stride = 4,
2333     .val_bits = 32,
2334     @@ -323,6 +323,7 @@ static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
2335     if (IS_ERR(base))
2336     return ERR_CAST(base);
2337    
2338     + llcc_regmap_config.name = name;
2339     return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config);
2340     }
2341    
2342     diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
2343     index 3299cf5365f3..6651755e9f20 100644
2344     --- a/drivers/soc/renesas/renesas-soc.c
2345     +++ b/drivers/soc/renesas/renesas-soc.c
2346     @@ -326,7 +326,7 @@ static int __init renesas_soc_init(void)
2347     if (np) {
2348     chipid = of_iomap(np, 0);
2349     of_node_put(np);
2350     - } else if (soc->id) {
2351     + } else if (soc->id && family->reg) {
2352     chipid = ioremap(family->reg, 4);
2353     }
2354     if (chipid) {
2355     diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
2356     index 9f9c1c677cf4..0447afa970f5 100644
2357     --- a/drivers/soc/tegra/pmc.c
2358     +++ b/drivers/soc/tegra/pmc.c
2359     @@ -1899,6 +1899,20 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
2360     event->id,
2361     &pmc->irq, pmc);
2362    
2363     + /*
2364     + * GPIOs don't have an equivalent interrupt in the
2365     + * parent controller (GIC). However, some code, such
2366     + * as the one in irq_get_irqchip_state(), requires a
2367     + * valid IRQ chip to be set. Make sure that's the
2368     + * case by passing NULL here, which will install a
2369     + * dummy IRQ chip for the interrupt in the parent
2370     + * domain.
2371     + */
2372     + if (domain->parent)
2373     + irq_domain_set_hwirq_and_chip(domain->parent,
2374     + virq, 0, NULL,
2375     + NULL);
2376     +
2377     break;
2378     }
2379     }
2380     @@ -1908,10 +1922,22 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
2381     * dummy hardware IRQ number. This is used in the ->irq_set_type()
2382     * and ->irq_set_wake() callbacks to return early for these IRQs.
2383     */
2384     - if (i == soc->num_wake_events)
2385     + if (i == soc->num_wake_events) {
2386     err = irq_domain_set_hwirq_and_chip(domain, virq, ULONG_MAX,
2387     &pmc->irq, pmc);
2388    
2389     + /*
2390     + * Interrupts without a wake event don't have a corresponding
2391     + * interrupt in the parent controller (GIC). Pass NULL for the
2392     + * chip here, which causes a dummy IRQ chip to be installed
2393     + * for the interrupt in the parent domain, to make this
2394     + * explicit.
2395     + */
2396     + if (domain->parent)
2397     + irq_domain_set_hwirq_and_chip(domain->parent, virq, 0,
2398     + NULL, NULL);
2399     + }
2400     +
2401     return err;
2402     }
2403    
2404     diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
2405     index 13b0269a0abc..cf2367ba08d6 100644
2406     --- a/drivers/tee/optee/call.c
2407     +++ b/drivers/tee/optee/call.c
2408     @@ -554,6 +554,13 @@ static int check_mem_type(unsigned long start, size_t num_pages)
2409     struct mm_struct *mm = current->mm;
2410     int rc;
2411    
2412     + /*
2413     + * Allow kernel addresses to be registered with OP-TEE, as
2414     + * kernel pages are configured as normal memory only.
2415     + */
2416     + if (virt_addr_valid(start))
2417     + return 0;
2418     +
2419     down_read(&mm->mmap_sem);
2420     rc = __check_mem_type(find_vma(mm, start),
2421     start + num_pages * PAGE_SIZE);
2422     diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
2423     index 1854a3db7345..b830e0a87fba 100644
2424     --- a/drivers/tee/optee/core.c
2425     +++ b/drivers/tee/optee/core.c
2426     @@ -643,11 +643,6 @@ static struct optee *optee_probe(struct device_node *np)
2427     if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
2428     pr_info("dynamic shared memory is enabled\n");
2429    
2430     - rc = optee_enumerate_devices();
2431     - if (rc)
2432     - goto err;
2433     -
2434     - pr_info("initialized driver\n");
2435     return optee;
2436     err:
2437     if (optee) {
2438     @@ -702,9 +697,10 @@ static struct optee *optee_svc;
2439    
2440     static int __init optee_driver_init(void)
2441     {
2442     - struct device_node *fw_np;
2443     - struct device_node *np;
2444     - struct optee *optee;
2445     + struct device_node *fw_np = NULL;
2446     + struct device_node *np = NULL;
2447     + struct optee *optee = NULL;
2448     + int rc = 0;
2449    
2450     /* Node is supposed to be below /firmware */
2451     fw_np = of_find_node_by_name(NULL, "firmware");
2452     @@ -723,6 +719,14 @@ static int __init optee_driver_init(void)
2453     if (IS_ERR(optee))
2454     return PTR_ERR(optee);
2455    
2456     + rc = optee_enumerate_devices();
2457     + if (rc) {
2458     + optee_remove(optee);
2459     + return rc;
2460     + }
2461     +
2462     + pr_info("initialized driver\n");
2463     +
2464     optee_svc = optee;
2465    
2466     return 0;
2467     diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c
2468     index de1d9b8fad90..d767eebf30bd 100644
2469     --- a/drivers/tee/optee/shm_pool.c
2470     +++ b/drivers/tee/optee/shm_pool.c
2471     @@ -17,6 +17,7 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
2472     {
2473     unsigned int order = get_order(size);
2474     struct page *page;
2475     + int rc = 0;
2476    
2477     page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
2478     if (!page)
2479     @@ -26,12 +27,34 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
2480     shm->paddr = page_to_phys(page);
2481     shm->size = PAGE_SIZE << order;
2482    
2483     - return 0;
2484     + if (shm->flags & TEE_SHM_DMA_BUF) {
2485     + unsigned int nr_pages = 1 << order, i;
2486     + struct page **pages;
2487     +
2488     + pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL);
2489     + if (!pages)
2490     + return -ENOMEM;
2491     +
2492     + for (i = 0; i < nr_pages; i++) {
2493     + pages[i] = page;
2494     + page++;
2495     + }
2496     +
2497     + shm->flags |= TEE_SHM_REGISTER;
2498     + rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
2499     + (unsigned long)shm->kaddr);
2500     + kfree(pages);
2501     + }
2502     +
2503     + return rc;
2504     }
2505    
2506     static void pool_op_free(struct tee_shm_pool_mgr *poolm,
2507     struct tee_shm *shm)
2508     {
2509     + if (shm->flags & TEE_SHM_DMA_BUF)
2510     + optee_shm_unregister(shm->ctx, shm);
2511     +
2512     free_pages((unsigned long)shm->kaddr, get_order(shm->size));
2513     shm->kaddr = NULL;
2514     }
2515     diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
2516     index 0bb17b046140..65cb55f3916f 100644
2517     --- a/drivers/watchdog/sprd_wdt.c
2518     +++ b/drivers/watchdog/sprd_wdt.c
2519     @@ -327,10 +327,9 @@ static int sprd_wdt_probe(struct platform_device *pdev)
2520    
2521     static int __maybe_unused sprd_wdt_pm_suspend(struct device *dev)
2522     {
2523     - struct watchdog_device *wdd = dev_get_drvdata(dev);
2524     struct sprd_wdt *wdt = dev_get_drvdata(dev);
2525    
2526     - if (watchdog_active(wdd))
2527     + if (watchdog_active(&wdt->wdd))
2528     sprd_wdt_stop(&wdt->wdd);
2529     sprd_wdt_disable(wdt);
2530    
2531     @@ -339,7 +338,6 @@ static int __maybe_unused sprd_wdt_pm_suspend(struct device *dev)
2532    
2533     static int __maybe_unused sprd_wdt_pm_resume(struct device *dev)
2534     {
2535     - struct watchdog_device *wdd = dev_get_drvdata(dev);
2536     struct sprd_wdt *wdt = dev_get_drvdata(dev);
2537     int ret;
2538    
2539     @@ -347,7 +345,7 @@ static int __maybe_unused sprd_wdt_pm_resume(struct device *dev)
2540     if (ret)
2541     return ret;
2542    
2543     - if (watchdog_active(wdd)) {
2544     + if (watchdog_active(&wdt->wdd)) {
2545     ret = sprd_wdt_start(&wdt->wdd);
2546     if (ret) {
2547     sprd_wdt_disable(wdt);
2548     diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
2549     index d4fbe5f85f1b..b108528bf010 100644
2550     --- a/fs/afs/dir_edit.c
2551     +++ b/fs/afs/dir_edit.c
2552     @@ -68,13 +68,11 @@ static int afs_find_contig_bits(union afs_xdr_dir_block *block, unsigned int nr_
2553     static void afs_set_contig_bits(union afs_xdr_dir_block *block,
2554     int bit, unsigned int nr_slots)
2555     {
2556     - u64 mask, before, after;
2557     + u64 mask;
2558    
2559     mask = (1 << nr_slots) - 1;
2560     mask <<= bit;
2561    
2562     - before = *(u64 *)block->hdr.bitmap;
2563     -
2564     block->hdr.bitmap[0] |= (u8)(mask >> 0 * 8);
2565     block->hdr.bitmap[1] |= (u8)(mask >> 1 * 8);
2566     block->hdr.bitmap[2] |= (u8)(mask >> 2 * 8);
2567     @@ -83,8 +81,6 @@ static void afs_set_contig_bits(union afs_xdr_dir_block *block,
2568     block->hdr.bitmap[5] |= (u8)(mask >> 5 * 8);
2569     block->hdr.bitmap[6] |= (u8)(mask >> 6 * 8);
2570     block->hdr.bitmap[7] |= (u8)(mask >> 7 * 8);
2571     -
2572     - after = *(u64 *)block->hdr.bitmap;
2573     }
2574    
2575     /*
2576     @@ -93,13 +89,11 @@ static void afs_set_contig_bits(union afs_xdr_dir_block *block,
2577     static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
2578     int bit, unsigned int nr_slots)
2579     {
2580     - u64 mask, before, after;
2581     + u64 mask;
2582    
2583     mask = (1 << nr_slots) - 1;
2584     mask <<= bit;
2585    
2586     - before = *(u64 *)block->hdr.bitmap;
2587     -
2588     block->hdr.bitmap[0] &= ~(u8)(mask >> 0 * 8);
2589     block->hdr.bitmap[1] &= ~(u8)(mask >> 1 * 8);
2590     block->hdr.bitmap[2] &= ~(u8)(mask >> 2 * 8);
2591     @@ -108,8 +102,6 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
2592     block->hdr.bitmap[5] &= ~(u8)(mask >> 5 * 8);
2593     block->hdr.bitmap[6] &= ~(u8)(mask >> 6 * 8);
2594     block->hdr.bitmap[7] &= ~(u8)(mask >> 7 * 8);
2595     -
2596     - after = *(u64 *)block->hdr.bitmap;
2597     }
2598    
2599     /*
2600     diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
2601     index c4b1a89b8845..f2f81561ebb6 100644
2602     --- a/fs/nfsd/Kconfig
2603     +++ b/fs/nfsd/Kconfig
2604     @@ -73,6 +73,7 @@ config NFSD_V4
2605     select NFSD_V3
2606     select FS_POSIX_ACL
2607     select SUNRPC_GSS
2608     + select CRYPTO_MD5
2609     select CRYPTO_SHA256
2610     select GRACE_PERIOD
2611     help
2612     diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
2613     index cd6c7210a373..c7de17deeae6 100644
2614     --- a/fs/xfs/xfs_quotaops.c
2615     +++ b/fs/xfs/xfs_quotaops.c
2616     @@ -201,6 +201,9 @@ xfs_fs_rm_xquota(
2617     if (XFS_IS_QUOTA_ON(mp))
2618     return -EINVAL;
2619    
2620     + if (uflags & ~(FS_USER_QUOTA | FS_GROUP_QUOTA | FS_PROJ_QUOTA))
2621     + return -EINVAL;
2622     +
2623     if (uflags & FS_USER_QUOTA)
2624     flags |= XFS_DQ_USER;
2625     if (uflags & FS_GROUP_QUOTA)
2626     diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
2627     index d1a5d5df02f5..08b25c02b5a1 100644
2628     --- a/include/linux/mmc/sdio_ids.h
2629     +++ b/include/linux/mmc/sdio_ids.h
2630     @@ -71,6 +71,8 @@
2631    
2632     #define SDIO_VENDOR_ID_TI 0x0097
2633     #define SDIO_DEVICE_ID_TI_WL1271 0x4076
2634     +#define SDIO_VENDOR_ID_TI_WL1251 0x104c
2635     +#define SDIO_DEVICE_ID_TI_WL1251 0x9066
2636    
2637     #define SDIO_VENDOR_ID_STE 0x0020
2638     #define SDIO_DEVICE_ID_STE_CW1200 0x2280
2639     diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
2640     index 3867864cdc2f..3d3d61b5985b 100644
2641     --- a/kernel/bpf/devmap.c
2642     +++ b/kernel/bpf/devmap.c
2643     @@ -74,7 +74,7 @@ struct bpf_dtab_netdev {
2644    
2645     struct bpf_dtab {
2646     struct bpf_map map;
2647     - struct bpf_dtab_netdev **netdev_map;
2648     + struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
2649     struct list_head __percpu *flush_list;
2650     struct list_head list;
2651    
2652     @@ -101,6 +101,12 @@ static struct hlist_head *dev_map_create_hash(unsigned int entries)
2653     return hash;
2654     }
2655    
2656     +static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
2657     + int idx)
2658     +{
2659     + return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
2660     +}
2661     +
2662     static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
2663     {
2664     int err, cpu;
2665     @@ -120,8 +126,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
2666     bpf_map_init_from_attr(&dtab->map, attr);
2667    
2668     /* make sure page count doesn't overflow */
2669     - cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
2670     - cost += sizeof(struct list_head) * num_possible_cpus();
2671     + cost = (u64) sizeof(struct list_head) * num_possible_cpus();
2672    
2673     if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
2674     dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
2675     @@ -129,6 +134,8 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
2676     if (!dtab->n_buckets) /* Overflow check */
2677     return -EINVAL;
2678     cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
2679     + } else {
2680     + cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
2681     }
2682    
2683     /* if map size is larger than memlock limit, reject it */
2684     @@ -143,24 +150,22 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
2685     for_each_possible_cpu(cpu)
2686     INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
2687    
2688     - dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
2689     - sizeof(struct bpf_dtab_netdev *),
2690     - dtab->map.numa_node);
2691     - if (!dtab->netdev_map)
2692     - goto free_percpu;
2693     -
2694     if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
2695     dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
2696     if (!dtab->dev_index_head)
2697     - goto free_map_area;
2698     + goto free_percpu;
2699    
2700     spin_lock_init(&dtab->index_lock);
2701     + } else {
2702     + dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
2703     + sizeof(struct bpf_dtab_netdev *),
2704     + dtab->map.numa_node);
2705     + if (!dtab->netdev_map)
2706     + goto free_percpu;
2707     }
2708    
2709     return 0;
2710    
2711     -free_map_area:
2712     - bpf_map_area_free(dtab->netdev_map);
2713     free_percpu:
2714     free_percpu(dtab->flush_list);
2715     free_charge:
2716     @@ -228,21 +233,40 @@ static void dev_map_free(struct bpf_map *map)
2717     cond_resched();
2718     }
2719    
2720     - for (i = 0; i < dtab->map.max_entries; i++) {
2721     - struct bpf_dtab_netdev *dev;
2722     + if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
2723     + for (i = 0; i < dtab->n_buckets; i++) {
2724     + struct bpf_dtab_netdev *dev;
2725     + struct hlist_head *head;
2726     + struct hlist_node *next;
2727    
2728     - dev = dtab->netdev_map[i];
2729     - if (!dev)
2730     - continue;
2731     + head = dev_map_index_hash(dtab, i);
2732    
2733     - free_percpu(dev->bulkq);
2734     - dev_put(dev->dev);
2735     - kfree(dev);
2736     + hlist_for_each_entry_safe(dev, next, head, index_hlist) {
2737     + hlist_del_rcu(&dev->index_hlist);
2738     + free_percpu(dev->bulkq);
2739     + dev_put(dev->dev);
2740     + kfree(dev);
2741     + }
2742     + }
2743     +
2744     + kfree(dtab->dev_index_head);
2745     + } else {
2746     + for (i = 0; i < dtab->map.max_entries; i++) {
2747     + struct bpf_dtab_netdev *dev;
2748     +
2749     + dev = dtab->netdev_map[i];
2750     + if (!dev)
2751     + continue;
2752     +
2753     + free_percpu(dev->bulkq);
2754     + dev_put(dev->dev);
2755     + kfree(dev);
2756     + }
2757     +
2758     + bpf_map_area_free(dtab->netdev_map);
2759     }
2760    
2761     free_percpu(dtab->flush_list);
2762     - bpf_map_area_free(dtab->netdev_map);
2763     - kfree(dtab->dev_index_head);
2764     kfree(dtab);
2765     }
2766    
2767     @@ -263,12 +287,6 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
2768     return 0;
2769     }
2770    
2771     -static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
2772     - int idx)
2773     -{
2774     - return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
2775     -}
2776     -
2777     struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
2778     {
2779     struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
2780     diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
2781     index 8402b29c280f..867fd72cb260 100644
2782     --- a/kernel/dma/direct.c
2783     +++ b/kernel/dma/direct.c
2784     @@ -375,7 +375,7 @@ dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
2785     {
2786     dma_addr_t dma_addr = paddr;
2787    
2788     - if (unlikely(!dma_direct_possible(dev, dma_addr, size))) {
2789     + if (unlikely(!dma_capable(dev, dma_addr, size))) {
2790     report_addr(dev, dma_addr, size);
2791     return DMA_MAPPING_ERROR;
2792     }
2793     diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
2794     index 2defc7fe74c3..fa08d55f7040 100644
2795     --- a/kernel/rcu/tree_plugin.h
2796     +++ b/kernel/rcu/tree_plugin.h
2797     @@ -1946,7 +1946,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
2798     int __maybe_unused cpu = my_rdp->cpu;
2799     unsigned long cur_gp_seq;
2800     unsigned long flags;
2801     - bool gotcbs;
2802     + bool gotcbs = false;
2803     unsigned long j = jiffies;
2804     bool needwait_gp = false; // This prevents actual uninitialized use.
2805     bool needwake;
2806     diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
2807     index a8a08030a8f7..08bdee0480b3 100644
2808     --- a/kernel/sched/deadline.c
2809     +++ b/kernel/sched/deadline.c
2810     @@ -1743,13 +1743,16 @@ static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
2811     }
2812     #endif
2813    
2814     -static void set_next_task_dl(struct rq *rq, struct task_struct *p)
2815     +static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
2816     {
2817     p->se.exec_start = rq_clock_task(rq);
2818    
2819     /* You can't push away the running task */
2820     dequeue_pushable_dl_task(rq, p);
2821    
2822     + if (!first)
2823     + return;
2824     +
2825     if (hrtick_enabled(rq))
2826     start_hrtick_dl(rq, p);
2827    
2828     @@ -1785,7 +1788,7 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
2829     dl_se = pick_next_dl_entity(rq, dl_rq);
2830     BUG_ON(!dl_se);
2831     p = dl_task_of(dl_se);
2832     - set_next_task_dl(rq, p);
2833     + set_next_task_dl(rq, p, true);
2834     return p;
2835     }
2836    
2837     diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
2838     index 69a81a5709ff..c87a798d1456 100644
2839     --- a/kernel/sched/fair.c
2840     +++ b/kernel/sched/fair.c
2841     @@ -3504,9 +3504,6 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
2842     cfs_rq->load_last_update_time_copy = sa->last_update_time;
2843     #endif
2844    
2845     - if (decayed)
2846     - cfs_rq_util_change(cfs_rq, 0);
2847     -
2848     return decayed;
2849     }
2850    
2851     @@ -3616,8 +3613,12 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
2852     attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
2853     update_tg_load_avg(cfs_rq, 0);
2854    
2855     - } else if (decayed && (flags & UPDATE_TG))
2856     - update_tg_load_avg(cfs_rq, 0);
2857     + } else if (decayed) {
2858     + cfs_rq_util_change(cfs_rq, 0);
2859     +
2860     + if (flags & UPDATE_TG)
2861     + update_tg_load_avg(cfs_rq, 0);
2862     + }
2863     }
2864    
2865     #ifndef CONFIG_64BIT
2866     @@ -7517,6 +7518,28 @@ static inline bool others_have_blocked(struct rq *rq) { return false; }
2867     static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
2868     #endif
2869    
2870     +static bool __update_blocked_others(struct rq *rq, bool *done)
2871     +{
2872     + const struct sched_class *curr_class;
2873     + u64 now = rq_clock_pelt(rq);
2874     + bool decayed;
2875     +
2876     + /*
2877     + * update_load_avg() can call cpufreq_update_util(). Make sure that RT,
2878     + * DL and IRQ signals have been updated before updating CFS.
2879     + */
2880     + curr_class = rq->curr->sched_class;
2881     +
2882     + decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
2883     + update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
2884     + update_irq_load_avg(rq, 0);
2885     +
2886     + if (others_have_blocked(rq))
2887     + *done = false;
2888     +
2889     + return decayed;
2890     +}
2891     +
2892     #ifdef CONFIG_FAIR_GROUP_SCHED
2893    
2894     static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
2895     @@ -7536,29 +7559,11 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
2896     return true;
2897     }
2898    
2899     -static void update_blocked_averages(int cpu)
2900     +static bool __update_blocked_fair(struct rq *rq, bool *done)
2901     {
2902     - struct rq *rq = cpu_rq(cpu);
2903     struct cfs_rq *cfs_rq, *pos;
2904     - const struct sched_class *curr_class;
2905     - struct rq_flags rf;
2906     - bool done = true;
2907     -
2908     - rq_lock_irqsave(rq, &rf);
2909     - update_rq_clock(rq);
2910     -
2911     - /*
2912     - * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
2913     - * that RT, DL and IRQ signals have been updated before updating CFS.
2914     - */
2915     - curr_class = rq->curr->sched_class;
2916     - update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
2917     - update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
2918     - update_irq_load_avg(rq, 0);
2919     -
2920     - /* Don't need periodic decay once load/util_avg are null */
2921     - if (others_have_blocked(rq))
2922     - done = false;
2923     + bool decayed = false;
2924     + int cpu = cpu_of(rq);
2925    
2926     /*
2927     * Iterates the task_group tree in a bottom up fashion, see
2928     @@ -7567,9 +7572,13 @@ static void update_blocked_averages(int cpu)
2929     for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
2930     struct sched_entity *se;
2931    
2932     - if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq))
2933     + if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
2934     update_tg_load_avg(cfs_rq, 0);
2935    
2936     + if (cfs_rq == &rq->cfs)
2937     + decayed = true;
2938     + }
2939     +
2940     /* Propagate pending load changes to the parent, if any: */
2941     se = cfs_rq->tg->se[cpu];
2942     if (se && !skip_blocked_update(se))
2943     @@ -7584,11 +7593,10 @@ static void update_blocked_averages(int cpu)
2944    
2945     /* Don't need periodic decay once load/util_avg are null */
2946     if (cfs_rq_has_blocked(cfs_rq))
2947     - done = false;
2948     + *done = false;
2949     }
2950    
2951     - update_blocked_load_status(rq, !done);
2952     - rq_unlock_irqrestore(rq, &rf);
2953     + return decayed;
2954     }
2955    
2956     /*
2957     @@ -7638,29 +7646,16 @@ static unsigned long task_h_load(struct task_struct *p)
2958     cfs_rq_load_avg(cfs_rq) + 1);
2959     }
2960     #else
2961     -static inline void update_blocked_averages(int cpu)
2962     +static bool __update_blocked_fair(struct rq *rq, bool *done)
2963     {
2964     - struct rq *rq = cpu_rq(cpu);
2965     struct cfs_rq *cfs_rq = &rq->cfs;
2966     - const struct sched_class *curr_class;
2967     - struct rq_flags rf;
2968     -
2969     - rq_lock_irqsave(rq, &rf);
2970     - update_rq_clock(rq);
2971     -
2972     - /*
2973     - * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
2974     - * that RT, DL and IRQ signals have been updated before updating CFS.
2975     - */
2976     - curr_class = rq->curr->sched_class;
2977     - update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
2978     - update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
2979     - update_irq_load_avg(rq, 0);
2980     + bool decayed;
2981    
2982     - update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
2983     + decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
2984     + if (cfs_rq_has_blocked(cfs_rq))
2985     + *done = false;
2986    
2987     - update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
2988     - rq_unlock_irqrestore(rq, &rf);
2989     + return decayed;
2990     }
2991    
2992     static unsigned long task_h_load(struct task_struct *p)
2993     @@ -7669,6 +7664,24 @@ static unsigned long task_h_load(struct task_struct *p)
2994     }
2995     #endif
2996    
2997     +static void update_blocked_averages(int cpu)
2998     +{
2999     + bool decayed = false, done = true;
3000     + struct rq *rq = cpu_rq(cpu);
3001     + struct rq_flags rf;
3002     +
3003     + rq_lock_irqsave(rq, &rf);
3004     + update_rq_clock(rq);
3005     +
3006     + decayed |= __update_blocked_others(rq, &done);
3007     + decayed |= __update_blocked_fair(rq, &done);
3008     +
3009     + update_blocked_load_status(rq, !done);
3010     + if (decayed)
3011     + cpufreq_update_util(rq, 0);
3012     + rq_unlock_irqrestore(rq, &rf);
3013     +}
3014     +
3015     /********** Helpers for find_busiest_group ************************/
3016    
3017     /*
3018     @@ -10151,7 +10164,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
3019     * This routine is mostly called to set cfs_rq->curr field when a task
3020     * migrates between groups/classes.
3021     */
3022     -static void set_next_task_fair(struct rq *rq, struct task_struct *p)
3023     +static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
3024     {
3025     struct sched_entity *se = &p->se;
3026    
3027     diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
3028     index f65ef1e2f204..131e7c86cf06 100644
3029     --- a/kernel/sched/idle.c
3030     +++ b/kernel/sched/idle.c
3031     @@ -385,7 +385,7 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
3032     {
3033     }
3034    
3035     -static void set_next_task_idle(struct rq *rq, struct task_struct *next)
3036     +static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
3037     {
3038     update_idle_core(rq);
3039     schedstat_inc(rq->sched_goidle);
3040     @@ -399,7 +399,7 @@ pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
3041     if (prev)
3042     put_prev_task(rq, prev);
3043    
3044     - set_next_task_idle(rq, next);
3045     + set_next_task_idle(rq, next, true);
3046    
3047     return next;
3048     }
3049     diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
3050     index 9b8adc01be3d..7bf917e4d63a 100644
3051     --- a/kernel/sched/rt.c
3052     +++ b/kernel/sched/rt.c
3053     @@ -1515,13 +1515,16 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag
3054     #endif
3055     }
3056    
3057     -static inline void set_next_task_rt(struct rq *rq, struct task_struct *p)
3058     +static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
3059     {
3060     p->se.exec_start = rq_clock_task(rq);
3061    
3062     /* The running task is never eligible for pushing */
3063     dequeue_pushable_task(rq, p);
3064    
3065     + if (!first)
3066     + return;
3067     +
3068     /*
3069     * If prev task was rt, put_prev_task() has already updated the
3070     * utilization. We only care of the case where we start to schedule a
3071     @@ -1575,7 +1578,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
3072     return NULL;
3073    
3074     p = _pick_next_task_rt(rq);
3075     - set_next_task_rt(rq, p);
3076     + set_next_task_rt(rq, p, true);
3077     return p;
3078     }
3079    
3080     diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
3081     index 49ed949f850c..e5e2605778c9 100644
3082     --- a/kernel/sched/sched.h
3083     +++ b/kernel/sched/sched.h
3084     @@ -1728,7 +1728,7 @@ struct sched_class {
3085     struct task_struct *prev,
3086     struct rq_flags *rf);
3087     void (*put_prev_task)(struct rq *rq, struct task_struct *p);
3088     - void (*set_next_task)(struct rq *rq, struct task_struct *p);
3089     + void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
3090    
3091     #ifdef CONFIG_SMP
3092     int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
3093     @@ -1780,7 +1780,7 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
3094     static inline void set_next_task(struct rq *rq, struct task_struct *next)
3095     {
3096     WARN_ON_ONCE(rq->curr != next);
3097     - next->sched_class->set_next_task(rq, next);
3098     + next->sched_class->set_next_task(rq, next, false);
3099     }
3100    
3101     #ifdef CONFIG_SMP
3102     diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
3103     index c0640739e05e..02dc0a8e3925 100644
3104     --- a/kernel/sched/stop_task.c
3105     +++ b/kernel/sched/stop_task.c
3106     @@ -29,7 +29,7 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
3107     /* we're never preempted */
3108     }
3109    
3110     -static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
3111     +static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
3112     {
3113     stop->se.exec_start = rq_clock_task(rq);
3114     }
3115     @@ -42,7 +42,7 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
3116     if (!sched_stop_runnable(rq))
3117     return NULL;
3118    
3119     - set_next_task_stop(rq, rq->stop);
3120     + set_next_task_stop(rq, rq->stop, true);
3121     return rq->stop;
3122     }
3123    
3124     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
3125     index 649687622654..e9c63b79e03f 100644
3126     --- a/kernel/workqueue.c
3127     +++ b/kernel/workqueue.c
3128     @@ -425,7 +425,8 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
3129     * ignored.
3130     */
3131     #define for_each_pwq(pwq, wq) \
3132     - list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \
3133     + list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
3134     + lockdep_is_held(&wq->mutex)) \
3135     if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
3136     else
3137    
3138     diff --git a/net/core/neighbour.c b/net/core/neighbour.c
3139     index f2452496ad9f..920784a9b7ff 100644
3140     --- a/net/core/neighbour.c
3141     +++ b/net/core/neighbour.c
3142     @@ -2049,8 +2049,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
3143     goto nla_put_failure;
3144     {
3145     unsigned long now = jiffies;
3146     - unsigned int flush_delta = now - tbl->last_flush;
3147     - unsigned int rand_delta = now - tbl->last_rand;
3148     + long flush_delta = now - tbl->last_flush;
3149     + long rand_delta = now - tbl->last_rand;
3150     struct neigh_hash_table *nht;
3151     struct ndt_config ndc = {
3152     .ndtc_key_len = tbl->key_len,
3153     diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
3154     index b4db68e5caa9..4c826b8bf9b1 100644
3155     --- a/net/core/net-sysfs.c
3156     +++ b/net/core/net-sysfs.c
3157     @@ -1462,14 +1462,17 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
3158     struct kobject *kobj = &queue->kobj;
3159     int error = 0;
3160    
3161     + /* Kobject_put later will trigger netdev_queue_release call
3162     + * which decreases dev refcount: Take that reference here
3163     + */
3164     + dev_hold(queue->dev);
3165     +
3166     kobj->kset = dev->queues_kset;
3167     error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
3168     "tx-%u", index);
3169     if (error)
3170     goto err;
3171    
3172     - dev_hold(queue->dev);
3173     -
3174     #ifdef CONFIG_BQL
3175     error = sysfs_create_group(kobj, &dql_group);
3176     if (error)
3177     diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
3178     index 23f67b8fdeaa..3eed90bfa2bf 100644
3179     --- a/net/openvswitch/datapath.c
3180     +++ b/net/openvswitch/datapath.c
3181     @@ -1667,6 +1667,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
3182     ovs_dp_reset_user_features(skb, info);
3183     }
3184    
3185     + ovs_unlock();
3186     goto err_destroy_meters;
3187     }
3188    
3189     @@ -1683,7 +1684,6 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
3190     return 0;
3191    
3192     err_destroy_meters:
3193     - ovs_unlock();
3194     ovs_meters_exit(dp);
3195     err_destroy_ports_array:
3196     kfree(dp->ports);
3197     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3198     index 529d4ce945db..118cd66b7516 100644
3199     --- a/net/packet/af_packet.c
3200     +++ b/net/packet/af_packet.c
3201     @@ -1296,15 +1296,21 @@ static void packet_sock_destruct(struct sock *sk)
3202    
3203     static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
3204     {
3205     - u32 rxhash;
3206     + u32 *history = po->rollover->history;
3207     + u32 victim, rxhash;
3208     int i, count = 0;
3209    
3210     rxhash = skb_get_hash(skb);
3211     for (i = 0; i < ROLLOVER_HLEN; i++)
3212     - if (po->rollover->history[i] == rxhash)
3213     + if (READ_ONCE(history[i]) == rxhash)
3214     count++;
3215    
3216     - po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
3217     + victim = prandom_u32() % ROLLOVER_HLEN;
3218     +
3219     + /* Avoid dirtying the cache line if possible */
3220     + if (READ_ONCE(history[victim]) != rxhash)
3221     + WRITE_ONCE(history[victim], rxhash);
3222     +
3223     return count > (ROLLOVER_HLEN >> 1);
3224     }
3225    
3226     diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
3227     index 8be2f209982b..908b60a72d95 100644
3228     --- a/net/sunrpc/auth_gss/svcauth_gss.c
3229     +++ b/net/sunrpc/auth_gss/svcauth_gss.c
3230     @@ -1075,24 +1075,32 @@ gss_read_verf(struct rpc_gss_wire_cred *gc,
3231     return 0;
3232     }
3233    
3234     -/* Ok this is really heavily depending on a set of semantics in
3235     - * how rqstp is set up by svc_recv and pages laid down by the
3236     - * server when reading a request. We are basically guaranteed that
3237     - * the token lays all down linearly across a set of pages, starting
3238     - * at iov_base in rq_arg.head[0] which happens to be the first of a
3239     - * set of pages stored in rq_pages[].
3240     - * rq_arg.head[0].iov_base will provide us the page_base to pass
3241     - * to the upcall.
3242     - */
3243     -static inline int
3244     -gss_read_proxy_verf(struct svc_rqst *rqstp,
3245     - struct rpc_gss_wire_cred *gc, __be32 *authp,
3246     - struct xdr_netobj *in_handle,
3247     - struct gssp_in_token *in_token)
3248     +static void gss_free_in_token_pages(struct gssp_in_token *in_token)
3249     {
3250     - struct kvec *argv = &rqstp->rq_arg.head[0];
3251     u32 inlen;
3252     - int res;
3253     + int i;
3254     +
3255     + i = 0;
3256     + inlen = in_token->page_len;
3257     + while (inlen) {
3258     + if (in_token->pages[i])
3259     + put_page(in_token->pages[i]);
3260     + inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
3261     + }
3262     +
3263     + kfree(in_token->pages);
3264     + in_token->pages = NULL;
3265     +}
3266     +
3267     +static int gss_read_proxy_verf(struct svc_rqst *rqstp,
3268     + struct rpc_gss_wire_cred *gc, __be32 *authp,
3269     + struct xdr_netobj *in_handle,
3270     + struct gssp_in_token *in_token)
3271     +{
3272     + struct kvec *argv = &rqstp->rq_arg.head[0];
3273     + unsigned int page_base, length;
3274     + int pages, i, res;
3275     + size_t inlen;
3276    
3277     res = gss_read_common_verf(gc, argv, authp, in_handle);
3278     if (res)
3279     @@ -1102,10 +1110,36 @@ gss_read_proxy_verf(struct svc_rqst *rqstp,
3280     if (inlen > (argv->iov_len + rqstp->rq_arg.page_len))
3281     return SVC_DENIED;
3282    
3283     - in_token->pages = rqstp->rq_pages;
3284     - in_token->page_base = (ulong)argv->iov_base & ~PAGE_MASK;
3285     + pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
3286     + in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
3287     + if (!in_token->pages)
3288     + return SVC_DENIED;
3289     + in_token->page_base = 0;
3290     in_token->page_len = inlen;
3291     + for (i = 0; i < pages; i++) {
3292     + in_token->pages[i] = alloc_page(GFP_KERNEL);
3293     + if (!in_token->pages[i]) {
3294     + gss_free_in_token_pages(in_token);
3295     + return SVC_DENIED;
3296     + }
3297     + }
3298    
3299     + length = min_t(unsigned int, inlen, argv->iov_len);
3300     + memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
3301     + inlen -= length;
3302     +
3303     + i = 1;
3304     + page_base = rqstp->rq_arg.page_base;
3305     + while (inlen) {
3306     + length = min_t(unsigned int, inlen, PAGE_SIZE);
3307     + memcpy(page_address(in_token->pages[i]),
3308     + page_address(rqstp->rq_arg.pages[i]) + page_base,
3309     + length);
3310     +
3311     + inlen -= length;
3312     + page_base = 0;
3313     + i++;
3314     + }
3315     return 0;
3316     }
3317    
3318     @@ -1280,8 +1314,11 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
3319     break;
3320     case GSS_S_COMPLETE:
3321     status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle);
3322     - if (status)
3323     + if (status) {
3324     + pr_info("%s: gss_proxy_save_rsc failed (%d)\n",
3325     + __func__, status);
3326     goto out;
3327     + }
3328     cli_handle.data = (u8 *)&handle;
3329     cli_handle.len = sizeof(handle);
3330     break;
3331     @@ -1292,15 +1329,20 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
3332    
3333     /* Got an answer to the upcall; use it: */
3334     if (gss_write_init_verf(sn->rsc_cache, rqstp,
3335     - &cli_handle, &ud.major_status))
3336     + &cli_handle, &ud.major_status)) {
3337     + pr_info("%s: gss_write_init_verf failed\n", __func__);
3338     goto out;
3339     + }
3340     if (gss_write_resv(resv, PAGE_SIZE,
3341     &cli_handle, &ud.out_token,
3342     - ud.major_status, ud.minor_status))
3343     + ud.major_status, ud.minor_status)) {
3344     + pr_info("%s: gss_write_resv failed\n", __func__);
3345     goto out;
3346     + }
3347    
3348     ret = SVC_COMPLETE;
3349     out:
3350     + gss_free_in_token_pages(&ud.in_token);
3351     gssp_free_upcall_data(&ud);
3352     return ret;
3353     }
3354     diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
3355     index 14ba9e72a204..f3104be8ff5d 100644
3356     --- a/net/sunrpc/xdr.c
3357     +++ b/net/sunrpc/xdr.c
3358     @@ -436,13 +436,12 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
3359     }
3360    
3361     /**
3362     - * xdr_shrink_pagelen
3363     + * xdr_shrink_pagelen - shrinks buf->pages by up to @len bytes
3364     * @buf: xdr_buf
3365     * @len: bytes to remove from buf->pages
3366     *
3367     - * Shrinks XDR buffer's page array buf->pages by
3368     - * 'len' bytes. The extra data is not lost, but is instead
3369     - * moved into the tail.
3370     + * The extra data is not lost, but is instead moved into buf->tail.
3371     + * Returns the actual number of bytes moved.
3372     */
3373     static unsigned int
3374     xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
3375     @@ -455,8 +454,8 @@ xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
3376    
3377     result = 0;
3378     tail = buf->tail;
3379     - BUG_ON (len > pglen);
3380     -
3381     + if (len > buf->page_len)
3382     + len = buf-> page_len;
3383     tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;
3384    
3385     /* Shift the tail first */
3386     diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
3387     index d1fcc41d5eb5..908e78bb87c6 100644
3388     --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
3389     +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
3390     @@ -195,6 +195,7 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
3391     pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer);
3392     #endif
3393    
3394     + rqst->rq_xtime = ktime_get();
3395     rc = svc_rdma_bc_sendto(rdma, rqst, ctxt);
3396     if (rc) {
3397     svc_rdma_send_ctxt_put(rdma, ctxt);
3398     diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
3399     index 70e52f567b2a..5361b98f31ae 100644
3400     --- a/net/sunrpc/xprtsock.c
3401     +++ b/net/sunrpc/xprtsock.c
3402     @@ -2659,6 +2659,8 @@ static int bc_sendto(struct rpc_rqst *req)
3403     .iov_len = sizeof(marker),
3404     };
3405    
3406     + req->rq_xtime = ktime_get();
3407     +
3408     len = kernel_sendmsg(transport->sock, &msg, &iov, 1, iov.iov_len);
3409     if (len != iov.iov_len)
3410     return -EAGAIN;
3411     @@ -2684,7 +2686,6 @@ static int bc_send_request(struct rpc_rqst *req)
3412     struct svc_xprt *xprt;
3413     int len;
3414    
3415     - dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
3416     /*
3417     * Get the server socket associated with this callback xprt
3418     */
3419     diff --git a/net/tipc/link.c b/net/tipc/link.c
3420     index 999eab592de8..a9d8a81e80cf 100644
3421     --- a/net/tipc/link.c
3422     +++ b/net/tipc/link.c
3423     @@ -1084,7 +1084,7 @@ static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
3424     return false;
3425    
3426     if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
3427     - msecs_to_jiffies(r->tolerance)))
3428     + msecs_to_jiffies(r->tolerance * 10)))
3429     return false;
3430    
3431     hdr = buf_msg(skb);
3432     diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
3433     index 6a6eae88442f..58708b4c7719 100644
3434     --- a/net/tipc/monitor.c
3435     +++ b/net/tipc/monitor.c
3436     @@ -665,6 +665,21 @@ void tipc_mon_delete(struct net *net, int bearer_id)
3437     kfree(mon);
3438     }
3439    
3440     +void tipc_mon_reinit_self(struct net *net)
3441     +{
3442     + struct tipc_monitor *mon;
3443     + int bearer_id;
3444     +
3445     + for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
3446     + mon = tipc_monitor(net, bearer_id);
3447     + if (!mon)
3448     + continue;
3449     + write_lock_bh(&mon->lock);
3450     + mon->self->addr = tipc_own_addr(net);
3451     + write_unlock_bh(&mon->lock);
3452     + }
3453     +}
3454     +
3455     int tipc_nl_monitor_set_threshold(struct net *net, u32 cluster_size)
3456     {
3457     struct tipc_net *tn = tipc_net(net);
3458     diff --git a/net/tipc/monitor.h b/net/tipc/monitor.h
3459     index 2a21b93e0d04..ed63d2e650b0 100644
3460     --- a/net/tipc/monitor.h
3461     +++ b/net/tipc/monitor.h
3462     @@ -77,6 +77,7 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
3463     u32 bearer_id);
3464     int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
3465     u32 bearer_id, u32 *prev_node);
3466     +void tipc_mon_reinit_self(struct net *net);
3467    
3468     extern const int tipc_max_domain_size;
3469     #endif
3470     diff --git a/net/tipc/net.c b/net/tipc/net.c
3471     index 85707c185360..2de3cec9929d 100644
3472     --- a/net/tipc/net.c
3473     +++ b/net/tipc/net.c
3474     @@ -42,6 +42,7 @@
3475     #include "node.h"
3476     #include "bcast.h"
3477     #include "netlink.h"
3478     +#include "monitor.h"
3479    
3480     /*
3481     * The TIPC locking policy is designed to ensure a very fine locking
3482     @@ -136,6 +137,7 @@ static void tipc_net_finalize(struct net *net, u32 addr)
3483     tipc_set_node_addr(net, addr);
3484     tipc_named_reinit(net);
3485     tipc_sk_reinit(net);
3486     + tipc_mon_reinit_self(net);
3487     tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
3488     TIPC_CLUSTER_SCOPE, 0, addr);
3489     }
3490     diff --git a/net/tipc/socket.c b/net/tipc/socket.c
3491     index 79d06c21ebe3..aea951a1f805 100644
3492     --- a/net/tipc/socket.c
3493     +++ b/net/tipc/socket.c
3494     @@ -504,7 +504,7 @@ static void __tipc_shutdown(struct socket *sock, int error)
3495     struct sock *sk = sock->sk;
3496     struct tipc_sock *tsk = tipc_sk(sk);
3497     struct net *net = sock_net(sk);
3498     - long timeout = CONN_TIMEOUT_DEFAULT;
3499     + long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
3500     u32 dnode = tsk_peer_node(tsk);
3501     struct sk_buff *skb;
3502    
3503     @@ -1396,8 +1396,10 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
3504     rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
3505     if (unlikely(rc != dlen))
3506     return rc;
3507     - if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
3508     + if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
3509     + __skb_queue_purge(&pkts);
3510     return -ENOMEM;
3511     + }
3512    
3513     trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
3514     rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
3515     @@ -2685,6 +2687,7 @@ static void tipc_sk_timeout(struct timer_list *t)
3516     if (sock_owned_by_user(sk)) {
3517     sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
3518     bh_unlock_sock(sk);
3519     + sock_put(sk);
3520     return;
3521     }
3522    
3523     diff --git a/samples/bpf/sockex1_kern.c b/samples/bpf/sockex1_kern.c
3524     index ed18e9a4909c..43e38ce594d4 100644
3525     --- a/samples/bpf/sockex1_kern.c
3526     +++ b/samples/bpf/sockex1_kern.c
3527     @@ -4,12 +4,12 @@
3528     #include <uapi/linux/ip.h>
3529     #include "bpf_helpers.h"
3530    
3531     -struct bpf_map_def SEC("maps") my_map = {
3532     - .type = BPF_MAP_TYPE_ARRAY,
3533     - .key_size = sizeof(u32),
3534     - .value_size = sizeof(long),
3535     - .max_entries = 256,
3536     -};
3537     +struct {
3538     + __uint(type, BPF_MAP_TYPE_ARRAY);
3539     + __type(key, u32);
3540     + __type(value, long);
3541     + __uint(max_entries, 256);
3542     +} my_map SEC(".maps");
3543    
3544     SEC("socket1")
3545     int bpf_prog1(struct __sk_buff *skb)
3546     diff --git a/samples/bpf/sockex2_kern.c b/samples/bpf/sockex2_kern.c
3547     index f2f9dbc021b0..ae4bdc89b599 100644
3548     --- a/samples/bpf/sockex2_kern.c
3549     +++ b/samples/bpf/sockex2_kern.c
3550     @@ -189,12 +189,12 @@ struct pair {
3551     long bytes;
3552     };
3553    
3554     -struct bpf_map_def SEC("maps") hash_map = {
3555     - .type = BPF_MAP_TYPE_HASH,
3556     - .key_size = sizeof(__be32),
3557     - .value_size = sizeof(struct pair),
3558     - .max_entries = 1024,
3559     -};
3560     +struct {
3561     + __uint(type, BPF_MAP_TYPE_HASH);
3562     + __type(key, __be32);
3563     + __type(value, struct pair);
3564     + __uint(max_entries, 1024);
3565     +} hash_map SEC(".maps");
3566    
3567     SEC("socket2")
3568     int bpf_prog2(struct __sk_buff *skb)
3569     diff --git a/samples/bpf/xdp1_kern.c b/samples/bpf/xdp1_kern.c
3570     index 219742106bfd..db6870aee42c 100644
3571     --- a/samples/bpf/xdp1_kern.c
3572     +++ b/samples/bpf/xdp1_kern.c
3573     @@ -14,12 +14,12 @@
3574     #include <linux/ipv6.h>
3575     #include "bpf_helpers.h"
3576    
3577     -struct bpf_map_def SEC("maps") rxcnt = {
3578     - .type = BPF_MAP_TYPE_PERCPU_ARRAY,
3579     - .key_size = sizeof(u32),
3580     - .value_size = sizeof(long),
3581     - .max_entries = 256,
3582     -};
3583     +struct {
3584     + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
3585     + __type(key, u32);
3586     + __type(value, long);
3587     + __uint(max_entries, 256);
3588     +} rxcnt SEC(".maps");
3589    
3590     static int parse_ipv4(void *data, u64 nh_off, void *data_end)
3591     {
3592     diff --git a/samples/bpf/xdp2_kern.c b/samples/bpf/xdp2_kern.c
3593     index e01288867d15..c74b52c6d945 100644
3594     --- a/samples/bpf/xdp2_kern.c
3595     +++ b/samples/bpf/xdp2_kern.c
3596     @@ -14,12 +14,12 @@
3597     #include <linux/ipv6.h>
3598     #include "bpf_helpers.h"
3599    
3600     -struct bpf_map_def SEC("maps") rxcnt = {
3601     - .type = BPF_MAP_TYPE_PERCPU_ARRAY,
3602     - .key_size = sizeof(u32),
3603     - .value_size = sizeof(long),
3604     - .max_entries = 256,
3605     -};
3606     +struct {
3607     + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
3608     + __type(key, u32);
3609     + __type(value, long);
3610     + __uint(max_entries, 256);
3611     +} rxcnt SEC(".maps");
3612    
3613     static void swap_src_dst_mac(void *data)
3614     {
3615     diff --git a/samples/bpf/xdp_adjust_tail_kern.c b/samples/bpf/xdp_adjust_tail_kern.c
3616     index 411fdb21f8bc..cd9ff2a40a39 100644
3617     --- a/samples/bpf/xdp_adjust_tail_kern.c
3618     +++ b/samples/bpf/xdp_adjust_tail_kern.c
3619     @@ -25,12 +25,12 @@
3620     #define ICMP_TOOBIG_SIZE 98
3621     #define ICMP_TOOBIG_PAYLOAD_SIZE 92
3622    
3623     -struct bpf_map_def SEC("maps") icmpcnt = {
3624     - .type = BPF_MAP_TYPE_ARRAY,
3625     - .key_size = sizeof(__u32),
3626     - .value_size = sizeof(__u64),
3627     - .max_entries = 1,
3628     -};
3629     +struct {
3630     + __uint(type, BPF_MAP_TYPE_ARRAY);
3631     + __type(key, __u32);
3632     + __type(value, __u64);
3633     + __uint(max_entries, 1);
3634     +} icmpcnt SEC(".maps");
3635    
3636     static __always_inline void count_icmp(void)
3637     {
3638     diff --git a/samples/bpf/xdp_fwd_kern.c b/samples/bpf/xdp_fwd_kern.c
3639     index 701a30f258b1..d013029aeaa2 100644
3640     --- a/samples/bpf/xdp_fwd_kern.c
3641     +++ b/samples/bpf/xdp_fwd_kern.c
3642     @@ -23,13 +23,12 @@
3643    
3644     #define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
3645    
3646     -/* For TX-traffic redirect requires net_device ifindex to be in this devmap */
3647     -struct bpf_map_def SEC("maps") xdp_tx_ports = {
3648     - .type = BPF_MAP_TYPE_DEVMAP,
3649     - .key_size = sizeof(int),
3650     - .value_size = sizeof(int),
3651     - .max_entries = 64,
3652     -};
3653     +struct {
3654     + __uint(type, BPF_MAP_TYPE_DEVMAP);
3655     + __uint(key_size, sizeof(int));
3656     + __uint(value_size, sizeof(int));
3657     + __uint(max_entries, 64);
3658     +} xdp_tx_ports SEC(".maps");
3659    
3660     /* from include/net/ip.h */
3661     static __always_inline int ip_decrease_ttl(struct iphdr *iph)
3662     diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c
3663     index a306d1c75622..cfcc31e51197 100644
3664     --- a/samples/bpf/xdp_redirect_cpu_kern.c
3665     +++ b/samples/bpf/xdp_redirect_cpu_kern.c
3666     @@ -18,12 +18,12 @@
3667     #define MAX_CPUS 64 /* WARNING - sync with _user.c */
3668    
3669     /* Special map type that can XDP_REDIRECT frames to another CPU */
3670     -struct bpf_map_def SEC("maps") cpu_map = {
3671     - .type = BPF_MAP_TYPE_CPUMAP,
3672     - .key_size = sizeof(u32),
3673     - .value_size = sizeof(u32),
3674     - .max_entries = MAX_CPUS,
3675     -};
3676     +struct {
3677     + __uint(type, BPF_MAP_TYPE_CPUMAP);
3678     + __uint(key_size, sizeof(u32));
3679     + __uint(value_size, sizeof(u32));
3680     + __uint(max_entries, MAX_CPUS);
3681     +} cpu_map SEC(".maps");
3682    
3683     /* Common stats data record to keep userspace more simple */
3684     struct datarec {
3685     @@ -35,67 +35,67 @@ struct datarec {
3686     /* Count RX packets, as XDP bpf_prog doesn't get direct TX-success
3687     * feedback. Redirect TX errors can be caught via a tracepoint.
3688     */
3689     -struct bpf_map_def SEC("maps") rx_cnt = {
3690     - .type = BPF_MAP_TYPE_PERCPU_ARRAY,
3691     - .key_size = sizeof(u32),
3692     - .value_size = sizeof(struct datarec),
3693     - .max_entries = 1,
3694     -};
3695     +struct {
3696     + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
3697     + __type(key, u32);
3698     + __type(value, struct datarec);
3699     + __uint(max_entries, 1);
3700     +} rx_cnt SEC(".maps");
3701    
3702     /* Used by trace point */
3703     -struct bpf_map_def SEC("maps") redirect_err_cnt = {
3704     - .type = BPF_MAP_TYPE_PERCPU_ARRAY,
3705     - .key_size = sizeof(u32),
3706     - .value_size = sizeof(struct datarec),
3707     - .max_entries = 2,
3708     +struct {
3709     + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
3710     + __type(key, u32);
3711     + __type(value, struct datarec);
3712     + __uint(max_entries, 2);
3713     /* TODO: have entries for all possible errno's */
3714     -};
3715     +} redirect_err_cnt SEC(".maps");
3716    
3717     /* Used by trace point */
3718     -struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
3719     - .type = BPF_MAP_TYPE_PERCPU_ARRAY,
3720     - .key_size = sizeof(u32),
3721     - .value_size = sizeof(struct datarec),
3722     - .max_entries = MAX_CPUS,
3723     -};
3724     +struct {
3725     + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
3726     + __type(key, u32);
3727     + __type(value, struct datarec);
3728     + __uint(max_entries, MAX_CPUS);
3729     +} cpumap_enqueue_cnt SEC(".maps");
3730    
3731     /* Used by trace point */
3732     -struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
3733     - .type = BPF_MAP_TYPE_PERCPU_ARRAY,
3734     - .key_size = sizeof(u32),
3735     - .value_size = sizeof(struct datarec),
3736     - .max_entries = 1,
3737     -};
3738     +struct {
3739     + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
3740     + __type(key, u32);
3741     + __type(value, struct datarec);
3742     + __uint(max_entries, 1);
3743     +} cpumap_kthread_cnt SEC(".maps");
3744    
3745     /* Set of maps controlling available CPU, and for iterating through
3746     * selectable redirect CPUs.
3747     */
3748     -struct bpf_map_def SEC("maps") cpus_available = {
3749     - .type = BPF_MAP_TYPE_ARRAY,
3750     - .key_size = sizeof(u32),
3751     - .value_size = sizeof(u32),
3752     - .max_entries = MAX_CPUS,
3753     -};
3754     -struct bpf_map_def SEC("maps") cpus_count = {
3755     - .type = BPF_MAP_TYPE_ARRAY,
3756     - .key_size = sizeof(u32),
3757     - .value_size = sizeof(u32),
3758     - .max_entries = 1,
3759     -};
3760     -struct bpf_map_def SEC("maps") cpus_iterator = {
3761     - .type = BPF_MAP_TYPE_PERCPU_ARRAY,
3762     - .key_size = sizeof(u32),
3763     - .value_size = sizeof(u32),
3764     - .max_entries = 1,
3765     -};
3766     +struct {
3767     + __uint(type, BPF_MAP_TYPE_ARRAY);
3768     + __type(key, u32);
3769     + __type(value, u32);
3770     + __uint(max_entries, MAX_CPUS);
3771     +} cpus_available SEC(".maps");
3772     +struct {
3773     + __uint(type, BPF_MAP_TYPE_ARRAY);
3774     + __type(key, u32);
3775     + __type(value, u32);
3776     + __uint(max_entries, 1);
3777     +} cpus_count SEC(".maps");
3778     +struct {
3779     + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
3780     + __type(key, u32);
3781     + __type(value, u32);
3782     + __uint(max_entries, 1);
3783     +} cpus_iterator SEC(".maps");
3784    
3785     /* Used by trace point */
3786     -struct bpf_map_def SEC("maps") exception_cnt = {
3787     - .type = BPF_MAP_TYPE_PERCPU_ARRAY,
3788     - .key_size = sizeof(u32),
3789     - .value_size = sizeof(struct datarec),
3790     - .max_entries = 1,
3791     -};
3792     +struct {
3793     + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
3794     + __type(key, u32);
3795     + __type(value, struct datarec);
3796     + __uint(max_entries, 1);
3797     +} exception_cnt SEC(".maps");
3798    
3799     /* Helper parse functions */
3800    
3801     diff --git a/samples/bpf/xdp_redirect_kern.c b/samples/bpf/xdp_redirect_kern.c
3802     index 8abb151e385f..1f0b7d05abb2 100644
3803     --- a/samples/bpf/xdp_redirect_kern.c
3804     +++ b/samples/bpf/xdp_redirect_kern.c
3805     @@ -19,22 +19,22 @@
3806     #include <linux/ipv6.h>
3807     #include "bpf_helpers.h"
3808    
3809     -struct bpf_map_def SEC("maps") tx_port = {
3810     - .type = BPF_MAP_TYPE_ARRAY,
3811     - .key_size = sizeof(int),
3812     - .value_size = sizeof(int),
3813     - .max_entries = 1,
3814     -};
3815     +struct {
3816     + __uint(type, BPF_MAP_TYPE_ARRAY);
3817     + __type(key, int);
3818     + __type(value, int);
3819     + __uint(max_entries, 1);
3820     +} tx_port SEC(".maps");
3821    
3822     /* Count RX packets, as XDP bpf_prog doesn't get direct TX-success
3823     * feedback. Redirect TX errors can be caught via a tracepoint.
3824     */
3825     -struct bpf_map_def SEC("maps") rxcnt = {
3826     - .type = BPF_MAP_TYPE_PERCPU_ARRAY,
3827     - .key_size = sizeof(u32),
3828     - .value_size = sizeof(long),
3829     - .max_entries = 1,
3830     -};
3831     +struct {
3832     + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
3833     + __type(key, u32);
3834     + __type(value, long);
3835     + __uint(max_entries, 1);
3836     +} rxcnt SEC(".maps");
3837    
3838     static void swap_src_dst_mac(void *data)
3839     {
3840     diff --git a/samples/bpf/xdp_redirect_map_kern.c b/samples/bpf/xdp_redirect_map_kern.c
3841     index 740a529ba84f..4631b484c432 100644
3842     --- a/samples/bpf/xdp_redirect_map_kern.c
3843     +++ b/samples/bpf/xdp_redirect_map_kern.c
3844     @@ -19,22 +19,22 @@
3845     #include <linux/ipv6.h>
3846     #include "bpf_helpers.h"
3847    
3848     -struct bpf_map_def SEC("maps") tx_port = {
3849     - .type = BPF_MAP_TYPE_DEVMAP,
3850     - .key_size = sizeof(int),
3851     - .value_size = sizeof(int),
3852     - .max_entries = 100,
3853     -};
3854     +struct {
3855     + __uint(type, BPF_MAP_TYPE_DEVMAP);
3856     + __uint(key_size, sizeof(int));
3857     + __uint(value_size, sizeof(int));
3858     + __uint(max_entries, 100);
3859     +} tx_port SEC(".maps");
3860    
3861     /* Count RX packets, as XDP bpf_prog doesn't get direct TX-success
3862     * feedback. Redirect TX errors can be caught via a tracepoint.
3863     */
3864     -struct bpf_map_def SEC("maps") rxcnt = {
3865     - .type = BPF_MAP_TYPE_PERCPU_ARRAY,
3866     - .key_size = sizeof(u32),
3867     - .value_size = sizeof(long),
3868     - .max_entries = 1,
3869     -};
3870     +struct {
3871     + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
3872     + __type(key, u32);
3873     + __type(value, long);
3874     + __uint(max_entries, 1);
3875     +} rxcnt SEC(".maps");
3876    
3877     static void swap_src_dst_mac(void *data)
3878     {
3879     diff --git a/samples/bpf/xdp_router_ipv4_kern.c b/samples/bpf/xdp_router_ipv4_kern.c
3880     index 993f56bc7b9a..bf11efc8e949 100644
3881     --- a/samples/bpf/xdp_router_ipv4_kern.c
3882     +++ b/samples/bpf/xdp_router_ipv4_kern.c
3883     @@ -42,44 +42,44 @@ struct direct_map {
3884     };
3885    
3886     /* Map for trie implementation*/
3887     -struct bpf_map_def SEC("maps") lpm_map = {
3888     - .type = BPF_MAP_TYPE_LPM_TRIE,
3889     - .key_size = 8,
3890     - .value_size = sizeof(struct trie_value),
3891     - .max_entries = 50,
3892     - .map_flags = BPF_F_NO_PREALLOC,
3893     -};
3894     +struct {
3895     + __uint(type, BPF_MAP_TYPE_LPM_TRIE);
3896     + __uint(key_size, 8);
3897     + __uint(value_size, sizeof(struct trie_value));
3898     + __uint(max_entries, 50);
3899     + __uint(map_flags, BPF_F_NO_PREALLOC);
3900     +} lpm_map SEC(".maps");
3901    
3902     /* Map for counter*/
3903     -struct bpf_map_def SEC("maps") rxcnt = {
3904     - .type = BPF_MAP_TYPE_PERCPU_ARRAY,
3905     - .key_size = sizeof(u32),
3906     - .value_size = sizeof(u64),
3907     - .max_entries = 256,
3908     -};
3909     +struct {
3910     + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
3911     + __type(key, u32);
3912     + __type(value, u64);
3913     + __uint(max_entries, 256);
3914     +} rxcnt SEC(".maps");
3915    
3916     /* Map for ARP table*/
3917     -struct bpf_map_def SEC("maps") arp_table = {
3918     - .type = BPF_MAP_TYPE_HASH,
3919     - .key_size = sizeof(__be32),
3920     - .value_size = sizeof(__be64),
3921     - .max_entries = 50,
3922     -};
3923     +struct {
3924     + __uint(type, BPF_MAP_TYPE_HASH);
3925     + __type(key, __be32);
3926     + __type(value, __be64);
3927     + __uint(max_entries, 50);
3928     +} arp_table SEC(".maps");
3929    
3930     /* Map to keep the exact match entries in the route table*/
3931     -struct bpf_map_def SEC("maps") exact_match = {
3932     - .type = BPF_MAP_TYPE_HASH,
3933     - .key_size = sizeof(__be32),
3934     - .value_size = sizeof(struct direct_map),
3935     - .max_entries = 50,
3936     -};
3937     -
3938     -struct bpf_map_def SEC("maps") tx_port = {
3939     - .type = BPF_MAP_TYPE_DEVMAP,
3940     - .key_size = sizeof(int),
3941     - .value_size = sizeof(int),
3942     - .max_entries = 100,
3943     -};
3944     +struct {
3945     + __uint(type, BPF_MAP_TYPE_HASH);
3946     + __type(key, __be32);
3947     + __type(value, struct direct_map);
3948     + __uint(max_entries, 50);
3949     +} exact_match SEC(".maps");
3950     +
3951     +struct {
3952     + __uint(type, BPF_MAP_TYPE_DEVMAP);
3953     + __uint(key_size, sizeof(int));
3954     + __uint(value_size, sizeof(int));
3955     + __uint(max_entries, 100);
3956     +} tx_port SEC(".maps");
3957    
3958     /* Function to set source and destination mac of the packet */
3959     static inline void set_src_dst_mac(void *data, void *src, void *dst)
3960     diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c
3961     index 222a83eed1cb..272d0f82a6b5 100644
3962     --- a/samples/bpf/xdp_rxq_info_kern.c
3963     +++ b/samples/bpf/xdp_rxq_info_kern.c
3964     @@ -23,12 +23,13 @@ enum cfg_options_flags {
3965     READ_MEM = 0x1U,
3966     SWAP_MAC = 0x2U,
3967     };
3968     -struct bpf_map_def SEC("maps") config_map = {
3969     - .type = BPF_MAP_TYPE_ARRAY,
3970     - .key_size = sizeof(int),
3971     - .value_size = sizeof(struct config),
3972     - .max_entries = 1,
3973     -};
3974     +
3975     +struct {
3976     + __uint(type, BPF_MAP_TYPE_ARRAY);
3977     + __type(key, int);
3978     + __type(value, struct config);
3979     + __uint(max_entries, 1);
3980     +} config_map SEC(".maps");
3981    
3982     /* Common stats data record (shared with userspace) */
3983     struct datarec {
3984     @@ -36,22 +37,22 @@ struct datarec {
3985     __u64 issue;
3986     };
3987    
3988     -struct bpf_map_def SEC("maps") stats_global_map = {
3989     - .type = BPF_MAP_TYPE_PERCPU_ARRAY,
3990     - .key_size = sizeof(u32),
3991     - .value_size = sizeof(struct datarec),
3992     - .max_entries = 1,
3993     -};
3994     +struct {
3995     + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
3996     + __type(key, u32);
3997     + __type(value, struct datarec);
3998     + __uint(max_entries, 1);
3999     +} stats_global_map SEC(".maps");
4000    
4001     #define MAX_RXQs 64
4002    
4003     /* Stats per rx_queue_index (per CPU) */
4004     -struct bpf_map_def SEC("maps") rx_queue_index_map = {
4005     - .type = BPF_MAP_TYPE_PERCPU_ARRAY,
4006     - .key_size = sizeof(u32),
4007     - .value_size = sizeof(struct datarec),
4008     - .max_entries = MAX_RXQs + 1,
4009     -};
4010     +struct {
4011     + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
4012     + __type(key, u32);
4013     + __type(value, struct datarec);
4014     + __uint(max_entries, MAX_RXQs + 1);
4015     +} rx_queue_index_map SEC(".maps");
4016    
4017     static __always_inline
4018     void swap_src_dst_mac(void *data)
4019     diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
4020     index c7e4e45d824a..b88df17853b8 100644
4021     --- a/samples/bpf/xdp_rxq_info_user.c
4022     +++ b/samples/bpf/xdp_rxq_info_user.c
4023     @@ -489,9 +489,9 @@ int main(int argc, char **argv)
4024     if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
4025     return EXIT_FAIL;
4026    
4027     - map = bpf_map__next(NULL, obj);
4028     - stats_global_map = bpf_map__next(map, obj);
4029     - rx_queue_index_map = bpf_map__next(stats_global_map, obj);
4030     + map = bpf_object__find_map_by_name(obj, "config_map");
4031     + stats_global_map = bpf_object__find_map_by_name(obj, "stats_global_map");
4032     + rx_queue_index_map = bpf_object__find_map_by_name(obj, "rx_queue_index_map");
4033     if (!map || !stats_global_map || !rx_queue_index_map) {
4034     printf("finding a map in obj file failed\n");
4035     return EXIT_FAIL;
4036     diff --git a/samples/bpf/xdp_tx_iptunnel_kern.c b/samples/bpf/xdp_tx_iptunnel_kern.c
4037     index 0f4f6e8c8611..6db450a5c1ca 100644
4038     --- a/samples/bpf/xdp_tx_iptunnel_kern.c
4039     +++ b/samples/bpf/xdp_tx_iptunnel_kern.c
4040     @@ -19,19 +19,19 @@
4041     #include "bpf_helpers.h"
4042     #include "xdp_tx_iptunnel_common.h"
4043    
4044     -struct bpf_map_def SEC("maps") rxcnt = {
4045     - .type = BPF_MAP_TYPE_PERCPU_ARRAY,
4046     - .key_size = sizeof(__u32),
4047     - .value_size = sizeof(__u64),
4048     - .max_entries = 256,
4049     -};
4050     -
4051     -struct bpf_map_def SEC("maps") vip2tnl = {
4052     - .type = BPF_MAP_TYPE_HASH,
4053     - .key_size = sizeof(struct vip),
4054     - .value_size = sizeof(struct iptnl_info),
4055     - .max_entries = MAX_IPTNL_ENTRIES,
4056     -};
4057     +struct {
4058     + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
4059     + __type(key, __u32);
4060     + __type(value, __u64);
4061     + __uint(max_entries, 256);
4062     +} rxcnt SEC(".maps");
4063     +
4064     +struct {
4065     + __uint(type, BPF_MAP_TYPE_HASH);
4066     + __type(key, struct vip);
4067     + __type(value, struct iptnl_info);
4068     + __uint(max_entries, MAX_IPTNL_ENTRIES);
4069     +} vip2tnl SEC(".maps");
4070    
4071     static __always_inline void count_tx(u32 protocol)
4072     {
4073     diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
4074     index 2998ddb323e3..436379940356 100755
4075     --- a/scripts/link-vmlinux.sh
4076     +++ b/scripts/link-vmlinux.sh
4077     @@ -127,8 +127,9 @@ gen_btf()
4078     cut -d, -f1 | cut -d' ' -f2)
4079     bin_format=$(LANG=C ${OBJDUMP} -f ${1} | grep 'file format' | \
4080     awk '{print $4}')
4081     - ${OBJCOPY} --set-section-flags .BTF=alloc -O binary \
4082     - --only-section=.BTF ${1} .btf.vmlinux.bin 2>/dev/null
4083     + ${OBJCOPY} --change-section-address .BTF=0 \
4084     + --set-section-flags .BTF=alloc -O binary \
4085     + --only-section=.BTF ${1} .btf.vmlinux.bin
4086     ${OBJCOPY} -I binary -O ${bin_format} -B ${bin_arch} \
4087     --rename-section .data=.BTF .btf.vmlinux.bin ${2}
4088     }
4089     diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
4090     index cbb933532981..9d0485959308 100644
4091     --- a/tools/lib/bpf/bpf.c
4092     +++ b/tools/lib/bpf/bpf.c
4093     @@ -189,7 +189,7 @@ static void *
4094     alloc_zero_tailing_info(const void *orecord, __u32 cnt,
4095     __u32 actual_rec_size, __u32 expected_rec_size)
4096     {
4097     - __u64 info_len = actual_rec_size * cnt;
4098     + __u64 info_len = (__u64)actual_rec_size * cnt;
4099     void *info, *nrecord;
4100     int i;
4101    
4102     diff --git a/tools/lib/bpf/bpf_prog_linfo.c b/tools/lib/bpf/bpf_prog_linfo.c
4103     index 8c67561c93b0..3ed1a27b5f7c 100644
4104     --- a/tools/lib/bpf/bpf_prog_linfo.c
4105     +++ b/tools/lib/bpf/bpf_prog_linfo.c
4106     @@ -101,6 +101,7 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
4107     {
4108     struct bpf_prog_linfo *prog_linfo;
4109     __u32 nr_linfo, nr_jited_func;
4110     + __u64 data_sz;
4111    
4112     nr_linfo = info->nr_line_info;
4113    
4114     @@ -122,11 +123,11 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
4115     /* Copy xlated line_info */
4116     prog_linfo->nr_linfo = nr_linfo;
4117     prog_linfo->rec_size = info->line_info_rec_size;
4118     - prog_linfo->raw_linfo = malloc(nr_linfo * prog_linfo->rec_size);
4119     + data_sz = (__u64)nr_linfo * prog_linfo->rec_size;
4120     + prog_linfo->raw_linfo = malloc(data_sz);
4121     if (!prog_linfo->raw_linfo)
4122     goto err_free;
4123     - memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info,
4124     - nr_linfo * prog_linfo->rec_size);
4125     + memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info, data_sz);
4126    
4127     nr_jited_func = info->nr_jited_ksyms;
4128     if (!nr_jited_func ||
4129     @@ -142,13 +143,12 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
4130     /* Copy jited_line_info */
4131     prog_linfo->nr_jited_func = nr_jited_func;
4132     prog_linfo->jited_rec_size = info->jited_line_info_rec_size;
4133     - prog_linfo->raw_jited_linfo = malloc(nr_linfo *
4134     - prog_linfo->jited_rec_size);
4135     + data_sz = (__u64)nr_linfo * prog_linfo->jited_rec_size;
4136     + prog_linfo->raw_jited_linfo = malloc(data_sz);
4137     if (!prog_linfo->raw_jited_linfo)
4138     goto err_free;
4139     memcpy(prog_linfo->raw_jited_linfo,
4140     - (void *)(long)info->jited_line_info,
4141     - nr_linfo * prog_linfo->jited_rec_size);
4142     + (void *)(long)info->jited_line_info, data_sz);
4143    
4144     /* Number of jited_line_info per jited func */
4145     prog_linfo->nr_jited_linfo_per_func = malloc(nr_jited_func *
4146     diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
4147     index 1aa189a9112a..d606a358480d 100644
4148     --- a/tools/lib/bpf/btf.c
4149     +++ b/tools/lib/bpf/btf.c
4150     @@ -269,10 +269,9 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
4151     t = btf__type_by_id(btf, type_id);
4152     }
4153    
4154     +done:
4155     if (size < 0)
4156     return -EINVAL;
4157     -
4158     -done:
4159     if (nelems && size > UINT32_MAX / nelems)
4160     return -E2BIG;
4161    
4162     diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
4163     index a267cd0c0ce2..d98838c5820c 100644
4164     --- a/tools/lib/bpf/libbpf.c
4165     +++ b/tools/lib/bpf/libbpf.c
4166     @@ -3220,6 +3220,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
4167     pr_warning("oom in prog realloc\n");
4168     return -ENOMEM;
4169     }
4170     + prog->insns = new_insn;
4171    
4172     if (obj->btf_ext) {
4173     err = bpf_program_reloc_btf_ext(prog, obj,
4174     @@ -3231,7 +3232,6 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
4175    
4176     memcpy(new_insn + prog->insns_cnt, text->insns,
4177     text->insns_cnt * sizeof(*insn));
4178     - prog->insns = new_insn;
4179     prog->main_prog_cnt = prog->insns_cnt;
4180     prog->insns_cnt = new_cnt;
4181     pr_debug("added %zd insn from %s to prog %s\n",
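
The libbpf.c hunk moves the prog->insns = new_insn assignment to immediately after the realloc succeeds, so an early error return from bpf_program_reloc_btf_ext() cannot leave prog->insns pointing at a buffer that realloc() may already have freed. A minimal sketch of that general pattern, using hypothetical names rather than the libbpf API:

#include <errno.h>
#include <stdlib.h>

struct buf {
	int *items;
	size_t cnt;
};

/* Grow 'b' by 'extra' items; 'fixup' may fail after the buffer has grown. */
int buf_grow(struct buf *b, size_t extra, int (*fixup)(struct buf *))
{
	int *n = realloc(b->items, (b->cnt + extra) * sizeof(*n));

	if (!n)
		return -ENOMEM;

	/* Store the new pointer immediately: if fixup() fails below,
	 * b->items must not still reference the old, possibly freed block.
	 */
	b->items = n;

	if (fixup && fixup(b))
		return -EINVAL;

	b->cnt += extra;
	return 0;
}
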
4182     diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
4183     index 70f9e10de286..0c7386b0e42e 100644
4184     --- a/tools/lib/bpf/xsk.c
4185     +++ b/tools/lib/bpf/xsk.c
4186     @@ -73,6 +73,21 @@ struct xsk_nl_info {
4187     int fd;
4188     };
4189    
4190     +/* Up until and including Linux 5.3 */
4191     +struct xdp_ring_offset_v1 {
4192     + __u64 producer;
4193     + __u64 consumer;
4194     + __u64 desc;
4195     +};
4196     +
4197     +/* Up until and including Linux 5.3 */
4198     +struct xdp_mmap_offsets_v1 {
4199     + struct xdp_ring_offset_v1 rx;
4200     + struct xdp_ring_offset_v1 tx;
4201     + struct xdp_ring_offset_v1 fr;
4202     + struct xdp_ring_offset_v1 cr;
4203     +};
4204     +
4205     int xsk_umem__fd(const struct xsk_umem *umem)
4206     {
4207     return umem ? umem->fd : -EINVAL;
4208     @@ -133,6 +148,58 @@ static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
4209     return 0;
4210     }
4211    
4212     +static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off)
4213     +{
4214     + struct xdp_mmap_offsets_v1 off_v1;
4215     +
4216     + /* getsockopt on a kernel <= 5.3 has no flags fields.
4217     + * Copy over the offsets to the correct places in the >=5.4 format
4218     + * and put the flags where they would have been on that kernel.
4219     + */
4220     + memcpy(&off_v1, off, sizeof(off_v1));
4221     +
4222     + off->rx.producer = off_v1.rx.producer;
4223     + off->rx.consumer = off_v1.rx.consumer;
4224     + off->rx.desc = off_v1.rx.desc;
4225     + off->rx.flags = off_v1.rx.consumer + sizeof(__u32);
4226     +
4227     + off->tx.producer = off_v1.tx.producer;
4228     + off->tx.consumer = off_v1.tx.consumer;
4229     + off->tx.desc = off_v1.tx.desc;
4230     + off->tx.flags = off_v1.tx.consumer + sizeof(__u32);
4231     +
4232     + off->fr.producer = off_v1.fr.producer;
4233     + off->fr.consumer = off_v1.fr.consumer;
4234     + off->fr.desc = off_v1.fr.desc;
4235     + off->fr.flags = off_v1.fr.consumer + sizeof(__u32);
4236     +
4237     + off->cr.producer = off_v1.cr.producer;
4238     + off->cr.consumer = off_v1.cr.consumer;
4239     + off->cr.desc = off_v1.cr.desc;
4240     + off->cr.flags = off_v1.cr.consumer + sizeof(__u32);
4241     +}
4242     +
4243     +static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
4244     +{
4245     + socklen_t optlen;
4246     + int err;
4247     +
4248     + optlen = sizeof(*off);
4249     + err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen);
4250     + if (err)
4251     + return err;
4252     +
4253     + if (optlen == sizeof(*off))
4254     + return 0;
4255     +
4256     + if (optlen == sizeof(struct xdp_mmap_offsets_v1)) {
4257     + xsk_mmap_offsets_v1(off);
4258     + return 0;
4259     + }
4260     +
4261     + return -EINVAL;
4262     +}
4263     +
4264     int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
4265     __u64 size, struct xsk_ring_prod *fill,
4266     struct xsk_ring_cons *comp,
4267     @@ -141,7 +208,6 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
4268     struct xdp_mmap_offsets off;
4269     struct xdp_umem_reg mr;
4270     struct xsk_umem *umem;
4271     - socklen_t optlen;
4272     void *map;
4273     int err;
4274    
4275     @@ -190,8 +256,7 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
4276     goto out_socket;
4277     }
4278    
4279     - optlen = sizeof(off);
4280     - err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
4281     + err = xsk_get_mmap_offsets(umem->fd, &off);
4282     if (err) {
4283     err = -errno;
4284     goto out_socket;
4285     @@ -499,7 +564,6 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
4286     struct sockaddr_xdp sxdp = {};
4287     struct xdp_mmap_offsets off;
4288     struct xsk_socket *xsk;
4289     - socklen_t optlen;
4290     int err;
4291    
4292     if (!umem || !xsk_ptr || !rx || !tx)
4293     @@ -558,8 +622,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
4294     }
4295     }
4296    
4297     - optlen = sizeof(off);
4298     - err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
4299     + err = xsk_get_mmap_offsets(xsk->fd, &off);
4300     if (err) {
4301     err = -errno;
4302     goto out_socket;
4303     @@ -645,7 +708,6 @@ out_xsk_alloc:
4304     int xsk_umem__delete(struct xsk_umem *umem)
4305     {
4306     struct xdp_mmap_offsets off;
4307     - socklen_t optlen;
4308     int err;
4309    
4310     if (!umem)
4311     @@ -654,8 +716,7 @@ int xsk_umem__delete(struct xsk_umem *umem)
4312     if (umem->refcount)
4313     return -EBUSY;
4314    
4315     - optlen = sizeof(off);
4316     - err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
4317     + err = xsk_get_mmap_offsets(umem->fd, &off);
4318     if (!err) {
4319     munmap(umem->fill->ring - off.fr.desc,
4320     off.fr.desc + umem->config.fill_size * sizeof(__u64));
4321     @@ -673,7 +734,6 @@ void xsk_socket__delete(struct xsk_socket *xsk)
4322     {
4323     size_t desc_sz = sizeof(struct xdp_desc);
4324     struct xdp_mmap_offsets off;
4325     - socklen_t optlen;
4326     int err;
4327    
4328     if (!xsk)
4329     @@ -684,8 +744,7 @@ void xsk_socket__delete(struct xsk_socket *xsk)
4330     close(xsk->prog_fd);
4331     }
4332    
4333     - optlen = sizeof(off);
4334     - err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
4335     + err = xsk_get_mmap_offsets(xsk->fd, &off);
4336     if (!err) {
4337     if (xsk->rx) {
4338     munmap(xsk->rx->ring - off.rx.desc,
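
The xsk.c changes deal with a sockopt structure that grew between kernel releases: kernels up to and including 5.3 fill in the smaller xdp_mmap_offsets_v1 layout, and the only way for userspace to tell is the optlen value the kernel writes back. A generic sketch of that size-based versioning technique, with hypothetical option and struct names rather than the XDP API:

#include <string.h>
#include <sys/socket.h>

struct opts_v1 { unsigned long a, b; };
struct opts_v2 { unsigned long a, b, c; };	/* newer kernels add 'c' */

/* Fetch the option and normalize old-format replies to the new layout. */
int get_opts(int fd, int level, int optname, struct opts_v2 *out)
{
	socklen_t len = sizeof(*out);

	if (getsockopt(fd, level, optname, out, &len))
		return -1;

	if (len == sizeof(*out))
		return 0;			/* new kernel, full struct */

	if (len == sizeof(struct opts_v1)) {
		out->c = 0;			/* field absent on old kernels */
		return 0;
	}

	return -1;				/* unexpected layout */
}

The patch follows the same shape: xsk_get_mmap_offsets() checks the returned optlen and, for the v1 size, rewrites the fields into the current struct xdp_mmap_offsets before any of the mmap math runs.
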
4339     diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
4340     index 888814df758d..ea277ce63a46 100644
4341     --- a/tools/perf/util/machine.c
4342     +++ b/tools/perf/util/machine.c
4343     @@ -767,24 +767,6 @@ int machine__process_ksymbol(struct machine *machine __maybe_unused,
4344     return machine__process_ksymbol_register(machine, event, sample);
4345     }
4346    
4347     -static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
4348     -{
4349     - const char *dup_filename;
4350     -
4351     - if (!filename || !dso || !dso->long_name)
4352     - return;
4353     - if (dso->long_name[0] != '[')
4354     - return;
4355     - if (!strchr(filename, '/'))
4356     - return;
4357     -
4358     - dup_filename = strdup(filename);
4359     - if (!dup_filename)
4360     - return;
4361     -
4362     - dso__set_long_name(dso, dup_filename, true);
4363     -}
4364     -
4365     struct map *machine__findnew_module_map(struct machine *machine, u64 start,
4366     const char *filename)
4367     {
4368     @@ -796,15 +778,8 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
4369     return NULL;
4370    
4371     map = map_groups__find_by_name(&machine->kmaps, m.name);
4372     - if (map) {
4373     - /*
4374     - * If the map's dso is an offline module, give dso__load()
4375     - * a chance to find the file path of that module by fixing
4376     - * long_name.
4377     - */
4378     - dso__adjust_kmod_long_name(map->dso, filename);
4379     + if (map)
4380     goto out;
4381     - }
4382    
4383     dso = machine__findnew_module_dso(machine, &m, filename);
4384     if (dso == NULL)
4385     diff --git a/tools/testing/selftests/cgroup/test_freezer.c b/tools/testing/selftests/cgroup/test_freezer.c
4386     index 0fc1b6d4b0f9..62a27ab3c2f3 100644
4387     --- a/tools/testing/selftests/cgroup/test_freezer.c
4388     +++ b/tools/testing/selftests/cgroup/test_freezer.c
4389     @@ -72,6 +72,7 @@ static int cg_prepare_for_wait(const char *cgroup)
4390     if (ret == -1) {
4391     debug("Error: inotify_add_watch() failed\n");
4392     close(fd);
4393     + fd = -1;
4394     }
4395    
4396     return fd;
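
The test_freezer.c hunk closes the inotify descriptor on failure but previously still returned its now-stale numeric value, so callers could not detect the error and might later operate on a descriptor number reused elsewhere. A minimal sketch of the pattern, illustrative only:

#include <sys/inotify.h>
#include <unistd.h>

/* Return an inotify fd watching 'path', or -1 on failure. */
int prepare_watch(const char *path)
{
	int fd = inotify_init1(0);

	if (fd < 0)
		return -1;

	if (inotify_add_watch(fd, path, IN_MODIFY) < 0) {
		close(fd);
		fd = -1;	/* never hand back a closed descriptor */
	}

	return fd;
}
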
4397     diff --git a/tools/testing/selftests/gen_kselftest_tar.sh b/tools/testing/selftests/gen_kselftest_tar.sh
4398     index a27e2eec3586..8b2b6088540d 100755
4399     --- a/tools/testing/selftests/gen_kselftest_tar.sh
4400     +++ b/tools/testing/selftests/gen_kselftest_tar.sh
4401     @@ -38,16 +38,21 @@ main()
4402     esac
4403     fi
4404    
4405     - install_dir=./kselftest
4406     + # Create working directory.
4407     + dest=`pwd`
4408     + install_work="$dest"/kselftest_install
4409     + install_name=kselftest
4410     + install_dir="$install_work"/"$install_name"
4411     + mkdir -p "$install_dir"
4412    
4413     -# Run install using INSTALL_KSFT_PATH override to generate install
4414     -# directory
4415     -./kselftest_install.sh
4416     -tar $copts kselftest${ext} $install_dir
4417     -echo "Kselftest archive kselftest${ext} created!"
4418     + # Run install using INSTALL_KSFT_PATH override to generate install
4419     + # directory
4420     + ./kselftest_install.sh "$install_dir"
4421     + (cd "$install_work"; tar $copts "$dest"/kselftest${ext} $install_name)
4422     + echo "Kselftest archive kselftest${ext} created!"
4423    
4424     -# clean up install directory
4425     -rm -rf kselftest
4426     + # clean up top-level install work directory
4427     + rm -rf "$install_work"
4428     }
4429    
4430     main "$@"
4431     diff --git a/tools/testing/selftests/kselftest_install.sh b/tools/testing/selftests/kselftest_install.sh
4432     index e2e1911d62d5..407af7da7037 100755
4433     --- a/tools/testing/selftests/kselftest_install.sh
4434     +++ b/tools/testing/selftests/kselftest_install.sh
4435     @@ -6,30 +6,30 @@
4436     # Author: Shuah Khan <shuahkh@osg.samsung.com>
4437     # Copyright (C) 2015 Samsung Electronics Co., Ltd.
4438    
4439     -install_loc=`pwd`
4440     -
4441     main()
4442     {
4443     - if [ $(basename $install_loc) != "selftests" ]; then
4444     + base_dir=`pwd`
4445     + install_dir="$base_dir"/kselftest_install
4446     +
4447     + # Make sure we're in the selftests top-level directory.
4448     + if [ $(basename "$base_dir") != "selftests" ]; then
4449     echo "$0: Please run it in selftests directory ..."
4450     exit 1;
4451     fi
4452     +
4453     + # Only allow installation into an existing location.
4454     if [ "$#" -eq 0 ]; then
4455     - echo "$0: Installing in default location - $install_loc ..."
4456     + echo "$0: Installing in default location - $install_dir ..."
4457     elif [ ! -d "$1" ]; then
4458     echo "$0: $1 doesn't exist!!"
4459     exit 1;
4460     else
4461     - install_loc=$1
4462     - echo "$0: Installing in specified location - $install_loc ..."
4463     + install_dir="$1"
4464     + echo "$0: Installing in specified location - $install_dir ..."
4465     fi
4466    
4467     - install_dir=$install_loc/kselftest_install
4468     -
4469     -# Create install directory
4470     - mkdir -p $install_dir
4471     -# Build tests
4472     - KSFT_INSTALL_PATH=$install_dir make install
4473     + # Build tests
4474     + KSFT_INSTALL_PATH="$install_dir" make install
4475     }
4476    
4477     main "$@"