Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0165-5.4.66-all-fixes.patch

Revision 3636
Mon Oct 24 12:37:18 2022 UTC by niro
File size: 174629 bytes
-add missing
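
For reference, the Makefile hunk at the top of this patch shows it takes a 5.4.65 source tree to 5.4.66. A minimal sketch of applying a patch from this series, assuming an already-unpacked kernel tree at the previous sublevel (the directory name below is illustrative):

  $ cd linux-5.4.65
  $ patch -p1 < 0165-5.4.66-all-fixes.patch

The -p1 option strips the leading a/ and b/ path components used in the git-style diff headers that follow.
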
1 niro 3636 diff --git a/Makefile b/Makefile
2     index 4cb68164b79ee..a3686247e10e9 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 5
8     PATCHLEVEL = 4
9     -SUBLEVEL = 65
10     +SUBLEVEL = 66
11     EXTRAVERSION =
12     NAME = Kleptomaniac Octopus
13    
14     diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
15     index 9acbeba832c0b..dcaa44e408ace 100644
16     --- a/arch/arc/boot/dts/hsdk.dts
17     +++ b/arch/arc/boot/dts/hsdk.dts
18     @@ -88,6 +88,8 @@
19    
20     arcpct: pct {
21     compatible = "snps,archs-pct";
22     + interrupt-parent = <&cpu_intc>;
23     + interrupts = <20>;
24     };
25    
26     /* TIMER0 with interrupt for clockevent */
27     @@ -208,7 +210,7 @@
28     reg = <0x8000 0x2000>;
29     interrupts = <10>;
30     interrupt-names = "macirq";
31     - phy-mode = "rgmii";
32     + phy-mode = "rgmii-id";
33     snps,pbl = <32>;
34     snps,multicast-filter-bins = <256>;
35     clocks = <&gmacclk>;
36     @@ -226,7 +228,7 @@
37     #address-cells = <1>;
38     #size-cells = <0>;
39     compatible = "snps,dwmac-mdio";
40     - phy0: ethernet-phy@0 {
41     + phy0: ethernet-phy@0 { /* Micrel KSZ9031 */
42     reg = <0>;
43     };
44     };
45     diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h
46     index a4a61531c7fb9..77712c5ffe848 100644
47     --- a/arch/arc/plat-eznps/include/plat/ctop.h
48     +++ b/arch/arc/plat-eznps/include/plat/ctop.h
49     @@ -33,7 +33,6 @@
50     #define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C)
51     #define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030)
52     #define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080)
53     -#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088)
54     #define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C)
55     #define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300)
56    
57     diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi
58     index e35398cc60a06..dd71ab08136be 100644
59     --- a/arch/arm/boot/dts/bcm-hr2.dtsi
60     +++ b/arch/arm/boot/dts/bcm-hr2.dtsi
61     @@ -217,7 +217,7 @@
62     };
63    
64     qspi: spi@27200 {
65     - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
66     + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
67     reg = <0x027200 0x184>,
68     <0x027000 0x124>,
69     <0x11c408 0x004>,
70     diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
71     index 418e6b97cb2ec..8615d89fa4690 100644
72     --- a/arch/arm/boot/dts/bcm-nsp.dtsi
73     +++ b/arch/arm/boot/dts/bcm-nsp.dtsi
74     @@ -282,7 +282,7 @@
75     };
76    
77     qspi: spi@27200 {
78     - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
79     + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
80     reg = <0x027200 0x184>,
81     <0x027000 0x124>,
82     <0x11c408 0x004>,
83     diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
84     index 2d9b4dd058307..0016720ce5300 100644
85     --- a/arch/arm/boot/dts/bcm5301x.dtsi
86     +++ b/arch/arm/boot/dts/bcm5301x.dtsi
87     @@ -488,7 +488,7 @@
88     };
89    
90     spi@18029200 {
91     - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
92     + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
93     reg = <0x18029200 0x184>,
94     <0x18029000 0x124>,
95     <0x1811b408 0x004>,
96     diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi
97     index 3dac6898cdc57..0108b63df77d3 100644
98     --- a/arch/arm/boot/dts/imx7ulp.dtsi
99     +++ b/arch/arm/boot/dts/imx7ulp.dtsi
100     @@ -397,7 +397,7 @@
101     clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
102     <&pcc3 IMX7ULP_CLK_PCTLC>;
103     clock-names = "gpio", "port";
104     - gpio-ranges = <&iomuxc1 0 0 32>;
105     + gpio-ranges = <&iomuxc1 0 0 20>;
106     };
107    
108     gpio_ptd: gpio@40af0000 {
109     @@ -411,7 +411,7 @@
110     clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
111     <&pcc3 IMX7ULP_CLK_PCTLD>;
112     clock-names = "gpio", "port";
113     - gpio-ranges = <&iomuxc1 0 32 32>;
114     + gpio-ranges = <&iomuxc1 0 32 12>;
115     };
116    
117     gpio_pte: gpio@40b00000 {
118     @@ -425,7 +425,7 @@
119     clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
120     <&pcc3 IMX7ULP_CLK_PCTLE>;
121     clock-names = "gpio", "port";
122     - gpio-ranges = <&iomuxc1 0 64 32>;
123     + gpio-ranges = <&iomuxc1 0 64 16>;
124     };
125    
126     gpio_ptf: gpio@40b10000 {
127     @@ -439,7 +439,7 @@
128     clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
129     <&pcc3 IMX7ULP_CLK_PCTLF>;
130     clock-names = "gpio", "port";
131     - gpio-ranges = <&iomuxc1 0 96 32>;
132     + gpio-ranges = <&iomuxc1 0 96 20>;
133     };
134     };
135    
136     diff --git a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
137     index 100396f6c2feb..395e05f10d36c 100644
138     --- a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
139     +++ b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
140     @@ -51,6 +51,8 @@
141    
142     &mcbsp2 {
143     status = "okay";
144     + pinctrl-names = "default";
145     + pinctrl-0 = <&mcbsp2_pins>;
146     };
147    
148     &charger {
149     @@ -102,35 +104,18 @@
150     regulator-max-microvolt = <3300000>;
151     };
152    
153     - lcd0: display@0 {
154     - compatible = "panel-dpi";
155     - label = "28";
156     - status = "okay";
157     - /* default-on; */
158     + lcd0: display {
159     + /* This isn't the exact LCD, but the timings meet spec */
160     + compatible = "logicpd,type28";
161     pinctrl-names = "default";
162     pinctrl-0 = <&lcd_enable_pin>;
163     - enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; /* gpio155, lcd INI */
164     + backlight = <&bl>;
165     + enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>;
166     port {
167     lcd_in: endpoint {
168     remote-endpoint = <&dpi_out>;
169     };
170     };
171     -
172     - panel-timing {
173     - clock-frequency = <9000000>;
174     - hactive = <480>;
175     - vactive = <272>;
176     - hfront-porch = <3>;
177     - hback-porch = <2>;
178     - hsync-len = <42>;
179     - vback-porch = <3>;
180     - vfront-porch = <2>;
181     - vsync-len = <11>;
182     - hsync-active = <1>;
183     - vsync-active = <1>;
184     - de-active = <1>;
185     - pixelclk-active = <0>;
186     - };
187     };
188    
189     bl: backlight {
190     diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
191     index 449cc7616da63..e7a8f8addb6e0 100644
192     --- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
193     +++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
194     @@ -80,6 +80,8 @@
195     };
196    
197     &mcbsp2 {
198     + pinctrl-names = "default";
199     + pinctrl-0 = <&mcbsp2_pins>;
200     status = "okay";
201     };
202    
203     diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
204     index 5a8e58b663420..c62fcca7b4263 100644
205     --- a/arch/arm/boot/dts/ls1021a.dtsi
206     +++ b/arch/arm/boot/dts/ls1021a.dtsi
207     @@ -181,7 +181,7 @@
208     #address-cells = <1>;
209     #size-cells = <0>;
210     reg = <0x0 0x1550000 0x0 0x10000>,
211     - <0x0 0x40000000 0x0 0x40000000>;
212     + <0x0 0x40000000 0x0 0x20000000>;
213     reg-names = "QuadSPI", "QuadSPI-memory";
214     interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
215     clock-names = "qspi_en", "qspi";
216     diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
217     index 906bfb580e9e7..f261a33440710 100644
218     --- a/arch/arm/boot/dts/socfpga_arria10.dtsi
219     +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
220     @@ -819,7 +819,7 @@
221     timer3: timer3@ffd00100 {
222     compatible = "snps,dw-apb-timer";
223     interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>;
224     - reg = <0xffd01000 0x100>;
225     + reg = <0xffd00100 0x100>;
226     clocks = <&l4_sys_free_clk>;
227     clock-names = "timer";
228     resets = <&rst L4SYSTIMER1_RESET>;
229     diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
230     index 028e0ec30e0c0..fa248066d9d9b 100644
231     --- a/arch/arm/boot/dts/vfxxx.dtsi
232     +++ b/arch/arm/boot/dts/vfxxx.dtsi
233     @@ -495,7 +495,7 @@
234     };
235    
236     ocotp: ocotp@400a5000 {
237     - compatible = "fsl,vf610-ocotp";
238     + compatible = "fsl,vf610-ocotp", "syscon";
239     reg = <0x400a5000 0x1000>;
240     clocks = <&clks VF610_CLK_OCOTP>;
241     };
242     diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
243     index 15f7b0ed38369..39802066232e1 100644
244     --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
245     +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
246     @@ -745,7 +745,7 @@
247     };
248    
249     qspi: spi@66470200 {
250     - compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi";
251     + compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi";
252     reg = <0x66470200 0x184>,
253     <0x66470000 0x124>,
254     <0x67017408 0x004>,
255     diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
256     index 55a3d1c4bdf04..bc8540f879654 100644
257     --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
258     +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
259     @@ -349,7 +349,7 @@
260     tmu: tmu@30260000 {
261     compatible = "fsl,imx8mq-tmu";
262     reg = <0x30260000 0x10000>;
263     - interrupt = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
264     + interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
265     clocks = <&clk IMX8MQ_CLK_TMU_ROOT>;
266     little-endian;
267     fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x70061>;
268     diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
269     index b182442b87a32..426018ebb7007 100644
270     --- a/arch/arm64/kernel/module-plts.c
271     +++ b/arch/arm64/kernel/module-plts.c
272     @@ -270,8 +270,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
273     mod->arch.core.plt_shndx = i;
274     else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
275     mod->arch.init.plt_shndx = i;
276     - else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
277     - !strcmp(secstrings + sechdrs[i].sh_name,
278     + else if (!strcmp(secstrings + sechdrs[i].sh_name,
279     ".text.ftrace_trampoline"))
280     tramp = sechdrs + i;
281     else if (sechdrs[i].sh_type == SHT_SYMTAB)
282     diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
283     index 08b7f4cef2434..ddf5e97877e2b 100644
284     --- a/arch/powerpc/configs/pasemi_defconfig
285     +++ b/arch/powerpc/configs/pasemi_defconfig
286     @@ -109,7 +109,6 @@ CONFIG_FB_NVIDIA=y
287     CONFIG_FB_NVIDIA_I2C=y
288     CONFIG_FB_RADEON=y
289     # CONFIG_LCD_CLASS_DEVICE is not set
290     -CONFIG_VGACON_SOFT_SCROLLBACK=y
291     CONFIG_LOGO=y
292     CONFIG_SOUND=y
293     CONFIG_SND=y
294     diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
295     index 1372a1a7517ad..682d68f39c2b5 100644
296     --- a/arch/powerpc/configs/ppc6xx_defconfig
297     +++ b/arch/powerpc/configs/ppc6xx_defconfig
298     @@ -777,7 +777,6 @@ CONFIG_FB_TRIDENT=m
299     CONFIG_FB_SM501=m
300     CONFIG_FB_IBM_GXT4500=y
301     CONFIG_LCD_PLATFORM=m
302     -CONFIG_VGACON_SOFT_SCROLLBACK=y
303     CONFIG_FRAMEBUFFER_CONSOLE=y
304     CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
305     CONFIG_LOGO=y
306     diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
307     index 18806b4fb26a9..088709089e9b8 100644
308     --- a/arch/x86/configs/i386_defconfig
309     +++ b/arch/x86/configs/i386_defconfig
310     @@ -204,7 +204,6 @@ CONFIG_FB_MODE_HELPERS=y
311     CONFIG_FB_TILEBLITTING=y
312     CONFIG_FB_EFI=y
313     # CONFIG_LCD_CLASS_DEVICE is not set
314     -CONFIG_VGACON_SOFT_SCROLLBACK=y
315     CONFIG_LOGO=y
316     # CONFIG_LOGO_LINUX_MONO is not set
317     # CONFIG_LOGO_LINUX_VGA16 is not set
318     diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
319     index 3087c5e351e7e..8092d7baf8b50 100644
320     --- a/arch/x86/configs/x86_64_defconfig
321     +++ b/arch/x86/configs/x86_64_defconfig
322     @@ -200,7 +200,6 @@ CONFIG_FB_MODE_HELPERS=y
323     CONFIG_FB_TILEBLITTING=y
324     CONFIG_FB_EFI=y
325     # CONFIG_LCD_CLASS_DEVICE is not set
326     -CONFIG_VGACON_SOFT_SCROLLBACK=y
327     CONFIG_LOGO=y
328     # CONFIG_LOGO_LINUX_MONO is not set
329     # CONFIG_LOGO_LINUX_VGA16 is not set
330     diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
331     index 3be65495aeb8a..a1e62dda56074 100644
332     --- a/arch/x86/kvm/vmx/vmx.c
333     +++ b/arch/x86/kvm/vmx/vmx.c
334     @@ -5895,6 +5895,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
335     (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
336     exit_reason != EXIT_REASON_EPT_VIOLATION &&
337     exit_reason != EXIT_REASON_PML_FULL &&
338     + exit_reason != EXIT_REASON_APIC_ACCESS &&
339     exit_reason != EXIT_REASON_TASK_SWITCH)) {
340     vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
341     vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
342     diff --git a/block/bio.c b/block/bio.c
343     index 87505a93bcff6..f07739300dfe3 100644
344     --- a/block/bio.c
345     +++ b/block/bio.c
346     @@ -807,8 +807,10 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
347     struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
348    
349     if (page_is_mergeable(bv, page, len, off, same_page)) {
350     - if (bio->bi_iter.bi_size > UINT_MAX - len)
351     + if (bio->bi_iter.bi_size > UINT_MAX - len) {
352     + *same_page = false;
353     return false;
354     + }
355     bv->bv_len += len;
356     bio->bi_iter.bi_size += len;
357     return true;
358     diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
359     index d287837ed7555..5acb459856752 100644
360     --- a/drivers/atm/firestream.c
361     +++ b/drivers/atm/firestream.c
362     @@ -998,6 +998,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
363     error = make_rate (pcr, r, &tmc0, NULL);
364     if (error) {
365     kfree(tc);
366     + kfree(vcc);
367     return error;
368     }
369     }
370     diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
371     index 274beda31c356..bf2f0373a3b2f 100644
372     --- a/drivers/block/rbd.c
373     +++ b/drivers/block/rbd.c
374     @@ -5280,6 +5280,9 @@ static ssize_t rbd_config_info_show(struct device *dev,
375     {
376     struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
377    
378     + if (!capable(CAP_SYS_ADMIN))
379     + return -EPERM;
380     +
381     return sprintf(buf, "%s\n", rbd_dev->config_info);
382     }
383    
384     @@ -5391,6 +5394,9 @@ static ssize_t rbd_image_refresh(struct device *dev,
385     struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
386     int ret;
387    
388     + if (!capable(CAP_SYS_ADMIN))
389     + return -EPERM;
390     +
391     ret = rbd_dev_refresh(rbd_dev);
392     if (ret)
393     return ret;
394     @@ -7059,6 +7065,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
395     struct rbd_client *rbdc;
396     int rc;
397    
398     + if (!capable(CAP_SYS_ADMIN))
399     + return -EPERM;
400     +
401     if (!try_module_get(THIS_MODULE))
402     return -ENODEV;
403    
404     @@ -7208,6 +7217,9 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
405     bool force = false;
406     int ret;
407    
408     + if (!capable(CAP_SYS_ADMIN))
409     + return -EPERM;
410     +
411     dev_id = -1;
412     opt_buf[0] = '\0';
413     sscanf(buf, "%d %5s", &dev_id, opt_buf);
414     diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
415     index 927eb3fd23660..b9ca89dc75c7d 100644
416     --- a/drivers/cpufreq/intel_pstate.c
417     +++ b/drivers/cpufreq/intel_pstate.c
418     @@ -762,7 +762,7 @@ static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
419    
420     rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
421     WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
422     - if (global.no_turbo)
423     + if (global.no_turbo || global.turbo_disabled)
424     *current_max = HWP_GUARANTEED_PERF(cap);
425     else
426     *current_max = HWP_HIGHEST_PERF(cap);
427     @@ -2533,9 +2533,15 @@ static int intel_pstate_update_status(const char *buf, size_t size)
428     {
429     int ret;
430    
431     - if (size == 3 && !strncmp(buf, "off", size))
432     - return intel_pstate_driver ?
433     - intel_pstate_unregister_driver() : -EINVAL;
434     + if (size == 3 && !strncmp(buf, "off", size)) {
435     + if (!intel_pstate_driver)
436     + return -EINVAL;
437     +
438     + if (hwp_active)
439     + return -EBUSY;
440     +
441     + return intel_pstate_unregister_driver();
442     + }
443    
444     if (size == 6 && !strncmp(buf, "active", size)) {
445     if (intel_pstate_driver) {
446     diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
447     index 8a05db3343d39..dcbcb712de6e8 100644
448     --- a/drivers/dma/acpi-dma.c
449     +++ b/drivers/dma/acpi-dma.c
450     @@ -135,11 +135,13 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
451     if (ret < 0) {
452     dev_warn(&adev->dev,
453     "error in parsing resource group\n");
454     - return;
455     + break;
456     }
457    
458     grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
459     }
460     +
461     + acpi_put_table((struct acpi_table_header *)csrt);
462     }
463    
464     /**
465     diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
466     index bf95f1d551c51..0ecb724b394f5 100644
467     --- a/drivers/dma/dma-jz4780.c
468     +++ b/drivers/dma/dma-jz4780.c
469     @@ -885,24 +885,11 @@ static int jz4780_dma_probe(struct platform_device *pdev)
470     return -EINVAL;
471     }
472    
473     - ret = platform_get_irq(pdev, 0);
474     - if (ret < 0)
475     - return ret;
476     -
477     - jzdma->irq = ret;
478     -
479     - ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
480     - jzdma);
481     - if (ret) {
482     - dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
483     - return ret;
484     - }
485     -
486     jzdma->clk = devm_clk_get(dev, NULL);
487     if (IS_ERR(jzdma->clk)) {
488     dev_err(dev, "failed to get clock\n");
489     ret = PTR_ERR(jzdma->clk);
490     - goto err_free_irq;
491     + return ret;
492     }
493    
494     clk_prepare_enable(jzdma->clk);
495     @@ -955,10 +942,23 @@ static int jz4780_dma_probe(struct platform_device *pdev)
496     jzchan->vchan.desc_free = jz4780_dma_desc_free;
497     }
498    
499     + ret = platform_get_irq(pdev, 0);
500     + if (ret < 0)
501     + goto err_disable_clk;
502     +
503     + jzdma->irq = ret;
504     +
505     + ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
506     + jzdma);
507     + if (ret) {
508     + dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
509     + goto err_disable_clk;
510     + }
511     +
512     ret = dmaenginem_async_device_register(dd);
513     if (ret) {
514     dev_err(dev, "failed to register device\n");
515     - goto err_disable_clk;
516     + goto err_free_irq;
517     }
518    
519     /* Register with OF DMA helpers. */
520     @@ -966,17 +966,17 @@ static int jz4780_dma_probe(struct platform_device *pdev)
521     jzdma);
522     if (ret) {
523     dev_err(dev, "failed to register OF DMA controller\n");
524     - goto err_disable_clk;
525     + goto err_free_irq;
526     }
527    
528     dev_info(dev, "JZ4780 DMA controller initialised\n");
529     return 0;
530    
531     -err_disable_clk:
532     - clk_disable_unprepare(jzdma->clk);
533     -
534     err_free_irq:
535     free_irq(jzdma->irq, jzdma);
536     +
537     +err_disable_clk:
538     + clk_disable_unprepare(jzdma->clk);
539     return ret;
540     }
541    
542     diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
543     index e6da53e9c3f46..3a2a1dc9a786a 100644
544     --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
545     +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
546     @@ -3575,7 +3575,8 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
547     case AMDGPU_PP_SENSOR_GPU_POWER:
548     return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
549     case AMDGPU_PP_SENSOR_VDDGFX:
550     - if ((data->vr_config & 0xff) == 0x2)
551     + if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
552     + (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
553     val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
554     CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
555     else
556     diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
557     index fc29a3705354c..56cd14cacf5e7 100644
558     --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
559     +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
560     @@ -963,18 +963,6 @@ static int cmd_handler_lri(struct parser_exec_state *s)
561     int i, ret = 0;
562     int cmd_len = cmd_length(s);
563     struct intel_gvt *gvt = s->vgpu->gvt;
564     - u32 valid_len = CMD_LEN(1);
565     -
566     - /*
567     - * Official intel docs are somewhat sloppy , check the definition of
568     - * MI_LOAD_REGISTER_IMM.
569     - */
570     - #define MAX_VALID_LEN 127
571     - if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) {
572     - gvt_err("len is not valid: len=%u valid_len=%u\n",
573     - cmd_len, valid_len);
574     - return -EFAULT;
575     - }
576    
577     for (i = 1; i < cmd_len; i += 2) {
578     if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
579     diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
580     index 1f83bc18d5008..80f3b1da9fc26 100644
581     --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
582     +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
583     @@ -164,6 +164,11 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
584     if (ret)
585     return ret;
586    
587     + gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
588     + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
589     +
590     + gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
591     +
592     /* NOTE: PM4/micro-engine firmware registers look to be the same
593     * for a2xx and a3xx.. we could possibly push that part down to
594     * adreno_gpu base class. Or push both PM4 and PFP but
595     diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
596     index 5f7e98028eaf4..eeba2deeca1e8 100644
597     --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
598     +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
599     @@ -215,6 +215,16 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
600     if (ret)
601     return ret;
602    
603     + /*
604     + * Use the default ringbuffer size and block size but disable the RPTR
605     + * shadow
606     + */
607     + gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
608     + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
609     +
610     + /* Set the ringbuffer address */
611     + gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
612     +
613     /* setup access protection: */
614     gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
615    
616     diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
617     index ab2b752566d81..05cfa81d4c540 100644
618     --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
619     +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
620     @@ -265,6 +265,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
621     if (ret)
622     return ret;
623    
624     + /*
625     + * Use the default ringbuffer size and block size but disable the RPTR
626     + * shadow
627     + */
628     + gpu_write(gpu, REG_A4XX_CP_RB_CNTL,
629     + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
630     +
631     + /* Set the ringbuffer address */
632     + gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
633     +
634     /* Load PM4: */
635     ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
636     len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
637     diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
638     index 7829247de60e0..24b55103bfe00 100644
639     --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
640     +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
641     @@ -677,14 +677,21 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
642     if (ret)
643     return ret;
644    
645     - a5xx_preempt_hw_init(gpu);
646     -
647     a5xx_gpmu_ucode_init(gpu);
648    
649     ret = a5xx_ucode_init(gpu);
650     if (ret)
651     return ret;
652    
653     + /* Set the ringbuffer address */
654     + gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
655     + gpu->rb[0]->iova);
656     +
657     + gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
658     + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
659     +
660     + a5xx_preempt_hw_init(gpu);
661     +
662     /* Disable the interrupts through the initial bringup stage */
663     gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
664    
665     @@ -1451,7 +1458,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
666    
667     check_speed_bin(&pdev->dev);
668    
669     - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
670     + /* Restricting nr_rings to 1 to temporarily disable preemption */
671     + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
672     if (ret) {
673     a5xx_destroy(&(a5xx_gpu->base.base));
674     return ERR_PTR(ret);
675     diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
676     index be68d4e6551c2..ab75f0309d4b6 100644
677     --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
678     +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
679     @@ -512,6 +512,13 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
680     if (ret)
681     goto out;
682    
683     + /* Set the ringbuffer address */
684     + gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
685     + gpu->rb[0]->iova);
686     +
687     + gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
688     + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
689     +
690     /* Always come up on rb 0 */
691     a6xx_gpu->cur_ring = gpu->rb[0];
692    
693     diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
694     index 053da39da1cc0..3802ad38c519c 100644
695     --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
696     +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
697     @@ -354,26 +354,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
698     ring->memptrs->rptr = 0;
699     }
700    
701     - /*
702     - * Setup REG_CP_RB_CNTL. The same value is used across targets (with
703     - * the excpetion of A430 that disables the RPTR shadow) - the cacluation
704     - * for the ringbuffer size and block size is moved to msm_gpu.h for the
705     - * pre-processor to deal with and the A430 variant is ORed in here
706     - */
707     - adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
708     - MSM_GPU_RB_CNTL_DEFAULT |
709     - (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
710     -
711     - /* Setup ringbuffer address - use ringbuffer[0] for GPU init */
712     - adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
713     - REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
714     -
715     - if (!adreno_is_a430(adreno_gpu)) {
716     - adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
717     - REG_ADRENO_CP_RB_RPTR_ADDR_HI,
718     - rbmemptr(gpu->rb[0], rptr));
719     - }
720     -
721     return 0;
722     }
723    
724     @@ -381,11 +361,8 @@ int adreno_hw_init(struct msm_gpu *gpu)
725     static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
726     struct msm_ringbuffer *ring)
727     {
728     - if (adreno_is_a430(adreno_gpu))
729     - return ring->memptrs->rptr = adreno_gpu_read(
730     - adreno_gpu, REG_ADRENO_CP_RB_RPTR);
731     - else
732     - return ring->memptrs->rptr;
733     + return ring->memptrs->rptr = adreno_gpu_read(
734     + adreno_gpu, REG_ADRENO_CP_RB_RPTR);
735     }
736    
737     struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
738     diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
739     index e397c44cc0112..39ecb5a18431e 100644
740     --- a/drivers/gpu/drm/msm/msm_ringbuffer.c
741     +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
742     @@ -27,7 +27,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
743     ring->id = id;
744    
745     ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
746     - MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova);
747     + MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &ring->bo,
748     + &ring->iova);
749    
750     if (IS_ERR(ring->start)) {
751     ret = PTR_ERR(ring->start);
752     diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
753     index 4e29f4fe4a05e..99f081ccc15de 100644
754     --- a/drivers/gpu/drm/sun4i/sun4i_backend.c
755     +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
756     @@ -589,8 +589,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
757    
758     /* We can't have an alpha plane at the lowest position */
759     if (!backend->quirks->supports_lowest_plane_alpha &&
760     - (plane_states[0]->fb->format->has_alpha ||
761     - (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE)))
762     + (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
763     return -EINVAL;
764    
765     for (i = 1; i < num_planes; i++) {
766     @@ -986,7 +985,6 @@ static const struct sun4i_backend_quirks sun6i_backend_quirks = {
767    
768     static const struct sun4i_backend_quirks sun7i_backend_quirks = {
769     .needs_output_muxing = true,
770     - .supports_lowest_plane_alpha = true,
771     };
772    
773     static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
774     diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
775     index 0f7eafedfe8f5..ae7ae432aa4ab 100644
776     --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
777     +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
778     @@ -1409,14 +1409,18 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon,
779     if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
780     encoder->encoder_type == DRM_MODE_ENCODER_TMDS) {
781     ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id);
782     - if (ret)
783     + if (ret) {
784     + put_device(&pdev->dev);
785     return ret;
786     + }
787     }
788    
789     if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP)) {
790     ret = sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id);
791     - if (ret)
792     + if (ret) {
793     + put_device(&pdev->dev);
794     return ret;
795     + }
796     }
797    
798     return 0;
799     diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
800     index 4f944ace665d5..f2b288037b909 100644
801     --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
802     +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
803     @@ -867,7 +867,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
804     regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
805     sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
806    
807     - bounce = kzalloc(msg->tx_len + sizeof(crc), GFP_KERNEL);
808     + bounce = kzalloc(ALIGN(msg->tx_len + sizeof(crc), 4), GFP_KERNEL);
809     if (!bounce)
810     return -ENOMEM;
811    
812     @@ -878,7 +878,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
813     memcpy((u8 *)bounce + msg->tx_len, &crc, sizeof(crc));
814     len += sizeof(crc);
815    
816     - regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, len);
817     + regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, DIV_ROUND_UP(len, 4));
818     regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1);
819     kfree(bounce);
820    
821     diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
822     index d733bbc4ac0e5..17ff24d999d18 100644
823     --- a/drivers/gpu/drm/tve200/tve200_display.c
824     +++ b/drivers/gpu/drm/tve200/tve200_display.c
825     @@ -14,6 +14,7 @@
826     #include <linux/version.h>
827     #include <linux/dma-buf.h>
828     #include <linux/of_graph.h>
829     +#include <linux/delay.h>
830    
831     #include <drm/drm_fb_cma_helper.h>
832     #include <drm/drm_fourcc.h>
833     @@ -130,9 +131,25 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe,
834     struct drm_connector *connector = priv->connector;
835     u32 format = fb->format->format;
836     u32 ctrl1 = 0;
837     + int retries;
838    
839     clk_prepare_enable(priv->clk);
840    
841     + /* Reset the TVE200 and wait for it to come back online */
842     + writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4);
843     + for (retries = 0; retries < 5; retries++) {
844     + usleep_range(30000, 50000);
845     + if (readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET)
846     + continue;
847     + else
848     + break;
849     + }
850     + if (retries == 5 &&
851     + readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) {
852     + dev_err(drm->dev, "can't get hardware out of reset\n");
853     + return;
854     + }
855     +
856     /* Function 1 */
857     ctrl1 |= TVE200_CTRL_CSMODE;
858     /* Interlace mode for CCIR656: parameterize? */
859     @@ -230,8 +247,9 @@ static void tve200_display_disable(struct drm_simple_display_pipe *pipe)
860    
861     drm_crtc_vblank_off(crtc);
862    
863     - /* Disable and Power Down */
864     + /* Disable put into reset and Power Down */
865     writel(0, priv->regs + TVE200_CTRL);
866     + writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4);
867    
868     clk_disable_unprepare(priv->clk);
869     }
870     @@ -279,6 +297,8 @@ static int tve200_display_enable_vblank(struct drm_simple_display_pipe *pipe)
871     struct drm_device *drm = crtc->dev;
872     struct tve200_drm_dev_private *priv = drm->dev_private;
873    
874     + /* Clear any IRQs and enable */
875     + writel(0xFF, priv->regs + TVE200_INT_CLR);
876     writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN);
877     return 0;
878     }
879     diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
880     index 45c4f888b7c4e..dae193749d443 100644
881     --- a/drivers/hid/hid-elan.c
882     +++ b/drivers/hid/hid-elan.c
883     @@ -188,6 +188,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
884     ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER);
885     if (ret) {
886     hid_err(hdev, "Failed to init elan MT slots: %d\n", ret);
887     + input_free_device(input);
888     return ret;
889     }
890    
891     @@ -198,6 +199,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
892     if (ret) {
893     hid_err(hdev, "Failed to register elan input device: %d\n",
894     ret);
895     + input_mt_destroy_slots(input);
896     input_free_device(input);
897     return ret;
898     }
899     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
900     index fbc93d8dda5ed..e03a4d794240c 100644
901     --- a/drivers/hid/hid-ids.h
902     +++ b/drivers/hid/hid-ids.h
903     @@ -846,6 +846,7 @@
904     #define USB_DEVICE_ID_MS_POWER_COVER 0x07da
905     #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
906     #define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb
907     +#define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0
908    
909     #define USB_VENDOR_ID_MOJO 0x8282
910     #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
911     @@ -1011,6 +1012,8 @@
912     #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
913     #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
914     #define USB_DEVICE_ID_SAITEK_X52 0x075c
915     +#define USB_DEVICE_ID_SAITEK_X52_2 0x0255
916     +#define USB_DEVICE_ID_SAITEK_X52_PRO 0x0762
917    
918     #define USB_VENDOR_ID_SAMSUNG 0x0419
919     #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
920     diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
921     index 2d8b589201a4e..8cb1ca1936e42 100644
922     --- a/drivers/hid/hid-microsoft.c
923     +++ b/drivers/hid/hid-microsoft.c
924     @@ -451,6 +451,8 @@ static const struct hid_device_id ms_devices[] = {
925     .driver_data = MS_SURFACE_DIAL },
926     { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER),
927     .driver_data = MS_QUIRK_FF },
928     + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS),
929     + .driver_data = MS_QUIRK_FF },
930     { }
931     };
932     MODULE_DEVICE_TABLE(hid, ms_devices);
933     diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
934     index 8a739ec50cc00..0440e2f6e8a3c 100644
935     --- a/drivers/hid/hid-quirks.c
936     +++ b/drivers/hid/hid-quirks.c
937     @@ -150,6 +150,8 @@ static const struct hid_device_id hid_quirks[] = {
938     { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
939     { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
940     { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
941     + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
942     + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
943     { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
944     { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
945     { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
946     diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
947     index 121b4e89f038c..bcdf25f32e220 100644
948     --- a/drivers/iio/accel/bmc150-accel-core.c
949     +++ b/drivers/iio/accel/bmc150-accel-core.c
950     @@ -189,6 +189,14 @@ struct bmc150_accel_data {
951     struct mutex mutex;
952     u8 fifo_mode, watermark;
953     s16 buffer[8];
954     + /*
955     + * Ensure there is sufficient space and correct alignment for
956     + * the timestamp if enabled
957     + */
958     + struct {
959     + __le16 channels[3];
960     + s64 ts __aligned(8);
961     + } scan;
962     u8 bw_bits;
963     u32 slope_dur;
964     u32 slope_thres;
965     @@ -922,15 +930,16 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
966     * now.
967     */
968     for (i = 0; i < count; i++) {
969     - u16 sample[8];
970     int j, bit;
971    
972     j = 0;
973     for_each_set_bit(bit, indio_dev->active_scan_mask,
974     indio_dev->masklength)
975     - memcpy(&sample[j++], &buffer[i * 3 + bit], 2);
976     + memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit],
977     + sizeof(data->scan.channels[0]));
978    
979     - iio_push_to_buffers_with_timestamp(indio_dev, sample, tstamp);
980     + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
981     + tstamp);
982    
983     tstamp += sample_period;
984     }
985     diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
986     index 0b876b2dc5bd4..76429e2a6fb8f 100644
987     --- a/drivers/iio/accel/kxsd9.c
988     +++ b/drivers/iio/accel/kxsd9.c
989     @@ -209,14 +209,20 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
990     const struct iio_poll_func *pf = p;
991     struct iio_dev *indio_dev = pf->indio_dev;
992     struct kxsd9_state *st = iio_priv(indio_dev);
993     + /*
994     + * Ensure correct positioning and alignment of timestamp.
995     + * No need to zero initialize as all elements written.
996     + */
997     + struct {
998     + __be16 chan[4];
999     + s64 ts __aligned(8);
1000     + } hw_values;
1001     int ret;
1002     - /* 4 * 16bit values AND timestamp */
1003     - __be16 hw_values[8];
1004    
1005     ret = regmap_bulk_read(st->map,
1006     KXSD9_REG_X,
1007     - &hw_values,
1008     - 8);
1009     + hw_values.chan,
1010     + sizeof(hw_values.chan));
1011     if (ret) {
1012     dev_err(st->dev,
1013     "error reading data\n");
1014     @@ -224,7 +230,7 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
1015     }
1016    
1017     iio_push_to_buffers_with_timestamp(indio_dev,
1018     - hw_values,
1019     + &hw_values,
1020     iio_get_time_ns(indio_dev));
1021     iio_trigger_notify_done(indio_dev->trig);
1022    
1023     diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c
1024     index 8b5a6aff9bf4b..70ec3490bdb85 100644
1025     --- a/drivers/iio/accel/mma7455_core.c
1026     +++ b/drivers/iio/accel/mma7455_core.c
1027     @@ -52,6 +52,14 @@
1028    
1029     struct mma7455_data {
1030     struct regmap *regmap;
1031     + /*
1032     + * Used to reorganize data. Will ensure correct alignment of
1033     + * the timestamp if present
1034     + */
1035     + struct {
1036     + __le16 channels[3];
1037     + s64 ts __aligned(8);
1038     + } scan;
1039     };
1040    
1041     static int mma7455_drdy(struct mma7455_data *mma7455)
1042     @@ -82,19 +90,19 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p)
1043     struct iio_poll_func *pf = p;
1044     struct iio_dev *indio_dev = pf->indio_dev;
1045     struct mma7455_data *mma7455 = iio_priv(indio_dev);
1046     - u8 buf[16]; /* 3 x 16-bit channels + padding + ts */
1047     int ret;
1048    
1049     ret = mma7455_drdy(mma7455);
1050     if (ret)
1051     goto done;
1052    
1053     - ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, buf,
1054     - sizeof(__le16) * 3);
1055     + ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL,
1056     + mma7455->scan.channels,
1057     + sizeof(mma7455->scan.channels));
1058     if (ret)
1059     goto done;
1060    
1061     - iio_push_to_buffers_with_timestamp(indio_dev, buf,
1062     + iio_push_to_buffers_with_timestamp(indio_dev, &mma7455->scan,
1063     iio_get_time_ns(indio_dev));
1064    
1065     done:
1066     diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
1067     index 813bca7cfc3ed..85d453b3f5ec1 100644
1068     --- a/drivers/iio/accel/mma8452.c
1069     +++ b/drivers/iio/accel/mma8452.c
1070     @@ -110,6 +110,12 @@ struct mma8452_data {
1071     int sleep_val;
1072     struct regulator *vdd_reg;
1073     struct regulator *vddio_reg;
1074     +
1075     + /* Ensure correct alignment of time stamp when present */
1076     + struct {
1077     + __be16 channels[3];
1078     + s64 ts __aligned(8);
1079     + } buffer;
1080     };
1081    
1082     /**
1083     @@ -1091,14 +1097,13 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p)
1084     struct iio_poll_func *pf = p;
1085     struct iio_dev *indio_dev = pf->indio_dev;
1086     struct mma8452_data *data = iio_priv(indio_dev);
1087     - u8 buffer[16]; /* 3 16-bit channels + padding + ts */
1088     int ret;
1089    
1090     - ret = mma8452_read(data, (__be16 *)buffer);
1091     + ret = mma8452_read(data, data->buffer.channels);
1092     if (ret < 0)
1093     goto done;
1094    
1095     - iio_push_to_buffers_with_timestamp(indio_dev, buffer,
1096     + iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
1097     iio_get_time_ns(indio_dev));
1098    
1099     done:
1100     diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
1101     index bdd7cba6f6b0b..d3e9ec00ef959 100644
1102     --- a/drivers/iio/adc/ina2xx-adc.c
1103     +++ b/drivers/iio/adc/ina2xx-adc.c
1104     @@ -146,6 +146,11 @@ struct ina2xx_chip_info {
1105     int range_vbus; /* Bus voltage maximum in V */
1106     int pga_gain_vshunt; /* Shunt voltage PGA gain */
1107     bool allow_async_readout;
1108     + /* data buffer needs space for channel data and timestamp */
1109     + struct {
1110     + u16 chan[4];
1111     + u64 ts __aligned(8);
1112     + } scan;
1113     };
1114    
1115     static const struct ina2xx_config ina2xx_config[] = {
1116     @@ -738,8 +743,6 @@ static int ina2xx_conversion_ready(struct iio_dev *indio_dev)
1117     static int ina2xx_work_buffer(struct iio_dev *indio_dev)
1118     {
1119     struct ina2xx_chip_info *chip = iio_priv(indio_dev);
1120     - /* data buffer needs space for channel data and timestap */
1121     - unsigned short data[4 + sizeof(s64)/sizeof(short)];
1122     int bit, ret, i = 0;
1123     s64 time;
1124    
1125     @@ -758,10 +761,10 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
1126     if (ret < 0)
1127     return ret;
1128    
1129     - data[i++] = val;
1130     + chip->scan.chan[i++] = val;
1131     }
1132    
1133     - iio_push_to_buffers_with_timestamp(indio_dev, data, time);
1134     + iio_push_to_buffers_with_timestamp(indio_dev, &chip->scan, time);
1135    
1136     return 0;
1137     };
1138     diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
1139     index 3b6f3b9a6c5ba..a1b66f92e1bff 100644
1140     --- a/drivers/iio/adc/max1118.c
1141     +++ b/drivers/iio/adc/max1118.c
1142     @@ -35,6 +35,11 @@ struct max1118 {
1143     struct spi_device *spi;
1144     struct mutex lock;
1145     struct regulator *reg;
1146     + /* Ensure natural alignment of buffer elements */
1147     + struct {
1148     + u8 channels[2];
1149     + s64 ts __aligned(8);
1150     + } scan;
1151    
1152     u8 data ____cacheline_aligned;
1153     };
1154     @@ -159,7 +164,6 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p)
1155     struct iio_poll_func *pf = p;
1156     struct iio_dev *indio_dev = pf->indio_dev;
1157     struct max1118 *adc = iio_priv(indio_dev);
1158     - u8 data[16] = { }; /* 2x 8-bit ADC data + padding + 8 bytes timestamp */
1159     int scan_index;
1160     int i = 0;
1161    
1162     @@ -177,10 +181,10 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p)
1163     goto out;
1164     }
1165    
1166     - data[i] = ret;
1167     + adc->scan.channels[i] = ret;
1168     i++;
1169     }
1170     - iio_push_to_buffers_with_timestamp(indio_dev, data,
1171     + iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan,
1172     iio_get_time_ns(indio_dev));
1173     out:
1174     mutex_unlock(&adc->lock);
1175     diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
1176     index ea24d7c58b127..8ae4cf1351579 100644
1177     --- a/drivers/iio/adc/mcp3422.c
1178     +++ b/drivers/iio/adc/mcp3422.c
1179     @@ -95,16 +95,12 @@ static int mcp3422_update_config(struct mcp3422 *adc, u8 newconfig)
1180     {
1181     int ret;
1182    
1183     - mutex_lock(&adc->lock);
1184     -
1185     ret = i2c_master_send(adc->i2c, &newconfig, 1);
1186     if (ret > 0) {
1187     adc->config = newconfig;
1188     ret = 0;
1189     }
1190    
1191     - mutex_unlock(&adc->lock);
1192     -
1193     return ret;
1194     }
1195    
1196     @@ -137,6 +133,8 @@ static int mcp3422_read_channel(struct mcp3422 *adc,
1197     u8 config;
1198     u8 req_channel = channel->channel;
1199    
1200     + mutex_lock(&adc->lock);
1201     +
1202     if (req_channel != MCP3422_CHANNEL(adc->config)) {
1203     config = adc->config;
1204     config &= ~MCP3422_CHANNEL_MASK;
1205     @@ -144,12 +142,18 @@ static int mcp3422_read_channel(struct mcp3422 *adc,
1206     config &= ~MCP3422_PGA_MASK;
1207     config |= MCP3422_PGA_VALUE(adc->pga[req_channel]);
1208     ret = mcp3422_update_config(adc, config);
1209     - if (ret < 0)
1210     + if (ret < 0) {
1211     + mutex_unlock(&adc->lock);
1212     return ret;
1213     + }
1214     msleep(mcp3422_read_times[MCP3422_SAMPLE_RATE(adc->config)]);
1215     }
1216    
1217     - return mcp3422_read(adc, value, &config);
1218     + ret = mcp3422_read(adc, value, &config);
1219     +
1220     + mutex_unlock(&adc->lock);
1221     +
1222     + return ret;
1223     }
1224    
1225     static int mcp3422_read_raw(struct iio_dev *iio,
1226     diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
1227     index 0235863ff77b0..cc8cbffe2b7b5 100644
1228     --- a/drivers/iio/adc/ti-adc081c.c
1229     +++ b/drivers/iio/adc/ti-adc081c.c
1230     @@ -33,6 +33,12 @@ struct adc081c {
1231    
1232     /* 8, 10 or 12 */
1233     int bits;
1234     +
1235     + /* Ensure natural alignment of buffer elements */
1236     + struct {
1237     + u16 channel;
1238     + s64 ts __aligned(8);
1239     + } scan;
1240     };
1241    
1242     #define REG_CONV_RES 0x00
1243     @@ -128,14 +134,13 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p)
1244     struct iio_poll_func *pf = p;
1245     struct iio_dev *indio_dev = pf->indio_dev;
1246     struct adc081c *data = iio_priv(indio_dev);
1247     - u16 buf[8]; /* 2 bytes data + 6 bytes padding + 8 bytes timestamp */
1248     int ret;
1249    
1250     ret = i2c_smbus_read_word_swapped(data->i2c, REG_CONV_RES);
1251     if (ret < 0)
1252     goto out;
1253     - buf[0] = ret;
1254     - iio_push_to_buffers_with_timestamp(indio_dev, buf,
1255     + data->scan.channel = ret;
1256     + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
1257     iio_get_time_ns(indio_dev));
1258     out:
1259     iio_trigger_notify_done(indio_dev->trig);
1260     diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
1261     index bdedf456ee05d..fc053216d282c 100644
1262     --- a/drivers/iio/adc/ti-adc084s021.c
1263     +++ b/drivers/iio/adc/ti-adc084s021.c
1264     @@ -25,6 +25,11 @@ struct adc084s021 {
1265     struct spi_transfer spi_trans;
1266     struct regulator *reg;
1267     struct mutex lock;
1268     + /* Buffer used to align data */
1269     + struct {
1270     + __be16 channels[4];
1271     + s64 ts __aligned(8);
1272     + } scan;
1273     /*
1274     * DMA (thus cache coherency maintenance) requires the
1275     * transfer buffers to live in their own cache line.
1276     @@ -140,14 +145,13 @@ static irqreturn_t adc084s021_buffer_trigger_handler(int irq, void *pollfunc)
1277     struct iio_poll_func *pf = pollfunc;
1278     struct iio_dev *indio_dev = pf->indio_dev;
1279     struct adc084s021 *adc = iio_priv(indio_dev);
1280     - __be16 data[8] = {0}; /* 4 * 16-bit words of data + 8 bytes timestamp */
1281    
1282     mutex_lock(&adc->lock);
1283    
1284     - if (adc084s021_adc_conversion(adc, &data) < 0)
1285     + if (adc084s021_adc_conversion(adc, adc->scan.channels) < 0)
1286     dev_err(&adc->spi->dev, "Failed to read data\n");
1287    
1288     - iio_push_to_buffers_with_timestamp(indio_dev, data,
1289     + iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan,
1290     iio_get_time_ns(indio_dev));
1291     mutex_unlock(&adc->lock);
1292     iio_trigger_notify_done(indio_dev->trig);
1293     diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
1294     index a550b132cfb73..871690a476610 100644
1295     --- a/drivers/iio/adc/ti-ads1015.c
1296     +++ b/drivers/iio/adc/ti-ads1015.c
1297     @@ -309,6 +309,7 @@ static const struct iio_chan_spec ads1115_channels[] = {
1298     IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
1299     };
1300    
1301     +#ifdef CONFIG_PM
1302     static int ads1015_set_power_state(struct ads1015_data *data, bool on)
1303     {
1304     int ret;
1305     @@ -326,6 +327,15 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on)
1306     return ret < 0 ? ret : 0;
1307     }
1308    
1309     +#else /* !CONFIG_PM */
1310     +
1311     +static int ads1015_set_power_state(struct ads1015_data *data, bool on)
1312     +{
1313     + return 0;
1314     +}
1315     +
1316     +#endif /* !CONFIG_PM */
1317     +
1318     static
1319     int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
1320     {
1321     diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
1322     index 2ebdfc35bcda6..7bf4e9a16a6ae 100644
1323     --- a/drivers/iio/chemical/ccs811.c
1324     +++ b/drivers/iio/chemical/ccs811.c
1325     @@ -75,6 +75,11 @@ struct ccs811_data {
1326     struct ccs811_reading buffer;
1327     struct iio_trigger *drdy_trig;
1328     bool drdy_trig_on;
1329     + /* Ensures correct alignment of timestamp if present */
1330     + struct {
1331     + s16 channels[2];
1332     + s64 ts __aligned(8);
1333     + } scan;
1334     };
1335    
1336     static const struct iio_chan_spec ccs811_channels[] = {
1337     @@ -306,17 +311,17 @@ static irqreturn_t ccs811_trigger_handler(int irq, void *p)
1338     struct iio_dev *indio_dev = pf->indio_dev;
1339     struct ccs811_data *data = iio_priv(indio_dev);
1340     struct i2c_client *client = data->client;
1341     - s16 buf[8]; /* s16 eCO2 + s16 TVOC + padding + 8 byte timestamp */
1342     int ret;
1343    
1344     - ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, 4,
1345     - (u8 *)&buf);
1346     + ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA,
1347     + sizeof(data->scan.channels),
1348     + (u8 *)data->scan.channels);
1349     if (ret != 4) {
1350     dev_err(&client->dev, "cannot read sensor data\n");
1351     goto err;
1352     }
1353    
1354     - iio_push_to_buffers_with_timestamp(indio_dev, buf,
1355     + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
1356     iio_get_time_ns(indio_dev));
1357    
1358     err:
1359     diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
1360     index d2609e6feda4d..b4f394f058636 100644
1361     --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
1362     +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
1363     @@ -57,10 +57,13 @@ static void get_default_min_max_freq(enum motionsensor_type type,
1364     {
1365     switch (type) {
1366     case MOTIONSENSE_TYPE_ACCEL:
1367     - case MOTIONSENSE_TYPE_GYRO:
1368     *min_freq = 12500;
1369     *max_freq = 100000;
1370     break;
1371     + case MOTIONSENSE_TYPE_GYRO:
1372     + *min_freq = 25000;
1373     + *max_freq = 100000;
1374     + break;
1375     case MOTIONSENSE_TYPE_MAG:
1376     *min_freq = 5000;
1377     *max_freq = 25000;
1378     diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
1379     index 71f99d2a22c1d..ceddb6a3b61bb 100644
1380     --- a/drivers/iio/light/ltr501.c
1381     +++ b/drivers/iio/light/ltr501.c
1382     @@ -1242,13 +1242,16 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
1383     struct iio_poll_func *pf = p;
1384     struct iio_dev *indio_dev = pf->indio_dev;
1385     struct ltr501_data *data = iio_priv(indio_dev);
1386     - u16 buf[8];
1387     + struct {
1388     + u16 channels[3];
1389     + s64 ts __aligned(8);
1390     + } scan;
1391     __le16 als_buf[2];
1392     u8 mask = 0;
1393     int j = 0;
1394     int ret, psdata;
1395    
1396     - memset(buf, 0, sizeof(buf));
1397     + memset(&scan, 0, sizeof(scan));
1398    
1399     /* figure out which data needs to be ready */
1400     if (test_bit(0, indio_dev->active_scan_mask) ||
1401     @@ -1267,9 +1270,9 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
1402     if (ret < 0)
1403     return ret;
1404     if (test_bit(0, indio_dev->active_scan_mask))
1405     - buf[j++] = le16_to_cpu(als_buf[1]);
1406     + scan.channels[j++] = le16_to_cpu(als_buf[1]);
1407     if (test_bit(1, indio_dev->active_scan_mask))
1408     - buf[j++] = le16_to_cpu(als_buf[0]);
1409     + scan.channels[j++] = le16_to_cpu(als_buf[0]);
1410     }
1411    
1412     if (mask & LTR501_STATUS_PS_RDY) {
1413     @@ -1277,10 +1280,10 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
1414     &psdata, 2);
1415     if (ret < 0)
1416     goto done;
1417     - buf[j++] = psdata & LTR501_PS_DATA_MASK;
1418     + scan.channels[j++] = psdata & LTR501_PS_DATA_MASK;
1419     }
1420    
1421     - iio_push_to_buffers_with_timestamp(indio_dev, buf,
1422     + iio_push_to_buffers_with_timestamp(indio_dev, &scan,
1423     iio_get_time_ns(indio_dev));
1424    
1425     done:
1426     diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
1427     index d6d8007ba430a..8cc619de2c3ae 100644
1428     --- a/drivers/iio/light/max44000.c
1429     +++ b/drivers/iio/light/max44000.c
1430     @@ -75,6 +75,11 @@
1431     struct max44000_data {
1432     struct mutex lock;
1433     struct regmap *regmap;
1434     + /* Ensure naturally aligned timestamp */
1435     + struct {
1436     + u16 channels[2];
1437     + s64 ts __aligned(8);
1438     + } scan;
1439     };
1440    
1441     /* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */
1442     @@ -488,7 +493,6 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p)
1443     struct iio_poll_func *pf = p;
1444     struct iio_dev *indio_dev = pf->indio_dev;
1445     struct max44000_data *data = iio_priv(indio_dev);
1446     - u16 buf[8]; /* 2x u16 + padding + 8 bytes timestamp */
1447     int index = 0;
1448     unsigned int regval;
1449     int ret;
1450     @@ -498,17 +502,17 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p)
1451     ret = max44000_read_alsval(data);
1452     if (ret < 0)
1453     goto out_unlock;
1454     - buf[index++] = ret;
1455     + data->scan.channels[index++] = ret;
1456     }
1457     if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) {
1458     ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, &regval);
1459     if (ret < 0)
1460     goto out_unlock;
1461     - buf[index] = regval;
1462     + data->scan.channels[index] = regval;
1463     }
1464     mutex_unlock(&data->lock);
1465    
1466     - iio_push_to_buffers_with_timestamp(indio_dev, buf,
1467     + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
1468     iio_get_time_ns(indio_dev));
1469     iio_trigger_notify_done(indio_dev->trig);
1470     return IRQ_HANDLED;
1471     diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
1472     index 893bec5a0312b..82af903a765b2 100644
1473     --- a/drivers/iio/magnetometer/ak8975.c
1474     +++ b/drivers/iio/magnetometer/ak8975.c
1475     @@ -368,6 +368,12 @@ struct ak8975_data {
1476     struct iio_mount_matrix orientation;
1477     struct regulator *vdd;
1478     struct regulator *vid;
1479     +
1480     + /* Ensure natural alignment of timestamp */
1481     + struct {
1482     + s16 channels[3];
1483     + s64 ts __aligned(8);
1484     + } scan;
1485     };
1486    
1487     /* Enable attached power regulator if any. */
1488     @@ -805,7 +811,6 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
1489     const struct i2c_client *client = data->client;
1490     const struct ak_def *def = data->def;
1491     int ret;
1492     - s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */
1493     __le16 fval[3];
1494    
1495     mutex_lock(&data->lock);
1496     @@ -828,12 +833,13 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
1497     mutex_unlock(&data->lock);
1498    
1499     /* Clamp to valid range. */
1500     - buff[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range);
1501     - buff[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range);
1502     - buff[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range);
1503     + data->scan.channels[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range);
1504     + data->scan.channels[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range);
1505     + data->scan.channels[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range);
1506    
1507     - iio_push_to_buffers_with_timestamp(indio_dev, buff,
1508     + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
1509     iio_get_time_ns(indio_dev));
1510     +
1511     return;
1512    
1513     unlock:
1514     diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c
1515     index 166b3e6d7db89..5254b1fbccfdc 100644
1516     --- a/drivers/iio/proximity/mb1232.c
1517     +++ b/drivers/iio/proximity/mb1232.c
1518     @@ -40,6 +40,11 @@ struct mb1232_data {
1519     */
1520     struct completion ranging;
1521     int irqnr;
1522     + /* Ensure correct alignment of data to push to IIO buffer */
1523     + struct {
1524     + s16 distance;
1525     + s64 ts __aligned(8);
1526     + } scan;
1527     };
1528    
1529     static irqreturn_t mb1232_handle_irq(int irq, void *dev_id)
1530     @@ -113,17 +118,13 @@ static irqreturn_t mb1232_trigger_handler(int irq, void *p)
1531     struct iio_poll_func *pf = p;
1532     struct iio_dev *indio_dev = pf->indio_dev;
1533     struct mb1232_data *data = iio_priv(indio_dev);
1534     - /*
1535     - * triggered buffer
1536     - * 16-bit channel + 48-bit padding + 64-bit timestamp
1537     - */
1538     - s16 buffer[8] = { 0 };
1539    
1540     - buffer[0] = mb1232_read_distance(data);
1541     - if (buffer[0] < 0)
1542     + data->scan.distance = mb1232_read_distance(data);
1543     + if (data->scan.distance < 0)
1544     goto err;
1545    
1546     - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
1547     + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
1548     + pf->timestamp);
1549    
1550     err:
1551     iio_trigger_notify_done(indio_dev->trig);
1552     diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
1553     index d4815f29cfd24..5d896f6b2b617 100644
1554     --- a/drivers/infiniband/core/verbs.c
1555     +++ b/drivers/infiniband/core/verbs.c
1556     @@ -1749,7 +1749,7 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
1557    
1558     dev_put(netdev);
1559    
1560     - if (!rc) {
1561     + if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) {
1562     netdev_speed = lksettings.base.speed;
1563     } else {
1564     netdev_speed = SPEED_1000;
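
For context on the ib_get_eth_speed() hunk above: SPEED_UNKNOWN is -1 in the ethtool UAPI, and lksettings.base.speed is a u32, so an "unknown" speed does not look like an error once it lands in that field; it looks like an absurdly fast link, and the existing SPEED_1000 fallback never triggered. The added comparison makes the unknown case fall back to 1 Gb/s like any other ethtool failure. A tiny, self-contained illustration of the cast; the constant is written out here rather than pulled from kernel headers:

	#include <stdio.h>
	#include <stdint.h>

	#define SPEED_UNKNOWN (-1)	/* same value the ethtool UAPI uses */

	int main(void)
	{
		uint32_t speed = SPEED_UNKNOWN;	/* what lksettings.base.speed would hold */

		/* Without the explicit (u32)SPEED_UNKNOWN check, this value would
		 * silently be treated as a very fast link rather than an unknown one. */
		printf("speed stored as u32      : %u\n", speed);
		printf("equals (u32)SPEED_UNKNOWN: %d\n", speed == (uint32_t)SPEED_UNKNOWN);
		return 0;
	}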
1565     diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
1566     index ebc3e3d4a6e2a..3b05c0640338f 100644
1567     --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
1568     +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
1569     @@ -2973,6 +2973,19 @@ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
1570     wc->wc_flags |= IB_WC_GRH;
1571     }
1572    
1573     +static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
1574     + u16 vlan_id)
1575     +{
1576     + /*
1577     + * Check if the vlan is configured in the host. If not configured, it
1578     + * can be a transparent VLAN. So dont report the vlan id.
1579     + */
1580     + if (!__vlan_find_dev_deep_rcu(rdev->netdev,
1581     + htons(ETH_P_8021Q), vlan_id))
1582     + return false;
1583     + return true;
1584     +}
1585     +
1586     static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
1587     u16 *vid, u8 *sl)
1588     {
1589     @@ -3041,9 +3054,11 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
1590     wc->src_qp = orig_cqe->src_qp;
1591     memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
1592     if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
1593     - wc->vlan_id = vlan_id;
1594     - wc->sl = sl;
1595     - wc->wc_flags |= IB_WC_WITH_VLAN;
1596     + if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
1597     + wc->vlan_id = vlan_id;
1598     + wc->sl = sl;
1599     + wc->wc_flags |= IB_WC_WITH_VLAN;
1600     + }
1601     }
1602     wc->port_num = 1;
1603     wc->vendor_err = orig_cqe->status;
1604     diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
1605     index 61a1b0bdede05..b8274c6fc43e7 100644
1606     --- a/drivers/infiniband/hw/mlx4/main.c
1607     +++ b/drivers/infiniband/hw/mlx4/main.c
1608     @@ -781,7 +781,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
1609     props->ip_gids = true;
1610     props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
1611     props->max_msg_sz = mdev->dev->caps.max_msg_sz;
1612     - props->pkey_tbl_len = 1;
1613     + if (mdev->dev->caps.pkey_table_len[port])
1614     + props->pkey_tbl_len = 1;
1615     props->max_mtu = IB_MTU_4096;
1616     props->max_vl_num = 2;
1617     props->state = IB_PORT_DOWN;
1618     diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
1619     index a8c11b5e1e943..70c4ea438664d 100644
1620     --- a/drivers/infiniband/sw/rxe/rxe.c
1621     +++ b/drivers/infiniband/sw/rxe/rxe.c
1622     @@ -48,6 +48,8 @@ static void rxe_cleanup_ports(struct rxe_dev *rxe)
1623    
1624     }
1625    
1626     +bool rxe_initialized;
1627     +
1628     /* free resources for a rxe device all objects created for this device must
1629     * have been destroyed
1630     */
1631     @@ -157,9 +159,6 @@ static int rxe_init_ports(struct rxe_dev *rxe)
1632    
1633     rxe_init_port_param(port);
1634    
1635     - if (!port->attr.pkey_tbl_len || !port->attr.gid_tbl_len)
1636     - return -EINVAL;
1637     -
1638     port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len,
1639     sizeof(*port->pkey_tbl), GFP_KERNEL);
1640    
1641     @@ -358,6 +357,7 @@ static int __init rxe_module_init(void)
1642     return err;
1643    
1644     rdma_link_register(&rxe_link_ops);
1645     + rxe_initialized = true;
1646     pr_info("loaded\n");
1647     return 0;
1648     }
1649     @@ -369,6 +369,7 @@ static void __exit rxe_module_exit(void)
1650     rxe_net_exit();
1651     rxe_cache_exit();
1652    
1653     + rxe_initialized = false;
1654     pr_info("unloaded\n");
1655     }
1656    
1657     diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
1658     index fb07eed9e4028..cae1b0a24c850 100644
1659     --- a/drivers/infiniband/sw/rxe/rxe.h
1660     +++ b/drivers/infiniband/sw/rxe/rxe.h
1661     @@ -67,6 +67,8 @@
1662    
1663     #define RXE_ROCE_V2_SPORT (0xc000)
1664    
1665     +extern bool rxe_initialized;
1666     +
1667     static inline u32 rxe_crc32(struct rxe_dev *rxe,
1668     u32 crc, void *next, size_t len)
1669     {
1670     diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
1671     index ea6a819b71675..ffbc50341a55a 100644
1672     --- a/drivers/infiniband/sw/rxe/rxe_mr.c
1673     +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
1674     @@ -207,6 +207,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
1675     vaddr = page_address(sg_page_iter_page(&sg_iter));
1676     if (!vaddr) {
1677     pr_warn("null vaddr\n");
1678     + ib_umem_release(umem);
1679     err = -ENOMEM;
1680     goto err1;
1681     }
1682     diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
1683     index ccda5f5a3bc0a..2af31d421bfc3 100644
1684     --- a/drivers/infiniband/sw/rxe/rxe_sysfs.c
1685     +++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c
1686     @@ -61,6 +61,11 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
1687     struct net_device *ndev;
1688     struct rxe_dev *exists;
1689    
1690     + if (!rxe_initialized) {
1691     + pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n");
1692     + return -EAGAIN;
1693     + }
1694     +
1695     len = sanitize_arg(val, intf, sizeof(intf));
1696     if (!len) {
1697     pr_err("add: invalid interface name\n");
1698     diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
1699     index 71358b0b8910c..d1fe57ac87f56 100644
1700     --- a/drivers/infiniband/sw/rxe/rxe_verbs.c
1701     +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
1702     @@ -1078,7 +1078,7 @@ static ssize_t parent_show(struct device *device,
1703     struct rxe_dev *rxe =
1704     rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);
1705    
1706     - return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
1707     + return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1));
1708     }
1709    
1710     static DEVICE_ATTR_RO(parent);
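
On the parent_show() change just above: sysfs hands show() a full PAGE_SIZE buffer, so the hard-coded 16 was an arbitrary truncation point, and snprintf() additionally returns the length the output would have had rather than the number of bytes actually written, so a long interface name could make the attribute report more bytes than it stored. scnprintf() returns the stored length, which is what sysfs expects. scnprintf() itself is kernel-only; the userspace snippet below only demonstrates the snprintf() return-value half of the problem:

	#include <stdio.h>

	int main(void)
	{
		char buf[16];

		/* snprintf() reports the untruncated length, not what fit in buf. */
		int would_be = snprintf(buf, sizeof(buf), "%s\n",
					"a-rather-long-interface-name");

		printf("buffer holds      : \"%s\"\n", buf);
		printf("snprintf returned : %d (buffer is %zu bytes)\n",
		       would_be, sizeof(buf));
		return 0;
	}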
1711     diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1712     index a1a035270cabf..71268d61d2b8a 100644
1713     --- a/drivers/infiniband/ulp/isert/ib_isert.c
1714     +++ b/drivers/infiniband/ulp/isert/ib_isert.c
1715     @@ -182,15 +182,15 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
1716     rx_desc = isert_conn->rx_descs;
1717    
1718     for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
1719     - dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
1720     - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1721     + dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf,
1722     + ISER_RX_SIZE, DMA_FROM_DEVICE);
1723     if (ib_dma_mapping_error(ib_dev, dma_addr))
1724     goto dma_map_fail;
1725    
1726     rx_desc->dma_addr = dma_addr;
1727    
1728     rx_sg = &rx_desc->rx_sg;
1729     - rx_sg->addr = rx_desc->dma_addr;
1730     + rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc);
1731     rx_sg->length = ISER_RX_PAYLOAD_SIZE;
1732     rx_sg->lkey = device->pd->local_dma_lkey;
1733     rx_desc->rx_cqe.done = isert_recv_done;
1734     @@ -202,7 +202,7 @@ dma_map_fail:
1735     rx_desc = isert_conn->rx_descs;
1736     for (j = 0; j < i; j++, rx_desc++) {
1737     ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
1738     - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1739     + ISER_RX_SIZE, DMA_FROM_DEVICE);
1740     }
1741     kfree(isert_conn->rx_descs);
1742     isert_conn->rx_descs = NULL;
1743     @@ -223,7 +223,7 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
1744     rx_desc = isert_conn->rx_descs;
1745     for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
1746     ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
1747     - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1748     + ISER_RX_SIZE, DMA_FROM_DEVICE);
1749     }
1750    
1751     kfree(isert_conn->rx_descs);
1752     @@ -408,10 +408,9 @@ isert_free_login_buf(struct isert_conn *isert_conn)
1753     ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
1754     kfree(isert_conn->login_rsp_buf);
1755    
1756     - ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
1757     - ISER_RX_PAYLOAD_SIZE,
1758     - DMA_FROM_DEVICE);
1759     - kfree(isert_conn->login_req_buf);
1760     + ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
1761     + ISER_RX_SIZE, DMA_FROM_DEVICE);
1762     + kfree(isert_conn->login_desc);
1763     }
1764    
1765     static int
1766     @@ -420,25 +419,25 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
1767     {
1768     int ret;
1769    
1770     - isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
1771     + isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc),
1772     GFP_KERNEL);
1773     - if (!isert_conn->login_req_buf)
1774     + if (!isert_conn->login_desc)
1775     return -ENOMEM;
1776    
1777     - isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
1778     - isert_conn->login_req_buf,
1779     - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1780     - ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
1781     + isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev,
1782     + isert_conn->login_desc->buf,
1783     + ISER_RX_SIZE, DMA_FROM_DEVICE);
1784     + ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr);
1785     if (ret) {
1786     - isert_err("login_req_dma mapping error: %d\n", ret);
1787     - isert_conn->login_req_dma = 0;
1788     - goto out_free_login_req_buf;
1789     + isert_err("login_desc dma mapping error: %d\n", ret);
1790     + isert_conn->login_desc->dma_addr = 0;
1791     + goto out_free_login_desc;
1792     }
1793    
1794     isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
1795     if (!isert_conn->login_rsp_buf) {
1796     ret = -ENOMEM;
1797     - goto out_unmap_login_req_buf;
1798     + goto out_unmap_login_desc;
1799     }
1800    
1801     isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
1802     @@ -455,11 +454,11 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
1803    
1804     out_free_login_rsp_buf:
1805     kfree(isert_conn->login_rsp_buf);
1806     -out_unmap_login_req_buf:
1807     - ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
1808     - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1809     -out_free_login_req_buf:
1810     - kfree(isert_conn->login_req_buf);
1811     +out_unmap_login_desc:
1812     + ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
1813     + ISER_RX_SIZE, DMA_FROM_DEVICE);
1814     +out_free_login_desc:
1815     + kfree(isert_conn->login_desc);
1816     return ret;
1817     }
1818    
1819     @@ -578,7 +577,7 @@ isert_connect_release(struct isert_conn *isert_conn)
1820     ib_destroy_qp(isert_conn->qp);
1821     }
1822    
1823     - if (isert_conn->login_req_buf)
1824     + if (isert_conn->login_desc)
1825     isert_free_login_buf(isert_conn);
1826    
1827     isert_device_put(device);
1828     @@ -964,17 +963,18 @@ isert_login_post_recv(struct isert_conn *isert_conn)
1829     int ret;
1830    
1831     memset(&sge, 0, sizeof(struct ib_sge));
1832     - sge.addr = isert_conn->login_req_dma;
1833     + sge.addr = isert_conn->login_desc->dma_addr +
1834     + isert_get_hdr_offset(isert_conn->login_desc);
1835     sge.length = ISER_RX_PAYLOAD_SIZE;
1836     sge.lkey = isert_conn->device->pd->local_dma_lkey;
1837    
1838     isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
1839     sge.addr, sge.length, sge.lkey);
1840    
1841     - isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;
1842     + isert_conn->login_desc->rx_cqe.done = isert_login_recv_done;
1843    
1844     memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
1845     - rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
1846     + rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe;
1847     rx_wr.sg_list = &sge;
1848     rx_wr.num_sge = 1;
1849    
1850     @@ -1051,7 +1051,7 @@ post_send:
1851     static void
1852     isert_rx_login_req(struct isert_conn *isert_conn)
1853     {
1854     - struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
1855     + struct iser_rx_desc *rx_desc = isert_conn->login_desc;
1856     int rx_buflen = isert_conn->login_req_len;
1857     struct iscsi_conn *conn = isert_conn->conn;
1858     struct iscsi_login *login = conn->conn_login;
1859     @@ -1063,7 +1063,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
1860    
1861     if (login->first_request) {
1862     struct iscsi_login_req *login_req =
1863     - (struct iscsi_login_req *)&rx_desc->iscsi_header;
1864     + (struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc);
1865     /*
1866     * Setup the initial iscsi_login values from the leading
1867     * login request PDU.
1868     @@ -1082,13 +1082,13 @@ isert_rx_login_req(struct isert_conn *isert_conn)
1869     login->tsih = be16_to_cpu(login_req->tsih);
1870     }
1871    
1872     - memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
1873     + memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN);
1874    
1875     size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
1876     isert_dbg("Using login payload size: %d, rx_buflen: %d "
1877     "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
1878     MAX_KEY_VALUE_PAIRS);
1879     - memcpy(login->req_buf, &rx_desc->data[0], size);
1880     + memcpy(login->req_buf, isert_get_data(rx_desc), size);
1881    
1882     if (login->first_request) {
1883     complete(&isert_conn->login_comp);
1884     @@ -1153,14 +1153,15 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1885     if (imm_data_len != data_len) {
1886     sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
1887     sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
1888     - &rx_desc->data[0], imm_data_len);
1889     + isert_get_data(rx_desc), imm_data_len);
1890     isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
1891     sg_nents, imm_data_len);
1892     } else {
1893     sg_init_table(&isert_cmd->sg, 1);
1894     cmd->se_cmd.t_data_sg = &isert_cmd->sg;
1895     cmd->se_cmd.t_data_nents = 1;
1896     - sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
1897     + sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc),
1898     + imm_data_len);
1899     isert_dbg("Transfer Immediate imm_data_len: %d\n",
1900     imm_data_len);
1901     }
1902     @@ -1229,9 +1230,9 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1903     }
1904     isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
1905     "sg_nents: %u from %p %u\n", sg_start, sg_off,
1906     - sg_nents, &rx_desc->data[0], unsol_data_len);
1907     + sg_nents, isert_get_data(rx_desc), unsol_data_len);
1908    
1909     - sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
1910     + sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc),
1911     unsol_data_len);
1912    
1913     rc = iscsit_check_dataout_payload(cmd, hdr, false);
1914     @@ -1290,7 +1291,7 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
1915     }
1916     cmd->text_in_ptr = text_in;
1917    
1918     - memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
1919     + memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length);
1920    
1921     return iscsit_process_text_cmd(conn, cmd, hdr);
1922     }
1923     @@ -1300,7 +1301,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1924     uint32_t read_stag, uint64_t read_va,
1925     uint32_t write_stag, uint64_t write_va)
1926     {
1927     - struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1928     + struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
1929     struct iscsi_conn *conn = isert_conn->conn;
1930     struct iscsi_cmd *cmd;
1931     struct isert_cmd *isert_cmd;
1932     @@ -1398,8 +1399,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1933     struct isert_conn *isert_conn = wc->qp->qp_context;
1934     struct ib_device *ib_dev = isert_conn->cm_id->device;
1935     struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
1936     - struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1937     - struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
1938     + struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
1939     + struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc);
1940     uint64_t read_va = 0, write_va = 0;
1941     uint32_t read_stag = 0, write_stag = 0;
1942    
1943     @@ -1413,7 +1414,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1944     rx_desc->in_use = true;
1945    
1946     ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
1947     - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1948     + ISER_RX_SIZE, DMA_FROM_DEVICE);
1949    
1950     isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1951     rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
1952     @@ -1448,7 +1449,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1953     read_stag, read_va, write_stag, write_va);
1954    
1955     ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
1956     - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1957     + ISER_RX_SIZE, DMA_FROM_DEVICE);
1958     }
1959    
1960     static void
1961     @@ -1462,8 +1463,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1962     return;
1963     }
1964    
1965     - ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
1966     - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1967     + ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr,
1968     + ISER_RX_SIZE, DMA_FROM_DEVICE);
1969    
1970     isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
1971    
1972     @@ -1478,8 +1479,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1973     complete(&isert_conn->login_req_comp);
1974     mutex_unlock(&isert_conn->mutex);
1975    
1976     - ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
1977     - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1978     + ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr,
1979     + ISER_RX_SIZE, DMA_FROM_DEVICE);
1980     }
1981    
1982     static void
1983     diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
1984     index 3b296bac4f603..d267a6d60d87d 100644
1985     --- a/drivers/infiniband/ulp/isert/ib_isert.h
1986     +++ b/drivers/infiniband/ulp/isert/ib_isert.h
1987     @@ -59,9 +59,11 @@
1988     ISERT_MAX_TX_MISC_PDUS + \
1989     ISERT_MAX_RX_MISC_PDUS)
1990    
1991     -#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
1992     - (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
1993     - sizeof(struct ib_cqe) + sizeof(bool)))
1994     +/*
1995     + * RX size is default of 8k plus headers, but data needs to align to
1996     + * 512 boundary, so use 1024 to have the extra space for alignment.
1997     + */
1998     +#define ISER_RX_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 1024)
1999    
2000     #define ISCSI_ISER_SG_TABLESIZE 256
2001    
2002     @@ -80,21 +82,41 @@ enum iser_conn_state {
2003     };
2004    
2005     struct iser_rx_desc {
2006     - struct iser_ctrl iser_header;
2007     - struct iscsi_hdr iscsi_header;
2008     - char data[ISCSI_DEF_MAX_RECV_SEG_LEN];
2009     + char buf[ISER_RX_SIZE];
2010     u64 dma_addr;
2011     struct ib_sge rx_sg;
2012     struct ib_cqe rx_cqe;
2013     bool in_use;
2014     - char pad[ISER_RX_PAD_SIZE];
2015     -} __packed;
2016     +};
2017    
2018     static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe)
2019     {
2020     return container_of(cqe, struct iser_rx_desc, rx_cqe);
2021     }
2022    
2023     +static void *isert_get_iser_hdr(struct iser_rx_desc *desc)
2024     +{
2025     + return PTR_ALIGN(desc->buf + ISER_HEADERS_LEN, 512) - ISER_HEADERS_LEN;
2026     +}
2027     +
2028     +static size_t isert_get_hdr_offset(struct iser_rx_desc *desc)
2029     +{
2030     + return isert_get_iser_hdr(desc) - (void *)desc->buf;
2031     +}
2032     +
2033     +static void *isert_get_iscsi_hdr(struct iser_rx_desc *desc)
2034     +{
2035     + return isert_get_iser_hdr(desc) + sizeof(struct iser_ctrl);
2036     +}
2037     +
2038     +static void *isert_get_data(struct iser_rx_desc *desc)
2039     +{
2040     + void *data = isert_get_iser_hdr(desc) + ISER_HEADERS_LEN;
2041     +
2042     + WARN_ON((uintptr_t)data & 511);
2043     + return data;
2044     +}
2045     +
2046     struct iser_tx_desc {
2047     struct iser_ctrl iser_header;
2048     struct iscsi_hdr iscsi_header;
2049     @@ -141,9 +163,8 @@ struct isert_conn {
2050     u32 responder_resources;
2051     u32 initiator_depth;
2052     bool pi_support;
2053     - struct iser_rx_desc *login_req_buf;
2054     + struct iser_rx_desc *login_desc;
2055     char *login_rsp_buf;
2056     - u64 login_req_dma;
2057     int login_req_len;
2058     u64 login_rsp_dma;
2059     struct iser_rx_desc *rx_descs;
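
The reworked iser_rx_desc above stops relying on a packed struct plus a computed pad member and instead carves headers and payload out of a single buf[ISER_RX_SIZE], with isert_get_iser_hdr() placing the iSER and iSCSI headers immediately before a 512-byte-aligned payload, which is the invariant the WARN_ON() in isert_get_data() asserts. A standalone sketch of the same pointer arithmetic; the PTR_ALIGN macro here is a local re-creation of the kernel helper and HDRS_LEN is a made-up stand-in for ISER_HEADERS_LEN, since only the alignment math matters:

	#include <stdio.h>
	#include <stdint.h>

	/* Round a pointer up to 'a', which must be a power of two (the same job
	 * the kernel's PTR_ALIGN() does). */
	#define PTR_ALIGN(p, a) \
		((void *)(((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1)))

	#define HDRS_LEN 76	/* stand-in for ISER_HEADERS_LEN; the exact value is not the point */

	int main(void)
	{
		static char buf[8192 + 1024];	/* analogous to buf[ISER_RX_SIZE] */

		/* Put the headers wherever they need to be so that the data which
		 * follows them lands on a 512-byte boundary. */
		char *hdr  = (char *)PTR_ALIGN(buf + HDRS_LEN, 512) - HDRS_LEN;
		char *data = hdr + HDRS_LEN;

		printf("header offset into buf: %td\n", hdr - buf);
		printf("data address %% 512    : %lu\n",
		       (unsigned long)((uintptr_t)data & 511));
		return 0;
	}

The header offset printed varies with where buf happens to start; the payload alignment is what stays fixed at zero, matching the rx_sg->addr adjustment by isert_get_hdr_offset() in the ib_isert.c hunks.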
2060     diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
2061     index d6d85debd01b0..05f3d93cf480c 100644
2062     --- a/drivers/iommu/amd_iommu_v2.c
2063     +++ b/drivers/iommu/amd_iommu_v2.c
2064     @@ -741,6 +741,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
2065    
2066     might_sleep();
2067    
2068     + /*
2069     + * When memory encryption is active the device is likely not in a
2070     + * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
2071     + */
2072     + if (mem_encrypt_active())
2073     + return -ENODEV;
2074     +
2075     if (!amd_iommu_v2_supported())
2076     return -ENODEV;
2077    
2078     diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
2079     index 93d346c01110d..4c229dd2b6e54 100644
2080     --- a/drivers/mmc/core/sdio_ops.c
2081     +++ b/drivers/mmc/core/sdio_ops.c
2082     @@ -121,6 +121,7 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
2083     struct sg_table sgtable;
2084     unsigned int nents, left_size, i;
2085     unsigned int seg_size = card->host->max_seg_size;
2086     + int err;
2087    
2088     WARN_ON(blksz == 0);
2089    
2090     @@ -170,28 +171,32 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
2091    
2092     mmc_set_data_timeout(&data, card);
2093    
2094     - mmc_wait_for_req(card->host, &mrq);
2095     + mmc_pre_req(card->host, &mrq);
2096    
2097     - if (nents > 1)
2098     - sg_free_table(&sgtable);
2099     + mmc_wait_for_req(card->host, &mrq);
2100    
2101     if (cmd.error)
2102     - return cmd.error;
2103     - if (data.error)
2104     - return data.error;
2105     -
2106     - if (mmc_host_is_spi(card->host)) {
2107     + err = cmd.error;
2108     + else if (data.error)
2109     + err = data.error;
2110     + else if (mmc_host_is_spi(card->host))
2111     /* host driver already reported errors */
2112     - } else {
2113     - if (cmd.resp[0] & R5_ERROR)
2114     - return -EIO;
2115     - if (cmd.resp[0] & R5_FUNCTION_NUMBER)
2116     - return -EINVAL;
2117     - if (cmd.resp[0] & R5_OUT_OF_RANGE)
2118     - return -ERANGE;
2119     - }
2120     + err = 0;
2121     + else if (cmd.resp[0] & R5_ERROR)
2122     + err = -EIO;
2123     + else if (cmd.resp[0] & R5_FUNCTION_NUMBER)
2124     + err = -EINVAL;
2125     + else if (cmd.resp[0] & R5_OUT_OF_RANGE)
2126     + err = -ERANGE;
2127     + else
2128     + err = 0;
2129    
2130     - return 0;
2131     + mmc_post_req(card->host, &mrq, err);
2132     +
2133     + if (nents > 1)
2134     + sg_free_table(&sgtable);
2135     +
2136     + return err;
2137     }
2138    
2139     int sdio_reset(struct mmc_host *host)
2140     diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
2141     index 5581a5c86fce3..b2d924c5e82ee 100644
2142     --- a/drivers/mmc/host/sdhci-acpi.c
2143     +++ b/drivers/mmc/host/sdhci-acpi.c
2144     @@ -547,12 +547,18 @@ static int amd_select_drive_strength(struct mmc_card *card,
2145     return MMC_SET_DRIVER_TYPE_A;
2146     }
2147    
2148     -static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host)
2149     +static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host, bool enable)
2150     {
2151     + struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
2152     + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
2153     +
2154     /* AMD Platform requires dll setting */
2155     sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER);
2156     usleep_range(10, 20);
2157     - sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
2158     + if (enable)
2159     + sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
2160     +
2161     + amd_host->dll_enabled = enable;
2162     }
2163    
2164     /*
2165     @@ -592,10 +598,8 @@ static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2166    
2167     /* DLL is only required for HS400 */
2168     if (host->timing == MMC_TIMING_MMC_HS400 &&
2169     - !amd_host->dll_enabled) {
2170     - sdhci_acpi_amd_hs400_dll(host);
2171     - amd_host->dll_enabled = true;
2172     - }
2173     + !amd_host->dll_enabled)
2174     + sdhci_acpi_amd_hs400_dll(host, true);
2175     }
2176     }
2177    
2178     @@ -616,10 +620,23 @@ static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2179     return err;
2180     }
2181    
2182     +static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
2183     +{
2184     + struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
2185     + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
2186     +
2187     + if (mask & SDHCI_RESET_ALL) {
2188     + amd_host->tuned_clock = false;
2189     + sdhci_acpi_amd_hs400_dll(host, false);
2190     + }
2191     +
2192     + sdhci_reset(host, mask);
2193     +}
2194     +
2195     static const struct sdhci_ops sdhci_acpi_ops_amd = {
2196     .set_clock = sdhci_set_clock,
2197     .set_bus_width = sdhci_set_bus_width,
2198     - .reset = sdhci_reset,
2199     + .reset = amd_sdhci_reset,
2200     .set_uhs_signaling = sdhci_set_uhs_signaling,
2201     };
2202    
2203     diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
2204     index 84cffdef264b7..8bed81cf03adc 100644
2205     --- a/drivers/mmc/host/sdhci-msm.c
2206     +++ b/drivers/mmc/host/sdhci-msm.c
2207     @@ -1096,7 +1096,7 @@ static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
2208     static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
2209     {
2210     struct sdhci_host *host = mmc_priv(mmc);
2211     - int tuning_seq_cnt = 3;
2212     + int tuning_seq_cnt = 10;
2213     u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
2214     int rc;
2215     struct mmc_ios ios = host->mmc->ios;
2216     @@ -1152,6 +1152,22 @@ retry:
2217     } while (++phase < ARRAY_SIZE(tuned_phases));
2218    
2219     if (tuned_phase_cnt) {
2220     + if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) {
2221     + /*
2222     + * All phases valid is _almost_ as bad as no phases
2223     + * valid. Probably all phases are not really reliable
2224     + * but we didn't detect where the unreliable place is.
2225     + * That means we'll essentially be guessing and hoping
2226     + * we get a good phase. Better to try a few times.
2227     + */
2228     + dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n",
2229     + mmc_hostname(mmc));
2230     + if (--tuning_seq_cnt) {
2231     + tuned_phase_cnt = 0;
2232     + goto retry;
2233     + }
2234     + }
2235     +
2236     rc = msm_find_most_appropriate_phase(host, tuned_phases,
2237     tuned_phase_cnt);
2238     if (rc < 0)
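
The retry added above, together with the larger tuning_seq_cnt budget of 10, covers the degenerate case where every one of the 16 CDR phases passes: the selection step that follows looks for the longest contiguous run of passing phases (wrapping from phase 15 back to 0) and settles on its midpoint, and with all phases "passing" there is no failing region to steer away from, so the midpoint is meaningless and the whole sequence is re-run. A rough userspace sketch of that selection idea; this is not the driver's msm_find_most_appropriate_phase(), just the concept:

	#include <stdio.h>

	#define NUM_PHASES 16

	/* Pick the middle of the longest circular run of passing phases.
	 * Returns -1 if nothing passed. Purely illustrative. */
	static int pick_phase(const int ok[NUM_PHASES])
	{
		int best_len = 0, best_start = 0;

		for (int start = 0; start < NUM_PHASES; start++) {
			int len = 0;

			while (len < NUM_PHASES && ok[(start + len) % NUM_PHASES])
				len++;
			if (len > best_len) {
				best_len = len;
				best_start = start;
			}
		}
		if (!best_len)
			return -1;
		return (best_start + best_len / 2) % NUM_PHASES;
	}

	int main(void)
	{
		/* Phases 5..9 fail: the midpoint of the surviving window is chosen. */
		int some_bad[NUM_PHASES] = { 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1 };
		/* Everything passes: there is no bad region to center away from,
		 * which is why the driver now re-runs the tuning sequence instead. */
		int all_good[NUM_PHASES] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };

		printf("phase with a bad window : %d\n", pick_phase(some_bad));
		printf("phase with all passing  : %d\n", pick_phase(all_good));
		return 0;
	}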
2239     diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
2240     index fd1251ec84711..66ad46d0ba88b 100644
2241     --- a/drivers/mmc/host/sdhci-of-esdhc.c
2242     +++ b/drivers/mmc/host/sdhci-of-esdhc.c
2243     @@ -81,6 +81,7 @@ struct sdhci_esdhc {
2244     bool quirk_tuning_erratum_type2;
2245     bool quirk_ignore_data_inhibit;
2246     bool quirk_delay_before_data_reset;
2247     + bool quirk_trans_complete_erratum;
2248     bool in_sw_tuning;
2249     unsigned int peripheral_clock;
2250     const struct esdhc_clk_fixup *clk_fixup;
2251     @@ -1082,10 +1083,11 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host,
2252    
2253     static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
2254     {
2255     + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2256     + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
2257     u32 command;
2258    
2259     - if (of_find_compatible_node(NULL, NULL,
2260     - "fsl,p2020-esdhc")) {
2261     + if (esdhc->quirk_trans_complete_erratum) {
2262     command = SDHCI_GET_CMD(sdhci_readw(host,
2263     SDHCI_COMMAND));
2264     if (command == MMC_WRITE_MULTIPLE_BLOCK &&
2265     @@ -1239,8 +1241,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
2266     esdhc->clk_fixup = match->data;
2267     np = pdev->dev.of_node;
2268    
2269     - if (of_device_is_compatible(np, "fsl,p2020-esdhc"))
2270     + if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
2271     esdhc->quirk_delay_before_data_reset = true;
2272     + esdhc->quirk_trans_complete_erratum = true;
2273     + }
2274    
2275     clk = of_clk_get(np, 0);
2276     if (!IS_ERR(clk)) {
2277     diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2278     index a8ce6ca0f5081..92af7204711c8 100644
2279     --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2280     +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2281     @@ -21,6 +21,7 @@
2282     #include <net/pkt_cls.h>
2283     #include <net/tcp.h>
2284     #include <net/vxlan.h>
2285     +#include <net/geneve.h>
2286    
2287     #include "hnae3.h"
2288     #include "hns3_enet.h"
2289     @@ -795,7 +796,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
2290     * and it is udp packet, which has a dest port as the IANA assigned.
2291     * the hardware is expected to do the checksum offload, but the
2292     * hardware will not do the checksum offload when udp dest port is
2293     - * 4789.
2294     + * 4789 or 6081.
2295     */
2296     static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
2297     {
2298     @@ -804,7 +805,8 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
2299     l4.hdr = skb_transport_header(skb);
2300    
2301     if (!(!skb->encapsulation &&
2302     - l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
2303     + (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
2304     + l4.udp->dest == htons(GENEVE_UDP_PORT))))
2305     return false;
2306    
2307     skb_checksum_help(skb);
2308     diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
2309     index a030f5aa6b951..cc33441af4691 100644
2310     --- a/drivers/net/wan/hdlc_cisco.c
2311     +++ b/drivers/net/wan/hdlc_cisco.c
2312     @@ -370,6 +370,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
2313     memcpy(&state(hdlc)->settings, &new_settings, size);
2314     spin_lock_init(&state(hdlc)->lock);
2315     dev->header_ops = &cisco_header_ops;
2316     + dev->hard_header_len = sizeof(struct hdlc_header);
2317     dev->type = ARPHRD_CISCO;
2318     call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
2319     netif_dormant_on(dev);
2320     diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
2321     index 996eb9c55b39a..2cff914aada55 100644
2322     --- a/drivers/net/wan/lapbether.c
2323     +++ b/drivers/net/wan/lapbether.c
2324     @@ -210,6 +210,8 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
2325    
2326     skb->dev = dev = lapbeth->ethdev;
2327    
2328     + skb_reset_network_header(skb);
2329     +
2330     dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);
2331    
2332     dev_queue_xmit(skb);
2333     @@ -340,6 +342,7 @@ static int lapbeth_new_device(struct net_device *dev)
2334     */
2335     ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len
2336     + dev->needed_headroom;
2337     + ndev->needed_tailroom = dev->needed_tailroom;
2338    
2339     lapbeth = netdev_priv(ndev);
2340     lapbeth->axdev = ndev;
2341     diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
2342     index 9642971e89cea..4578547659839 100644
2343     --- a/drivers/nfc/st95hf/core.c
2344     +++ b/drivers/nfc/st95hf/core.c
2345     @@ -966,7 +966,7 @@ static int st95hf_in_send_cmd(struct nfc_digital_dev *ddev,
2346     rc = down_killable(&stcontext->exchange_lock);
2347     if (rc) {
2348     WARN(1, "Semaphore is not found up in st95hf_in_send_cmd\n");
2349     - return rc;
2350     + goto free_skb_resp;
2351     }
2352    
2353     rc = st95hf_spi_send(&stcontext->spicontext, skb->data,
2354     diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2355     index 3cb017fa3a790..2d2673d360ff2 100644
2356     --- a/drivers/nvme/host/core.c
2357     +++ b/drivers/nvme/host/core.c
2358     @@ -4148,7 +4148,7 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl)
2359     }
2360     EXPORT_SYMBOL_GPL(nvme_unfreeze);
2361    
2362     -void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
2363     +int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
2364     {
2365     struct nvme_ns *ns;
2366    
2367     @@ -4159,6 +4159,7 @@ void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
2368     break;
2369     }
2370     up_read(&ctrl->namespaces_rwsem);
2371     + return timeout;
2372     }
2373     EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
2374    
2375     diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
2376     index 74b8818ac9a1e..3bb71f177dfda 100644
2377     --- a/drivers/nvme/host/fabrics.c
2378     +++ b/drivers/nvme/host/fabrics.c
2379     @@ -565,10 +565,14 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
2380     struct nvme_request *req = nvme_req(rq);
2381    
2382     /*
2383     - * If we are in some state of setup or teardown only allow
2384     - * internally generated commands.
2385     + * currently we have a problem sending passthru commands
2386     + * on the admin_q if the controller is not LIVE because we can't
2387     + * make sure that they are going out after the admin connect,
2388     + * controller enable and/or other commands in the initialization
2389     + * sequence. until the controller will be LIVE, fail with
2390     + * BLK_STS_RESOURCE so that they will be rescheduled.
2391     */
2392     - if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD))
2393     + if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
2394     return false;
2395    
2396     /*
2397     @@ -576,9 +580,8 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
2398     * which is require to set the queue live in the appropinquate states.
2399     */
2400     switch (ctrl->state) {
2401     - case NVME_CTRL_NEW:
2402     case NVME_CTRL_CONNECTING:
2403     - if (nvme_is_fabrics(req->cmd) &&
2404     + if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
2405     req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
2406     return true;
2407     break;
2408     diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
2409     index 056953bd8bd81..2bd9f7c3084f2 100644
2410     --- a/drivers/nvme/host/nvme.h
2411     +++ b/drivers/nvme/host/nvme.h
2412     @@ -485,7 +485,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl);
2413     void nvme_sync_queues(struct nvme_ctrl *ctrl);
2414     void nvme_unfreeze(struct nvme_ctrl *ctrl);
2415     void nvme_wait_freeze(struct nvme_ctrl *ctrl);
2416     -void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
2417     +int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
2418     void nvme_start_freeze(struct nvme_ctrl *ctrl);
2419    
2420     #define NVME_QID_ANY -1
2421     diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2422     index 100da11ce98cb..a91433bdf5de4 100644
2423     --- a/drivers/nvme/host/pci.c
2424     +++ b/drivers/nvme/host/pci.c
2425     @@ -1274,8 +1274,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
2426     dev_warn_ratelimited(dev->ctrl.device,
2427     "I/O %d QID %d timeout, disable controller\n",
2428     req->tag, nvmeq->qid);
2429     - nvme_dev_disable(dev, true);
2430     nvme_req(req)->flags |= NVME_REQ_CANCELLED;
2431     + nvme_dev_disable(dev, true);
2432     return BLK_EH_DONE;
2433     case NVME_CTRL_RESETTING:
2434     return BLK_EH_RESET_TIMER;
2435     @@ -1292,10 +1292,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
2436     dev_warn(dev->ctrl.device,
2437     "I/O %d QID %d timeout, reset controller\n",
2438     req->tag, nvmeq->qid);
2439     + nvme_req(req)->flags |= NVME_REQ_CANCELLED;
2440     nvme_dev_disable(dev, false);
2441     nvme_reset_ctrl(&dev->ctrl);
2442    
2443     - nvme_req(req)->flags |= NVME_REQ_CANCELLED;
2444     return BLK_EH_DONE;
2445     }
2446    
2447     diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
2448     index d0336545e1fe0..f0847f2bb117b 100644
2449     --- a/drivers/nvme/host/rdma.c
2450     +++ b/drivers/nvme/host/rdma.c
2451     @@ -110,6 +110,7 @@ struct nvme_rdma_ctrl {
2452     struct sockaddr_storage src_addr;
2453    
2454     struct nvme_ctrl ctrl;
2455     + struct mutex teardown_lock;
2456     bool use_inline_data;
2457     u32 io_queues[HCTX_MAX_TYPES];
2458     };
2459     @@ -898,7 +899,15 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
2460    
2461     if (!new) {
2462     nvme_start_queues(&ctrl->ctrl);
2463     - nvme_wait_freeze(&ctrl->ctrl);
2464     + if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
2465     + /*
2466     + * If we timed out waiting for freeze we are likely to
2467     + * be stuck. Fail the controller initialization just
2468     + * to be safe.
2469     + */
2470     + ret = -ENODEV;
2471     + goto out_wait_freeze_timed_out;
2472     + }
2473     blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
2474     ctrl->ctrl.queue_count - 1);
2475     nvme_unfreeze(&ctrl->ctrl);
2476     @@ -906,6 +915,9 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
2477    
2478     return 0;
2479    
2480     +out_wait_freeze_timed_out:
2481     + nvme_stop_queues(&ctrl->ctrl);
2482     + nvme_rdma_stop_io_queues(ctrl);
2483     out_cleanup_connect_q:
2484     if (new)
2485     blk_cleanup_queue(ctrl->ctrl.connect_q);
2486     @@ -920,6 +932,7 @@ out_free_io_queues:
2487     static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
2488     bool remove)
2489     {
2490     + mutex_lock(&ctrl->teardown_lock);
2491     blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
2492     nvme_rdma_stop_queue(&ctrl->queues[0]);
2493     if (ctrl->ctrl.admin_tagset) {
2494     @@ -930,11 +943,13 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
2495     if (remove)
2496     blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2497     nvme_rdma_destroy_admin_queue(ctrl, remove);
2498     + mutex_unlock(&ctrl->teardown_lock);
2499     }
2500    
2501     static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
2502     bool remove)
2503     {
2504     + mutex_lock(&ctrl->teardown_lock);
2505     if (ctrl->ctrl.queue_count > 1) {
2506     nvme_start_freeze(&ctrl->ctrl);
2507     nvme_stop_queues(&ctrl->ctrl);
2508     @@ -948,6 +963,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
2509     nvme_start_queues(&ctrl->ctrl);
2510     nvme_rdma_destroy_io_queues(ctrl, remove);
2511     }
2512     + mutex_unlock(&ctrl->teardown_lock);
2513     }
2514    
2515     static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
2516     @@ -1096,6 +1112,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
2517     if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
2518     return;
2519    
2520     + dev_warn(ctrl->ctrl.device, "starting error recovery\n");
2521     queue_work(nvme_reset_wq, &ctrl->err_work);
2522     }
2523    
2524     @@ -1699,6 +1716,22 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
2525     return 0;
2526     }
2527    
2528     +static void nvme_rdma_complete_timed_out(struct request *rq)
2529     +{
2530     + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
2531     + struct nvme_rdma_queue *queue = req->queue;
2532     + struct nvme_rdma_ctrl *ctrl = queue->ctrl;
2533     +
2534     + /* fence other contexts that may complete the command */
2535     + mutex_lock(&ctrl->teardown_lock);
2536     + nvme_rdma_stop_queue(queue);
2537     + if (!blk_mq_request_completed(rq)) {
2538     + nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
2539     + blk_mq_complete_request(rq);
2540     + }
2541     + mutex_unlock(&ctrl->teardown_lock);
2542     +}
2543     +
2544     static enum blk_eh_timer_return
2545     nvme_rdma_timeout(struct request *rq, bool reserved)
2546     {
2547     @@ -1709,29 +1742,29 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
2548     dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
2549     rq->tag, nvme_rdma_queue_idx(queue));
2550    
2551     - /*
2552     - * Restart the timer if a controller reset is already scheduled. Any
2553     - * timed out commands would be handled before entering the connecting
2554     - * state.
2555     - */
2556     - if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
2557     - return BLK_EH_RESET_TIMER;
2558     -
2559     if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
2560     /*
2561     - * Teardown immediately if controller times out while starting
2562     - * or we are already started error recovery. all outstanding
2563     - * requests are completed on shutdown, so we return BLK_EH_DONE.
2564     + * If we are resetting, connecting or deleting we should
2565     + * complete immediately because we may block controller
2566     + * teardown or setup sequence
2567     + * - ctrl disable/shutdown fabrics requests
2568     + * - connect requests
2569     + * - initialization admin requests
2570     + * - I/O requests that entered after unquiescing and
2571     + * the controller stopped responding
2572     + *
2573     + * All other requests should be cancelled by the error
2574     + * recovery work, so it's fine that we fail it here.
2575     */
2576     - flush_work(&ctrl->err_work);
2577     - nvme_rdma_teardown_io_queues(ctrl, false);
2578     - nvme_rdma_teardown_admin_queue(ctrl, false);
2579     + nvme_rdma_complete_timed_out(rq);
2580     return BLK_EH_DONE;
2581     }
2582    
2583     - dev_warn(ctrl->ctrl.device, "starting error recovery\n");
2584     + /*
2585     + * LIVE state should trigger the normal error recovery which will
2586     + * handle completing this request.
2587     + */
2588     nvme_rdma_error_recovery(ctrl);
2589     -
2590     return BLK_EH_RESET_TIMER;
2591     }
2592    
2593     @@ -1988,6 +2021,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
2594     return ERR_PTR(-ENOMEM);
2595     ctrl->ctrl.opts = opts;
2596     INIT_LIST_HEAD(&ctrl->list);
2597     + mutex_init(&ctrl->teardown_lock);
2598    
2599     if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2600     opts->trsvcid =
2601     diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
2602     index 0166ff0e4738e..9b81763b44d99 100644
2603     --- a/drivers/nvme/host/tcp.c
2604     +++ b/drivers/nvme/host/tcp.c
2605     @@ -110,6 +110,7 @@ struct nvme_tcp_ctrl {
2606     struct sockaddr_storage src_addr;
2607     struct nvme_ctrl ctrl;
2608    
2609     + struct mutex teardown_lock;
2610     struct work_struct err_work;
2611     struct delayed_work connect_work;
2612     struct nvme_tcp_request async_req;
2613     @@ -420,6 +421,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
2614     if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
2615     return;
2616    
2617     + dev_warn(ctrl->device, "starting error recovery\n");
2618     queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
2619     }
2620    
2621     @@ -1438,7 +1440,6 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
2622    
2623     if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
2624     return;
2625     -
2626     __nvme_tcp_stop_queue(queue);
2627     }
2628    
2629     @@ -1692,7 +1693,15 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
2630    
2631     if (!new) {
2632     nvme_start_queues(ctrl);
2633     - nvme_wait_freeze(ctrl);
2634     + if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
2635     + /*
2636     + * If we timed out waiting for freeze we are likely to
2637     + * be stuck. Fail the controller initialization just
2638     + * to be safe.
2639     + */
2640     + ret = -ENODEV;
2641     + goto out_wait_freeze_timed_out;
2642     + }
2643     blk_mq_update_nr_hw_queues(ctrl->tagset,
2644     ctrl->queue_count - 1);
2645     nvme_unfreeze(ctrl);
2646     @@ -1700,6 +1709,9 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
2647    
2648     return 0;
2649    
2650     +out_wait_freeze_timed_out:
2651     + nvme_stop_queues(ctrl);
2652     + nvme_tcp_stop_io_queues(ctrl);
2653     out_cleanup_connect_q:
2654     if (new)
2655     blk_cleanup_queue(ctrl->connect_q);
2656     @@ -1785,6 +1797,7 @@ out_free_queue:
2657     static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
2658     bool remove)
2659     {
2660     + mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
2661     blk_mq_quiesce_queue(ctrl->admin_q);
2662     nvme_tcp_stop_queue(ctrl, 0);
2663     if (ctrl->admin_tagset) {
2664     @@ -1795,13 +1808,16 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
2665     if (remove)
2666     blk_mq_unquiesce_queue(ctrl->admin_q);
2667     nvme_tcp_destroy_admin_queue(ctrl, remove);
2668     + mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
2669     }
2670    
2671     static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
2672     bool remove)
2673     {
2674     + mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
2675     if (ctrl->queue_count <= 1)
2676     - return;
2677     + goto out;
2678     + blk_mq_quiesce_queue(ctrl->admin_q);
2679     nvme_start_freeze(ctrl);
2680     nvme_stop_queues(ctrl);
2681     nvme_tcp_stop_io_queues(ctrl);
2682     @@ -1813,6 +1829,8 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
2683     if (remove)
2684     nvme_start_queues(ctrl);
2685     nvme_tcp_destroy_io_queues(ctrl, remove);
2686     +out:
2687     + mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
2688     }
2689    
2690     static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
2691     @@ -2051,40 +2069,55 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2692     nvme_tcp_queue_request(&ctrl->async_req);
2693     }
2694    
2695     +static void nvme_tcp_complete_timed_out(struct request *rq)
2696     +{
2697     + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2698     + struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2699     +
2700     + /* fence other contexts that may complete the command */
2701     + mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
2702     + nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2703     + if (!blk_mq_request_completed(rq)) {
2704     + nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
2705     + blk_mq_complete_request(rq);
2706     + }
2707     + mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
2708     +}
2709     +
2710     static enum blk_eh_timer_return
2711     nvme_tcp_timeout(struct request *rq, bool reserved)
2712     {
2713     struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2714     - struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
2715     + struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2716     struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2717    
2718     - /*
2719     - * Restart the timer if a controller reset is already scheduled. Any
2720     - * timed out commands would be handled before entering the connecting
2721     - * state.
2722     - */
2723     - if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
2724     - return BLK_EH_RESET_TIMER;
2725     -
2726     - dev_warn(ctrl->ctrl.device,
2727     + dev_warn(ctrl->device,
2728     "queue %d: timeout request %#x type %d\n",
2729     nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
2730    
2731     - if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
2732     + if (ctrl->state != NVME_CTRL_LIVE) {
2733     /*
2734     - * Teardown immediately if controller times out while starting
2735     - * or we are already started error recovery. all outstanding
2736     - * requests are completed on shutdown, so we return BLK_EH_DONE.
2737     + * If we are resetting, connecting or deleting we should
2738     + * complete immediately because we may block controller
2739     + * teardown or setup sequence
2740     + * - ctrl disable/shutdown fabrics requests
2741     + * - connect requests
2742     + * - initialization admin requests
2743     + * - I/O requests that entered after unquiescing and
2744     + * the controller stopped responding
2745     + *
2746     + * All other requests should be cancelled by the error
2747     + * recovery work, so it's fine that we fail it here.
2748     */
2749     - flush_work(&ctrl->err_work);
2750     - nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
2751     - nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
2752     + nvme_tcp_complete_timed_out(rq);
2753     return BLK_EH_DONE;
2754     }
2755    
2756     - dev_warn(ctrl->ctrl.device, "starting error recovery\n");
2757     - nvme_tcp_error_recovery(&ctrl->ctrl);
2758     -
2759     + /*
2760     + * LIVE state should trigger the normal error recovery which will
2761     + * handle completing this request.
2762     + */
2763     + nvme_tcp_error_recovery(ctrl);
2764     return BLK_EH_RESET_TIMER;
2765     }
2766    
2767     @@ -2311,6 +2344,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2768     nvme_tcp_reconnect_ctrl_work);
2769     INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2770     INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2771     + mutex_init(&ctrl->teardown_lock);
2772    
2773     if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2774     opts->trsvcid =
2775     diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
2776     index 22014e76d7714..e31823f19a0fa 100644
2777     --- a/drivers/nvme/target/tcp.c
2778     +++ b/drivers/nvme/target/tcp.c
2779     @@ -150,6 +150,11 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
2780     static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
2781     struct nvmet_tcp_cmd *cmd)
2782     {
2783     + if (unlikely(!queue->nr_cmds)) {
2784     + /* We didn't allocate cmds yet, send 0xffff */
2785     + return USHRT_MAX;
2786     + }
2787     +
2788     return cmd - queue->cmds;
2789     }
2790    
2791     @@ -847,7 +852,10 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
2792     struct nvme_tcp_data_pdu *data = &queue->pdu.data;
2793     struct nvmet_tcp_cmd *cmd;
2794    
2795     - cmd = &queue->cmds[data->ttag];
2796     + if (likely(queue->nr_cmds))
2797     + cmd = &queue->cmds[data->ttag];
2798     + else
2799     + cmd = &queue->connect;
2800    
2801     if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
2802     pr_err("ttag %u unexpected data offset %u (expected %u)\n",
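Note on the nvmet-tcp hunks above: both changes cover the window before the queue's command array has been sized (queue->nr_cmds == 0), when only the reserved connect command exists. With the hunks applied, tag generation and the H2C data lookup read roughly as below (reassembled from the diff for readability):

static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(!queue->nr_cmds)) {
		/* We didn't allocate cmds yet, send 0xffff */
		return USHRT_MAX;
	}

	return cmd - queue->cmds;
}

	/* ...and in nvmet_tcp_handle_h2c_data_pdu(): */
	if (likely(queue->nr_cmds))
		cmd = &queue->cmds[data->ttag];
	else
		cmd = &queue->connect;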
2803     diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
2804     index 27dd20a7fe131..5ddbf9a1f328b 100644
2805     --- a/drivers/phy/qualcomm/phy-qcom-qmp.c
2806     +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
2807     @@ -402,8 +402,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
2808     QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf),
2809     QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1),
2810     QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0),
2811     - QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f),
2812     - QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
2813     + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
2814     + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
2815     QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6),
2816     QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf),
2817     QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0),
2818     @@ -429,7 +429,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
2819     QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0),
2820     QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
2821     QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1),
2822     - QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa),
2823     QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1),
2824     QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
2825     QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1),
2826     @@ -438,7 +437,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
2827     QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
2828     QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
2829     QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
2830     - QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7),
2831     };
2832    
2833     static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
2834     @@ -446,6 +444,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
2835     QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6),
2836     QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2),
2837     QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
2838     + QMP_PHY_INIT_CFG(QSERDES_TX_EMP_POST1_LVL, 0x36),
2839     + QMP_PHY_INIT_CFG(QSERDES_TX_SLEW_CNTL, 0x0a),
2840     };
2841    
2842     static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
2843     @@ -456,7 +456,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
2844     QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
2845     QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
2846     QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4),
2847     - QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4),
2848     };
2849    
2850     static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = {
2851     @@ -1107,6 +1106,9 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
2852     .pwrdn_ctrl = SW_PWRDN,
2853     };
2854    
2855     +static const char * const ipq8074_pciephy_clk_l[] = {
2856     + "aux", "cfg_ahb",
2857     +};
2858     /* list of resets */
2859     static const char * const ipq8074_pciephy_reset_l[] = {
2860     "phy", "common",
2861     @@ -1124,8 +1126,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
2862     .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl),
2863     .pcs_tbl = ipq8074_pcie_pcs_tbl,
2864     .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
2865     - .clk_list = NULL,
2866     - .num_clks = 0,
2867     + .clk_list = ipq8074_pciephy_clk_l,
2868     + .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
2869     .reset_list = ipq8074_pciephy_reset_l,
2870     .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
2871     .vreg_list = NULL,
2872     diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
2873     index 335ea5d7ef400..f6b1e6359b8ca 100644
2874     --- a/drivers/phy/qualcomm/phy-qcom-qmp.h
2875     +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
2876     @@ -77,6 +77,8 @@
2877     #define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc
2878    
2879     /* Only for QMP V2 PHY - TX registers */
2880     +#define QSERDES_TX_EMP_POST1_LVL 0x018
2881     +#define QSERDES_TX_SLEW_CNTL 0x040
2882     #define QSERDES_TX_RES_CODE_LANE_OFFSET 0x054
2883     #define QSERDES_TX_DEBUG_BUS_SEL 0x064
2884     #define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x068
2885     diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
2886     index a17aebe0aa7a7..55fc80de5ef16 100644
2887     --- a/drivers/regulator/core.c
2888     +++ b/drivers/regulator/core.c
2889     @@ -235,8 +235,8 @@ static bool regulator_supply_is_couple(struct regulator_dev *rdev)
2890     static void regulator_unlock_recursive(struct regulator_dev *rdev,
2891     unsigned int n_coupled)
2892     {
2893     - struct regulator_dev *c_rdev;
2894     - int i;
2895     + struct regulator_dev *c_rdev, *supply_rdev;
2896     + int i, supply_n_coupled;
2897    
2898     for (i = n_coupled; i > 0; i--) {
2899     c_rdev = rdev->coupling_desc.coupled_rdevs[i - 1];
2900     @@ -244,10 +244,13 @@ static void regulator_unlock_recursive(struct regulator_dev *rdev,
2901     if (!c_rdev)
2902     continue;
2903    
2904     - if (c_rdev->supply && !regulator_supply_is_couple(c_rdev))
2905     - regulator_unlock_recursive(
2906     - c_rdev->supply->rdev,
2907     - c_rdev->coupling_desc.n_coupled);
2908     + if (c_rdev->supply && !regulator_supply_is_couple(c_rdev)) {
2909     + supply_rdev = c_rdev->supply->rdev;
2910     + supply_n_coupled = supply_rdev->coupling_desc.n_coupled;
2911     +
2912     + regulator_unlock_recursive(supply_rdev,
2913     + supply_n_coupled);
2914     + }
2915    
2916     regulator_unlock(c_rdev);
2917     }
2918     @@ -1456,7 +1459,7 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
2919     const char *consumer_dev_name,
2920     const char *supply)
2921     {
2922     - struct regulator_map *node;
2923     + struct regulator_map *node, *new_node;
2924     int has_dev;
2925    
2926     if (supply == NULL)
2927     @@ -1467,6 +1470,22 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
2928     else
2929     has_dev = 0;
2930    
2931     + new_node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL);
2932     + if (new_node == NULL)
2933     + return -ENOMEM;
2934     +
2935     + new_node->regulator = rdev;
2936     + new_node->supply = supply;
2937     +
2938     + if (has_dev) {
2939     + new_node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL);
2940     + if (new_node->dev_name == NULL) {
2941     + kfree(new_node);
2942     + return -ENOMEM;
2943     + }
2944     + }
2945     +
2946     + mutex_lock(&regulator_list_mutex);
2947     list_for_each_entry(node, &regulator_map_list, list) {
2948     if (node->dev_name && consumer_dev_name) {
2949     if (strcmp(node->dev_name, consumer_dev_name) != 0)
2950     @@ -1484,26 +1503,19 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
2951     node->regulator->desc->name,
2952     supply,
2953     dev_name(&rdev->dev), rdev_get_name(rdev));
2954     - return -EBUSY;
2955     + goto fail;
2956     }
2957    
2958     - node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL);
2959     - if (node == NULL)
2960     - return -ENOMEM;
2961     -
2962     - node->regulator = rdev;
2963     - node->supply = supply;
2964     -
2965     - if (has_dev) {
2966     - node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL);
2967     - if (node->dev_name == NULL) {
2968     - kfree(node);
2969     - return -ENOMEM;
2970     - }
2971     - }
2972     + list_add(&new_node->list, &regulator_map_list);
2973     + mutex_unlock(&regulator_list_mutex);
2974    
2975     - list_add(&node->list, &regulator_map_list);
2976     return 0;
2977     +
2978     +fail:
2979     + mutex_unlock(&regulator_list_mutex);
2980     + kfree(new_node->dev_name);
2981     + kfree(new_node);
2982     + return -EBUSY;
2983     }
2984    
2985     static void unset_regulator_supplies(struct regulator_dev *rdev)
2986     @@ -1575,44 +1587,53 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
2987     const char *supply_name)
2988     {
2989     struct regulator *regulator;
2990     - char buf[REG_STR_SIZE];
2991     - int err, size;
2992     + int err;
2993     +
2994     + if (dev) {
2995     + char buf[REG_STR_SIZE];
2996     + int size;
2997     +
2998     + size = snprintf(buf, REG_STR_SIZE, "%s-%s",
2999     + dev->kobj.name, supply_name);
3000     + if (size >= REG_STR_SIZE)
3001     + return NULL;
3002     +
3003     + supply_name = kstrdup(buf, GFP_KERNEL);
3004     + if (supply_name == NULL)
3005     + return NULL;
3006     + } else {
3007     + supply_name = kstrdup_const(supply_name, GFP_KERNEL);
3008     + if (supply_name == NULL)
3009     + return NULL;
3010     + }
3011    
3012     regulator = kzalloc(sizeof(*regulator), GFP_KERNEL);
3013     - if (regulator == NULL)
3014     + if (regulator == NULL) {
3015     + kfree(supply_name);
3016     return NULL;
3017     + }
3018    
3019     - regulator_lock(rdev);
3020     regulator->rdev = rdev;
3021     + regulator->supply_name = supply_name;
3022     +
3023     + regulator_lock(rdev);
3024     list_add(&regulator->list, &rdev->consumer_list);
3025     + regulator_unlock(rdev);
3026    
3027     if (dev) {
3028     regulator->dev = dev;
3029    
3030     /* Add a link to the device sysfs entry */
3031     - size = snprintf(buf, REG_STR_SIZE, "%s-%s",
3032     - dev->kobj.name, supply_name);
3033     - if (size >= REG_STR_SIZE)
3034     - goto overflow_err;
3035     -
3036     - regulator->supply_name = kstrdup(buf, GFP_KERNEL);
3037     - if (regulator->supply_name == NULL)
3038     - goto overflow_err;
3039     -
3040     err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
3041     - buf);
3042     + supply_name);
3043     if (err) {
3044     rdev_dbg(rdev, "could not add device link %s err %d\n",
3045     dev->kobj.name, err);
3046     /* non-fatal */
3047     }
3048     - } else {
3049     - regulator->supply_name = kstrdup_const(supply_name, GFP_KERNEL);
3050     - if (regulator->supply_name == NULL)
3051     - goto overflow_err;
3052     }
3053    
3054     - regulator->debugfs = debugfs_create_dir(regulator->supply_name,
3055     + regulator->debugfs = debugfs_create_dir(supply_name,
3056     rdev->debugfs);
3057     if (!regulator->debugfs) {
3058     rdev_dbg(rdev, "Failed to create debugfs directory\n");
3059     @@ -1637,13 +1658,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
3060     _regulator_is_enabled(rdev))
3061     regulator->always_on = true;
3062    
3063     - regulator_unlock(rdev);
3064     return regulator;
3065     -overflow_err:
3066     - list_del(&regulator->list);
3067     - kfree(regulator);
3068     - regulator_unlock(rdev);
3069     - return NULL;
3070     }
3071    
3072     static int _regulator_get_enable_time(struct regulator_dev *rdev)
3073     @@ -2217,10 +2232,13 @@ EXPORT_SYMBOL_GPL(regulator_bulk_unregister_supply_alias);
3074     static int regulator_ena_gpio_request(struct regulator_dev *rdev,
3075     const struct regulator_config *config)
3076     {
3077     - struct regulator_enable_gpio *pin;
3078     + struct regulator_enable_gpio *pin, *new_pin;
3079     struct gpio_desc *gpiod;
3080    
3081     gpiod = config->ena_gpiod;
3082     + new_pin = kzalloc(sizeof(*new_pin), GFP_KERNEL);
3083     +
3084     + mutex_lock(&regulator_list_mutex);
3085    
3086     list_for_each_entry(pin, &regulator_ena_gpio_list, list) {
3087     if (pin->gpiod == gpiod) {
3088     @@ -2229,9 +2247,13 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev,
3089     }
3090     }
3091    
3092     - pin = kzalloc(sizeof(struct regulator_enable_gpio), GFP_KERNEL);
3093     - if (pin == NULL)
3094     + if (new_pin == NULL) {
3095     + mutex_unlock(&regulator_list_mutex);
3096     return -ENOMEM;
3097     + }
3098     +
3099     + pin = new_pin;
3100     + new_pin = NULL;
3101    
3102     pin->gpiod = gpiod;
3103     list_add(&pin->list, &regulator_ena_gpio_list);
3104     @@ -2239,6 +2261,10 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev,
3105     update_ena_gpio_to_rdev:
3106     pin->request_count++;
3107     rdev->ena_pin = pin;
3108     +
3109     + mutex_unlock(&regulator_list_mutex);
3110     + kfree(new_pin);
3111     +
3112     return 0;
3113     }
3114    
3115     @@ -4857,13 +4883,9 @@ static void regulator_resolve_coupling(struct regulator_dev *rdev)
3116     return;
3117     }
3118    
3119     - regulator_lock(c_rdev);
3120     -
3121     c_desc->coupled_rdevs[i] = c_rdev;
3122     c_desc->n_resolved++;
3123    
3124     - regulator_unlock(c_rdev);
3125     -
3126     regulator_resolve_coupling(c_rdev);
3127     }
3128     }
3129     @@ -4948,7 +4970,10 @@ static int regulator_init_coupling(struct regulator_dev *rdev)
3130     if (!of_check_coupling_data(rdev))
3131     return -EPERM;
3132    
3133     + mutex_lock(&regulator_list_mutex);
3134     rdev->coupling_desc.coupler = regulator_find_coupler(rdev);
3135     + mutex_unlock(&regulator_list_mutex);
3136     +
3137     if (IS_ERR(rdev->coupling_desc.coupler)) {
3138     err = PTR_ERR(rdev->coupling_desc.coupler);
3139     rdev_err(rdev, "failed to get coupler: %d\n", err);
3140     @@ -5043,6 +5068,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
3141     ret = -ENOMEM;
3142     goto rinse;
3143     }
3144     + device_initialize(&rdev->dev);
3145    
3146     /*
3147     * Duplicate the config so the driver could override it after
3148     @@ -5050,9 +5076,8 @@ regulator_register(const struct regulator_desc *regulator_desc,
3149     */
3150     config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL);
3151     if (config == NULL) {
3152     - kfree(rdev);
3153     ret = -ENOMEM;
3154     - goto rinse;
3155     + goto clean;
3156     }
3157    
3158     init_data = regulator_of_get_init_data(dev, regulator_desc, config,
3159     @@ -5064,10 +5089,8 @@ regulator_register(const struct regulator_desc *regulator_desc,
3160     * from a gpio extender or something else.
3161     */
3162     if (PTR_ERR(init_data) == -EPROBE_DEFER) {
3163     - kfree(config);
3164     - kfree(rdev);
3165     ret = -EPROBE_DEFER;
3166     - goto rinse;
3167     + goto clean;
3168     }
3169    
3170     /*
3171     @@ -5108,9 +5131,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
3172     }
3173    
3174     if (config->ena_gpiod) {
3175     - mutex_lock(&regulator_list_mutex);
3176     ret = regulator_ena_gpio_request(rdev, config);
3177     - mutex_unlock(&regulator_list_mutex);
3178     if (ret != 0) {
3179     rdev_err(rdev, "Failed to request enable GPIO: %d\n",
3180     ret);
3181     @@ -5122,7 +5143,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
3182     }
3183    
3184     /* register with sysfs */
3185     - device_initialize(&rdev->dev);
3186     rdev->dev.class = &regulator_class;
3187     rdev->dev.parent = dev;
3188     dev_set_name(&rdev->dev, "regulator.%lu",
3189     @@ -5150,27 +5170,22 @@ regulator_register(const struct regulator_desc *regulator_desc,
3190     if (ret < 0)
3191     goto wash;
3192    
3193     - mutex_lock(&regulator_list_mutex);
3194     ret = regulator_init_coupling(rdev);
3195     - mutex_unlock(&regulator_list_mutex);
3196     if (ret < 0)
3197     goto wash;
3198    
3199     /* add consumers devices */
3200     if (init_data) {
3201     - mutex_lock(&regulator_list_mutex);
3202     for (i = 0; i < init_data->num_consumer_supplies; i++) {
3203     ret = set_consumer_device_supply(rdev,
3204     init_data->consumer_supplies[i].dev_name,
3205     init_data->consumer_supplies[i].supply);
3206     if (ret < 0) {
3207     - mutex_unlock(&regulator_list_mutex);
3208     dev_err(dev, "Failed to set supply %s\n",
3209     init_data->consumer_supplies[i].supply);
3210     goto unset_supplies;
3211     }
3212     }
3213     - mutex_unlock(&regulator_list_mutex);
3214     }
3215    
3216     if (!rdev->desc->ops->get_voltage &&
3217     @@ -5205,13 +5220,11 @@ wash:
3218     mutex_lock(&regulator_list_mutex);
3219     regulator_ena_gpio_free(rdev);
3220     mutex_unlock(&regulator_list_mutex);
3221     - put_device(&rdev->dev);
3222     - rdev = NULL;
3223     clean:
3224     if (dangling_of_gpiod)
3225     gpiod_put(config->ena_gpiod);
3226     - kfree(rdev);
3227     kfree(config);
3228     + put_device(&rdev->dev);
3229     rinse:
3230     if (dangling_cfg_gpiod)
3231     gpiod_put(cfg->ena_gpiod);
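Note on the regulator core hunks above: set_consumer_device_supply() and regulator_ena_gpio_request() now build their new list node (including any kstrdup) before taking regulator_list_mutex, so no sleeping allocation happens under the lock and the duplicate check simply frees the unused pre-allocation; device_initialize() also moves earlier so the error paths can funnel through put_device(). A generic, self-contained sketch of that allocate-then-lock pattern is below; the my_map, my_list and my_lock names are invented for illustration and are not part of the driver.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_map {
	struct list_head list;
	const char *name;
};

static LIST_HEAD(my_list);
static DEFINE_MUTEX(my_lock);

static int my_map_add(const char *name)
{
	struct my_map *node, *new_node;

	/* Allocate outside the lock; GFP_KERNEL may sleep. */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	new_node->name = name;

	mutex_lock(&my_lock);
	list_for_each_entry(node, &my_list, list) {
		if (!strcmp(node->name, name)) {
			/* Duplicate: drop the unused pre-allocation. */
			mutex_unlock(&my_lock);
			kfree(new_node);
			return -EBUSY;
		}
	}
	list_add(&new_node->list, &my_list);
	mutex_unlock(&my_lock);
	return 0;
}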
3232     diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
3233     index e9e00740f7ca6..dd755a56cf521 100644
3234     --- a/drivers/scsi/libsas/sas_ata.c
3235     +++ b/drivers/scsi/libsas/sas_ata.c
3236     @@ -208,7 +208,10 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
3237     task->num_scatter = si;
3238     }
3239    
3240     - task->data_dir = qc->dma_dir;
3241     + if (qc->tf.protocol == ATA_PROT_NODATA)
3242     + task->data_dir = DMA_NONE;
3243     + else
3244     + task->data_dir = qc->dma_dir;
3245     task->scatter = qc->sg;
3246     task->ata_task.retry_count = 1;
3247     task->task_state_flags = SAS_TASK_STATE_PENDING;
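Note on the libsas hunk above: the DMA direction handed to the LLDD is now derived from the ATA taskfile protocol, so commands that move no data are always flagged DMA_NONE instead of inheriting whatever qc->dma_dir happens to hold. After the hunk the assignment reads:

	if (qc->tf.protocol == ATA_PROT_NODATA)
		task->data_dir = DMA_NONE;
	else
		task->data_dir = qc->dma_dir;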
3248     diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
3249     index 5bcef9769740b..5dcd7b9b72ced 100644
3250     --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
3251     +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
3252     @@ -3738,7 +3738,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
3253     instance = irq_ctx->instance;
3254    
3255     if (irq_ctx->irq_line_enable) {
3256     - disable_irq(irq_ctx->os_irq);
3257     + disable_irq_nosync(irq_ctx->os_irq);
3258     irq_ctx->irq_line_enable = false;
3259     }
3260    
3261     diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
3262     index 7fd1d731555f9..b7e44634d0dc2 100644
3263     --- a/drivers/scsi/mpt3sas/mpt3sas_base.c
3264     +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
3265     @@ -1632,7 +1632,7 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
3266     reply_q = container_of(irqpoll, struct adapter_reply_queue,
3267     irqpoll);
3268     if (reply_q->irq_line_enable) {
3269     - disable_irq(reply_q->os_irq);
3270     + disable_irq_nosync(reply_q->os_irq);
3271     reply_q->irq_line_enable = false;
3272     }
3273     num_entries = _base_process_reply_queue(reply_q);
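Note on the megaraid_sas and mpt3sas hunks above: both switch to disable_irq_nosync() inside their irq_poll callbacks. disable_irq() additionally waits for any in-flight handler on the line and may sleep, which is not safe from the softirq context irq_poll runs in; the _nosync variant only masks the line. A self-contained sketch of the shape of such a poll callback follows; the my_reply_queue structure and the budget accounting are invented for illustration.

#include <linux/interrupt.h>
#include <linux/irq_poll.h>
#include <linux/kernel.h>

struct my_reply_queue {
	struct irq_poll	irqpoll;
	int		os_irq;
	bool		irq_line_enable;
};

static int my_irqpoll(struct irq_poll *iop, int budget)
{
	struct my_reply_queue *q = container_of(iop, struct my_reply_queue,
						irqpoll);
	int done = 0;

	if (q->irq_line_enable) {
		/*
		 * disable_irq() would wait for the hard IRQ handler that
		 * scheduled this poll; only mask the line instead.
		 */
		disable_irq_nosync(q->os_irq);
		q->irq_line_enable = false;
	}

	/* ... process up to 'budget' completions, counting them in 'done' ... */

	if (done < budget) {
		irq_poll_complete(iop);
		q->irq_line_enable = true;
		enable_irq(q->os_irq);
	}
	return done;
}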
3274     diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
3275     index e69f94a8c3a86..de7c57e17710a 100644
3276     --- a/drivers/soundwire/stream.c
3277     +++ b/drivers/soundwire/stream.c
3278     @@ -702,6 +702,7 @@ error:
3279     kfree(wbuf);
3280     error_1:
3281     kfree(wr_msg);
3282     + bus->defer_msg.msg = NULL;
3283     return ret;
3284     }
3285    
3286     @@ -825,9 +826,10 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
3287     error:
3288     list_for_each_entry(m_rt, &stream->master_list, stream_node) {
3289     bus = m_rt->bus;
3290     -
3291     - kfree(bus->defer_msg.msg->buf);
3292     - kfree(bus->defer_msg.msg);
3293     + if (bus->defer_msg.msg) {
3294     + kfree(bus->defer_msg.msg->buf);
3295     + kfree(bus->defer_msg.msg);
3296     + }
3297     }
3298    
3299     msg_unlock:
3300     diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
3301     index 8146c2d91d307..9d8ceb63f7db1 100644
3302     --- a/drivers/spi/spi-stm32.c
3303     +++ b/drivers/spi/spi-stm32.c
3304     @@ -931,7 +931,11 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
3305     }
3306    
3307     if (sr & STM32H7_SPI_SR_SUSP) {
3308     - dev_warn(spi->dev, "Communication suspended\n");
3309     + static DEFINE_RATELIMIT_STATE(rs,
3310     + DEFAULT_RATELIMIT_INTERVAL * 10,
3311     + 1);
3312     + if (__ratelimit(&rs))
3313     + dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
3314     if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
3315     stm32h7_spi_read_rxfifo(spi, false);
3316     /*
3317     @@ -2050,7 +2054,7 @@ static int stm32_spi_resume(struct device *dev)
3318     }
3319    
3320     ret = pm_runtime_get_sync(dev);
3321     - if (ret) {
3322     + if (ret < 0) {
3323     dev_err(dev, "Unable to power device:%d\n", ret);
3324     return ret;
3325     }
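Note on the stm32 SPI hunk above: the per-interrupt "Communication suspended" warning is demoted to debug level and gated by a local ratelimit whose interval is ten times the default (the driver additionally keeps dev_dbg_ratelimited() inside the gate, which stacks the default ratelimit on top). A minimal, self-contained sketch of that local-ratelimit pattern, with report_suspend() as a made-up wrapper:

#include <linux/device.h>
#include <linux/ratelimit.h>

static void report_suspend(struct device *dev)
{
	/* One shared state for this call site: at most one message every
	 * 10 * DEFAULT_RATELIMIT_INTERVAL (50 seconds by default). */
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL * 10, 1);

	if (__ratelimit(&rs))
		dev_dbg(dev, "Communication suspended\n");
}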
3326     diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
3327     index 4ac30accf226a..cc329b990e165 100644
3328     --- a/drivers/staging/greybus/audio_topology.c
3329     +++ b/drivers/staging/greybus/audio_topology.c
3330     @@ -460,6 +460,15 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol,
3331     val = ucontrol->value.integer.value[0] & mask;
3332     connect = !!val;
3333    
3334     + ret = gb_pm_runtime_get_sync(bundle);
3335     + if (ret)
3336     + return ret;
3337     +
3338     + ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id,
3339     + GB_AUDIO_INVALID_INDEX, &gbvalue);
3340     + if (ret)
3341     + goto exit;
3342     +
3343     /* update ucontrol */
3344     if (gbvalue.value.integer_value[0] != val) {
3345     for (wi = 0; wi < wlist->num_widgets; wi++) {
3346     @@ -473,25 +482,17 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol,
3347     gbvalue.value.integer_value[0] =
3348     cpu_to_le32(ucontrol->value.integer.value[0]);
3349    
3350     - ret = gb_pm_runtime_get_sync(bundle);
3351     - if (ret)
3352     - return ret;
3353     -
3354     ret = gb_audio_gb_set_control(module->mgmt_connection,
3355     data->ctl_id,
3356     GB_AUDIO_INVALID_INDEX, &gbvalue);
3357     -
3358     - gb_pm_runtime_put_autosuspend(bundle);
3359     -
3360     - if (ret) {
3361     - dev_err_ratelimited(codec->dev,
3362     - "%d:Error in %s for %s\n", ret,
3363     - __func__, kcontrol->id.name);
3364     - return ret;
3365     - }
3366     }
3367    
3368     - return 0;
3369     +exit:
3370     + gb_pm_runtime_put_autosuspend(bundle);
3371     + if (ret)
3372     + dev_err_ratelimited(codec_dev, "%d:Error in %s for %s\n", ret,
3373     + __func__, kcontrol->id.name);
3374     + return ret;
3375     }
3376    
3377     #define SOC_DAPM_MIXER_GB(xname, kcount, data) \
3378     diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
3379     index e29c14e0ed498..ed4ff78dd02aa 100644
3380     --- a/drivers/staging/wlan-ng/hfa384x_usb.c
3381     +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
3382     @@ -526,13 +526,8 @@ static void hfa384x_usb_defer(struct work_struct *data)
3383     */
3384     void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
3385     {
3386     - memset(hw, 0, sizeof(*hw));
3387     hw->usb = usb;
3388    
3389     - /* set up the endpoints */
3390     - hw->endp_in = usb_rcvbulkpipe(usb, 1);
3391     - hw->endp_out = usb_sndbulkpipe(usb, 2);
3392     -
3393     /* Set up the waitq */
3394     init_waitqueue_head(&hw->cmdq);
3395    
3396     diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
3397     index 8d32b1603d10a..9eee72aff7233 100644
3398     --- a/drivers/staging/wlan-ng/prism2usb.c
3399     +++ b/drivers/staging/wlan-ng/prism2usb.c
3400     @@ -61,23 +61,14 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
3401     const struct usb_device_id *id)
3402     {
3403     struct usb_device *dev;
3404     - const struct usb_endpoint_descriptor *epd;
3405     - const struct usb_host_interface *iface_desc = interface->cur_altsetting;
3406     + struct usb_endpoint_descriptor *bulk_in, *bulk_out;
3407     + struct usb_host_interface *iface_desc = interface->cur_altsetting;
3408     struct wlandevice *wlandev = NULL;
3409     struct hfa384x *hw = NULL;
3410     int result = 0;
3411    
3412     - if (iface_desc->desc.bNumEndpoints != 2) {
3413     - result = -ENODEV;
3414     - goto failed;
3415     - }
3416     -
3417     - result = -EINVAL;
3418     - epd = &iface_desc->endpoint[1].desc;
3419     - if (!usb_endpoint_is_bulk_in(epd))
3420     - goto failed;
3421     - epd = &iface_desc->endpoint[2].desc;
3422     - if (!usb_endpoint_is_bulk_out(epd))
3423     + result = usb_find_common_endpoints(iface_desc, &bulk_in, &bulk_out, NULL, NULL);
3424     + if (result)
3425     goto failed;
3426    
3427     dev = interface_to_usbdev(interface);
3428     @@ -96,6 +87,8 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
3429     }
3430    
3431     /* Initialize the hw data */
3432     + hw->endp_in = usb_rcvbulkpipe(dev, bulk_in->bEndpointAddress);
3433     + hw->endp_out = usb_sndbulkpipe(dev, bulk_out->bEndpointAddress);
3434     hfa384x_create(hw, dev);
3435     hw->wlandev = wlandev;
3436    
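Note on the wlan-ng hunks above: instead of hard-coding bulk endpoints 1 (in) and 2 (out), the probe routine now lets the USB core locate one bulk-in and one bulk-out endpoint and builds the pipes from the discovered addresses; hfa384x_create() accordingly stops zeroing the already-initialized hw structure. A self-contained sketch of that discovery pattern follows; sketch_probe() and the trimmed error handling are illustrative only.

#include <linux/usb.h>

static int sketch_probe(struct usb_interface *interface)
{
	struct usb_host_interface *alt = interface->cur_altsetting;
	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
	struct usb_device *udev = interface_to_usbdev(interface);
	unsigned int pipe_in, pipe_out;
	int ret;

	/* Fails if the interface lacks one bulk-in and one bulk-out ep. */
	ret = usb_find_common_endpoints(alt, &bulk_in, &bulk_out, NULL, NULL);
	if (ret)
		return ret;

	pipe_in  = usb_rcvbulkpipe(udev, bulk_in->bEndpointAddress);
	pipe_out = usb_sndbulkpipe(udev, bulk_out->bEndpointAddress);

	/* ... store pipe_in/pipe_out in the driver's hardware state ... */
	(void)pipe_in;
	(void)pipe_out;
	return 0;
}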
3437     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
3438     index d1ce94c608a9f..bca183369ad8b 100644
3439     --- a/drivers/target/iscsi/iscsi_target.c
3440     +++ b/drivers/target/iscsi/iscsi_target.c
3441     @@ -1386,14 +1386,27 @@ static u32 iscsit_do_crypto_hash_sg(
3442     sg = cmd->first_data_sg;
3443     page_off = cmd->first_data_sg_off;
3444    
3445     + if (data_length && page_off) {
3446     + struct scatterlist first_sg;
3447     + u32 len = min_t(u32, data_length, sg->length - page_off);
3448     +
3449     + sg_init_table(&first_sg, 1);
3450     + sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off);
3451     +
3452     + ahash_request_set_crypt(hash, &first_sg, NULL, len);
3453     + crypto_ahash_update(hash);
3454     +
3455     + data_length -= len;
3456     + sg = sg_next(sg);
3457     + }
3458     +
3459     while (data_length) {
3460     - u32 cur_len = min_t(u32, data_length, (sg->length - page_off));
3461     + u32 cur_len = min_t(u32, data_length, sg->length);
3462    
3463     ahash_request_set_crypt(hash, sg, NULL, cur_len);
3464     crypto_ahash_update(hash);
3465    
3466     data_length -= cur_len;
3467     - page_off = 0;
3468     /* iscsit_map_iovec has already checked for invalid sg pointers */
3469     sg = sg_next(sg);
3470     }
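Note on the iscsi_target hunk above: ahash_request_set_crypt() takes whole scatterlist entries and has no starting-offset argument, so a non-zero first_data_sg_off is now handled by building a one-entry scatterlist for the partial first segment; the main loop then only ever walks full entries. With the hunk applied the digest walk reads roughly:

	sg = cmd->first_data_sg;
	page_off = cmd->first_data_sg_off;

	if (data_length && page_off) {
		struct scatterlist first_sg;
		u32 len = min_t(u32, data_length, sg->length - page_off);

		/* Hash the tail of the first entry via a local one-entry table. */
		sg_init_table(&first_sg, 1);
		sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off);

		ahash_request_set_crypt(hash, &first_sg, NULL, len);
		crypto_ahash_update(hash);

		data_length -= len;
		sg = sg_next(sg);
	}

	while (data_length) {
		u32 cur_len = min_t(u32, data_length, sg->length);

		ahash_request_set_crypt(hash, sg, NULL, cur_len);
		crypto_ahash_update(hash);

		data_length -= cur_len;
		/* iscsit_map_iovec has already checked for invalid sg pointers */
		sg = sg_next(sg);
	}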
3471     diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
3472     index 731ee67fe914b..0cc5ea195273f 100644
3473     --- a/drivers/target/iscsi/iscsi_target_login.c
3474     +++ b/drivers/target/iscsi/iscsi_target_login.c
3475     @@ -1172,7 +1172,7 @@ void iscsit_free_conn(struct iscsi_conn *conn)
3476     }
3477    
3478     void iscsi_target_login_sess_out(struct iscsi_conn *conn,
3479     - struct iscsi_np *np, bool zero_tsih, bool new_sess)
3480     + bool zero_tsih, bool new_sess)
3481     {
3482     if (!new_sess)
3483     goto old_sess_out;
3484     @@ -1190,7 +1190,6 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
3485     conn->sess = NULL;
3486    
3487     old_sess_out:
3488     - iscsi_stop_login_thread_timer(np);
3489     /*
3490     * If login negotiation fails check if the Time2Retain timer
3491     * needs to be restarted.
3492     @@ -1430,8 +1429,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
3493     new_sess_out:
3494     new_sess = true;
3495     old_sess_out:
3496     + iscsi_stop_login_thread_timer(np);
3497     tpg_np = conn->tpg_np;
3498     - iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess);
3499     + iscsi_target_login_sess_out(conn, zero_tsih, new_sess);
3500     new_sess = false;
3501    
3502     if (tpg) {
3503     diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
3504     index 3b8e3639ff5d0..fc95e6150253f 100644
3505     --- a/drivers/target/iscsi/iscsi_target_login.h
3506     +++ b/drivers/target/iscsi/iscsi_target_login.h
3507     @@ -22,8 +22,7 @@ extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
3508     extern void iscsit_free_conn(struct iscsi_conn *);
3509     extern int iscsit_start_kthreads(struct iscsi_conn *);
3510     extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
3511     -extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
3512     - bool, bool);
3513     +extern void iscsi_target_login_sess_out(struct iscsi_conn *, bool, bool);
3514     extern int iscsi_target_login_thread(void *);
3515     extern void iscsi_handle_login_thread_timeout(struct timer_list *t);
3516    
3517     diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
3518     index 685d771b51d41..e32d93b927428 100644
3519     --- a/drivers/target/iscsi/iscsi_target_nego.c
3520     +++ b/drivers/target/iscsi/iscsi_target_nego.c
3521     @@ -535,12 +535,11 @@ static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned in
3522    
3523     static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
3524     {
3525     - struct iscsi_np *np = login->np;
3526     bool zero_tsih = login->zero_tsih;
3527    
3528     iscsi_remove_failed_auth_entry(conn);
3529     iscsi_target_nego_release(conn);
3530     - iscsi_target_login_sess_out(conn, np, zero_tsih, true);
3531     + iscsi_target_login_sess_out(conn, zero_tsih, true);
3532     }
3533    
3534     struct conn_timeout {
3535     diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
3536     index a1ac2f0723b0b..041c68ea329f4 100644
3537     --- a/drivers/usb/core/message.c
3538     +++ b/drivers/usb/core/message.c
3539     @@ -1204,6 +1204,34 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
3540     }
3541     }
3542    
3543     +/*
3544     + * usb_disable_device_endpoints -- Disable all endpoints for a device
3545     + * @dev: the device whose endpoints are being disabled
3546     + * @skip_ep0: 0 to disable endpoint 0, 1 to skip it.
3547     + */
3548     +static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0)
3549     +{
3550     + struct usb_hcd *hcd = bus_to_hcd(dev->bus);
3551     + int i;
3552     +
3553     + if (hcd->driver->check_bandwidth) {
3554     + /* First pass: Cancel URBs, leave endpoint pointers intact. */
3555     + for (i = skip_ep0; i < 16; ++i) {
3556     + usb_disable_endpoint(dev, i, false);
3557     + usb_disable_endpoint(dev, i + USB_DIR_IN, false);
3558     + }
3559     + /* Remove endpoints from the host controller internal state */
3560     + mutex_lock(hcd->bandwidth_mutex);
3561     + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
3562     + mutex_unlock(hcd->bandwidth_mutex);
3563     + }
3564     + /* Second pass: remove endpoint pointers */
3565     + for (i = skip_ep0; i < 16; ++i) {
3566     + usb_disable_endpoint(dev, i, true);
3567     + usb_disable_endpoint(dev, i + USB_DIR_IN, true);
3568     + }
3569     +}
3570     +
3571     /**
3572     * usb_disable_device - Disable all the endpoints for a USB device
3573     * @dev: the device whose endpoints are being disabled
3574     @@ -1217,7 +1245,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
3575     void usb_disable_device(struct usb_device *dev, int skip_ep0)
3576     {
3577     int i;
3578     - struct usb_hcd *hcd = bus_to_hcd(dev->bus);
3579    
3580     /* getting rid of interfaces will disconnect
3581     * any drivers bound to them (a key side effect)
3582     @@ -1263,22 +1290,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
3583    
3584     dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
3585     skip_ep0 ? "non-ep0" : "all");
3586     - if (hcd->driver->check_bandwidth) {
3587     - /* First pass: Cancel URBs, leave endpoint pointers intact. */
3588     - for (i = skip_ep0; i < 16; ++i) {
3589     - usb_disable_endpoint(dev, i, false);
3590     - usb_disable_endpoint(dev, i + USB_DIR_IN, false);
3591     - }
3592     - /* Remove endpoints from the host controller internal state */
3593     - mutex_lock(hcd->bandwidth_mutex);
3594     - usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
3595     - mutex_unlock(hcd->bandwidth_mutex);
3596     - /* Second pass: remove endpoint pointers */
3597     - }
3598     - for (i = skip_ep0; i < 16; ++i) {
3599     - usb_disable_endpoint(dev, i, true);
3600     - usb_disable_endpoint(dev, i + USB_DIR_IN, true);
3601     - }
3602     +
3603     + usb_disable_device_endpoints(dev, skip_ep0);
3604     }
3605    
3606     /**
3607     @@ -1521,6 +1534,9 @@ EXPORT_SYMBOL_GPL(usb_set_interface);
3608     * The caller must own the device lock.
3609     *
3610     * Return: Zero on success, else a negative error code.
3611     + *
3612     + * If this routine fails the device will probably be in an unusable state
3613     + * with endpoints disabled, and interfaces only partially enabled.
3614     */
3615     int usb_reset_configuration(struct usb_device *dev)
3616     {
3617     @@ -1536,10 +1552,7 @@ int usb_reset_configuration(struct usb_device *dev)
3618     * calls during probe() are fine
3619     */
3620    
3621     - for (i = 1; i < 16; ++i) {
3622     - usb_disable_endpoint(dev, i, true);
3623     - usb_disable_endpoint(dev, i + USB_DIR_IN, true);
3624     - }
3625     + usb_disable_device_endpoints(dev, 1); /* skip ep0*/
3626    
3627     config = dev->actconfig;
3628     retval = 0;
3629     @@ -1552,34 +1565,10 @@ int usb_reset_configuration(struct usb_device *dev)
3630     mutex_unlock(hcd->bandwidth_mutex);
3631     return -ENOMEM;
3632     }
3633     - /* Make sure we have enough bandwidth for each alternate setting 0 */
3634     - for (i = 0; i < config->desc.bNumInterfaces; i++) {
3635     - struct usb_interface *intf = config->interface[i];
3636     - struct usb_host_interface *alt;
3637    
3638     - alt = usb_altnum_to_altsetting(intf, 0);
3639     - if (!alt)
3640     - alt = &intf->altsetting[0];
3641     - if (alt != intf->cur_altsetting)
3642     - retval = usb_hcd_alloc_bandwidth(dev, NULL,
3643     - intf->cur_altsetting, alt);
3644     - if (retval < 0)
3645     - break;
3646     - }
3647     - /* If not, reinstate the old alternate settings */
3648     + /* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */
3649     + retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL);
3650     if (retval < 0) {
3651     -reset_old_alts:
3652     - for (i--; i >= 0; i--) {
3653     - struct usb_interface *intf = config->interface[i];
3654     - struct usb_host_interface *alt;
3655     -
3656     - alt = usb_altnum_to_altsetting(intf, 0);
3657     - if (!alt)
3658     - alt = &intf->altsetting[0];
3659     - if (alt != intf->cur_altsetting)
3660     - usb_hcd_alloc_bandwidth(dev, NULL,
3661     - alt, intf->cur_altsetting);
3662     - }
3663     usb_enable_lpm(dev);
3664     mutex_unlock(hcd->bandwidth_mutex);
3665     return retval;
3666     @@ -1588,8 +1577,12 @@ reset_old_alts:
3667     USB_REQ_SET_CONFIGURATION, 0,
3668     config->desc.bConfigurationValue, 0,
3669     NULL, 0, USB_CTRL_SET_TIMEOUT);
3670     - if (retval < 0)
3671     - goto reset_old_alts;
3672     + if (retval < 0) {
3673     + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
3674     + usb_enable_lpm(dev);
3675     + mutex_unlock(hcd->bandwidth_mutex);
3676     + return retval;
3677     + }
3678     mutex_unlock(hcd->bandwidth_mutex);
3679    
3680     /* re-init hc/hcd interface/endpoint state */
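Note on the usb/core/message.c hunks above: the two-pass endpoint shutdown that usb_disable_device() carried inline is factored into usb_disable_device_endpoints(), which usb_reset_configuration() now reuses (it previously only dropped endpoint pointers), and the per-interface altsetting-0 checks are replaced by a single usb_hcd_alloc_bandwidth(dev, config, NULL, NULL) call, with the bandwidth dropped again if SET_CONFIGURATION fails. Reassembled from the diff for readability, the new helper reads:

static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0)
{
	struct usb_hcd *hcd = bus_to_hcd(dev->bus);
	int i;

	if (hcd->driver->check_bandwidth) {
		/* First pass: cancel URBs, leave endpoint pointers intact. */
		for (i = skip_ep0; i < 16; ++i) {
			usb_disable_endpoint(dev, i, false);
			usb_disable_endpoint(dev, i + USB_DIR_IN, false);
		}
		/* Remove endpoints from the host controller internal state. */
		mutex_lock(hcd->bandwidth_mutex);
		usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
		mutex_unlock(hcd->bandwidth_mutex);
	}
	/* Second pass: remove endpoint pointers. */
	for (i = skip_ep0; i < 16; ++i) {
		usb_disable_endpoint(dev, i, true);
		usb_disable_endpoint(dev, i + USB_DIR_IN, true);
	}
}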
3681     diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
3682     index f19694e69f5c3..2f594c88d9058 100644
3683     --- a/drivers/usb/core/sysfs.c
3684     +++ b/drivers/usb/core/sysfs.c
3685     @@ -889,7 +889,11 @@ read_descriptors(struct file *filp, struct kobject *kobj,
3686     size_t srclen, n;
3687     int cfgno;
3688     void *src;
3689     + int retval;
3690    
3691     + retval = usb_lock_device_interruptible(udev);
3692     + if (retval < 0)
3693     + return -EINTR;
3694     /* The binary attribute begins with the device descriptor.
3695     * Following that are the raw descriptor entries for all the
3696     * configurations (config plus subsidiary descriptors).
3697     @@ -914,6 +918,7 @@ read_descriptors(struct file *filp, struct kobject *kobj,
3698     off -= srclen;
3699     }
3700     }
3701     + usb_unlock_device(udev);
3702     return count - nleft;
3703     }
3704    
3705     diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
3706     index 33f1cca7eaa61..ae98fe94fe91e 100644
3707     --- a/drivers/usb/serial/ftdi_sio.c
3708     +++ b/drivers/usb/serial/ftdi_sio.c
3709     @@ -713,6 +713,7 @@ static const struct usb_device_id id_table_combined[] = {
3710     { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
3711     { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
3712     { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
3713     + { USB_DEVICE(XSENS_VID, XSENS_MTIUSBCONVERTER_PID) },
3714     { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
3715     { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
3716     { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
3717     diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
3718     index e8373528264c3..b5ca17a5967a0 100644
3719     --- a/drivers/usb/serial/ftdi_sio_ids.h
3720     +++ b/drivers/usb/serial/ftdi_sio_ids.h
3721     @@ -160,6 +160,7 @@
3722     #define XSENS_AWINDA_DONGLE_PID 0x0102
3723     #define XSENS_MTW_PID 0x0200 /* Xsens MTw */
3724     #define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */
3725     +#define XSENS_MTIUSBCONVERTER_PID 0x0301 /* MTi USB converter */
3726     #define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
3727    
3728     /* Xsens devices using FTDI VID */
3729     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3730     index 9b7cee98ea607..f7a6ac05ac57a 100644
3731     --- a/drivers/usb/serial/option.c
3732     +++ b/drivers/usb/serial/option.c
3733     @@ -1094,14 +1094,18 @@ static const struct usb_device_id option_ids[] = {
3734     { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
3735     .driver_info = RSVD(1) | RSVD(3) },
3736     /* Quectel products using Quectel vendor ID */
3737     - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
3738     - .driver_info = RSVD(4) },
3739     - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
3740     - .driver_info = RSVD(4) },
3741     - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95),
3742     - .driver_info = RSVD(4) },
3743     - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
3744     - .driver_info = RSVD(4) },
3745     + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
3746     + .driver_info = NUMEP2 },
3747     + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0, 0) },
3748     + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff),
3749     + .driver_info = NUMEP2 },
3750     + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) },
3751     + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
3752     + .driver_info = NUMEP2 },
3753     + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
3754     + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0xff, 0xff),
3755     + .driver_info = NUMEP2 },
3756     + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0, 0) },
3757     { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
3758     .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
3759     { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
3760     @@ -1819,6 +1823,8 @@ static const struct usb_device_id option_ids[] = {
3761     { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
3762     { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */
3763     .driver_info = RSVD(7) },
3764     + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9205, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT+ECM mode */
3765     + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9206, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT-only mode */
3766     { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
3767     .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
3768     { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
3769     diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
3770     index a18112a83faed..dda8bd39c9186 100644
3771     --- a/drivers/usb/typec/ucsi/ucsi_acpi.c
3772     +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
3773     @@ -64,11 +64,15 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
3774    
3775     static int ucsi_acpi_probe(struct platform_device *pdev)
3776     {
3777     + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
3778     struct ucsi_acpi *ua;
3779     struct resource *res;
3780     acpi_status status;
3781     int ret;
3782    
3783     + if (adev->dep_unmet)
3784     + return -EPROBE_DEFER;
3785     +
3786     ua = devm_kzalloc(&pdev->dev, sizeof(*ua), GFP_KERNEL);
3787     if (!ua)
3788     return -ENOMEM;
3789     diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
3790     index c10e17fb9a9a9..3b432a18b5ab6 100644
3791     --- a/drivers/video/console/Kconfig
3792     +++ b/drivers/video/console/Kconfig
3793     @@ -22,52 +22,6 @@ config VGA_CONSOLE
3794    
3795     Say Y.
3796    
3797     -config VGACON_SOFT_SCROLLBACK
3798     - bool "Enable Scrollback Buffer in System RAM"
3799     - depends on VGA_CONSOLE
3800     - default n
3801     - help
3802     - The scrollback buffer of the standard VGA console is located in
3803     - the VGA RAM. The size of this RAM is fixed and is quite small.
3804     - If you require a larger scrollback buffer, this can be placed in
3805     - System RAM which is dynamically allocated during initialization.
3806     - Placing the scrollback buffer in System RAM will slightly slow
3807     - down the console.
3808     -
3809     - If you want this feature, say 'Y' here and enter the amount of
3810     - RAM to allocate for this buffer. If unsure, say 'N'.
3811     -
3812     -config VGACON_SOFT_SCROLLBACK_SIZE
3813     - int "Scrollback Buffer Size (in KB)"
3814     - depends on VGACON_SOFT_SCROLLBACK
3815     - range 1 1024
3816     - default "64"
3817     - help
3818     - Enter the amount of System RAM to allocate for scrollback
3819     - buffers of VGA consoles. Each 64KB will give you approximately
3820     - 16 80x25 screenfuls of scrollback buffer.
3821     -
3822     -config VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT
3823     - bool "Persistent Scrollback History for each console by default"
3824     - depends on VGACON_SOFT_SCROLLBACK
3825     - default n
3826     - help
3827     - Say Y here if the scrollback history should persist by default when
3828     - switching between consoles. Otherwise, the scrollback history will be
3829     - flushed each time the console is switched. This feature can also be
3830     - enabled using the boot command line parameter
3831     - 'vgacon.scrollback_persistent=1'.
3832     -
3833     - This feature might break your tool of choice to flush the scrollback
3834     - buffer, e.g. clear(1) will work fine but Debian's clear_console(1)
3835     - will be broken, which might cause security issues.
3836     - You can use the escape sequence \e[3J instead if this feature is
3837     - activated.
3838     -
3839     - Note that a buffer of VGACON_SOFT_SCROLLBACK_SIZE is taken for each
3840     - created tty device.
3841     - So if you use a RAM-constrained system, say N here.
3842     -
3843     config MDA_CONSOLE
3844     depends on !M68K && !PARISC && ISA
3845     tristate "MDA text console (dual-headed)"
3846     diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
3847     index e079b910feb24..55507df335bdd 100644
3848     --- a/drivers/video/console/vgacon.c
3849     +++ b/drivers/video/console/vgacon.c
3850     @@ -165,214 +165,6 @@ static inline void vga_set_mem_top(struct vc_data *c)
3851     write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2);
3852     }
3853    
3854     -#ifdef CONFIG_VGACON_SOFT_SCROLLBACK
3855     -/* software scrollback */
3856     -struct vgacon_scrollback_info {
3857     - void *data;
3858     - int tail;
3859     - int size;
3860     - int rows;
3861     - int cnt;
3862     - int cur;
3863     - int save;
3864     - int restore;
3865     -};
3866     -
3867     -static struct vgacon_scrollback_info *vgacon_scrollback_cur;
3868     -static struct vgacon_scrollback_info vgacon_scrollbacks[MAX_NR_CONSOLES];
3869     -static bool scrollback_persistent = \
3870     - IS_ENABLED(CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT);
3871     -module_param_named(scrollback_persistent, scrollback_persistent, bool, 0000);
3872     -MODULE_PARM_DESC(scrollback_persistent, "Enable persistent scrollback for all vga consoles");
3873     -
3874     -static void vgacon_scrollback_reset(int vc_num, size_t reset_size)
3875     -{
3876     - struct vgacon_scrollback_info *scrollback = &vgacon_scrollbacks[vc_num];
3877     -
3878     - if (scrollback->data && reset_size > 0)
3879     - memset(scrollback->data, 0, reset_size);
3880     -
3881     - scrollback->cnt = 0;
3882     - scrollback->tail = 0;
3883     - scrollback->cur = 0;
3884     -}
3885     -
3886     -static void vgacon_scrollback_init(int vc_num)
3887     -{
3888     - int pitch = vga_video_num_columns * 2;
3889     - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024;
3890     - int rows = size / pitch;
3891     - void *data;
3892     -
3893     - data = kmalloc_array(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024,
3894     - GFP_NOWAIT);
3895     -
3896     - vgacon_scrollbacks[vc_num].data = data;
3897     - vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num];
3898     -
3899     - vgacon_scrollback_cur->rows = rows - 1;
3900     - vgacon_scrollback_cur->size = rows * pitch;
3901     -
3902     - vgacon_scrollback_reset(vc_num, size);
3903     -}
3904     -
3905     -static void vgacon_scrollback_switch(int vc_num)
3906     -{
3907     - if (!scrollback_persistent)
3908     - vc_num = 0;
3909     -
3910     - if (!vgacon_scrollbacks[vc_num].data) {
3911     - vgacon_scrollback_init(vc_num);
3912     - } else {
3913     - if (scrollback_persistent) {
3914     - vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num];
3915     - } else {
3916     - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024;
3917     -
3918     - vgacon_scrollback_reset(vc_num, size);
3919     - }
3920     - }
3921     -}
3922     -
3923     -static void vgacon_scrollback_startup(void)
3924     -{
3925     - vgacon_scrollback_cur = &vgacon_scrollbacks[0];
3926     - vgacon_scrollback_init(0);
3927     -}
3928     -
3929     -static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
3930     -{
3931     - void *p;
3932     -
3933     - if (!vgacon_scrollback_cur->data || !vgacon_scrollback_cur->size ||
3934     - c->vc_num != fg_console)
3935     - return;
3936     -
3937     - p = (void *) (c->vc_origin + t * c->vc_size_row);
3938     -
3939     - while (count--) {
3940     - if ((vgacon_scrollback_cur->tail + c->vc_size_row) >
3941     - vgacon_scrollback_cur->size)
3942     - vgacon_scrollback_cur->tail = 0;
3943     -
3944     - scr_memcpyw(vgacon_scrollback_cur->data +
3945     - vgacon_scrollback_cur->tail,
3946     - p, c->vc_size_row);
3947     -
3948     - vgacon_scrollback_cur->cnt++;
3949     - p += c->vc_size_row;
3950     - vgacon_scrollback_cur->tail += c->vc_size_row;
3951     -
3952     - if (vgacon_scrollback_cur->tail >= vgacon_scrollback_cur->size)
3953     - vgacon_scrollback_cur->tail = 0;
3954     -
3955     - if (vgacon_scrollback_cur->cnt > vgacon_scrollback_cur->rows)
3956     - vgacon_scrollback_cur->cnt = vgacon_scrollback_cur->rows;
3957     -
3958     - vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt;
3959     - }
3960     -}
3961     -
3962     -static void vgacon_restore_screen(struct vc_data *c)
3963     -{
3964     - c->vc_origin = c->vc_visible_origin;
3965     - vgacon_scrollback_cur->save = 0;
3966     -
3967     - if (!vga_is_gfx && !vgacon_scrollback_cur->restore) {
3968     - scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf,
3969     - c->vc_screenbuf_size > vga_vram_size ?
3970     - vga_vram_size : c->vc_screenbuf_size);
3971     - vgacon_scrollback_cur->restore = 1;
3972     - vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt;
3973     - }
3974     -}
3975     -
3976     -static void vgacon_scrolldelta(struct vc_data *c, int lines)
3977     -{
3978     - int start, end, count, soff;
3979     -
3980     - if (!lines) {
3981     - vgacon_restore_screen(c);
3982     - return;
3983     - }
3984     -
3985     - if (!vgacon_scrollback_cur->data)
3986     - return;
3987     -
3988     - if (!vgacon_scrollback_cur->save) {
3989     - vgacon_cursor(c, CM_ERASE);
3990     - vgacon_save_screen(c);
3991     - c->vc_origin = (unsigned long)c->vc_screenbuf;
3992     - vgacon_scrollback_cur->save = 1;
3993     - }
3994     -
3995     - vgacon_scrollback_cur->restore = 0;
3996     - start = vgacon_scrollback_cur->cur + lines;
3997     - end = start + abs(lines);
3998     -
3999     - if (start < 0)
4000     - start = 0;
4001     -
4002     - if (start > vgacon_scrollback_cur->cnt)
4003     - start = vgacon_scrollback_cur->cnt;
4004     -
4005     - if (end < 0)
4006     - end = 0;
4007     -
4008     - if (end > vgacon_scrollback_cur->cnt)
4009     - end = vgacon_scrollback_cur->cnt;
4010     -
4011     - vgacon_scrollback_cur->cur = start;
4012     - count = end - start;
4013     - soff = vgacon_scrollback_cur->tail -
4014     - ((vgacon_scrollback_cur->cnt - end) * c->vc_size_row);
4015     - soff -= count * c->vc_size_row;
4016     -
4017     - if (soff < 0)
4018     - soff += vgacon_scrollback_cur->size;
4019     -
4020     - count = vgacon_scrollback_cur->cnt - start;
4021     -
4022     - if (count > c->vc_rows)
4023     - count = c->vc_rows;
4024     -
4025     - if (count) {
4026     - int copysize;
4027     -
4028     - int diff = c->vc_rows - count;
4029     - void *d = (void *) c->vc_visible_origin;
4030     - void *s = (void *) c->vc_screenbuf;
4031     -
4032     - count *= c->vc_size_row;
4033     - /* how much memory to end of buffer left? */
4034     - copysize = min(count, vgacon_scrollback_cur->size - soff);
4035     - scr_memcpyw(d, vgacon_scrollback_cur->data + soff, copysize);
4036     - d += copysize;
4037     - count -= copysize;
4038     -
4039     - if (count) {
4040     - scr_memcpyw(d, vgacon_scrollback_cur->data, count);
4041     - d += count;
4042     - }
4043     -
4044     - if (diff)
4045     - scr_memcpyw(d, s, diff * c->vc_size_row);
4046     - } else
4047     - vgacon_cursor(c, CM_MOVE);
4048     -}
4049     -
4050     -static void vgacon_flush_scrollback(struct vc_data *c)
4051     -{
4052     - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024;
4053     -
4054     - vgacon_scrollback_reset(c->vc_num, size);
4055     -}
4056     -#else
4057     -#define vgacon_scrollback_startup(...) do { } while (0)
4058     -#define vgacon_scrollback_init(...) do { } while (0)
4059     -#define vgacon_scrollback_update(...) do { } while (0)
4060     -#define vgacon_scrollback_switch(...) do { } while (0)
4061     -
4062     static void vgacon_restore_screen(struct vc_data *c)
4063     {
4064     if (c->vc_origin != c->vc_visible_origin)
4065     @@ -386,11 +178,6 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
4066     vga_set_mem_top(c);
4067     }
4068    
4069     -static void vgacon_flush_scrollback(struct vc_data *c)
4070     -{
4071     -}
4072     -#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */
4073     -
4074     static const char *vgacon_startup(void)
4075     {
4076     const char *display_desc = NULL;
4077     @@ -573,10 +360,7 @@ static const char *vgacon_startup(void)
4078     vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH;
4079     vgacon_yres = vga_scan_lines;
4080    
4081     - if (!vga_init_done) {
4082     - vgacon_scrollback_startup();
4083     - vga_init_done = true;
4084     - }
4085     + vga_init_done = true;
4086    
4087     return display_desc;
4088     }
4089     @@ -867,7 +651,6 @@ static int vgacon_switch(struct vc_data *c)
4090     vgacon_doresize(c, c->vc_cols, c->vc_rows);
4091     }
4092    
4093     - vgacon_scrollback_switch(c->vc_num);
4094     return 0; /* Redrawing not needed */
4095     }
4096    
4097     @@ -1384,7 +1167,6 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
4098     oldo = c->vc_origin;
4099     delta = lines * c->vc_size_row;
4100     if (dir == SM_UP) {
4101     - vgacon_scrollback_update(c, t, lines);
4102     if (c->vc_scr_end + delta >= vga_vram_end) {
4103     scr_memcpyw((u16 *) vga_vram_base,
4104     (u16 *) (oldo + delta),
4105     @@ -1448,7 +1230,6 @@ const struct consw vga_con = {
4106     .con_save_screen = vgacon_save_screen,
4107     .con_build_attr = vgacon_build_attr,
4108     .con_invert_region = vgacon_invert_region,
4109     - .con_flush_scrollback = vgacon_flush_scrollback,
4110     };
4111     EXPORT_SYMBOL(vga_con);
4112    
4113     diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
4114     index 35ebeeccde4df..436365efae731 100644
4115     --- a/drivers/video/fbdev/core/bitblit.c
4116     +++ b/drivers/video/fbdev/core/bitblit.c
4117     @@ -234,7 +234,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
4118     }
4119    
4120     static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
4121     - int softback_lines, int fg, int bg)
4122     + int fg, int bg)
4123     {
4124     struct fb_cursor cursor;
4125     struct fbcon_ops *ops = info->fbcon_par;
4126     @@ -247,15 +247,6 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
4127    
4128     cursor.set = 0;
4129    
4130     - if (softback_lines) {
4131     - if (y + softback_lines >= vc->vc_rows) {
4132     - mode = CM_ERASE;
4133     - ops->cursor_flash = 0;
4134     - return;
4135     - } else
4136     - y += softback_lines;
4137     - }
4138     -
4139     c = scr_readw((u16 *) vc->vc_pos);
4140     attribute = get_attribute(info, c);
4141     src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
4142     diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
4143     index 31f00d72f1493..8685d28dfdaaf 100644
4144     --- a/drivers/video/fbdev/core/fbcon.c
4145     +++ b/drivers/video/fbdev/core/fbcon.c
4146     @@ -122,12 +122,6 @@ static int logo_lines;
4147     /* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO
4148     enums. */
4149     static int logo_shown = FBCON_LOGO_CANSHOW;
4150     -/* Software scrollback */
4151     -static int fbcon_softback_size = 32768;
4152     -static unsigned long softback_buf, softback_curr;
4153     -static unsigned long softback_in;
4154     -static unsigned long softback_top, softback_end;
4155     -static int softback_lines;
4156     /* console mappings */
4157     static int first_fb_vc;
4158     static int last_fb_vc = MAX_NR_CONSOLES - 1;
4159     @@ -167,8 +161,6 @@ static int margin_color;
4160    
4161     static const struct consw fb_con;
4162    
4163     -#define CM_SOFTBACK (8)
4164     -
4165     #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row)
4166    
4167     static int fbcon_set_origin(struct vc_data *);
4168     @@ -373,18 +365,6 @@ static int get_color(struct vc_data *vc, struct fb_info *info,
4169     return color;
4170     }
4171    
4172     -static void fbcon_update_softback(struct vc_data *vc)
4173     -{
4174     - int l = fbcon_softback_size / vc->vc_size_row;
4175     -
4176     - if (l > 5)
4177     - softback_end = softback_buf + l * vc->vc_size_row;
4178     - else
4179     - /* Smaller scrollback makes no sense, and 0 would screw
4180     - the operation totally */
4181     - softback_top = 0;
4182     -}
4183     -
4184     static void fb_flashcursor(struct work_struct *work)
4185     {
4186     struct fb_info *info = container_of(work, struct fb_info, queue);
4187     @@ -414,7 +394,7 @@ static void fb_flashcursor(struct work_struct *work)
4188     c = scr_readw((u16 *) vc->vc_pos);
4189     mode = (!ops->cursor_flash || ops->cursor_state.enable) ?
4190     CM_ERASE : CM_DRAW;
4191     - ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1),
4192     + ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
4193     get_color(vc, info, c, 0));
4194     console_unlock();
4195     }
4196     @@ -471,13 +451,7 @@ static int __init fb_console_setup(char *this_opt)
4197     }
4198    
4199     if (!strncmp(options, "scrollback:", 11)) {
4200     - options += 11;
4201     - if (*options) {
4202     - fbcon_softback_size = simple_strtoul(options, &options, 0);
4203     - if (*options == 'k' || *options == 'K') {
4204     - fbcon_softback_size *= 1024;
4205     - }
4206     - }
4207     + pr_warn("Ignoring scrollback size option\n");
4208     continue;
4209     }
4210    
4211     @@ -1016,31 +990,6 @@ static const char *fbcon_startup(void)
4212    
4213     set_blitting_type(vc, info);
4214    
4215     - if (info->fix.type != FB_TYPE_TEXT) {
4216     - if (fbcon_softback_size) {
4217     - if (!softback_buf) {
4218     - softback_buf =
4219     - (unsigned long)
4220     - kvmalloc(fbcon_softback_size,
4221     - GFP_KERNEL);
4222     - if (!softback_buf) {
4223     - fbcon_softback_size = 0;
4224     - softback_top = 0;
4225     - }
4226     - }
4227     - } else {
4228     - if (softback_buf) {
4229     - kvfree((void *) softback_buf);
4230     - softback_buf = 0;
4231     - softback_top = 0;
4232     - }
4233     - }
4234     - if (softback_buf)
4235     - softback_in = softback_top = softback_curr =
4236     - softback_buf;
4237     - softback_lines = 0;
4238     - }
4239     -
4240     /* Setup default font */
4241     if (!p->fontdata && !vc->vc_font.data) {
4242     if (!fontname[0] || !(font = find_font(fontname)))
4243     @@ -1214,9 +1163,6 @@ static void fbcon_init(struct vc_data *vc, int init)
4244     if (logo)
4245     fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows);
4246    
4247     - if (vc == svc && softback_buf)
4248     - fbcon_update_softback(vc);
4249     -
4250     if (ops->rotate_font && ops->rotate_font(info, vc)) {
4251     ops->rotate = FB_ROTATE_UR;
4252     set_blitting_type(vc, info);
4253     @@ -1379,7 +1325,6 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
4254     {
4255     struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
4256     struct fbcon_ops *ops = info->fbcon_par;
4257     - int y;
4258     int c = scr_readw((u16 *) vc->vc_pos);
4259    
4260     ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
4261     @@ -1393,16 +1338,8 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
4262     fbcon_add_cursor_timer(info);
4263    
4264     ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
4265     - if (mode & CM_SOFTBACK) {
4266     - mode &= ~CM_SOFTBACK;
4267     - y = softback_lines;
4268     - } else {
4269     - if (softback_lines)
4270     - fbcon_set_origin(vc);
4271     - y = 0;
4272     - }
4273    
4274     - ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1),
4275     + ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
4276     get_color(vc, info, c, 0));
4277     }
4278    
4279     @@ -1473,8 +1410,6 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
4280    
4281     if (con_is_visible(vc)) {
4282     update_screen(vc);
4283     - if (softback_buf)
4284     - fbcon_update_softback(vc);
4285     }
4286     }
4287    
4288     @@ -1612,99 +1547,6 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
4289     scrollback_current = 0;
4290     }
4291    
4292     -static void fbcon_redraw_softback(struct vc_data *vc, struct fbcon_display *p,
4293     - long delta)
4294     -{
4295     - int count = vc->vc_rows;
4296     - unsigned short *d, *s;
4297     - unsigned long n;
4298     - int line = 0;
4299     -
4300     - d = (u16 *) softback_curr;
4301     - if (d == (u16 *) softback_in)
4302     - d = (u16 *) vc->vc_origin;
4303     - n = softback_curr + delta * vc->vc_size_row;
4304     - softback_lines -= delta;
4305     - if (delta < 0) {
4306     - if (softback_curr < softback_top && n < softback_buf) {
4307     - n += softback_end - softback_buf;
4308     - if (n < softback_top) {
4309     - softback_lines -=
4310     - (softback_top - n) / vc->vc_size_row;
4311     - n = softback_top;
4312     - }
4313     - } else if (softback_curr >= softback_top
4314     - && n < softback_top) {
4315     - softback_lines -=
4316     - (softback_top - n) / vc->vc_size_row;
4317     - n = softback_top;
4318     - }
4319     - } else {
4320     - if (softback_curr > softback_in && n >= softback_end) {
4321     - n += softback_buf - softback_end;
4322     - if (n > softback_in) {
4323     - n = softback_in;
4324     - softback_lines = 0;
4325     - }
4326     - } else if (softback_curr <= softback_in && n > softback_in) {
4327     - n = softback_in;
4328     - softback_lines = 0;
4329     - }
4330     - }
4331     - if (n == softback_curr)
4332     - return;
4333     - softback_curr = n;
4334     - s = (u16 *) softback_curr;
4335     - if (s == (u16 *) softback_in)
4336     - s = (u16 *) vc->vc_origin;
4337     - while (count--) {
4338     - unsigned short *start;
4339     - unsigned short *le;
4340     - unsigned short c;
4341     - int x = 0;
4342     - unsigned short attr = 1;
4343     -
4344     - start = s;
4345     - le = advance_row(s, 1);
4346     - do {
4347     - c = scr_readw(s);
4348     - if (attr != (c & 0xff00)) {
4349     - attr = c & 0xff00;
4350     - if (s > start) {
4351     - fbcon_putcs(vc, start, s - start,
4352     - line, x);
4353     - x += s - start;
4354     - start = s;
4355     - }
4356     - }
4357     - if (c == scr_readw(d)) {
4358     - if (s > start) {
4359     - fbcon_putcs(vc, start, s - start,
4360     - line, x);
4361     - x += s - start + 1;
4362     - start = s + 1;
4363     - } else {
4364     - x++;
4365     - start++;
4366     - }
4367     - }
4368     - s++;
4369     - d++;
4370     - } while (s < le);
4371     - if (s > start)
4372     - fbcon_putcs(vc, start, s - start, line, x);
4373     - line++;
4374     - if (d == (u16 *) softback_end)
4375     - d = (u16 *) softback_buf;
4376     - if (d == (u16 *) softback_in)
4377     - d = (u16 *) vc->vc_origin;
4378     - if (s == (u16 *) softback_end)
4379     - s = (u16 *) softback_buf;
4380     - if (s == (u16 *) softback_in)
4381     - s = (u16 *) vc->vc_origin;
4382     - }
4383     -}
4384     -
4385     static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
4386     int line, int count, int dy)
4387     {
4388     @@ -1844,31 +1686,6 @@ static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p,
4389     }
4390     }
4391    
4392     -static inline void fbcon_softback_note(struct vc_data *vc, int t,
4393     - int count)
4394     -{
4395     - unsigned short *p;
4396     -
4397     - if (vc->vc_num != fg_console)
4398     - return;
4399     - p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row);
4400     -
4401     - while (count) {
4402     - scr_memcpyw((u16 *) softback_in, p, vc->vc_size_row);
4403     - count--;
4404     - p = advance_row(p, 1);
4405     - softback_in += vc->vc_size_row;
4406     - if (softback_in == softback_end)
4407     - softback_in = softback_buf;
4408     - if (softback_in == softback_top) {
4409     - softback_top += vc->vc_size_row;
4410     - if (softback_top == softback_end)
4411     - softback_top = softback_buf;
4412     - }
4413     - }
4414     - softback_curr = softback_in;
4415     -}
4416     -
4417     static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
4418     enum con_scroll dir, unsigned int count)
4419     {
4420     @@ -1891,8 +1708,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
4421     case SM_UP:
4422     if (count > vc->vc_rows) /* Maximum realistic size */
4423     count = vc->vc_rows;
4424     - if (softback_top)
4425     - fbcon_softback_note(vc, t, count);
4426     if (logo_shown >= 0)
4427     goto redraw_up;
4428     switch (p->scrollmode) {
4429     @@ -2263,14 +2078,6 @@ static int fbcon_switch(struct vc_data *vc)
4430     info = registered_fb[con2fb_map[vc->vc_num]];
4431     ops = info->fbcon_par;
4432    
4433     - if (softback_top) {
4434     - if (softback_lines)
4435     - fbcon_set_origin(vc);
4436     - softback_top = softback_curr = softback_in = softback_buf;
4437     - softback_lines = 0;
4438     - fbcon_update_softback(vc);
4439     - }
4440     -
4441     if (logo_shown >= 0) {
4442     struct vc_data *conp2 = vc_cons[logo_shown].d;
4443    
4444     @@ -2593,9 +2400,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
4445     int cnt;
4446     char *old_data = NULL;
4447    
4448     - if (con_is_visible(vc) && softback_lines)
4449     - fbcon_set_origin(vc);
4450     -
4451     resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
4452     if (p->userfont)
4453     old_data = vc->vc_font.data;
4454     @@ -2621,8 +2425,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
4455     cols /= w;
4456     rows /= h;
4457     vc_resize(vc, cols, rows);
4458     - if (con_is_visible(vc) && softback_buf)
4459     - fbcon_update_softback(vc);
4460     } else if (con_is_visible(vc)
4461     && vc->vc_mode == KD_TEXT) {
4462     fbcon_clear_margins(vc, 0);
4463     @@ -2781,19 +2583,7 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
4464    
4465     static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
4466     {
4467     - unsigned long p;
4468     - int line;
4469     -
4470     - if (vc->vc_num != fg_console || !softback_lines)
4471     - return (u16 *) (vc->vc_origin + offset);
4472     - line = offset / vc->vc_size_row;
4473     - if (line >= softback_lines)
4474     - return (u16 *) (vc->vc_origin + offset -
4475     - softback_lines * vc->vc_size_row);
4476     - p = softback_curr + offset;
4477     - if (p >= softback_end)
4478     - p += softback_buf - softback_end;
4479     - return (u16 *) p;
4480     + return (u16 *) (vc->vc_origin + offset);
4481     }
4482    
4483     static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos,
4484     @@ -2807,22 +2597,7 @@ static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos,
4485    
4486     x = offset % vc->vc_cols;
4487     y = offset / vc->vc_cols;
4488     - if (vc->vc_num == fg_console)
4489     - y += softback_lines;
4490     ret = pos + (vc->vc_cols - x) * 2;
4491     - } else if (vc->vc_num == fg_console && softback_lines) {
4492     - unsigned long offset = pos - softback_curr;
4493     -
4494     - if (pos < softback_curr)
4495     - offset += softback_end - softback_buf;
4496     - offset /= 2;
4497     - x = offset % vc->vc_cols;
4498     - y = offset / vc->vc_cols;
4499     - ret = pos + (vc->vc_cols - x) * 2;
4500     - if (ret == softback_end)
4501     - ret = softback_buf;
4502     - if (ret == softback_in)
4503     - ret = vc->vc_origin;
4504     } else {
4505     /* Should not happen */
4506     x = y = 0;
4507     @@ -2850,106 +2625,11 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt)
4508     a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) |
4509     (((a) & 0x0700) << 4);
4510     scr_writew(a, p++);
4511     - if (p == (u16 *) softback_end)
4512     - p = (u16 *) softback_buf;
4513     - if (p == (u16 *) softback_in)
4514     - p = (u16 *) vc->vc_origin;
4515     }
4516     }
4517    
4518     -static void fbcon_scrolldelta(struct vc_data *vc, int lines)
4519     -{
4520     - struct fb_info *info = registered_fb[con2fb_map[fg_console]];
4521     - struct fbcon_ops *ops = info->fbcon_par;
4522     - struct fbcon_display *disp = &fb_display[fg_console];
4523     - int offset, limit, scrollback_old;
4524     -
4525     - if (softback_top) {
4526     - if (vc->vc_num != fg_console)
4527     - return;
4528     - if (vc->vc_mode != KD_TEXT || !lines)
4529     - return;
4530     - if (logo_shown >= 0) {
4531     - struct vc_data *conp2 = vc_cons[logo_shown].d;
4532     -
4533     - if (conp2->vc_top == logo_lines
4534     - && conp2->vc_bottom == conp2->vc_rows)
4535     - conp2->vc_top = 0;
4536     - if (logo_shown == vc->vc_num) {
4537     - unsigned long p, q;
4538     - int i;
4539     -
4540     - p = softback_in;
4541     - q = vc->vc_origin +
4542     - logo_lines * vc->vc_size_row;
4543     - for (i = 0; i < logo_lines; i++) {
4544     - if (p == softback_top)
4545     - break;
4546     - if (p == softback_buf)
4547     - p = softback_end;
4548     - p -= vc->vc_size_row;
4549     - q -= vc->vc_size_row;
4550     - scr_memcpyw((u16 *) q, (u16 *) p,
4551     - vc->vc_size_row);
4552     - }
4553     - softback_in = softback_curr = p;
4554     - update_region(vc, vc->vc_origin,
4555     - logo_lines * vc->vc_cols);
4556     - }
4557     - logo_shown = FBCON_LOGO_CANSHOW;
4558     - }
4559     - fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK);
4560     - fbcon_redraw_softback(vc, disp, lines);
4561     - fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK);
4562     - return;
4563     - }
4564     -
4565     - if (!scrollback_phys_max)
4566     - return;
4567     -
4568     - scrollback_old = scrollback_current;
4569     - scrollback_current -= lines;
4570     - if (scrollback_current < 0)
4571     - scrollback_current = 0;
4572     - else if (scrollback_current > scrollback_max)
4573     - scrollback_current = scrollback_max;
4574     - if (scrollback_current == scrollback_old)
4575     - return;
4576     -
4577     - if (fbcon_is_inactive(vc, info))
4578     - return;
4579     -
4580     - fbcon_cursor(vc, CM_ERASE);
4581     -
4582     - offset = disp->yscroll - scrollback_current;
4583     - limit = disp->vrows;
4584     - switch (disp->scrollmode) {
4585     - case SCROLL_WRAP_MOVE:
4586     - info->var.vmode |= FB_VMODE_YWRAP;
4587     - break;
4588     - case SCROLL_PAN_MOVE:
4589     - case SCROLL_PAN_REDRAW:
4590     - limit -= vc->vc_rows;
4591     - info->var.vmode &= ~FB_VMODE_YWRAP;
4592     - break;
4593     - }
4594     - if (offset < 0)
4595     - offset += limit;
4596     - else if (offset >= limit)
4597     - offset -= limit;
4598     -
4599     - ops->var.xoffset = 0;
4600     - ops->var.yoffset = offset * vc->vc_font.height;
4601     - ops->update_start(info);
4602     -
4603     - if (!scrollback_current)
4604     - fbcon_cursor(vc, CM_DRAW);
4605     -}
4606     -
4607     static int fbcon_set_origin(struct vc_data *vc)
4608     {
4609     - if (softback_lines)
4610     - fbcon_scrolldelta(vc, softback_lines);
4611     return 0;
4612     }
4613    
4614     @@ -3013,8 +2693,6 @@ static void fbcon_modechanged(struct fb_info *info)
4615    
4616     fbcon_set_palette(vc, color_table);
4617     update_screen(vc);
4618     - if (softback_buf)
4619     - fbcon_update_softback(vc);
4620     }
4621     }
4622    
4623     @@ -3425,7 +3103,6 @@ static const struct consw fb_con = {
4624     .con_font_default = fbcon_set_def_font,
4625     .con_font_copy = fbcon_copy_font,
4626     .con_set_palette = fbcon_set_palette,
4627     - .con_scrolldelta = fbcon_scrolldelta,
4628     .con_set_origin = fbcon_set_origin,
4629     .con_invert_region = fbcon_invert_region,
4630     .con_screen_pos = fbcon_screen_pos,
4631     @@ -3660,9 +3337,6 @@ static void fbcon_exit(void)
4632     }
4633     #endif
4634    
4635     - kvfree((void *)softback_buf);
4636     - softback_buf = 0UL;
4637     -
4638     for_each_registered_fb(i) {
4639     int pending = 0;
4640    
4641     diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
4642     index 20dea853765f5..78bb14c03643e 100644
4643     --- a/drivers/video/fbdev/core/fbcon.h
4644     +++ b/drivers/video/fbdev/core/fbcon.h
4645     @@ -62,7 +62,7 @@ struct fbcon_ops {
4646     void (*clear_margins)(struct vc_data *vc, struct fb_info *info,
4647     int color, int bottom_only);
4648     void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode,
4649     - int softback_lines, int fg, int bg);
4650     + int fg, int bg);
4651     int (*update_start)(struct fb_info *info);
4652     int (*rotate_font)(struct fb_info *info, struct vc_data *vc);
4653     struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */
4654     diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
4655     index 78f3a56214782..71ad6967a70ee 100644
4656     --- a/drivers/video/fbdev/core/fbcon_ccw.c
4657     +++ b/drivers/video/fbdev/core/fbcon_ccw.c
4658     @@ -219,7 +219,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
4659     }
4660    
4661     static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
4662     - int softback_lines, int fg, int bg)
4663     + int fg, int bg)
4664     {
4665     struct fb_cursor cursor;
4666     struct fbcon_ops *ops = info->fbcon_par;
4667     @@ -236,15 +236,6 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
4668    
4669     cursor.set = 0;
4670    
4671     - if (softback_lines) {
4672     - if (y + softback_lines >= vc->vc_rows) {
4673     - mode = CM_ERASE;
4674     - ops->cursor_flash = 0;
4675     - return;
4676     - } else
4677     - y += softback_lines;
4678     - }
4679     -
4680     c = scr_readw((u16 *) vc->vc_pos);
4681     attribute = get_attribute(info, c);
4682     src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
4683     diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
4684     index fd098ff17574b..31fe5dd651d44 100644
4685     --- a/drivers/video/fbdev/core/fbcon_cw.c
4686     +++ b/drivers/video/fbdev/core/fbcon_cw.c
4687     @@ -202,7 +202,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
4688     }
4689    
4690     static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
4691     - int softback_lines, int fg, int bg)
4692     + int fg, int bg)
4693     {
4694     struct fb_cursor cursor;
4695     struct fbcon_ops *ops = info->fbcon_par;
4696     @@ -219,15 +219,6 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
4697    
4698     cursor.set = 0;
4699    
4700     - if (softback_lines) {
4701     - if (y + softback_lines >= vc->vc_rows) {
4702     - mode = CM_ERASE;
4703     - ops->cursor_flash = 0;
4704     - return;
4705     - } else
4706     - y += softback_lines;
4707     - }
4708     -
4709     c = scr_readw((u16 *) vc->vc_pos);
4710     attribute = get_attribute(info, c);
4711     src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
4712     diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
4713     index e165a3fad29ad..b2dd1370e39b2 100644
4714     --- a/drivers/video/fbdev/core/fbcon_ud.c
4715     +++ b/drivers/video/fbdev/core/fbcon_ud.c
4716     @@ -249,7 +249,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
4717     }
4718    
4719     static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
4720     - int softback_lines, int fg, int bg)
4721     + int fg, int bg)
4722     {
4723     struct fb_cursor cursor;
4724     struct fbcon_ops *ops = info->fbcon_par;
4725     @@ -267,15 +267,6 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
4726    
4727     cursor.set = 0;
4728    
4729     - if (softback_lines) {
4730     - if (y + softback_lines >= vc->vc_rows) {
4731     - mode = CM_ERASE;
4732     - ops->cursor_flash = 0;
4733     - return;
4734     - } else
4735     - y += softback_lines;
4736     - }
4737     -
4738     c = scr_readw((u16 *) vc->vc_pos);
4739     attribute = get_attribute(info, c);
4740     src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height));
4741     diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c
4742     index 93390312957ff..eb664dbf96f66 100644
4743     --- a/drivers/video/fbdev/core/tileblit.c
4744     +++ b/drivers/video/fbdev/core/tileblit.c
4745     @@ -80,7 +80,7 @@ static void tile_clear_margins(struct vc_data *vc, struct fb_info *info,
4746     }
4747    
4748     static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
4749     - int softback_lines, int fg, int bg)
4750     + int fg, int bg)
4751     {
4752     struct fb_tilecursor cursor;
4753     int use_sw = (vc->vc_cursor_type & 0x10);
4754     diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
4755     index 2c6a576ed84c7..4b83109202b1c 100644
4756     --- a/drivers/video/fbdev/vga16fb.c
4757     +++ b/drivers/video/fbdev/vga16fb.c
4758     @@ -1121,7 +1121,7 @@ static void vga_8planes_imageblit(struct fb_info *info, const struct fb_image *i
4759     char oldop = setop(0);
4760     char oldsr = setsr(0);
4761     char oldmask = selectmask();
4762     - const char *cdat = image->data;
4763     + const unsigned char *cdat = image->data;
4764     u32 dx = image->dx;
4765     char __iomem *where;
4766     int y;
4767     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
4768     index 541497036cc24..31c1ed554d26d 100644
4769     --- a/fs/btrfs/extent-tree.c
4770     +++ b/fs/btrfs/extent-tree.c
4771     @@ -402,12 +402,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
4772     if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
4773     ASSERT(eb->fs_info);
4774     /*
4775     - * Every shared one has parent tree
4776     - * block, which must be aligned to
4777     - * nodesize.
4778     + * Every shared one has parent tree block,
4779     + * which must be aligned to sector size.
4780     */
4781     if (offset &&
4782     - IS_ALIGNED(offset, eb->fs_info->nodesize))
4783     + IS_ALIGNED(offset, eb->fs_info->sectorsize))
4784     return type;
4785     }
4786     } else if (is_data == BTRFS_REF_TYPE_DATA) {
4787     @@ -416,12 +415,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
4788     if (type == BTRFS_SHARED_DATA_REF_KEY) {
4789     ASSERT(eb->fs_info);
4790     /*
4791     - * Every shared one has parent tree
4792     - * block, which must be aligned to
4793     - * nodesize.
4794     + * Every shared one has parent tree block,
4795     + * which must be aligned to sector size.
4796     */
4797     if (offset &&
4798     - IS_ALIGNED(offset, eb->fs_info->nodesize))
4799     + IS_ALIGNED(offset, eb->fs_info->sectorsize))
4800     return type;
4801     }
4802     } else {
4803     @@ -431,8 +429,9 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
4804     }
4805    
4806     btrfs_print_leaf((struct extent_buffer *)eb);
4807     - btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d",
4808     - eb->start, type);
4809     + btrfs_err(eb->fs_info,
4810     + "eb %llu iref 0x%lx invalid extent inline ref type %d",
4811     + eb->start, (unsigned long)iref, type);
4812     WARN_ON(1);
4813    
4814     return BTRFS_REF_TYPE_INVALID;
4815     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
4816     index 775fd5975191b..63394b450afcc 100644
4817     --- a/fs/btrfs/ioctl.c
4818     +++ b/fs/btrfs/ioctl.c
4819     @@ -2215,7 +2215,8 @@ static noinline int search_ioctl(struct inode *inode,
4820     key.offset = sk->min_offset;
4821    
4822     while (1) {
4823     - ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset);
4824     + ret = fault_in_pages_writeable(ubuf + sk_offset,
4825     + *buf_size - sk_offset);
4826     if (ret)
4827     break;
4828    
4829     diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
4830     index 9cb50577d9823..f4edadf1067f2 100644
4831     --- a/fs/btrfs/print-tree.c
4832     +++ b/fs/btrfs/print-tree.c
4833     @@ -95,9 +95,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
4834     * offset is supposed to be a tree block which
4835     * must be aligned to nodesize.
4836     */
4837     - if (!IS_ALIGNED(offset, eb->fs_info->nodesize))
4838     - pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n",
4839     - offset, (unsigned long long)eb->fs_info->nodesize);
4840     + if (!IS_ALIGNED(offset, eb->fs_info->sectorsize))
4841     + pr_info(
4842     + "\t\t\t(parent %llu not aligned to sectorsize %u)\n",
4843     + offset, eb->fs_info->sectorsize);
4844     break;
4845     case BTRFS_EXTENT_DATA_REF_KEY:
4846     dref = (struct btrfs_extent_data_ref *)(&iref->offset);
4847     @@ -112,8 +113,9 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
4848     * must be aligned to nodesize.
4849     */
4850     if (!IS_ALIGNED(offset, eb->fs_info->nodesize))
4851     - pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n",
4852     - offset, (unsigned long long)eb->fs_info->nodesize);
4853     + pr_info(
4854     + "\t\t\t(parent %llu not aligned to sectorsize %u)\n",
4855     + offset, eb->fs_info->sectorsize);
4856     break;
4857     default:
4858     pr_cont("(extent %llu has INVALID ref type %d)\n",
4859     diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
4860     index 81be71fb569e1..4ecd6663dfb51 100644
4861     --- a/fs/btrfs/volumes.c
4862     +++ b/fs/btrfs/volumes.c
4863     @@ -4,6 +4,7 @@
4864     */
4865    
4866     #include <linux/sched.h>
4867     +#include <linux/sched/mm.h>
4868     #include <linux/bio.h>
4869     #include <linux/slab.h>
4870     #include <linux/buffer_head.h>
4871     @@ -6708,8 +6709,17 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
4872     u64 devid, u8 *dev_uuid)
4873     {
4874     struct btrfs_device *device;
4875     + unsigned int nofs_flag;
4876    
4877     + /*
4878     + * We call this under the chunk_mutex, so we want to use NOFS for this
4879     + * allocation, however we don't want to change btrfs_alloc_device() to
4880     + * always do NOFS because we use it in a lot of other GFP_KERNEL safe
4881     + * places.
4882     + */
4883     + nofs_flag = memalloc_nofs_save();
4884     device = btrfs_alloc_device(NULL, &devid, dev_uuid);
4885     + memalloc_nofs_restore(nofs_flag);
4886     if (IS_ERR(device))
4887     return device;
4888    
4889     diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
4890     index b43960794922d..943637298f650 100644
4891     --- a/fs/debugfs/file.c
4892     +++ b/fs/debugfs/file.c
4893     @@ -176,7 +176,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
4894     goto out;
4895    
4896     if (!fops_get(real_fops)) {
4897     -#ifdef MODULE
4898     +#ifdef CONFIG_MODULES
4899     if (real_fops->owner &&
4900     real_fops->owner->state == MODULE_STATE_GOING)
4901     goto out;
4902     @@ -311,7 +311,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
4903     goto out;
4904    
4905     if (!fops_get(real_fops)) {
4906     -#ifdef MODULE
4907     +#ifdef CONFIG_MODULES
4908     if (real_fops->owner &&
4909     real_fops->owner->state == MODULE_STATE_GOING)
4910     goto out;
4911     diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
4912     index fe277ee5ec7c4..5472ed3ce6943 100644
4913     --- a/fs/xfs/libxfs/xfs_attr_leaf.c
4914     +++ b/fs/xfs/libxfs/xfs_attr_leaf.c
4915     @@ -583,8 +583,8 @@ xfs_attr_shortform_create(xfs_da_args_t *args)
4916     ASSERT(ifp->if_flags & XFS_IFINLINE);
4917     }
4918     xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK);
4919     - hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data;
4920     - hdr->count = 0;
4921     + hdr = (struct xfs_attr_sf_hdr *)ifp->if_u1.if_data;
4922     + memset(hdr, 0, sizeof(*hdr));
4923     hdr->totsize = cpu_to_be16(sizeof(*hdr));
4924     xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
4925     }
4926     diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
4927     index 588d446130944..443cf33f66668 100644
4928     --- a/fs/xfs/libxfs/xfs_ialloc.c
4929     +++ b/fs/xfs/libxfs/xfs_ialloc.c
4930     @@ -679,7 +679,7 @@ xfs_ialloc_ag_alloc(
4931     args.minalignslop = igeo->cluster_align - 1;
4932    
4933     /* Allow space for the inode btree to split. */
4934     - args.minleft = igeo->inobt_maxlevels - 1;
4935     + args.minleft = igeo->inobt_maxlevels;
4936     if ((error = xfs_alloc_vextent(&args)))
4937     return error;
4938    
4939     @@ -727,7 +727,7 @@ xfs_ialloc_ag_alloc(
4940     /*
4941     * Allow space for the inode btree to split.
4942     */
4943     - args.minleft = igeo->inobt_maxlevels - 1;
4944     + args.minleft = igeo->inobt_maxlevels;
4945     if ((error = xfs_alloc_vextent(&args)))
4946     return error;
4947     }
4948     diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h
4949     index c6df01a2a1585..7ad3659c5d2a9 100644
4950     --- a/fs/xfs/libxfs/xfs_trans_space.h
4951     +++ b/fs/xfs/libxfs/xfs_trans_space.h
4952     @@ -58,7 +58,7 @@
4953     #define XFS_IALLOC_SPACE_RES(mp) \
4954     (M_IGEO(mp)->ialloc_blks + \
4955     ((xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1) * \
4956     - (M_IGEO(mp)->inobt_maxlevels - 1)))
4957     + M_IGEO(mp)->inobt_maxlevels))
4958    
4959     /*
4960     * Space reservation values for various transactions.
4961     diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
4962     index 9a33f171aa822..625f491b95de8 100644
4963     --- a/include/linux/netfilter/nf_conntrack_sctp.h
4964     +++ b/include/linux/netfilter/nf_conntrack_sctp.h
4965     @@ -9,6 +9,8 @@ struct ip_ct_sctp {
4966     enum sctp_conntrack state;
4967    
4968     __be32 vtag[IP_CT_DIR_MAX];
4969     + u8 last_dir;
4970     + u8 flags;
4971     };
4972    
4973     #endif /* _NF_CONNTRACK_SCTP_H */
4974     diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h
4975     index 9b1d43d671a3f..8c18dc6d3fde5 100644
4976     --- a/include/soc/nps/common.h
4977     +++ b/include/soc/nps/common.h
4978     @@ -45,6 +45,12 @@
4979     #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60
4980     #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422
4981    
4982     +#ifndef AUX_IENABLE
4983     +#define AUX_IENABLE 0x40c
4984     +#endif
4985     +
4986     +#define CTOP_AUX_IACK (0xFFFFF800 + 0x088)
4987     +
4988     #ifndef __ASSEMBLY__
4989    
4990     /* In order to increase compilation test coverage */
4991     diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
4992     index ec37563674d62..60c7be5ff5c8c 100644
4993     --- a/kernel/gcov/gcc_4_7.c
4994     +++ b/kernel/gcov/gcc_4_7.c
4995     @@ -19,7 +19,9 @@
4996     #include <linux/vmalloc.h>
4997     #include "gcov.h"
4998    
4999     -#if (__GNUC__ >= 7)
5000     +#if (__GNUC__ >= 10)
5001     +#define GCOV_COUNTERS 8
5002     +#elif (__GNUC__ >= 7)
5003     #define GCOV_COUNTERS 9
5004     #elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
5005     #define GCOV_COUNTERS 10
5006     diff --git a/lib/kobject.c b/lib/kobject.c
5007     index 386873bdd51c9..0c6d17503a113 100644
5008     --- a/lib/kobject.c
5009     +++ b/lib/kobject.c
5010     @@ -630,8 +630,12 @@ static void __kobject_del(struct kobject *kobj)
5011     */
5012     void kobject_del(struct kobject *kobj)
5013     {
5014     - struct kobject *parent = kobj->parent;
5015     + struct kobject *parent;
5016     +
5017     + if (!kobj)
5018     + return;
5019    
5020     + parent = kobj->parent;
5021     __kobject_del(kobj);
5022     kobject_put(parent);
5023     }
5024     diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
5025     index 4f897b14b6069..810cca24b3990 100644
5026     --- a/net/netfilter/nf_conntrack_proto_sctp.c
5027     +++ b/net/netfilter/nf_conntrack_proto_sctp.c
5028     @@ -62,6 +62,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
5029     [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS,
5030     };
5031    
5032     +#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
5033     +
5034     #define sNO SCTP_CONNTRACK_NONE
5035     #define sCL SCTP_CONNTRACK_CLOSED
5036     #define sCW SCTP_CONNTRACK_COOKIE_WAIT
5037     @@ -369,6 +371,7 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
5038     u_int32_t offset, count;
5039     unsigned int *timeouts;
5040     unsigned long map[256 / sizeof(unsigned long)] = { 0 };
5041     + bool ignore = false;
5042    
5043     if (sctp_error(skb, dataoff, state))
5044     return -NF_ACCEPT;
5045     @@ -427,15 +430,39 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
5046     /* Sec 8.5.1 (D) */
5047     if (sh->vtag != ct->proto.sctp.vtag[dir])
5048     goto out_unlock;
5049     - } else if (sch->type == SCTP_CID_HEARTBEAT ||
5050     - sch->type == SCTP_CID_HEARTBEAT_ACK) {
5051     + } else if (sch->type == SCTP_CID_HEARTBEAT) {
5052     + if (ct->proto.sctp.vtag[dir] == 0) {
5053     + pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir);
5054     + ct->proto.sctp.vtag[dir] = sh->vtag;
5055     + } else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
5056     + if (test_bit(SCTP_CID_DATA, map) || ignore)
5057     + goto out_unlock;
5058     +
5059     + ct->proto.sctp.flags |= SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
5060     + ct->proto.sctp.last_dir = dir;
5061     + ignore = true;
5062     + continue;
5063     + } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
5064     + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
5065     + }
5066     + } else if (sch->type == SCTP_CID_HEARTBEAT_ACK) {
5067     if (ct->proto.sctp.vtag[dir] == 0) {
5068     pr_debug("Setting vtag %x for dir %d\n",
5069     sh->vtag, dir);
5070     ct->proto.sctp.vtag[dir] = sh->vtag;
5071     } else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
5072     - pr_debug("Verification tag check failed\n");
5073     - goto out_unlock;
5074     + if (test_bit(SCTP_CID_DATA, map) || ignore)
5075     + goto out_unlock;
5076     +
5077     + if ((ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) == 0 ||
5078     + ct->proto.sctp.last_dir == dir)
5079     + goto out_unlock;
5080     +
5081     + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
5082     + ct->proto.sctp.vtag[dir] = sh->vtag;
5083     + ct->proto.sctp.vtag[!dir] = 0;
5084     + } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
5085     + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
5086     }
5087     }
5088    
5089     @@ -470,6 +497,10 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
5090     }
5091     spin_unlock_bh(&ct->lock);
5092    
5093     + /* allow but do not refresh timeout */
5094     + if (ignore)
5095     + return NF_ACCEPT;
5096     +
5097     timeouts = nf_ct_timeout_lookup(ct);
5098     if (!timeouts)
5099     timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts;
5100     diff --git a/net/wireless/util.c b/net/wireless/util.c
5101     index 8481e9ac33da5..9abafd76ec50e 100644
5102     --- a/net/wireless/util.c
5103     +++ b/net/wireless/util.c
5104     @@ -116,11 +116,13 @@ int ieee80211_frequency_to_channel(int freq)
5105     return (freq - 2407) / 5;
5106     else if (freq >= 4910 && freq <= 4980)
5107     return (freq - 4000) / 5;
5108     - else if (freq < 5945)
5109     + else if (freq < 5925)
5110     return (freq - 5000) / 5;
5111     + else if (freq == 5935)
5112     + return 2;
5113     else if (freq <= 45000) /* DMG band lower limit */
5114     - /* see 802.11ax D4.1 27.3.22.2 */
5115     - return (freq - 5940) / 5;
5116     + /* see 802.11ax D6.1 27.3.22.2 */
5117     + return (freq - 5950) / 5;
5118     else if (freq >= 58320 && freq <= 70200)
5119     return (freq - 56160) / 2160;
5120     else
5121     diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
5122     index c946fd8beebca..b84e12f4f8046 100644
5123     --- a/sound/hda/hdac_device.c
5124     +++ b/sound/hda/hdac_device.c
5125     @@ -127,6 +127,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_device_init);
5126     void snd_hdac_device_exit(struct hdac_device *codec)
5127     {
5128     pm_runtime_put_noidle(&codec->dev);
5129     + /* keep balance of runtime PM child_count in parent device */
5130     + pm_runtime_set_suspended(&codec->dev);
5131     snd_hdac_bus_remove_device(codec->bus, codec);
5132     kfree(codec->vendor_name);
5133     kfree(codec->chip_name);
5134     diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
5135     index e5191584638ab..e378cb33c69df 100644
5136     --- a/sound/pci/hda/hda_tegra.c
5137     +++ b/sound/pci/hda/hda_tegra.c
5138     @@ -169,6 +169,10 @@ static int __maybe_unused hda_tegra_runtime_suspend(struct device *dev)
5139     struct hdac_bus *bus = azx_bus(chip);
5140    
5141     if (chip && chip->running) {
5142     + /* enable controller wake up event */
5143     + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) |
5144     + STATESTS_INT_MASK);
5145     +
5146     azx_stop_chip(chip);
5147     synchronize_irq(bus->irq);
5148     azx_enter_link_reset(chip);
5149     @@ -191,6 +195,9 @@ static int __maybe_unused hda_tegra_runtime_resume(struct device *dev)
5150     if (chip && chip->running) {
5151     hda_tegra_init(hda);
5152     azx_init_chip(chip, 1);
5153     + /* disable controller wake up event*/
5154     + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
5155     + ~STATESTS_INT_MASK);
5156     }
5157    
5158     return 0;
5159     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
5160     index a13bad262598d..51798632d334c 100644
5161     --- a/sound/pci/hda/patch_hdmi.c
5162     +++ b/sound/pci/hda/patch_hdmi.c
5163     @@ -3678,6 +3678,7 @@ static int tegra_hdmi_build_pcms(struct hda_codec *codec)
5164    
5165     static int patch_tegra_hdmi(struct hda_codec *codec)
5166     {
5167     + struct hdmi_spec *spec;
5168     int err;
5169    
5170     err = patch_generic_hdmi(codec);
5171     @@ -3685,6 +3686,10 @@ static int patch_tegra_hdmi(struct hda_codec *codec)
5172     return err;
5173    
5174     codec->patch_ops.build_pcms = tegra_hdmi_build_pcms;
5175     + spec = codec->spec;
5176     + spec->chmap.ops.chmap_cea_alloc_validate_get_type =
5177     + nvhdmi_chmap_cea_alloc_validate_get_type;
5178     + spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
5179    
5180     return 0;
5181     }
5182     @@ -4200,6 +4205,7 @@ HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi),
5183     HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
5184     HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi),
5185     HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi),
5186     +HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
5187     HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
5188     HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
5189     HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
5190     diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
5191     index 7656c7ce79d90..0e73a16874c4c 100644
5192     --- a/tools/testing/selftests/timers/Makefile
5193     +++ b/tools/testing/selftests/timers/Makefile
5194     @@ -13,6 +13,7 @@ DESTRUCTIVE_TESTS = alarmtimer-suspend valid-adjtimex adjtick change_skew \
5195    
5196     TEST_GEN_PROGS_EXTENDED = $(DESTRUCTIVE_TESTS)
5197    
5198     +TEST_FILES := settings
5199    
5200     include ../lib.mk
5201    
5202     diff --git a/tools/testing/selftests/timers/settings b/tools/testing/selftests/timers/settings
5203     new file mode 100644
5204     index 0000000000000..e7b9417537fbc
5205     --- /dev/null
5206     +++ b/tools/testing/selftests/timers/settings
5207     @@ -0,0 +1 @@
5208     +timeout=0
5209     diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
5210     index 7501ec8a46004..1e1c4f6a85c7d 100644
5211     --- a/virt/kvm/arm/mmu.c
5212     +++ b/virt/kvm/arm/mmu.c
5213     @@ -1814,7 +1814,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
5214     (fault_status == FSC_PERM &&
5215     stage2_is_exec(kvm, fault_ipa, vma_pagesize));
5216    
5217     - if (vma_pagesize == PUD_SIZE) {
5218     + /*
5219     + * If PUD_SIZE == PMD_SIZE, there is no real PUD level, and
5220     + * all we have is a 2-level page table. Trying to map a PUD in
5221     + * this case would be fatally wrong.
5222     + */
5223     + if (PUD_SIZE != PMD_SIZE && vma_pagesize == PUD_SIZE) {
5224     pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
5225    
5226     new_pud = kvm_pud_mkhuge(new_pud);
5227     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
5228     index 278bdc53047e8..4eed7fd8db939 100644
5229     --- a/virt/kvm/kvm_main.c
5230     +++ b/virt/kvm/kvm_main.c
5231     @@ -4010,7 +4010,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
5232     void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5233     struct kvm_io_device *dev)
5234     {
5235     - int i;
5236     + int i, j;
5237     struct kvm_io_bus *new_bus, *bus;
5238    
5239     bus = kvm_get_bus(kvm, bus_idx);
5240     @@ -4027,17 +4027,20 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5241    
5242     new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
5243     GFP_KERNEL_ACCOUNT);
5244     - if (!new_bus) {
5245     + if (new_bus) {
5246     + memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
5247     + new_bus->dev_count--;
5248     + memcpy(new_bus->range + i, bus->range + i + 1,
5249     + (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
5250     + } else {
5251     pr_err("kvm: failed to shrink bus, removing it completely\n");
5252     - goto broken;
5253     + for (j = 0; j < bus->dev_count; j++) {
5254     + if (j == i)
5255     + continue;
5256     + kvm_iodevice_destructor(bus->range[j].dev);
5257     + }
5258     }
5259    
5260     - memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
5261     - new_bus->dev_count--;
5262     - memcpy(new_bus->range + i, bus->range + i + 1,
5263     - (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
5264     -
5265     -broken:
5266     rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5267     synchronize_srcu_expedited(&kvm->srcu);
5268     kfree(bus);