Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0252-5.4.153-all-fixes.patch



Revision 3635
Mon Oct 24 12:34:12 2022 UTC by niro
File size: 92075 byte(s)
-sync kernel patches
1 niro 3635 diff --git a/Makefile b/Makefile
2     index ffcdc36c56f54..df9b1d07ca097 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 5
8     PATCHLEVEL = 4
9     -SUBLEVEL = 152
10     +SUBLEVEL = 153
11     EXTRAVERSION =
12     NAME = Kleptomaniac Octopus
13    
14     diff --git a/arch/arm/boot/dts/imx53-m53menlo.dts b/arch/arm/boot/dts/imx53-m53menlo.dts
15     index 64faf5b46d92f..03c43c1912a7e 100644
16     --- a/arch/arm/boot/dts/imx53-m53menlo.dts
17     +++ b/arch/arm/boot/dts/imx53-m53menlo.dts
18     @@ -56,6 +56,7 @@
19     panel {
20     compatible = "edt,etm0700g0dh6";
21     pinctrl-0 = <&pinctrl_display_gpio>;
22     + pinctrl-names = "default";
23     enable-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>;
24    
25     port {
26     @@ -76,8 +77,7 @@
27     regulator-name = "vbus";
28     regulator-min-microvolt = <5000000>;
29     regulator-max-microvolt = <5000000>;
30     - gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>;
31     - enable-active-high;
32     + gpio = <&gpio1 2 0>;
33     };
34     };
35    
36     diff --git a/arch/arm/boot/dts/omap3430-sdp.dts b/arch/arm/boot/dts/omap3430-sdp.dts
37     index 0abd61108a539..ec16979825378 100644
38     --- a/arch/arm/boot/dts/omap3430-sdp.dts
39     +++ b/arch/arm/boot/dts/omap3430-sdp.dts
40     @@ -101,7 +101,7 @@
41    
42     nand@1,0 {
43     compatible = "ti,omap2-nand";
44     - reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
45     + reg = <1 0 4>; /* CS1, offset 0, IO size 4 */
46     interrupt-parent = <&gpmc>;
47     interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
48     <1 IRQ_TYPE_NONE>; /* termcount */
49     diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
50     index 2b075e287610f..764984c95c686 100644
51     --- a/arch/arm/boot/dts/qcom-apq8064.dtsi
52     +++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
53     @@ -198,7 +198,7 @@
54     clock-frequency = <19200000>;
55     };
56    
57     - pxo_board {
58     + pxo_board: pxo_board {
59     compatible = "fixed-clock";
60     #clock-cells = <0>;
61     clock-frequency = <27000000>;
62     @@ -1147,7 +1147,7 @@
63     };
64    
65     gpu: adreno-3xx@4300000 {
66     - compatible = "qcom,adreno-3xx";
67     + compatible = "qcom,adreno-320.2", "qcom,adreno";
68     reg = <0x04300000 0x20000>;
69     reg-names = "kgsl_3d0_reg_memory";
70     interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
71     @@ -1162,7 +1162,6 @@
72     <&mmcc GFX3D_AHB_CLK>,
73     <&mmcc GFX3D_AXI_CLK>,
74     <&mmcc MMSS_IMEM_AHB_CLK>;
75     - qcom,chipid = <0x03020002>;
76    
77     iommus = <&gfx3d 0
78     &gfx3d 1
79     @@ -1305,7 +1304,7 @@
80     reg-names = "dsi_pll", "dsi_phy", "dsi_phy_regulator";
81     clock-names = "iface_clk", "ref";
82     clocks = <&mmcc DSI_M_AHB_CLK>,
83     - <&cxo_board>;
84     + <&pxo_board>;
85     };
86    
87    
88     diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
89     index baf3b47601af0..1b73e4e76310c 100644
90     --- a/arch/arm/mach-imx/pm-imx6.c
91     +++ b/arch/arm/mach-imx/pm-imx6.c
92     @@ -9,6 +9,7 @@
93     #include <linux/io.h>
94     #include <linux/irq.h>
95     #include <linux/genalloc.h>
96     +#include <linux/irqchip/arm-gic.h>
97     #include <linux/mfd/syscon.h>
98     #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
99     #include <linux/of.h>
100     @@ -618,6 +619,7 @@ static void __init imx6_pm_common_init(const struct imx6_pm_socdata
101    
102     static void imx6_pm_stby_poweroff(void)
103     {
104     + gic_cpu_if_down(0);
105     imx6_set_lpm(STOP_POWER_OFF);
106     imx6q_suspend_finish(0);
107    
108     diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
109     index eb74aa1826614..6289b288d60a6 100644
110     --- a/arch/arm/mach-omap2/omap_hwmod.c
111     +++ b/arch/arm/mach-omap2/omap_hwmod.c
112     @@ -3656,6 +3656,8 @@ int omap_hwmod_init_module(struct device *dev,
113     oh->flags |= HWMOD_SWSUP_SIDLE_ACT;
114     if (data->cfg->quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
115     oh->flags |= HWMOD_SWSUP_MSTANDBY;
116     + if (data->cfg->quirks & SYSC_QUIRK_CLKDM_NOAUTO)
117     + oh->flags |= HWMOD_CLKDM_NOAUTO;
118    
119     error = omap_hwmod_check_module(dev, oh, data, sysc_fields,
120     rev_offs, sysc_offs, syss_offs,
121     diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
122     index b51a8c7b01114..1c6e57f1dbc48 100644
123     --- a/arch/arm/net/bpf_jit_32.c
124     +++ b/arch/arm/net/bpf_jit_32.c
125     @@ -36,6 +36,10 @@
126     * +-----+
127     * |RSVD | JIT scratchpad
128     * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
129     + * | ... | caller-saved registers
130     + * +-----+
131     + * | ... | arguments passed on stack
132     + * ARM_SP during call => +-----|
133     * | |
134     * | ... | Function call stack
135     * | |
136     @@ -63,6 +67,12 @@
137     *
138     * When popping registers off the stack at the end of a BPF function, we
139     * reference them via the current ARM_FP register.
140     + *
141     + * Some eBPF operations are implemented via a call to a helper function.
142     + * Such calls are "invisible" in the eBPF code, so it is up to the calling
143     + * program to preserve any caller-saved ARM registers during the call. The
144     + * JIT emits code to push and pop those registers onto the stack, immediately
145     + * above the callee stack frame.
146     */
147     #define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
148     1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
149     @@ -70,6 +80,8 @@
150     #define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
151     #define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC)
152    
153     +#define CALLER_MASK (1 << ARM_R0 | 1 << ARM_R1 | 1 << ARM_R2 | 1 << ARM_R3)
154     +
155     enum {
156     /* Stack layout - these are offsets from (top of stack - 4) */
157     BPF_R2_HI,
158     @@ -464,6 +476,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
159    
160     static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
161     {
162     + const int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1);
163     const s8 *tmp = bpf2a32[TMP_REG_1];
164    
165     #if __LINUX_ARM_ARCH__ == 7
166     @@ -495,11 +508,17 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
167     emit(ARM_MOV_R(ARM_R0, rm), ctx);
168     }
169    
170     + /* Push caller-saved registers on stack */
171     + emit(ARM_PUSH(CALLER_MASK & ~exclude_mask), ctx);
172     +
173     /* Call appropriate function */
174     emit_mov_i(ARM_IP, op == BPF_DIV ?
175     (u32)jit_udiv32 : (u32)jit_mod32, ctx);
176     emit_blx_r(ARM_IP, ctx);
177    
178     + /* Restore caller-saved registers from stack */
179     + emit(ARM_POP(CALLER_MASK & ~exclude_mask), ctx);
180     +
181     /* Save return value */
182     if (rd != ARM_R0)
183     emit(ARM_MOV_R(rd, ARM_R0), ctx);
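The hunk above makes the ARM eBPF JIT preserve the caller-saved registers around the out-of-line jit_udiv32()/jit_mod32() helpers; r0 and r1 are excluded from the push/pop because they pass the operands and return the result. A minimal standalone sketch of the mask arithmetic (the ARM_R* encodings and CALLER_MASK mirror the definitions used by bpf_jit_32.c; the program itself is illustrative only, not part of the patch):

#include <stdio.h>

/* r0-r3 are the AAPCS caller-saved argument registers (encodings 0-3). */
#define ARM_R0 0
#define ARM_R1 1
#define ARM_R2 2
#define ARM_R3 3

#define CALLER_MASK (1 << ARM_R0 | 1 << ARM_R1 | 1 << ARM_R2 | 1 << ARM_R3)

int main(void)
{
	/* emit_udivmod() excludes r0/r1: they carry the dividend/divisor and
	 * r0 receives the result, so their old values need not survive. */
	unsigned int exclude_mask = (1u << ARM_R0) | (1u << ARM_R1);
	unsigned int saved = CALLER_MASK & ~exclude_mask;

	/* Prints 0xc: only r2 and r3 are pushed before the helper call and
	 * popped afterwards, matching ARM_PUSH/ARM_POP in the hunk above. */
	printf("push/pop mask = 0x%x\n", saved);
	return 0;
}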
184     diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
185     index 5716ac20bddd1..02ae6bfff5658 100644
186     --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
187     +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
188     @@ -287,6 +287,24 @@
189     status = "disabled";
190     };
191    
192     + can0: can@2180000 {
193     + compatible = "fsl,ls1028ar1-flexcan", "fsl,lx2160ar1-flexcan";
194     + reg = <0x0 0x2180000 0x0 0x10000>;
195     + interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
196     + clocks = <&sysclk>, <&clockgen 4 1>;
197     + clock-names = "ipg", "per";
198     + status = "disabled";
199     + };
200     +
201     + can1: can@2190000 {
202     + compatible = "fsl,ls1028ar1-flexcan", "fsl,lx2160ar1-flexcan";
203     + reg = <0x0 0x2190000 0x0 0x10000>;
204     + interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
205     + clocks = <&sysclk>, <&clockgen 4 1>;
206     + clock-names = "ipg", "per";
207     + status = "disabled";
208     + };
209     +
210     duart0: serial@21c0500 {
211     compatible = "fsl,ns16550", "ns16550a";
212     reg = <0x00 0x21c0500 0x0 0x100>;
213     @@ -496,14 +514,14 @@
214     compatible = "arm,sp805", "arm,primecell";
215     reg = <0x0 0xc000000 0x0 0x1000>;
216     clocks = <&clockgen 4 15>, <&clockgen 4 15>;
217     - clock-names = "apb_pclk", "wdog_clk";
218     + clock-names = "wdog_clk", "apb_pclk";
219     };
220    
221     cluster1_core1_watchdog: watchdog@c010000 {
222     compatible = "arm,sp805", "arm,primecell";
223     reg = <0x0 0xc010000 0x0 0x1000>;
224     clocks = <&clockgen 4 15>, <&clockgen 4 15>;
225     - clock-names = "apb_pclk", "wdog_clk";
226     + clock-names = "wdog_clk", "apb_pclk";
227     };
228    
229     sai1: audio-controller@f100000 {
230     diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
231     index c676d0771762f..407ebdb35cd2e 100644
232     --- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
233     +++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
234     @@ -640,56 +640,56 @@
235     compatible = "arm,sp805-wdt", "arm,primecell";
236     reg = <0x0 0xc000000 0x0 0x1000>;
237     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
238     - clock-names = "apb_pclk", "wdog_clk";
239     + clock-names = "wdog_clk", "apb_pclk";
240     };
241    
242     cluster1_core1_watchdog: wdt@c010000 {
243     compatible = "arm,sp805-wdt", "arm,primecell";
244     reg = <0x0 0xc010000 0x0 0x1000>;
245     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
246     - clock-names = "apb_pclk", "wdog_clk";
247     + clock-names = "wdog_clk", "apb_pclk";
248     };
249    
250     cluster1_core2_watchdog: wdt@c020000 {
251     compatible = "arm,sp805-wdt", "arm,primecell";
252     reg = <0x0 0xc020000 0x0 0x1000>;
253     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
254     - clock-names = "apb_pclk", "wdog_clk";
255     + clock-names = "wdog_clk", "apb_pclk";
256     };
257    
258     cluster1_core3_watchdog: wdt@c030000 {
259     compatible = "arm,sp805-wdt", "arm,primecell";
260     reg = <0x0 0xc030000 0x0 0x1000>;
261     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
262     - clock-names = "apb_pclk", "wdog_clk";
263     + clock-names = "wdog_clk", "apb_pclk";
264     };
265    
266     cluster2_core0_watchdog: wdt@c100000 {
267     compatible = "arm,sp805-wdt", "arm,primecell";
268     reg = <0x0 0xc100000 0x0 0x1000>;
269     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
270     - clock-names = "apb_pclk", "wdog_clk";
271     + clock-names = "wdog_clk", "apb_pclk";
272     };
273    
274     cluster2_core1_watchdog: wdt@c110000 {
275     compatible = "arm,sp805-wdt", "arm,primecell";
276     reg = <0x0 0xc110000 0x0 0x1000>;
277     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
278     - clock-names = "apb_pclk", "wdog_clk";
279     + clock-names = "wdog_clk", "apb_pclk";
280     };
281    
282     cluster2_core2_watchdog: wdt@c120000 {
283     compatible = "arm,sp805-wdt", "arm,primecell";
284     reg = <0x0 0xc120000 0x0 0x1000>;
285     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
286     - clock-names = "apb_pclk", "wdog_clk";
287     + clock-names = "wdog_clk", "apb_pclk";
288     };
289    
290     cluster2_core3_watchdog: wdt@c130000 {
291     compatible = "arm,sp805-wdt", "arm,primecell";
292     reg = <0x0 0xc130000 0x0 0x1000>;
293     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
294     - clock-names = "apb_pclk", "wdog_clk";
295     + clock-names = "wdog_clk", "apb_pclk";
296     };
297    
298     fsl_mc: fsl-mc@80c000000 {
299     diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
300     index cdb2fa47637da..82f0fe6acbfb7 100644
301     --- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
302     +++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
303     @@ -230,56 +230,56 @@
304     compatible = "arm,sp805-wdt", "arm,primecell";
305     reg = <0x0 0xc000000 0x0 0x1000>;
306     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
307     - clock-names = "apb_pclk", "wdog_clk";
308     + clock-names = "wdog_clk", "apb_pclk";
309     };
310    
311     cluster1_core1_watchdog: wdt@c010000 {
312     compatible = "arm,sp805-wdt", "arm,primecell";
313     reg = <0x0 0xc010000 0x0 0x1000>;
314     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
315     - clock-names = "apb_pclk", "wdog_clk";
316     + clock-names = "wdog_clk", "apb_pclk";
317     };
318    
319     cluster2_core0_watchdog: wdt@c100000 {
320     compatible = "arm,sp805-wdt", "arm,primecell";
321     reg = <0x0 0xc100000 0x0 0x1000>;
322     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
323     - clock-names = "apb_pclk", "wdog_clk";
324     + clock-names = "wdog_clk", "apb_pclk";
325     };
326    
327     cluster2_core1_watchdog: wdt@c110000 {
328     compatible = "arm,sp805-wdt", "arm,primecell";
329     reg = <0x0 0xc110000 0x0 0x1000>;
330     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
331     - clock-names = "apb_pclk", "wdog_clk";
332     + clock-names = "wdog_clk", "apb_pclk";
333     };
334    
335     cluster3_core0_watchdog: wdt@c200000 {
336     compatible = "arm,sp805-wdt", "arm,primecell";
337     reg = <0x0 0xc200000 0x0 0x1000>;
338     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
339     - clock-names = "apb_pclk", "wdog_clk";
340     + clock-names = "wdog_clk", "apb_pclk";
341     };
342    
343     cluster3_core1_watchdog: wdt@c210000 {
344     compatible = "arm,sp805-wdt", "arm,primecell";
345     reg = <0x0 0xc210000 0x0 0x1000>;
346     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
347     - clock-names = "apb_pclk", "wdog_clk";
348     + clock-names = "wdog_clk", "apb_pclk";
349     };
350    
351     cluster4_core0_watchdog: wdt@c300000 {
352     compatible = "arm,sp805-wdt", "arm,primecell";
353     reg = <0x0 0xc300000 0x0 0x1000>;
354     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
355     - clock-names = "apb_pclk", "wdog_clk";
356     + clock-names = "wdog_clk", "apb_pclk";
357     };
358    
359     cluster4_core1_watchdog: wdt@c310000 {
360     compatible = "arm,sp805-wdt", "arm,primecell";
361     reg = <0x0 0xc310000 0x0 0x1000>;
362     clocks = <&clockgen 4 3>, <&clockgen 4 3>;
363     - clock-names = "apb_pclk", "wdog_clk";
364     + clock-names = "wdog_clk", "apb_pclk";
365     };
366    
367     crypto: crypto@8000000 {
368     diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
369     index c0b197458665d..6f7dfcb8c0421 100644
370     --- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
371     +++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
372     @@ -17,7 +17,7 @@
373     #size-cells = <0>;
374    
375     pon: power-on@800 {
376     - compatible = "qcom,pm8916-pon";
377     + compatible = "qcom,pm8998-pon";
378     reg = <0x0800>;
379     pwrkey {
380     compatible = "qcom,pm8941-pwrkey";
381     diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
382     index 6ecdc690f7336..2bfef67d52c63 100644
383     --- a/arch/mips/Kconfig
384     +++ b/arch/mips/Kconfig
385     @@ -46,6 +46,7 @@ config MIPS
386     select HAVE_ARCH_TRACEHOOK
387     select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES
388     select HAVE_ASM_MODVERSIONS
389     + select HAVE_CBPF_JIT if !64BIT && !CPU_MICROMIPS
390     select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2
391     select HAVE_CONTEXT_TRACKING
392     select HAVE_COPY_THREAD_TLS
393     diff --git a/arch/mips/net/Makefile b/arch/mips/net/Makefile
394     index 2d03af7d6b19d..d55912349039c 100644
395     --- a/arch/mips/net/Makefile
396     +++ b/arch/mips/net/Makefile
397     @@ -1,4 +1,5 @@
398     # SPDX-License-Identifier: GPL-2.0-only
399     # MIPS networking code
400    
401     +obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o
402     obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o
403     diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
404     new file mode 100644
405     index 0000000000000..29a288ff4f183
406     --- /dev/null
407     +++ b/arch/mips/net/bpf_jit.c
408     @@ -0,0 +1,1299 @@
409     +/*
410     + * Just-In-Time compiler for BPF filters on MIPS
411     + *
412     + * Copyright (c) 2014 Imagination Technologies Ltd.
413     + * Author: Markos Chandras <markos.chandras@imgtec.com>
414     + *
415     + * This program is free software; you can redistribute it and/or modify it
416     + * under the terms of the GNU General Public License as published by the
417     + * Free Software Foundation; version 2 of the License.
418     + */
419     +
420     +#include <linux/bitops.h>
421     +#include <linux/compiler.h>
422     +#include <linux/errno.h>
423     +#include <linux/filter.h>
424     +#include <linux/if_vlan.h>
425     +#include <linux/moduleloader.h>
426     +#include <linux/netdevice.h>
427     +#include <linux/string.h>
428     +#include <linux/slab.h>
429     +#include <linux/types.h>
430     +#include <asm/asm.h>
431     +#include <asm/bitops.h>
432     +#include <asm/cacheflush.h>
433     +#include <asm/cpu-features.h>
434     +#include <asm/uasm.h>
435     +
436     +#include "bpf_jit.h"
437     +
438     +/* ABI
439     + * r_skb_hl SKB header length
440     + * r_data SKB data pointer
441     + * r_off Offset
442     + * r_A BPF register A
443     + * r_X BPF register X
444     + * r_skb *skb
445     + * r_M *scratch memory
446     + * r_skb_len SKB length
447     + *
448     + * On entry (*bpf_func)(*skb, *filter)
449     + * a0 = MIPS_R_A0 = skb;
450     + * a1 = MIPS_R_A1 = filter;
451     + *
452     + * Stack
453     + * ...
454     + * M[15]
455     + * M[14]
456     + * M[13]
457     + * ...
458     + * M[0] <-- r_M
459     + * saved reg k-1
460     + * saved reg k-2
461     + * ...
462     + * saved reg 0 <-- r_sp
463     + * <no argument area>
464     + *
465     + * Packet layout
466     + *
467     + * <--------------------- len ------------------------>
468     + * <--skb-len(r_skb_hl)-->< ----- skb->data_len ------>
469     + * ----------------------------------------------------
470     + * | skb->data |
471     + * ----------------------------------------------------
472     + */
473     +
474     +#define ptr typeof(unsigned long)
475     +
476     +#define SCRATCH_OFF(k) (4 * (k))
477     +
478     +/* JIT flags */
479     +#define SEEN_CALL (1 << BPF_MEMWORDS)
480     +#define SEEN_SREG_SFT (BPF_MEMWORDS + 1)
481     +#define SEEN_SREG_BASE (1 << SEEN_SREG_SFT)
482     +#define SEEN_SREG(x) (SEEN_SREG_BASE << (x))
483     +#define SEEN_OFF SEEN_SREG(2)
484     +#define SEEN_A SEEN_SREG(3)
485     +#define SEEN_X SEEN_SREG(4)
486     +#define SEEN_SKB SEEN_SREG(5)
487     +#define SEEN_MEM SEEN_SREG(6)
488     +/* SEEN_SK_DATA also implies skb_hl and skb_len */
489     +#define SEEN_SKB_DATA (SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0))
490     +
491     +/* Arguments used by JIT */
492     +#define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */
493     +
494     +#define SBIT(x) (1 << (x)) /* Signed version of BIT() */
495     +
496     +/**
497     + * struct jit_ctx - JIT context
498     + * @skf: The sk_filter
499     + * @prologue_bytes: Number of bytes for prologue
500     + * @idx: Instruction index
501     + * @flags: JIT flags
502     + * @offsets: Instruction offsets
503     + * @target: Memory location for the compiled filter
504     + */
505     +struct jit_ctx {
506     + const struct bpf_prog *skf;
507     + unsigned int prologue_bytes;
508     + u32 idx;
509     + u32 flags;
510     + u32 *offsets;
511     + u32 *target;
512     +};
513     +
514     +
515     +static inline int optimize_div(u32 *k)
516     +{
517     + /* power of 2 divides can be implemented with right shift */
518     + if (!(*k & (*k-1))) {
519     + *k = ilog2(*k);
520     + return 1;
521     + }
522     +
523     + return 0;
524     +}
525     +
526     +static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
527     +
528     +/* Simply emit the instruction if the JIT memory space has been allocated */
529     +#define emit_instr(ctx, func, ...) \
530     +do { \
531     + if ((ctx)->target != NULL) { \
532     + u32 *p = &(ctx)->target[ctx->idx]; \
533     + uasm_i_##func(&p, ##__VA_ARGS__); \
534     + } \
535     + (ctx)->idx++; \
536     +} while (0)
537     +
538     +/*
539     + * Similar to emit_instr but it must be used when we need to emit
540     + * 32-bit or 64-bit instructions
541     + */
542     +#define emit_long_instr(ctx, func, ...) \
543     +do { \
544     + if ((ctx)->target != NULL) { \
545     + u32 *p = &(ctx)->target[ctx->idx]; \
546     + UASM_i_##func(&p, ##__VA_ARGS__); \
547     + } \
548     + (ctx)->idx++; \
549     +} while (0)
550     +
551     +/* Determine if immediate is within the 16-bit signed range */
552     +static inline bool is_range16(s32 imm)
553     +{
554     + return !(imm >= SBIT(15) || imm < -SBIT(15));
555     +}
556     +
557     +static inline void emit_addu(unsigned int dst, unsigned int src1,
558     + unsigned int src2, struct jit_ctx *ctx)
559     +{
560     + emit_instr(ctx, addu, dst, src1, src2);
561     +}
562     +
563     +static inline void emit_nop(struct jit_ctx *ctx)
564     +{
565     + emit_instr(ctx, nop);
566     +}
567     +
568     +/* Load a u32 immediate to a register */
569     +static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
570     +{
571     + if (ctx->target != NULL) {
572     + /* addiu can only handle s16 */
573     + if (!is_range16(imm)) {
574     + u32 *p = &ctx->target[ctx->idx];
575     + uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
576     + p = &ctx->target[ctx->idx + 1];
577     + uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
578     + } else {
579     + u32 *p = &ctx->target[ctx->idx];
580     + uasm_i_addiu(&p, dst, r_zero, imm);
581     + }
582     + }
583     + ctx->idx++;
584     +
585     + if (!is_range16(imm))
586     + ctx->idx++;
587     +}
588     +
589     +static inline void emit_or(unsigned int dst, unsigned int src1,
590     + unsigned int src2, struct jit_ctx *ctx)
591     +{
592     + emit_instr(ctx, or, dst, src1, src2);
593     +}
594     +
595     +static inline void emit_ori(unsigned int dst, unsigned src, u32 imm,
596     + struct jit_ctx *ctx)
597     +{
598     + if (imm >= BIT(16)) {
599     + emit_load_imm(r_tmp, imm, ctx);
600     + emit_or(dst, src, r_tmp, ctx);
601     + } else {
602     + emit_instr(ctx, ori, dst, src, imm);
603     + }
604     +}
605     +
606     +static inline void emit_daddiu(unsigned int dst, unsigned int src,
607     + int imm, struct jit_ctx *ctx)
608     +{
609     + /*
610     + * Only used for stack, so the imm is relatively small
611     + * and it fits in 15-bits
612     + */
613     + emit_instr(ctx, daddiu, dst, src, imm);
614     +}
615     +
616     +static inline void emit_addiu(unsigned int dst, unsigned int src,
617     + u32 imm, struct jit_ctx *ctx)
618     +{
619     + if (!is_range16(imm)) {
620     + emit_load_imm(r_tmp, imm, ctx);
621     + emit_addu(dst, r_tmp, src, ctx);
622     + } else {
623     + emit_instr(ctx, addiu, dst, src, imm);
624     + }
625     +}
626     +
627     +static inline void emit_and(unsigned int dst, unsigned int src1,
628     + unsigned int src2, struct jit_ctx *ctx)
629     +{
630     + emit_instr(ctx, and, dst, src1, src2);
631     +}
632     +
633     +static inline void emit_andi(unsigned int dst, unsigned int src,
634     + u32 imm, struct jit_ctx *ctx)
635     +{
636     + /* If imm does not fit in u16 then load it to register */
637     + if (imm >= BIT(16)) {
638     + emit_load_imm(r_tmp, imm, ctx);
639     + emit_and(dst, src, r_tmp, ctx);
640     + } else {
641     + emit_instr(ctx, andi, dst, src, imm);
642     + }
643     +}
644     +
645     +static inline void emit_xor(unsigned int dst, unsigned int src1,
646     + unsigned int src2, struct jit_ctx *ctx)
647     +{
648     + emit_instr(ctx, xor, dst, src1, src2);
649     +}
650     +
651     +static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
652     +{
653     + /* If imm does not fit in u16 then load it to register */
654     + if (imm >= BIT(16)) {
655     + emit_load_imm(r_tmp, imm, ctx);
656     + emit_xor(dst, src, r_tmp, ctx);
657     + } else {
658     + emit_instr(ctx, xori, dst, src, imm);
659     + }
660     +}
661     +
662     +static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
663     +{
664     + emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset);
665     +}
666     +
667     +static inline void emit_subu(unsigned int dst, unsigned int src1,
668     + unsigned int src2, struct jit_ctx *ctx)
669     +{
670     + emit_instr(ctx, subu, dst, src1, src2);
671     +}
672     +
673     +static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
674     +{
675     + emit_subu(reg, r_zero, reg, ctx);
676     +}
677     +
678     +static inline void emit_sllv(unsigned int dst, unsigned int src,
679     + unsigned int sa, struct jit_ctx *ctx)
680     +{
681     + emit_instr(ctx, sllv, dst, src, sa);
682     +}
683     +
684     +static inline void emit_sll(unsigned int dst, unsigned int src,
685     + unsigned int sa, struct jit_ctx *ctx)
686     +{
687     + /* sa is 5-bits long */
688     + if (sa >= BIT(5))
689     + /* Shifting >= 32 results in zero */
690     + emit_jit_reg_move(dst, r_zero, ctx);
691     + else
692     + emit_instr(ctx, sll, dst, src, sa);
693     +}
694     +
695     +static inline void emit_srlv(unsigned int dst, unsigned int src,
696     + unsigned int sa, struct jit_ctx *ctx)
697     +{
698     + emit_instr(ctx, srlv, dst, src, sa);
699     +}
700     +
701     +static inline void emit_srl(unsigned int dst, unsigned int src,
702     + unsigned int sa, struct jit_ctx *ctx)
703     +{
704     + /* sa is 5-bits long */
705     + if (sa >= BIT(5))
706     + /* Shifting >= 32 results in zero */
707     + emit_jit_reg_move(dst, r_zero, ctx);
708     + else
709     + emit_instr(ctx, srl, dst, src, sa);
710     +}
711     +
712     +static inline void emit_slt(unsigned int dst, unsigned int src1,
713     + unsigned int src2, struct jit_ctx *ctx)
714     +{
715     + emit_instr(ctx, slt, dst, src1, src2);
716     +}
717     +
718     +static inline void emit_sltu(unsigned int dst, unsigned int src1,
719     + unsigned int src2, struct jit_ctx *ctx)
720     +{
721     + emit_instr(ctx, sltu, dst, src1, src2);
722     +}
723     +
724     +static inline void emit_sltiu(unsigned dst, unsigned int src,
725     + unsigned int imm, struct jit_ctx *ctx)
726     +{
727     + /* 16 bit immediate */
728     + if (!is_range16((s32)imm)) {
729     + emit_load_imm(r_tmp, imm, ctx);
730     + emit_sltu(dst, src, r_tmp, ctx);
731     + } else {
732     + emit_instr(ctx, sltiu, dst, src, imm);
733     + }
734     +
735     +}
736     +
737     +/* Store register on the stack */
738     +static inline void emit_store_stack_reg(ptr reg, ptr base,
739     + unsigned int offset,
740     + struct jit_ctx *ctx)
741     +{
742     + emit_long_instr(ctx, SW, reg, offset, base);
743     +}
744     +
745     +static inline void emit_store(ptr reg, ptr base, unsigned int offset,
746     + struct jit_ctx *ctx)
747     +{
748     + emit_instr(ctx, sw, reg, offset, base);
749     +}
750     +
751     +static inline void emit_load_stack_reg(ptr reg, ptr base,
752     + unsigned int offset,
753     + struct jit_ctx *ctx)
754     +{
755     + emit_long_instr(ctx, LW, reg, offset, base);
756     +}
757     +
758     +static inline void emit_load(unsigned int reg, unsigned int base,
759     + unsigned int offset, struct jit_ctx *ctx)
760     +{
761     + emit_instr(ctx, lw, reg, offset, base);
762     +}
763     +
764     +static inline void emit_load_byte(unsigned int reg, unsigned int base,
765     + unsigned int offset, struct jit_ctx *ctx)
766     +{
767     + emit_instr(ctx, lb, reg, offset, base);
768     +}
769     +
770     +static inline void emit_half_load(unsigned int reg, unsigned int base,
771     + unsigned int offset, struct jit_ctx *ctx)
772     +{
773     + emit_instr(ctx, lh, reg, offset, base);
774     +}
775     +
776     +static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base,
777     + unsigned int offset, struct jit_ctx *ctx)
778     +{
779     + emit_instr(ctx, lhu, reg, offset, base);
780     +}
781     +
782     +static inline void emit_mul(unsigned int dst, unsigned int src1,
783     + unsigned int src2, struct jit_ctx *ctx)
784     +{
785     + emit_instr(ctx, mul, dst, src1, src2);
786     +}
787     +
788     +static inline void emit_div(unsigned int dst, unsigned int src,
789     + struct jit_ctx *ctx)
790     +{
791     + if (ctx->target != NULL) {
792     + u32 *p = &ctx->target[ctx->idx];
793     + uasm_i_divu(&p, dst, src);
794     + p = &ctx->target[ctx->idx + 1];
795     + uasm_i_mflo(&p, dst);
796     + }
797     + ctx->idx += 2; /* 2 insts */
798     +}
799     +
800     +static inline void emit_mod(unsigned int dst, unsigned int src,
801     + struct jit_ctx *ctx)
802     +{
803     + if (ctx->target != NULL) {
804     + u32 *p = &ctx->target[ctx->idx];
805     + uasm_i_divu(&p, dst, src);
806     + p = &ctx->target[ctx->idx + 1];
807     + uasm_i_mfhi(&p, dst);
808     + }
809     + ctx->idx += 2; /* 2 insts */
810     +}
811     +
812     +static inline void emit_dsll(unsigned int dst, unsigned int src,
813     + unsigned int sa, struct jit_ctx *ctx)
814     +{
815     + emit_instr(ctx, dsll, dst, src, sa);
816     +}
817     +
818     +static inline void emit_dsrl32(unsigned int dst, unsigned int src,
819     + unsigned int sa, struct jit_ctx *ctx)
820     +{
821     + emit_instr(ctx, dsrl32, dst, src, sa);
822     +}
823     +
824     +static inline void emit_wsbh(unsigned int dst, unsigned int src,
825     + struct jit_ctx *ctx)
826     +{
827     + emit_instr(ctx, wsbh, dst, src);
828     +}
829     +
830     +/* load pointer to register */
831     +static inline void emit_load_ptr(unsigned int dst, unsigned int src,
832     + int imm, struct jit_ctx *ctx)
833     +{
834     + /* src contains the base addr of the 32/64-pointer */
835     + emit_long_instr(ctx, LW, dst, imm, src);
836     +}
837     +
838     +/* load a function pointer to register */
839     +static inline void emit_load_func(unsigned int reg, ptr imm,
840     + struct jit_ctx *ctx)
841     +{
842     + if (IS_ENABLED(CONFIG_64BIT)) {
843     + /* At this point imm is always 64-bit */
844     + emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
845     + emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
846     + emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
847     + emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
848     + emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
849     + } else {
850     + emit_load_imm(reg, imm, ctx);
851     + }
852     +}
853     +
854     +/* Move to real MIPS register */
855     +static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
856     +{
857     + emit_long_instr(ctx, ADDU, dst, src, r_zero);
858     +}
859     +
860     +/* Move to JIT (32-bit) register */
861     +static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
862     +{
863     + emit_addu(dst, src, r_zero, ctx);
864     +}
865     +
866     +/* Compute the immediate value for PC-relative branches. */
867     +static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
868     +{
869     + if (ctx->target == NULL)
870     + return 0;
871     +
872     + /*
873     + * We want a pc-relative branch. We only do forward branches
874     + * so tgt is always after pc. tgt is the instruction offset
875     + * we want to jump to.
876     +
877     + * Branch on MIPS:
878     + * I: target_offset <- sign_extend(offset)
879     + * I+1: PC += target_offset (delay slot)
880     + *
881     + * ctx->idx currently points to the branch instruction
882     + * but the offset is added to the delay slot so we need
883     + * to subtract 4.
884     + */
885     + return ctx->offsets[tgt] -
886     + (ctx->idx * 4 - ctx->prologue_bytes) - 4;
887     +}
888     +
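/*
 * Worked example for b_imm() above, with hypothetical numbers: suppose the
 * prologue is 5 instructions (prologue_bytes = 20), the branch is being
 * emitted at ctx->idx = 12 (byte 48 of the image, byte 28 of the body), and
 * the first (sizing) pass recorded the target at ctx->offsets[tgt] = 40.
 * The recorded offsets are body-relative because that pass runs build_body()
 * before build_prologue().  Then:
 *
 *     b_imm = 40 - (12 * 4 - 20) - 4 = 8
 *
 * i.e. the target lies 8 bytes (two instructions) beyond the delay slot.
 */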
889     +static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
890     + unsigned int imm, struct jit_ctx *ctx)
891     +{
892     + if (ctx->target != NULL) {
893     + u32 *p = &ctx->target[ctx->idx];
894     +
895     + switch (cond) {
896     + case MIPS_COND_EQ:
897     + uasm_i_beq(&p, reg1, reg2, imm);
898     + break;
899     + case MIPS_COND_NE:
900     + uasm_i_bne(&p, reg1, reg2, imm);
901     + break;
902     + case MIPS_COND_ALL:
903     + uasm_i_b(&p, imm);
904     + break;
905     + default:
906     + pr_warn("%s: Unhandled branch conditional: %d\n",
907     + __func__, cond);
908     + }
909     + }
910     + ctx->idx++;
911     +}
912     +
913     +static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
914     +{
915     + emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
916     +}
917     +
918     +static inline void emit_jalr(unsigned int link, unsigned int reg,
919     + struct jit_ctx *ctx)
920     +{
921     + emit_instr(ctx, jalr, link, reg);
922     +}
923     +
924     +static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
925     +{
926     + emit_instr(ctx, jr, reg);
927     +}
928     +
929     +static inline u16 align_sp(unsigned int num)
930     +{
931     + /* Double word alignment for 32-bit, quadword for 64-bit */
932     + unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
933     + num = (num + (align - 1)) & -align;
934     + return num;
935     +}
936     +
937     +static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
938     +{
939     + int i = 0, real_off = 0;
940     + u32 sflags, tmp_flags;
941     +
942     + /* Adjust the stack pointer */
943     + if (offset)
944     + emit_stack_offset(-align_sp(offset), ctx);
945     +
946     + tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
947     + /* sflags is essentially a bitmap */
948     + while (tmp_flags) {
949     + if ((sflags >> i) & 0x1) {
950     + emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
951     + ctx);
952     + real_off += SZREG;
953     + }
954     + i++;
955     + tmp_flags >>= 1;
956     + }
957     +
958     + /* save return address */
959     + if (ctx->flags & SEEN_CALL) {
960     + emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
961     + real_off += SZREG;
962     + }
963     +
964     + /* Setup r_M leaving the alignment gap if necessary */
965     + if (ctx->flags & SEEN_MEM) {
966     + if (real_off % (SZREG * 2))
967     + real_off += SZREG;
968     + emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off);
969     + }
970     +}
971     +
972     +static void restore_bpf_jit_regs(struct jit_ctx *ctx,
973     + unsigned int offset)
974     +{
975     + int i, real_off = 0;
976     + u32 sflags, tmp_flags;
977     +
978     + tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
979     + /* sflags is a bitmap */
980     + i = 0;
981     + while (tmp_flags) {
982     + if ((sflags >> i) & 0x1) {
983     + emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
984     + ctx);
985     + real_off += SZREG;
986     + }
987     + i++;
988     + tmp_flags >>= 1;
989     + }
990     +
991     + /* restore return address */
992     + if (ctx->flags & SEEN_CALL)
993     + emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
994     +
995     + /* Restore the sp and discard the scratch memory */
996     + if (offset)
997     + emit_stack_offset(align_sp(offset), ctx);
998     +}
999     +
1000     +static unsigned int get_stack_depth(struct jit_ctx *ctx)
1001     +{
1002     + int sp_off = 0;
1003     +
1004     +
1005     + /* How many s* regs do we need to preserve? */
1006     + sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG;
1007     +
1008     + if (ctx->flags & SEEN_MEM)
1009     + sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */
1010     +
1011     + if (ctx->flags & SEEN_CALL)
1012     + sp_off += SZREG; /* Space for our ra register */
1013     +
1014     + return sp_off;
1015     +}
1016     +
1017     +static void build_prologue(struct jit_ctx *ctx)
1018     +{
1019     + int sp_off;
1020     +
1021     + /* Calculate the total offset for the stack pointer */
1022     + sp_off = get_stack_depth(ctx);
1023     + save_bpf_jit_regs(ctx, sp_off);
1024     +
1025     + if (ctx->flags & SEEN_SKB)
1026     + emit_reg_move(r_skb, MIPS_R_A0, ctx);
1027     +
1028     + if (ctx->flags & SEEN_SKB_DATA) {
1029     + /* Load packet length */
1030     + emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len),
1031     + ctx);
1032     + emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len),
1033     + ctx);
1034     + /* Load the data pointer */
1035     + emit_load_ptr(r_skb_data, r_skb,
1036     + offsetof(struct sk_buff, data), ctx);
1037     + /* Load the header length */
1038     + emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx);
1039     + }
1040     +
1041     + if (ctx->flags & SEEN_X)
1042     + emit_jit_reg_move(r_X, r_zero, ctx);
1043     +
1044     + /*
1045     + * Do not leak kernel data to userspace, we only need to clear
1046     + * r_A if it is ever used. In fact if it is never used, we
1047     + * will not save/restore it, so clearing it in this case would
1048     + * corrupt the state of the caller.
1049     + */
1050     + if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
1051     + (ctx->flags & SEEN_A))
1052     + emit_jit_reg_move(r_A, r_zero, ctx);
1053     +}
1054     +
1055     +static void build_epilogue(struct jit_ctx *ctx)
1056     +{
1057     + unsigned int sp_off;
1058     +
1059     + /* Calculate the total offset for the stack pointer */
1060     +
1061     + sp_off = get_stack_depth(ctx);
1062     + restore_bpf_jit_regs(ctx, sp_off);
1063     +
1064     + /* Return */
1065     + emit_jr(r_ra, ctx);
1066     + emit_nop(ctx);
1067     +}
1068     +
1069     +#define CHOOSE_LOAD_FUNC(K, func) \
1070     + ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
1071     + func##_positive)
1072     +
1073     +static bool is_bad_offset(int b_off)
1074     +{
1075     + return b_off > 0x1ffff || b_off < -0x20000;
1076     +}
1077     +
1078     +static int build_body(struct jit_ctx *ctx)
1079     +{
1080     + const struct bpf_prog *prog = ctx->skf;
1081     + const struct sock_filter *inst;
1082     + unsigned int i, off, condt;
1083     + u32 k, b_off __maybe_unused;
1084     + u8 (*sk_load_func)(unsigned long *skb, int offset);
1085     +
1086     + for (i = 0; i < prog->len; i++) {
1087     + u16 code;
1088     +
1089     + inst = &(prog->insns[i]);
1090     + pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
1091     + __func__, inst->code, inst->jt, inst->jf, inst->k);
1092     + k = inst->k;
1093     + code = bpf_anc_helper(inst);
1094     +
1095     + if (ctx->target == NULL)
1096     + ctx->offsets[i] = ctx->idx * 4;
1097     +
1098     + switch (code) {
1099     + case BPF_LD | BPF_IMM:
1100     + /* A <- k ==> li r_A, k */
1101     + ctx->flags |= SEEN_A;
1102     + emit_load_imm(r_A, k, ctx);
1103     + break;
1104     + case BPF_LD | BPF_W | BPF_LEN:
1105     + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
1106     + /* A <- len ==> lw r_A, offset(skb) */
1107     + ctx->flags |= SEEN_SKB | SEEN_A;
1108     + off = offsetof(struct sk_buff, len);
1109     + emit_load(r_A, r_skb, off, ctx);
1110     + break;
1111     + case BPF_LD | BPF_MEM:
1112     + /* A <- M[k] ==> lw r_A, offset(M) */
1113     + ctx->flags |= SEEN_MEM | SEEN_A;
1114     + emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
1115     + break;
1116     + case BPF_LD | BPF_W | BPF_ABS:
1117     + /* A <- P[k:4] */
1118     + sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word);
1119     + goto load;
1120     + case BPF_LD | BPF_H | BPF_ABS:
1121     + /* A <- P[k:2] */
1122     + sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half);
1123     + goto load;
1124     + case BPF_LD | BPF_B | BPF_ABS:
1125     + /* A <- P[k:1] */
1126     + sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte);
1127     +load:
1128     + emit_load_imm(r_off, k, ctx);
1129     +load_common:
1130     + ctx->flags |= SEEN_CALL | SEEN_OFF |
1131     + SEEN_SKB | SEEN_A | SEEN_SKB_DATA;
1132     +
1133     + emit_load_func(r_s0, (ptr)sk_load_func, ctx);
1134     + emit_reg_move(MIPS_R_A0, r_skb, ctx);
1135     + emit_jalr(MIPS_R_RA, r_s0, ctx);
1136     + /* Load second argument to delay slot */
1137     + emit_reg_move(MIPS_R_A1, r_off, ctx);
1138     + /* Check the error value */
1139     + emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx),
1140     + ctx);
1141     + /* Load return register on DS for failures */
1142     + emit_reg_move(r_ret, r_zero, ctx);
1143     + /* Return with error */
1144     + b_off = b_imm(prog->len, ctx);
1145     + if (is_bad_offset(b_off))
1146     + return -E2BIG;
1147     + emit_b(b_off, ctx);
1148     + emit_nop(ctx);
1149     + break;
1150     + case BPF_LD | BPF_W | BPF_IND:
1151     + /* A <- P[X + k:4] */
1152     + sk_load_func = sk_load_word;
1153     + goto load_ind;
1154     + case BPF_LD | BPF_H | BPF_IND:
1155     + /* A <- P[X + k:2] */
1156     + sk_load_func = sk_load_half;
1157     + goto load_ind;
1158     + case BPF_LD | BPF_B | BPF_IND:
1159     + /* A <- P[X + k:1] */
1160     + sk_load_func = sk_load_byte;
1161     +load_ind:
1162     + ctx->flags |= SEEN_OFF | SEEN_X;
1163     + emit_addiu(r_off, r_X, k, ctx);
1164     + goto load_common;
1165     + case BPF_LDX | BPF_IMM:
1166     + /* X <- k */
1167     + ctx->flags |= SEEN_X;
1168     + emit_load_imm(r_X, k, ctx);
1169     + break;
1170     + case BPF_LDX | BPF_MEM:
1171     + /* X <- M[k] */
1172     + ctx->flags |= SEEN_X | SEEN_MEM;
1173     + emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
1174     + break;
1175     + case BPF_LDX | BPF_W | BPF_LEN:
1176     + /* X <- len */
1177     + ctx->flags |= SEEN_X | SEEN_SKB;
1178     + off = offsetof(struct sk_buff, len);
1179     + emit_load(r_X, r_skb, off, ctx);
1180     + break;
1181     + case BPF_LDX | BPF_B | BPF_MSH:
1182     + /* X <- 4 * (P[k:1] & 0xf) */
1183     + ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB;
1184     + /* Load offset to a1 */
1185     + emit_load_func(r_s0, (ptr)sk_load_byte, ctx);
1186     + /*
1187     + * This may emit two instructions so it may not fit
1188     + * in the delay slot. So use a0 in the delay slot.
1189     + */
1190     + emit_load_imm(MIPS_R_A1, k, ctx);
1191     + emit_jalr(MIPS_R_RA, r_s0, ctx);
1192     + emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
1193     + /* Check the error value */
1194     + b_off = b_imm(prog->len, ctx);
1195     + if (is_bad_offset(b_off))
1196     + return -E2BIG;
1197     + emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
1198     + emit_reg_move(r_ret, r_zero, ctx);
1199     + /* We are good */
1200     + /* X <- P[k:1] & 0xf */
1201     + emit_andi(r_X, r_A, 0xf, ctx);
1202     + /* X << 2 */
1203     + emit_b(b_imm(i + 1, ctx), ctx);
1204     + emit_sll(r_X, r_X, 2, ctx); /* delay slot */
1205     + break;
1206     + case BPF_ST:
1207     + /* M[k] <- A */
1208     + ctx->flags |= SEEN_MEM | SEEN_A;
1209     + emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
1210     + break;
1211     + case BPF_STX:
1212     + /* M[k] <- X */
1213     + ctx->flags |= SEEN_MEM | SEEN_X;
1214     + emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
1215     + break;
1216     + case BPF_ALU | BPF_ADD | BPF_K:
1217     + /* A += K */
1218     + ctx->flags |= SEEN_A;
1219     + emit_addiu(r_A, r_A, k, ctx);
1220     + break;
1221     + case BPF_ALU | BPF_ADD | BPF_X:
1222     + /* A += X */
1223     + ctx->flags |= SEEN_A | SEEN_X;
1224     + emit_addu(r_A, r_A, r_X, ctx);
1225     + break;
1226     + case BPF_ALU | BPF_SUB | BPF_K:
1227     + /* A -= K */
1228     + ctx->flags |= SEEN_A;
1229     + emit_addiu(r_A, r_A, -k, ctx);
1230     + break;
1231     + case BPF_ALU | BPF_SUB | BPF_X:
1232     + /* A -= X */
1233     + ctx->flags |= SEEN_A | SEEN_X;
1234     + emit_subu(r_A, r_A, r_X, ctx);
1235     + break;
1236     + case BPF_ALU | BPF_MUL | BPF_K:
1237     + /* A *= K */
1238     + /* Load K to scratch register before MUL */
1239     + ctx->flags |= SEEN_A;
1240     + emit_load_imm(r_s0, k, ctx);
1241     + emit_mul(r_A, r_A, r_s0, ctx);
1242     + break;
1243     + case BPF_ALU | BPF_MUL | BPF_X:
1244     + /* A *= X */
1245     + ctx->flags |= SEEN_A | SEEN_X;
1246     + emit_mul(r_A, r_A, r_X, ctx);
1247     + break;
1248     + case BPF_ALU | BPF_DIV | BPF_K:
1249     + /* A /= k */
1250     + if (k == 1)
1251     + break;
1252     + if (optimize_div(&k)) {
1253     + ctx->flags |= SEEN_A;
1254     + emit_srl(r_A, r_A, k, ctx);
1255     + break;
1256     + }
1257     + ctx->flags |= SEEN_A;
1258     + emit_load_imm(r_s0, k, ctx);
1259     + emit_div(r_A, r_s0, ctx);
1260     + break;
1261     + case BPF_ALU | BPF_MOD | BPF_K:
1262     + /* A %= k */
1263     + if (k == 1) {
1264     + ctx->flags |= SEEN_A;
1265     + emit_jit_reg_move(r_A, r_zero, ctx);
1266     + } else {
1267     + ctx->flags |= SEEN_A;
1268     + emit_load_imm(r_s0, k, ctx);
1269     + emit_mod(r_A, r_s0, ctx);
1270     + }
1271     + break;
1272     + case BPF_ALU | BPF_DIV | BPF_X:
1273     + /* A /= X */
1274     + ctx->flags |= SEEN_X | SEEN_A;
1275     + /* Check if r_X is zero */
1276     + b_off = b_imm(prog->len, ctx);
1277     + if (is_bad_offset(b_off))
1278     + return -E2BIG;
1279     + emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
1280     + emit_load_imm(r_ret, 0, ctx); /* delay slot */
1281     + emit_div(r_A, r_X, ctx);
1282     + break;
1283     + case BPF_ALU | BPF_MOD | BPF_X:
1284     + /* A %= X */
1285     + ctx->flags |= SEEN_X | SEEN_A;
1286     + /* Check if r_X is zero */
1287     + b_off = b_imm(prog->len, ctx);
1288     + if (is_bad_offset(b_off))
1289     + return -E2BIG;
1290     + emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
1291     + emit_load_imm(r_ret, 0, ctx); /* delay slot */
1292     + emit_mod(r_A, r_X, ctx);
1293     + break;
1294     + case BPF_ALU | BPF_OR | BPF_K:
1295     + /* A |= K */
1296     + ctx->flags |= SEEN_A;
1297     + emit_ori(r_A, r_A, k, ctx);
1298     + break;
1299     + case BPF_ALU | BPF_OR | BPF_X:
1300     + /* A |= X */
1301     + ctx->flags |= SEEN_A;
1302     + emit_ori(r_A, r_A, r_X, ctx);
1303     + break;
1304     + case BPF_ALU | BPF_XOR | BPF_K:
1305     + /* A ^= k */
1306     + ctx->flags |= SEEN_A;
1307     + emit_xori(r_A, r_A, k, ctx);
1308     + break;
1309     + case BPF_ANC | SKF_AD_ALU_XOR_X:
1310     + case BPF_ALU | BPF_XOR | BPF_X:
1311     + /* A ^= X */
1312     + ctx->flags |= SEEN_A;
1313     + emit_xor(r_A, r_A, r_X, ctx);
1314     + break;
1315     + case BPF_ALU | BPF_AND | BPF_K:
1316     + /* A &= K */
1317     + ctx->flags |= SEEN_A;
1318     + emit_andi(r_A, r_A, k, ctx);
1319     + break;
1320     + case BPF_ALU | BPF_AND | BPF_X:
1321     + /* A &= X */
1322     + ctx->flags |= SEEN_A | SEEN_X;
1323     + emit_and(r_A, r_A, r_X, ctx);
1324     + break;
1325     + case BPF_ALU | BPF_LSH | BPF_K:
1326     + /* A <<= K */
1327     + ctx->flags |= SEEN_A;
1328     + emit_sll(r_A, r_A, k, ctx);
1329     + break;
1330     + case BPF_ALU | BPF_LSH | BPF_X:
1331     + /* A <<= X */
1332     + ctx->flags |= SEEN_A | SEEN_X;
1333     + emit_sllv(r_A, r_A, r_X, ctx);
1334     + break;
1335     + case BPF_ALU | BPF_RSH | BPF_K:
1336     + /* A >>= K */
1337     + ctx->flags |= SEEN_A;
1338     + emit_srl(r_A, r_A, k, ctx);
1339     + break;
1340     + case BPF_ALU | BPF_RSH | BPF_X:
1341     + ctx->flags |= SEEN_A | SEEN_X;
1342     + emit_srlv(r_A, r_A, r_X, ctx);
1343     + break;
1344     + case BPF_ALU | BPF_NEG:
1345     + /* A = -A */
1346     + ctx->flags |= SEEN_A;
1347     + emit_neg(r_A, ctx);
1348     + break;
1349     + case BPF_JMP | BPF_JA:
1350     + /* pc += K */
1351     + b_off = b_imm(i + k + 1, ctx);
1352     + if (is_bad_offset(b_off))
1353     + return -E2BIG;
1354     + emit_b(b_off, ctx);
1355     + emit_nop(ctx);
1356     + break;
1357     + case BPF_JMP | BPF_JEQ | BPF_K:
1358     + /* pc += ( A == K ) ? pc->jt : pc->jf */
1359     + condt = MIPS_COND_EQ | MIPS_COND_K;
1360     + goto jmp_cmp;
1361     + case BPF_JMP | BPF_JEQ | BPF_X:
1362     + ctx->flags |= SEEN_X;
1363     + /* pc += ( A == X ) ? pc->jt : pc->jf */
1364     + condt = MIPS_COND_EQ | MIPS_COND_X;
1365     + goto jmp_cmp;
1366     + case BPF_JMP | BPF_JGE | BPF_K:
1367     + /* pc += ( A >= K ) ? pc->jt : pc->jf */
1368     + condt = MIPS_COND_GE | MIPS_COND_K;
1369     + goto jmp_cmp;
1370     + case BPF_JMP | BPF_JGE | BPF_X:
1371     + ctx->flags |= SEEN_X;
1372     + /* pc += ( A >= X ) ? pc->jt : pc->jf */
1373     + condt = MIPS_COND_GE | MIPS_COND_X;
1374     + goto jmp_cmp;
1375     + case BPF_JMP | BPF_JGT | BPF_K:
1376     + /* pc += ( A > K ) ? pc->jt : pc->jf */
1377     + condt = MIPS_COND_GT | MIPS_COND_K;
1378     + goto jmp_cmp;
1379     + case BPF_JMP | BPF_JGT | BPF_X:
1380     + ctx->flags |= SEEN_X;
1381     + /* pc += ( A > X ) ? pc->jt : pc->jf */
1382     + condt = MIPS_COND_GT | MIPS_COND_X;
1383     +jmp_cmp:
1384     + /* Greater or Equal */
1385     + if ((condt & MIPS_COND_GE) ||
1386     + (condt & MIPS_COND_GT)) {
1387     + if (condt & MIPS_COND_K) { /* K */
1388     + ctx->flags |= SEEN_A;
1389     + emit_sltiu(r_s0, r_A, k, ctx);
1390     + } else { /* X */
1391     + ctx->flags |= SEEN_A |
1392     + SEEN_X;
1393     + emit_sltu(r_s0, r_A, r_X, ctx);
1394     + }
1395     + /* A < (K|X) ? r_scratch = 1 */
1396     + b_off = b_imm(i + inst->jf + 1, ctx);
1397     + emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
1398     + ctx);
1399     + emit_nop(ctx);
1400     + /* A > (K|X) ? scratch = 0 */
1401     + if (condt & MIPS_COND_GT) {
1402     + /* Checking for equality */
1403     + ctx->flags |= SEEN_A | SEEN_X;
1404     + if (condt & MIPS_COND_K)
1405     + emit_load_imm(r_s0, k, ctx);
1406     + else
1407     + emit_jit_reg_move(r_s0, r_X,
1408     + ctx);
1409     + b_off = b_imm(i + inst->jf + 1, ctx);
1410     + emit_bcond(MIPS_COND_EQ, r_A, r_s0,
1411     + b_off, ctx);
1412     + emit_nop(ctx);
1413     + /* Finally, A > K|X */
1414     + b_off = b_imm(i + inst->jt + 1, ctx);
1415     + emit_b(b_off, ctx);
1416     + emit_nop(ctx);
1417     + } else {
1418     + /* A >= (K|X) so jump */
1419     + b_off = b_imm(i + inst->jt + 1, ctx);
1420     + emit_b(b_off, ctx);
1421     + emit_nop(ctx);
1422     + }
1423     + } else {
1424     + /* A == K|X */
1425     + if (condt & MIPS_COND_K) { /* K */
1426     + ctx->flags |= SEEN_A;
1427     + emit_load_imm(r_s0, k, ctx);
1428     + /* jump true */
1429     + b_off = b_imm(i + inst->jt + 1, ctx);
1430     + emit_bcond(MIPS_COND_EQ, r_A, r_s0,
1431     + b_off, ctx);
1432     + emit_nop(ctx);
1433     + /* jump false */
1434     + b_off = b_imm(i + inst->jf + 1,
1435     + ctx);
1436     + emit_bcond(MIPS_COND_NE, r_A, r_s0,
1437     + b_off, ctx);
1438     + emit_nop(ctx);
1439     + } else { /* X */
1440     + /* jump true */
1441     + ctx->flags |= SEEN_A | SEEN_X;
1442     + b_off = b_imm(i + inst->jt + 1,
1443     + ctx);
1444     + emit_bcond(MIPS_COND_EQ, r_A, r_X,
1445     + b_off, ctx);
1446     + emit_nop(ctx);
1447     + /* jump false */
1448     + b_off = b_imm(i + inst->jf + 1, ctx);
1449     + emit_bcond(MIPS_COND_NE, r_A, r_X,
1450     + b_off, ctx);
1451     + emit_nop(ctx);
1452     + }
1453     + }
1454     + break;
1455     + case BPF_JMP | BPF_JSET | BPF_K:
1456     + ctx->flags |= SEEN_A;
1457     + /* pc += (A & K) ? pc -> jt : pc -> jf */
1458     + emit_load_imm(r_s1, k, ctx);
1459     + emit_and(r_s0, r_A, r_s1, ctx);
1460     + /* jump true */
1461     + b_off = b_imm(i + inst->jt + 1, ctx);
1462     + emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
1463     + emit_nop(ctx);
1464     + /* jump false */
1465     + b_off = b_imm(i + inst->jf + 1, ctx);
1466     + emit_b(b_off, ctx);
1467     + emit_nop(ctx);
1468     + break;
1469     + case BPF_JMP | BPF_JSET | BPF_X:
1470     + ctx->flags |= SEEN_X | SEEN_A;
1471     + /* pc += (A & X) ? pc -> jt : pc -> jf */
1472     + emit_and(r_s0, r_A, r_X, ctx);
1473     + /* jump true */
1474     + b_off = b_imm(i + inst->jt + 1, ctx);
1475     + emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
1476     + emit_nop(ctx);
1477     + /* jump false */
1478     + b_off = b_imm(i + inst->jf + 1, ctx);
1479     + emit_b(b_off, ctx);
1480     + emit_nop(ctx);
1481     + break;
1482     + case BPF_RET | BPF_A:
1483     + ctx->flags |= SEEN_A;
1484     + if (i != prog->len - 1) {
1485     + /*
1486     + * If this is not the last instruction
1487     + * then jump to the epilogue
1488     + */
1489     + b_off = b_imm(prog->len, ctx);
1490     + if (is_bad_offset(b_off))
1491     + return -E2BIG;
1492     + emit_b(b_off, ctx);
1493     + }
1494     + emit_reg_move(r_ret, r_A, ctx); /* delay slot */
1495     + break;
1496     + case BPF_RET | BPF_K:
1497     + /*
1498     + * It can emit two instructions so it does not fit on
1499     + * the delay slot.
1500     + */
1501     + emit_load_imm(r_ret, k, ctx);
1502     + if (i != prog->len - 1) {
1503     + /*
1504     + * If this is not the last instruction
1505     + * then jump to the epilogue
1506     + */
1507     + b_off = b_imm(prog->len, ctx);
1508     + if (is_bad_offset(b_off))
1509     + return -E2BIG;
1510     + emit_b(b_off, ctx);
1511     + emit_nop(ctx);
1512     + }
1513     + break;
1514     + case BPF_MISC | BPF_TAX:
1515     + /* X = A */
1516     + ctx->flags |= SEEN_X | SEEN_A;
1517     + emit_jit_reg_move(r_X, r_A, ctx);
1518     + break;
1519     + case BPF_MISC | BPF_TXA:
1520     + /* A = X */
1521     + ctx->flags |= SEEN_A | SEEN_X;
1522     + emit_jit_reg_move(r_A, r_X, ctx);
1523     + break;
1524     + /* AUX */
1525     + case BPF_ANC | SKF_AD_PROTOCOL:
1526     + /* A = ntohs(skb->protocol) */
1527     + ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
1528     + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
1529     + protocol) != 2);
1530     + off = offsetof(struct sk_buff, protocol);
1531     + emit_half_load(r_A, r_skb, off, ctx);
1532     +#ifdef CONFIG_CPU_LITTLE_ENDIAN
1533     + /* This needs little endian fixup */
1534     + if (cpu_has_wsbh) {
1535     + /* R2 and later have the wsbh instruction */
1536     + emit_wsbh(r_A, r_A, ctx);
1537     + } else {
1538     + /* Get first byte */
1539     + emit_andi(r_tmp_imm, r_A, 0xff, ctx);
1540     + /* Shift it */
1541     + emit_sll(r_tmp, r_tmp_imm, 8, ctx);
1542     + /* Get second byte */
1543     + emit_srl(r_tmp_imm, r_A, 8, ctx);
1544     + emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
1545     + /* Put everything together in r_A */
1546     + emit_or(r_A, r_tmp, r_tmp_imm, ctx);
1547     + }
1548     +#endif
1549     + break;
1550     + case BPF_ANC | SKF_AD_CPU:
1551     + ctx->flags |= SEEN_A | SEEN_OFF;
1552     + /* A = current_thread_info()->cpu */
1553     + BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
1554     + cpu) != 4);
1555     + off = offsetof(struct thread_info, cpu);
1556     + /* $28/gp points to the thread_info struct */
1557     + emit_load(r_A, 28, off, ctx);
1558     + break;
1559     + case BPF_ANC | SKF_AD_IFINDEX:
1560     + /* A = skb->dev->ifindex */
1561     + case BPF_ANC | SKF_AD_HATYPE:
1562     + /* A = skb->dev->type */
1563     + ctx->flags |= SEEN_SKB | SEEN_A;
1564     + off = offsetof(struct sk_buff, dev);
1565     + /* Load *dev pointer */
1566     + emit_load_ptr(r_s0, r_skb, off, ctx);
1567     + /* error (0) in the delay slot */
1568     + b_off = b_imm(prog->len, ctx);
1569     + if (is_bad_offset(b_off))
1570     + return -E2BIG;
1571     + emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
1572     + emit_reg_move(r_ret, r_zero, ctx);
1573     + if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
1574     + BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
1575     + off = offsetof(struct net_device, ifindex);
1576     + emit_load(r_A, r_s0, off, ctx);
1577     + } else { /* (code == (BPF_ANC | SKF_AD_HATYPE) */
1578     + BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
1579     + off = offsetof(struct net_device, type);
1580     + emit_half_load_unsigned(r_A, r_s0, off, ctx);
1581     + }
1582     + break;
1583     + case BPF_ANC | SKF_AD_MARK:
1584     + ctx->flags |= SEEN_SKB | SEEN_A;
1585     + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
1586     + off = offsetof(struct sk_buff, mark);
1587     + emit_load(r_A, r_skb, off, ctx);
1588     + break;
1589     + case BPF_ANC | SKF_AD_RXHASH:
1590     + ctx->flags |= SEEN_SKB | SEEN_A;
1591     + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
1592     + off = offsetof(struct sk_buff, hash);
1593     + emit_load(r_A, r_skb, off, ctx);
1594     + break;
1595     + case BPF_ANC | SKF_AD_VLAN_TAG:
1596     + ctx->flags |= SEEN_SKB | SEEN_A;
1597     + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
1598     + vlan_tci) != 2);
1599     + off = offsetof(struct sk_buff, vlan_tci);
1600     + emit_half_load_unsigned(r_A, r_skb, off, ctx);
1601     + break;
1602     + case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
1603     + ctx->flags |= SEEN_SKB | SEEN_A;
1604     + emit_load_byte(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET(), ctx);
1605     + if (PKT_VLAN_PRESENT_BIT)
1606     + emit_srl(r_A, r_A, PKT_VLAN_PRESENT_BIT, ctx);
1607     + if (PKT_VLAN_PRESENT_BIT < 7)
1608     + emit_andi(r_A, r_A, 1, ctx);
1609     + break;
1610     + case BPF_ANC | SKF_AD_PKTTYPE:
1611     + ctx->flags |= SEEN_SKB;
1612     +
1613     + emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx);
1614     + /* Keep only the last 3 bits */
1615     + emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
1616     +#ifdef __BIG_ENDIAN_BITFIELD
1617     + /* Get the actual packet type to the lower 3 bits */
1618     + emit_srl(r_A, r_A, 5, ctx);
1619     +#endif
1620     + break;
1621     + case BPF_ANC | SKF_AD_QUEUE:
1622     + ctx->flags |= SEEN_SKB | SEEN_A;
1623     + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
1624     + queue_mapping) != 2);
1625     + BUILD_BUG_ON(offsetof(struct sk_buff,
1626     + queue_mapping) > 0xff);
1627     + off = offsetof(struct sk_buff, queue_mapping);
1628     + emit_half_load_unsigned(r_A, r_skb, off, ctx);
1629     + break;
1630     + default:
1631     + pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
1632     + inst->code);
1633     + return -1;
1634     + }
1635     + }
1636     +
1637     + /* compute offsets only during the first pass */
1638     + if (ctx->target == NULL)
1639     + ctx->offsets[i] = ctx->idx * 4;
1640     +
1641     + return 0;
1642     +}
1643     +
1644     +void bpf_jit_compile(struct bpf_prog *fp)
1645     +{
1646     + struct jit_ctx ctx;
1647     + unsigned int alloc_size, tmp_idx;
1648     +
1649     + if (!bpf_jit_enable)
1650     + return;
1651     +
1652     + memset(&ctx, 0, sizeof(ctx));
1653     +
1654     + ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
1655     + if (ctx.offsets == NULL)
1656     + return;
1657     +
1658     + ctx.skf = fp;
1659     +
1660     + if (build_body(&ctx))
1661     + goto out;
1662     +
1663     + tmp_idx = ctx.idx;
1664     + build_prologue(&ctx);
1665     + ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
1666     + /* just to complete the ctx.idx count */
1667     + build_epilogue(&ctx);
1668     +
1669     + alloc_size = 4 * ctx.idx;
1670     + ctx.target = module_alloc(alloc_size);
1671     + if (ctx.target == NULL)
1672     + goto out;
1673     +
1674     + /* Clean it */
1675     + memset(ctx.target, 0, alloc_size);
1676     +
1677     + ctx.idx = 0;
1678     +
1679     + /* Generate the actual JIT code */
1680     + build_prologue(&ctx);
1681     + if (build_body(&ctx)) {
1682     + module_memfree(ctx.target);
1683     + goto out;
1684     + }
1685     + build_epilogue(&ctx);
1686     +
1687     + /* Update the icache */
1688     + flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));
1689     +
1690     + if (bpf_jit_enable > 1)
1691     + /* Dump JIT code */
1692     + bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
1693     +
1694     + fp->bpf_func = (void *)ctx.target;
1695     + fp->jited = 1;
1696     +
1697     +out:
1698     + kfree(ctx.offsets);
1699     +}
1700     +
1701     +void bpf_jit_free(struct bpf_prog *fp)
1702     +{
1703     + if (fp->jited)
1704     + module_memfree(fp->bpf_func);
1705     +
1706     + bpf_prog_unlock_free(fp);
1707     +}
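
The bpf_jit_compile() flow above is a classic two-pass JIT: build_body() runs once with ctx.target == NULL purely to count emitted words and fill ctx.offsets[], and only the second run, after module_alloc(), writes real instructions, so b_imm() can resolve every branch from already-known byte offsets. A minimal standalone sketch of the same layout trick (toy one-word "ISA" with hypothetical emit()/build_body(), not the MIPS backend itself):

#include <stdio.h>
#include <stdlib.h>

struct ctx {
	unsigned int idx;		/* next instruction slot            */
	unsigned int *offsets;		/* byte offset after each insn      */
	unsigned int *target;		/* NULL on pass 1, buffer on pass 2 */
};

static void emit(struct ctx *c, unsigned int word)
{
	if (c->target)			/* pass 2: actually store the word */
		c->target[c->idx] = word;
	c->idx++;			/* both passes: account for it     */
}

static void build_body(struct ctx *c, int len)
{
	for (int i = 0; i < len; i++) {
		emit(c, 0x1000u + i);	/* pretend each insn is one word  */
		if (!c->target)		/* record offsets on pass 1 only  */
			c->offsets[i] = c->idx * 4;
	}
}

int main(void)
{
	struct ctx c = { 0 };
	int len = 4;

	c.offsets = calloc(len + 1, sizeof(*c.offsets));
	build_body(&c, len);			/* pass 1: measure */
	c.target = calloc(c.idx, sizeof(*c.target));
	c.idx = 0;
	build_body(&c, len);			/* pass 2: emit    */
	printf("code ends after insn 2 at byte %u\n", c.offsets[2]);
	free(c.offsets);
	free(c.target);
	return 0;
}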
1708     diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
1709     new file mode 100644
1710     index 0000000000000..57154c5883b6f
1711     --- /dev/null
1712     +++ b/arch/mips/net/bpf_jit_asm.S
1713     @@ -0,0 +1,285 @@
1714     +/*
1715     + * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
1716     + * compiler.
1717     + *
1718     + * Copyright (C) 2015 Imagination Technologies Ltd.
1719     + * Author: Markos Chandras <markos.chandras@imgtec.com>
1720     + *
1721     + * This program is free software; you can redistribute it and/or modify it
1722     + * under the terms of the GNU General Public License as published by the
1723     + * Free Software Foundation; version 2 of the License.
1724     + */
1725     +
1726     +#include <asm/asm.h>
1727     +#include <asm/isa-rev.h>
1728     +#include <asm/regdef.h>
1729     +#include "bpf_jit.h"
1730     +
1731     +/* ABI
1732     + *
1733     + * r_skb_hl skb header length
1734     + * r_skb_data skb data
1735     + * r_off(a1) offset register
1736     + * r_A BPF register A
1737     + * r_X BPF register X
1738     + * r_skb(a0) *skb
1739     + * r_M *scratch memory
1740     + * r_skb_len skb length
1741     + * r_s0 Scratch register 0
1742     + * r_s1 Scratch register 1
1743     + *
1744     + * On entry:
1745     + * a0: *skb
1746     + * a1: offset (imm or imm + X)
1747     + *
1748     + * All non-BPF-ABI registers are free for use. On return, we only
1749     + * care about r_ret. The BPF-ABI registers are assumed to remain
1750     + * unmodified during the entire filter operation.
1751     + */
1752     +
1753     +#define skb a0
1754     +#define offset a1
1755     +#define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */
1756     +
1757     + /* We know better :) so prevent assembler reordering etc */
1758     + .set noreorder
1759     +
1760     +#define is_offset_negative(TYPE) \
1761     + /* If offset is negative we have more work to do */ \
1762     + slti t0, offset, 0; \
1763     + bgtz t0, bpf_slow_path_##TYPE##_neg; \
1764     + /* Be careful what follows in DS. */
1765     +
1766     +#define is_offset_in_header(SIZE, TYPE) \
1767     + /* Reading from header? */ \
1768     + addiu $r_s0, $r_skb_hl, -SIZE; \
1769     + slt t0, $r_s0, offset; \
1770     + bgtz t0, bpf_slow_path_##TYPE; \
1771     +
1772     +LEAF(sk_load_word)
1773     + is_offset_negative(word)
1774     +FEXPORT(sk_load_word_positive)
1775     + is_offset_in_header(4, word)
1776     + /* Offset within header boundaries */
1777     + PTR_ADDU t1, $r_skb_data, offset
1778     + .set reorder
1779     + lw $r_A, 0(t1)
1780     + .set noreorder
1781     +#ifdef CONFIG_CPU_LITTLE_ENDIAN
1782     +# if MIPS_ISA_REV >= 2
1783     + wsbh t0, $r_A
1784     + rotr $r_A, t0, 16
1785     +# else
1786     + sll t0, $r_A, 24
1787     + srl t1, $r_A, 24
1788     + srl t2, $r_A, 8
1789     + or t0, t0, t1
1790     + andi t2, t2, 0xff00
1791     + andi t1, $r_A, 0xff00
1792     + or t0, t0, t2
1793     + sll t1, t1, 8
1794     + or $r_A, t0, t1
1795     +# endif
1796     +#endif
1797     + jr $r_ra
1798     + move $r_ret, zero
1799     + END(sk_load_word)
1800     +
1801     +LEAF(sk_load_half)
1802     + is_offset_negative(half)
1803     +FEXPORT(sk_load_half_positive)
1804     + is_offset_in_header(2, half)
1805     + /* Offset within header boundaries */
1806     + PTR_ADDU t1, $r_skb_data, offset
1807     + lhu $r_A, 0(t1)
1808     +#ifdef CONFIG_CPU_LITTLE_ENDIAN
1809     +# if MIPS_ISA_REV >= 2
1810     + wsbh $r_A, $r_A
1811     +# else
1812     + sll t0, $r_A, 8
1813     + srl t1, $r_A, 8
1814     + andi t0, t0, 0xff00
1815     + or $r_A, t0, t1
1816     +# endif
1817     +#endif
1818     + jr $r_ra
1819     + move $r_ret, zero
1820     + END(sk_load_half)
1821     +
1822     +LEAF(sk_load_byte)
1823     + is_offset_negative(byte)
1824     +FEXPORT(sk_load_byte_positive)
1825     + is_offset_in_header(1, byte)
1826     + /* Offset within header boundaries */
1827     + PTR_ADDU t1, $r_skb_data, offset
1828     + lbu $r_A, 0(t1)
1829     + jr $r_ra
1830     + move $r_ret, zero
1831     + END(sk_load_byte)
1832     +
1833     +/*
1834     + * call skb_copy_bits:
1835     + * (prototype in linux/skbuff.h)
1836     + *
1837     + * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
1838     + *
1839     + * o32 mandates we leave 4 spaces for argument registers in case
1840     + * the callee needs to use them. Even though we don't care about
1841     + * the argument registers ourselves, we need to allocate that space
1842     + * to remain ABI compliant since the callee may want to use that space.
1843     + * We also allocate 2 more spaces for $r_ra and our return register (*to).
1844     + *
1845     + * n64 is a bit different. The *caller* will allocate the space to preserve
1846     + * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no
1847     + * good reason but it does not matter that much really.
1848     + *
1849     + * (void *to) is returned in r_s0
1850     + *
1851     + */
1852     +#ifdef CONFIG_CPU_LITTLE_ENDIAN
1853     +#define DS_OFFSET(SIZE) (4 * SZREG)
1854     +#else
1855     +#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
1856     +#endif
1857     +#define bpf_slow_path_common(SIZE) \
1858     + /* Quick check. Are we within reasonable boundaries? */ \
1859     + LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \
1860     + sltu $r_s0, offset, $r_s1; \
1861     + beqz $r_s0, fault; \
1862     + /* Load 4th argument in DS */ \
1863     + LONG_ADDIU a3, zero, SIZE; \
1864     + PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
1865     + PTR_LA t0, skb_copy_bits; \
1866     + PTR_S $r_ra, (5 * SZREG)($r_sp); \
1867     + /* Assign low slot to a2 */ \
1868     + PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \
1869     + jalr t0; \
1870     + /* Reset our destination slot (DS but it's ok) */ \
1871     + INT_S zero, (4 * SZREG)($r_sp); \
1872     + /* \
1873     + * skb_copy_bits returns 0 on success and -EFAULT \
1874     + * on error. Our data live in a2. Do not bother with \
1875     + * our data if an error has been returned. \
1876     + */ \
1877     + /* Restore our frame */ \
1878     + PTR_L $r_ra, (5 * SZREG)($r_sp); \
1879     + INT_L $r_s0, (4 * SZREG)($r_sp); \
1880     + bltz v0, fault; \
1881     + PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
1882     + move $r_ret, zero; \
1883     +
1884     +NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
1885     + bpf_slow_path_common(4)
1886     +#ifdef CONFIG_CPU_LITTLE_ENDIAN
1887     +# if MIPS_ISA_REV >= 2
1888     + wsbh t0, $r_s0
1889     + jr $r_ra
1890     + rotr $r_A, t0, 16
1891     +# else
1892     + sll t0, $r_s0, 24
1893     + srl t1, $r_s0, 24
1894     + srl t2, $r_s0, 8
1895     + or t0, t0, t1
1896     + andi t2, t2, 0xff00
1897     + andi t1, $r_s0, 0xff00
1898     + or t0, t0, t2
1899     + sll t1, t1, 8
1900     + jr $r_ra
1901     + or $r_A, t0, t1
1902     +# endif
1903     +#else
1904     + jr $r_ra
1905     + move $r_A, $r_s0
1906     +#endif
1907     +
1908     + END(bpf_slow_path_word)
1909     +
1910     +NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
1911     + bpf_slow_path_common(2)
1912     +#ifdef CONFIG_CPU_LITTLE_ENDIAN
1913     +# if MIPS_ISA_REV >= 2
1914     + jr $r_ra
1915     + wsbh $r_A, $r_s0
1916     +# else
1917     + sll t0, $r_s0, 8
1918     + andi t1, $r_s0, 0xff00
1919     + andi t0, t0, 0xff00
1920     + srl t1, t1, 8
1921     + jr $r_ra
1922     + or $r_A, t0, t1
1923     +# endif
1924     +#else
1925     + jr $r_ra
1926     + move $r_A, $r_s0
1927     +#endif
1928     +
1929     + END(bpf_slow_path_half)
1930     +
1931     +NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
1932     + bpf_slow_path_common(1)
1933     + jr $r_ra
1934     + move $r_A, $r_s0
1935     +
1936     + END(bpf_slow_path_byte)
1937     +
1938     +/*
1939     + * Negative entry points
1940     + */
1941     + .macro bpf_is_end_of_data
1942     + li t0, SKF_LL_OFF
1943     + /* Reading link layer data? */
1944     + slt t1, offset, t0
1945     + bgtz t1, fault
1946     + /* Be careful what follows in DS. */
1947     + .endm
1948     +/*
1949     + * call bpf_internal_load_pointer_neg_helper:
1950     + * (prototype in linux/filter.h)
1951     + *
1952     + * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
1953     + * int k, unsigned int size)
1954     + *
1955     + * see above (bpf_slow_path_common) for ABI restrictions
1956     + */
1957     +#define bpf_negative_common(SIZE) \
1958     + PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
1959     + PTR_LA t0, bpf_internal_load_pointer_neg_helper; \
1960     + PTR_S $r_ra, (5 * SZREG)($r_sp); \
1961     + jalr t0; \
1962     + li a2, SIZE; \
1963     + PTR_L $r_ra, (5 * SZREG)($r_sp); \
1964     + /* Check return pointer */ \
1965     + beqz v0, fault; \
1966     + PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
1967     + /* Preserve our pointer */ \
1968     + move $r_s0, v0; \
1969     + /* Set return value */ \
1970     + move $r_ret, zero; \
1971     +
1972     +bpf_slow_path_word_neg:
1973     + bpf_is_end_of_data
1974     +NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
1975     + bpf_negative_common(4)
1976     + jr $r_ra
1977     + lw $r_A, 0($r_s0)
1978     + END(sk_load_word_negative)
1979     +
1980     +bpf_slow_path_half_neg:
1981     + bpf_is_end_of_data
1982     +NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
1983     + bpf_negative_common(2)
1984     + jr $r_ra
1985     + lhu $r_A, 0($r_s0)
1986     + END(sk_load_half_negative)
1987     +
1988     +bpf_slow_path_byte_neg:
1989     + bpf_is_end_of_data
1990     +NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
1991     + bpf_negative_common(1)
1992     + jr $r_ra
1993     + lbu $r_A, 0($r_s0)
1994     + END(sk_load_byte_negative)
1995     +
1996     +fault:
1997     + jr $r_ra
1998     + addiu $r_ret, zero, 1
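
On cores without wsbh/rotr (pre-MIPS32R2), both the SKF_AD_PROTOCOL handling in the JIT body and the sk_load_word/sk_load_half paths above open-code the byte swaps with shift/mask/or sequences. The same two sequences rendered in plain C, as a standalone reference only (not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)
{
	uint32_t t0 = x << 24;			/* byte 0 -> byte 3 */
	uint32_t t1 = x >> 24;			/* byte 3 -> byte 0 */
	uint32_t t2 = (x >> 8) & 0x0000ff00;	/* byte 2 -> byte 1 */
	uint32_t t3 = (x & 0x0000ff00) << 8;	/* byte 1 -> byte 2 */

	return t0 | t1 | t2 | t3;
}

static uint16_t swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	/* network-order 0x11223344 / 0xaabb as read by a little-endian CPU */
	printf("%08x %04x\n", swab32(0x11223344), swab16(0xaabb));
	return 0;
}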
1999     diff --git a/arch/powerpc/boot/dts/fsl/t1023rdb.dts b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
2000     index 5ba6fbfca2742..f82f85c65964c 100644
2001     --- a/arch/powerpc/boot/dts/fsl/t1023rdb.dts
2002     +++ b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
2003     @@ -154,7 +154,7 @@
2004    
2005     fm1mac3: ethernet@e4000 {
2006     phy-handle = <&sgmii_aqr_phy3>;
2007     - phy-connection-type = "sgmii-2500";
2008     + phy-connection-type = "2500base-x";
2009     sleep = <&rcpm 0x20000000>;
2010     };
2011    
2012     diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h
2013     index 13ce76cc5affe..80dff2c2bf677 100644
2014     --- a/arch/riscv/include/uapi/asm/unistd.h
2015     +++ b/arch/riscv/include/uapi/asm/unistd.h
2016     @@ -18,9 +18,10 @@
2017     #ifdef __LP64__
2018     #define __ARCH_WANT_NEW_STAT
2019     #define __ARCH_WANT_SET_GET_RLIMIT
2020     -#define __ARCH_WANT_SYS_CLONE3
2021     #endif /* __LP64__ */
2022    
2023     +#define __ARCH_WANT_SYS_CLONE3
2024     +
2025     #include <asm-generic/unistd.h>
2026    
2027     /*
2028     diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
2029     index 2d29966276296..f63e4cb6c9b31 100644
2030     --- a/arch/s390/net/bpf_jit_comp.c
2031     +++ b/arch/s390/net/bpf_jit_comp.c
2032     @@ -1385,7 +1385,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
2033     jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
2034     if (jit.addrs == NULL) {
2035     fp = orig_fp;
2036     - goto out;
2037     + goto free_addrs;
2038     }
2039     /*
2040     * Three initial passes:
2041     diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
2042     index 36a28b9e46cbd..8c1590432e866 100644
2043     --- a/arch/x86/Kconfig
2044     +++ b/arch/x86/Kconfig
2045     @@ -1425,7 +1425,7 @@ config HIGHMEM4G
2046    
2047     config HIGHMEM64G
2048     bool "64GB"
2049     - depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
2050     + depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !MWINCHIP3D && !MK6
2051     select X86_PAE
2052     ---help---
2053     Select this if you have a 32-bit processor and more than 4
2054     diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
2055     index 2f9ec14be3b11..6f6b1d04dadf9 100644
2056     --- a/arch/x86/kernel/early-quirks.c
2057     +++ b/arch/x86/kernel/early-quirks.c
2058     @@ -710,12 +710,6 @@ static struct chipset early_qrk[] __initdata = {
2059     */
2060     { PCI_VENDOR_ID_INTEL, 0x0f00,
2061     PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
2062     - { PCI_VENDOR_ID_INTEL, 0x3e20,
2063     - PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
2064     - { PCI_VENDOR_ID_INTEL, 0x3ec4,
2065     - PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
2066     - { PCI_VENDOR_ID_INTEL, 0x8a12,
2067     - PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
2068     { PCI_VENDOR_ID_BROADCOM, 0x4331,
2069     PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
2070     {}
2071     diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
2072     index c6f791bc481eb..9834d221e390f 100644
2073     --- a/arch/x86/kernel/hpet.c
2074     +++ b/arch/x86/kernel/hpet.c
2075     @@ -9,6 +9,7 @@
2076    
2077     #include <asm/hpet.h>
2078     #include <asm/time.h>
2079     +#include <asm/mwait.h>
2080    
2081     #undef pr_fmt
2082     #define pr_fmt(fmt) "hpet: " fmt
2083     @@ -806,6 +807,83 @@ static bool __init hpet_counting(void)
2084     return false;
2085     }
2086    
2087     +static bool __init mwait_pc10_supported(void)
2088     +{
2089     + unsigned int eax, ebx, ecx, mwait_substates;
2090     +
2091     + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2092     + return false;
2093     +
2094     + if (!cpu_feature_enabled(X86_FEATURE_MWAIT))
2095     + return false;
2096     +
2097     + if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
2098     + return false;
2099     +
2100     + cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
2101     +
2102     + return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) &&
2103     + (ecx & CPUID5_ECX_INTERRUPT_BREAK) &&
2104     + (mwait_substates & (0xF << 28));
2105     +}
2106     +
2107     +/*
2108     + * Check whether the system supports PC10. If so force disable HPET as that
2109     + * stops counting in PC10. This check is overbroad as it does not take any
2110     + * of the following into account:
2111     + *
2112     + * - ACPI tables
2113     + * - Enablement of intel_idle
2114     + * - Command line arguments which limit intel_idle C-state support
2115     + *
2116     + * That's perfectly fine. HPET is a piece of hardware designed by committee
2117     + * and the only reason why it is still in use on modern systems is the
2118     + * fact that it is impossible to reliably query TSC and CPU frequency via
2119     + * CPUID or firmware.
2120     + *
2121     + * If HPET is functional it is useful for calibrating TSC, but this can be
2122     + * done via PMTIMER as well which seems to be the last remaining timer on
2123     + * X86/INTEL platforms that has not been completely wrecked by feature
2124     + * creep.
2125     + *
2126     + * In theory HPET support should be removed altogether, but there are older
2127     + * systems out there which depend on it because TSC and APIC timer are
2128     + * dysfunctional in deeper C-states.
2129     + *
2130     + * It's only 20 years now that hardware people have been asked to provide
2131     + * reliable and discoverable facilities which can be used for timekeeping
2132     + * and per CPU timer interrupts.
2133     + *
2134     + * The probability that this problem is going to be solved in the
2135     + * foreseeable future is close to zero, so the kernel has to be cluttered
2136     + * with heuristics to keep up with the ever growing amount of hardware and
2137     + * firmware trainwrecks. Hopefully some day hardware people will understand
2138     + * that the approach of "This can be fixed in software" is not sustainable.
2139     + * Hope dies last...
2140     + */
2141     +static bool __init hpet_is_pc10_damaged(void)
2142     +{
2143     + unsigned long long pcfg;
2144     +
2145     + /* Check whether PC10 substates are supported */
2146     + if (!mwait_pc10_supported())
2147     + return false;
2148     +
2149     + /* Check whether PC10 is enabled in PKG C-state limit */
2150     + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, pcfg);
2151     + if ((pcfg & 0xF) < 8)
2152     + return false;
2153     +
2154     + if (hpet_force_user) {
2155     + pr_warn("HPET force enabled via command line, but dysfunctional in PC10.\n");
2156     + return false;
2157     + }
2158     +
2159     + pr_info("HPET dysfunctional in PC10. Force disabled.\n");
2160     + boot_hpet_disable = true;
2161     + return true;
2162     +}
2163     +
2164     /**
2165     * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
2166     */
2167     @@ -819,6 +897,9 @@ int __init hpet_enable(void)
2168     if (!is_hpet_capable())
2169     return 0;
2170    
2171     + if (hpet_is_pc10_damaged())
2172     + return 0;
2173     +
2174     hpet_set_mapping();
2175     if (!hpet_virt_address)
2176     return 0;
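
The new checks above reduce to two bit tests: CPUID leaf 5 must advertise at least one C10 MWAIT sub-state in bits 31:28 of the sub-state word, and the low nibble of MSR_PKG_CST_CONFIG_CONTROL must allow a package C-state limit of 8 (PC10) or deeper. A worked standalone example of the same arithmetic on made-up register values (no real CPUID/MSR access):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mwait_substates = 0x11142120;	/* fabricated CPUID.05H:EDX  */
	uint64_t pkg_cst_config  = 0x1e008008;	/* fabricated MSR 0xE2 value */

	int pc10_substates = (mwait_substates >> 28) & 0xF;	/* bits 31:28 */
	int pkg_limit = (int)(pkg_cst_config & 0xF);		/* low nibble */

	printf("C10 sub-states: %d, PKG C-state limit: %d -> %s\n",
	       pc10_substates, pkg_limit,
	       (pc10_substates && pkg_limit >= 8) ?
	       "HPET would be force-disabled" : "HPET kept");
	return 0;
}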
2177     diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
2178     index ee2beda590d0d..1d4a00e767ece 100644
2179     --- a/arch/x86/platform/olpc/olpc.c
2180     +++ b/arch/x86/platform/olpc/olpc.c
2181     @@ -274,7 +274,7 @@ static struct olpc_ec_driver ec_xo1_driver = {
2182    
2183     static struct olpc_ec_driver ec_xo1_5_driver = {
2184     .ec_cmd = olpc_xo1_ec_cmd,
2185     -#ifdef CONFIG_OLPC_XO1_5_SCI
2186     +#ifdef CONFIG_OLPC_XO15_SCI
2187     /*
2188     * XO-1.5 EC wakeups are available when olpc-xo15-sci driver is
2189     * compiled in
2190     diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
2191     index 9c12babc016cd..6fc05cba61a27 100644
2192     --- a/arch/xtensa/include/asm/kmem_layout.h
2193     +++ b/arch/xtensa/include/asm/kmem_layout.h
2194     @@ -11,6 +11,7 @@
2195     #ifndef _XTENSA_KMEM_LAYOUT_H
2196     #define _XTENSA_KMEM_LAYOUT_H
2197    
2198     +#include <asm/core.h>
2199     #include <asm/types.h>
2200    
2201     #ifdef CONFIG_MMU
2202     @@ -65,6 +66,34 @@
2203    
2204     #endif
2205    
2206     +/* KIO definition */
2207     +
2208     +#if XCHAL_HAVE_PTP_MMU
2209     +#define XCHAL_KIO_CACHED_VADDR 0xe0000000
2210     +#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
2211     +#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
2212     +#else
2213     +#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
2214     +#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
2215     +#endif
2216     +#define XCHAL_KIO_SIZE 0x10000000
2217     +
2218     +#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
2219     +#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
2220     +#ifndef __ASSEMBLY__
2221     +extern unsigned long xtensa_kio_paddr;
2222     +
2223     +static inline unsigned long xtensa_get_kio_paddr(void)
2224     +{
2225     + return xtensa_kio_paddr;
2226     +}
2227     +#endif
2228     +#else
2229     +#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
2230     +#endif
2231     +
2232     +/* KERNEL_STACK definition */
2233     +
2234     #ifndef CONFIG_KASAN
2235     #define KERNEL_STACK_SHIFT 13
2236     #else
2237     diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
2238     index 79fe3007919eb..4220c6dac44f4 100644
2239     --- a/arch/xtensa/include/asm/vectors.h
2240     +++ b/arch/xtensa/include/asm/vectors.h
2241     @@ -21,50 +21,14 @@
2242     #include <asm/core.h>
2243     #include <asm/kmem_layout.h>
2244    
2245     -#if XCHAL_HAVE_PTP_MMU
2246     -#define XCHAL_KIO_CACHED_VADDR 0xe0000000
2247     -#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
2248     -#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
2249     -#else
2250     -#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
2251     -#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
2252     -#endif
2253     -#define XCHAL_KIO_SIZE 0x10000000
2254     -
2255     -#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
2256     -#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
2257     -#ifndef __ASSEMBLY__
2258     -extern unsigned long xtensa_kio_paddr;
2259     -
2260     -static inline unsigned long xtensa_get_kio_paddr(void)
2261     -{
2262     - return xtensa_kio_paddr;
2263     -}
2264     -#endif
2265     -#else
2266     -#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
2267     -#endif
2268     -
2269     -#if defined(CONFIG_MMU)
2270     -
2271     -#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
2272     -/* Image Virtual Start Address */
2273     -#define KERNELOFFSET (XCHAL_KSEG_CACHED_VADDR + \
2274     - CONFIG_KERNEL_LOAD_ADDRESS - \
2275     +#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
2276     +#define KERNELOFFSET (CONFIG_KERNEL_LOAD_ADDRESS + \
2277     + XCHAL_KSEG_CACHED_VADDR - \
2278     XCHAL_KSEG_PADDR)
2279     #else
2280     #define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
2281     #endif
2282    
2283     -#else /* !defined(CONFIG_MMU) */
2284     - /* MMU Not being used - Virtual == Physical */
2285     -
2286     -/* Location of the start of the kernel text, _start */
2287     -#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
2288     -
2289     -
2290     -#endif /* CONFIG_MMU */
2291     -
2292     #define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
2293     #ifdef CONFIG_VECTORS_OFFSET
2294     #define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET)
2295     diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
2296     index a48bf2d10ac2d..80cc9770a8d2d 100644
2297     --- a/arch/xtensa/kernel/irq.c
2298     +++ b/arch/xtensa/kernel/irq.c
2299     @@ -145,7 +145,7 @@ unsigned xtensa_get_ext_irq_no(unsigned irq)
2300    
2301     void __init init_IRQ(void)
2302     {
2303     -#ifdef CONFIG_OF
2304     +#ifdef CONFIG_USE_OF
2305     irqchip_init();
2306     #else
2307     #ifdef CONFIG_HAVE_SMP
2308     diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
2309     index d08172138369b..5a25bc2b80521 100644
2310     --- a/arch/xtensa/kernel/setup.c
2311     +++ b/arch/xtensa/kernel/setup.c
2312     @@ -64,7 +64,7 @@ extern unsigned long initrd_end;
2313     extern int initrd_below_start_ok;
2314     #endif
2315    
2316     -#ifdef CONFIG_OF
2317     +#ifdef CONFIG_USE_OF
2318     void *dtb_start = __dtb_start;
2319     #endif
2320    
2321     @@ -126,7 +126,7 @@ __tagtable(BP_TAG_INITRD, parse_tag_initrd);
2322    
2323     #endif /* CONFIG_BLK_DEV_INITRD */
2324    
2325     -#ifdef CONFIG_OF
2326     +#ifdef CONFIG_USE_OF
2327    
2328     static int __init parse_tag_fdt(const bp_tag_t *tag)
2329     {
2330     @@ -136,7 +136,7 @@ static int __init parse_tag_fdt(const bp_tag_t *tag)
2331    
2332     __tagtable(BP_TAG_FDT, parse_tag_fdt);
2333    
2334     -#endif /* CONFIG_OF */
2335     +#endif /* CONFIG_USE_OF */
2336    
2337     static int __init parse_tag_cmdline(const bp_tag_t* tag)
2338     {
2339     @@ -184,7 +184,7 @@ static int __init parse_bootparam(const bp_tag_t *tag)
2340     }
2341     #endif
2342    
2343     -#ifdef CONFIG_OF
2344     +#ifdef CONFIG_USE_OF
2345    
2346     #if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
2347     unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
2348     @@ -233,7 +233,7 @@ void __init early_init_devtree(void *params)
2349     strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
2350     }
2351    
2352     -#endif /* CONFIG_OF */
2353     +#endif /* CONFIG_USE_OF */
2354    
2355     /*
2356     * Initialize architecture. (Early stage)
2357     @@ -254,7 +254,7 @@ void __init init_arch(bp_tag_t *bp_start)
2358     if (bp_start)
2359     parse_bootparam(bp_start);
2360    
2361     -#ifdef CONFIG_OF
2362     +#ifdef CONFIG_USE_OF
2363     early_init_devtree(dtb_start);
2364     #endif
2365    
2366     diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
2367     index 03678c4afc39b..bc858a7f98ba4 100644
2368     --- a/arch/xtensa/mm/mmu.c
2369     +++ b/arch/xtensa/mm/mmu.c
2370     @@ -101,7 +101,7 @@ void init_mmu(void)
2371    
2372     void init_kio(void)
2373     {
2374     -#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
2375     +#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
2376     /*
2377     * Update the IO area mapping in case xtensa_kio_paddr has changed
2378     */
2379     diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
2380     index 90053c4a8290d..469ca73de4ce7 100644
2381     --- a/drivers/bus/ti-sysc.c
2382     +++ b/drivers/bus/ti-sysc.c
2383     @@ -1388,6 +1388,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
2384     /* Quirks that need to be set based on detected module */
2385     SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
2386     SYSC_MODULE_QUIRK_AESS),
2387     + /* Errata i893 handling for dra7 dcan1 and 2 */
2388     + SYSC_QUIRK("dcan", 0x4ae3c000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
2389     + SYSC_QUIRK_CLKDM_NOAUTO),
2390     SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
2391     SYSC_QUIRK_CLKDM_NOAUTO),
2392     SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
2393     diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
2394     index 3b13feca970f7..3c54d61e4fa94 100644
2395     --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
2396     +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
2397     @@ -207,6 +207,7 @@ static const struct file_operations nouveau_pstate_fops = {
2398     .open = nouveau_debugfs_pstate_open,
2399     .read = seq_read,
2400     .write = nouveau_debugfs_pstate_set,
2401     + .release = single_release,
2402     };
2403    
2404     static struct drm_info_list nouveau_debugfs_list[] = {
2405     diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
2406     index c70983780ae79..fe466ee4c49bf 100644
2407     --- a/drivers/i2c/i2c-core-acpi.c
2408     +++ b/drivers/i2c/i2c-core-acpi.c
2409     @@ -436,6 +436,7 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
2410     break;
2411    
2412     i2c_acpi_register_device(adapter, adev, &info);
2413     + put_device(&adapter->dev);
2414     break;
2415     case ACPI_RECONFIG_DEVICE_REMOVE:
2416     if (!acpi_device_enumerated(adev))
2417     diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
2418     index a3e3b274f0ea3..cdd57ce55b2fa 100644
2419     --- a/drivers/mmc/host/meson-gx-mmc.c
2420     +++ b/drivers/mmc/host/meson-gx-mmc.c
2421     @@ -738,7 +738,7 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
2422     writel(start, host->regs + SD_EMMC_START);
2423     }
2424    
2425     -/* local sg copy to buffer version with _to/fromio usage for dram_access_quirk */
2426     +/* local sg copy for dram_access_quirk */
2427     static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
2428     size_t buflen, bool to_buffer)
2429     {
2430     @@ -756,21 +756,27 @@ static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data
2431     sg_miter_start(&miter, sgl, nents, sg_flags);
2432    
2433     while ((offset < buflen) && sg_miter_next(&miter)) {
2434     - unsigned int len;
2435     + unsigned int buf_offset = 0;
2436     + unsigned int len, left;
2437     + u32 *buf = miter.addr;
2438    
2439     len = min(miter.length, buflen - offset);
2440     + left = len;
2441    
2442     - /* When dram_access_quirk, the bounce buffer is a iomem mapping */
2443     - if (host->dram_access_quirk) {
2444     - if (to_buffer)
2445     - memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
2446     - else
2447     - memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
2448     + if (to_buffer) {
2449     + do {
2450     + writel(*buf++, host->bounce_iomem_buf + offset + buf_offset);
2451     +
2452     + buf_offset += 4;
2453     + left -= 4;
2454     + } while (left);
2455     } else {
2456     - if (to_buffer)
2457     - memcpy(host->bounce_buf + offset, miter.addr, len);
2458     - else
2459     - memcpy(miter.addr, host->bounce_buf + offset, len);
2460     + do {
2461     + *buf++ = readl(host->bounce_iomem_buf + offset + buf_offset);
2462     +
2463     + buf_offset += 4;
2464     + left -= 4;
2465     + } while (left);
2466     }
2467    
2468     offset += len;
2469     @@ -822,7 +828,11 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
2470     if (data->flags & MMC_DATA_WRITE) {
2471     cmd_cfg |= CMD_CFG_DATA_WR;
2472     WARN_ON(xfer_bytes > host->bounce_buf_size);
2473     - meson_mmc_copy_buffer(host, data, xfer_bytes, true);
2474     + if (host->dram_access_quirk)
2475     + meson_mmc_copy_buffer(host, data, xfer_bytes, true);
2476     + else
2477     + sg_copy_to_buffer(data->sg, data->sg_len,
2478     + host->bounce_buf, xfer_bytes);
2479     dma_wmb();
2480     }
2481    
2482     @@ -841,12 +851,43 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
2483     writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
2484     }
2485    
2486     +static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data *data)
2487     +{
2488     + struct scatterlist *sg;
2489     + int i;
2490     +
2491     + /* Reject request if any element offset or size is not 32bit aligned */
2492     + for_each_sg(data->sg, sg, data->sg_len, i) {
2493     + if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
2494     + !IS_ALIGNED(sg->length, sizeof(u32))) {
2495     + dev_err(mmc_dev(mmc), "unaligned sg offset %u len %u\n",
2496     + data->sg->offset, data->sg->length);
2497     + return -EINVAL;
2498     + }
2499     + }
2500     +
2501     + return 0;
2502     +}
2503     +
2504     static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
2505     {
2506     struct meson_host *host = mmc_priv(mmc);
2507     bool needs_pre_post_req = mrq->data &&
2508     !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);
2509    
2510     + /*
2511     + * The memory at the end of the controller used as bounce buffer for
2512     + * the dram_access_quirk only accepts 32bit read/write access,
2513     + * check the alignment and length of the data before starting the request.
2514     + */
2515     + if (host->dram_access_quirk && mrq->data) {
2516     + mrq->cmd->error = meson_mmc_validate_dram_access(mmc, mrq->data);
2517     + if (mrq->cmd->error) {
2518     + mmc_request_done(mmc, mrq);
2519     + return;
2520     + }
2521     + }
2522     +
2523     if (needs_pre_post_req) {
2524     meson_mmc_get_transfer_mode(mmc, mrq);
2525     if (!meson_mmc_desc_chain_mode(mrq->data))
2526     @@ -991,7 +1032,11 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
2527     if (meson_mmc_bounce_buf_read(data)) {
2528     xfer_bytes = data->blksz * data->blocks;
2529     WARN_ON(xfer_bytes > host->bounce_buf_size);
2530     - meson_mmc_copy_buffer(host, data, xfer_bytes, false);
2531     + if (host->dram_access_quirk)
2532     + meson_mmc_copy_buffer(host, data, xfer_bytes, false);
2533     + else
2534     + sg_copy_from_buffer(data->sg, data->sg_len,
2535     + host->bounce_buf, xfer_bytes);
2536     }
2537    
2538     next_cmd = meson_mmc_get_next_command(cmd);
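
The SRAM used as bounce buffer for dram_access_quirk above only tolerates 32-bit bus accesses, which is why memcpy_toio()/memcpy_fromio() give way to explicit writel()/readl() loops and why every scatterlist entry must have a 4-byte aligned offset and length. A tiny standalone illustration of the alignment rule the new validation enforces (local IS_ALIGNED stand-in, not the kernel macro):

#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	struct { unsigned int offset, length; } sg[] = {
		{ 0, 512 },	/* fine: both multiples of 4           */
		{ 2, 510 },	/* rejected: neither is 4-byte aligned */
	};

	for (unsigned int i = 0; i < sizeof(sg) / sizeof(sg[0]); i++)
		printf("sg[%u] offset=%u len=%u -> %s\n", i,
		       sg[i].offset, sg[i].length,
		       (IS_ALIGNED(sg[i].offset, 4) &&
			IS_ALIGNED(sg[i].length, 4)) ? "ok" : "-EINVAL");
	return 0;
}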
2539     diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
2540     index ebc37e2569221..f19edd4c6c5bb 100644
2541     --- a/drivers/net/ethernet/google/gve/gve.h
2542     +++ b/drivers/net/ethernet/google/gve/gve.h
2543     @@ -391,7 +391,7 @@ struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
2544     gve_num_tx_qpls(priv));
2545    
2546     /* we are out of rx qpls */
2547     - if (id == priv->qpl_cfg.qpl_map_size)
2548     + if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
2549     return NULL;
2550    
2551     set_bit(id, priv->qpl_cfg.qpl_id_map);
2552     diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
2553     index f8dfa7501f65a..5b450c6100add 100644
2554     --- a/drivers/net/ethernet/google/gve/gve_main.c
2555     +++ b/drivers/net/ethernet/google/gve/gve_main.c
2556     @@ -30,6 +30,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
2557     {
2558     struct gve_priv *priv = netdev_priv(dev);
2559     unsigned int start;
2560     + u64 packets, bytes;
2561     int ring;
2562    
2563     if (priv->rx) {
2564     @@ -37,10 +38,12 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
2565     do {
2566     start =
2567     u64_stats_fetch_begin(&priv->rx[ring].statss);
2568     - s->rx_packets += priv->rx[ring].rpackets;
2569     - s->rx_bytes += priv->rx[ring].rbytes;
2570     + packets = priv->rx[ring].rpackets;
2571     + bytes = priv->rx[ring].rbytes;
2572     } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
2573     start));
2574     + s->rx_packets += packets;
2575     + s->rx_bytes += bytes;
2576     }
2577     }
2578     if (priv->tx) {
2579     @@ -48,10 +51,12 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
2580     do {
2581     start =
2582     u64_stats_fetch_begin(&priv->tx[ring].statss);
2583     - s->tx_packets += priv->tx[ring].pkt_done;
2584     - s->tx_bytes += priv->tx[ring].bytes_done;
2585     + packets = priv->tx[ring].pkt_done;
2586     + bytes = priv->tx[ring].bytes_done;
2587     } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
2588     start));
2589     + s->tx_packets += packets;
2590     + s->tx_bytes += bytes;
2591     }
2592     }
2593     }
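
The gve hunk above matters because u64_stats_fetch_retry() can make the loop body run more than once: adding straight into the rtnl counters inside the loop double-counts on every retry, while snapshotting into locals and adding once after a consistent read does not. A standalone sketch of the two patterns with a fake retry that fires exactly once:

#include <stdio.h>

static unsigned int seq;
static int interfered;

static unsigned int begin(void)
{
	return seq;
}

static int retry(unsigned int start)
{
	if (!interfered) {		/* pretend a writer hit us once */
		interfered = 1;
		seq++;
	}
	return seq != start;
}

int main(void)
{
	unsigned long long ring_packets = 100, total_bad = 0, total_good = 0;
	unsigned long long packets;
	unsigned int start;

	do {				/* buggy: accumulate inside the loop */
		start = begin();
		total_bad += ring_packets;
	} while (retry(start));

	seq = 0;
	interfered = 0;
	do {				/* fixed: snapshot, add once outside */
		start = begin();
		packets = ring_packets;
	} while (retry(start));
	total_good += packets;

	printf("buggy=%llu fixed=%llu (ring really saw %llu)\n",
	       total_bad, total_good, ring_packets);
	return 0;
}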
2594     diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
2595     index 21ab7d2caddf5..917be10a5cf5c 100644
2596     --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
2597     +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
2598     @@ -4817,7 +4817,8 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
2599     {
2600     int i;
2601    
2602     - i40e_free_misc_vector(pf);
2603     + if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
2604     + i40e_free_misc_vector(pf);
2605    
2606     i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
2607     I40E_IWARP_IRQ_PILE_ID);
2608     @@ -9616,7 +9617,7 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
2609     if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
2610     /* retry with a larger buffer */
2611     buf_len = data_size;
2612     - } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
2613     + } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
2614     dev_info(&pf->pdev->dev,
2615     "capability discovery failed, err %s aq_err %s\n",
2616     i40e_stat_str(&pf->hw, err),
2617     diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
2618     index 5bf06eac04ba3..bec73f0640d03 100644
2619     --- a/drivers/net/phy/mdio_bus.c
2620     +++ b/drivers/net/phy/mdio_bus.c
2621     @@ -385,6 +385,13 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
2622     bus->dev.groups = NULL;
2623     dev_set_name(&bus->dev, "%s", bus->id);
2624    
2625     + /* We need to set state to MDIOBUS_UNREGISTERED to correctly release
2626     + * the device in mdiobus_free()
2627     + *
2628     + * State will be updated later in this function in case of success
2629     + */
2630     + bus->state = MDIOBUS_UNREGISTERED;
2631     +
2632     err = device_register(&bus->dev);
2633     if (err) {
2634     pr_err("mii_bus %s failed to register\n", bus->id);
2635     diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
2636     index 27b67f12ec455..5657c604602e8 100644
2637     --- a/drivers/net/phy/sfp.c
2638     +++ b/drivers/net/phy/sfp.c
2639     @@ -115,7 +115,7 @@ static const char * const sm_state_strings[] = {
2640     [SFP_S_LINK_UP] = "link_up",
2641     [SFP_S_TX_FAULT] = "tx_fault",
2642     [SFP_S_REINIT] = "reinit",
2643     - [SFP_S_TX_DISABLE] = "rx_disable",
2644     + [SFP_S_TX_DISABLE] = "tx_disable",
2645     };
2646    
2647     static const char *sm_state_to_str(unsigned short sm_state)
2648     diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
2649     index dcd6e00c80467..a50656632df93 100644
2650     --- a/drivers/ptp/ptp_pch.c
2651     +++ b/drivers/ptp/ptp_pch.c
2652     @@ -683,6 +683,7 @@ static const struct pci_device_id pch_ieee1588_pcidev_id[] = {
2653     },
2654     {0}
2655     };
2656     +MODULE_DEVICE_TABLE(pci, pch_ieee1588_pcidev_id);
2657    
2658     static struct pci_driver pch_driver = {
2659     .name = KBUILD_MODNAME,
2660     diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
2661     index eba7f76f9d61a..6034cd8992b0e 100644
2662     --- a/drivers/soc/qcom/mdt_loader.c
2663     +++ b/drivers/soc/qcom/mdt_loader.c
2664     @@ -98,7 +98,7 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len)
2665     if (ehdr->e_phnum < 2)
2666     return ERR_PTR(-EINVAL);
2667    
2668     - if (phdrs[0].p_type == PT_LOAD || phdrs[1].p_type == PT_LOAD)
2669     + if (phdrs[0].p_type == PT_LOAD)
2670     return ERR_PTR(-EINVAL);
2671    
2672     if ((phdrs[1].p_flags & QCOM_MDT_TYPE_MASK) != QCOM_MDT_TYPE_HASH)
2673     diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
2674     index 176696f8f38d1..3303bcaf67154 100644
2675     --- a/drivers/soc/qcom/socinfo.c
2676     +++ b/drivers/soc/qcom/socinfo.c
2677     @@ -447,7 +447,7 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
2678     /* Feed the soc specific unique data into entropy pool */
2679     add_device_randomness(info, item_size);
2680    
2681     - platform_set_drvdata(pdev, qs->soc_dev);
2682     + platform_set_drvdata(pdev, qs);
2683    
2684     return 0;
2685     }
2686     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2687     index e139cda35f639..5dc8827ede7e8 100644
2688     --- a/drivers/usb/class/cdc-acm.c
2689     +++ b/drivers/usb/class/cdc-acm.c
2690     @@ -339,6 +339,9 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
2691     acm->iocount.overrun++;
2692     spin_unlock_irqrestore(&acm->read_lock, flags);
2693    
2694     + if (newctrl & ACM_CTRL_BRK)
2695     + tty_flip_buffer_push(&acm->port);
2696     +
2697     if (difference)
2698     wake_up_all(&acm->wioctl);
2699    
2700     @@ -474,11 +477,16 @@ static int acm_submit_read_urbs(struct acm *acm, gfp_t mem_flags)
2701    
2702     static void acm_process_read_urb(struct acm *acm, struct urb *urb)
2703     {
2704     + unsigned long flags;
2705     +
2706     if (!urb->actual_length)
2707     return;
2708    
2709     + spin_lock_irqsave(&acm->read_lock, flags);
2710     tty_insert_flip_string(&acm->port, urb->transfer_buffer,
2711     urb->actual_length);
2712     + spin_unlock_irqrestore(&acm->read_lock, flags);
2713     +
2714     tty_flip_buffer_push(&acm->port);
2715     }
2716    
2717     diff --git a/drivers/usb/common/Kconfig b/drivers/usb/common/Kconfig
2718     index d611477aae414..196f4a3975871 100644
2719     --- a/drivers/usb/common/Kconfig
2720     +++ b/drivers/usb/common/Kconfig
2721     @@ -6,8 +6,7 @@ config USB_COMMON
2722    
2723     config USB_LED_TRIG
2724     bool "USB LED Triggers"
2725     - depends on LEDS_CLASS && LEDS_TRIGGERS
2726     - select USB_COMMON
2727     + depends on LEDS_CLASS && USB_COMMON && LEDS_TRIGGERS
2728     help
2729     This option adds LED triggers for USB host and/or gadget activity.
2730    
2731     diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
2732     index b40db48f8874d..89391939630bd 100644
2733     --- a/drivers/usb/typec/tcpm/tcpm.c
2734     +++ b/drivers/usb/typec/tcpm/tcpm.c
2735     @@ -3679,6 +3679,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
2736     tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2737     break;
2738     case SRC_ATTACHED:
2739     + case SRC_STARTUP:
2740     case SRC_SEND_CAPABILITIES:
2741     case SRC_READY:
2742     if (tcpm_port_is_disconnected(port) ||
2743     diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
2744     index b9f6a82a04953..6fdc6ab3ceb87 100644
2745     --- a/drivers/video/fbdev/gbefb.c
2746     +++ b/drivers/video/fbdev/gbefb.c
2747     @@ -1269,7 +1269,7 @@ static struct platform_device *gbefb_device;
2748     static int __init gbefb_init(void)
2749     {
2750     int ret = platform_driver_register(&gbefb_driver);
2751     - if (!ret) {
2752     + if (IS_ENABLED(CONFIG_SGI_IP32) && !ret) {
2753     gbefb_device = platform_device_alloc("gbefb", 0);
2754     if (gbefb_device) {
2755     ret = platform_device_add(gbefb_device);
2756     diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
2757     index be31c296eed4c..07f362c63ae90 100644
2758     --- a/drivers/xen/balloon.c
2759     +++ b/drivers/xen/balloon.c
2760     @@ -508,12 +508,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
2761     }
2762    
2763     /*
2764     - * Stop waiting if either state is not BP_EAGAIN and ballooning action is
2765     - * needed, or if the credit has changed while state is BP_EAGAIN.
2766     + * Stop waiting if either state is BP_DONE and ballooning action is
2767     + * needed, or if the credit has changed while state is not BP_DONE.
2768     */
2769     static bool balloon_thread_cond(enum bp_state state, long credit)
2770     {
2771     - if (state != BP_EAGAIN)
2772     + if (state == BP_DONE)
2773     credit = 0;
2774    
2775     return current_credit() != credit || kthread_should_stop();
2776     @@ -533,10 +533,19 @@ static int balloon_thread(void *unused)
2777    
2778     set_freezable();
2779     for (;;) {
2780     - if (state == BP_EAGAIN)
2781     - timeout = balloon_stats.schedule_delay * HZ;
2782     - else
2783     + switch (state) {
2784     + case BP_DONE:
2785     + case BP_ECANCELED:
2786     timeout = 3600 * HZ;
2787     + break;
2788     + case BP_EAGAIN:
2789     + timeout = balloon_stats.schedule_delay * HZ;
2790     + break;
2791     + case BP_WAIT:
2792     + timeout = HZ;
2793     + break;
2794     + }
2795     +
2796     credit = current_credit();
2797    
2798     wait_event_freezable_timeout(balloon_thread_wq,
2799     diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
2800     index 9c9422e9fac4d..d4ff944cd16e1 100644
2801     --- a/drivers/xen/privcmd.c
2802     +++ b/drivers/xen/privcmd.c
2803     @@ -810,11 +810,12 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
2804     unsigned int domid =
2805     (xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
2806     DOMID_SELF : kdata.dom;
2807     - int num;
2808     + int num, *errs = (int *)pfns;
2809    
2810     + BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
2811     num = xen_remap_domain_mfn_array(vma,
2812     kdata.addr & PAGE_MASK,
2813     - pfns, kdata.num, (int *)pfns,
2814     + pfns, kdata.num, errs,
2815     vma->vm_page_prot,
2816     domid,
2817     vma->vm_private_data);
2818     @@ -824,7 +825,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
2819     unsigned int i;
2820    
2821     for (i = 0; i < num; i++) {
2822     - rc = pfns[i];
2823     + rc = errs[i];
2824     if (rc < 0)
2825     break;
2826     }
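
The privcmd change above reuses the pfns[] array as the per-page int error array, so the added BUILD_BUG_ON() pins down the assumption that an int never outgrows a xen_pfn_t slot. A standalone equivalent using C11 _Static_assert and stand-in types; note the kernel builds with -fno-strict-aliasing, which this kind of storage reuse relies on:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t xen_pfn_t;	/* stand-in for the real typedef */

_Static_assert(sizeof(int) <= sizeof(xen_pfn_t),
	       "int error codes must fit in a pfn slot");

int main(void)
{
	xen_pfn_t pfns[2] = { 0x1234, 0x5678 };
	int *errs = (int *)pfns;	/* same storage, reused for errors */

	errs[0] = 0;			/* page 0 mapped fine     */
	errs[1] = -14;			/* page 1 failed, -EFAULT */
	printf("errs[0]=%d errs[1]=%d\n", errs[0], errs[1]);
	return 0;
}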
2827     diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
2828     index d6f244559e759..e61d9c4359573 100644
2829     --- a/fs/nfsd/nfs4xdr.c
2830     +++ b/fs/nfsd/nfs4xdr.c
2831     @@ -3131,15 +3131,18 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
2832     goto fail;
2833     cd->rd_maxcount -= entry_bytes;
2834     /*
2835     - * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
2836     - * let's always let through the first entry, at least:
2837     + * RFC 3530 14.2.24 describes rd_dircount as only a "hint", and
2838     + * notes that it could be zero. If it is zero, then the server
2839     + * should enforce only the rd_maxcount value.
2840     */
2841     - if (!cd->rd_dircount)
2842     - goto fail;
2843     - name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
2844     - if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
2845     - goto fail;
2846     - cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
2847     + if (cd->rd_dircount) {
2848     + name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
2849     + if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
2850     + goto fail;
2851     + cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
2852     + if (!cd->rd_dircount)
2853     + cd->rd_maxcount = 0;
2854     + }
2855    
2856     cd->cookie_offset = cookie_offset;
2857     skip_entry:
2858     diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
2859     index 7f39d6091dfa0..8e03d6c25097a 100644
2860     --- a/fs/nfsd/nfsctl.c
2861     +++ b/fs/nfsd/nfsctl.c
2862     @@ -1549,7 +1549,7 @@ static int __init init_nfsd(void)
2863     goto out_free_all;
2864     return 0;
2865     out_free_all:
2866     - unregister_pernet_subsys(&nfsd_net_ops);
2867     + unregister_filesystem(&nfsd_fs_type);
2868     out_free_exports:
2869     remove_proc_entry("fs/nfs/exports", NULL);
2870     remove_proc_entry("fs/nfs", NULL);
2871     diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
2872     index 073be36b0686c..876de87f604cd 100644
2873     --- a/fs/overlayfs/dir.c
2874     +++ b/fs/overlayfs/dir.c
2875     @@ -1162,9 +1162,13 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
2876     goto out_dput;
2877     }
2878     } else {
2879     - if (!d_is_negative(newdentry) &&
2880     - (!new_opaque || !ovl_is_whiteout(newdentry)))
2881     - goto out_dput;
2882     + if (!d_is_negative(newdentry)) {
2883     + if (!new_opaque || !ovl_is_whiteout(newdentry))
2884     + goto out_dput;
2885     + } else {
2886     + if (flags & RENAME_EXCHANGE)
2887     + goto out_dput;
2888     + }
2889     }
2890    
2891     if (olddentry == trap)
2892     diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
2893     index fba2ade28fb3a..49c7a09d688d7 100644
2894     --- a/kernel/bpf/stackmap.c
2895     +++ b/kernel/bpf/stackmap.c
2896     @@ -60,7 +60,8 @@ static inline int stack_map_data_size(struct bpf_map *map)
2897    
2898     static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
2899     {
2900     - u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
2901     + u64 elem_size = sizeof(struct stack_map_bucket) +
2902     + (u64)smap->map.value_size;
2903     int err;
2904    
2905     smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
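
The stackmap fix above widens elem_size to u64 because value_size and max_entries are both 32-bit, so the old product could wrap before it ever reached bpf_map_area_alloc() and leave the map badly undersized. A standalone demonstration of the wrap and of the widened multiply:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t value_size  = 1u << 20;	/* 1 MiB per element */
	uint32_t max_entries = 1u << 13;	/* 8192 elements     */

	uint32_t narrow = value_size * max_entries;		/* wraps to 0 */
	uint64_t wide   = (uint64_t)value_size * max_entries;	/* 8 GiB      */

	printf("u32 product: %u bytes, u64 product: %llu bytes\n",
	       narrow, (unsigned long long)wide);
	return 0;
}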
2906     diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
2907     index 8a664148f57aa..cbcbc19efcb34 100644
2908     --- a/net/bridge/br_netlink.c
2909     +++ b/net/bridge/br_netlink.c
2910     @@ -1536,7 +1536,7 @@ static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
2911     }
2912    
2913     return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
2914     - nla_total_size(sizeof(struct br_mcast_stats)) +
2915     + nla_total_size_64bit(sizeof(struct br_mcast_stats)) +
2916     nla_total_size(0);
2917     }
2918    
2919     diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2920     index 6fbc9cb09dc0e..a53b101ce41ae 100644
2921     --- a/net/core/rtnetlink.c
2922     +++ b/net/core/rtnetlink.c
2923     @@ -4950,7 +4950,7 @@ nla_put_failure:
2924     static size_t if_nlmsg_stats_size(const struct net_device *dev,
2925     u32 filter_mask)
2926     {
2927     - size_t size = 0;
2928     + size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
2929    
2930     if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
2931     size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
2932     diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
2933     index 006a34b185378..72fdf1fcbcaa9 100644
2934     --- a/net/ipv4/inet_hashtables.c
2935     +++ b/net/ipv4/inet_hashtables.c
2936     @@ -239,8 +239,10 @@ static inline int compute_score(struct sock *sk, struct net *net,
2937    
2938     if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
2939     return -1;
2940     + score = sk->sk_bound_dev_if ? 2 : 1;
2941    
2942     - score = sk->sk_family == PF_INET ? 2 : 1;
2943     + if (sk->sk_family == PF_INET)
2944     + score++;
2945     if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
2946     score++;
2947     }
2948     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
2949     index de04d99418850..fdbd56ee1300c 100644
2950     --- a/net/ipv4/udp.c
2951     +++ b/net/ipv4/udp.c
2952     @@ -386,7 +386,8 @@ static int compute_score(struct sock *sk, struct net *net,
2953     dif, sdif);
2954     if (!dev_match)
2955     return -1;
2956     - score += 4;
2957     + if (sk->sk_bound_dev_if)
2958     + score += 4;
2959    
2960     if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
2961     score++;
2962     diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
2963     index fbe9d4295eac3..ab12e00f6bfff 100644
2964     --- a/net/ipv6/inet6_hashtables.c
2965     +++ b/net/ipv6/inet6_hashtables.c
2966     @@ -104,7 +104,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
2967     if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
2968     return -1;
2969    
2970     - score = 1;
2971     + score = sk->sk_bound_dev_if ? 2 : 1;
2972     if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
2973     score++;
2974     }
2975     diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
2976     index 5b8266f3e47f0..0f57c682afdd8 100644
2977     --- a/net/ipv6/udp.c
2978     +++ b/net/ipv6/udp.c
2979     @@ -133,7 +133,8 @@ static int compute_score(struct sock *sk, struct net *net,
2980     dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
2981     if (!dev_match)
2982     return -1;
2983     - score++;
2984     + if (sk->sk_bound_dev_if)
2985     + score++;
2986    
2987     if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
2988     score++;
2989     diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
2990     index acc76a738cfd8..cb35680db9b29 100644
2991     --- a/net/netlink/af_netlink.c
2992     +++ b/net/netlink/af_netlink.c
2993     @@ -585,7 +585,10 @@ static int netlink_insert(struct sock *sk, u32 portid)
2994    
2995     /* We need to ensure that the socket is hashed and visible. */
2996     smp_wmb();
2997     - nlk_sk(sk)->bound = portid;
2998     + /* Paired with lockless reads from netlink_bind(),
2999     + * netlink_connect() and netlink_sendmsg().
3000     + */
3001     + WRITE_ONCE(nlk_sk(sk)->bound, portid);
3002    
3003     err:
3004     release_sock(sk);
3005     @@ -1003,7 +1006,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
3006     if (nlk->ngroups < BITS_PER_LONG)
3007     groups &= (1UL << nlk->ngroups) - 1;
3008    
3009     - bound = nlk->bound;
3010     + /* Paired with WRITE_ONCE() in netlink_insert() */
3011     + bound = READ_ONCE(nlk->bound);
3012     if (bound) {
3013     /* Ensure nlk->portid is up-to-date. */
3014     smp_rmb();
3015     @@ -1089,8 +1093,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
3016    
3017     /* No need for barriers here as we return to user-space without
3018     * using any of the bound attributes.
3019     + * Paired with WRITE_ONCE() in netlink_insert().
3020     */
3021     - if (!nlk->bound)
3022     + if (!READ_ONCE(nlk->bound))
3023     err = netlink_autobind(sock);
3024    
3025     if (err == 0) {
3026     @@ -1879,7 +1884,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3027     dst_group = nlk->dst_group;
3028     }
3029    
3030     - if (!nlk->bound) {
3031     + /* Paired with WRITE_ONCE() in netlink_insert() */
3032     + if (!READ_ONCE(nlk->bound)) {
3033     err = netlink_autobind(sock);
3034     if (err)
3035     goto out;
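
The annotations above pair the WRITE_ONCE() of nlk->bound in netlink_insert() (issued after smp_wmb(), once portid is valid) with READ_ONCE() at the lockless readers in netlink_bind(), netlink_connect() and netlink_sendmsg(). A userspace analogue of the same publish/observe pattern, sketched with C11 release/acquire atomics instead of the kernel macros:

#include <stdatomic.h>
#include <stdio.h>

static int portid;		/* plain data, written once before publish */
static atomic_int bound;	/* the flag the patch marks with *_ONCE()   */

static void insert(int id)
{
	portid = id;					/* fill in the data  */
	atomic_store_explicit(&bound, 1,
			      memory_order_release);	/* then publish flag */
}

static void sender(void)
{
	if (!atomic_load_explicit(&bound, memory_order_acquire))
		printf("not bound yet, would autobind\n");
	else
		printf("bound, portid=%d\n", portid);	/* safe to read now  */
}

int main(void)
{
	sender();		/* before insert(): takes the autobind path */
	insert(42);
	sender();		/* after insert(): sees portid reliably     */
	return 0;
}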
3036     diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
3037     index 37c8aa75d70c5..56f4c1621e444 100644
3038     --- a/net/sched/sch_fifo.c
3039     +++ b/net/sched/sch_fifo.c
3040     @@ -148,6 +148,9 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
3041     if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
3042     return 0;
3043    
3044     + if (!q->ops->change)
3045     + return 0;
3046     +
3047     nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
3048     if (nla) {
3049     nla->nla_type = RTM_NEWQDISC;
3050     diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
3051     index da9ed0613eb7b..e14a66ce4884d 100644
3052     --- a/net/sched/sch_taprio.c
3053     +++ b/net/sched/sch_taprio.c
3054     @@ -1630,6 +1630,10 @@ static void taprio_destroy(struct Qdisc *sch)
3055     list_del(&q->taprio_list);
3056     spin_unlock(&taprio_list_lock);
3057    
3058     + /* Note that taprio_reset() might not be called if an error
3059     + * happens in qdisc_create(), after taprio_init() has been called.
3060     + */
3061     + hrtimer_cancel(&q->advance_timer);
3062    
3063     taprio_disable_offload(dev, q, NULL);
3064