Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0155-4.19.56-all-fixes.patch

Parent Directory | Revision Log


Revision 3434 - (show annotations) (download)
Fri Aug 2 11:48:01 2019 UTC (4 years, 8 months ago) by niro
File size: 115619 byte(s)
-linux-4.19.56
1 diff --git a/Makefile b/Makefile
2 index 3addd4c286fa..a76c61f77bcd 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 19
9 -SUBLEVEL = 55
10 +SUBLEVEL = 56
11 EXTRAVERSION =
12 NAME = "People's Front"
13
14 @@ -652,6 +652,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
15 KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
16 KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
17 KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
18 +KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
19
20 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
21 KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
22 @@ -696,7 +697,6 @@ ifeq ($(cc-name),clang)
23 KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
24 KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
25 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
26 -KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
27 # Quiet clang warning: comparison of unsigned expression < 0 is always false
28 KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
29 # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
30 diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
31 index ef149f59929a..d131c54acd3e 100644
32 --- a/arch/arc/boot/dts/hsdk.dts
33 +++ b/arch/arc/boot/dts/hsdk.dts
34 @@ -175,6 +175,7 @@
35 interrupt-names = "macirq";
36 phy-mode = "rgmii";
37 snps,pbl = <32>;
38 + snps,multicast-filter-bins = <256>;
39 clocks = <&gmacclk>;
40 clock-names = "stmmaceth";
41 phy-handle = <&phy0>;
42 @@ -183,6 +184,9 @@
43 mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
44 dma-coherent;
45
46 + tx-fifo-depth = <4096>;
47 + rx-fifo-depth = <4096>;
48 +
49 mdio {
50 #address-cells = <1>;
51 #size-cells = <0>;
52 diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
53 index d819de1c5d10..3ea4112c8302 100644
54 --- a/arch/arc/include/asm/cmpxchg.h
55 +++ b/arch/arc/include/asm/cmpxchg.h
56 @@ -92,8 +92,11 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
57
58 #endif /* CONFIG_ARC_HAS_LLSC */
59
60 -#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
61 - (unsigned long)(o), (unsigned long)(n)))
62 +#define cmpxchg(ptr, o, n) ({ \
63 + (typeof(*(ptr)))__cmpxchg((ptr), \
64 + (unsigned long)(o), \
65 + (unsigned long)(n)); \
66 +})
67
68 /*
69 * atomic_cmpxchg is same as cmpxchg
70 @@ -198,8 +201,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
71 return __xchg_bad_pointer();
72 }
73
74 -#define xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
75 - sizeof(*(ptr))))
76 +#define xchg(ptr, with) ({ \
77 + (typeof(*(ptr)))__xchg((unsigned long)(with), \
78 + (ptr), \
79 + sizeof(*(ptr))); \
80 +})
81
82 #endif /* CONFIG_ARC_PLAT_EZNPS */
83
84 diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
85 index 4097764fea23..fa18c00b0cfd 100644
86 --- a/arch/arc/mm/tlb.c
87 +++ b/arch/arc/mm/tlb.c
88 @@ -911,9 +911,11 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
89 struct pt_regs *regs)
90 {
91 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
92 - unsigned int pd0[mmu->ways];
93 unsigned long flags;
94 - int set;
95 + int set, n_ways = mmu->ways;
96 +
97 + n_ways = min(n_ways, 4);
98 + BUG_ON(mmu->ways > 4);
99
100 local_irq_save(flags);
101
102 @@ -921,9 +923,10 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
103 for (set = 0; set < mmu->sets; set++) {
104
105 int is_valid, way;
106 + unsigned int pd0[4];
107
108 /* read out all the ways of current set */
109 - for (way = 0, is_valid = 0; way < mmu->ways; way++) {
110 + for (way = 0, is_valid = 0; way < n_ways; way++) {
111 write_aux_reg(ARC_REG_TLBINDEX,
112 SET_WAY_TO_IDX(mmu, set, way));
113 write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
114 @@ -937,14 +940,14 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
115 continue;
116
117 /* Scan the set for duplicate ways: needs a nested loop */
118 - for (way = 0; way < mmu->ways - 1; way++) {
119 + for (way = 0; way < n_ways - 1; way++) {
120
121 int n;
122
123 if (!pd0[way])
124 continue;
125
126 - for (n = way + 1; n < mmu->ways; n++) {
127 + for (n = way + 1; n < n_ways; n++) {
128 if (pd0[way] != pd0[n])
129 continue;
130
131 diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
132 index c9063ffca524..3fd9a1676d88 100644
133 --- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
134 +++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
135 @@ -410,6 +410,7 @@
136 vqmmc-supply = <&ldo1_reg>;
137 bus-width = <4>;
138 cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
139 + no-1-8-v;
140 };
141
142 &mmc2 {
143 diff --git a/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi
144 index baba7b00eca7..fdca48186916 100644
145 --- a/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi
146 +++ b/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi
147 @@ -22,7 +22,7 @@
148 *
149 * Datamanual Revisions:
150 *
151 - * DRA76x Silicon Revision 1.0: SPRS993A, Revised July 2017
152 + * DRA76x Silicon Revision 1.0: SPRS993E, Revised December 2018
153 *
154 */
155
156 @@ -169,25 +169,25 @@
157 /* Corresponds to MMC2_HS200_MANUAL1 in datamanual */
158 mmc2_iodelay_hs200_conf: mmc2_iodelay_hs200_conf {
159 pinctrl-pin-array = <
160 - 0x190 A_DELAY_PS(384) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */
161 - 0x194 A_DELAY_PS(0) G_DELAY_PS(174) /* CFG_GPMC_A19_OUT */
162 - 0x1a8 A_DELAY_PS(410) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */
163 - 0x1ac A_DELAY_PS(85) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */
164 - 0x1b4 A_DELAY_PS(468) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */
165 - 0x1b8 A_DELAY_PS(139) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */
166 - 0x1c0 A_DELAY_PS(676) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */
167 - 0x1c4 A_DELAY_PS(69) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */
168 - 0x1d0 A_DELAY_PS(1062) G_DELAY_PS(154) /* CFG_GPMC_A23_OUT */
169 - 0x1d8 A_DELAY_PS(640) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */
170 - 0x1dc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */
171 - 0x1e4 A_DELAY_PS(356) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */
172 - 0x1e8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */
173 - 0x1f0 A_DELAY_PS(579) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */
174 - 0x1f4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */
175 - 0x1fc A_DELAY_PS(435) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */
176 - 0x200 A_DELAY_PS(36) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */
177 - 0x364 A_DELAY_PS(759) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */
178 - 0x368 A_DELAY_PS(72) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */
179 + 0x190 A_DELAY_PS(384) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */
180 + 0x194 A_DELAY_PS(350) G_DELAY_PS(174) /* CFG_GPMC_A19_OUT */
181 + 0x1a8 A_DELAY_PS(410) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */
182 + 0x1ac A_DELAY_PS(335) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */
183 + 0x1b4 A_DELAY_PS(468) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */
184 + 0x1b8 A_DELAY_PS(339) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */
185 + 0x1c0 A_DELAY_PS(676) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */
186 + 0x1c4 A_DELAY_PS(219) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */
187 + 0x1d0 A_DELAY_PS(1062) G_DELAY_PS(154) /* CFG_GPMC_A23_OUT */
188 + 0x1d8 A_DELAY_PS(640) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */
189 + 0x1dc A_DELAY_PS(150) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */
190 + 0x1e4 A_DELAY_PS(356) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */
191 + 0x1e8 A_DELAY_PS(150) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */
192 + 0x1f0 A_DELAY_PS(579) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */
193 + 0x1f4 A_DELAY_PS(200) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */
194 + 0x1fc A_DELAY_PS(435) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */
195 + 0x200 A_DELAY_PS(236) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */
196 + 0x364 A_DELAY_PS(759) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */
197 + 0x368 A_DELAY_PS(372) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */
198 >;
199 };
200
201 diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
202 index fd0053e47a15..3708a71f30e6 100644
203 --- a/arch/arm/mach-imx/cpuidle-imx6sx.c
204 +++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
205 @@ -15,6 +15,7 @@
206
207 #include "common.h"
208 #include "cpuidle.h"
209 +#include "hardware.h"
210
211 static int imx6sx_idle_finish(unsigned long val)
212 {
213 @@ -110,7 +111,7 @@ int __init imx6sx_cpuidle_init(void)
214 * except for power up sw2iso which need to be
215 * larger than LDO ramp up time.
216 */
217 - imx_gpc_set_arm_power_up_timing(0xf, 1);
218 + imx_gpc_set_arm_power_up_timing(cpu_is_imx6sx() ? 0xf : 0x2, 1);
219 imx_gpc_set_arm_power_down_timing(1, 1);
220
221 return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
222 diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
223 index 35649ee8ad56..c12ff63265a9 100644
224 --- a/arch/arm64/Makefile
225 +++ b/arch/arm64/Makefile
226 @@ -51,6 +51,7 @@ endif
227
228 KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst)
229 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
230 +KBUILD_CFLAGS += -Wno-psabi
231 KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
232
233 KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
234 diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
235 index ad64d2c92ef5..5dff8eccd17d 100644
236 --- a/arch/arm64/include/uapi/asm/ptrace.h
237 +++ b/arch/arm64/include/uapi/asm/ptrace.h
238 @@ -64,8 +64,6 @@
239
240 #ifndef __ASSEMBLY__
241
242 -#include <linux/prctl.h>
243 -
244 /*
245 * User structures for general purpose, floating point and debug registers.
246 */
247 @@ -112,10 +110,10 @@ struct user_sve_header {
248
249 /*
250 * Common SVE_PT_* flags:
251 - * These must be kept in sync with prctl interface in <linux/ptrace.h>
252 + * These must be kept in sync with prctl interface in <linux/prctl.h>
253 */
254 -#define SVE_PT_VL_INHERIT (PR_SVE_VL_INHERIT >> 16)
255 -#define SVE_PT_VL_ONEXEC (PR_SVE_SET_VL_ONEXEC >> 16)
256 +#define SVE_PT_VL_INHERIT ((1 << 17) /* PR_SVE_VL_INHERIT */ >> 16)
257 +#define SVE_PT_VL_ONEXEC ((1 << 18) /* PR_SVE_SET_VL_ONEXEC */ >> 16)
258
259
260 /*
261 diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
262 index 3432e5ef9f41..388f8fc13080 100644
263 --- a/arch/arm64/kernel/ssbd.c
264 +++ b/arch/arm64/kernel/ssbd.c
265 @@ -4,6 +4,7 @@
266 */
267
268 #include <linux/errno.h>
269 +#include <linux/prctl.h>
270 #include <linux/sched.h>
271 #include <linux/thread_info.h>
272
273 diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
274 index 4aaff3b3175c..6dbe4eab0a0e 100644
275 --- a/arch/mips/kernel/uprobes.c
276 +++ b/arch/mips/kernel/uprobes.c
277 @@ -112,9 +112,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
278 */
279 aup->resume_epc = regs->cp0_epc + 4;
280 if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) {
281 - unsigned long epc;
282 -
283 - epc = regs->cp0_epc;
284 __compute_return_epc_for_insn(regs,
285 (union mips_instruction) aup->insn[0]);
286 aup->resume_epc = regs->cp0_epc;
287 diff --git a/arch/parisc/math-emu/cnv_float.h b/arch/parisc/math-emu/cnv_float.h
288 index 933423fa5144..b0db61188a61 100644
289 --- a/arch/parisc/math-emu/cnv_float.h
290 +++ b/arch/parisc/math-emu/cnv_float.h
291 @@ -60,19 +60,19 @@
292 ((exponent < (SGL_P - 1)) ? \
293 (Sall(sgl_value) << (SGL_EXP_LENGTH + 1 + exponent)) : FALSE)
294
295 -#define Int_isinexact_to_sgl(int_value) (int_value << 33 - SGL_EXP_LENGTH)
296 +#define Int_isinexact_to_sgl(int_value) ((int_value << 33 - SGL_EXP_LENGTH) != 0)
297
298 #define Sgl_roundnearest_from_int(int_value,sgl_value) \
299 if (int_value & 1<<(SGL_EXP_LENGTH - 2)) /* round bit */ \
300 - if ((int_value << 34 - SGL_EXP_LENGTH) || Slow(sgl_value)) \
301 + if (((int_value << 34 - SGL_EXP_LENGTH) != 0) || Slow(sgl_value)) \
302 Sall(sgl_value)++
303
304 #define Dint_isinexact_to_sgl(dint_valueA,dint_valueB) \
305 - ((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) || Dintp2(dint_valueB))
306 + (((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) != 0) || Dintp2(dint_valueB))
307
308 #define Sgl_roundnearest_from_dint(dint_valueA,dint_valueB,sgl_value) \
309 if (Dintp1(dint_valueA) & 1<<(SGL_EXP_LENGTH - 2)) \
310 - if ((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) || \
311 + if (((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) != 0) || \
312 Dintp2(dint_valueB) || Slow(sgl_value)) Sall(sgl_value)++
313
314 #define Dint_isinexact_to_dbl(dint_value) \
315 diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
316 index 2b7135391231..d9d5391b2af6 100644
317 --- a/arch/powerpc/include/asm/ppc-opcode.h
318 +++ b/arch/powerpc/include/asm/ppc-opcode.h
319 @@ -336,6 +336,7 @@
320 #define PPC_INST_MULLI 0x1c000000
321 #define PPC_INST_DIVWU 0x7c000396
322 #define PPC_INST_DIVD 0x7c0003d2
323 +#define PPC_INST_DIVDU 0x7c000392
324 #define PPC_INST_RLWINM 0x54000000
325 #define PPC_INST_RLWIMI 0x50000000
326 #define PPC_INST_RLDICL 0x78000000
327 diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
328 index dbd8f762140b..68984d85ad6b 100644
329 --- a/arch/powerpc/mm/mmu_context_book3s64.c
330 +++ b/arch/powerpc/mm/mmu_context_book3s64.c
331 @@ -53,14 +53,48 @@ int hash__alloc_context_id(void)
332 }
333 EXPORT_SYMBOL_GPL(hash__alloc_context_id);
334
335 +static int realloc_context_ids(mm_context_t *ctx)
336 +{
337 + int i, id;
338 +
339 + /*
340 + * id 0 (aka. ctx->id) is special, we always allocate a new one, even if
341 + * there wasn't one allocated previously (which happens in the exec
342 + * case where ctx is newly allocated).
343 + *
344 + * We have to be a bit careful here. We must keep the existing ids in
345 + * the array, so that we can test if they're non-zero to decide if we
346 + * need to allocate a new one. However in case of error we must free the
347 + * ids we've allocated but *not* any of the existing ones (or risk a
348 + * UAF). That's why we decrement i at the start of the error handling
349 + * loop, to skip the id that we just tested but couldn't reallocate.
350 + */
351 + for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
352 + if (i == 0 || ctx->extended_id[i]) {
353 + id = hash__alloc_context_id();
354 + if (id < 0)
355 + goto error;
356 +
357 + ctx->extended_id[i] = id;
358 + }
359 + }
360 +
361 + /* The caller expects us to return id */
362 + return ctx->id;
363 +
364 +error:
365 + for (i--; i >= 0; i--) {
366 + if (ctx->extended_id[i])
367 + ida_free(&mmu_context_ida, ctx->extended_id[i]);
368 + }
369 +
370 + return id;
371 +}
372 +
373 static int hash__init_new_context(struct mm_struct *mm)
374 {
375 int index;
376
377 - index = hash__alloc_context_id();
378 - if (index < 0)
379 - return index;
380 -
381 /*
382 * The old code would re-promote on fork, we don't do that when using
383 * slices as it could cause problem promoting slices that have been
384 @@ -78,6 +112,10 @@ static int hash__init_new_context(struct mm_struct *mm)
385 if (mm->context.id == 0)
386 slice_init_new_context_exec(mm);
387
388 + index = realloc_context_ids(&mm->context);
389 + if (index < 0)
390 + return index;
391 +
392 subpage_prot_init_new_context(mm);
393
394 pkey_mm_init(mm);
395 diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
396 index 68dece206048..e5c1d30ee968 100644
397 --- a/arch/powerpc/net/bpf_jit.h
398 +++ b/arch/powerpc/net/bpf_jit.h
399 @@ -116,7 +116,7 @@
400 ___PPC_RA(a) | IMM_L(i))
401 #define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | ___PPC_RT(d) | \
402 ___PPC_RA(a) | ___PPC_RB(b))
403 -#define PPC_DIVD(d, a, b) EMIT(PPC_INST_DIVD | ___PPC_RT(d) | \
404 +#define PPC_DIVDU(d, a, b) EMIT(PPC_INST_DIVDU | ___PPC_RT(d) | \
405 ___PPC_RA(a) | ___PPC_RB(b))
406 #define PPC_AND(d, a, b) EMIT(PPC_INST_AND | ___PPC_RA(d) | \
407 ___PPC_RS(a) | ___PPC_RB(b))
408 diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
409 index 226eec62d125..279a51bf94d0 100644
410 --- a/arch/powerpc/net/bpf_jit_comp64.c
411 +++ b/arch/powerpc/net/bpf_jit_comp64.c
412 @@ -372,12 +372,12 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
413 case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
414 case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
415 if (BPF_OP(code) == BPF_MOD) {
416 - PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
417 + PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg);
418 PPC_MULD(b2p[TMP_REG_1], src_reg,
419 b2p[TMP_REG_1]);
420 PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
421 } else
422 - PPC_DIVD(dst_reg, dst_reg, src_reg);
423 + PPC_DIVDU(dst_reg, dst_reg, src_reg);
424 break;
425 case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
426 case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
427 @@ -405,7 +405,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
428 break;
429 case BPF_ALU64:
430 if (BPF_OP(code) == BPF_MOD) {
431 - PPC_DIVD(b2p[TMP_REG_2], dst_reg,
432 + PPC_DIVDU(b2p[TMP_REG_2], dst_reg,
433 b2p[TMP_REG_1]);
434 PPC_MULD(b2p[TMP_REG_1],
435 b2p[TMP_REG_1],
436 @@ -413,7 +413,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
437 PPC_SUB(dst_reg, dst_reg,
438 b2p[TMP_REG_1]);
439 } else
440 - PPC_DIVD(dst_reg, dst_reg,
441 + PPC_DIVDU(dst_reg, dst_reg,
442 b2p[TMP_REG_1]);
443 break;
444 }
445 diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
446 index 88401d5125bc..523dbfbac03d 100644
447 --- a/arch/riscv/mm/fault.c
448 +++ b/arch/riscv/mm/fault.c
449 @@ -29,6 +29,7 @@
450
451 #include <asm/pgalloc.h>
452 #include <asm/ptrace.h>
453 +#include <asm/tlbflush.h>
454
455 /*
456 * This routine handles page faults. It determines the address and the
457 @@ -281,6 +282,18 @@ vmalloc_fault:
458 pte_k = pte_offset_kernel(pmd_k, addr);
459 if (!pte_present(*pte_k))
460 goto no_context;
461 +
462 + /*
463 + * The kernel assumes that TLBs don't cache invalid
464 + * entries, but in RISC-V, SFENCE.VMA specifies an
465 + * ordering constraint, not a cache flush; it is
466 + * necessary even after writing invalid entries.
467 + * Relying on flush_tlb_fix_spurious_fault would
468 + * suffice, but the extra traps reduce
469 + * performance. So, eagerly SFENCE.VMA.
470 + */
471 + local_flush_tlb_page(addr);
472 +
473 return;
474 }
475 }
476 diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
477 index 8c00fd509c45..1a6a7092d942 100644
478 --- a/arch/s390/include/asm/ap.h
479 +++ b/arch/s390/include/asm/ap.h
480 @@ -221,16 +221,22 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
481 void *ind)
482 {
483 register unsigned long reg0 asm ("0") = qid | (3UL << 24);
484 - register struct ap_qirq_ctrl reg1_in asm ("1") = qirqctrl;
485 - register struct ap_queue_status reg1_out asm ("1");
486 + register union {
487 + unsigned long value;
488 + struct ap_qirq_ctrl qirqctrl;
489 + struct ap_queue_status status;
490 + } reg1 asm ("1");
491 register void *reg2 asm ("2") = ind;
492
493 + reg1.qirqctrl = qirqctrl;
494 +
495 asm volatile(
496 ".long 0xb2af0000" /* PQAP(AQIC) */
497 - : "=d" (reg1_out)
498 - : "d" (reg0), "d" (reg1_in), "d" (reg2)
499 + : "+d" (reg1)
500 + : "d" (reg0), "d" (reg2)
501 : "cc");
502 - return reg1_out;
503 +
504 + return reg1.status;
505 }
506
507 /*
508 @@ -264,17 +270,21 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
509 {
510 register unsigned long reg0 asm ("0") = qid | (5UL << 24)
511 | ((ifbit & 0x01) << 22);
512 - register unsigned long reg1_in asm ("1") = apinfo->val;
513 - register struct ap_queue_status reg1_out asm ("1");
514 + register union {
515 + unsigned long value;
516 + struct ap_queue_status status;
517 + } reg1 asm ("1");
518 register unsigned long reg2 asm ("2");
519
520 + reg1.value = apinfo->val;
521 +
522 asm volatile(
523 ".long 0xb2af0000" /* PQAP(QACT) */
524 - : "+d" (reg1_in), "=d" (reg1_out), "=d" (reg2)
525 + : "+d" (reg1), "=d" (reg2)
526 : "d" (reg0)
527 : "cc");
528 apinfo->val = reg2;
529 - return reg1_out;
530 + return reg1.status;
531 }
532
533 /**
534 diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
535 index 40f651292aa7..9c7dc970e966 100644
536 --- a/arch/s390/include/asm/jump_label.h
537 +++ b/arch/s390/include/asm/jump_label.h
538 @@ -10,6 +10,12 @@
539 #define JUMP_LABEL_NOP_SIZE 6
540 #define JUMP_LABEL_NOP_OFFSET 2
541
542 +#if __GNUC__ < 9
543 +#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "X"
544 +#else
545 +#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "jdd"
546 +#endif
547 +
548 /*
549 * We use a brcl 0,2 instruction for jump labels at compile time so it
550 * can be easily distinguished from a hotpatch generated instruction.
551 @@ -19,9 +25,9 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
552 asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
553 ".pushsection __jump_table, \"aw\"\n"
554 ".balign 8\n"
555 - ".quad 0b, %l[label], %0\n"
556 + ".quad 0b, %l[label], %0+%1\n"
557 ".popsection\n"
558 - : : "X" (&((char *)key)[branch]) : : label);
559 + : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label);
560
561 return false;
562 label:
563 @@ -33,9 +39,9 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
564 asm_volatile_goto("0: brcl 15, %l[label]\n"
565 ".pushsection __jump_table, \"aw\"\n"
566 ".balign 8\n"
567 - ".quad 0b, %l[label], %0\n"
568 + ".quad 0b, %l[label], %0+%1\n"
569 ".popsection\n"
570 - : : "X" (&((char *)key)[branch]) : : label);
571 + : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label);
572
573 return false;
574 label:
575 diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
576 index 39a2503fa3e1..51028abe5e90 100644
577 --- a/arch/sparc/kernel/mdesc.c
578 +++ b/arch/sparc/kernel/mdesc.c
579 @@ -357,6 +357,8 @@ static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
580
581 node_info->vdev_port.id = *idp;
582 node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL);
583 + if (!node_info->vdev_port.name)
584 + return -1;
585 node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp;
586
587 return 0;
588 diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
589 index 67b3e6b3ce5d..1ad5911f62b4 100644
590 --- a/arch/sparc/kernel/perf_event.c
591 +++ b/arch/sparc/kernel/perf_event.c
592 @@ -891,6 +891,10 @@ static int sparc_perf_event_set_period(struct perf_event *event,
593 s64 period = hwc->sample_period;
594 int ret = 0;
595
596 + /* The period may have been changed by PERF_EVENT_IOC_PERIOD */
597 + if (unlikely(period != hwc->last_period))
598 + left = period - (hwc->last_period - left);
599 +
600 if (unlikely(left <= -period)) {
601 left = period;
602 local64_set(&hwc->period_left, left);
603 diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
604 index 643670fb8943..274d220d0a83 100644
605 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
606 +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
607 @@ -2379,7 +2379,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
608 if (closid_allocated(i) && i != closid) {
609 mode = rdtgroup_mode_by_closid(i);
610 if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
611 - break;
612 + continue;
613 used_b |= *ctrl;
614 if (mode == RDT_MODE_SHAREABLE)
615 d->new_ctrl |= *ctrl;
616 diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
617 index 351283b60df6..a285fbd0fd9b 100644
618 --- a/arch/xtensa/kernel/setup.c
619 +++ b/arch/xtensa/kernel/setup.c
620 @@ -310,7 +310,8 @@ extern char _SecondaryResetVector_text_start;
621 extern char _SecondaryResetVector_text_end;
622 #endif
623
624 -static inline int mem_reserve(unsigned long start, unsigned long end)
625 +static inline int __init_memblock mem_reserve(unsigned long start,
626 + unsigned long end)
627 {
628 return memblock_reserve(start, end - start);
629 }
630 diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
631 index c4eb55e3011c..c05ef7f1d7b6 100644
632 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
633 +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
634 @@ -512,7 +512,8 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
635 return vchan_tx_prep(&chan->vc, &first->vd, flags);
636
637 err_desc_get:
638 - axi_desc_put(first);
639 + if (first)
640 + axi_desc_put(first);
641 return NULL;
642 }
643
644 diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
645 index 55df0d41355b..1ed1c7efa288 100644
646 --- a/drivers/dma/sprd-dma.c
647 +++ b/drivers/dma/sprd-dma.c
648 @@ -663,7 +663,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
649 temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
650 hw->frg_len = temp;
651
652 - hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
653 + hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK;
654 hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
655
656 temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
657 diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
658 index 0e81d33af856..c9a613dc9eb7 100644
659 --- a/drivers/fpga/dfl-afu-dma-region.c
660 +++ b/drivers/fpga/dfl-afu-dma-region.c
661 @@ -399,7 +399,7 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
662 region->pages[0], 0,
663 region->length,
664 DMA_BIDIRECTIONAL);
665 - if (dma_mapping_error(&pdata->dev->dev, region->iova)) {
666 + if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
667 dev_err(&pdata->dev->dev, "failed to map for dma\n");
668 ret = -EFAULT;
669 goto unpin_pages;
670 diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
671 index a9b521bccb06..ab361ec78df4 100644
672 --- a/drivers/fpga/dfl.c
673 +++ b/drivers/fpga/dfl.c
674 @@ -40,6 +40,13 @@ enum dfl_fpga_devt_type {
675 DFL_FPGA_DEVT_MAX,
676 };
677
678 +static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];
679 +
680 +static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
681 + "dfl-fme-pdata",
682 + "dfl-port-pdata",
683 +};
684 +
685 /**
686 * dfl_dev_info - dfl feature device information.
687 * @name: name string of the feature platform device.
688 @@ -443,11 +450,16 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
689 struct platform_device *fdev = binfo->feature_dev;
690 struct dfl_feature_platform_data *pdata;
691 struct dfl_feature_info *finfo, *p;
692 + enum dfl_id_type type;
693 int ret, index = 0;
694
695 if (!fdev)
696 return 0;
697
698 + type = feature_dev_id_type(fdev);
699 + if (WARN_ON_ONCE(type >= DFL_ID_MAX))
700 + return -EINVAL;
701 +
702 /*
703 * we do not need to care for the memory which is associated with
704 * the platform device. After calling platform_device_unregister(),
705 @@ -463,6 +475,8 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
706 pdata->num = binfo->feature_num;
707 pdata->dfl_cdev = binfo->cdev;
708 mutex_init(&pdata->lock);
709 + lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
710 + dfl_pdata_key_strings[type]);
711
712 /*
713 * the count should be initialized to 0 to make sure
714 @@ -497,7 +511,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
715
716 ret = platform_device_add(binfo->feature_dev);
717 if (!ret) {
718 - if (feature_dev_id_type(binfo->feature_dev) == PORT_ID)
719 + if (type == PORT_ID)
720 dfl_fpga_cdev_add_port_dev(binfo->cdev,
721 binfo->feature_dev);
722 else
723 diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
724 index e4d67b70244d..e69d996eabad 100644
725 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c
726 +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
727 @@ -186,20 +186,20 @@ static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc,
728 clk_disable_unprepare(hdlcd->clk);
729 }
730
731 -static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
732 - struct drm_crtc_state *state)
733 +static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc,
734 + const struct drm_display_mode *mode)
735 {
736 struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
737 - struct drm_display_mode *mode = &state->adjusted_mode;
738 long rate, clk_rate = mode->clock * 1000;
739
740 rate = clk_round_rate(hdlcd->clk, clk_rate);
741 - if (rate != clk_rate) {
742 + /* 0.1% seems a close enough tolerance for the TDA19988 on Juno */
743 + if (abs(rate - clk_rate) * 1000 > clk_rate) {
744 /* clock required by mode not supported by hardware */
745 - return -EINVAL;
746 + return MODE_NOCLOCK;
747 }
748
749 - return 0;
750 + return MODE_OK;
751 }
752
753 static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
754 @@ -220,7 +220,7 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
755 }
756
757 static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
758 - .atomic_check = hdlcd_crtc_atomic_check,
759 + .mode_valid = hdlcd_crtc_mode_valid,
760 .atomic_begin = hdlcd_crtc_atomic_begin,
761 .atomic_enable = hdlcd_crtc_atomic_enable,
762 .atomic_disable = hdlcd_crtc_atomic_disable,
763 diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
764 index 94d6dabec2dc..1ab511e33243 100644
765 --- a/drivers/gpu/drm/arm/malidp_drv.c
766 +++ b/drivers/gpu/drm/arm/malidp_drv.c
767 @@ -190,6 +190,7 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
768 {
769 struct drm_device *drm = state->dev;
770 struct malidp_drm *malidp = drm->dev_private;
771 + int loop = 5;
772
773 malidp->event = malidp->crtc.state->event;
774 malidp->crtc.state->event = NULL;
775 @@ -204,8 +205,18 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
776 drm_crtc_vblank_get(&malidp->crtc);
777
778 /* only set config_valid if the CRTC is enabled */
779 - if (malidp_set_and_wait_config_valid(drm) < 0)
780 + if (malidp_set_and_wait_config_valid(drm) < 0) {
781 + /*
782 + * make a loop around the second CVAL setting and
783 + * try 5 times before giving up.
784 + */
785 + while (loop--) {
786 + if (!malidp_set_and_wait_config_valid(drm))
787 + break;
788 + }
789 DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
790 + }
791 +
792 } else if (malidp->event) {
793 /* CRTC inactive means vblank IRQ is disabled, send event directly */
794 spin_lock_irq(&drm->event_lock);
795 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
796 index 8b9270f31409..e4e09d47c5c0 100644
797 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
798 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
799 @@ -136,6 +136,114 @@ static int vmw_close_channel(struct rpc_channel *channel)
800 return 0;
801 }
802
803 +/**
804 + * vmw_port_hb_out - Send the message payload either through the
805 + * high-bandwidth port if available, or through the backdoor otherwise.
806 + * @channel: The rpc channel.
807 + * @msg: NULL-terminated message.
808 + * @hb: Whether the high-bandwidth port is available.
809 + *
810 + * Return: The port status.
811 + */
812 +static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
813 + const char *msg, bool hb)
814 +{
815 + unsigned long si, di, eax, ebx, ecx, edx;
816 + unsigned long msg_len = strlen(msg);
817 +
818 + if (hb) {
819 + unsigned long bp = channel->cookie_high;
820 +
821 + si = (uintptr_t) msg;
822 + di = channel->cookie_low;
823 +
824 + VMW_PORT_HB_OUT(
825 + (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
826 + msg_len, si, di,
827 + VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
828 + VMW_HYPERVISOR_MAGIC, bp,
829 + eax, ebx, ecx, edx, si, di);
830 +
831 + return ebx;
832 + }
833 +
834 + /* HB port not available. Send the message 4 bytes at a time. */
835 + ecx = MESSAGE_STATUS_SUCCESS << 16;
836 + while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
837 + unsigned int bytes = min_t(size_t, msg_len, 4);
838 + unsigned long word = 0;
839 +
840 + memcpy(&word, msg, bytes);
841 + msg_len -= bytes;
842 + msg += bytes;
843 + si = channel->cookie_high;
844 + di = channel->cookie_low;
845 +
846 + VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
847 + word, si, di,
848 + VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
849 + VMW_HYPERVISOR_MAGIC,
850 + eax, ebx, ecx, edx, si, di);
851 + }
852 +
853 + return ecx;
854 +}
855 +
856 +/**
857 + * vmw_port_hb_in - Receive the message payload either through the
858 + * high-bandwidth port if available, or through the backdoor otherwise.
859 + * @channel: The rpc channel.
860 + * @reply: Pointer to buffer holding reply.
861 + * @reply_len: Length of the reply.
862 + * @hb: Whether the high-bandwidth port is available.
863 + *
864 + * Return: The port status.
865 + */
866 +static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
867 + unsigned long reply_len, bool hb)
868 +{
869 + unsigned long si, di, eax, ebx, ecx, edx;
870 +
871 + if (hb) {
872 + unsigned long bp = channel->cookie_low;
873 +
874 + si = channel->cookie_high;
875 + di = (uintptr_t) reply;
876 +
877 + VMW_PORT_HB_IN(
878 + (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
879 + reply_len, si, di,
880 + VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
881 + VMW_HYPERVISOR_MAGIC, bp,
882 + eax, ebx, ecx, edx, si, di);
883 +
884 + return ebx;
885 + }
886 +
887 + /* HB port not available. Retrieve the message 4 bytes at a time. */
888 + ecx = MESSAGE_STATUS_SUCCESS << 16;
889 + while (reply_len) {
890 + unsigned int bytes = min_t(unsigned long, reply_len, 4);
891 +
892 + si = channel->cookie_high;
893 + di = channel->cookie_low;
894 +
895 + VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
896 + MESSAGE_STATUS_SUCCESS, si, di,
897 + VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
898 + VMW_HYPERVISOR_MAGIC,
899 + eax, ebx, ecx, edx, si, di);
900 +
901 + if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
902 + break;
903 +
904 + memcpy(reply, &ebx, bytes);
905 + reply_len -= bytes;
906 + reply += bytes;
907 + }
908 +
909 + return ecx;
910 +}
911
912
913 /**
914 @@ -148,11 +256,10 @@ static int vmw_close_channel(struct rpc_channel *channel)
915 */
916 static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
917 {
918 - unsigned long eax, ebx, ecx, edx, si, di, bp;
919 + unsigned long eax, ebx, ecx, edx, si, di;
920 size_t msg_len = strlen(msg);
921 int retries = 0;
922
923 -
924 while (retries < RETRIES) {
925 retries++;
926
927 @@ -166,23 +273,14 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
928 VMW_HYPERVISOR_MAGIC,
929 eax, ebx, ecx, edx, si, di);
930
931 - if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
932 - (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
933 - /* Expected success + high-bandwidth. Give up. */
934 + if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
935 + /* Expected success. Give up. */
936 return -EINVAL;
937 }
938
939 /* Send msg */
940 - si = (uintptr_t) msg;
941 - di = channel->cookie_low;
942 - bp = channel->cookie_high;
943 -
944 - VMW_PORT_HB_OUT(
945 - (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
946 - msg_len, si, di,
947 - VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
948 - VMW_HYPERVISOR_MAGIC, bp,
949 - eax, ebx, ecx, edx, si, di);
950 + ebx = vmw_port_hb_out(channel, msg,
951 + !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
952
953 if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
954 return 0;
955 @@ -211,7 +309,7 @@ STACK_FRAME_NON_STANDARD(vmw_send_msg);
956 static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
957 size_t *msg_len)
958 {
959 - unsigned long eax, ebx, ecx, edx, si, di, bp;
960 + unsigned long eax, ebx, ecx, edx, si, di;
961 char *reply;
962 size_t reply_len;
963 int retries = 0;
964 @@ -233,8 +331,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
965 VMW_HYPERVISOR_MAGIC,
966 eax, ebx, ecx, edx, si, di);
967
968 - if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
969 - (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
970 + if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
971 DRM_ERROR("Failed to get reply size for host message.\n");
972 return -EINVAL;
973 }
974 @@ -252,17 +349,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
975
976
977 /* Receive buffer */
978 - si = channel->cookie_high;
979 - di = (uintptr_t) reply;
980 - bp = channel->cookie_low;
981 -
982 - VMW_PORT_HB_IN(
983 - (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
984 - reply_len, si, di,
985 - VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
986 - VMW_HYPERVISOR_MAGIC, bp,
987 - eax, ebx, ecx, edx, si, di);
988 -
989 + ebx = vmw_port_hb_in(channel, reply, reply_len,
990 + !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
991 if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
992 kfree(reply);
993
994 diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
995 index fcdbac4a56e3..6b3559f58b67 100644
996 --- a/drivers/hwmon/hwmon.c
997 +++ b/drivers/hwmon/hwmon.c
998 @@ -619,7 +619,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
999 if (err)
1000 goto free_hwmon;
1001
1002 - if (dev && chip && chip->ops->read &&
1003 + if (dev && dev->of_node && chip && chip->ops->read &&
1004 chip->info[0]->type == hwmon_chip &&
1005 (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
1006 const struct hwmon_channel_info **info = chip->info;
1007 diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
1008 index 2e2b5851139c..cd24b375df1e 100644
1009 --- a/drivers/hwmon/pmbus/pmbus_core.c
1010 +++ b/drivers/hwmon/pmbus/pmbus_core.c
1011 @@ -1230,7 +1230,8 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
1012 const struct pmbus_driver_info *info,
1013 const char *name,
1014 int index, int page,
1015 - const struct pmbus_sensor_attr *attr)
1016 + const struct pmbus_sensor_attr *attr,
1017 + bool paged)
1018 {
1019 struct pmbus_sensor *base;
1020 bool upper = !!(attr->gbit & 0xff00); /* need to check STATUS_WORD */
1021 @@ -1238,7 +1239,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
1022
1023 if (attr->label) {
1024 ret = pmbus_add_label(data, name, index, attr->label,
1025 - attr->paged ? page + 1 : 0);
1026 + paged ? page + 1 : 0);
1027 if (ret)
1028 return ret;
1029 }
1030 @@ -1271,6 +1272,30 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
1031 return 0;
1032 }
1033
1034 +static bool pmbus_sensor_is_paged(const struct pmbus_driver_info *info,
1035 + const struct pmbus_sensor_attr *attr)
1036 +{
1037 + int p;
1038 +
1039 + if (attr->paged)
1040 + return true;
1041 +
1042 + /*
1043 + * Some attributes may be present on more than one page despite
1044 + * not being marked with the paged attribute. If that is the case,
1045 + * then treat the sensor as being paged and add the page suffix to the
1046 + * attribute name.
1047 + * We don't just add the paged attribute to all such attributes, in
1048 + * order to maintain the un-suffixed labels in the case where the
1049 + * attribute is only on page 0.
1050 + */
1051 + for (p = 1; p < info->pages; p++) {
1052 + if (info->func[p] & attr->func)
1053 + return true;
1054 + }
1055 + return false;
1056 +}
1057 +
1058 static int pmbus_add_sensor_attrs(struct i2c_client *client,
1059 struct pmbus_data *data,
1060 const char *name,
1061 @@ -1284,14 +1309,15 @@ static int pmbus_add_sensor_attrs(struct i2c_client *client,
1062 index = 1;
1063 for (i = 0; i < nattrs; i++) {
1064 int page, pages;
1065 + bool paged = pmbus_sensor_is_paged(info, attrs);
1066
1067 - pages = attrs->paged ? info->pages : 1;
1068 + pages = paged ? info->pages : 1;
1069 for (page = 0; page < pages; page++) {
1070 if (!(info->func[page] & attrs->func))
1071 continue;
1072 ret = pmbus_add_sensor_attrs_one(client, data, info,
1073 name, index, page,
1074 - attrs);
1075 + attrs, paged);
1076 if (ret)
1077 return ret;
1078 index++;
1079 diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
1080 index 9851311aa3fd..2d54d9cac61d 100644
1081 --- a/drivers/iio/temperature/mlx90632.c
1082 +++ b/drivers/iio/temperature/mlx90632.c
1083 @@ -81,6 +81,8 @@
1084 /* Magic constants */
1085 #define MLX90632_ID_MEDICAL 0x0105 /* EEPROM DSPv5 Medical device id */
1086 #define MLX90632_ID_CONSUMER 0x0205 /* EEPROM DSPv5 Consumer device id */
1087 +#define MLX90632_DSP_VERSION 5 /* DSP version */
1088 +#define MLX90632_DSP_MASK GENMASK(7, 0) /* DSP version in EE_VERSION */
1089 #define MLX90632_RESET_CMD 0x0006 /* Reset sensor (address or global) */
1090 #define MLX90632_REF_12 12LL /**< ResCtrlRef value of Ch 1 or Ch 2 */
1091 #define MLX90632_REF_3 12LL /**< ResCtrlRef value of Channel 3 */
1092 @@ -666,10 +668,13 @@ static int mlx90632_probe(struct i2c_client *client,
1093 } else if (read == MLX90632_ID_CONSUMER) {
1094 dev_dbg(&client->dev,
1095 "Detected Consumer EEPROM calibration %x\n", read);
1096 + } else if ((read & MLX90632_DSP_MASK) == MLX90632_DSP_VERSION) {
1097 + dev_dbg(&client->dev,
1098 + "Detected Unknown EEPROM calibration %x\n", read);
1099 } else {
1100 dev_err(&client->dev,
1101 - "EEPROM version mismatch %x (expected %x or %x)\n",
1102 - read, MLX90632_ID_CONSUMER, MLX90632_ID_MEDICAL);
1103 + "Wrong DSP version %x (expected %x)\n",
1104 + read, MLX90632_DSP_VERSION);
1105 return -EPROTONOSUPPORT;
1106 }
1107
1108 diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
1109 index b12c8ff8ed66..d8eb4dc04d69 100644
1110 --- a/drivers/infiniband/hw/hfi1/chip.c
1111 +++ b/drivers/infiniband/hw/hfi1/chip.c
1112 @@ -9849,6 +9849,7 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
1113
1114 /* disable the port */
1115 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
1116 + cancel_work_sync(&ppd->freeze_work);
1117 }
1118
1119 static inline int init_cpu_counters(struct hfi1_devdata *dd)
1120 diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
1121 index e2290f32c8d9..7eaff4dcbfd7 100644
1122 --- a/drivers/infiniband/hw/hfi1/fault.c
1123 +++ b/drivers/infiniband/hw/hfi1/fault.c
1124 @@ -153,6 +153,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
1125 char *dash;
1126 unsigned long range_start, range_end, i;
1127 bool remove = false;
1128 + unsigned long bound = 1U << BITS_PER_BYTE;
1129
1130 end = strchr(ptr, ',');
1131 if (end)
1132 @@ -178,6 +179,10 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
1133 BITS_PER_BYTE);
1134 break;
1135 }
1136 + /* Check the inputs */
1137 + if (range_start >= bound || range_end >= bound)
1138 + break;
1139 +
1140 for (i = range_start; i <= range_end; i++) {
1141 if (remove)
1142 clear_bit(i, fault->opcodes);
1143 diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
1144 index dbe7d14a5c76..4e986ca4dd35 100644
1145 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
1146 +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
1147 @@ -324,6 +324,9 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
1148 u32 *tidlist = NULL;
1149 struct tid_user_buf *tidbuf;
1150
1151 + if (!PAGE_ALIGNED(tinfo->vaddr))
1152 + return -EINVAL;
1153 +
1154 tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
1155 if (!tidbuf)
1156 return -ENOMEM;
1157 diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
1158 index 48692adbe811..27d9c4cefdc7 100644
1159 --- a/drivers/infiniband/hw/hfi1/verbs.c
1160 +++ b/drivers/infiniband/hw/hfi1/verbs.c
1161 @@ -1418,8 +1418,6 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
1162 rdi->dparms.props.max_cq = hfi1_max_cqs;
1163 rdi->dparms.props.max_ah = hfi1_max_ahs;
1164 rdi->dparms.props.max_cqe = hfi1_max_cqes;
1165 - rdi->dparms.props.max_mr = rdi->lkey_table.max;
1166 - rdi->dparms.props.max_fmr = rdi->lkey_table.max;
1167 rdi->dparms.props.max_map_per_fmr = 32767;
1168 rdi->dparms.props.max_pd = hfi1_max_pds;
1169 rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
1170 diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
1171 index c4ab2d5b4502..8f766dd3f61c 100644
1172 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
1173 +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
1174 @@ -100,7 +100,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
1175 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
1176 struct hfi1_qp_priv *priv;
1177
1178 - tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
1179 + tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
1180 if (tx)
1181 goto out;
1182 priv = qp->priv;
1183 diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
1184 index 1c19bbc764b2..b1a78985b4ec 100644
1185 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
1186 +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
1187 @@ -72,6 +72,7 @@ struct hfi1_ibdev;
1188 struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
1189 struct rvt_qp *qp);
1190
1191 +#define VERBS_TXREQ_GFP (GFP_ATOMIC | __GFP_NOWARN)
1192 static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
1193 struct rvt_qp *qp)
1194 __must_hold(&qp->slock)
1195 @@ -79,7 +80,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
1196 struct verbs_txreq *tx;
1197 struct hfi1_qp_priv *priv = qp->priv;
1198
1199 - tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
1200 + tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
1201 if (unlikely(!tx)) {
1202 /* call slow path to get the lock */
1203 tx = __get_txreq(dev, qp);
1204 diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
1205 index 41babbc0db58..803c3544c75b 100644
1206 --- a/drivers/infiniband/hw/qib/qib_verbs.c
1207 +++ b/drivers/infiniband/hw/qib/qib_verbs.c
1208 @@ -1495,8 +1495,6 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
1209 rdi->dparms.props.max_cq = ib_qib_max_cqs;
1210 rdi->dparms.props.max_cqe = ib_qib_max_cqes;
1211 rdi->dparms.props.max_ah = ib_qib_max_ahs;
1212 - rdi->dparms.props.max_mr = rdi->lkey_table.max;
1213 - rdi->dparms.props.max_fmr = rdi->lkey_table.max;
1214 rdi->dparms.props.max_map_per_fmr = 32767;
1215 rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1216 rdi->dparms.props.max_qp_init_rd_atom = 255;
1217 diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
1218 index 5819c9d6ffdc..39d101df229d 100644
1219 --- a/drivers/infiniband/sw/rdmavt/mr.c
1220 +++ b/drivers/infiniband/sw/rdmavt/mr.c
1221 @@ -96,6 +96,8 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi)
1222 for (i = 0; i < rdi->lkey_table.max; i++)
1223 RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
1224
1225 + rdi->dparms.props.max_mr = rdi->lkey_table.max;
1226 + rdi->dparms.props.max_fmr = rdi->lkey_table.max;
1227 return 0;
1228 }
1229
1230 diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
1231 index 5ce403c6cddb..7d03680afd91 100644
1232 --- a/drivers/infiniband/sw/rdmavt/qp.c
1233 +++ b/drivers/infiniband/sw/rdmavt/qp.c
1234 @@ -412,7 +412,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
1235 offset = qpt->incr | ((offset & 1) ^ 1);
1236 }
1237 /* there can be no set bits in low-order QoS bits */
1238 - WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
1239 + WARN_ON(rdi->dparms.qos_shift > 1 &&
1240 + offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
1241 qpn = mk_qpn(qpt, map, offset);
1242 }
1243
1244 diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
1245 index 26ec603fe220..83d1499fe021 100644
1246 --- a/drivers/input/misc/uinput.c
1247 +++ b/drivers/input/misc/uinput.c
1248 @@ -1051,13 +1051,31 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1249
1250 #ifdef CONFIG_COMPAT
1251
1252 -#define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
1253 +/*
1254 + * These IOCTLs change their size and thus their numbers between
1255 + * 32 and 64 bits.
1256 + */
1257 +#define UI_SET_PHYS_COMPAT \
1258 + _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
1259 +#define UI_BEGIN_FF_UPLOAD_COMPAT \
1260 + _IOWR(UINPUT_IOCTL_BASE, 200, struct uinput_ff_upload_compat)
1261 +#define UI_END_FF_UPLOAD_COMPAT \
1262 + _IOW(UINPUT_IOCTL_BASE, 201, struct uinput_ff_upload_compat)
1263
1264 static long uinput_compat_ioctl(struct file *file,
1265 unsigned int cmd, unsigned long arg)
1266 {
1267 - if (cmd == UI_SET_PHYS_COMPAT)
1268 + switch (cmd) {
1269 + case UI_SET_PHYS_COMPAT:
1270 cmd = UI_SET_PHYS;
1271 + break;
1272 + case UI_BEGIN_FF_UPLOAD_COMPAT:
1273 + cmd = UI_BEGIN_FF_UPLOAD;
1274 + break;
1275 + case UI_END_FF_UPLOAD_COMPAT:
1276 + cmd = UI_END_FF_UPLOAD;
1277 + break;
1278 + }
1279
1280 return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
1281 }
1282 diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1283 index b6da0c1267e3..8e6077d8e434 100644
1284 --- a/drivers/input/mouse/synaptics.c
1285 +++ b/drivers/input/mouse/synaptics.c
1286 @@ -179,6 +179,8 @@ static const char * const smbus_pnp_ids[] = {
1287 "LEN0096", /* X280 */
1288 "LEN0097", /* X280 -> ALPS trackpoint */
1289 "LEN200f", /* T450s */
1290 + "LEN2054", /* E480 */
1291 + "LEN2055", /* E580 */
1292 "SYN3052", /* HP EliteBook 840 G4 */
1293 "SYN3221", /* HP 15-ay000 */
1294 NULL
1295 diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
1296 index d196ac3d8b8c..e5c3b066bd2a 100644
1297 --- a/drivers/input/touchscreen/silead.c
1298 +++ b/drivers/input/touchscreen/silead.c
1299 @@ -604,6 +604,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = {
1300 { "MSSL1680", 0 },
1301 { "MSSL0001", 0 },
1302 { "MSSL0002", 0 },
1303 + { "MSSL0017", 0 },
1304 { }
1305 };
1306 MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match);
1307 diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
1308 index 6600b3466dfb..0a74785e575b 100644
1309 --- a/drivers/mmc/core/core.c
1310 +++ b/drivers/mmc/core/core.c
1311 @@ -144,8 +144,9 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
1312 int err = cmd->error;
1313
1314 /* Flag re-tuning needed on CRC errors */
1315 - if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
1316 - cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
1317 + if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
1318 + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
1319 + !host->retune_crc_disable &&
1320 (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
1321 (mrq->data && mrq->data->error == -EILSEQ) ||
1322 (mrq->stop && mrq->stop->error == -EILSEQ)))
1323 diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
1324 index d8e17ea6126d..0aa99694b937 100644
1325 --- a/drivers/mmc/core/sdio.c
1326 +++ b/drivers/mmc/core/sdio.c
1327 @@ -934,6 +934,10 @@ static int mmc_sdio_pre_suspend(struct mmc_host *host)
1328 */
1329 static int mmc_sdio_suspend(struct mmc_host *host)
1330 {
1331 + /* Prevent processing of SDIO IRQs in suspended state. */
1332 + mmc_card_set_suspended(host->card);
1333 + cancel_delayed_work_sync(&host->sdio_irq_work);
1334 +
1335 mmc_claim_host(host);
1336
1337 if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host))
1338 @@ -982,13 +986,20 @@ static int mmc_sdio_resume(struct mmc_host *host)
1339 err = sdio_enable_4bit_bus(host->card);
1340 }
1341
1342 - if (!err && host->sdio_irqs) {
1343 + if (err)
1344 + goto out;
1345 +
1346 + /* Allow SDIO IRQs to be processed again. */
1347 + mmc_card_clr_suspended(host->card);
1348 +
1349 + if (host->sdio_irqs) {
1350 if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
1351 wake_up_process(host->sdio_irq_thread);
1352 else if (host->caps & MMC_CAP_SDIO_IRQ)
1353 host->ops->enable_sdio_irq(host, 1);
1354 }
1355
1356 +out:
1357 mmc_release_host(host);
1358
1359 host->pm_flags &= ~MMC_PM_KEEP_POWER;
1360 diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
1361 index d40744bbafa9..ed2d8c48ea17 100644
1362 --- a/drivers/mmc/core/sdio_io.c
1363 +++ b/drivers/mmc/core/sdio_io.c
1364 @@ -18,6 +18,7 @@
1365 #include "sdio_ops.h"
1366 #include "core.h"
1367 #include "card.h"
1368 +#include "host.h"
1369
1370 /**
1371 * sdio_claim_host - exclusively claim a bus for a certain SDIO function
1372 @@ -725,3 +726,79 @@ int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags)
1373 return 0;
1374 }
1375 EXPORT_SYMBOL_GPL(sdio_set_host_pm_flags);
1376 +
1377 +/**
1378 + * sdio_retune_crc_disable - temporarily disable retuning on CRC errors
1379 + * @func: SDIO function attached to host
1380 + *
1381 + * If the SDIO card is known to be in a state where it might produce
1382 + * CRC errors on the bus in response to commands (like if we know it is
1383 + * transitioning between power states), an SDIO function driver can
1384 + * call this function to temporarily disable the SD/MMC core behavior of
1385 + * triggering an automatic retuning.
1386 + *
1387 + * This function should be called while the host is claimed and the host
1388 + * should remain claimed until sdio_retune_crc_enable() is called.
1389 + * Specifically, the expected sequence of calls is:
1390 + * - sdio_claim_host()
1391 + * - sdio_retune_crc_disable()
1392 + * - some number of calls like sdio_writeb() and sdio_readb()
1393 + * - sdio_retune_crc_enable()
1394 + * - sdio_release_host()
1395 + */
1396 +void sdio_retune_crc_disable(struct sdio_func *func)
1397 +{
1398 + func->card->host->retune_crc_disable = true;
1399 +}
1400 +EXPORT_SYMBOL_GPL(sdio_retune_crc_disable);
1401 +
1402 +/**
1403 + * sdio_retune_crc_enable - re-enable retuning on CRC errors
1404 + * @func: SDIO function attached to host
1405 + *
1406 + * This is the compement to sdio_retune_crc_disable().
1407 + */
1408 +void sdio_retune_crc_enable(struct sdio_func *func)
1409 +{
1410 + func->card->host->retune_crc_disable = false;
1411 +}
1412 +EXPORT_SYMBOL_GPL(sdio_retune_crc_enable);
1413 +
1414 +/**
1415 + * sdio_retune_hold_now - start deferring retuning requests till release
1416 + * @func: SDIO function attached to host
1417 + *
1418 + * This function can be called if it's currently a bad time to do
1419 + * a retune of the SDIO card. Retune requests made during this time
1420 + * will be held and we'll actually do the retune sometime after the
1421 + * release.
1422 + *
1423 + * This function could be useful if an SDIO card is in a power state
1424 + * where it can respond to a small subset of commands that doesn't
1425 + * include the retuning command. Care should be taken when using
1426 + * this function since (presumably) the retuning request we might be
1427 + * deferring was made for a good reason.
1428 + *
1429 + * This function should be called while the host is claimed.
1430 + */
1431 +void sdio_retune_hold_now(struct sdio_func *func)
1432 +{
1433 + mmc_retune_hold_now(func->card->host);
1434 +}
1435 +EXPORT_SYMBOL_GPL(sdio_retune_hold_now);
1436 +
1437 +/**
1438 + * sdio_retune_release - signal that it's OK to retune now
1439 + * @func: SDIO function attached to host
1440 + *
1441 + * This is the complement to sdio_retune_hold_now(). Calling this
1442 + * function won't make a retune happen right away but will allow
1443 + * them to be scheduled normally.
1444 + *
1445 + * This function should be called while the host is claimed.
1446 + */
1447 +void sdio_retune_release(struct sdio_func *func)
1448 +{
1449 + mmc_retune_release(func->card->host);
1450 +}
1451 +EXPORT_SYMBOL_GPL(sdio_retune_release);
1452 diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
1453 index 7ca7b99413f0..b299a24d33f9 100644
1454 --- a/drivers/mmc/core/sdio_irq.c
1455 +++ b/drivers/mmc/core/sdio_irq.c
1456 @@ -38,6 +38,10 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
1457 unsigned char pending;
1458 struct sdio_func *func;
1459
1460 + /* Don't process SDIO IRQs if the card is suspended. */
1461 + if (mmc_card_suspended(card))
1462 + return 0;
1463 +
1464 /*
1465 * Optimization, if there is only 1 function interrupt registered
1466 * and we know an IRQ was signaled then call irq handler directly.
1467 diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
1468 index cc3ffeffd7a2..fa8d9da2ab7f 100644
1469 --- a/drivers/mmc/host/sdhci-pci-o2micro.c
1470 +++ b/drivers/mmc/host/sdhci-pci-o2micro.c
1471 @@ -117,6 +117,7 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
1472 */
1473 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1474 current_bus_width = mmc->ios.bus_width;
1475 + mmc->ios.bus_width = MMC_BUS_WIDTH_4;
1476 sdhci_set_bus_width(host, MMC_BUS_WIDTH_4);
1477 }
1478
1479 @@ -128,8 +129,10 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
1480
1481 sdhci_end_tuning(host);
1482
1483 - if (current_bus_width == MMC_BUS_WIDTH_8)
1484 + if (current_bus_width == MMC_BUS_WIDTH_8) {
1485 + mmc->ios.bus_width = MMC_BUS_WIDTH_8;
1486 sdhci_set_bus_width(host, current_bus_width);
1487 + }
1488
1489 host->flags &= ~SDHCI_HS400_TUNING;
1490 return 0;
1491 diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
1492 index 2646faffd36e..6f265d2e647b 100644
1493 --- a/drivers/net/can/flexcan.c
1494 +++ b/drivers/net/can/flexcan.c
1495 @@ -165,7 +165,7 @@
1496 #define FLEXCAN_MB_CNT_LENGTH(x) (((x) & 0xf) << 16)
1497 #define FLEXCAN_MB_CNT_TIMESTAMP(x) ((x) & 0xffff)
1498
1499 -#define FLEXCAN_TIMEOUT_US (50)
1500 +#define FLEXCAN_TIMEOUT_US (250)
1501
1502 /* FLEXCAN hardware feature flags
1503 *
1504 diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
1505 index 045f0845e665..3df23487487f 100644
1506 --- a/drivers/net/can/xilinx_can.c
1507 +++ b/drivers/net/can/xilinx_can.c
1508 @@ -1424,7 +1424,7 @@ static const struct xcan_devtype_data xcan_canfd_data = {
1509 XCAN_FLAG_RXMNF |
1510 XCAN_FLAG_TX_MAILBOXES |
1511 XCAN_FLAG_RX_FIFO_MULTI,
1512 - .bittiming_const = &xcan_bittiming_const,
1513 + .bittiming_const = &xcan_bittiming_const_canfd,
1514 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
1515 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
1516 .bus_clk_name = "s_axi_aclk",
1517 diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
1518 index dfaad1c2c2b8..411cfb806459 100644
1519 --- a/drivers/net/dsa/mv88e6xxx/chip.c
1520 +++ b/drivers/net/dsa/mv88e6xxx/chip.c
1521 @@ -1484,7 +1484,7 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
1522 int err;
1523
1524 if (!vid)
1525 - return -EINVAL;
1526 + return -EOPNOTSUPP;
1527
1528 entry->vid = vid - 1;
1529 entry->valid = false;
1530 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
1531 index e2710ff48fb0..1fa0cd527ead 100644
1532 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
1533 +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
1534 @@ -339,6 +339,7 @@ static int __lb_setup(struct net_device *ndev,
1535 static int __lb_up(struct net_device *ndev,
1536 enum hnae_loop loop_mode)
1537 {
1538 +#define NIC_LB_TEST_WAIT_PHY_LINK_TIME 300
1539 struct hns_nic_priv *priv = netdev_priv(ndev);
1540 struct hnae_handle *h = priv->ae_handle;
1541 int speed, duplex;
1542 @@ -365,6 +366,9 @@ static int __lb_up(struct net_device *ndev,
1543
1544 h->dev->ops->adjust_link(h, speed, duplex);
1545
1546 + /* wait adjust link done and phy ready */
1547 + msleep(NIC_LB_TEST_WAIT_PHY_LINK_TIME);
1548 +
1549 return 0;
1550 }
1551
1552 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
1553 index 6e6abdc399de..1d55f014725e 100644
1554 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
1555 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
1556 @@ -1784,6 +1784,7 @@ static void mtk_poll_controller(struct net_device *dev)
1557
1558 static int mtk_start_dma(struct mtk_eth *eth)
1559 {
1560 + u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
1561 int err;
1562
1563 err = mtk_dma_init(eth);
1564 @@ -1800,7 +1801,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
1565 MTK_QDMA_GLO_CFG);
1566
1567 mtk_w32(eth,
1568 - MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
1569 + MTK_RX_DMA_EN | rx_2b_offset |
1570 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
1571 MTK_PDMA_GLO_CFG);
1572
1573 @@ -2304,13 +2305,13 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1574
1575 switch (cmd->cmd) {
1576 case ETHTOOL_GRXRINGS:
1577 - if (dev->features & NETIF_F_LRO) {
1578 + if (dev->hw_features & NETIF_F_LRO) {
1579 cmd->data = MTK_MAX_RX_RING_NUM;
1580 ret = 0;
1581 }
1582 break;
1583 case ETHTOOL_GRXCLSRLCNT:
1584 - if (dev->features & NETIF_F_LRO) {
1585 + if (dev->hw_features & NETIF_F_LRO) {
1586 struct mtk_mac *mac = netdev_priv(dev);
1587
1588 cmd->rule_cnt = mac->hwlro_ip_cnt;
1589 @@ -2318,11 +2319,11 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1590 }
1591 break;
1592 case ETHTOOL_GRXCLSRULE:
1593 - if (dev->features & NETIF_F_LRO)
1594 + if (dev->hw_features & NETIF_F_LRO)
1595 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
1596 break;
1597 case ETHTOOL_GRXCLSRLALL:
1598 - if (dev->features & NETIF_F_LRO)
1599 + if (dev->hw_features & NETIF_F_LRO)
1600 ret = mtk_hwlro_get_fdir_all(dev, cmd,
1601 rule_locs);
1602 break;
1603 @@ -2339,11 +2340,11 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1604
1605 switch (cmd->cmd) {
1606 case ETHTOOL_SRXCLSRLINS:
1607 - if (dev->features & NETIF_F_LRO)
1608 + if (dev->hw_features & NETIF_F_LRO)
1609 ret = mtk_hwlro_add_ipaddr(dev, cmd);
1610 break;
1611 case ETHTOOL_SRXCLSRLDEL:
1612 - if (dev->features & NETIF_F_LRO)
1613 + if (dev->hw_features & NETIF_F_LRO)
1614 ret = mtk_hwlro_del_ipaddr(dev, cmd);
1615 break;
1616 default:
1617 diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
1618 index 68b8007da82b..0115a2868933 100644
1619 --- a/drivers/net/ipvlan/ipvlan_main.c
1620 +++ b/drivers/net/ipvlan/ipvlan_main.c
1621 @@ -178,7 +178,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
1622 }
1623
1624 #define IPVLAN_FEATURES \
1625 - (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
1626 + (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
1627 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \
1628 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
1629 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
1630 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1631 index a907d7b065fa..53e4962ceb8a 100644
1632 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1633 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1634 @@ -667,6 +667,12 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
1635
1636 brcmf_dbg(TRACE, "Enter: on=%d\n", on);
1637
1638 + sdio_retune_crc_disable(bus->sdiodev->func1);
1639 +
1640 + /* Cannot re-tune if device is asleep; defer till we're awake */
1641 + if (on)
1642 + sdio_retune_hold_now(bus->sdiodev->func1);
1643 +
1644 wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
1645 /* 1st KSO write goes to AOS wake up core if device is asleep */
1646 brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
1647 @@ -719,6 +725,11 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
1648 if (try_cnt > MAX_KSO_ATTEMPTS)
1649 brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err);
1650
1651 + if (on)
1652 + sdio_retune_release(bus->sdiodev->func1);
1653 +
1654 + sdio_retune_crc_enable(bus->sdiodev->func1);
1655 +
1656 return err;
1657 }
1658
1659 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
1660 index a867a139bb35..d8869d978c34 100644
1661 --- a/drivers/nvme/host/core.c
1662 +++ b/drivers/nvme/host/core.c
1663 @@ -3228,7 +3228,8 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
1664 {
1665 struct nvme_ns *ns;
1666 __le32 *ns_list;
1667 - unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
1668 + unsigned i, j, nsid, prev = 0;
1669 + unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024);
1670 int ret = 0;
1671
1672 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
1673 diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
1674 index 7bc9f6240432..1096dd01ca22 100644
1675 --- a/drivers/nvme/target/io-cmd-bdev.c
1676 +++ b/drivers/nvme/target/io-cmd-bdev.c
1677 @@ -239,6 +239,7 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
1678 return 0;
1679 case nvme_cmd_write_zeroes:
1680 req->execute = nvmet_bdev_execute_write_zeroes;
1681 + req->data_len = 0;
1682 return 0;
1683 default:
1684 pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
1685 diff --git a/drivers/parport/share.c b/drivers/parport/share.c
1686 index 5dc53d420ca8..7b4ee33c1935 100644
1687 --- a/drivers/parport/share.c
1688 +++ b/drivers/parport/share.c
1689 @@ -895,6 +895,7 @@ parport_register_dev_model(struct parport *port, const char *name,
1690 par_dev->devmodel = true;
1691 ret = device_register(&par_dev->dev);
1692 if (ret) {
1693 + kfree(par_dev->state);
1694 put_device(&par_dev->dev);
1695 goto err_put_port;
1696 }
1697 @@ -912,6 +913,7 @@ parport_register_dev_model(struct parport *port, const char *name,
1698 spin_unlock(&port->physport->pardevice_lock);
1699 pr_debug("%s: cannot grant exclusive access for device %s\n",
1700 port->name, name);
1701 + kfree(par_dev->state);
1702 device_unregister(&par_dev->dev);
1703 goto err_put_port;
1704 }
1705 diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
1706 index b7513c5848cf..c1c35eccd5b6 100644
1707 --- a/drivers/s390/net/qeth_l2_main.c
1708 +++ b/drivers/s390/net/qeth_l2_main.c
1709 @@ -1901,7 +1901,7 @@ static void qeth_bridgeport_an_set_cb(void *priv,
1710
1711 l2entry = (struct qdio_brinfo_entry_l2 *)entry;
1712 code = IPA_ADDR_CHANGE_CODE_MACADDR;
1713 - if (l2entry->addr_lnid.lnid)
1714 + if (l2entry->addr_lnid.lnid < VLAN_N_VID)
1715 code |= IPA_ADDR_CHANGE_CODE_VLANID;
1716 qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
1717 (struct net_if_token *)&l2entry->nit,
1718 diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
1719 index 411d656f2530..98f2d076f938 100644
1720 --- a/drivers/scsi/smartpqi/smartpqi_init.c
1721 +++ b/drivers/scsi/smartpqi/smartpqi_init.c
1722 @@ -3697,8 +3697,10 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
1723 return -ETIMEDOUT;
1724 msecs_blocked =
1725 jiffies_to_msecs(jiffies - start_jiffies);
1726 - if (msecs_blocked >= timeout_msecs)
1727 - return -ETIMEDOUT;
1728 + if (msecs_blocked >= timeout_msecs) {
1729 + rc = -ETIMEDOUT;
1730 + goto out;
1731 + }
1732 timeout_msecs -= msecs_blocked;
1733 }
1734 }
1735 diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
1736 index 895a9b5ac989..30c22e16b1e3 100644
1737 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c
1738 +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
1739 @@ -340,24 +340,21 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
1740 goto dealloc_host;
1741 }
1742
1743 - pm_runtime_set_active(&pdev->dev);
1744 - pm_runtime_enable(&pdev->dev);
1745 -
1746 ufshcd_init_lanes_per_dir(hba);
1747
1748 err = ufshcd_init(hba, mmio_base, irq);
1749 if (err) {
1750 dev_err(dev, "Initialization failed\n");
1751 - goto out_disable_rpm;
1752 + goto dealloc_host;
1753 }
1754
1755 platform_set_drvdata(pdev, hba);
1756
1757 + pm_runtime_set_active(&pdev->dev);
1758 + pm_runtime_enable(&pdev->dev);
1759 +
1760 return 0;
1761
1762 -out_disable_rpm:
1763 - pm_runtime_disable(&pdev->dev);
1764 - pm_runtime_set_suspended(&pdev->dev);
1765 dealloc_host:
1766 ufshcd_dealloc_host(hba);
1767 out:
1768 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
1769 index 3183fa8c5857..b8b59cfeacd1 100644
1770 --- a/drivers/scsi/ufs/ufshcd.c
1771 +++ b/drivers/scsi/ufs/ufshcd.c
1772 @@ -1914,7 +1914,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1773 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
1774
1775 /* Get the descriptor */
1776 - if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
1777 + if (hba->dev_cmd.query.descriptor &&
1778 + lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
1779 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
1780 GENERAL_UPIU_REQUEST_SIZE;
1781 u16 resp_len;
1782 diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h
1783 index 2f8e2bf70941..7677da889f12 100644
1784 --- a/drivers/staging/erofs/erofs_fs.h
1785 +++ b/drivers/staging/erofs/erofs_fs.h
1786 @@ -17,10 +17,16 @@
1787 #define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2
1788 #define EROFS_SUPER_OFFSET 1024
1789
1790 +/*
1791 + * Any bits that aren't in EROFS_ALL_REQUIREMENTS should be
1792 + * incompatible with this kernel version.
1793 + */
1794 +#define EROFS_ALL_REQUIREMENTS 0
1795 +
1796 struct erofs_super_block {
1797 /* 0 */__le32 magic; /* in the little endian */
1798 /* 4 */__le32 checksum; /* crc32c(super_block) */
1799 -/* 8 */__le32 features;
1800 +/* 8 */__le32 features; /* (aka. feature_compat) */
1801 /* 12 */__u8 blkszbits; /* support block_size == PAGE_SIZE only */
1802 /* 13 */__u8 reserved;
1803
1804 @@ -34,9 +40,10 @@ struct erofs_super_block {
1805 /* 44 */__le32 xattr_blkaddr;
1806 /* 48 */__u8 uuid[16]; /* 128-bit uuid for volume */
1807 /* 64 */__u8 volume_name[16]; /* volume name */
1808 +/* 80 */__le32 requirements; /* (aka. feature_incompat) */
1809
1810 -/* 80 */__u8 reserved2[48]; /* 128 bytes */
1811 -} __packed;
1812 +/* 84 */__u8 reserved2[44];
1813 +} __packed; /* 128 bytes */
1814
1815 #define __EROFS_BIT(_prefix, _cur, _pre) enum { \
1816 _prefix ## _cur ## _BIT = _prefix ## _pre ## _BIT + \
1817 diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
1818 index 58d8cbc3f921..8ce37091db20 100644
1819 --- a/drivers/staging/erofs/internal.h
1820 +++ b/drivers/staging/erofs/internal.h
1821 @@ -111,6 +111,8 @@ struct erofs_sb_info {
1822
1823 u8 uuid[16]; /* 128-bit uuid for volume */
1824 u8 volume_name[16]; /* volume name */
1825 + u32 requirements;
1826 +
1827 char *dev_name;
1828
1829 unsigned int mount_opt;
1830 diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
1831 index b0583cdb079a..b49ebdf6ebda 100644
1832 --- a/drivers/staging/erofs/super.c
1833 +++ b/drivers/staging/erofs/super.c
1834 @@ -75,6 +75,22 @@ static void destroy_inode(struct inode *inode)
1835 call_rcu(&inode->i_rcu, i_callback);
1836 }
1837
1838 +static bool check_layout_compatibility(struct super_block *sb,
1839 + struct erofs_super_block *layout)
1840 +{
1841 + const unsigned int requirements = le32_to_cpu(layout->requirements);
1842 +
1843 + EROFS_SB(sb)->requirements = requirements;
1844 +
1845 + /* check if current kernel meets all mandatory requirements */
1846 + if (requirements & (~EROFS_ALL_REQUIREMENTS)) {
1847 + errln("unidentified requirements %x, please upgrade kernel version",
1848 + requirements & ~EROFS_ALL_REQUIREMENTS);
1849 + return false;
1850 + }
1851 + return true;
1852 +}
1853 +
1854 static int superblock_read(struct super_block *sb)
1855 {
1856 struct erofs_sb_info *sbi;
1857 @@ -108,6 +124,9 @@ static int superblock_read(struct super_block *sb)
1858 goto out;
1859 }
1860
1861 + if (!check_layout_compatibility(sb, layout))
1862 + goto out;
1863 +
1864 sbi->blocks = le32_to_cpu(layout->blocks);
1865 sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr);
1866 #ifdef CONFIG_EROFS_FS_XATTR
1867 diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
1868 index 9852ec5e6e01..cc7c856126df 100644
1869 --- a/drivers/usb/chipidea/udc.c
1870 +++ b/drivers/usb/chipidea/udc.c
1871 @@ -1621,6 +1621,25 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
1872 static int ci_udc_start(struct usb_gadget *gadget,
1873 struct usb_gadget_driver *driver);
1874 static int ci_udc_stop(struct usb_gadget *gadget);
1875 +
1876 +/* Match ISOC IN from the highest endpoint */
1877 +static struct usb_ep *ci_udc_match_ep(struct usb_gadget *gadget,
1878 + struct usb_endpoint_descriptor *desc,
1879 + struct usb_ss_ep_comp_descriptor *comp_desc)
1880 +{
1881 + struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1882 + struct usb_ep *ep;
1883 +
1884 + if (usb_endpoint_xfer_isoc(desc) && usb_endpoint_dir_in(desc)) {
1885 + list_for_each_entry_reverse(ep, &ci->gadget.ep_list, ep_list) {
1886 + if (ep->caps.dir_in && !ep->claimed)
1887 + return ep;
1888 + }
1889 + }
1890 +
1891 + return NULL;
1892 +}
1893 +
1894 /**
1895 * Device operations part of the API to the USB controller hardware,
1896 * which don't involve endpoints (or i/o)
1897 @@ -1634,6 +1653,7 @@ static const struct usb_gadget_ops usb_gadget_ops = {
1898 .vbus_draw = ci_udc_vbus_draw,
1899 .udc_start = ci_udc_start,
1900 .udc_stop = ci_udc_stop,
1901 + .match_ep = ci_udc_match_ep,
1902 };
1903
1904 static int init_eps(struct ci_hdrc *ci)
1905 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
1906 index b62953ee0fc6..f896a00662ef 100644
1907 --- a/drivers/usb/host/xhci-ring.c
1908 +++ b/drivers/usb/host/xhci-ring.c
1909 @@ -1604,8 +1604,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
1910 usb_hcd_resume_root_hub(hcd);
1911 }
1912
1913 - if (hcd->speed >= HCD_USB3 && (portsc & PORT_PLS_MASK) == XDEV_INACTIVE)
1914 + if (hcd->speed >= HCD_USB3 &&
1915 + (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
1916 + slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
1917 + if (slot_id && xhci->devs[slot_id])
1918 + xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
1919 bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
1920 + }
1921
1922 if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
1923 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1924 @@ -1793,6 +1798,14 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1925 {
1926 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1927 struct xhci_command *command;
1928 +
1929 + /*
1930 + * Avoid resetting endpoint if link is inactive. Can cause host hang.
1931 + * Device will be reset soon to recover the link so don't do anything
1932 + */
1933 + if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR)
1934 + return;
1935 +
1936 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1937 if (!command)
1938 return;
1939 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
1940 index f30b065095fa..4ffadca2c71a 100644
1941 --- a/drivers/usb/host/xhci.c
1942 +++ b/drivers/usb/host/xhci.c
1943 @@ -1441,6 +1441,10 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
1944 xhci_dbg(xhci, "urb submitted during PCI suspend\n");
1945 return -ESHUTDOWN;
1946 }
1947 + if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
1948 + xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
1949 + return -ENODEV;
1950 + }
1951
1952 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1953 num_tds = urb->number_of_packets;
1954 @@ -3724,6 +3728,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
1955 }
1956 /* If necessary, update the number of active TTs on this root port */
1957 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
1958 + virt_dev->flags = 0;
1959 ret = 0;
1960
1961 command_cleanup:
1962 @@ -5030,16 +5035,26 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
1963 } else {
1964 /*
1965 * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol
1966 - * minor revision instead of sbrn
1967 + * minor revision instead of sbrn. Minor revision is a two digit
1968 + * BCD containing minor and sub-minor numbers, only show minor.
1969 */
1970 - minor_rev = xhci->usb3_rhub.min_rev;
1971 - if (minor_rev) {
1972 + minor_rev = xhci->usb3_rhub.min_rev / 0x10;
1973 +
1974 + switch (minor_rev) {
1975 + case 2:
1976 + hcd->speed = HCD_USB32;
1977 + hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
1978 + hcd->self.root_hub->rx_lanes = 2;
1979 + hcd->self.root_hub->tx_lanes = 2;
1980 + break;
1981 + case 1:
1982 hcd->speed = HCD_USB31;
1983 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
1984 + break;
1985 }
1986 - xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n",
1987 + xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
1988 minor_rev,
1989 - minor_rev ? "Enhanced" : "");
1990 + minor_rev ? "Enhanced " : "");
1991
1992 xhci->usb3_rhub.hcd = hcd;
1993 /* xHCI private pointer was set in xhci_pci_probe for the second
1994 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
1995 index dc00f59c8e69..761b341d27b0 100644
1996 --- a/drivers/usb/host/xhci.h
1997 +++ b/drivers/usb/host/xhci.h
1998 @@ -1010,6 +1010,15 @@ struct xhci_virt_device {
1999 u8 real_port;
2000 struct xhci_interval_bw_table *bw_table;
2001 struct xhci_tt_bw_info *tt_info;
2002 + /*
2003 + * flags for state tracking based on events and issued commands.
2004 + * Software can not rely on states from output contexts because of
2005 + * latency between events and xHC updating output context values.
2006 + * See xhci 1.1 section 4.8.3 for more details
2007 + */
2008 + unsigned long flags;
2009 +#define VDEV_PORT_ERROR BIT(0) /* Port error, link inactive */
2010 +
2011 /* The current max exit latency for the enabled USB3 link states. */
2012 u16 current_mel;
2013 /* Used for the debugfs interfaces. */
2014 diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
2015 index dec14b739b10..859274e38417 100644
2016 --- a/fs/btrfs/reada.c
2017 +++ b/fs/btrfs/reada.c
2018 @@ -745,6 +745,7 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info)
2019 u64 total = 0;
2020 int i;
2021
2022 +again:
2023 do {
2024 enqueued = 0;
2025 mutex_lock(&fs_devices->device_list_mutex);
2026 @@ -756,6 +757,10 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info)
2027 mutex_unlock(&fs_devices->device_list_mutex);
2028 total += enqueued;
2029 } while (enqueued && total < 10000);
2030 + if (fs_devices->seed) {
2031 + fs_devices = fs_devices->seed;
2032 + goto again;
2033 + }
2034
2035 if (enqueued == 0)
2036 return;
2037 diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
2038 index 18814f1d67d9..3c0bad577859 100644
2039 --- a/fs/cifs/smb2maperror.c
2040 +++ b/fs/cifs/smb2maperror.c
2041 @@ -457,7 +457,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
2042 {STATUS_FILE_INVALID, -EIO, "STATUS_FILE_INVALID"},
2043 {STATUS_ALLOTTED_SPACE_EXCEEDED, -EIO,
2044 "STATUS_ALLOTTED_SPACE_EXCEEDED"},
2045 - {STATUS_INSUFFICIENT_RESOURCES, -EREMOTEIO,
2046 + {STATUS_INSUFFICIENT_RESOURCES, -EAGAIN,
2047 "STATUS_INSUFFICIENT_RESOURCES"},
2048 {STATUS_DFS_EXIT_PATH_FOUND, -EIO, "STATUS_DFS_EXIT_PATH_FOUND"},
2049 {STATUS_DEVICE_DATA_ERROR, -EIO, "STATUS_DEVICE_DATA_ERROR"},
2050 diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
2051 index 00338b828f76..0bd276e4ccbe 100644
2052 --- a/fs/overlayfs/file.c
2053 +++ b/fs/overlayfs/file.c
2054 @@ -409,36 +409,16 @@ static long ovl_real_ioctl(struct file *file, unsigned int cmd,
2055 return ret;
2056 }
2057
2058 -static unsigned int ovl_get_inode_flags(struct inode *inode)
2059 -{
2060 - unsigned int flags = READ_ONCE(inode->i_flags);
2061 - unsigned int ovl_iflags = 0;
2062 -
2063 - if (flags & S_SYNC)
2064 - ovl_iflags |= FS_SYNC_FL;
2065 - if (flags & S_APPEND)
2066 - ovl_iflags |= FS_APPEND_FL;
2067 - if (flags & S_IMMUTABLE)
2068 - ovl_iflags |= FS_IMMUTABLE_FL;
2069 - if (flags & S_NOATIME)
2070 - ovl_iflags |= FS_NOATIME_FL;
2071 -
2072 - return ovl_iflags;
2073 -}
2074 -
2075 -static long ovl_ioctl_set_flags(struct file *file, unsigned long arg)
2076 +static long ovl_ioctl_set_flags(struct file *file, unsigned int cmd,
2077 + unsigned long arg, unsigned int iflags)
2078 {
2079 long ret;
2080 struct inode *inode = file_inode(file);
2081 - unsigned int flags;
2082 - unsigned int old_flags;
2083 + unsigned int old_iflags;
2084
2085 if (!inode_owner_or_capable(inode))
2086 return -EACCES;
2087
2088 - if (get_user(flags, (int __user *) arg))
2089 - return -EFAULT;
2090 -
2091 ret = mnt_want_write_file(file);
2092 if (ret)
2093 return ret;
2094 @@ -447,8 +427,8 @@ static long ovl_ioctl_set_flags(struct file *file, unsigned long arg)
2095
2096 /* Check the capability before cred override */
2097 ret = -EPERM;
2098 - old_flags = ovl_get_inode_flags(inode);
2099 - if (((flags ^ old_flags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) &&
2100 + old_iflags = READ_ONCE(inode->i_flags);
2101 + if (((iflags ^ old_iflags) & (S_APPEND | S_IMMUTABLE)) &&
2102 !capable(CAP_LINUX_IMMUTABLE))
2103 goto unlock;
2104
2105 @@ -456,7 +436,7 @@ static long ovl_ioctl_set_flags(struct file *file, unsigned long arg)
2106 if (ret)
2107 goto unlock;
2108
2109 - ret = ovl_real_ioctl(file, FS_IOC_SETFLAGS, arg);
2110 + ret = ovl_real_ioctl(file, cmd, arg);
2111
2112 ovl_copyflags(ovl_inode_real(inode), inode);
2113 unlock:
2114 @@ -468,17 +448,79 @@ unlock:
2115
2116 }
2117
2118 +static unsigned int ovl_fsflags_to_iflags(unsigned int flags)
2119 +{
2120 + unsigned int iflags = 0;
2121 +
2122 + if (flags & FS_SYNC_FL)
2123 + iflags |= S_SYNC;
2124 + if (flags & FS_APPEND_FL)
2125 + iflags |= S_APPEND;
2126 + if (flags & FS_IMMUTABLE_FL)
2127 + iflags |= S_IMMUTABLE;
2128 + if (flags & FS_NOATIME_FL)
2129 + iflags |= S_NOATIME;
2130 +
2131 + return iflags;
2132 +}
2133 +
2134 +static long ovl_ioctl_set_fsflags(struct file *file, unsigned int cmd,
2135 + unsigned long arg)
2136 +{
2137 + unsigned int flags;
2138 +
2139 + if (get_user(flags, (int __user *) arg))
2140 + return -EFAULT;
2141 +
2142 + return ovl_ioctl_set_flags(file, cmd, arg,
2143 + ovl_fsflags_to_iflags(flags));
2144 +}
2145 +
2146 +static unsigned int ovl_fsxflags_to_iflags(unsigned int xflags)
2147 +{
2148 + unsigned int iflags = 0;
2149 +
2150 + if (xflags & FS_XFLAG_SYNC)
2151 + iflags |= S_SYNC;
2152 + if (xflags & FS_XFLAG_APPEND)
2153 + iflags |= S_APPEND;
2154 + if (xflags & FS_XFLAG_IMMUTABLE)
2155 + iflags |= S_IMMUTABLE;
2156 + if (xflags & FS_XFLAG_NOATIME)
2157 + iflags |= S_NOATIME;
2158 +
2159 + return iflags;
2160 +}
2161 +
2162 +static long ovl_ioctl_set_fsxflags(struct file *file, unsigned int cmd,
2163 + unsigned long arg)
2164 +{
2165 + struct fsxattr fa;
2166 +
2167 + memset(&fa, 0, sizeof(fa));
2168 + if (copy_from_user(&fa, (void __user *) arg, sizeof(fa)))
2169 + return -EFAULT;
2170 +
2171 + return ovl_ioctl_set_flags(file, cmd, arg,
2172 + ovl_fsxflags_to_iflags(fa.fsx_xflags));
2173 +}
2174 +
2175 static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2176 {
2177 long ret;
2178
2179 switch (cmd) {
2180 case FS_IOC_GETFLAGS:
2181 + case FS_IOC_FSGETXATTR:
2182 ret = ovl_real_ioctl(file, cmd, arg);
2183 break;
2184
2185 case FS_IOC_SETFLAGS:
2186 - ret = ovl_ioctl_set_flags(file, arg);
2187 + ret = ovl_ioctl_set_fsflags(file, cmd, arg);
2188 + break;
2189 +
2190 + case FS_IOC_FSSETXATTR:
2191 + ret = ovl_ioctl_set_fsxflags(file, cmd, arg);
2192 break;
2193
2194 default:
2195 diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
2196 index b48273e846ad..f0389849fd80 100644
2197 --- a/fs/overlayfs/inode.c
2198 +++ b/fs/overlayfs/inode.c
2199 @@ -553,15 +553,15 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
2200 int xinobits = ovl_xino_bits(inode->i_sb);
2201
2202 /*
2203 - * When NFS export is enabled and d_ino is consistent with st_ino
2204 - * (samefs or i_ino has enough bits to encode layer), set the same
2205 - * value used for d_ino to i_ino, because nfsd readdirplus compares
2206 - * d_ino values to i_ino values of child entries. When called from
2207 + * When d_ino is consistent with st_ino (samefs or i_ino has enough
2208 + * bits to encode layer), set the same value used for st_ino to i_ino,
2209 + * so inode number exposed via /proc/locks and a like will be
2210 + * consistent with d_ino and st_ino values. An i_ino value inconsistent
2211 + * with d_ino also causes nfsd readdirplus to fail. When called from
2212 * ovl_new_inode(), ino arg is 0, so i_ino will be updated to real
2213 * upper inode i_ino on ovl_inode_init() or ovl_inode_update().
2214 */
2215 - if (inode->i_sb->s_export_op &&
2216 - (ovl_same_sb(inode->i_sb) || xinobits)) {
2217 + if (ovl_same_sb(inode->i_sb) || xinobits) {
2218 inode->i_ino = ino;
2219 if (xinobits && fsid && !(ino >> (64 - xinobits)))
2220 inode->i_ino |= (unsigned long)fsid << (64 - xinobits);
2221 @@ -777,6 +777,54 @@ struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
2222 return inode;
2223 }
2224
2225 +bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir)
2226 +{
2227 + struct inode *key = d_inode(dir);
2228 + struct inode *trap;
2229 + bool res;
2230 +
2231 + trap = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
2232 + if (!trap)
2233 + return false;
2234 +
2235 + res = IS_DEADDIR(trap) && !ovl_inode_upper(trap) &&
2236 + !ovl_inode_lower(trap);
2237 +
2238 + iput(trap);
2239 + return res;
2240 +}
2241 +
2242 +/*
2243 + * Create an inode cache entry for layer root dir, that will intentionally
2244 + * fail ovl_verify_inode(), so any lookup that will find some layer root
2245 + * will fail.
2246 + */
2247 +struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir)
2248 +{
2249 + struct inode *key = d_inode(dir);
2250 + struct inode *trap;
2251 +
2252 + if (!d_is_dir(dir))
2253 + return ERR_PTR(-ENOTDIR);
2254 +
2255 + trap = iget5_locked(sb, (unsigned long) key, ovl_inode_test,
2256 + ovl_inode_set, key);
2257 + if (!trap)
2258 + return ERR_PTR(-ENOMEM);
2259 +
2260 + if (!(trap->i_state & I_NEW)) {
2261 + /* Conflicting layer roots? */
2262 + iput(trap);
2263 + return ERR_PTR(-ELOOP);
2264 + }
2265 +
2266 + trap->i_mode = S_IFDIR;
2267 + trap->i_flags = S_DEAD;
2268 + unlock_new_inode(trap);
2269 +
2270 + return trap;
2271 +}
2272 +
2273 /*
2274 * Does overlay inode need to be hashed by lower inode?
2275 */
2276 diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
2277 index efd372312ef1..badf039267a2 100644
2278 --- a/fs/overlayfs/namei.c
2279 +++ b/fs/overlayfs/namei.c
2280 @@ -18,6 +18,7 @@
2281 #include "overlayfs.h"
2282
2283 struct ovl_lookup_data {
2284 + struct super_block *sb;
2285 struct qstr name;
2286 bool is_dir;
2287 bool opaque;
2288 @@ -244,6 +245,12 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
2289 if (!d->metacopy || d->last)
2290 goto out;
2291 } else {
2292 + if (ovl_lookup_trap_inode(d->sb, this)) {
2293 + /* Caught in a trap of overlapping layers */
2294 + err = -ELOOP;
2295 + goto out_err;
2296 + }
2297 +
2298 if (last_element)
2299 d->is_dir = true;
2300 if (d->last)
2301 @@ -819,6 +826,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
2302 int err;
2303 bool metacopy = false;
2304 struct ovl_lookup_data d = {
2305 + .sb = dentry->d_sb,
2306 .name = dentry->d_name,
2307 .is_dir = false,
2308 .opaque = false,
2309 diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
2310 index 80fb66426760..265bf9cfde08 100644
2311 --- a/fs/overlayfs/overlayfs.h
2312 +++ b/fs/overlayfs/overlayfs.h
2313 @@ -270,6 +270,7 @@ void ovl_clear_flag(unsigned long flag, struct inode *inode);
2314 bool ovl_test_flag(unsigned long flag, struct inode *inode);
2315 bool ovl_inuse_trylock(struct dentry *dentry);
2316 void ovl_inuse_unlock(struct dentry *dentry);
2317 +bool ovl_is_inuse(struct dentry *dentry);
2318 bool ovl_need_index(struct dentry *dentry);
2319 int ovl_nlink_start(struct dentry *dentry, bool *locked);
2320 void ovl_nlink_end(struct dentry *dentry, bool locked);
2321 @@ -366,6 +367,8 @@ struct ovl_inode_params {
2322 struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev);
2323 struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
2324 bool is_upper);
2325 +bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir);
2326 +struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir);
2327 struct inode *ovl_get_inode(struct super_block *sb,
2328 struct ovl_inode_params *oip);
2329 static inline void ovl_copyattr(struct inode *from, struct inode *to)
2330 diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
2331 index ec237035333a..6ed1ace8f8b3 100644
2332 --- a/fs/overlayfs/ovl_entry.h
2333 +++ b/fs/overlayfs/ovl_entry.h
2334 @@ -29,6 +29,8 @@ struct ovl_sb {
2335
2336 struct ovl_layer {
2337 struct vfsmount *mnt;
2338 + /* Trap in ovl inode cache */
2339 + struct inode *trap;
2340 struct ovl_sb *fs;
2341 /* Index of this layer in fs root (upper idx == 0) */
2342 int idx;
2343 @@ -65,6 +67,10 @@ struct ovl_fs {
2344 /* Did we take the inuse lock? */
2345 bool upperdir_locked;
2346 bool workdir_locked;
2347 + /* Traps in ovl inode cache */
2348 + struct inode *upperdir_trap;
2349 + struct inode *workdir_trap;
2350 + struct inode *indexdir_trap;
2351 /* Inode numbers in all layers do not use the high xino_bits */
2352 unsigned int xino_bits;
2353 };
2354 diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
2355 index 0fb0a59a5e5c..2d028c02621f 100644
2356 --- a/fs/overlayfs/super.c
2357 +++ b/fs/overlayfs/super.c
2358 @@ -217,6 +217,9 @@ static void ovl_free_fs(struct ovl_fs *ofs)
2359 {
2360 unsigned i;
2361
2362 + iput(ofs->indexdir_trap);
2363 + iput(ofs->workdir_trap);
2364 + iput(ofs->upperdir_trap);
2365 dput(ofs->indexdir);
2366 dput(ofs->workdir);
2367 if (ofs->workdir_locked)
2368 @@ -225,8 +228,10 @@ static void ovl_free_fs(struct ovl_fs *ofs)
2369 if (ofs->upperdir_locked)
2370 ovl_inuse_unlock(ofs->upper_mnt->mnt_root);
2371 mntput(ofs->upper_mnt);
2372 - for (i = 0; i < ofs->numlower; i++)
2373 + for (i = 0; i < ofs->numlower; i++) {
2374 + iput(ofs->lower_layers[i].trap);
2375 mntput(ofs->lower_layers[i].mnt);
2376 + }
2377 for (i = 0; i < ofs->numlowerfs; i++)
2378 free_anon_bdev(ofs->lower_fs[i].pseudo_dev);
2379 kfree(ofs->lower_layers);
2380 @@ -984,7 +989,26 @@ static const struct xattr_handler *ovl_xattr_handlers[] = {
2381 NULL
2382 };
2383
2384 -static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
2385 +static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
2386 + struct inode **ptrap, const char *name)
2387 +{
2388 + struct inode *trap;
2389 + int err;
2390 +
2391 + trap = ovl_get_trap_inode(sb, dir);
2392 + err = PTR_ERR_OR_ZERO(trap);
2393 + if (err) {
2394 + if (err == -ELOOP)
2395 + pr_err("overlayfs: conflicting %s path\n", name);
2396 + return err;
2397 + }
2398 +
2399 + *ptrap = trap;
2400 + return 0;
2401 +}
2402 +
2403 +static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
2404 + struct path *upperpath)
2405 {
2406 struct vfsmount *upper_mnt;
2407 int err;
2408 @@ -1004,6 +1028,11 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
2409 if (err)
2410 goto out;
2411
2412 + err = ovl_setup_trap(sb, upperpath->dentry, &ofs->upperdir_trap,
2413 + "upperdir");
2414 + if (err)
2415 + goto out;
2416 +
2417 upper_mnt = clone_private_mount(upperpath);
2418 err = PTR_ERR(upper_mnt);
2419 if (IS_ERR(upper_mnt)) {
2420 @@ -1030,7 +1059,8 @@ out:
2421 return err;
2422 }
2423
2424 -static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath)
2425 +static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
2426 + struct path *workpath)
2427 {
2428 struct vfsmount *mnt = ofs->upper_mnt;
2429 struct dentry *temp;
2430 @@ -1045,6 +1075,10 @@ static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath)
2431 if (!ofs->workdir)
2432 goto out;
2433
2434 + err = ovl_setup_trap(sb, ofs->workdir, &ofs->workdir_trap, "workdir");
2435 + if (err)
2436 + goto out;
2437 +
2438 /*
2439 * Upper should support d_type, else whiteouts are visible. Given
2440 * workdir and upper are on same fs, we can do iterate_dir() on
2441 @@ -1105,7 +1139,8 @@ out:
2442 return err;
2443 }
2444
2445 -static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
2446 +static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
2447 + struct path *upperpath)
2448 {
2449 int err;
2450 struct path workpath = { };
2451 @@ -1136,19 +1171,16 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
2452 pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
2453 }
2454
2455 - err = ovl_make_workdir(ofs, &workpath);
2456 - if (err)
2457 - goto out;
2458 + err = ovl_make_workdir(sb, ofs, &workpath);
2459
2460 - err = 0;
2461 out:
2462 path_put(&workpath);
2463
2464 return err;
2465 }
2466
2467 -static int ovl_get_indexdir(struct ovl_fs *ofs, struct ovl_entry *oe,
2468 - struct path *upperpath)
2469 +static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
2470 + struct ovl_entry *oe, struct path *upperpath)
2471 {
2472 struct vfsmount *mnt = ofs->upper_mnt;
2473 int err;
2474 @@ -1167,6 +1199,11 @@ static int ovl_get_indexdir(struct ovl_fs *ofs, struct ovl_entry *oe,
2475
2476 ofs->indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true);
2477 if (ofs->indexdir) {
2478 + err = ovl_setup_trap(sb, ofs->indexdir, &ofs->indexdir_trap,
2479 + "indexdir");
2480 + if (err)
2481 + goto out;
2482 +
2483 /*
2484 * Verify upper root is exclusively associated with index dir.
2485 * Older kernels stored upper fh in "trusted.overlay.origin"
2486 @@ -1226,8 +1263,8 @@ static int ovl_get_fsid(struct ovl_fs *ofs, struct super_block *sb)
2487 return ofs->numlowerfs;
2488 }
2489
2490 -static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
2491 - unsigned int numlower)
2492 +static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
2493 + struct path *stack, unsigned int numlower)
2494 {
2495 int err;
2496 unsigned int i;
2497 @@ -1245,16 +1282,28 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
2498
2499 for (i = 0; i < numlower; i++) {
2500 struct vfsmount *mnt;
2501 + struct inode *trap;
2502 int fsid;
2503
2504 err = fsid = ovl_get_fsid(ofs, stack[i].mnt->mnt_sb);
2505 if (err < 0)
2506 goto out;
2507
2508 + err = -EBUSY;
2509 + if (ovl_is_inuse(stack[i].dentry)) {
2510 + pr_err("overlayfs: lowerdir is in-use as upperdir/workdir\n");
2511 + goto out;
2512 + }
2513 +
2514 + err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir");
2515 + if (err)
2516 + goto out;
2517 +
2518 mnt = clone_private_mount(&stack[i]);
2519 err = PTR_ERR(mnt);
2520 if (IS_ERR(mnt)) {
2521 pr_err("overlayfs: failed to clone lowerpath\n");
2522 + iput(trap);
2523 goto out;
2524 }
2525
2526 @@ -1264,6 +1313,7 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
2527 */
2528 mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME;
2529
2530 + ofs->lower_layers[ofs->numlower].trap = trap;
2531 ofs->lower_layers[ofs->numlower].mnt = mnt;
2532 ofs->lower_layers[ofs->numlower].idx = i + 1;
2533 ofs->lower_layers[ofs->numlower].fsid = fsid;
2534 @@ -1358,7 +1408,7 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
2535 goto out_err;
2536 }
2537
2538 - err = ovl_get_lower_layers(ofs, stack, numlower);
2539 + err = ovl_get_lower_layers(sb, ofs, stack, numlower);
2540 if (err)
2541 goto out_err;
2542
2543 @@ -1390,6 +1440,77 @@ out_err:
2544 goto out;
2545 }
2546
2547 +/*
2548 + * Check if this layer root is a descendant of:
2549 + * - another layer of this overlayfs instance
2550 + * - upper/work dir of any overlayfs instance
2551 + */
2552 +static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
2553 + const char *name)
2554 +{
2555 + struct dentry *next = dentry, *parent;
2556 + int err = 0;
2557 +
2558 + if (!dentry)
2559 + return 0;
2560 +
2561 + parent = dget_parent(next);
2562 +
2563 + /* Walk back ancestors to root (inclusive) looking for traps */
2564 + while (!err && parent != next) {
2565 + if (ovl_is_inuse(parent)) {
2566 + err = -EBUSY;
2567 + pr_err("overlayfs: %s path overlapping in-use upperdir/workdir\n",
2568 + name);
2569 + } else if (ovl_lookup_trap_inode(sb, parent)) {
2570 + err = -ELOOP;
2571 + pr_err("overlayfs: overlapping %s path\n", name);
2572 + }
2573 + next = parent;
2574 + parent = dget_parent(next);
2575 + dput(next);
2576 + }
2577 +
2578 + dput(parent);
2579 +
2580 + return err;
2581 +}
2582 +
2583 +/*
2584 + * Check if any of the layers or work dirs overlap.
2585 + */
2586 +static int ovl_check_overlapping_layers(struct super_block *sb,
2587 + struct ovl_fs *ofs)
2588 +{
2589 + int i, err;
2590 +
2591 + if (ofs->upper_mnt) {
2592 + err = ovl_check_layer(sb, ofs->upper_mnt->mnt_root, "upperdir");
2593 + if (err)
2594 + return err;
2595 +
2596 + /*
2597 + * Checking workbasedir avoids hitting ovl_is_inuse(parent) of
2598 + * this instance and covers overlapping work and index dirs,
2599 + * unless work or index dir have been moved since created inside
2600 + * workbasedir. In that case, we already have their traps in
2601 + * inode cache and we will catch that case on lookup.
2602 + */
2603 + err = ovl_check_layer(sb, ofs->workbasedir, "workdir");
2604 + if (err)
2605 + return err;
2606 + }
2607 +
2608 + for (i = 0; i < ofs->numlower; i++) {
2609 + err = ovl_check_layer(sb, ofs->lower_layers[i].mnt->mnt_root,
2610 + "lowerdir");
2611 + if (err)
2612 + return err;
2613 + }
2614 +
2615 + return 0;
2616 +}
2617 +
2618 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
2619 {
2620 struct path upperpath = { };
2621 @@ -1429,17 +1550,20 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
2622 if (ofs->config.xino != OVL_XINO_OFF)
2623 ofs->xino_bits = BITS_PER_LONG - 32;
2624
2625 + /* alloc/destroy_inode needed for setting up traps in inode cache */
2626 + sb->s_op = &ovl_super_operations;
2627 +
2628 if (ofs->config.upperdir) {
2629 if (!ofs->config.workdir) {
2630 pr_err("overlayfs: missing 'workdir'\n");
2631 goto out_err;
2632 }
2633
2634 - err = ovl_get_upper(ofs, &upperpath);
2635 + err = ovl_get_upper(sb, ofs, &upperpath);
2636 if (err)
2637 goto out_err;
2638
2639 - err = ovl_get_workdir(ofs, &upperpath);
2640 + err = ovl_get_workdir(sb, ofs, &upperpath);
2641 if (err)
2642 goto out_err;
2643
2644 @@ -1460,7 +1584,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
2645 sb->s_flags |= SB_RDONLY;
2646
2647 if (!(ovl_force_readonly(ofs)) && ofs->config.index) {
2648 - err = ovl_get_indexdir(ofs, oe, &upperpath);
2649 + err = ovl_get_indexdir(sb, ofs, oe, &upperpath);
2650 if (err)
2651 goto out_free_oe;
2652
2653 @@ -1473,6 +1597,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
2654
2655 }
2656
2657 + err = ovl_check_overlapping_layers(sb, ofs);
2658 + if (err)
2659 + goto out_free_oe;
2660 +
2661 /* Show index=off in /proc/mounts for forced r/o mount */
2662 if (!ofs->indexdir) {
2663 ofs->config.index = false;
2664 @@ -1494,7 +1622,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
2665 cap_lower(cred->cap_effective, CAP_SYS_RESOURCE);
2666
2667 sb->s_magic = OVERLAYFS_SUPER_MAGIC;
2668 - sb->s_op = &ovl_super_operations;
2669 sb->s_xattr = ovl_xattr_handlers;
2670 sb->s_fs_info = ofs;
2671 sb->s_flags |= SB_POSIXACL;
2672 diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
2673 index c9a2e3c6d537..db8bdb29b320 100644
2674 --- a/fs/overlayfs/util.c
2675 +++ b/fs/overlayfs/util.c
2676 @@ -653,6 +653,18 @@ void ovl_inuse_unlock(struct dentry *dentry)
2677 }
2678 }
2679
2680 +bool ovl_is_inuse(struct dentry *dentry)
2681 +{
2682 + struct inode *inode = d_inode(dentry);
2683 + bool inuse;
2684 +
2685 + spin_lock(&inode->i_lock);
2686 + inuse = (inode->i_state & I_OVL_INUSE);
2687 + spin_unlock(&inode->i_lock);
2688 +
2689 + return inuse;
2690 +}
2691 +
2692 /*
2693 * Does this overlay dentry need to be indexed on copy up?
2694 */
2695 diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
2696 index beed7121c781..2ff52de1c2b8 100644
2697 --- a/include/linux/mmc/host.h
2698 +++ b/include/linux/mmc/host.h
2699 @@ -395,6 +395,7 @@ struct mmc_host {
2700 unsigned int retune_now:1; /* do re-tuning at next req */
2701 unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
2702 unsigned int use_blk_mq:1; /* use blk-mq */
2703 + unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */
2704
2705 int rescan_disable; /* disable card detection */
2706 int rescan_entered; /* used with nonremovable devices */
2707 diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
2708 index 97ca105347a6..6905f3f641cc 100644
2709 --- a/include/linux/mmc/sdio_func.h
2710 +++ b/include/linux/mmc/sdio_func.h
2711 @@ -159,4 +159,10 @@ extern void sdio_f0_writeb(struct sdio_func *func, unsigned char b,
2712 extern mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func);
2713 extern int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags);
2714
2715 +extern void sdio_retune_crc_disable(struct sdio_func *func);
2716 +extern void sdio_retune_crc_enable(struct sdio_func *func);
2717 +
2718 +extern void sdio_retune_hold_now(struct sdio_func *func);
2719 +extern void sdio_retune_release(struct sdio_func *func);
2720 +
2721 #endif /* LINUX_MMC_SDIO_FUNC_H */
2722 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
2723 index 1dfb75057580..cc2d0c3b475b 100644
2724 --- a/include/net/bluetooth/hci_core.h
2725 +++ b/include/net/bluetooth/hci_core.h
2726 @@ -182,6 +182,9 @@ struct adv_info {
2727
2728 #define HCI_MAX_SHORT_NAME_LENGTH 10
2729
2730 +/* Min encryption key size to match with SMP */
2731 +#define HCI_MIN_ENC_KEY_SIZE 7
2732 +
2733 /* Default LE RPA expiry time, 15 minutes */
2734 #define HCI_DEFAULT_RPA_TIMEOUT (15 * 60)
2735
2736 diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
2737 index 4de121e24ce5..67e0a990144a 100644
2738 --- a/include/net/cfg80211.h
2739 +++ b/include/net/cfg80211.h
2740 @@ -3448,7 +3448,8 @@ struct cfg80211_ops {
2741 * on wiphy_new(), but can be changed by the driver if it has a good
2742 * reason to override the default
2743 * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station
2744 - * on a VLAN interface)
2745 + * on a VLAN interface). This flag also serves an extra purpose of
2746 + * supporting 4ADDR AP mode on devices which do not support AP/VLAN iftype.
2747 * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station
2748 * @WIPHY_FLAG_CONTROL_PORT_PROTOCOL: This device supports setting the
2749 * control port protocol ethertype. The device also honours the
2750 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2751 index 1bd7a758583b..181dba75a203 100644
2752 --- a/kernel/trace/trace.c
2753 +++ b/kernel/trace/trace.c
2754 @@ -8351,12 +8351,8 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
2755
2756 cnt++;
2757
2758 - /* reset all but tr, trace, and overruns */
2759 - memset(&iter.seq, 0,
2760 - sizeof(struct trace_iterator) -
2761 - offsetof(struct trace_iterator, seq));
2762 + trace_iterator_reset(&iter);
2763 iter.iter_flags |= TRACE_FILE_LAT_FMT;
2764 - iter.pos = -1;
2765
2766 if (trace_find_next_entry_inc(&iter) != NULL) {
2767 int ret;
2768 diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
2769 index 447bd96ee658..d11d7bfc3fa5 100644
2770 --- a/kernel/trace/trace.h
2771 +++ b/kernel/trace/trace.h
2772 @@ -1895,4 +1895,22 @@ static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
2773
2774 extern struct trace_iterator *tracepoint_print_iter;
2775
2776 +/*
2777 + * Reset the state of the trace_iterator so that it can read consumed data.
2778 + * Normally, the trace_iterator is used for reading the data when it is not
2779 + * consumed, and must retain state.
2780 + */
2781 +static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
2782 +{
2783 + const size_t offset = offsetof(struct trace_iterator, seq);
2784 +
2785 + /*
2786 + * Keep gcc from complaining about overwriting more than just one
2787 + * member in the structure.
2788 + */
2789 + memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
2790 +
2791 + iter->pos = -1;
2792 +}
2793 +
2794 #endif /* _LINUX_KERNEL_TRACE_H */
2795 diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
2796 index 810d78a8d14c..2905a3dd94c1 100644
2797 --- a/kernel/trace/trace_kdb.c
2798 +++ b/kernel/trace/trace_kdb.c
2799 @@ -41,12 +41,8 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
2800
2801 kdb_printf("Dumping ftrace buffer:\n");
2802
2803 - /* reset all but tr, trace, and overruns */
2804 - memset(&iter.seq, 0,
2805 - sizeof(struct trace_iterator) -
2806 - offsetof(struct trace_iterator, seq));
2807 + trace_iterator_reset(&iter);
2808 iter.iter_flags |= TRACE_FILE_LAT_FMT;
2809 - iter.pos = -1;
2810
2811 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2812 for_each_tracing_cpu(cpu) {
2813 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
2814 index bd4978ce8c45..15d1cb5aee18 100644
2815 --- a/net/bluetooth/hci_conn.c
2816 +++ b/net/bluetooth/hci_conn.c
2817 @@ -1392,8 +1392,16 @@ auth:
2818 return 0;
2819
2820 encrypt:
2821 - if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2822 + if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
2823 + /* Ensure that the encryption key size has been read,
2824 + * otherwise stall the upper layer responses.
2825 + */
2826 + if (!conn->enc_key_size)
2827 + return 0;
2828 +
2829 + /* Nothing else needed, all requirements are met */
2830 return 1;
2831 + }
2832
2833 hci_conn_encrypt(conn);
2834 return 0;
2835 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
2836 index 2c6eabf294b3..69e3be51a2c3 100644
2837 --- a/net/bluetooth/l2cap_core.c
2838 +++ b/net/bluetooth/l2cap_core.c
2839 @@ -1340,6 +1340,21 @@ static void l2cap_request_info(struct l2cap_conn *conn)
2840 sizeof(req), &req);
2841 }
2842
2843 +static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
2844 +{
2845 + /* The minimum encryption key size needs to be enforced by the
2846 + * host stack before establishing any L2CAP connections. The
2847 + * specification in theory allows a minimum of 1, but to align
2848 + * BR/EDR and LE transports, a minimum of 7 is chosen.
2849 + *
2850 + * This check might also be called for unencrypted connections
2851 + * that have no key size requirements. Ensure that the link is
2852 + * actually encrypted before enforcing a key size.
2853 + */
2854 + return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
2855 + hcon->enc_key_size > HCI_MIN_ENC_KEY_SIZE);
2856 +}
2857 +
2858 static void l2cap_do_start(struct l2cap_chan *chan)
2859 {
2860 struct l2cap_conn *conn = chan->conn;
2861 @@ -1357,9 +1372,14 @@ static void l2cap_do_start(struct l2cap_chan *chan)
2862 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
2863 return;
2864
2865 - if (l2cap_chan_check_security(chan, true) &&
2866 - __l2cap_no_conn_pending(chan))
2867 + if (!l2cap_chan_check_security(chan, true) ||
2868 + !__l2cap_no_conn_pending(chan))
2869 + return;
2870 +
2871 + if (l2cap_check_enc_key_size(conn->hcon))
2872 l2cap_start_connection(chan);
2873 + else
2874 + __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2875 }
2876
2877 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2878 @@ -1438,7 +1458,10 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
2879 continue;
2880 }
2881
2882 - l2cap_start_connection(chan);
2883 + if (l2cap_check_enc_key_size(conn->hcon))
2884 + l2cap_start_connection(chan);
2885 + else
2886 + l2cap_chan_close(chan, ECONNREFUSED);
2887
2888 } else if (chan->state == BT_CONNECT2) {
2889 struct l2cap_conn_rsp rsp;
2890 @@ -7455,7 +7478,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2891 }
2892
2893 if (chan->state == BT_CONNECT) {
2894 - if (!status)
2895 + if (!status && l2cap_check_enc_key_size(hcon))
2896 l2cap_start_connection(chan);
2897 else
2898 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2899 @@ -7464,7 +7487,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2900 struct l2cap_conn_rsp rsp;
2901 __u16 res, stat;
2902
2903 - if (!status) {
2904 + if (!status && l2cap_check_enc_key_size(hcon)) {
2905 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
2906 res = L2CAP_CR_PEND;
2907 stat = L2CAP_CS_AUTHOR_PEND;
2908 diff --git a/net/can/af_can.c b/net/can/af_can.c
2909 index 1684ba5b51eb..e386d654116d 100644
2910 --- a/net/can/af_can.c
2911 +++ b/net/can/af_can.c
2912 @@ -105,6 +105,7 @@ EXPORT_SYMBOL(can_ioctl);
2913 static void can_sock_destruct(struct sock *sk)
2914 {
2915 skb_queue_purge(&sk->sk_receive_queue);
2916 + skb_queue_purge(&sk->sk_error_queue);
2917 }
2918
2919 static const struct can_proto *can_get_proto(int protocol)
2920 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
2921 index 172aeae21ae9..35c6dfa13fa8 100644
2922 --- a/net/mac80211/ieee80211_i.h
2923 +++ b/net/mac80211/ieee80211_i.h
2924 @@ -2183,6 +2183,9 @@ void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
2925 const u8 *addr);
2926 void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
2927 void ieee80211_tdls_chsw_work(struct work_struct *wk);
2928 +void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
2929 + const u8 *peer, u16 reason);
2930 +const char *ieee80211_get_reason_code_string(u16 reason_code);
2931
2932 extern const struct ethtool_ops ieee80211_ethtool_ops;
2933
2934 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
2935 index 2ac749c4a6b2..1aaa73fa308e 100644
2936 --- a/net/mac80211/mlme.c
2937 +++ b/net/mac80211/mlme.c
2938 @@ -2868,7 +2868,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
2939 #define case_WLAN(type) \
2940 case WLAN_REASON_##type: return #type
2941
2942 -static const char *ieee80211_get_reason_code_string(u16 reason_code)
2943 +const char *ieee80211_get_reason_code_string(u16 reason_code)
2944 {
2945 switch (reason_code) {
2946 case_WLAN(UNSPECIFIED);
2947 @@ -2933,6 +2933,11 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
2948 if (len < 24 + 2)
2949 return;
2950
2951 + if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) {
2952 + ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code);
2953 + return;
2954 + }
2955 +
2956 if (ifmgd->associated &&
2957 ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) {
2958 const u8 *bssid = ifmgd->associated->bssid;
2959 @@ -2982,6 +2987,11 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
2960
2961 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
2962
2963 + if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) {
2964 + ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code);
2965 + return;
2966 + }
2967 +
2968 sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n",
2969 mgmt->sa, reason_code,
2970 ieee80211_get_reason_code_string(reason_code));
2971 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
2972 index e946ee4f335b..7523d995ea8a 100644
2973 --- a/net/mac80211/rx.c
2974 +++ b/net/mac80211/rx.c
2975 @@ -3752,6 +3752,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
2976 case NL80211_IFTYPE_STATION:
2977 if (!bssid && !sdata->u.mgd.use_4addr)
2978 return false;
2979 + if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
2980 + return false;
2981 if (multicast)
2982 return true;
2983 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
2984 diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
2985 index 6c647f425e05..67745d1d4c5d 100644
2986 --- a/net/mac80211/tdls.c
2987 +++ b/net/mac80211/tdls.c
2988 @@ -1992,3 +1992,26 @@ void ieee80211_tdls_chsw_work(struct work_struct *wk)
2989 }
2990 rtnl_unlock();
2991 }
2992 +
2993 +void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
2994 + const u8 *peer, u16 reason)
2995 +{
2996 + struct ieee80211_sta *sta;
2997 +
2998 + rcu_read_lock();
2999 + sta = ieee80211_find_sta(&sdata->vif, peer);
3000 + if (!sta || !sta->tdls) {
3001 + rcu_read_unlock();
3002 + return;
3003 + }
3004 + rcu_read_unlock();
3005 +
3006 + tdls_dbg(sdata, "disconnected from TDLS peer %pM (Reason: %u=%s)\n",
3007 + peer, reason,
3008 + ieee80211_get_reason_code_string(reason));
3009 +
3010 + ieee80211_tdls_oper_request(&sdata->vif, peer,
3011 + NL80211_TDLS_TEARDOWN,
3012 + WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE,
3013 + GFP_ATOMIC);
3014 +}
3015 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
3016 index 3deaa01ebee4..2558a34c9df1 100644
3017 --- a/net/mac80211/util.c
3018 +++ b/net/mac80211/util.c
3019 @@ -3523,7 +3523,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
3020 }
3021
3022 /* Always allow software iftypes */
3023 - if (local->hw.wiphy->software_iftypes & BIT(iftype)) {
3024 + if (local->hw.wiphy->software_iftypes & BIT(iftype) ||
3025 + (iftype == NL80211_IFTYPE_AP_VLAN &&
3026 + local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) {
3027 if (radar_detect)
3028 return -EINVAL;
3029 return 0;
3030 diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
3031 index 58d0b258b684..5dd48f0a4b1b 100644
3032 --- a/net/mac80211/wpa.c
3033 +++ b/net/mac80211/wpa.c
3034 @@ -1175,7 +1175,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
3035 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3036 struct ieee80211_key *key = rx->key;
3037 struct ieee80211_mmie_16 *mmie;
3038 - u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN];
3039 + u8 aad[GMAC_AAD_LEN], *mic, ipn[6], nonce[GMAC_NONCE_LEN];
3040 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3041
3042 if (!ieee80211_is_mgmt(hdr->frame_control))
3043 @@ -1206,13 +1206,18 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
3044 memcpy(nonce, hdr->addr2, ETH_ALEN);
3045 memcpy(nonce + ETH_ALEN, ipn, 6);
3046
3047 + mic = kmalloc(GMAC_MIC_LEN, GFP_ATOMIC);
3048 + if (!mic)
3049 + return RX_DROP_UNUSABLE;
3050 if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
3051 skb->data + 24, skb->len - 24,
3052 mic) < 0 ||
3053 crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
3054 key->u.aes_gmac.icverrors++;
3055 + kfree(mic);
3056 return RX_DROP_UNUSABLE;
3057 }
3058 + kfree(mic);
3059 }
3060
3061 memcpy(key->u.aes_gmac.rx_pn, ipn, 6);
3062 diff --git a/net/wireless/core.c b/net/wireless/core.c
3063 index a88551f3bc43..2a46ec3cb72c 100644
3064 --- a/net/wireless/core.c
3065 +++ b/net/wireless/core.c
3066 @@ -498,7 +498,7 @@ use_default_name:
3067 &rdev->rfkill_ops, rdev);
3068
3069 if (!rdev->rfkill) {
3070 - kfree(rdev);
3071 + wiphy_free(&rdev->wiphy);
3072 return NULL;
3073 }
3074
3075 @@ -1335,8 +1335,12 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
3076 }
3077 break;
3078 case NETDEV_PRE_UP:
3079 - if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
3080 + if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) &&
3081 + !(wdev->iftype == NL80211_IFTYPE_AP_VLAN &&
3082 + rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP &&
3083 + wdev->use_4addr))
3084 return notifier_from_errno(-EOPNOTSUPP);
3085 +
3086 if (rfkill_blocked(rdev->rfkill))
3087 return notifier_from_errno(-ERFKILL);
3088 break;
3089 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3090 index c6711ead5e59..8e2f03ab4cc9 100644
3091 --- a/net/wireless/nl80211.c
3092 +++ b/net/wireless/nl80211.c
3093 @@ -3191,8 +3191,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
3094 return -EINVAL;
3095 }
3096
3097 - if (!rdev->ops->add_virtual_intf ||
3098 - !(rdev->wiphy.interface_modes & (1 << type)))
3099 + if (!rdev->ops->add_virtual_intf)
3100 return -EOPNOTSUPP;
3101
3102 if ((type == NL80211_IFTYPE_P2P_DEVICE || type == NL80211_IFTYPE_NAN ||
3103 @@ -3211,6 +3210,11 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
3104 return err;
3105 }
3106
3107 + if (!(rdev->wiphy.interface_modes & (1 << type)) &&
3108 + !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr &&
3109 + rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP))
3110 + return -EOPNOTSUPP;
3111 +
3112 err = nl80211_parse_mon_options(rdev, type, info, &params);
3113 if (err < 0)
3114 return err;
3115 @@ -4607,8 +4611,10 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
3116 struct nlattr *sinfoattr, *bss_param;
3117
3118 hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
3119 - if (!hdr)
3120 + if (!hdr) {
3121 + cfg80211_sinfo_release_content(sinfo);
3122 return -1;
3123 + }
3124
3125 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
3126 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) ||
3127 diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
3128 index 34414c6efad6..a2c9e7f98e06 100755
3129 --- a/scripts/checkstack.pl
3130 +++ b/scripts/checkstack.pl
3131 @@ -46,7 +46,7 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
3132 $x = "[0-9a-f]"; # hex character
3133 $xs = "[0-9a-f ]"; # hex character or space
3134 $funcre = qr/^$x* <(.*)>:$/;
3135 - if ($arch eq 'aarch64') {
3136 + if ($arch =~ '^(aarch|arm)64$') {
3137 #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]!
3138 $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
3139 } elsif ($arch eq 'arm') {
3140 diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
3141 index ab64c6b5db5a..28c098fb6208 100644
3142 --- a/security/apparmor/include/policy.h
3143 +++ b/security/apparmor/include/policy.h
3144 @@ -214,7 +214,16 @@ static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p)
3145 return labels_profile(aa_get_newest_label(&p->label));
3146 }
3147
3148 -#define PROFILE_MEDIATES(P, T) ((P)->policy.start[(unsigned char) (T)])
3149 +static inline unsigned int PROFILE_MEDIATES(struct aa_profile *profile,
3150 + unsigned char class)
3151 +{
3152 + if (class <= AA_CLASS_LAST)
3153 + return profile->policy.start[class];
3154 + else
3155 + return aa_dfa_match_len(profile->policy.dfa,
3156 + profile->policy.start[0], &class, 1);
3157 +}
3158 +
3159 static inline unsigned int PROFILE_MEDIATES_AF(struct aa_profile *profile,
3160 u16 AF) {
3161 unsigned int state = PROFILE_MEDIATES(profile, AA_CLASS_NET);
3162 diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
3163 index 21cb384d712a..088ea2ac8570 100644
3164 --- a/security/apparmor/policy_unpack.c
3165 +++ b/security/apparmor/policy_unpack.c
3166 @@ -276,7 +276,7 @@ static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
3167 char *tag = NULL;
3168 size_t size = unpack_u16_chunk(e, &tag);
3169 /* if a name is specified it must match. otherwise skip tag */
3170 - if (name && (!size || strcmp(name, tag)))
3171 + if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
3172 goto fail;
3173 } else if (name) {
3174 /* if a name is specified and there is no name tag fail */
3175 diff --git a/tools/objtool/check.c b/tools/objtool/check.c
3176 index 02a47e365e52..ecf5fc77f50b 100644
3177 --- a/tools/objtool/check.c
3178 +++ b/tools/objtool/check.c
3179 @@ -839,7 +839,7 @@ static int add_switch_table(struct objtool_file *file, struct instruction *insn,
3180 struct symbol *pfunc = insn->func->pfunc;
3181 unsigned int prev_offset = 0;
3182
3183 - list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
3184 + list_for_each_entry_from(rela, &table->rela_sec->rela_list, list) {
3185 if (rela == next_table)
3186 break;
3187
3188 @@ -929,6 +929,7 @@ static struct rela *find_switch_table(struct objtool_file *file,
3189 {
3190 struct rela *text_rela, *rodata_rela;
3191 struct instruction *orig_insn = insn;
3192 + struct section *rodata_sec;
3193 unsigned long table_offset;
3194
3195 /*
3196 @@ -956,10 +957,13 @@ static struct rela *find_switch_table(struct objtool_file *file,
3197 /* look for a relocation which references .rodata */
3198 text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
3199 insn->len);
3200 - if (!text_rela || text_rela->sym != file->rodata->sym)
3201 + if (!text_rela || text_rela->sym->type != STT_SECTION ||
3202 + !text_rela->sym->sec->rodata)
3203 continue;
3204
3205 table_offset = text_rela->addend;
3206 + rodata_sec = text_rela->sym->sec;
3207 +
3208 if (text_rela->type == R_X86_64_PC32)
3209 table_offset += 4;
3210
3211 @@ -967,10 +971,10 @@ static struct rela *find_switch_table(struct objtool_file *file,
3212 * Make sure the .rodata address isn't associated with a
3213 * symbol. gcc jump tables are anonymous data.
3214 */
3215 - if (find_symbol_containing(file->rodata, table_offset))
3216 + if (find_symbol_containing(rodata_sec, table_offset))
3217 continue;
3218
3219 - rodata_rela = find_rela_by_dest(file->rodata, table_offset);
3220 + rodata_rela = find_rela_by_dest(rodata_sec, table_offset);
3221 if (rodata_rela) {
3222 /*
3223 * Use of RIP-relative switch jumps is quite rare, and
3224 @@ -1055,7 +1059,7 @@ static int add_switch_table_alts(struct objtool_file *file)
3225 struct symbol *func;
3226 int ret;
3227
3228 - if (!file->rodata || !file->rodata->rela)
3229 + if (!file->rodata)
3230 return 0;
3231
3232 for_each_sec(file, sec) {
3233 @@ -1201,10 +1205,33 @@ static int read_retpoline_hints(struct objtool_file *file)
3234 return 0;
3235 }
3236
3237 +static void mark_rodata(struct objtool_file *file)
3238 +{
3239 + struct section *sec;
3240 + bool found = false;
3241 +
3242 + /*
3243 + * This searches for the .rodata section or multiple .rodata.func_name
3244 + * sections if -fdata-sections is being used. The .str.1.1 and .str.1.8
3245 + * rodata sections are ignored as they don't contain jump tables.
3246 + */
3247 + for_each_sec(file, sec) {
3248 + if (!strncmp(sec->name, ".rodata", 7) &&
3249 + !strstr(sec->name, ".str1.")) {
3250 + sec->rodata = true;
3251 + found = true;
3252 + }
3253 + }
3254 +
3255 + file->rodata = found;
3256 +}
3257 +
3258 static int decode_sections(struct objtool_file *file)
3259 {
3260 int ret;
3261
3262 + mark_rodata(file);
3263 +
3264 ret = decode_instructions(file);
3265 if (ret)
3266 return ret;
3267 @@ -2176,7 +2203,6 @@ int check(const char *_objname, bool orc)
3268 INIT_LIST_HEAD(&file.insn_list);
3269 hash_init(file.insn_hash);
3270 file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
3271 - file.rodata = find_section_by_name(file.elf, ".rodata");
3272 file.c_file = find_section_by_name(file.elf, ".comment");
3273 file.ignore_unreachables = no_unreachable;
3274 file.hints = false;
3275 diff --git a/tools/objtool/check.h b/tools/objtool/check.h
3276 index 95700a2bcb7c..e6e8a655b556 100644
3277 --- a/tools/objtool/check.h
3278 +++ b/tools/objtool/check.h
3279 @@ -60,8 +60,8 @@ struct objtool_file {
3280 struct elf *elf;
3281 struct list_head insn_list;
3282 DECLARE_HASHTABLE(insn_hash, 16);
3283 - struct section *rodata, *whitelist;
3284 - bool ignore_unreachables, c_file, hints;
3285 + struct section *whitelist;
3286 + bool ignore_unreachables, c_file, hints, rodata;
3287 };
3288
3289 int check(const char *objname, bool orc);
3290 diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
3291 index b75d004f6482..abed594a9653 100644
3292 --- a/tools/objtool/elf.c
3293 +++ b/tools/objtool/elf.c
3294 @@ -390,6 +390,7 @@ static int read_relas(struct elf *elf)
3295 rela->offset = rela->rela.r_offset;
3296 symndx = GELF_R_SYM(rela->rela.r_info);
3297 rela->sym = find_symbol_by_index(elf, symndx);
3298 + rela->rela_sec = sec;
3299 if (!rela->sym) {
3300 WARN("can't find rela entry symbol %d for %s",
3301 symndx, sec->name);
3302 diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
3303 index de5cd2ddded9..bc97ed86b9cd 100644
3304 --- a/tools/objtool/elf.h
3305 +++ b/tools/objtool/elf.h
3306 @@ -48,7 +48,7 @@ struct section {
3307 char *name;
3308 int idx;
3309 unsigned int len;
3310 - bool changed, text;
3311 + bool changed, text, rodata;
3312 };
3313
3314 struct symbol {
3315 @@ -68,6 +68,7 @@ struct rela {
3316 struct list_head list;
3317 struct hlist_node hash;
3318 GElf_Rela rela;
3319 + struct section *rela_sec;
3320 struct symbol *sym;
3321 unsigned int type;
3322 unsigned long offset;
3323 diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c
3324 index be59f9c34ea2..79053a4f4783 100644
3325 --- a/tools/testing/selftests/cgroup/test_core.c
3326 +++ b/tools/testing/selftests/cgroup/test_core.c
3327 @@ -198,7 +198,7 @@ static int test_cgcore_no_internal_process_constraint_on_threads(const char *roo
3328 char *parent = NULL, *child = NULL;
3329
3330 if (cg_read_strstr(root, "cgroup.controllers", "cpu") ||
3331 - cg_read_strstr(root, "cgroup.subtree_control", "cpu")) {
3332 + cg_write(root, "cgroup.subtree_control", "+cpu")) {
3333 ret = KSFT_SKIP;
3334 goto cleanup;
3335 }
3336 @@ -376,6 +376,11 @@ int main(int argc, char *argv[])
3337
3338 if (cg_find_unified_root(root, sizeof(root)))
3339 ksft_exit_skip("cgroup v2 isn't mounted\n");
3340 +
3341 + if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
3342 + if (cg_write(root, "cgroup.subtree_control", "+memory"))
3343 + ksft_exit_skip("Failed to set memory controller\n");
3344 +
3345 for (i = 0; i < ARRAY_SIZE(tests); i++) {
3346 switch (tests[i].fn(root)) {
3347 case KSFT_PASS:
3348 diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
3349 index 6f339882a6ca..c19a97dd02d4 100644
3350 --- a/tools/testing/selftests/cgroup/test_memcontrol.c
3351 +++ b/tools/testing/selftests/cgroup/test_memcontrol.c
3352 @@ -1205,6 +1205,10 @@ int main(int argc, char **argv)
3353 if (cg_read_strstr(root, "cgroup.controllers", "memory"))
3354 ksft_exit_skip("memory controller isn't available\n");
3355
3356 + if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
3357 + if (cg_write(root, "cgroup.subtree_control", "+memory"))
3358 + ksft_exit_skip("Failed to set memory controller\n");
3359 +
3360 for (i = 0; i < ARRAY_SIZE(tests); i++) {
3361 switch (tests[i].fn(root)) {
3362 case KSFT_PASS:
3363 diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
3364 index dc68340a6a96..2cf3dc49bd03 100644
3365 --- a/tools/testing/selftests/vm/Makefile
3366 +++ b/tools/testing/selftests/vm/Makefile
3367 @@ -24,6 +24,8 @@ TEST_GEN_FILES += virtual_address_range
3368
3369 TEST_PROGS := run_vmtests
3370
3371 +TEST_FILES := test_vmalloc.sh
3372 +
3373 KSFT_KHDR_INSTALL := 1
3374 include ../lib.mk
3375