Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0309-5.4.210-all-fixes.patch



Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 34650 byte(s)
-add missing
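
This update moves the tree from 5.4.209 to 5.4.210. The headline change is the x86 PBRSB (Post-Barrier Return Stack Buffer) mitigation for eIBRS CPUs; the rest covers ACPI BERT record printing, ACPI backlight DMI quirks, an ADB bounds check, V4L2 mem2mem MMAP offsets, of-thermal NULL-ops checks, a BPF verifier bounds fix with matching selftests, and KVM fixes. As a quick post-boot sanity check, the PBRSB reporting this patch adds to the spectre_v2 sysfs file can be read from user space. The C sketch below is illustrative only and not part of the patch (patch(1) skips text ahead of the first diff header); it assumes the standard sysfs path used by the kernel's vulnerability reporting.

/*
 * Minimal sketch: inspect the PBRSB-eIBRS field that this patch adds to
 * /sys/devices/system/cpu/vulnerabilities/spectre_v2. Illustrative only.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[256] = "";
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror("spectre_v2");
		return 1;
	}
	fclose(f);

	if (strstr(buf, "PBRSB-eIBRS: SW sequence"))
		puts("PBRSB mitigated: RSB_VMEXIT_LITE call/ret guard in use");
	else if (strstr(buf, "PBRSB-eIBRS: Vulnerable"))
		puts("PBRSB reported but not mitigated");
	else if (strstr(buf, "PBRSB-eIBRS: Not affected"))
		puts("CPU not affected by PBRSB");
	else
		puts("No PBRSB-eIBRS field (kernel without this patch?)");
	return 0;
}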
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
index 6bd97cd50d625..7e061ed449aaa 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -422,6 +422,14 @@ The possible values in this file are:
 'RSB filling' Protection of RSB on context switch enabled
 ============= ===========================================

+ - EIBRS Post-barrier Return Stack Buffer (PBRSB) protection status:
+
+ =========================== =======================================================
+ 'PBRSB-eIBRS: SW sequence' CPU is affected and protection of RSB on VMEXIT enabled
+ 'PBRSB-eIBRS: Vulnerable' CPU is vulnerable
+ 'PBRSB-eIBRS: Not affected' CPU is not affected by PBRSB
+ =========================== =======================================================
+
 Full mitigation might require a microcode update from the CPU
 vendor. When the necessary microcode is not available, the kernel will
 report vulnerability.
diff --git a/Makefile b/Makefile
index 7093e3b03b9f7..74abb7e389f33 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 209
+SUBLEVEL = 210
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 8c28a2365a92b..a3e32bc938562 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -286,6 +286,7 @@
 #define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
 #define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
 #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
+#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+ 6) /* "" Fill RSB on VM exit when EIBRS is enabled */

 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
@@ -406,5 +407,6 @@
 #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
 #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
+#define X86_BUG_EIBRS_PBRSB X86_BUG(26) /* EIBRS is vulnerable to Post Barrier RSB Predictions */

 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index c56042916a7c3..cef4eba03ff36 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -129,6 +129,10 @@
 * bit available to control VERW
 * behavior.
 */
+#define ARCH_CAP_PBRSB_NO BIT(24) /*
+ * Not susceptible to Post-Barrier
+ * Return Stack Buffer Predictions.
+ */

 #define MSR_IA32_FLUSH_CMD 0x0000010b
 #define L1D_FLUSH BIT(0) /*
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index ece2b2c6d020d..1e5df3ccdd5cb 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -61,7 +61,16 @@
 774: \
 dec reg; \
 jnz 771b; \
- add $(BITS_PER_LONG/8) * nr, sp;
+ add $(BITS_PER_LONG/8) * nr, sp; \
+ /* barrier for jnz misprediction */ \
+ lfence;
+
+#define __ISSUE_UNBALANCED_RET_GUARD(sp) \
+ call 881f; \
+ int3; \
+881: \
+ add $(BITS_PER_LONG/8), sp; \
+ lfence;

 #ifdef __ASSEMBLY__

@@ -130,6 +139,14 @@
 #else
 call *\reg
 #endif
+.endm
+
+.macro ISSUE_UNBALANCED_RET_GUARD ftr:req
+ ANNOTATE_NOSPEC_ALTERNATIVE
+ ALTERNATIVE "jmp .Lskip_pbrsb_\@", \
+ __stringify(__ISSUE_UNBALANCED_RET_GUARD(%_ASM_SP)) \
+ \ftr
+.Lskip_pbrsb_\@:
 .endm

 /*
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 09d02b1f6f71f..57efa90f3fbd0 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1043,6 +1043,49 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
 return SPECTRE_V2_RETPOLINE;
 }

+static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
+{
+ /*
+ * Similar to context switches, there are two types of RSB attacks
+ * after VM exit:
+ *
+ * 1) RSB underflow
+ *
+ * 2) Poisoned RSB entry
+ *
+ * When retpoline is enabled, both are mitigated by filling/clearing
+ * the RSB.
+ *
+ * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
+ * prediction isolation protections, RSB still needs to be cleared
+ * because of #2. Note that SMEP provides no protection here, unlike
+ * user-space-poisoned RSB entries.
+ *
+ * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
+ * bug is present then a LITE version of RSB protection is required,
+ * just a single call needs to retire before a RET is executed.
+ */
+ switch (mode) {
+ case SPECTRE_V2_NONE:
+ /* These modes already fill RSB at vmexit */
+ case SPECTRE_V2_LFENCE:
+ case SPECTRE_V2_RETPOLINE:
+ case SPECTRE_V2_EIBRS_RETPOLINE:
+ return;
+
+ case SPECTRE_V2_EIBRS_LFENCE:
+ case SPECTRE_V2_EIBRS:
+ if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
+ setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
+ pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
+ }
+ return;
+ }
+
+ pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
+ dump_stack();
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -1135,6 +1178,8 @@ static void __init spectre_v2_select_mitigation(void)
 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
 pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

+ spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
+
 /*
 * Retpoline means the kernel is safe because it has no indirect
 * branches. Enhanced IBRS protects firmware too, so, enable restricted
@@ -1879,6 +1924,19 @@ static char *ibpb_state(void)
 return "";
 }

+static char *pbrsb_eibrs_state(void)
+{
+ if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
+ if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
+ boot_cpu_has(X86_FEATURE_RETPOLINE))
+ return ", PBRSB-eIBRS: SW sequence";
+ else
+ return ", PBRSB-eIBRS: Vulnerable";
+ } else {
+ return ", PBRSB-eIBRS: Not affected";
+ }
+}
+
 static ssize_t spectre_v2_show_state(char *buf)
 {
 if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
@@ -1891,12 +1949,13 @@ static ssize_t spectre_v2_show_state(char *buf)
 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
 return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

- return sprintf(buf, "%s%s%s%s%s%s\n",
+ return sprintf(buf, "%s%s%s%s%s%s%s\n",
 spectre_v2_strings[spectre_v2_enabled],
 ibpb_state(),
 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
 stibp_state(),
 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+ pbrsb_eibrs_state(),
 spectre_v2_module_string());
 }

diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 305f30e45f3d3..b926b7244d42d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1025,6 +1025,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define NO_SWAPGS BIT(6)
 #define NO_ITLB_MULTIHIT BIT(7)
 #define NO_SPECTRE_V2 BIT(8)
+#define NO_EIBRS_PBRSB BIT(9)

 #define VULNWL(_vendor, _family, _model, _whitelist) \
 { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -1065,7 +1066,7 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {

 VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
 VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),

 /*
 * Technically, swapgs isn't serializing on AMD (despite it previously
@@ -1075,7 +1076,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 * good enough for our purposes.
 */

- VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_TREMONT, NO_EIBRS_PBRSB),
+ VULNWL_INTEL(ATOM_TREMONT_L, NO_EIBRS_PBRSB),
+ VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),

 /* AMD Family 0xf - 0x12 */
 VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
@@ -1236,6 +1239,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 !arch_cap_mmio_immune(ia32_cap))
 setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);

+ if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
+ !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+ !(ia32_cap & ARCH_CAP_PBRSB_NO))
+ setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+
 if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 return;

diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index ca4252f81bf81..946d9205c3b6d 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -92,6 +92,7 @@ ENTRY(vmx_vmexit)
 pop %_ASM_AX
 .Lvmexit_skip_rsb:
 #endif
+ ISSUE_UNBALANCED_RET_GUARD X86_FEATURE_RSB_VMEXIT_LITE
 ret
 ENDPROC(vmx_vmexit)

diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
index 76b7539a37a93..a06f35528c9a7 100644
--- a/drivers/acpi/apei/bert.c
+++ b/drivers/acpi/apei/bert.c
@@ -29,16 +29,26 @@

 #undef pr_fmt
 #define pr_fmt(fmt) "BERT: " fmt
+
+#define ACPI_BERT_PRINT_MAX_RECORDS 5
 #define ACPI_BERT_PRINT_MAX_LEN 1024

 static int bert_disable;

+/*
+ * Print "all" the error records in the BERT table, but avoid huge spam to
+ * the console if the BIOS included oversize records, or too many records.
+ * Skipping some records here does not lose anything because the full
+ * data is available to user tools in:
+ * /sys/firmware/acpi/tables/data/BERT
+ */
 static void __init bert_print_all(struct acpi_bert_region *region,
 unsigned int region_len)
 {
 struct acpi_hest_generic_status *estatus =
 (struct acpi_hest_generic_status *)region;
 int remain = region_len;
+ int printed = 0, skipped = 0;
 u32 estatus_len;

 while (remain >= sizeof(struct acpi_bert_region)) {
@@ -46,24 +56,26 @@ static void __init bert_print_all(struct acpi_bert_region *region,
 if (remain < estatus_len) {
 pr_err(FW_BUG "Truncated status block (length: %u).\n",
 estatus_len);
- return;
+ break;
 }

 /* No more error records. */
 if (!estatus->block_status)
- return;
+ break;

 if (cper_estatus_check(estatus)) {
 pr_err(FW_BUG "Invalid error record.\n");
- return;
+ break;
 }

- pr_info_once("Error records from previous boot:\n");
- if (region_len < ACPI_BERT_PRINT_MAX_LEN)
+ if (estatus_len < ACPI_BERT_PRINT_MAX_LEN &&
+ printed < ACPI_BERT_PRINT_MAX_RECORDS) {
+ pr_info_once("Error records from previous boot:\n");
 cper_estatus_print(KERN_INFO HW_ERR, estatus);
- else
- pr_info_once("Max print length exceeded, table data is available at:\n"
- "/sys/firmware/acpi/tables/data/BERT");
+ printed++;
+ } else {
+ skipped++;
+ }

 /*
 * Because the boot error source is "one-time polled" type,
@@ -75,6 +87,9 @@ static void __init bert_print_all(struct acpi_bert_region *region,
 estatus = (void *)estatus + estatus_len;
 remain -= estatus_len;
 }
+
+ if (skipped)
+ pr_info(HW_ERR "Skipped %d error records\n", skipped);
 }

 static int __init setup_bert_disable(char *str)
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index de4142723ff48..3b972ca536896 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -387,7 +387,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 .callback = video_detect_force_native,
 .ident = "Clevo NL5xRU",
 .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
 DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
 },
 },
@@ -395,59 +394,75 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 .callback = video_detect_force_native,
 .ident = "Clevo NL5xRU",
 .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
 },
 },
 {
 .callback = video_detect_force_native,
 .ident = "Clevo NL5xRU",
 .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
 },
 },
 {
 .callback = video_detect_force_native,
- .ident = "Clevo NL5xRU",
+ .ident = "Clevo NL5xNU",
 .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
 },
 },
+ /*
+ * The TongFang PF5PU1G, PF4NU1F, PF5NU1G, and PF5LUXG/TUXEDO BA15 Gen10,
+ * Pulse 14/15 Gen1, and Pulse 15 Gen2 have the same problem as the Clevo
+ * NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description
+ * above.
+ */
 {
 .callback = video_detect_force_native,
- .ident = "Clevo NL5xRU",
+ .ident = "TongFang PF5PU1G",
 .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
+ DMI_MATCH(DMI_BOARD_NAME, "PF5PU1G"),
 },
 },
 {
 .callback = video_detect_force_native,
- .ident = "Clevo NL5xNU",
+ .ident = "TongFang PF4NU1F",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PF4NU1F"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang PF4NU1F",
 .matches = {
 DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ DMI_MATCH(DMI_BOARD_NAME, "PULSE1401"),
 },
 },
 {
 .callback = video_detect_force_native,
- .ident = "Clevo NL5xNU",
+ .ident = "TongFang PF5NU1G",
 .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ DMI_MATCH(DMI_BOARD_NAME, "PF5NU1G"),
 },
 },
 {
 .callback = video_detect_force_native,
- .ident = "Clevo NL5xNU",
+ .ident = "TongFang PF5NU1G",
 .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "PULSE1501"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang PF5LUXG",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"),
 },
 },
-
 /*
 * Desktops which falsely report a backlight and which our heuristics
 * for this do not catch.
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index e49d1f287a175..c37d5fce86f79 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -647,7 +647,7 @@ do_adb_query(struct adb_request *req)

 switch(req->data[1]) {
 case ADB_QUERY_GETDEVINFO:
- if (req->nbytes < 3)
+ if (req->nbytes < 3 || req->data[2] >= 16)
 break;
 mutex_lock(&adb_handler_mutex);
 req->reply[0] = adb_handler[req->data[2]].original_address;
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 639dc8d45e603..d56837c04a81a 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -460,19 +460,14 @@ int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

-int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
- struct v4l2_buffer *buf)
+static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
+ struct v4l2_buffer *buf)
 {
- struct vb2_queue *vq;
- int ret = 0;
- unsigned int i;
-
- vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- ret = vb2_querybuf(vq, buf);
-
 /* Adjust MMAP memory offsets for the CAPTURE queue */
 if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
 if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
+ unsigned int i;
+
 for (i = 0; i < buf->length; ++i)
 buf->m.planes[i].m.mem_offset
 += DST_QUEUE_OFF_BASE;
@@ -480,8 +475,23 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 buf->m.offset += DST_QUEUE_OFF_BASE;
 }
 }
+}

- return ret;
+int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = vb2_querybuf(vq, buf);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

@@ -500,10 +510,16 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 return -EPERM;
 }
 ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
- if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
 v4l2_m2m_try_schedule(m2m_ctx);

- return ret;
+ return 0;
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

@@ -511,9 +527,17 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 struct v4l2_buffer *buf)
 {
 struct vb2_queue *vq;
+ int ret;

 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+ ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

@@ -522,9 +546,17 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 {
 struct video_device *vdev = video_devdata(file);
 struct vb2_queue *vq;
+ int ret;

 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
+ ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 68d0c181ec7bb..1f38da5da6e45 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -91,7 +91,7 @@ static int of_thermal_get_temp(struct thermal_zone_device *tz,
 {
 struct __thermal_zone *data = tz->devdata;

- if (!data->ops->get_temp)
+ if (!data->ops || !data->ops->get_temp)
 return -EINVAL;

 return data->ops->get_temp(data->sensor_data, temp);
@@ -188,6 +188,9 @@ static int of_thermal_set_emul_temp(struct thermal_zone_device *tz,
 {
 struct __thermal_zone *data = tz->devdata;

+ if (!data->ops || !data->ops->set_emul_temp)
+ return -EINVAL;
+
 return data->ops->set_emul_temp(data->sensor_data, temp);
 }

@@ -196,7 +199,7 @@ static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
 {
 struct __thermal_zone *data = tz->devdata;

- if (!data->ops->get_trend)
+ if (!data->ops || !data->ops->get_trend)
 return -EINVAL;

 return data->ops->get_trend(data->sensor_data, trip, trend);
@@ -336,7 +339,7 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
 if (trip >= data->ntrips || trip < 0)
 return -EDOM;

- if (data->ops->set_trip_temp) {
+ if (data->ops && data->ops->set_trip_temp) {
 int ret;

 ret = data->ops->set_trip_temp(data->sensor_data, trip, temp);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 34262d83dce11..f705d3752fe0d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5083,6 +5083,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 coerce_reg_to_size(dst_reg, 4);
 }

+ __update_reg_bounds(dst_reg);
 __reg_deduce_bounds(dst_reg);
 __reg_bound_offset(dst_reg);
 return 0;
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 4133c721af6ed..59f924e92c284 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -284,6 +284,7 @@
 #define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
 #define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
 #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
+#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+ 6) /* "" Fill RSB on VM-Exit when EIBRS is enabled */

 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 0bfad86ec960a..cb0631098f918 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -3068,7 +3068,8 @@ struct bpf_sock {
 __u32 src_ip4;
 __u32 src_ip6[4];
 __u32 src_port; /* host byte order */
- __u32 dst_port; /* network byte order */
+ __be16 dst_port; /* network byte order */
+ __u16 :16; /* zero padding */
 __u32 dst_ip4;
 __u32 dst_ip6[4];
 __u32 state;
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
index 0262f7b374f9c..4b9a26caa2c2e 100644
--- a/tools/testing/selftests/bpf/test_align.c
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -359,15 +359,15 @@ static struct bpf_align_test tests[] = {
 * is still (4n), fixed offset is not changed.
 * Also, we create a new reg->id.
 */
- {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
+ {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
 /* At the time the word size load is performed from R5,
 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
 * which is 20. Then the variable offset is (4n), so
 * the total offset is 4-byte aligned and meets the
 * load's requirements.
 */
- {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
- {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
+ {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
+ {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
 },
 },
 {
@@ -410,15 +410,15 @@ static struct bpf_align_test tests[] = {
 /* Adding 14 makes R6 be (4n+2) */
 {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
 /* Packet pointer has (4n+2) offset */
- {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
- {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+ {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
 /* At the time the word size load is performed from R5,
 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
 * which is 2. Then the variable offset is (4n+2), so
 * the total offset is 4-byte aligned and meets the
 * load's requirements.
 */
- {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
 /* Newly read value in R6 was shifted left by 2, so has
 * known alignment of 4.
 */
@@ -426,15 +426,15 @@ static struct bpf_align_test tests[] = {
 /* Added (4n) to packet pointer's (4n+2) var_off, giving
 * another (4n+2).
 */
- {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
- {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
+ {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+ {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
 /* At the time the word size load is performed from R5,
 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
 * which is 2. Then the variable offset is (4n+2), so
 * the total offset is 4-byte aligned and meets the
 * load's requirements.
 */
- {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
+ {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
 },
 },
 {
@@ -469,16 +469,16 @@ static struct bpf_align_test tests[] = {
 .matches = {
 {4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
 /* (ptr - ptr) << 2 == unknown, (4n) */
- {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
+ {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
 /* (4n) + 14 == (4n+2). We blow our bounds, because
 * the add could overflow.
 */
- {7, "R5_w=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
+ {7, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
 /* Checked s>=0 */
- {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+ {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
 /* packet pointer + nonnegative (4n+2) */
- {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
- {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+ {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
 /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
 * We checked the bounds, but it might have been able
 * to overflow if the packet pointer started in the
@@ -486,7 +486,7 @@ static struct bpf_align_test tests[] = {
 * So we did not get a 'range' on R6, and the access
 * attempt will fail.
 */
- {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+ {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
 }
 },
 {
@@ -528,7 +528,7 @@ static struct bpf_align_test tests[] = {
 /* New unknown value in R7 is (4n) */
 {11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 /* Subtracting it from R6 blows our unsigned bounds */
- {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
+ {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
 /* Checked s>= 0 */
 {14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
 /* At the time the word size load is performed from R5,
@@ -537,7 +537,8 @@ static struct bpf_align_test tests[] = {
 * the total offset is 4-byte aligned and meets the
 * load's requirements.
 */
- {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},
+
 },
 },
 {
@@ -579,18 +580,18 @@ static struct bpf_align_test tests[] = {
 /* Adding 14 makes R6 be (4n+2) */
 {11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
 /* Subtracting from packet pointer overflows ubounds */
- {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
+ {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
 /* New unknown value in R7 is (4n), >= 76 */
 {15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
 /* Adding it to packet pointer gives nice bounds again */
- {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
+ {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
 /* At the time the word size load is performed from R5,
 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
 * which is 2. Then the variable offset is (4n+2), so
 * the total offset is 4-byte aligned and meets the
 * load's requirements.
 */
- {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
+ {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
 },
 },
 };
diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
index 92c02e4a1b626..313b345eddcc3 100644
--- a/tools/testing/selftests/bpf/verifier/bounds.c
+++ b/tools/testing/selftests/bpf/verifier/bounds.c
@@ -411,16 +411,14 @@
 BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
 /* r1 = 0xffff'fffe (NOT 0!) */
 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
- /* computes OOB pointer */
+ /* error on computing OOB pointer */
 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
 /* exit */
 BPF_MOV64_IMM(BPF_REG_0, 0),
 BPF_EXIT_INSN(),
 },
 .fixup_map_hash_8b = { 3 },
- .errstr = "R0 invalid mem access",
+ .errstr = "math between map_value pointer and 4294967294 is not allowed",
 .result = REJECT,
 },
 {
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
index 9ed192e14f5fe..b2ce50bb935b8 100644
--- a/tools/testing/selftests/bpf/verifier/sock.c
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -121,7 +121,25 @@
 .result = ACCEPT,
 },
 {
- "sk_fullsock(skb->sk): sk->dst_port [narrow load]",
+ "sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_port [half load]",
 .insns = {
 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
@@ -139,7 +157,64 @@
 .result = ACCEPT,
 },
 {
- "sk_fullsock(skb->sk): sk->dst_port [load 2nd byte]",
+ "sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock access",
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_port [byte load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
+ BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock access",
+},
+{
+ "sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)",
 .insns = {
 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
@@ -149,7 +224,7 @@
 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
 BPF_MOV64_IMM(BPF_REG_0, 0),
 BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, dst_port)),
 BPF_MOV64_IMM(BPF_REG_0, 0),
 BPF_EXIT_INSN(),
 },
diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
index 6cd91970fbad3..3b2a426070c44 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
@@ -73,20 +73,19 @@ void ucall_uninit(struct kvm_vm *vm)

 void ucall(uint64_t cmd, int nargs, ...)
 {
- struct ucall uc = {
- .cmd = cmd,
- };
+ struct ucall uc = {};
 va_list va;
 int i;

+ WRITE_ONCE(uc.cmd, cmd);
 nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;

 va_start(va, nargs);
 for (i = 0; i < nargs; ++i)
- uc.args[i] = va_arg(va, uint64_t);
+ WRITE_ONCE(uc.args[i], va_arg(va, uint64_t));
 va_end(va);

- *ucall_exit_mmio_addr = (vm_vaddr_t)&uc;
+ WRITE_ONCE(*ucall_exit_mmio_addr, (vm_vaddr_t)&uc);
 }

 uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 287444e52ccf8..4b445dddb7985 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3329,8 +3329,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
 kvm_put_kvm(kvm);
 mutex_lock(&kvm->lock);
 list_del(&dev->vm_node);
+ if (ops->release)
+ ops->release(dev);
 mutex_unlock(&kvm->lock);
- ops->destroy(dev);
+ if (ops->destroy)
+ ops->destroy(dev);
 return ret;
 }
