Contents of /trunk/kernel-alx/patches-4.14/0162-4.14.63-all-fixes.patch
Revision 3238
Fri Nov 9 12:14:58 2018 UTC (5 years, 10 months ago) by niro
File size: 179128 byte(s)
-added up to patches-4.14.79
1 | diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu |
2 | index 8355e79350b7..6cae60929cb6 100644 |
3 | --- a/Documentation/ABI/testing/sysfs-devices-system-cpu |
4 | +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu |
5 | @@ -379,6 +379,7 @@ What: /sys/devices/system/cpu/vulnerabilities |
6 | /sys/devices/system/cpu/vulnerabilities/spectre_v1 |
7 | /sys/devices/system/cpu/vulnerabilities/spectre_v2 |
8 | /sys/devices/system/cpu/vulnerabilities/spec_store_bypass |
9 | + /sys/devices/system/cpu/vulnerabilities/l1tf |
10 | Date: January 2018 |
11 | Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> |
12 | Description: Information about CPU vulnerabilities |
13 | @@ -390,3 +391,26 @@ Description: Information about CPU vulnerabilities |
14 | "Not affected" CPU is not affected by the vulnerability |
15 | "Vulnerable" CPU is affected and no mitigation in effect |
16 | "Mitigation: $M" CPU is affected and mitigation $M is in effect |
17 | + |
18 | + Details about the l1tf file can be found in |
19 | + Documentation/admin-guide/l1tf.rst |
20 | + |
21 | +What: /sys/devices/system/cpu/smt |
22 | + /sys/devices/system/cpu/smt/active |
23 | + /sys/devices/system/cpu/smt/control |
24 | +Date: June 2018 |
25 | +Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> |
26 | +Description: Control Symmetric Multi Threading (SMT) |
27 | + |
28 | + active: Tells whether SMT is active (enabled and siblings online) |
29 | + |
30 | + control: Read/write interface to control SMT. Possible |
31 | + values: |
32 | + |
33 | + "on" SMT is enabled |
34 | + "off" SMT is disabled |
35 | + "forceoff" SMT is force disabled. Cannot be changed. |
36 | + "notsupported" SMT is not supported by the CPU |
37 | + |
38 | + If control status is "forceoff" or "notsupported" writes |
39 | + are rejected. |
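
A minimal userspace sketch of this interface, assuming a kernel that carries this patch (the two files do not exist otherwise):

    /* Read the SMT state files documented above. */
    #include <stdio.h>
    #include <string.h>

    static int read_line(const char *path, char *buf, size_t len)
    {
        FILE *f = fopen(path, "r");

        if (!f)
            return -1;
        if (!fgets(buf, len, f)) {
            fclose(f);
            return -1;
        }
        fclose(f);
        buf[strcspn(buf, "\n")] = '\0';
        return 0;
    }

    int main(void)
    {
        char buf[32];

        if (!read_line("/sys/devices/system/cpu/smt/active", buf, sizeof(buf)))
            printf("SMT active:  %s\n", buf);  /* "0" or "1" */
        if (!read_line("/sys/devices/system/cpu/smt/control", buf, sizeof(buf)))
            printf("SMT control: %s\n", buf);  /* on/off/forceoff/notsupported */
        return 0;
    }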
40 | diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst |
41 | index 5bb9161dbe6a..78f8f00c369f 100644 |
42 | --- a/Documentation/admin-guide/index.rst |
43 | +++ b/Documentation/admin-guide/index.rst |
44 | @@ -17,6 +17,15 @@ etc. |
45 | kernel-parameters |
46 | devices |
47 | |
48 | +This section describes CPU vulnerabilities and provides an overview of the |
49 | +possible mitigations along with guidance for selecting mitigations if they |
50 | +are configurable at compile, boot or run time. |
51 | + |
52 | +.. toctree:: |
53 | + :maxdepth: 1 |
54 | + |
55 | + l1tf |
56 | + |
57 | Here is a set of documents aimed at users who are trying to track down |
58 | problems and bugs in particular. |
59 | |
60 | diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt |
61 | index d6d7669e667f..9841bad6f271 100644 |
62 | --- a/Documentation/admin-guide/kernel-parameters.txt |
63 | +++ b/Documentation/admin-guide/kernel-parameters.txt |
64 | @@ -1888,10 +1888,84 @@ |
65 | (virtualized real and unpaged mode) on capable |
66 | Intel chips. Default is 1 (enabled) |
67 | |
68 | + kvm-intel.vmentry_l1d_flush=[KVM,Intel] Mitigation for L1 Terminal Fault |
69 | + CVE-2018-3620. |
70 | + |
71 | + Valid arguments: never, cond, always |
72 | + |
73 | + always: L1D cache flush on every VMENTER. |
74 | + cond: Flush L1D on VMENTER only when the code between |
75 | + VMEXIT and VMENTER can leak host memory. |
76 | + never: Disables the mitigation |
77 | + |
78 | + Default is cond (do L1 cache flush in specific instances) |
79 | + |
80 | kvm-intel.vpid= [KVM,Intel] Disable Virtual Processor Identification |
81 | feature (tagged TLBs) on capable Intel chips. |
82 | Default is 1 (enabled) |
83 | |
84 | + l1tf= [X86] Control mitigation of the L1TF vulnerability on |
85 | + affected CPUs |
86 | + |
87 | + The kernel PTE inversion protection is unconditionally |
88 | + enabled and cannot be disabled. |
89 | + |
90 | + full |
91 | + Provides all available mitigations for the |
92 | + L1TF vulnerability. Disables SMT and |
93 | + enables all mitigations in the |
94 | + hypervisors, i.e. unconditional L1D flush. |
95 | + |
96 | + SMT control and L1D flush control via the |
97 | + sysfs interface is still possible after |
98 | + boot. Hypervisors will issue a warning |
99 | + when the first VM is started in a |
100 | + potentially insecure configuration, |
101 | + i.e. SMT enabled or L1D flush disabled. |
102 | + |
103 | + full,force |
104 | + Same as 'full', but disables SMT and L1D |
105 | + flush runtime control. Implies the |
106 | + 'nosmt=force' command line option. |
107 | + (i.e. sysfs control of SMT is disabled.) |
108 | + |
109 | + flush |
110 | + Leaves SMT enabled and enables the default |
111 | + hypervisor mitigation, i.e. conditional |
112 | + L1D flush. |
113 | + |
114 | + SMT control and L1D flush control via the |
115 | + sysfs interface is still possible after |
116 | + boot. Hypervisors will issue a warning |
117 | + when the first VM is started in a |
118 | + potentially insecure configuration, |
119 | + i.e. SMT enabled or L1D flush disabled. |
120 | + |
121 | + flush,nosmt |
122 | + |
123 | + Disables SMT and enables the default |
124 | + hypervisor mitigation. |
125 | + |
126 | + SMT control and L1D flush control via the |
127 | + sysfs interface is still possible after |
128 | + boot. Hypervisors will issue a warning |
129 | + when the first VM is started in a |
130 | + potentially insecure configuration, |
131 | + i.e. SMT enabled or L1D flush disabled. |
132 | + |
133 | + flush,nowarn |
134 | + Same as 'flush', but hypervisors will not |
135 | + warn when a VM is started in a potentially |
136 | + insecure configuration. |
137 | + |
138 | + off |
139 | + Disables hypervisor mitigations and doesn't |
140 | + emit any warnings. |
141 | + |
142 | + Default is 'flush'. |
143 | + |
144 | + For details see: Documentation/admin-guide/l1tf.rst |
145 | + |
146 | l2cr= [PPC] |
147 | |
148 | l3cr= [PPC] |
149 | @@ -2595,6 +2669,10 @@ |
150 | nosmt [KNL,S390] Disable symmetric multithreading (SMT). |
151 | Equivalent to smt=1. |
152 | |
153 | + [KNL,x86] Disable symmetric multithreading (SMT). |
154 | + nosmt=force: Force disable SMT, cannot be undone |
155 | + via the sysfs control file. |
156 | + |
157 | nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2 |
158 | (indirect branch prediction) vulnerability. System may |
159 | allow data leaks with this option, which is equivalent |
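
To illustrate the new "l1tf=" option, here is a small sketch that reports which mode the running kernel was booted with by scanning /proc/cmdline; when the option is absent, the documented default 'flush' applies:

    /* Report the boot-time "l1tf=" selection, if any. Illustrative only. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char cmdline[4096], *opt;
        FILE *f = fopen("/proc/cmdline", "r");

        if (!f)
            return 1;
        if (!fgets(cmdline, sizeof(cmdline), f)) {
            fclose(f);
            return 1;
        }
        fclose(f);

        opt = strstr(cmdline, "l1tf=");
        if (opt) {
            opt += strlen("l1tf=");
            printf("l1tf mode: %.*s\n", (int)strcspn(opt, " \n"), opt);
        } else {
            printf("l1tf mode: flush (documented default)\n");
        }
        return 0;
    }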
160 | diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst |
161 | new file mode 100644 |
162 | index 000000000000..bae52b845de0 |
163 | --- /dev/null |
164 | +++ b/Documentation/admin-guide/l1tf.rst |
165 | @@ -0,0 +1,610 @@ |
166 | +L1TF - L1 Terminal Fault |
167 | +======================== |
168 | + |
169 | +L1 Terminal Fault is a hardware vulnerability which allows unprivileged |
170 | +speculative access to data which is available in the Level 1 Data Cache |
171 | +when the page table entry controlling the virtual address, which is used |
172 | +for the access, has the Present bit cleared or other reserved bits set. |
173 | + |
174 | +Affected processors |
175 | +------------------- |
176 | + |
177 | +This vulnerability affects a wide range of Intel processors. The |
178 | +vulnerability is not present on: |
179 | + |
180 | + - Processors from AMD, Centaur and other non-Intel vendors |
181 | + |
182 | + - Older processor models, where the CPU family is < 6 |
183 | + |
184 | + - A range of Intel ATOM processors (Cedarview, Cloverview, Lincroft, |
185 | + Penwell, Pineview, Silvermont, Airmont, Merrifield) |
186 | + |
187 | + - The Intel XEON PHI family |
188 | + |
189 | + - Intel processors which have the ARCH_CAP_RDCL_NO bit set in the |
190 | + IA32_ARCH_CAPABILITIES MSR. If the bit is set the CPU is not affected |
191 | + by the Meltdown vulnerability either. These CPUs should become |
192 | + available by end of 2018. |
193 | + |
194 | +Whether a processor is affected or not can be read out from the L1TF |
195 | +vulnerability file in sysfs. See :ref:`l1tf_sys_info`. |
196 | + |
197 | +Related CVEs |
198 | +------------ |
199 | + |
200 | +The following CVE entries are related to the L1TF vulnerability: |
201 | + |
202 | + ============= ================= ============================== |
203 | + CVE-2018-3615 L1 Terminal Fault SGX related aspects |
204 | + CVE-2018-3620 L1 Terminal Fault OS, SMM related aspects |
205 | + CVE-2018-3646 L1 Terminal Fault Virtualization related aspects |
206 | + ============= ================= ============================== |
207 | + |
208 | +Problem |
209 | +------- |
210 | + |
211 | +If an instruction accesses a virtual address for which the relevant page |
212 | +table entry (PTE) has the Present bit cleared or other reserved bits set, |
213 | +then speculative execution ignores the invalid PTE and loads the referenced |
214 | +data if it is present in the Level 1 Data Cache, as if the page referenced |
215 | +by the address bits in the PTE was still present and accessible. |
216 | + |
217 | +While this is a purely speculative mechanism and the instruction will raise |
218 | +a page fault when it is retired eventually, the mere act of loading the |
219 | +data and making it available to other speculative instructions opens up the |
220 | +opportunity for side channel attacks to unprivileged malicious code, |
221 | +similar to the Meltdown attack. |
222 | + |
223 | +While Meltdown breaks the user space to kernel space protection, L1TF |
224 | +allows attacks on any physical memory address in the system, and the attack |
225 | +works across all protection domains. It allows attacks on SGX and also |
226 | +works from inside virtual machines because the speculation bypasses the |
227 | +extended page table (EPT) protection mechanism. |
228 | + |
229 | + |
230 | +Attack scenarios |
231 | +---------------- |
232 | + |
233 | +1. Malicious user space |
234 | +^^^^^^^^^^^^^^^^^^^^^^^ |
235 | + |
236 | + Operating Systems store arbitrary information in the address bits of a |
237 | + PTE which is marked non-present. This allows a malicious user space |
238 | + application to attack the physical memory to which these PTEs resolve. |
239 | + In some cases user-space can maliciously influence the information |
240 | + encoded in the address bits of the PTE, thus making attacks more |
241 | + deterministic and more practical. |
242 | + |
243 | + The Linux kernel contains a mitigation for this attack vector, PTE |
244 | + inversion, which is permanently enabled and has no performance |
245 | + impact. The kernel ensures that the address bits of PTEs, which are not |
246 | + marked present, never point to cacheable physical memory space. |
247 | + |
248 | + A system with an up to date kernel is protected against attacks from |
249 | + malicious user space applications. |
250 | + |
251 | +2. Malicious guest in a virtual machine |
252 | +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ |
253 | + |
254 | + The fact that L1TF breaks all domain protections allows malicious guest |
255 | + OSes, which can control the PTEs directly, and malicious guest user |
256 | + space applications, which run on an unprotected guest kernel lacking the |
257 | + PTE inversion mitigation for L1TF, to attack physical host memory. |
258 | + |
259 | + A special aspect of L1TF in the context of virtualization is symmetric |
260 | + multi threading (SMT). The Intel implementation of SMT is called |
261 | + HyperThreading. The fact that Hyperthreads on the affected processors |
262 | + share the L1 Data Cache (L1D) is important for this. As the flaw only |
263 | + allows attacks on data which is present in L1D, a malicious guest running |
264 | + on one Hyperthread can attack the data which is brought into the L1D by |
265 | + the context which runs on the sibling Hyperthread of the same physical |
266 | + core. This context can be host OS, host user space or a different guest. |
267 | + |
268 | + If the processor does not support Extended Page Tables, the attack is |
269 | + only possible when the hypervisor does not sanitize the content of the |
270 | + effective (shadow) page tables. |
271 | + |
272 | + While solutions exist to mitigate these attack vectors fully, these |
273 | + mitigations are not enabled by default in the Linux kernel because they |
274 | + can affect performance significantly. The kernel provides several |
275 | + mechanisms which can be utilized to address the problem depending on the |
276 | + deployment scenario. The mitigations, their protection scope and impact |
277 | + are described in the next sections. |
278 | + |
279 | + The default mitigations and the rationale for choosing them are explained |
280 | + at the end of this document. See :ref:`default_mitigations`. |
281 | + |
282 | +.. _l1tf_sys_info: |
283 | + |
284 | +L1TF system information |
285 | +----------------------- |
286 | + |
287 | +The Linux kernel provides a sysfs interface to enumerate the current L1TF |
288 | +status of the system: whether the system is vulnerable, and which |
289 | +mitigations are active. The relevant sysfs file is: |
290 | + |
291 | +/sys/devices/system/cpu/vulnerabilities/l1tf |
292 | + |
293 | +The possible values in this file are: |
294 | + |
295 | + =========================== =============================== |
296 | + 'Not affected' The processor is not vulnerable |
297 | + 'Mitigation: PTE Inversion' The host protection is active |
298 | + =========================== =============================== |
299 | + |
300 | +If KVM/VMX is enabled and the processor is vulnerable then the following |
301 | +information is appended to the 'Mitigation: PTE Inversion' part: |
302 | + |
303 | + - SMT status: |
304 | + |
305 | + ===================== ================ |
306 | + 'VMX: SMT vulnerable' SMT is enabled |
307 | + 'VMX: SMT disabled' SMT is disabled |
308 | + ===================== ================ |
309 | + |
310 | + - L1D Flush mode: |
311 | + |
312 | + ================================ ==================================== |
313 | + 'L1D vulnerable' L1D flushing is disabled |
314 | + |
315 | + 'L1D conditional cache flushes' L1D flush is conditionally enabled |
316 | + |
317 | + 'L1D cache flushes' L1D flush is unconditionally enabled |
318 | + ================================ ==================================== |
319 | + |
320 | +The resulting grade of protection is discussed in the following sections. |
321 | + |
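
A minimal sketch that prints this status string; the file only exists on kernels carrying this patch:

    /* Print /sys/devices/system/cpu/vulnerabilities/l1tf. */
    #include <stdio.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/l1tf", "r");

        if (!f) {
            perror("l1tf sysfs file");  /* absent on pre-patch kernels */
            return 1;
        }
        if (fgets(line, sizeof(line), f))
            fputs(line, stdout);  /* e.g. "Mitigation: PTE Inversion" */
        fclose(f);
        return 0;
    }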
322 | + |
323 | +Host mitigation mechanism |
324 | +------------------------- |
325 | + |
326 | +The kernel is unconditionally protected against L1TF attacks from malicious |
327 | +user space running on the host. |
328 | + |
329 | + |
330 | +Guest mitigation mechanisms |
331 | +--------------------------- |
332 | + |
333 | +.. _l1d_flush: |
334 | + |
335 | +1. L1D flush on VMENTER |
336 | +^^^^^^^^^^^^^^^^^^^^^^^ |
337 | + |
338 | + To make sure that a guest cannot attack data which is present in the L1D |
339 | + the hypervisor flushes the L1D before entering the guest. |
340 | + |
341 | + Flushing the L1D evicts not only the data which should not be accessed |
342 | + by a potentially malicious guest, it also flushes the guest |
343 | + data. Flushing the L1D has a performance impact as the processor has to |
344 | + bring the flushed guest data back into the L1D. Depending on the |
345 | + frequency of VMEXIT/VMENTER and the type of computations in the guest |
346 | + performance degradation in the range of 1% to 50% has been observed. For |
347 | + scenarios where guest VMEXIT/VMENTER are rare the performance impact is |
348 | + minimal. Virtio and mechanisms like posted interrupts are designed to |
349 | + confine the VMEXITs to a bare minimum, but specific configurations and |
350 | + application scenarios might still suffer from a high VMEXIT rate. |
351 | + |
352 | + The kernel provides two L1D flush modes: |
353 | + - conditional ('cond') |
354 | + - unconditional ('always') |
355 | + |
356 | + The conditional mode avoids L1D flushing after VMEXITs which execute |
357 | + only audited code paths before the corresponding VMENTER. These code |
358 | + paths have been verified not to expose secrets or other |
359 | + interesting data to an attacker, but they can leak information about the |
360 | + address space layout of the hypervisor. |
361 | + |
362 | + Unconditional mode flushes L1D on all VMENTER invocations and provides |
363 | + maximum protection. It has a higher overhead than the conditional |
364 | + mode. The overhead cannot be quantified correctly as it depends on the |
365 | + workload scenario and the resulting number of VMEXITs. |
366 | + |
367 | + The general recommendation is to enable L1D flush on VMENTER. The kernel |
368 | + defaults to conditional mode on affected processors. |
369 | + |
370 | + **Note** that L1D flush does not prevent the SMT problem because the |
371 | + sibling thread will also bring back its data into the L1D which makes it |
372 | + attackable again. |
373 | + |
374 | + L1D flush can be controlled by the administrator via the kernel command |
375 | + line and sysfs control files. See :ref:`mitigation_control_command_line` |
376 | + and :ref:`mitigation_control_kvm`. |
377 | + |
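
As a sketch of that runtime control, the following reads and optionally rewrites the module parameter file described in :ref:`mitigation_control_kvm`; it assumes kvm_intel is loaded, and the write is expected to be rejected under 'l1tf=full,force':

    /* Show or (as root) change the KVM vmentry L1D flush mode. */
    #include <stdio.h>

    #define PARAM "/sys/module/kvm_intel/parameters/vmentry_l1d_flush"

    int main(int argc, char **argv)
    {
        char buf[32];
        FILE *f;

        if (argc > 1) {               /* e.g. "./l1dflush always" */
            f = fopen(PARAM, "w");
            if (!f || fprintf(f, "%s\n", argv[1]) < 0 || fclose(f)) {
                perror("write " PARAM);
                return 1;
            }
        }
        f = fopen(PARAM, "r");
        if (f && fgets(buf, sizeof(buf), f))
            printf("vmentry_l1d_flush: %s", buf);  /* never, cond or always */
        if (f)
            fclose(f);
        return 0;
    }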
378 | +.. _guest_confinement: |
379 | + |
380 | +2. Guest VCPU confinement to dedicated physical cores |
381 | +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ |
382 | + |
383 | + To address the SMT problem, it is possible to make a guest or a group of |
384 | + guests affine to one or more physical cores. The proper mechanism for |
385 | + that is to utilize exclusive cpusets to ensure that no other guest or |
386 | + host tasks can run on these cores. |
387 | + |
388 | + If only a single guest or related guests run on sibling SMT threads on |
389 | + the same physical core then they can only attack their own memory and |
390 | + restricted parts of the host memory. |
391 | + |
392 | + Host memory is attackable when one of the sibling SMT threads runs in |
393 | + host OS (hypervisor) context and the other in guest context. The amount |
394 | + of valuable information from the host OS context depends on the context |
395 | + which the host OS executes, i.e. interrupts, soft interrupts and kernel |
396 | + threads. The amount of valuable data from these contexts cannot be |
397 | + declared as non-interesting for an attacker without deep inspection of |
398 | + the code. |
399 | + |
400 | + **Note** that assigning guests to a fixed set of physical cores affects |
401 | + the ability of the scheduler to do load balancing and might have |
402 | + negative effects on CPU utilization depending on the hosting |
403 | + scenario. Disabling SMT might be a viable alternative for particular |
404 | + scenarios. |
405 | + |
406 | + For further information about confining guests to a single or to a group |
407 | + of cores consult the cpusets documentation: |
408 | + |
409 | + https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt |
410 | + |
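
A sketch of such confinement using the cgroup-v1 cpuset filesystem; the mount point, cpuset name and CPU numbers below are illustrative assumptions, not values taken from this patch:

    /* Carve out an exclusive cpuset for a guest's vCPU threads. */
    #include <stdio.h>
    #include <sys/stat.h>

    static void write_str(const char *path, const char *val)
    {
        FILE *f = fopen(path, "w");

        if (!f) {
            perror(path);
            return;
        }
        fputs(val, f);
        fclose(f);
    }

    int main(void)
    {
        /* Assumes cpusets are mounted at the usual cgroup-v1 location. */
        mkdir("/sys/fs/cgroup/cpuset/guest0", 0755);  /* EEXIST is fine */

        /* Hypothetical example: CPUs 2 and 6 are the SMT siblings of one
         * physical core; give them exclusively to this guest. */
        write_str("/sys/fs/cgroup/cpuset/guest0/cpuset.cpus", "2,6");
        write_str("/sys/fs/cgroup/cpuset/guest0/cpuset.mems", "0");
        write_str("/sys/fs/cgroup/cpuset/guest0/cpuset.cpu_exclusive", "1");

        /* Writing "0" moves the calling task itself into the cpuset; the
         * VM launcher would be started from here. */
        write_str("/sys/fs/cgroup/cpuset/guest0/tasks", "0");
        return 0;
    }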
411 | +.. _interrupt_isolation: |
412 | + |
413 | +3. Interrupt affinity |
414 | +^^^^^^^^^^^^^^^^^^^^^ |
415 | + |
416 | + Interrupts can be made affine to logical CPUs. This is not universally |
417 | + true because there are types of interrupts which are truly per CPU |
418 | + interrupts, e.g. the local timer interrupt. Aside from that, multi queue |
419 | + devices affine their interrupts to single CPUs or groups of CPUs per |
420 | + queue without allowing the administrator to control the affinities. |
421 | + |
422 | + Moving the interrupts, which can be affinity controlled, away from CPUs |
423 | + which run untrusted guests, reduces the attack vector space. |
424 | + |
425 | + Whether the interrupts which are affine to CPUs running untrusted |
426 | + guests provide interesting data for an attacker depends on the system |
427 | + configuration and the scenarios which run on the system. While for some |
428 | + of the interrupts it can be assumed that they won't expose interesting |
429 | + information beyond exposing hints about the host OS memory layout, there |
430 | + is no way to make general assumptions. |
431 | + |
432 | + Interrupt affinity can be controlled by the administrator via the |
433 | + /proc/irq/$NR/smp_affinity[_list] files. Limited documentation is |
434 | + available at: |
435 | + |
436 | + https://www.kernel.org/doc/Documentation/IRQ-affinity.txt |
437 | + |
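
A minimal sketch of that control; the IRQ number and CPU list are placeholders, root is required, and truly per-CPU interrupts will reject the write:

    /* Steer a (hypothetical) IRQ 42 away from the guest CPUs. */
    #include <stdio.h>

    int main(void)
    {
        const char *path = "/proc/irq/42/smp_affinity_list";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror(path);
            return 1;
        }
        /* Keep this interrupt on CPUs 0-1, away from the guest CPUs. */
        fprintf(f, "0-1\n");
        if (fclose(f)) {      /* an error here means the write was rejected */
            perror("write");
            return 1;
        }
        return 0;
    }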
438 | +.. _smt_control: |
439 | + |
440 | +4. SMT control |
441 | +^^^^^^^^^^^^^^ |
442 | + |
443 | + To prevent the SMT issues of L1TF it might be necessary to disable SMT |
444 | + completely. Disabling SMT can have a significant performance impact, but |
445 | + the impact depends on the hosting scenario and the type of workloads. |
446 | + The impact of disabling SMT also needs to be weighed against the impact |
447 | + of other mitigation solutions like confining guests to dedicated cores. |
448 | + |
449 | + The kernel provides a sysfs interface to retrieve the status of SMT and |
450 | + to control it. It also provides a kernel command line interface to |
451 | + control SMT. |
452 | + |
453 | + The kernel command line interface consists of the following options: |
454 | + |
455 | + =========== ========================================================== |
456 | + nosmt Affects the bring up of the secondary CPUs during boot. The |
457 | + kernel tries to bring all present CPUs online during the |
458 | + boot process. "nosmt" makes sure that from each physical |
459 | + core only one - the so-called primary (hyper) thread - is |
460 | + activated. Due to a design flaw of Intel processors related |
461 | + to Machine Check Exceptions the non primary siblings have |
462 | + to be brought up at least partially and are then shut down |
463 | + again. "nosmt" can be undone via the sysfs interface. |
464 | + |
465 | + nosmt=force Has the same effect as "nosmt" but does not allow |
466 | + undoing the SMT disable via the sysfs interface. |
467 | + =========== ========================================================== |
468 | + |
469 | + The sysfs interface provides two files: |
470 | + |
471 | + - /sys/devices/system/cpu/smt/control |
472 | + - /sys/devices/system/cpu/smt/active |
473 | + |
474 | + /sys/devices/system/cpu/smt/control: |
475 | + |
476 | + This file allows reading out the SMT control state and provides the |
477 | + ability to disable or (re)enable SMT. The possible states are: |
478 | + |
479 | + ============== =================================================== |
480 | + on SMT is supported by the CPU and enabled. All |
481 | + logical CPUs can be onlined and offlined without |
482 | + restrictions. |
483 | + |
484 | + off SMT is supported by the CPU and disabled. Only |
485 | + the so called primary SMT threads can be onlined |
486 | + and offlined without restrictions. An attempt to |
487 | + online a non-primary sibling is rejected. |
488 | + |
489 | + forceoff Same as 'off' but the state cannot be controlled. |
490 | + Attempts to write to the control file are rejected. |
491 | + |
492 | + notsupported The processor does not support SMT. It's therefore |
493 | + not affected by the SMT implications of L1TF. |
494 | + Attempts to write to the control file are rejected. |
495 | + ============== =================================================== |
496 | + |
497 | + The possible states which can be written into this file to control SMT |
498 | + state are: |
499 | + |
500 | + - on |
501 | + - off |
502 | + - forceoff |
503 | + |
504 | + /sys/devices/system/cpu/smt/active: |
505 | + |
506 | + This file reports whether SMT is enabled and active, i.e. if on any |
507 | + physical core two or more sibling threads are online. |
508 | + |
509 | + SMT control is also possible at boot time via the l1tf kernel command |
510 | + line parameter in combination with L1D flush control. See |
511 | + :ref:`mitigation_control_command_line`. |
512 | + |
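
A sketch of the runtime side of this interface; it must run as root, and the write fails in the "forceoff" and "notsupported" states described above:

    /* Disable SMT via the control file, then confirm via "active". */
    #include <stdio.h>

    int main(void)
    {
        char buf[16];
        FILE *f = fopen("/sys/devices/system/cpu/smt/control", "w");

        if (!f || fputs("off\n", f) == EOF || fclose(f)) {
            perror("smt/control");
            return 1;
        }
        f = fopen("/sys/devices/system/cpu/smt/active", "r");
        if (f && fgets(buf, sizeof(buf), f))
            printf("smt active: %s", buf);  /* expect "0" now */
        if (f)
            fclose(f);
        return 0;
    }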
513 | +5. Disabling EPT |
514 | +^^^^^^^^^^^^^^^^ |
515 | + |
516 | + Disabling EPT for virtual machines provides full mitigation for L1TF even |
517 | + with SMT enabled, because the effective page tables for guests are |
518 | + managed and sanitized by the hypervisor. However, disabling EPT has a |
519 | + significant performance impact, especially when the Meltdown mitigation |
520 | + KPTI is enabled. |
521 | + |
522 | + EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter. |
523 | + |
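
A quick sketch for checking the current EPT setting through the module parameter file (assuming the standard /sys/module path and a loaded kvm_intel module):

    /* "N" means EPT is off and, per the text above, the system is then
     * fully mitigated even with SMT enabled. */
    #include <stdio.h>

    int main(void)
    {
        char buf[8];
        FILE *f = fopen("/sys/module/kvm_intel/parameters/ept", "r");

        if (!f) {
            perror("kvm_intel parameters");  /* module not loaded? */
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("kvm-intel.ept: %s", buf);  /* "Y" or "N" */
        fclose(f);
        return 0;
    }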
524 | +There is ongoing research and development for new mitigation mechanisms to |
525 | +address the performance impact of disabling SMT or EPT. |
526 | + |
527 | +.. _mitigation_control_command_line: |
528 | + |
529 | +Mitigation control on the kernel command line |
530 | +--------------------------------------------- |
531 | + |
532 | +The kernel command line allows controlling the L1TF mitigations at boot |
533 | +time with the option "l1tf=". The valid arguments for this option are: |
534 | + |
535 | + ============ ============================================================= |
536 | + full Provides all available mitigations for the L1TF |
537 | + vulnerability. Disables SMT and enables all mitigations in |
538 | + the hypervisors, i.e. unconditional L1D flushing |
539 | + |
540 | + SMT control and L1D flush control via the sysfs interface |
541 | + is still possible after boot. Hypervisors will issue a |
542 | + warning when the first VM is started in a potentially |
543 | + insecure configuration, i.e. SMT enabled or L1D flush |
544 | + disabled. |
545 | + |
546 | + full,force Same as 'full', but disables SMT and L1D flush runtime |
547 | + control. Implies the 'nosmt=force' command line option. |
548 | + (i.e. sysfs control of SMT is disabled.) |
549 | + |
550 | + flush Leaves SMT enabled and enables the default hypervisor |
551 | + mitigation, i.e. conditional L1D flushing |
552 | + |
553 | + SMT control and L1D flush control via the sysfs interface |
554 | + is still possible after boot. Hypervisors will issue a |
555 | + warning when the first VM is started in a potentially |
556 | + insecure configuration, i.e. SMT enabled or L1D flush |
557 | + disabled. |
558 | + |
559 | + flush,nosmt Disables SMT and enables the default hypervisor mitigation, |
560 | + i.e. conditional L1D flushing. |
561 | + |
562 | + SMT control and L1D flush control via the sysfs interface |
563 | + is still possible after boot. Hypervisors will issue a |
564 | + warning when the first VM is started in a potentially |
565 | + insecure configuration, i.e. SMT enabled or L1D flush |
566 | + disabled. |
567 | + |
568 | + flush,nowarn Same as 'flush', but hypervisors will not warn when a VM is |
569 | + started in a potentially insecure configuration. |
570 | + |
571 | + off Disables hypervisor mitigations and doesn't emit any |
572 | + warnings. |
573 | + ============ ============================================================= |
574 | + |
575 | +The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`. |
576 | + |
577 | + |
578 | +.. _mitigation_control_kvm: |
579 | + |
580 | +Mitigation control for KVM - module parameter |
581 | +------------------------------------------------------------- |
582 | + |
583 | +The KVM hypervisor mitigation mechanism, flushing the L1D cache when |
584 | +entering a guest, can be controlled with a module parameter. |
585 | + |
586 | +The option/parameter is "kvm-intel.vmentry_l1d_flush=". It takes the |
587 | +following arguments: |
588 | + |
589 | + ============ ============================================================== |
590 | + always L1D cache flush on every VMENTER. |
591 | + |
592 | + cond Flush L1D on VMENTER only when the code between VMEXIT and |
593 | + VMENTER can leak host memory which is considered |
594 | + interesting for an attacker. This can still leak host memory |
595 | + which allows e.g. determining the host's address space layout. |
596 | + |
597 | + never Disables the mitigation |
598 | + ============ ============================================================== |
599 | + |
600 | +The parameter can be provided on the kernel command line, as a module |
601 | +parameter when loading the module, and modified at runtime via the sysfs |
602 | +file: |
603 | + |
604 | +/sys/module/kvm_intel/parameters/vmentry_l1d_flush |
605 | + |
606 | +The default is 'cond'. If 'l1tf=full,force' is given on the kernel command |
607 | +line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush |
608 | +module parameter is ignored and writes to the sysfs file are rejected. |
609 | + |
610 | + |
611 | +Mitigation selection guide |
612 | +-------------------------- |
613 | + |
614 | +1. No virtualization in use |
615 | +^^^^^^^^^^^^^^^^^^^^^^^^^^^ |
616 | + |
617 | + The system is protected by the kernel unconditionally and no further |
618 | + action is required. |
619 | + |
620 | +2. Virtualization with trusted guests |
621 | +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ |
622 | + |
623 | + If the guest comes from a trusted source and the guest OS kernel is |
624 | + guaranteed to have the L1TF mitigations in place, the system is fully |
625 | + protected against L1TF and no further action is required. |
626 | + |
627 | + To avoid the overhead of the default L1D flushing on VMENTER the |
628 | + administrator can disable the flushing via the kernel command line and |
629 | + sysfs control files. See :ref:`mitigation_control_command_line` and |
630 | + :ref:`mitigation_control_kvm`. |
631 | + |
632 | + |
633 | +3. Virtualization with untrusted guests |
634 | +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ |
635 | + |
636 | +3.1. SMT not supported or disabled |
637 | +"""""""""""""""""""""""""""""""""" |
638 | + |
639 | + If SMT is not supported by the processor or disabled in the BIOS or by |
640 | + the kernel, it's only required to enforce L1D flushing on VMENTER. |
641 | + |
642 | + Conditional L1D flushing is the default behaviour and can be tuned. See |
643 | + :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`. |
644 | + |
645 | +3.2. EPT not supported or disabled |
646 | +"""""""""""""""""""""""""""""""""" |
647 | + |
648 | + If EPT is not supported by the processor or disabled in the hypervisor, |
649 | + the system is fully protected. SMT can stay enabled and L1D flushing on |
650 | + VMENTER is not required. |
651 | + |
652 | + EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter. |
653 | + |
654 | +3.3. SMT and EPT supported and active |
655 | +""""""""""""""""""""""""""""""""""""" |
656 | + |
657 | + If SMT and EPT are supported and active then various degrees of |
658 | + mitigations can be employed: |
659 | + |
660 | + - L1D flushing on VMENTER: |
661 | + |
662 | + L1D flushing on VMENTER is the minimal protection requirement, but it |
663 | + is only potent in combination with other mitigation methods. |
664 | + |
665 | + Conditional L1D flushing is the default behaviour and can be tuned. See |
666 | + :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`. |
667 | + |
668 | + - Guest confinement: |
669 | + |
670 | + Confinement of guests to a single or a group of physical cores which |
671 | + are not running any other processes can reduce the attack surface |
672 | + significantly, but interrupts, soft interrupts and kernel threads can |
673 | + still expose valuable data to a potential attacker. See |
674 | + :ref:`guest_confinement`. |
675 | + |
676 | + - Interrupt isolation: |
677 | + |
678 | + Isolating the guest CPUs from interrupts can reduce the attack surface |
679 | + further, but still allows a malicious guest to explore a limited amount |
680 | + of host physical memory. This can at least be used to gain knowledge |
681 | + about the host address space layout. The interrupts which have a fixed |
682 | + affinity to the CPUs which run the untrusted guests can, depending on |
683 | + the scenario, still trigger soft interrupts and schedule kernel threads |
684 | + which might expose valuable information. See |
685 | + :ref:`interrupt_isolation`. |
686 | + |
687 | +The above three mitigation methods combined can provide protection to a |
688 | +certain degree, but the risk of the remaining attack surface has to be |
689 | +carefully analyzed. For full protection the following methods are |
690 | +available: |
691 | + |
692 | + - Disabling SMT: |
693 | + |
694 | + Disabling SMT and enforcing the L1D flushing provides the maximum |
695 | + amount of protection. This mitigation does not depend on any of the |
696 | + above mitigation methods. |
697 | + |
698 | + SMT control and L1D flushing can be tuned by the command line |
699 | + parameters 'nosmt', 'l1tf', 'kvm-intel.vmentry_l1d_flush' and at run |
700 | + time with the matching sysfs control files. See :ref:`smt_control`, |
701 | + :ref:`mitigation_control_command_line` and |
702 | + :ref:`mitigation_control_kvm`. |
703 | + |
704 | + - Disabling EPT: |
705 | + |
706 | + Disabling EPT provides the maximum amount of protection as well. It |
707 | + does not depend on any of the above mitigation methods. SMT can stay |
708 | + enabled and L1D flushing is not required, but the performance impact is |
709 | + significant. |
710 | + |
711 | + EPT can be disabled in the hypervisor via the 'kvm-intel.ept' |
712 | + parameter. |
713 | + |
714 | +3.4. Nested virtual machines |
715 | +"""""""""""""""""""""""""""" |
716 | + |
717 | +When nested virtualization is in use, three operating systems are involved: |
718 | +the bare metal hypervisor, the nested hypervisor and the nested virtual |
719 | +machine. VMENTER operations from the nested hypervisor into the nested |
720 | +guest will always be processed by the bare metal hypervisor. If KVM is the |
721 | +bare metal hypervisor it will: |
722 | + |
723 | + - Flush the L1D cache on every switch from the nested hypervisor to the |
724 | + nested virtual machine, so that the nested hypervisor's secrets are not |
725 | + exposed to the nested virtual machine; |
726 | + |
727 | + - Flush the L1D cache on every switch from the nested virtual machine to |
728 | + the nested hypervisor; this is a complex operation, and flushing the L1D |
729 | + cache keeps the bare metal hypervisor's secrets from being exposed to the |
730 | + nested virtual machine; |
731 | + |
732 | + - Instruct the nested hypervisor to not perform any L1D cache flush. This |
733 | + is an optimization to avoid double L1D flushing. |
734 | + |
735 | + |
736 | +.. _default_mitigations: |
737 | + |
738 | +Default mitigations |
739 | +------------------- |
740 | + |
741 | + The kernel default mitigations for vulnerable processors are: |
742 | + |
743 | + - PTE inversion to protect against malicious user space. This is done |
744 | + unconditionally and cannot be controlled. |
745 | + |
746 | + - L1D conditional flushing on VMENTER when EPT is enabled for |
747 | + a guest. |
748 | + |
749 | + The kernel does not by default enforce the disabling of SMT, which leaves |
750 | + SMT systems vulnerable when running untrusted guests with EPT enabled. |
751 | + |
752 | + The rationale for this choice is: |
753 | + |
754 | + - Force disabling SMT can break existing setups, especially with |
755 | + unattended updates. |
756 | + |
757 | + - If regular users run untrusted guests on their machine, then L1TF is |
758 | + just an add on to other malware which might be embedded in an untrusted |
759 | + guest, e.g. spam-bots or attacks on the local network. |
760 | + |
761 | + There is no technical way to prevent a user from running untrusted code |
762 | + on their machines blindly. |
763 | + |
764 | + - It's technically extremely unlikely and from today's knowledge even |
765 | + impossible that L1TF can be exploited via the most popular attack |
766 | + mechanisms like JavaScript because these mechanisms have no way to |
767 | + control PTEs. If this were possible and no other mitigation were |
768 | + available, then the default might be different. |
769 | + |
770 | + - The administrators of cloud and hosting setups have to carefully |
771 | + analyze the risk for their scenarios and make the appropriate |
772 | + mitigation choices, which might even vary across their deployed |
773 | + machines and also result in other changes of their overall setup. |
774 | + There is no way for the kernel to provide a sensible default for this |
775 | + kind of scenario. |
776 | diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt |
777 | index 88ad78c6f605..5d12166bd66b 100644 |
778 | --- a/Documentation/virtual/kvm/api.txt |
779 | +++ b/Documentation/virtual/kvm/api.txt |
780 | @@ -123,14 +123,15 @@ memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the |
781 | flag KVM_VM_MIPS_VZ. |
782 | |
783 | |
784 | -4.3 KVM_GET_MSR_INDEX_LIST |
785 | +4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST |
786 | |
787 | -Capability: basic |
788 | +Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST |
789 | Architectures: x86 |
790 | -Type: system |
791 | +Type: system ioctl |
792 | Parameters: struct kvm_msr_list (in/out) |
793 | Returns: 0 on success; -1 on error |
794 | Errors: |
795 | + EFAULT: the msr index list cannot be read from or written to |
796 | E2BIG: the msr index list is too big to fit in the array specified by |
797 | the user. |
798 | |
799 | @@ -139,16 +140,23 @@ struct kvm_msr_list { |
800 | __u32 indices[0]; |
801 | }; |
802 | |
803 | -This ioctl returns the guest msrs that are supported. The list varies |
804 | -by kvm version and host processor, but does not change otherwise. The |
805 | -user fills in the size of the indices array in nmsrs, and in return |
806 | -kvm adjusts nmsrs to reflect the actual number of msrs and fills in |
807 | -the indices array with their numbers. |
808 | +The user fills in the size of the indices array in nmsrs, and in return |
809 | +kvm adjusts nmsrs to reflect the actual number of msrs and fills in the |
810 | +indices array with their numbers. |
811 | + |
812 | +KVM_GET_MSR_INDEX_LIST returns the guest msrs that are supported. The list |
813 | +varies by kvm version and host processor, but does not change otherwise. |
814 | |
815 | Note: if kvm indicates support for MCE (KVM_CAP_MCE), then the MCE bank MSRs are |
816 | not returned in the MSR list, as different vcpus can have a different number |
817 | of banks, as set via the KVM_X86_SETUP_MCE ioctl. |
818 | |
819 | +KVM_GET_MSR_FEATURE_INDEX_LIST returns the list of MSRs that can be passed |
820 | +to the KVM_GET_MSRS system ioctl. This lets userspace probe host capabilities |
821 | +and processor features that are exposed via MSRs (e.g., VMX capabilities). |
822 | +This list also varies by kvm version and host processor, but does not change |
823 | +otherwise. |
824 | + |
825 | |
826 | 4.4 KVM_CHECK_EXTENSION |
827 | |
828 | @@ -475,14 +483,22 @@ Support for this has been removed. Use KVM_SET_GUEST_DEBUG instead. |
829 | |
830 | 4.18 KVM_GET_MSRS |
831 | |
832 | -Capability: basic |
833 | +Capability: basic (vcpu), KVM_CAP_GET_MSR_FEATURES (system) |
834 | Architectures: x86 |
835 | -Type: vcpu ioctl |
836 | +Type: system ioctl, vcpu ioctl |
837 | Parameters: struct kvm_msrs (in/out) |
838 | -Returns: 0 on success, -1 on error |
839 | +Returns: number of msrs successfully returned; |
840 | + -1 on error |
841 | + |
842 | +When used as a system ioctl: |
843 | +Reads the values of MSR-based features that are available for the VM. This |
844 | +is similar to KVM_GET_SUPPORTED_CPUID, but it returns MSR indices and values. |
845 | +The list of msr-based features can be obtained using KVM_GET_MSR_FEATURE_INDEX_LIST |
846 | +in a system ioctl. |
847 | |
848 | +When used as a vcpu ioctl: |
849 | Reads model-specific registers from the vcpu. Supported msr indices can |
850 | -be obtained using KVM_GET_MSR_INDEX_LIST. |
851 | +be obtained using KVM_GET_MSR_INDEX_LIST in a system ioctl. |
852 | |
853 | struct kvm_msrs { |
854 | __u32 nmsrs; /* number of msrs in entries */ |
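
The sizing convention described above leads to the usual two-call pattern: the first ioctl fails with E2BIG but writes back the real count, and the second call fills the array. A sketch, assuming <linux/kvm.h> and a /dev/kvm that supports the ioctl (KVM_GET_MSR_FEATURE_INDEX_LIST works the same way on kernels that have it):

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        struct kvm_msr_list probe = { .nmsrs = 0 }, *list;
        int kvm = open("/dev/kvm", O_RDWR);

        if (kvm < 0) {
            perror("/dev/kvm");
            return 1;
        }
        /* First call: sizes the array via E2BIG. */
        if (ioctl(kvm, KVM_GET_MSR_INDEX_LIST, &probe) < 0 && errno != E2BIG) {
            perror("KVM_GET_MSR_INDEX_LIST");
            return 1;
        }
        list = malloc(sizeof(*list) + probe.nmsrs * sizeof(list->indices[0]));
        if (!list)
            return 1;
        list->nmsrs = probe.nmsrs;
        /* Second call: fills the indices array. */
        if (ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list) == 0)
            printf("%u supported guest MSRs, first index 0x%x\n",
                   list->nmsrs, list->indices[0]);
        free(list);
        return 0;
    }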
855 | diff --git a/Makefile b/Makefile |
856 | index d407ecfdee0b..f3bb9428b3dc 100644 |
857 | --- a/Makefile |
858 | +++ b/Makefile |
859 | @@ -1,7 +1,7 @@ |
860 | # SPDX-License-Identifier: GPL-2.0 |
861 | VERSION = 4 |
862 | PATCHLEVEL = 14 |
863 | -SUBLEVEL = 62 |
864 | +SUBLEVEL = 63 |
865 | EXTRAVERSION = |
866 | NAME = Petit Gorille |
867 | |
868 | diff --git a/arch/Kconfig b/arch/Kconfig |
869 | index 400b9e1b2f27..4e01862f58e4 100644 |
870 | --- a/arch/Kconfig |
871 | +++ b/arch/Kconfig |
872 | @@ -13,6 +13,9 @@ config KEXEC_CORE |
873 | config HAVE_IMA_KEXEC |
874 | bool |
875 | |
876 | +config HOTPLUG_SMT |
877 | + bool |
878 | + |
879 | config OPROFILE |
880 | tristate "OProfile system profiling" |
881 | depends on PROFILING |
882 | diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi |
883 | index 6c7eb54be9e2..d64438bfa68b 100644 |
884 | --- a/arch/arm/boot/dts/imx6sx.dtsi |
885 | +++ b/arch/arm/boot/dts/imx6sx.dtsi |
886 | @@ -1305,7 +1305,7 @@ |
887 | 0x82000000 0 0x08000000 0x08000000 0 0x00f00000>; |
888 | bus-range = <0x00 0xff>; |
889 | num-lanes = <1>; |
890 | - interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>; |
891 | + interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; |
892 | clocks = <&clks IMX6SX_CLK_PCIE_REF_125M>, |
893 | <&clks IMX6SX_CLK_PCIE_AXI>, |
894 | <&clks IMX6SX_CLK_LVDS1_OUT>, |
895 | diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig |
896 | index 1fd3eb5b66c6..89e684fd795f 100644 |
897 | --- a/arch/parisc/Kconfig |
898 | +++ b/arch/parisc/Kconfig |
899 | @@ -201,7 +201,7 @@ config PREFETCH |
900 | |
901 | config MLONGCALLS |
902 | bool "Enable the -mlong-calls compiler option for big kernels" |
903 | - def_bool y if (!MODULES) |
904 | + default y |
905 | depends on PA8X00 |
906 | help |
907 | If you configure the kernel to include many drivers built-in instead |
908 | diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h |
909 | new file mode 100644 |
910 | index 000000000000..dbaaca84f27f |
911 | --- /dev/null |
912 | +++ b/arch/parisc/include/asm/barrier.h |
913 | @@ -0,0 +1,32 @@ |
914 | +/* SPDX-License-Identifier: GPL-2.0 */ |
915 | +#ifndef __ASM_BARRIER_H |
916 | +#define __ASM_BARRIER_H |
917 | + |
918 | +#ifndef __ASSEMBLY__ |
919 | + |
920 | +/* The synchronize caches instruction executes as a nop on systems in |
921 | + which all memory references are performed in order. */ |
922 | +#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory") |
923 | + |
924 | +#if defined(CONFIG_SMP) |
925 | +#define mb() do { synchronize_caches(); } while (0) |
926 | +#define rmb() mb() |
927 | +#define wmb() mb() |
928 | +#define dma_rmb() mb() |
929 | +#define dma_wmb() mb() |
930 | +#else |
931 | +#define mb() barrier() |
932 | +#define rmb() barrier() |
933 | +#define wmb() barrier() |
934 | +#define dma_rmb() barrier() |
935 | +#define dma_wmb() barrier() |
936 | +#endif |
937 | + |
938 | +#define __smp_mb() mb() |
939 | +#define __smp_rmb() mb() |
940 | +#define __smp_wmb() mb() |
941 | + |
942 | +#include <asm-generic/barrier.h> |
943 | + |
944 | +#endif /* !__ASSEMBLY__ */ |
945 | +#endif /* __ASM_BARRIER_H */ |
946 | diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S |
947 | index e95207c0565e..1b4732e20137 100644 |
948 | --- a/arch/parisc/kernel/entry.S |
949 | +++ b/arch/parisc/kernel/entry.S |
950 | @@ -481,6 +481,8 @@ |
951 | /* Release pa_tlb_lock lock without reloading lock address. */ |
952 | .macro tlb_unlock0 spc,tmp |
953 | #ifdef CONFIG_SMP |
954 | + or,COND(=) %r0,\spc,%r0 |
955 | + sync |
956 | or,COND(=) %r0,\spc,%r0 |
957 | stw \spc,0(\tmp) |
958 | #endif |
959 | diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S |
960 | index 67b0f7532e83..3e163df49cf3 100644 |
961 | --- a/arch/parisc/kernel/pacache.S |
962 | +++ b/arch/parisc/kernel/pacache.S |
963 | @@ -354,6 +354,7 @@ ENDPROC_CFI(flush_data_cache_local) |
964 | .macro tlb_unlock la,flags,tmp |
965 | #ifdef CONFIG_SMP |
966 | ldi 1,\tmp |
967 | + sync |
968 | stw \tmp,0(\la) |
969 | mtsm \flags |
970 | #endif |
971 | diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S |
972 | index e775f80ae28c..4886a6db42e9 100644 |
973 | --- a/arch/parisc/kernel/syscall.S |
974 | +++ b/arch/parisc/kernel/syscall.S |
975 | @@ -633,6 +633,7 @@ cas_action: |
976 | sub,<> %r28, %r25, %r0 |
977 | 2: stw,ma %r24, 0(%r26) |
978 | /* Free lock */ |
979 | + sync |
980 | stw,ma %r20, 0(%sr2,%r20) |
981 | #if ENABLE_LWS_DEBUG |
982 | /* Clear thread register indicator */ |
983 | @@ -647,6 +648,7 @@ cas_action: |
984 | 3: |
985 | /* Error occurred on load or store */ |
986 | /* Free lock */ |
987 | + sync |
988 | stw %r20, 0(%sr2,%r20) |
989 | #if ENABLE_LWS_DEBUG |
990 | stw %r0, 4(%sr2,%r20) |
991 | @@ -848,6 +850,7 @@ cas2_action: |
992 | |
993 | cas2_end: |
994 | /* Free lock */ |
995 | + sync |
996 | stw,ma %r20, 0(%sr2,%r20) |
997 | /* Enable interrupts */ |
998 | ssm PSW_SM_I, %r0 |
999 | @@ -858,6 +861,7 @@ cas2_end: |
1000 | 22: |
1001 | /* Error occurred on load or store */ |
1002 | /* Free lock */ |
1003 | + sync |
1004 | stw %r20, 0(%sr2,%r20) |
1005 | ssm PSW_SM_I, %r0 |
1006 | ldo 1(%r0),%r28 |
1007 | diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig |
1008 | index 7483cd514c32..1c63a4b5320d 100644 |
1009 | --- a/arch/x86/Kconfig |
1010 | +++ b/arch/x86/Kconfig |
1011 | @@ -176,6 +176,7 @@ config X86 |
1012 | select HAVE_SYSCALL_TRACEPOINTS |
1013 | select HAVE_UNSTABLE_SCHED_CLOCK |
1014 | select HAVE_USER_RETURN_NOTIFIER |
1015 | + select HOTPLUG_SMT if SMP |
1016 | select IRQ_FORCED_THREADING |
1017 | select PCI_LOCKLESS_CONFIG |
1018 | select PERF_EVENTS |
1019 | diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h |
1020 | index 5f01671c68f2..a1ed92aae12a 100644 |
1021 | --- a/arch/x86/include/asm/apic.h |
1022 | +++ b/arch/x86/include/asm/apic.h |
1023 | @@ -10,6 +10,7 @@ |
1024 | #include <asm/fixmap.h> |
1025 | #include <asm/mpspec.h> |
1026 | #include <asm/msr.h> |
1027 | +#include <asm/hardirq.h> |
1028 | |
1029 | #define ARCH_APICTIMER_STOPS_ON_C3 1 |
1030 | |
1031 | @@ -613,12 +614,20 @@ extern int default_check_phys_apicid_present(int phys_apicid); |
1032 | #endif |
1033 | |
1034 | #endif /* CONFIG_X86_LOCAL_APIC */ |
1035 | + |
1036 | +#ifdef CONFIG_SMP |
1037 | +bool apic_id_is_primary_thread(unsigned int id); |
1038 | +#else |
1039 | +static inline bool apic_id_is_primary_thread(unsigned int id) { return false; } |
1040 | +#endif |
1041 | + |
1042 | extern void irq_enter(void); |
1043 | extern void irq_exit(void); |
1044 | |
1045 | static inline void entering_irq(void) |
1046 | { |
1047 | irq_enter(); |
1048 | + kvm_set_cpu_l1tf_flush_l1d(); |
1049 | } |
1050 | |
1051 | static inline void entering_ack_irq(void) |
1052 | @@ -631,6 +640,7 @@ static inline void ipi_entering_ack_irq(void) |
1053 | { |
1054 | irq_enter(); |
1055 | ack_APIC_irq(); |
1056 | + kvm_set_cpu_l1tf_flush_l1d(); |
1057 | } |
1058 | |
1059 | static inline void exiting_irq(void) |
1060 | diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h |
1061 | index 403e97d5e243..8418462298e7 100644 |
1062 | --- a/arch/x86/include/asm/cpufeatures.h |
1063 | +++ b/arch/x86/include/asm/cpufeatures.h |
1064 | @@ -219,6 +219,7 @@ |
1065 | #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ |
1066 | #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ |
1067 | #define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */ |
1068 | +#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */ |
1069 | |
1070 | /* Virtualization flags: Linux defined, word 8 */ |
1071 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
1072 | @@ -338,6 +339,7 @@ |
1073 | #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ |
1074 | #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ |
1075 | #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ |
1076 | +#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */ |
1077 | #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ |
1078 | #define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ |
1079 | |
1080 | @@ -370,5 +372,6 @@ |
1081 | #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ |
1082 | #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ |
1083 | #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ |
1084 | +#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ |
1085 | |
1086 | #endif /* _ASM_X86_CPUFEATURES_H */ |
1087 | diff --git a/arch/x86/include/asm/dmi.h b/arch/x86/include/asm/dmi.h |
1088 | index 0ab2ab27ad1f..b825cb201251 100644 |
1089 | --- a/arch/x86/include/asm/dmi.h |
1090 | +++ b/arch/x86/include/asm/dmi.h |
1091 | @@ -4,8 +4,8 @@ |
1092 | |
1093 | #include <linux/compiler.h> |
1094 | #include <linux/init.h> |
1095 | +#include <linux/io.h> |
1096 | |
1097 | -#include <asm/io.h> |
1098 | #include <asm/setup.h> |
1099 | |
1100 | static __always_inline __init void *dmi_alloc(unsigned len) |
1101 | diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h |
1102 | index 51cc979dd364..486c843273c4 100644 |
1103 | --- a/arch/x86/include/asm/hardirq.h |
1104 | +++ b/arch/x86/include/asm/hardirq.h |
1105 | @@ -3,10 +3,12 @@ |
1106 | #define _ASM_X86_HARDIRQ_H |
1107 | |
1108 | #include <linux/threads.h> |
1109 | -#include <linux/irq.h> |
1110 | |
1111 | typedef struct { |
1112 | - unsigned int __softirq_pending; |
1113 | + u16 __softirq_pending; |
1114 | +#if IS_ENABLED(CONFIG_KVM_INTEL) |
1115 | + u8 kvm_cpu_l1tf_flush_l1d; |
1116 | +#endif |
1117 | unsigned int __nmi_count; /* arch dependent */ |
1118 | #ifdef CONFIG_X86_LOCAL_APIC |
1119 | unsigned int apic_timer_irqs; /* arch dependent */ |
1120 | @@ -62,4 +64,24 @@ extern u64 arch_irq_stat_cpu(unsigned int cpu); |
1121 | extern u64 arch_irq_stat(void); |
1122 | #define arch_irq_stat arch_irq_stat |
1123 | |
1124 | + |
1125 | +#if IS_ENABLED(CONFIG_KVM_INTEL) |
1126 | +static inline void kvm_set_cpu_l1tf_flush_l1d(void) |
1127 | +{ |
1128 | + __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1); |
1129 | +} |
1130 | + |
1131 | +static inline void kvm_clear_cpu_l1tf_flush_l1d(void) |
1132 | +{ |
1133 | + __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0); |
1134 | +} |
1135 | + |
1136 | +static inline bool kvm_get_cpu_l1tf_flush_l1d(void) |
1137 | +{ |
1138 | + return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d); |
1139 | +} |
1140 | +#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */ |
1141 | +static inline void kvm_set_cpu_l1tf_flush_l1d(void) { } |
1142 | +#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */ |
1143 | + |
1144 | #endif /* _ASM_X86_HARDIRQ_H */ |
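
These helpers feed KVM's conditional L1D flush decision: in 'cond' mode the cache is flushed on VMENTER only if the vcpu flag or this per-cpu flag was raised since the last entry. A simplified, non-verbatim sketch of that decision (the mode variable and enum names here are illustrative; the real logic lives in KVM's vmx code and differs in detail):

    /* Simplified sketch - not the literal kvm implementation. */
    static void l1d_flush_maybe(struct kvm_vcpu *vcpu)
    {
        bool flush;

        if (vmentry_l1d_flush_mode == L1D_FLUSH_MODE_ALWAYS) {
            flush = true;
        } else {
            /* cond: flush only if something interesting ran in between */
            flush = vcpu->arch.l1tf_flush_l1d ||
                    kvm_get_cpu_l1tf_flush_l1d();
        }
        vcpu->arch.l1tf_flush_l1d = false;
        kvm_clear_cpu_l1tf_flush_l1d();
        if (flush)
            wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
    }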
1145 | diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h |
1146 | index c4fc17220df9..c14f2a74b2be 100644 |
1147 | --- a/arch/x86/include/asm/irqflags.h |
1148 | +++ b/arch/x86/include/asm/irqflags.h |
1149 | @@ -13,6 +13,8 @@ |
1150 | * Interrupt control: |
1151 | */ |
1152 | |
1153 | +/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */ |
1154 | +extern inline unsigned long native_save_fl(void); |
1155 | extern inline unsigned long native_save_fl(void) |
1156 | { |
1157 | unsigned long flags; |
1158 | diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h |
1159 | index 174b9c41efce..4015b88383ce 100644 |
1160 | --- a/arch/x86/include/asm/kvm_host.h |
1161 | +++ b/arch/x86/include/asm/kvm_host.h |
1162 | @@ -17,6 +17,7 @@ |
1163 | #include <linux/tracepoint.h> |
1164 | #include <linux/cpumask.h> |
1165 | #include <linux/irq_work.h> |
1166 | +#include <linux/irq.h> |
1167 | |
1168 | #include <linux/kvm.h> |
1169 | #include <linux/kvm_para.h> |
1170 | @@ -506,6 +507,7 @@ struct kvm_vcpu_arch { |
1171 | u64 smbase; |
1172 | bool tpr_access_reporting; |
1173 | u64 ia32_xss; |
1174 | + u64 microcode_version; |
1175 | |
1176 | /* |
1177 | * Paging state of the vcpu |
1178 | @@ -693,6 +695,9 @@ struct kvm_vcpu_arch { |
1179 | |
1180 | /* be preempted when it's in kernel-mode(cpl=0) */ |
1181 | bool preempted_in_kernel; |
1182 | + |
1183 | + /* Flush the L1 Data cache for L1TF mitigation on VMENTER */ |
1184 | + bool l1tf_flush_l1d; |
1185 | }; |
1186 | |
1187 | struct kvm_lpage_info { |
1188 | @@ -862,6 +867,7 @@ struct kvm_vcpu_stat { |
1189 | u64 signal_exits; |
1190 | u64 irq_window_exits; |
1191 | u64 nmi_window_exits; |
1192 | + u64 l1d_flush; |
1193 | u64 halt_exits; |
1194 | u64 halt_successful_poll; |
1195 | u64 halt_attempted_poll; |
1196 | @@ -1061,6 +1067,8 @@ struct kvm_x86_ops { |
1197 | void (*cancel_hv_timer)(struct kvm_vcpu *vcpu); |
1198 | |
1199 | void (*setup_mce)(struct kvm_vcpu *vcpu); |
1200 | + |
1201 | + int (*get_msr_feature)(struct kvm_msr_entry *entry); |
1202 | }; |
1203 | |
1204 | struct kvm_arch_async_pf { |
1205 | @@ -1366,6 +1374,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v); |
1206 | void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); |
1207 | void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu); |
1208 | |
1209 | +u64 kvm_get_arch_capabilities(void); |
1210 | void kvm_define_shared_msr(unsigned index, u32 msr); |
1211 | int kvm_set_shared_msr(unsigned index, u64 val, u64 mask); |
1212 | |
1213 | diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h |
1214 | index 504b21692d32..ef7eec669a1b 100644 |
1215 | --- a/arch/x86/include/asm/msr-index.h |
1216 | +++ b/arch/x86/include/asm/msr-index.h |
1217 | @@ -70,12 +70,19 @@ |
1218 | #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a |
1219 | #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ |
1220 | #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ |
1221 | +#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */ |
1222 | #define ARCH_CAP_SSB_NO (1 << 4) /* |
1223 | * Not susceptible to Speculative Store Bypass |
1224 | * attack, so no Speculative Store Bypass |
1225 | * control required. |
1226 | */ |
1227 | |
1228 | +#define MSR_IA32_FLUSH_CMD 0x0000010b |
1229 | +#define L1D_FLUSH (1 << 0) /* |
1230 | + * Writeback and invalidate the |
1231 | + * L1 data cache. |
1232 | + */ |
1233 | + |
1234 | #define MSR_IA32_BBL_CR_CTL 0x00000119 |
1235 | #define MSR_IA32_BBL_CR_CTL3 0x0000011e |
1236 | |
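
Userspace can inspect these capability bits through the msr driver; a sketch (requires the "msr" module and root, and the MSR does not exist on older CPUs):

    /* Decode IA32_ARCH_CAPABILITIES (0x10a) bits defined above. */
    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0 || pread(fd, &val, sizeof(val), 0x10a) != sizeof(val)) {
            perror("rdmsr 0x10a");
            return 1;
        }
        printf("RDCL_NO               = %d\n", (int)(val & 1));
        printf("SKIP_VMENTRY_L1DFLUSH = %d\n", (int)((val >> 3) & 1));
        close(fd);
        return 0;
    }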
1237 | diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h |
1238 | index aa30c3241ea7..0d5c739eebd7 100644 |
1239 | --- a/arch/x86/include/asm/page_32_types.h |
1240 | +++ b/arch/x86/include/asm/page_32_types.h |
1241 | @@ -29,8 +29,13 @@ |
1242 | #define N_EXCEPTION_STACKS 1 |
1243 | |
1244 | #ifdef CONFIG_X86_PAE |
1245 | -/* 44=32+12, the limit we can fit into an unsigned long pfn */ |
1246 | -#define __PHYSICAL_MASK_SHIFT 44 |
1247 | +/* |
1248 | + * This is beyond the 44 bit limit imposed by the 32bit long pfns, |
1249 | + * but we need the full mask to make sure inverted PROT_NONE |
1250 | + * entries have all the host bits set in a guest. |
1251 | + * The real limit is still 44 bits. |
1252 | + */ |
1253 | +#define __PHYSICAL_MASK_SHIFT 52 |
1254 | #define __VIRTUAL_MASK_SHIFT 32 |
1255 | |
1256 | #else /* !CONFIG_X86_PAE */ |
1257 | diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h |
1258 | index 685ffe8a0eaf..60d0f9015317 100644 |
1259 | --- a/arch/x86/include/asm/pgtable-2level.h |
1260 | +++ b/arch/x86/include/asm/pgtable-2level.h |
1261 | @@ -95,4 +95,21 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi |
1262 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) |
1263 | #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) |
1264 | |
1265 | +/* No inverted PFNs on 2 level page tables */ |
1266 | + |
1267 | +static inline u64 protnone_mask(u64 val) |
1268 | +{ |
1269 | + return 0; |
1270 | +} |
1271 | + |
1272 | +static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask) |
1273 | +{ |
1274 | + return val; |
1275 | +} |
1276 | + |
1277 | +static inline bool __pte_needs_invert(u64 val) |
1278 | +{ |
1279 | + return false; |
1280 | +} |
1281 | + |
1282 | #endif /* _ASM_X86_PGTABLE_2LEVEL_H */ |
1283 | diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h |
1284 | index bc4af5453802..9dc19b4a2a87 100644 |
1285 | --- a/arch/x86/include/asm/pgtable-3level.h |
1286 | +++ b/arch/x86/include/asm/pgtable-3level.h |
1287 | @@ -206,12 +206,43 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp) |
1288 | #endif |
1289 | |
1290 | /* Encode and de-code a swap entry */ |
1291 | +#define SWP_TYPE_BITS 5 |
1292 | + |
1293 | +#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1) |
1294 | + |
1295 | +/* We always extract/encode the offset by shifting it all the way up, and then down again */ |
1296 | +#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS) |
1297 | + |
1298 | #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5) |
1299 | #define __swp_type(x) (((x).val) & 0x1f) |
1300 | #define __swp_offset(x) ((x).val >> 5) |
1301 | #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) |
1302 | -#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) |
1303 | -#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } }) |
1304 | + |
1305 | +/* |
1306 | + * Normally, __swp_entry() converts from arch-independent swp_entry_t to |
1307 | + * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result |
1308 | + * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the |
1309 | + * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to |
1310 | + * __swp_entry_to_pte() through the following helper macro based on 64bit |
1311 | + * __swp_entry(). |
1312 | + */ |
1313 | +#define __swp_pteval_entry(type, offset) ((pteval_t) { \ |
1314 | + (~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \ |
1315 | + | ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) }) |
1316 | + |
1317 | +#define __swp_entry_to_pte(x) ((pte_t){ .pte = \ |
1318 | + __swp_pteval_entry(__swp_type(x), __swp_offset(x)) }) |
1319 | +/* |
1320 | + * Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent
1321 | + * swp_entry_t, but also has to convert it from 64bit to the 32bit |
1322 | + * intermediate representation, using the following macros based on 64bit |
1323 | + * __swp_type() and __swp_offset(). |
1324 | + */ |
1325 | +#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS))) |
1326 | +#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)) |
1327 | + |
1328 | +#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \ |
1329 | + __pteval_swp_offset(pte))) |
1330 | |
1331 | #define gup_get_pte gup_get_pte |
1332 | /* |
1333 | @@ -260,4 +291,6 @@ static inline pte_t gup_get_pte(pte_t *ptep) |
1334 | return pte; |
1335 | } |
1336 | |
1337 | +#include <asm/pgtable-invert.h> |
1338 | + |
1339 | #endif /* _ASM_X86_PGTABLE_3LEVEL_H */ |
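The PAE macros above spread a swap entry across the full 64-bit PTE: the 5-bit type lands in the topmost bits and the bitwise-inverted offset sits below it, so the high "physical address" bits of a swap PTE are always set. A minimal userspace sketch of the encode/decode round trip, assuming _PAGE_BIT_PROTNONE == 8 (hence SWP_OFFSET_FIRST_BIT == 9 and SWP_OFFSET_SHIFT == 14), as in the x86 headers:

    /* Userspace model of the swap-entry inversion above; a sketch only. */
    #include <stdint.h>
    #include <stdio.h>

    #define SWP_TYPE_BITS        5
    #define SWP_OFFSET_FIRST_BIT 9   /* _PAGE_BIT_PROTNONE + 1 */
    #define SWP_OFFSET_SHIFT     (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

    static uint64_t swp_pteval(uint64_t type, uint64_t offset)
    {
        /* Shift the inverted offset up "too far", then back down. */
        return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS)
             | (type << (64 - SWP_TYPE_BITS));
    }

    int main(void)
    {
        uint64_t pte    = swp_pteval(3, 0x12345);
        uint64_t type   = pte >> (64 - SWP_TYPE_BITS);
        uint64_t offset = ~pte << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT;

        /* Prints: type=3 offset=0x12345 */
        printf("type=%llu offset=%#llx\n",
               (unsigned long long)type, (unsigned long long)offset);
        return 0;
    }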
1340 | diff --git a/arch/x86/include/asm/pgtable-invert.h b/arch/x86/include/asm/pgtable-invert.h |
1341 | new file mode 100644 |
1342 | index 000000000000..44b1203ece12 |
1343 | --- /dev/null |
1344 | +++ b/arch/x86/include/asm/pgtable-invert.h |
1345 | @@ -0,0 +1,32 @@ |
1346 | +/* SPDX-License-Identifier: GPL-2.0 */ |
1347 | +#ifndef _ASM_PGTABLE_INVERT_H |
1348 | +#define _ASM_PGTABLE_INVERT_H 1 |
1349 | + |
1350 | +#ifndef __ASSEMBLY__ |
1351 | + |
1352 | +static inline bool __pte_needs_invert(u64 val) |
1353 | +{ |
1354 | + return !(val & _PAGE_PRESENT); |
1355 | +} |
1356 | + |
1357 | +/* Get a mask to xor with the page table entry to get the correct pfn. */ |
1358 | +static inline u64 protnone_mask(u64 val) |
1359 | +{ |
1360 | + return __pte_needs_invert(val) ? ~0ull : 0; |
1361 | +} |
1362 | + |
1363 | +static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask) |
1364 | +{ |
1365 | + /* |
1366 | + * When a PTE transitions from NONE to !NONE or vice-versa |
1367 | + * invert the PFN part to stop speculation. |
1368 | + * pte_pfn undoes this when needed. |
1369 | + */ |
1370 | + if (__pte_needs_invert(oldval) != __pte_needs_invert(val)) |
1371 | + val = (val & ~mask) | (~val & mask); |
1372 | + return val; |
1373 | +} |
1374 | + |
1375 | +#endif /* __ASSEMBLY__ */ |
1376 | + |
1377 | +#endif |
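pgtable-invert.h carries the heart of the PTE-inversion mitigation: for a non-present entry the PFN bits are XORed with all-ones, so the value stored in the page table points into unpopulated address space and a speculative L1TF load cannot hit attacker-chosen data. A small userspace model of the XOR round trip (the mask and bit positions here are illustrative, not the kernel's exact constants):

    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_PRESENT 0x1ULL
    #define PTE_PFN_MASK  0x000ffffffffff000ULL  /* illustrative */

    static uint64_t protnone_mask(uint64_t val)
    {
        return (val & _PAGE_PRESENT) ? 0 : ~0ULL;
    }

    int main(void)
    {
        uint64_t pte    = 0x1234000ULL;  /* PROT_NONE: present bit clear */
        uint64_t stored = pte ^ (protnone_mask(pte) & PTE_PFN_MASK);
        uint64_t pfn    = ((stored ^ protnone_mask(stored))
                           & PTE_PFN_MASK) >> 12;

        /* The stored PFN bits are inverted, yet pte_pfn() recovers 0x1234. */
        printf("stored=%#llx pfn=%#llx\n",
               (unsigned long long)stored, (unsigned long long)pfn);
        return 0;
    }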
1378 | diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h |
1379 | index 5c790e93657d..6a4b1a54ff47 100644 |
1380 | --- a/arch/x86/include/asm/pgtable.h |
1381 | +++ b/arch/x86/include/asm/pgtable.h |
1382 | @@ -185,19 +185,29 @@ static inline int pte_special(pte_t pte) |
1383 | return pte_flags(pte) & _PAGE_SPECIAL; |
1384 | } |
1385 | |
1386 | +/* Entries that were set to PROT_NONE are inverted */ |
1387 | + |
1388 | +static inline u64 protnone_mask(u64 val); |
1389 | + |
1390 | static inline unsigned long pte_pfn(pte_t pte) |
1391 | { |
1392 | - return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT; |
1393 | + phys_addr_t pfn = pte_val(pte); |
1394 | + pfn ^= protnone_mask(pfn); |
1395 | + return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT; |
1396 | } |
1397 | |
1398 | static inline unsigned long pmd_pfn(pmd_t pmd) |
1399 | { |
1400 | - return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; |
1401 | + phys_addr_t pfn = pmd_val(pmd); |
1402 | + pfn ^= protnone_mask(pfn); |
1403 | + return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; |
1404 | } |
1405 | |
1406 | static inline unsigned long pud_pfn(pud_t pud) |
1407 | { |
1408 | - return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT; |
1409 | + phys_addr_t pfn = pud_val(pud); |
1410 | + pfn ^= protnone_mask(pfn); |
1411 | + return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT; |
1412 | } |
1413 | |
1414 | static inline unsigned long p4d_pfn(p4d_t p4d) |
1415 | @@ -400,11 +410,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd) |
1416 | return pmd_set_flags(pmd, _PAGE_RW); |
1417 | } |
1418 | |
1419 | -static inline pmd_t pmd_mknotpresent(pmd_t pmd) |
1420 | -{ |
1421 | - return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE); |
1422 | -} |
1423 | - |
1424 | static inline pud_t pud_set_flags(pud_t pud, pudval_t set) |
1425 | { |
1426 | pudval_t v = native_pud_val(pud); |
1427 | @@ -459,11 +464,6 @@ static inline pud_t pud_mkwrite(pud_t pud) |
1428 | return pud_set_flags(pud, _PAGE_RW); |
1429 | } |
1430 | |
1431 | -static inline pud_t pud_mknotpresent(pud_t pud) |
1432 | -{ |
1433 | - return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE); |
1434 | -} |
1435 | - |
1436 | #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY |
1437 | static inline int pte_soft_dirty(pte_t pte) |
1438 | { |
1439 | @@ -528,25 +528,45 @@ static inline pgprotval_t massage_pgprot(pgprot_t pgprot) |
1440 | |
1441 | static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) |
1442 | { |
1443 | - return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) | |
1444 | - massage_pgprot(pgprot)); |
1445 | + phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT; |
1446 | + pfn ^= protnone_mask(pgprot_val(pgprot)); |
1447 | + pfn &= PTE_PFN_MASK; |
1448 | + return __pte(pfn | massage_pgprot(pgprot)); |
1449 | } |
1450 | |
1451 | static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) |
1452 | { |
1453 | - return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) | |
1454 | - massage_pgprot(pgprot)); |
1455 | + phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT; |
1456 | + pfn ^= protnone_mask(pgprot_val(pgprot)); |
1457 | + pfn &= PHYSICAL_PMD_PAGE_MASK; |
1458 | + return __pmd(pfn | massage_pgprot(pgprot)); |
1459 | } |
1460 | |
1461 | static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot) |
1462 | { |
1463 | - return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) | |
1464 | - massage_pgprot(pgprot)); |
1465 | + phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT; |
1466 | + pfn ^= protnone_mask(pgprot_val(pgprot)); |
1467 | + pfn &= PHYSICAL_PUD_PAGE_MASK; |
1468 | + return __pud(pfn | massage_pgprot(pgprot)); |
1469 | } |
1470 | |
1471 | +static inline pmd_t pmd_mknotpresent(pmd_t pmd) |
1472 | +{ |
1473 | + return pfn_pmd(pmd_pfn(pmd), |
1474 | + __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE))); |
1475 | +} |
1476 | + |
1477 | +static inline pud_t pud_mknotpresent(pud_t pud) |
1478 | +{ |
1479 | + return pfn_pud(pud_pfn(pud), |
1480 | + __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE))); |
1481 | +} |
1482 | + |
1483 | +static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask); |
1484 | + |
1485 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
1486 | { |
1487 | - pteval_t val = pte_val(pte); |
1488 | + pteval_t val = pte_val(pte), oldval = val; |
1489 | |
1490 | /* |
1491 | * Chop off the NX bit (if present), and add the NX portion of |
1492 | @@ -554,17 +574,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
1493 | */ |
1494 | val &= _PAGE_CHG_MASK; |
1495 | val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK; |
1496 | - |
1497 | + val = flip_protnone_guard(oldval, val, PTE_PFN_MASK); |
1498 | return __pte(val); |
1499 | } |
1500 | |
1501 | static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) |
1502 | { |
1503 | - pmdval_t val = pmd_val(pmd); |
1504 | + pmdval_t val = pmd_val(pmd), oldval = val; |
1505 | |
1506 | val &= _HPAGE_CHG_MASK; |
1507 | val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK; |
1508 | - |
1509 | + val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK); |
1510 | return __pmd(val); |
1511 | } |
1512 | |
1513 | @@ -1274,6 +1294,14 @@ static inline bool pud_access_permitted(pud_t pud, bool write) |
1514 | return __pte_access_permitted(pud_val(pud), write); |
1515 | } |
1516 | |
1517 | +#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1 |
1518 | +extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot); |
1519 | + |
1520 | +static inline bool arch_has_pfn_modify_check(void) |
1521 | +{ |
1522 | + return boot_cpu_has_bug(X86_BUG_L1TF); |
1523 | +} |
1524 | + |
1525 | #include <asm-generic/pgtable.h> |
1526 | #endif /* __ASSEMBLY__ */ |
1527 | |
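Note how pmd_mknotpresent()/pud_mknotpresent() are now built on pfn_pmd()/pfn_pud() so the PFN gets (un)inverted on the way through, and how pte_modify()/pmd_modify() only re-invert when the entry crosses the present/PROT_NONE boundary. A sketch of that transition check, assuming bit 0 is _PAGE_PRESENT and an illustrative PFN mask:

    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_PRESENT 0x1ULL
    #define PFN_MASK      0x000ffffffffff000ULL  /* illustrative */

    static int needs_invert(uint64_t val)
    {
        return !(val & _PAGE_PRESENT);
    }

    static uint64_t flip_protnone_guard(uint64_t oldval, uint64_t val,
                                        uint64_t mask)
    {
        /* Invert the PFN part only when crossing the present boundary. */
        if (needs_invert(oldval) != needs_invert(val))
            val = (val & ~mask) | (~val & mask);
        return val;
    }

    int main(void)
    {
        uint64_t present = 0x1234000ULL | _PAGE_PRESENT;
        uint64_t none    = flip_protnone_guard(present,
                                               present & ~_PAGE_PRESENT,
                                               PFN_MASK);

        /* The PROT_NONE copy carries the inverted PFN field. */
        printf("%#llx -> %#llx\n",
               (unsigned long long)present, (unsigned long long)none);
        return 0;
    }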
1528 | diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h |
1529 | index 1149d2112b2e..4ecb72831938 100644 |
1530 | --- a/arch/x86/include/asm/pgtable_64.h |
1531 | +++ b/arch/x86/include/asm/pgtable_64.h |
1532 | @@ -276,7 +276,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; } |
1533 | * |
1534 | * | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number |
1535 | * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names |
1536 | - * | OFFSET (14->63) | TYPE (9-13) |0|0|X|X| X| X|X|SD|0| <- swp entry |
1537 | + * | TYPE (59-63) | ~OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry |
1538 | * |
1539 | * G (8) is aliased and used as a PROT_NONE indicator for |
1540 | * !present ptes. We need to start storing swap entries above |
1541 | @@ -289,20 +289,34 @@ static inline int pgd_large(pgd_t pgd) { return 0; } |
1542 | * |
1543 | * Bit 7 in swp entry should be 0 because pmd_present checks not only P, |
1544 | * but also L and G. |
1545 | + * |
1546 | + * The offset is inverted by a binary not operation so that the high
1547 | + * physical bits are set.
1548 | */ |
1549 | -#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1) |
1550 | -#define SWP_TYPE_BITS 5 |
1551 | -/* Place the offset above the type: */ |
1552 | -#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS) |
1553 | +#define SWP_TYPE_BITS 5 |
1554 | + |
1555 | +#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1) |
1556 | + |
1557 | +/* We always extract/encode the offset by shifting it all the way up, and then down again */ |
1558 | +#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS) |
1559 | |
1560 | #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS) |
1561 | |
1562 | -#define __swp_type(x) (((x).val >> (SWP_TYPE_FIRST_BIT)) \ |
1563 | - & ((1U << SWP_TYPE_BITS) - 1)) |
1564 | -#define __swp_offset(x) ((x).val >> SWP_OFFSET_FIRST_BIT) |
1565 | -#define __swp_entry(type, offset) ((swp_entry_t) { \ |
1566 | - ((type) << (SWP_TYPE_FIRST_BIT)) \ |
1567 | - | ((offset) << SWP_OFFSET_FIRST_BIT) }) |
1568 | +/* Extract the high bits for type */ |
1569 | +#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS)) |
1570 | + |
1571 | +/* Shift up (to get rid of type), then down to get value */ |
1572 | +#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT) |
1573 | + |
1574 | +/* |
1575 | + * Shift the offset up "too far" by TYPE bits, then down again |
1576 | + * The offset is inverted by a binary not operation so that the high
1577 | + * physical bits are set.
1578 | + */ |
1579 | +#define __swp_entry(type, offset) ((swp_entry_t) { \ |
1580 | + (~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \ |
1581 | + | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) }) |
1582 | + |
1583 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) |
1584 | #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val((pmd)) }) |
1585 | #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) |
1586 | @@ -346,5 +360,7 @@ static inline bool gup_fast_permitted(unsigned long start, int nr_pages, |
1587 | return true; |
1588 | } |
1589 | |
1590 | +#include <asm/pgtable-invert.h> |
1591 | + |
1592 | #endif /* !__ASSEMBLY__ */ |
1593 | #endif /* _ASM_X86_PGTABLE_64_H */ |
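For concreteness: with SWP_TYPE_BITS == 5 and _PAGE_BIT_PROTNONE == 8, SWP_OFFSET_SHIFT is 14, so __swp_entry() keeps bits 8:0 for the flag layout shown in the comment above (P stays zero and G, the PROT_NONE alias, is avoided), places the bitwise-inverted offset in bits 58:9, and the type in bits 63:59. Because the offset is stored inverted, even a small offset yields mostly-set high bits, so a swap PTE can never be mistaken for a valid, cacheable PFN by a speculative load; __swp_offset() undoes the inversion with the same shift pair, exactly as in the PAE sketch earlier.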
1594 | diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h |
1595 | index 3222c7746cb1..0e856c0628b3 100644 |
1596 | --- a/arch/x86/include/asm/processor.h |
1597 | +++ b/arch/x86/include/asm/processor.h |
1598 | @@ -180,6 +180,11 @@ extern const struct seq_operations cpuinfo_op; |
1599 | |
1600 | extern void cpu_detect(struct cpuinfo_x86 *c); |
1601 | |
1602 | +static inline unsigned long l1tf_pfn_limit(void) |
1603 | +{ |
1604 | + return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1; |
1605 | +} |
1606 | + |
1607 | extern void early_cpu_init(void); |
1608 | extern void identify_boot_cpu(void); |
1609 | extern void identify_secondary_cpu(struct cpuinfo_x86 *); |
1610 | @@ -969,4 +974,16 @@ bool xen_set_default_idle(void); |
1611 | void stop_this_cpu(void *dummy); |
1612 | void df_debug(struct pt_regs *regs, long error_code); |
1613 | void microcode_check(void); |
1614 | + |
1615 | +enum l1tf_mitigations { |
1616 | + L1TF_MITIGATION_OFF, |
1617 | + L1TF_MITIGATION_FLUSH_NOWARN, |
1618 | + L1TF_MITIGATION_FLUSH, |
1619 | + L1TF_MITIGATION_FLUSH_NOSMT, |
1620 | + L1TF_MITIGATION_FULL, |
1621 | + L1TF_MITIGATION_FULL_FORCE |
1622 | +}; |
1623 | + |
1624 | +extern enum l1tf_mitigations l1tf_mitigation; |
1625 | + |
1626 | #endif /* _ASM_X86_PROCESSOR_H */ |
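l1tf_pfn_limit() caps the PFNs the kernel considers safe at half the CPU's physical address space, so that the inverted PFN of a hostile PTE is guaranteed to land in the unpopulated upper half. A quick check of the arithmetic, assuming a CPU with 46 physical address bits and 4 KiB pages:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int x86_phys_bits = 46, page_shift = 12;
        uint64_t limit = (1ULL << (x86_phys_bits - 1 - page_shift)) - 1;

        /* Prints 0x1ffffffff: the last PFN below MAX_PA/2 (32 TiB). */
        printf("l1tf_pfn_limit = %#llx\n", (unsigned long long)limit);
        return 0;
    }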
1627 | diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h |
1628 | index 461f53d27708..fe2ee61880a8 100644 |
1629 | --- a/arch/x86/include/asm/smp.h |
1630 | +++ b/arch/x86/include/asm/smp.h |
1631 | @@ -170,7 +170,6 @@ static inline int wbinvd_on_all_cpus(void) |
1632 | wbinvd(); |
1633 | return 0; |
1634 | } |
1635 | -#define smp_num_siblings 1 |
1636 | #endif /* CONFIG_SMP */ |
1637 | |
1638 | extern unsigned disabled_cpus; |
1639 | diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h |
1640 | index c1d2a9892352..453cf38a1c33 100644 |
1641 | --- a/arch/x86/include/asm/topology.h |
1642 | +++ b/arch/x86/include/asm/topology.h |
1643 | @@ -123,13 +123,17 @@ static inline int topology_max_smt_threads(void) |
1644 | } |
1645 | |
1646 | int topology_update_package_map(unsigned int apicid, unsigned int cpu); |
1647 | -extern int topology_phys_to_logical_pkg(unsigned int pkg); |
1648 | +int topology_phys_to_logical_pkg(unsigned int pkg); |
1649 | +bool topology_is_primary_thread(unsigned int cpu); |
1650 | +bool topology_smt_supported(void); |
1651 | #else |
1652 | #define topology_max_packages() (1) |
1653 | static inline int |
1654 | topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; } |
1655 | static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; } |
1656 | static inline int topology_max_smt_threads(void) { return 1; } |
1657 | +static inline bool topology_is_primary_thread(unsigned int cpu) { return true; } |
1658 | +static inline bool topology_smt_supported(void) { return false; } |
1659 | #endif |
1660 | |
1661 | static inline void arch_fix_phys_package_id(int num, u32 slot) |
1662 | diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h |
1663 | index 7c300299e12e..08c14aec26ac 100644 |
1664 | --- a/arch/x86/include/asm/vmx.h |
1665 | +++ b/arch/x86/include/asm/vmx.h |
1666 | @@ -571,4 +571,15 @@ enum vm_instruction_error_number { |
1667 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28, |
1668 | }; |
1669 | |
1670 | +enum vmx_l1d_flush_state { |
1671 | + VMENTER_L1D_FLUSH_AUTO, |
1672 | + VMENTER_L1D_FLUSH_NEVER, |
1673 | + VMENTER_L1D_FLUSH_COND, |
1674 | + VMENTER_L1D_FLUSH_ALWAYS, |
1675 | + VMENTER_L1D_FLUSH_EPT_DISABLED, |
1676 | + VMENTER_L1D_FLUSH_NOT_REQUIRED, |
1677 | +}; |
1678 | + |
1679 | +extern enum vmx_l1d_flush_state l1tf_vmx_mitigation; |
1680 | + |
1681 | #endif |
1682 | diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c |
1683 | index f48a51335538..2e64178f284d 100644 |
1684 | --- a/arch/x86/kernel/apic/apic.c |
1685 | +++ b/arch/x86/kernel/apic/apic.c |
1686 | @@ -34,6 +34,7 @@ |
1687 | #include <linux/dmi.h> |
1688 | #include <linux/smp.h> |
1689 | #include <linux/mm.h> |
1690 | +#include <linux/irq.h> |
1691 | |
1692 | #include <asm/trace/irq_vectors.h> |
1693 | #include <asm/irq_remapping.h> |
1694 | @@ -56,6 +57,7 @@ |
1695 | #include <asm/hypervisor.h> |
1696 | #include <asm/cpu_device_id.h> |
1697 | #include <asm/intel-family.h> |
1698 | +#include <asm/irq_regs.h> |
1699 | |
1700 | unsigned int num_processors; |
1701 | |
1702 | @@ -2092,6 +2094,23 @@ static int cpuid_to_apicid[] = { |
1703 | [0 ... NR_CPUS - 1] = -1, |
1704 | }; |
1705 | |
1706 | +#ifdef CONFIG_SMP |
1707 | +/** |
1708 | + * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread |
1709 | + * @id: APIC ID to check |
1710 | + */ |
1711 | +bool apic_id_is_primary_thread(unsigned int apicid) |
1712 | +{ |
1713 | + u32 mask; |
1714 | + |
1715 | + if (smp_num_siblings == 1) |
1716 | + return true; |
1717 | + /* Isolate the SMT bit(s) in the APICID and check for 0 */ |
1718 | + mask = (1U << (fls(smp_num_siblings) - 1)) - 1; |
1719 | + return !(apicid & mask); |
1720 | +} |
1721 | +#endif |
1722 | + |
1723 | /* |
1724 | * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids |
1725 | * and cpuid_to_apicid[] synchronized. |
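apic_id_is_primary_thread() exploits the fact that the SMT sibling number occupies the lowest bits of the APIC ID: fls() rounds smp_num_siblings up to a power of two, and an ID with none of those low bits set is the first thread of its core. A sketch with two siblings per core, the common Hyper-Threading case:

    #include <stdio.h>

    int main(void)
    {
        /* smp_num_siblings == 2; fls(2) == 2, so the mask is (1<<1)-1. */
        unsigned int mask = (1U << (2 - 1)) - 1;  /* == 0x1 */

        for (unsigned int apicid = 0; apicid < 4; apicid++)
            printf("APIC %u: %s\n", apicid,
                   (apicid & mask) ? "secondary" : "primary");
        return 0;
    }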
1726 | diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c |
1727 | index 56ccf9346b08..741de281ed5d 100644 |
1728 | --- a/arch/x86/kernel/apic/htirq.c |
1729 | +++ b/arch/x86/kernel/apic/htirq.c |
1730 | @@ -16,6 +16,8 @@ |
1731 | #include <linux/device.h> |
1732 | #include <linux/pci.h> |
1733 | #include <linux/htirq.h> |
1734 | +#include <linux/irq.h> |
1735 | + |
1736 | #include <asm/irqdomain.h> |
1737 | #include <asm/hw_irq.h> |
1738 | #include <asm/apic.h> |
1739 | diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c |
1740 | index 3b89b27945ff..96a8a68f9c79 100644 |
1741 | --- a/arch/x86/kernel/apic/io_apic.c |
1742 | +++ b/arch/x86/kernel/apic/io_apic.c |
1743 | @@ -33,6 +33,7 @@ |
1744 | |
1745 | #include <linux/mm.h> |
1746 | #include <linux/interrupt.h> |
1747 | +#include <linux/irq.h> |
1748 | #include <linux/init.h> |
1749 | #include <linux/delay.h> |
1750 | #include <linux/sched.h> |
1751 | diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c |
1752 | index 9b18be764422..f10e7f93b0e2 100644 |
1753 | --- a/arch/x86/kernel/apic/msi.c |
1754 | +++ b/arch/x86/kernel/apic/msi.c |
1755 | @@ -12,6 +12,7 @@ |
1756 | */ |
1757 | #include <linux/mm.h> |
1758 | #include <linux/interrupt.h> |
1759 | +#include <linux/irq.h> |
1760 | #include <linux/pci.h> |
1761 | #include <linux/dmar.h> |
1762 | #include <linux/hpet.h> |
1763 | diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c |
1764 | index 2ce1c708b8ee..b958082c74a7 100644 |
1765 | --- a/arch/x86/kernel/apic/vector.c |
1766 | +++ b/arch/x86/kernel/apic/vector.c |
1767 | @@ -11,6 +11,7 @@ |
1768 | * published by the Free Software Foundation. |
1769 | */ |
1770 | #include <linux/interrupt.h> |
1771 | +#include <linux/irq.h> |
1772 | #include <linux/init.h> |
1773 | #include <linux/compiler.h> |
1774 | #include <linux/slab.h> |
1775 | diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c |
1776 | index 90574f731c05..dda741bd5789 100644 |
1777 | --- a/arch/x86/kernel/cpu/amd.c |
1778 | +++ b/arch/x86/kernel/cpu/amd.c |
1779 | @@ -298,7 +298,6 @@ static int nearby_node(int apicid) |
1780 | } |
1781 | #endif |
1782 | |
1783 | -#ifdef CONFIG_SMP |
1784 | /* |
1785 | * Fix up cpu_core_id for pre-F17h systems to be in the |
1786 | * [0 .. cores_per_node - 1] range. Not really needed but |
1787 | @@ -315,6 +314,13 @@ static void legacy_fixup_core_id(struct cpuinfo_x86 *c) |
1788 | c->cpu_core_id %= cus_per_node; |
1789 | } |
1790 | |
1791 | + |
1792 | +static void amd_get_topology_early(struct cpuinfo_x86 *c) |
1793 | +{ |
1794 | + if (cpu_has(c, X86_FEATURE_TOPOEXT)) |
1795 | + smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1; |
1796 | +} |
1797 | + |
1798 | /* |
1799 | * Fixup core topology information for |
1800 | * (1) AMD multi-node processors |
1801 | @@ -333,7 +339,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c) |
1802 | cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); |
1803 | |
1804 | node_id = ecx & 0xff; |
1805 | - smp_num_siblings = ((ebx >> 8) & 0xff) + 1; |
1806 | |
1807 | if (c->x86 == 0x15) |
1808 | c->cu_id = ebx & 0xff; |
1809 | @@ -376,7 +381,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c) |
1810 | legacy_fixup_core_id(c); |
1811 | } |
1812 | } |
1813 | -#endif |
1814 | |
1815 | /* |
1816 | * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
1817 | @@ -384,7 +388,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c) |
1818 | */ |
1819 | static void amd_detect_cmp(struct cpuinfo_x86 *c) |
1820 | { |
1821 | -#ifdef CONFIG_SMP |
1822 | unsigned bits; |
1823 | int cpu = smp_processor_id(); |
1824 | |
1825 | @@ -396,16 +399,11 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) |
1826 | /* use socket ID also for last level cache */ |
1827 | per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; |
1828 | amd_get_topology(c); |
1829 | -#endif |
1830 | } |
1831 | |
1832 | u16 amd_get_nb_id(int cpu) |
1833 | { |
1834 | - u16 id = 0; |
1835 | -#ifdef CONFIG_SMP |
1836 | - id = per_cpu(cpu_llc_id, cpu); |
1837 | -#endif |
1838 | - return id; |
1839 | + return per_cpu(cpu_llc_id, cpu); |
1840 | } |
1841 | EXPORT_SYMBOL_GPL(amd_get_nb_id); |
1842 | |
1843 | @@ -579,6 +577,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) |
1844 | |
1845 | static void early_init_amd(struct cpuinfo_x86 *c) |
1846 | { |
1847 | + u64 value; |
1848 | u32 dummy; |
1849 | |
1850 | early_init_amd_mc(c); |
1851 | @@ -668,6 +667,22 @@ static void early_init_amd(struct cpuinfo_x86 *c) |
1852 | clear_cpu_cap(c, X86_FEATURE_SME); |
1853 | } |
1854 | } |
1855 | + |
1856 | + /* Re-enable TopologyExtensions if switched off by BIOS */ |
1857 | + if (c->x86 == 0x15 && |
1858 | + (c->x86_model >= 0x10 && c->x86_model <= 0x6f) && |
1859 | + !cpu_has(c, X86_FEATURE_TOPOEXT)) { |
1860 | + |
1861 | + if (msr_set_bit(0xc0011005, 54) > 0) { |
1862 | + rdmsrl(0xc0011005, value); |
1863 | + if (value & BIT_64(54)) { |
1864 | + set_cpu_cap(c, X86_FEATURE_TOPOEXT); |
1865 | + pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n"); |
1866 | + } |
1867 | + } |
1868 | + } |
1869 | + |
1870 | + amd_get_topology_early(c); |
1871 | } |
1872 | |
1873 | static void init_amd_k8(struct cpuinfo_x86 *c) |
1874 | @@ -759,19 +774,6 @@ static void init_amd_bd(struct cpuinfo_x86 *c) |
1875 | { |
1876 | u64 value; |
1877 | |
1878 | - /* re-enable TopologyExtensions if switched off by BIOS */ |
1879 | - if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) && |
1880 | - !cpu_has(c, X86_FEATURE_TOPOEXT)) { |
1881 | - |
1882 | - if (msr_set_bit(0xc0011005, 54) > 0) { |
1883 | - rdmsrl(0xc0011005, value); |
1884 | - if (value & BIT_64(54)) { |
1885 | - set_cpu_cap(c, X86_FEATURE_TOPOEXT); |
1886 | - pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n"); |
1887 | - } |
1888 | - } |
1889 | - } |
1890 | - |
1891 | /* |
1892 | * The way access filter has a performance penalty on some workloads. |
1893 | * Disable it on the affected CPUs. |
1894 | @@ -835,15 +837,8 @@ static void init_amd(struct cpuinfo_x86 *c) |
1895 | |
1896 | cpu_detect_cache_sizes(c); |
1897 | |
1898 | - /* Multi core CPU? */ |
1899 | - if (c->extended_cpuid_level >= 0x80000008) { |
1900 | - amd_detect_cmp(c); |
1901 | - srat_detect_node(c); |
1902 | - } |
1903 | - |
1904 | -#ifdef CONFIG_X86_32 |
1905 | - detect_ht(c); |
1906 | -#endif |
1907 | + amd_detect_cmp(c); |
1908 | + srat_detect_node(c); |
1909 | |
1910 | init_amd_cacheinfo(c); |
1911 | |
1912 | diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
1913 | index 7416fc206b4a..edfc64a8a154 100644 |
1914 | --- a/arch/x86/kernel/cpu/bugs.c |
1915 | +++ b/arch/x86/kernel/cpu/bugs.c |
1916 | @@ -22,14 +22,17 @@ |
1917 | #include <asm/processor-flags.h> |
1918 | #include <asm/fpu/internal.h> |
1919 | #include <asm/msr.h> |
1920 | +#include <asm/vmx.h> |
1921 | #include <asm/paravirt.h> |
1922 | #include <asm/alternative.h> |
1923 | #include <asm/pgtable.h> |
1924 | #include <asm/set_memory.h> |
1925 | #include <asm/intel-family.h> |
1926 | +#include <asm/e820/api.h> |
1927 | |
1928 | static void __init spectre_v2_select_mitigation(void); |
1929 | static void __init ssb_select_mitigation(void); |
1930 | +static void __init l1tf_select_mitigation(void); |
1931 | |
1932 | /* |
1933 | * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any |
1934 | @@ -55,6 +58,12 @@ void __init check_bugs(void) |
1935 | { |
1936 | identify_boot_cpu(); |
1937 | |
1938 | + /* |
1939 | + * identify_boot_cpu() initialized SMT support information, let the |
1940 | + * core code know. |
1941 | + */ |
1942 | + cpu_smt_check_topology_early(); |
1943 | + |
1944 | if (!IS_ENABLED(CONFIG_SMP)) { |
1945 | pr_info("CPU: "); |
1946 | print_cpu_info(&boot_cpu_data); |
1947 | @@ -81,6 +90,8 @@ void __init check_bugs(void) |
1948 | */ |
1949 | ssb_select_mitigation(); |
1950 | |
1951 | + l1tf_select_mitigation(); |
1952 | + |
1953 | #ifdef CONFIG_X86_32 |
1954 | /* |
1955 | * Check whether we are able to run this kernel safely on SMP. |
1956 | @@ -311,23 +322,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) |
1957 | return cmd; |
1958 | } |
1959 | |
1960 | -/* Check for Skylake-like CPUs (for RSB handling) */ |
1961 | -static bool __init is_skylake_era(void) |
1962 | -{ |
1963 | - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && |
1964 | - boot_cpu_data.x86 == 6) { |
1965 | - switch (boot_cpu_data.x86_model) { |
1966 | - case INTEL_FAM6_SKYLAKE_MOBILE: |
1967 | - case INTEL_FAM6_SKYLAKE_DESKTOP: |
1968 | - case INTEL_FAM6_SKYLAKE_X: |
1969 | - case INTEL_FAM6_KABYLAKE_MOBILE: |
1970 | - case INTEL_FAM6_KABYLAKE_DESKTOP: |
1971 | - return true; |
1972 | - } |
1973 | - } |
1974 | - return false; |
1975 | -} |
1976 | - |
1977 | static void __init spectre_v2_select_mitigation(void) |
1978 | { |
1979 | enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); |
1980 | @@ -388,22 +382,15 @@ retpoline_auto: |
1981 | pr_info("%s\n", spectre_v2_strings[mode]); |
1982 | |
1983 | /* |
1984 | - * If neither SMEP nor PTI are available, there is a risk of |
1985 | - * hitting userspace addresses in the RSB after a context switch |
1986 | - * from a shallow call stack to a deeper one. To prevent this fill |
1987 | - * the entire RSB, even when using IBRS. |
1988 | + * If spectre v2 protection has been enabled, unconditionally fill |
1989 | + * RSB during a context switch; this protects against two independent |
1990 | + * issues: |
1991 | * |
1992 | - * Skylake era CPUs have a separate issue with *underflow* of the |
1993 | - * RSB, when they will predict 'ret' targets from the generic BTB. |
1994 | - * The proper mitigation for this is IBRS. If IBRS is not supported |
1995 | - * or deactivated in favour of retpolines the RSB fill on context |
1996 | - * switch is required. |
1997 | + * - RSB underflow (and switch to BTB) on Skylake+ |
1998 | + * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs |
1999 | */ |
2000 | - if ((!boot_cpu_has(X86_FEATURE_PTI) && |
2001 | - !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) { |
2002 | - setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); |
2003 | - pr_info("Spectre v2 mitigation: Filling RSB on context switch\n"); |
2004 | - } |
2005 | + setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); |
2006 | + pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); |
2007 | |
2008 | /* Initialize Indirect Branch Prediction Barrier if supported */ |
2009 | if (boot_cpu_has(X86_FEATURE_IBPB)) { |
2010 | @@ -654,8 +641,121 @@ void x86_spec_ctrl_setup_ap(void) |
2011 | x86_amd_ssb_disable(); |
2012 | } |
2013 | |
2014 | +#undef pr_fmt |
2015 | +#define pr_fmt(fmt) "L1TF: " fmt |
2016 | + |
2017 | +/* Default mitigation for L1TF-affected CPUs */ |
2018 | +enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH; |
2019 | +#if IS_ENABLED(CONFIG_KVM_INTEL) |
2020 | +EXPORT_SYMBOL_GPL(l1tf_mitigation); |
2021 | + |
2022 | +enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; |
2023 | +EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); |
2024 | +#endif |
2025 | + |
2026 | +static void __init l1tf_select_mitigation(void) |
2027 | +{ |
2028 | + u64 half_pa; |
2029 | + |
2030 | + if (!boot_cpu_has_bug(X86_BUG_L1TF)) |
2031 | + return; |
2032 | + |
2033 | + switch (l1tf_mitigation) { |
2034 | + case L1TF_MITIGATION_OFF: |
2035 | + case L1TF_MITIGATION_FLUSH_NOWARN: |
2036 | + case L1TF_MITIGATION_FLUSH: |
2037 | + break; |
2038 | + case L1TF_MITIGATION_FLUSH_NOSMT: |
2039 | + case L1TF_MITIGATION_FULL: |
2040 | + cpu_smt_disable(false); |
2041 | + break; |
2042 | + case L1TF_MITIGATION_FULL_FORCE: |
2043 | + cpu_smt_disable(true); |
2044 | + break; |
2045 | + } |
2046 | + |
2047 | +#if CONFIG_PGTABLE_LEVELS == 2 |
2048 | + pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n"); |
2049 | + return; |
2050 | +#endif |
2051 | + |
2052 | + /* |
2053 | +	 * This is extremely unlikely to happen because on almost all
2054 | +	 * systems MAX_PA/2 is far larger than the amount of RAM that
2055 | +	 * can fit into the DIMM slots.
2056 | + */ |
2057 | + half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; |
2058 | + if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { |
2059 | + pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); |
2060 | + return; |
2061 | + } |
2062 | + |
2063 | + setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); |
2064 | +} |
2065 | + |
2066 | +static int __init l1tf_cmdline(char *str) |
2067 | +{ |
2068 | + if (!boot_cpu_has_bug(X86_BUG_L1TF)) |
2069 | + return 0; |
2070 | + |
2071 | + if (!str) |
2072 | + return -EINVAL; |
2073 | + |
2074 | + if (!strcmp(str, "off")) |
2075 | + l1tf_mitigation = L1TF_MITIGATION_OFF; |
2076 | + else if (!strcmp(str, "flush,nowarn")) |
2077 | + l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN; |
2078 | + else if (!strcmp(str, "flush")) |
2079 | + l1tf_mitigation = L1TF_MITIGATION_FLUSH; |
2080 | + else if (!strcmp(str, "flush,nosmt")) |
2081 | + l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; |
2082 | + else if (!strcmp(str, "full")) |
2083 | + l1tf_mitigation = L1TF_MITIGATION_FULL; |
2084 | + else if (!strcmp(str, "full,force")) |
2085 | + l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE; |
2086 | + |
2087 | + return 0; |
2088 | +} |
2089 | +early_param("l1tf", l1tf_cmdline); |
2090 | + |
2091 | +#undef pr_fmt |
2092 | + |
2093 | #ifdef CONFIG_SYSFS |
2094 | |
2095 | +#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" |
2096 | + |
2097 | +#if IS_ENABLED(CONFIG_KVM_INTEL) |
2098 | +static const char *l1tf_vmx_states[] = { |
2099 | + [VMENTER_L1D_FLUSH_AUTO] = "auto", |
2100 | + [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", |
2101 | + [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", |
2102 | + [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes", |
2103 | + [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled", |
2104 | + [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary" |
2105 | +}; |
2106 | + |
2107 | +static ssize_t l1tf_show_state(char *buf) |
2108 | +{ |
2109 | + if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) |
2110 | + return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); |
2111 | + |
2112 | + if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || |
2113 | + (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && |
2114 | + cpu_smt_control == CPU_SMT_ENABLED)) |
2115 | + return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, |
2116 | + l1tf_vmx_states[l1tf_vmx_mitigation]); |
2117 | + |
2118 | + return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, |
2119 | + l1tf_vmx_states[l1tf_vmx_mitigation], |
2120 | + cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled"); |
2121 | +} |
2122 | +#else |
2123 | +static ssize_t l1tf_show_state(char *buf) |
2124 | +{ |
2125 | + return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); |
2126 | +} |
2127 | +#endif |
2128 | + |
2129 | static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, |
2130 | char *buf, unsigned int bug) |
2131 | { |
2132 | @@ -681,6 +781,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr |
2133 | case X86_BUG_SPEC_STORE_BYPASS: |
2134 | return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); |
2135 | |
2136 | + case X86_BUG_L1TF: |
2137 | + if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) |
2138 | + return l1tf_show_state(buf); |
2139 | + break; |
2140 | default: |
2141 | break; |
2142 | } |
2143 | @@ -707,4 +811,9 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute * |
2144 | { |
2145 | return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); |
2146 | } |
2147 | + |
2148 | +ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) |
2149 | +{ |
2150 | + return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); |
2151 | +} |
2152 | #endif |
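The e820 check above matters because PTE inversion only helps when the inverted PFN cannot alias populated RAM: any memory mapped at or above MAX_PA/2 would remain reachable. A sketch of the half_pa arithmetic, again assuming 46 physical address bits and 4 KiB pages:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t pfn_limit = (1ULL << (46 - 1 - 12)) - 1; /* l1tf_pfn_limit() */
        uint64_t half_pa   = pfn_limit << 12;

        /* RAM at or above ~32 TiB would defeat the inversion. */
        printf("half_pa = %#llx\n", (unsigned long long)half_pa);
        return 0;
    }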
2153 | diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c |
2154 | index 48e98964ecad..dd02ee4fa8cd 100644 |
2155 | --- a/arch/x86/kernel/cpu/common.c |
2156 | +++ b/arch/x86/kernel/cpu/common.c |
2157 | @@ -66,6 +66,13 @@ cpumask_var_t cpu_callin_mask; |
2158 | /* representing cpus for which sibling maps can be computed */ |
2159 | cpumask_var_t cpu_sibling_setup_mask; |
2160 | |
2161 | +/* Number of siblings per CPU package */ |
2162 | +int smp_num_siblings = 1; |
2163 | +EXPORT_SYMBOL(smp_num_siblings); |
2164 | + |
2165 | +/* Last level cache ID of each logical CPU */ |
2166 | +DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID; |
2167 | + |
2168 | /* correctly size the local cpu masks */ |
2169 | void __init setup_cpu_local_masks(void) |
2170 | { |
2171 | @@ -614,33 +621,36 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c) |
2172 | tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]); |
2173 | } |
2174 | |
2175 | -void detect_ht(struct cpuinfo_x86 *c) |
2176 | +int detect_ht_early(struct cpuinfo_x86 *c) |
2177 | { |
2178 | #ifdef CONFIG_SMP |
2179 | u32 eax, ebx, ecx, edx; |
2180 | - int index_msb, core_bits; |
2181 | - static bool printed; |
2182 | |
2183 | if (!cpu_has(c, X86_FEATURE_HT)) |
2184 | - return; |
2185 | + return -1; |
2186 | |
2187 | if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) |
2188 | - goto out; |
2189 | + return -1; |
2190 | |
2191 | if (cpu_has(c, X86_FEATURE_XTOPOLOGY)) |
2192 | - return; |
2193 | + return -1; |
2194 | |
2195 | cpuid(1, &eax, &ebx, &ecx, &edx); |
2196 | |
2197 | smp_num_siblings = (ebx & 0xff0000) >> 16; |
2198 | - |
2199 | - if (smp_num_siblings == 1) { |
2200 | + if (smp_num_siblings == 1) |
2201 | pr_info_once("CPU0: Hyper-Threading is disabled\n"); |
2202 | - goto out; |
2203 | - } |
2204 | +#endif |
2205 | + return 0; |
2206 | +} |
2207 | |
2208 | - if (smp_num_siblings <= 1) |
2209 | - goto out; |
2210 | +void detect_ht(struct cpuinfo_x86 *c) |
2211 | +{ |
2212 | +#ifdef CONFIG_SMP |
2213 | + int index_msb, core_bits; |
2214 | + |
2215 | + if (detect_ht_early(c) < 0) |
2216 | + return; |
2217 | |
2218 | index_msb = get_count_order(smp_num_siblings); |
2219 | c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb); |
2220 | @@ -653,15 +663,6 @@ void detect_ht(struct cpuinfo_x86 *c) |
2221 | |
2222 | c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) & |
2223 | ((1 << core_bits) - 1); |
2224 | - |
2225 | -out: |
2226 | - if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) { |
2227 | - pr_info("CPU: Physical Processor ID: %d\n", |
2228 | - c->phys_proc_id); |
2229 | - pr_info("CPU: Processor Core ID: %d\n", |
2230 | - c->cpu_core_id); |
2231 | - printed = 1; |
2232 | - } |
2233 | #endif |
2234 | } |
2235 | |
2236 | @@ -933,6 +934,21 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { |
2237 | {} |
2238 | }; |
2239 | |
2240 | +static const __initconst struct x86_cpu_id cpu_no_l1tf[] = { |
2241 | + /* in addition to cpu_no_speculation */ |
2242 | + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, |
2243 | + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, |
2244 | + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, |
2245 | + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD }, |
2246 | + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD }, |
2247 | + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT }, |
2248 | + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON }, |
2249 | + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE }, |
2250 | + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, |
2251 | + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, |
2252 | + {} |
2253 | +}; |
2254 | + |
2255 | static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) |
2256 | { |
2257 | u64 ia32_cap = 0; |
2258 | @@ -958,6 +974,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) |
2259 | return; |
2260 | |
2261 | setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); |
2262 | + |
2263 | + if (x86_match_cpu(cpu_no_l1tf)) |
2264 | + return; |
2265 | + |
2266 | + setup_force_cpu_bug(X86_BUG_L1TF); |
2267 | } |
2268 | |
2269 | /* |
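detect_ht_early() now only fetches the sibling count, leaving the phys_proc_id/cpu_core_id derivation to detect_ht(); the count itself comes from CPUID leaf 1, EBX bits 23:16. A userspace equivalent of that read, using the compiler-provided cpuid.h (x86-only, GCC/Clang; a sketch, not the kernel code):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;
        /* EBX 23:16 = maximum addressable logical CPUs per package. */
        printf("siblings: %u\n", (ebx & 0xff0000) >> 16);
        return 0;
    }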
2270 | diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h |
2271 | index 37672d299e35..cca588407dca 100644 |
2272 | --- a/arch/x86/kernel/cpu/cpu.h |
2273 | +++ b/arch/x86/kernel/cpu/cpu.h |
2274 | @@ -47,6 +47,8 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[], |
2275 | |
2276 | extern void get_cpu_cap(struct cpuinfo_x86 *c); |
2277 | extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); |
2278 | +extern int detect_extended_topology_early(struct cpuinfo_x86 *c); |
2279 | +extern int detect_ht_early(struct cpuinfo_x86 *c); |
2280 | |
2281 | unsigned int aperfmperf_get_khz(int cpu); |
2282 | |
2283 | diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c |
2284 | index 0b2330e19169..278be092b300 100644 |
2285 | --- a/arch/x86/kernel/cpu/intel.c |
2286 | +++ b/arch/x86/kernel/cpu/intel.c |
2287 | @@ -301,6 +301,13 @@ static void early_init_intel(struct cpuinfo_x86 *c) |
2288 | } |
2289 | |
2290 | check_mpx_erratum(c); |
2291 | + |
2292 | + /* |
2293 | + * Get the number of SMT siblings early from the extended topology |
2294 | + * leaf, if available. Otherwise try the legacy SMT detection. |
2295 | + */ |
2296 | + if (detect_extended_topology_early(c) < 0) |
2297 | + detect_ht_early(c); |
2298 | } |
2299 | |
2300 | #ifdef CONFIG_X86_32 |
2301 | diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c |
2302 | index 4fc0e08a30b9..387a8f44fba1 100644 |
2303 | --- a/arch/x86/kernel/cpu/microcode/core.c |
2304 | +++ b/arch/x86/kernel/cpu/microcode/core.c |
2305 | @@ -509,12 +509,20 @@ static struct platform_device *microcode_pdev; |
2306 | |
2307 | static int check_online_cpus(void) |
2308 | { |
2309 | - if (num_online_cpus() == num_present_cpus()) |
2310 | - return 0; |
2311 | + unsigned int cpu; |
2312 | |
2313 | - pr_err("Not all CPUs online, aborting microcode update.\n"); |
2314 | + /* |
2315 | + * Make sure all CPUs are online. It's fine for SMT to be disabled if |
2316 | + * all the primary threads are still online. |
2317 | + */ |
2318 | + for_each_present_cpu(cpu) { |
2319 | + if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) { |
2320 | + pr_err("Not all CPUs online, aborting microcode update.\n"); |
2321 | + return -EINVAL; |
2322 | + } |
2323 | + } |
2324 | |
2325 | - return -EINVAL; |
2326 | + return 0; |
2327 | } |
2328 | |
2329 | static atomic_t late_cpus_in; |
2330 | diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c |
2331 | index b099024d339c..19c6e800e816 100644 |
2332 | --- a/arch/x86/kernel/cpu/topology.c |
2333 | +++ b/arch/x86/kernel/cpu/topology.c |
2334 | @@ -27,16 +27,13 @@ |
2335 | * exists, use it for populating initial_apicid and cpu topology |
2336 | * detection. |
2337 | */ |
2338 | -void detect_extended_topology(struct cpuinfo_x86 *c) |
2339 | +int detect_extended_topology_early(struct cpuinfo_x86 *c) |
2340 | { |
2341 | #ifdef CONFIG_SMP |
2342 | - unsigned int eax, ebx, ecx, edx, sub_index; |
2343 | - unsigned int ht_mask_width, core_plus_mask_width; |
2344 | - unsigned int core_select_mask, core_level_siblings; |
2345 | - static bool printed; |
2346 | + unsigned int eax, ebx, ecx, edx; |
2347 | |
2348 | if (c->cpuid_level < 0xb) |
2349 | - return; |
2350 | + return -1; |
2351 | |
2352 | cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); |
2353 | |
2354 | @@ -44,7 +41,7 @@ void detect_extended_topology(struct cpuinfo_x86 *c) |
2355 | * check if the cpuid leaf 0xb is actually implemented. |
2356 | */ |
2357 | if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) |
2358 | - return; |
2359 | + return -1; |
2360 | |
2361 | set_cpu_cap(c, X86_FEATURE_XTOPOLOGY); |
2362 | |
2363 | @@ -52,10 +49,30 @@ void detect_extended_topology(struct cpuinfo_x86 *c) |
2364 | * initial apic id, which also represents 32-bit extended x2apic id. |
2365 | */ |
2366 | c->initial_apicid = edx; |
2367 | + smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); |
2368 | +#endif |
2369 | + return 0; |
2370 | +} |
2371 | + |
2372 | +/* |
2373 | + * Check for extended topology enumeration cpuid leaf 0xb and if it |
2374 | + * exists, use it for populating initial_apicid and cpu topology |
2375 | + * detection. |
2376 | + */ |
2377 | +void detect_extended_topology(struct cpuinfo_x86 *c) |
2378 | +{ |
2379 | +#ifdef CONFIG_SMP |
2380 | + unsigned int eax, ebx, ecx, edx, sub_index; |
2381 | + unsigned int ht_mask_width, core_plus_mask_width; |
2382 | + unsigned int core_select_mask, core_level_siblings; |
2383 | + |
2384 | + if (detect_extended_topology_early(c) < 0) |
2385 | + return; |
2386 | |
2387 | /* |
2388 | * Populate HT related information from sub-leaf level 0. |
2389 | */ |
2390 | + cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); |
2391 | core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); |
2392 | core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); |
2393 | |
2394 | @@ -86,15 +103,5 @@ void detect_extended_topology(struct cpuinfo_x86 *c) |
2395 | c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); |
2396 | |
2397 | c->x86_max_cores = (core_level_siblings / smp_num_siblings); |
2398 | - |
2399 | - if (!printed) { |
2400 | - pr_info("CPU: Physical Processor ID: %d\n", |
2401 | - c->phys_proc_id); |
2402 | - if (c->x86_max_cores > 1) |
2403 | - pr_info("CPU: Processor Core ID: %d\n", |
2404 | - c->cpu_core_id); |
2405 | - printed = 1; |
2406 | - } |
2407 | - return; |
2408 | #endif |
2409 | } |
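The split above lets the early path consume only sub-leaf 0 of CPUID leaf 0xb (the SMT level) to seed smp_num_siblings and the x2APIC ID, while the full core-level walk stays in detect_extended_topology(). A userspace sketch of that first sub-leaf (x86-only, GCC/Clang):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        __cpuid_count(0xb, 0, eax, ebx, ecx, edx);
        if (!(ebx & 0xffff))
            return 1;  /* leaf 0xb not implemented */
        /* EBX 15:0 = logical CPUs at the SMT level; EDX = x2APIC ID. */
        printf("smt siblings: %u, x2apic id: %u\n", ebx & 0xffff, edx);
        return 0;
    }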
2410 | diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c |
2411 | index f92a6593de1e..2ea85b32421a 100644 |
2412 | --- a/arch/x86/kernel/fpu/core.c |
2413 | +++ b/arch/x86/kernel/fpu/core.c |
2414 | @@ -10,6 +10,7 @@ |
2415 | #include <asm/fpu/signal.h> |
2416 | #include <asm/fpu/types.h> |
2417 | #include <asm/traps.h> |
2418 | +#include <asm/irq_regs.h> |
2419 | |
2420 | #include <linux/hardirq.h> |
2421 | #include <linux/pkeys.h> |
2422 | diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c |
2423 | index 01ebcb6f263e..7acb87cb2da8 100644 |
2424 | --- a/arch/x86/kernel/ftrace.c |
2425 | +++ b/arch/x86/kernel/ftrace.c |
2426 | @@ -27,6 +27,7 @@ |
2427 | |
2428 | #include <asm/set_memory.h> |
2429 | #include <asm/kprobes.h> |
2430 | +#include <asm/sections.h> |
2431 | #include <asm/ftrace.h> |
2432 | #include <asm/nops.h> |
2433 | |
2434 | diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c |
2435 | index 8ce4212e2b8d..afa1a204bc6d 100644 |
2436 | --- a/arch/x86/kernel/hpet.c |
2437 | +++ b/arch/x86/kernel/hpet.c |
2438 | @@ -1,6 +1,7 @@ |
2439 | #include <linux/clocksource.h> |
2440 | #include <linux/clockchips.h> |
2441 | #include <linux/interrupt.h> |
2442 | +#include <linux/irq.h> |
2443 | #include <linux/export.h> |
2444 | #include <linux/delay.h> |
2445 | #include <linux/errno.h> |
2446 | diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c |
2447 | index 8f5cb2c7060c..02abc134367f 100644 |
2448 | --- a/arch/x86/kernel/i8259.c |
2449 | +++ b/arch/x86/kernel/i8259.c |
2450 | @@ -5,6 +5,7 @@ |
2451 | #include <linux/sched.h> |
2452 | #include <linux/ioport.h> |
2453 | #include <linux/interrupt.h> |
2454 | +#include <linux/irq.h> |
2455 | #include <linux/timex.h> |
2456 | #include <linux/random.h> |
2457 | #include <linux/init.h> |
2458 | diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c |
2459 | index 0c5256653d6c..38c3d5790970 100644 |
2460 | --- a/arch/x86/kernel/idt.c |
2461 | +++ b/arch/x86/kernel/idt.c |
2462 | @@ -8,6 +8,7 @@ |
2463 | #include <asm/traps.h> |
2464 | #include <asm/proto.h> |
2465 | #include <asm/desc.h> |
2466 | +#include <asm/hw_irq.h> |
2467 | |
2468 | struct idt_data { |
2469 | unsigned int vector; |
2470 | diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c |
2471 | index aa9d51eea9d0..3c2326b59820 100644 |
2472 | --- a/arch/x86/kernel/irq.c |
2473 | +++ b/arch/x86/kernel/irq.c |
2474 | @@ -10,6 +10,7 @@ |
2475 | #include <linux/ftrace.h> |
2476 | #include <linux/delay.h> |
2477 | #include <linux/export.h> |
2478 | +#include <linux/irq.h> |
2479 | |
2480 | #include <asm/apic.h> |
2481 | #include <asm/io_apic.h> |
2482 | diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c |
2483 | index c1bdbd3d3232..95600a99ae93 100644 |
2484 | --- a/arch/x86/kernel/irq_32.c |
2485 | +++ b/arch/x86/kernel/irq_32.c |
2486 | @@ -11,6 +11,7 @@ |
2487 | |
2488 | #include <linux/seq_file.h> |
2489 | #include <linux/interrupt.h> |
2490 | +#include <linux/irq.h> |
2491 | #include <linux/kernel_stat.h> |
2492 | #include <linux/notifier.h> |
2493 | #include <linux/cpu.h> |
2494 | diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c |
2495 | index d86e344f5b3d..0469cd078db1 100644 |
2496 | --- a/arch/x86/kernel/irq_64.c |
2497 | +++ b/arch/x86/kernel/irq_64.c |
2498 | @@ -11,6 +11,7 @@ |
2499 | |
2500 | #include <linux/kernel_stat.h> |
2501 | #include <linux/interrupt.h> |
2502 | +#include <linux/irq.h> |
2503 | #include <linux/seq_file.h> |
2504 | #include <linux/delay.h> |
2505 | #include <linux/ftrace.h> |
2506 | diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c |
2507 | index 1e4094eba15e..40f83d0d7b8a 100644 |
2508 | --- a/arch/x86/kernel/irqinit.c |
2509 | +++ b/arch/x86/kernel/irqinit.c |
2510 | @@ -5,6 +5,7 @@ |
2511 | #include <linux/sched.h> |
2512 | #include <linux/ioport.h> |
2513 | #include <linux/interrupt.h> |
2514 | +#include <linux/irq.h> |
2515 | #include <linux/timex.h> |
2516 | #include <linux/random.h> |
2517 | #include <linux/kprobes.h> |
2518 | diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c |
2519 | index f1030c522e06..65452d555f05 100644 |
2520 | --- a/arch/x86/kernel/kprobes/core.c |
2521 | +++ b/arch/x86/kernel/kprobes/core.c |
2522 | @@ -63,6 +63,7 @@ |
2523 | #include <asm/insn.h> |
2524 | #include <asm/debugreg.h> |
2525 | #include <asm/set_memory.h> |
2526 | +#include <asm/sections.h> |
2527 | |
2528 | #include "common.h" |
2529 | |
2530 | @@ -394,8 +395,6 @@ int __copy_instruction(u8 *dest, u8 *src, struct insn *insn) |
2531 | - (u8 *) dest; |
2532 | if ((s64) (s32) newdisp != newdisp) { |
2533 | pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp); |
2534 | - pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", |
2535 | - src, dest, insn->displacement.value); |
2536 | return 0; |
2537 | } |
2538 | disp = (u8 *) dest + insn_offset_displacement(insn); |
2539 | @@ -621,8 +620,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs, |
2540 | * Raise a BUG or we'll continue in an endless reentering loop |
2541 | * and eventually a stack overflow. |
2542 | */ |
2543 | - printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n", |
2544 | - p->addr); |
2545 | + pr_err("Unrecoverable kprobe detected.\n"); |
2546 | dump_kprobe(p); |
2547 | BUG(); |
2548 | default: |
2549 | diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c |
2550 | index e1df9ef5d78c..f3559b84cd75 100644 |
2551 | --- a/arch/x86/kernel/paravirt.c |
2552 | +++ b/arch/x86/kernel/paravirt.c |
2553 | @@ -88,10 +88,12 @@ unsigned paravirt_patch_call(void *insnbuf, |
2554 | struct branch *b = insnbuf; |
2555 | unsigned long delta = (unsigned long)target - (addr+5); |
2556 | |
2557 | - if (tgt_clobbers & ~site_clobbers) |
2558 | - return len; /* target would clobber too much for this site */ |
2559 | - if (len < 5) |
2560 | + if (len < 5) { |
2561 | +#ifdef CONFIG_RETPOLINE |
2562 | + WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr); |
2563 | +#endif |
2564 | return len; /* call too long for patch site */ |
2565 | + } |
2566 | |
2567 | b->opcode = 0xe8; /* call */ |
2568 | b->delta = delta; |
2569 | @@ -106,8 +108,12 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target, |
2570 | struct branch *b = insnbuf; |
2571 | unsigned long delta = (unsigned long)target - (addr+5); |
2572 | |
2573 | - if (len < 5) |
2574 | + if (len < 5) { |
2575 | +#ifdef CONFIG_RETPOLINE |
2576 | + WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr); |
2577 | +#endif |
2578 | return len; /* call too long for patch site */ |
2579 | + } |
2580 | |
2581 | b->opcode = 0xe9; /* jmp */ |
2582 | b->delta = delta; |
2583 | diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c |
2584 | index efbcf5283520..dcb00acb6583 100644 |
2585 | --- a/arch/x86/kernel/setup.c |
2586 | +++ b/arch/x86/kernel/setup.c |
2587 | @@ -852,6 +852,12 @@ void __init setup_arch(char **cmdline_p) |
2588 | memblock_reserve(__pa_symbol(_text), |
2589 | (unsigned long)__bss_stop - (unsigned long)_text); |
2590 | |
2591 | + /* |
2592 | + * Make sure page 0 is always reserved because on systems with |
2593 | + * L1TF its contents can be leaked to user processes. |
2594 | + */ |
2595 | + memblock_reserve(0, PAGE_SIZE); |
2596 | + |
2597 | early_reserve_initrd(); |
2598 | |
2599 | /* |
2600 | diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c |
2601 | index 5c574dff4c1a..04adc8d60aed 100644 |
2602 | --- a/arch/x86/kernel/smp.c |
2603 | +++ b/arch/x86/kernel/smp.c |
2604 | @@ -261,6 +261,7 @@ __visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs) |
2605 | { |
2606 | ack_APIC_irq(); |
2607 | inc_irq_stat(irq_resched_count); |
2608 | + kvm_set_cpu_l1tf_flush_l1d(); |
2609 | |
2610 | if (trace_resched_ipi_enabled()) { |
2611 | /* |
2612 | diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c |
2613 | index 344d3c160f8d..5ebb0dbcf4f7 100644 |
2614 | --- a/arch/x86/kernel/smpboot.c |
2615 | +++ b/arch/x86/kernel/smpboot.c |
2616 | @@ -78,13 +78,7 @@ |
2617 | #include <asm/realmode.h> |
2618 | #include <asm/misc.h> |
2619 | #include <asm/spec-ctrl.h> |
2620 | - |
2621 | -/* Number of siblings per CPU package */ |
2622 | -int smp_num_siblings = 1; |
2623 | -EXPORT_SYMBOL(smp_num_siblings); |
2624 | - |
2625 | -/* Last level cache ID of each logical CPU */ |
2626 | -DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID; |
2627 | +#include <asm/hw_irq.h> |
2628 | |
2629 | /* representing HT siblings of each logical CPU */ |
2630 | DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); |
2631 | @@ -311,6 +305,23 @@ found: |
2632 | return 0; |
2633 | } |
2634 | |
2635 | +/** |
2636 | + * topology_is_primary_thread - Check whether CPU is the primary SMT thread |
2637 | + * @cpu: CPU to check |
2638 | + */ |
2639 | +bool topology_is_primary_thread(unsigned int cpu) |
2640 | +{ |
2641 | + return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu)); |
2642 | +} |
2643 | + |
2644 | +/** |
2645 | + * topology_smt_supported - Check whether SMT is supported by the CPUs |
2646 | + */ |
2647 | +bool topology_smt_supported(void) |
2648 | +{ |
2649 | + return smp_num_siblings > 1; |
2650 | +} |
2651 | + |
2652 | /** |
2653 | * topology_phys_to_logical_pkg - Map a physical package id to a logical |
2654 | * |
2655 | diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c |
2656 | index 879af864d99a..49a5c394f3ed 100644 |
2657 | --- a/arch/x86/kernel/time.c |
2658 | +++ b/arch/x86/kernel/time.c |
2659 | @@ -12,6 +12,7 @@ |
2660 | |
2661 | #include <linux/clockchips.h> |
2662 | #include <linux/interrupt.h> |
2663 | +#include <linux/irq.h> |
2664 | #include <linux/i8253.h> |
2665 | #include <linux/time.h> |
2666 | #include <linux/export.h> |
2667 | diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c |
2668 | index 2ef2f1fe875b..00e2ae033a0f 100644 |
2669 | --- a/arch/x86/kvm/mmu.c |
2670 | +++ b/arch/x86/kvm/mmu.c |
2671 | @@ -3825,6 +3825,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, |
2672 | { |
2673 | int r = 1; |
2674 | |
2675 | + vcpu->arch.l1tf_flush_l1d = true; |
2676 | switch (vcpu->arch.apf.host_apf_reason) { |
2677 | default: |
2678 | trace_kvm_page_fault(fault_address, error_code); |
2679 | diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
2680 | index cfa155078ebb..282bbcbf3b6a 100644 |
2681 | --- a/arch/x86/kvm/svm.c |
2682 | +++ b/arch/x86/kvm/svm.c |
2683 | @@ -175,6 +175,8 @@ struct vcpu_svm { |
2684 | uint64_t sysenter_eip; |
2685 | uint64_t tsc_aux; |
2686 | |
2687 | + u64 msr_decfg; |
2688 | + |
2689 | u64 next_rip; |
2690 | |
2691 | u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; |
2692 | @@ -1616,6 +1618,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) |
2693 | u32 dummy; |
2694 | u32 eax = 1; |
2695 | |
2696 | + vcpu->arch.microcode_version = 0x01000065; |
2697 | svm->spec_ctrl = 0; |
2698 | svm->virt_spec_ctrl = 0; |
2699 | |
2700 | @@ -3555,6 +3558,22 @@ static int cr8_write_interception(struct vcpu_svm *svm) |
2701 | return 0; |
2702 | } |
2703 | |
2704 | +static int svm_get_msr_feature(struct kvm_msr_entry *msr) |
2705 | +{ |
2706 | + msr->data = 0; |
2707 | + |
2708 | + switch (msr->index) { |
2709 | + case MSR_F10H_DECFG: |
2710 | + if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) |
2711 | + msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE; |
2712 | + break; |
2713 | + default: |
2714 | + return 1; |
2715 | + } |
2716 | + |
2717 | + return 0; |
2718 | +} |
2719 | + |
2720 | static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
2721 | { |
2722 | struct vcpu_svm *svm = to_svm(vcpu); |
2723 | @@ -3637,9 +3656,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
2724 | |
2725 | msr_info->data = svm->virt_spec_ctrl; |
2726 | break; |
2727 | - case MSR_IA32_UCODE_REV: |
2728 | - msr_info->data = 0x01000065; |
2729 | - break; |
2730 | case MSR_F15H_IC_CFG: { |
2731 | |
2732 | int family, model; |
2733 | @@ -3657,6 +3673,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
2734 | msr_info->data = 0x1E; |
2735 | } |
2736 | break; |
2737 | + case MSR_F10H_DECFG: |
2738 | + msr_info->data = svm->msr_decfg; |
2739 | + break; |
2740 | default: |
2741 | return kvm_get_msr_common(vcpu, msr_info); |
2742 | } |
2743 | @@ -3845,6 +3864,24 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) |
2744 | case MSR_VM_IGNNE: |
2745 | vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); |
2746 | break; |
2747 | + case MSR_F10H_DECFG: { |
2748 | + struct kvm_msr_entry msr_entry; |
2749 | + |
2750 | + msr_entry.index = msr->index; |
2751 | + if (svm_get_msr_feature(&msr_entry)) |
2752 | + return 1; |
2753 | + |
2754 | + /* Check the supported bits */ |
2755 | + if (data & ~msr_entry.data) |
2756 | + return 1; |
2757 | + |
2758 | + /* Don't allow the guest to change a bit, #GP */ |
2759 | + if (!msr->host_initiated && (data ^ msr_entry.data)) |
2760 | + return 1; |
2761 | + |
2762 | + svm->msr_decfg = data; |
2763 | + break; |
2764 | + } |
2765 | case MSR_IA32_APICBASE: |
2766 | if (kvm_vcpu_apicv_active(vcpu)) |
2767 | avic_update_vapic_bar(to_svm(vcpu), data); |
2768 | @@ -5588,6 +5625,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { |
2769 | .vcpu_unblocking = svm_vcpu_unblocking, |
2770 | |
2771 | .update_bp_intercept = update_bp_intercept, |
2772 | + .get_msr_feature = svm_get_msr_feature, |
2773 | .get_msr = svm_get_msr, |
2774 | .set_msr = svm_set_msr, |
2775 | .get_segment_base = svm_get_segment_base, |
2776 | diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
2777 | index 8d000fde1414..f015ca3997d9 100644 |
2778 | --- a/arch/x86/kvm/vmx.c |
2779 | +++ b/arch/x86/kvm/vmx.c |
2780 | @@ -191,6 +191,150 @@ module_param(ple_window_max, int, S_IRUGO); |
2781 | |
2782 | extern const ulong vmx_return; |
2783 | |
2784 | +static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); |
2785 | +static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); |
2786 | +static DEFINE_MUTEX(vmx_l1d_flush_mutex); |
2787 | + |
2788 | +/* Storage for pre module init parameter parsing */ |
2789 | +static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO; |
2790 | + |
2791 | +static const struct { |
2792 | + const char *option; |
2793 | + enum vmx_l1d_flush_state cmd; |
2794 | +} vmentry_l1d_param[] = { |
2795 | + {"auto", VMENTER_L1D_FLUSH_AUTO}, |
2796 | + {"never", VMENTER_L1D_FLUSH_NEVER}, |
2797 | + {"cond", VMENTER_L1D_FLUSH_COND}, |
2798 | + {"always", VMENTER_L1D_FLUSH_ALWAYS}, |
2799 | +}; |
2800 | + |
2801 | +#define L1D_CACHE_ORDER 4 |
2802 | +static void *vmx_l1d_flush_pages; |
2803 | + |
2804 | +static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) |
2805 | +{ |
2806 | + struct page *page; |
2807 | + unsigned int i; |
2808 | + |
2809 | + if (!enable_ept) { |
2810 | + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED; |
2811 | + return 0; |
2812 | + } |
2813 | + |
2814 | + if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) { |
2815 | + u64 msr; |
2816 | + |
2817 | + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); |
2818 | + if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) { |
2819 | + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED; |
2820 | + return 0; |
2821 | + } |
2822 | + } |
2823 | + |
2824 | +	/* If set to auto, use the default l1tf mitigation method */ |
2825 | + if (l1tf == VMENTER_L1D_FLUSH_AUTO) { |
2826 | + switch (l1tf_mitigation) { |
2827 | + case L1TF_MITIGATION_OFF: |
2828 | + l1tf = VMENTER_L1D_FLUSH_NEVER; |
2829 | + break; |
2830 | + case L1TF_MITIGATION_FLUSH_NOWARN: |
2831 | + case L1TF_MITIGATION_FLUSH: |
2832 | + case L1TF_MITIGATION_FLUSH_NOSMT: |
2833 | + l1tf = VMENTER_L1D_FLUSH_COND; |
2834 | + break; |
2835 | + case L1TF_MITIGATION_FULL: |
2836 | + case L1TF_MITIGATION_FULL_FORCE: |
2837 | + l1tf = VMENTER_L1D_FLUSH_ALWAYS; |
2838 | + break; |
2839 | + } |
2840 | + } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) { |
2841 | + l1tf = VMENTER_L1D_FLUSH_ALWAYS; |
2842 | + } |
2843 | + |
2844 | + if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages && |
2845 | + !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) { |
2846 | + page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER); |
2847 | + if (!page) |
2848 | + return -ENOMEM; |
2849 | + vmx_l1d_flush_pages = page_address(page); |
2850 | + |
2851 | + /* |
2852 | + * Initialize each page with a different pattern in |
2853 | + * order to protect against KSM in the nested |
2854 | + * virtualization case. |
2855 | + */ |
2856 | + for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) { |
2857 | + memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1, |
2858 | + PAGE_SIZE); |
2859 | + } |
2860 | + } |
2861 | + |
2862 | + l1tf_vmx_mitigation = l1tf; |
2863 | + |
2864 | + if (l1tf != VMENTER_L1D_FLUSH_NEVER) |
2865 | + static_branch_enable(&vmx_l1d_should_flush); |
2866 | + else |
2867 | + static_branch_disable(&vmx_l1d_should_flush); |
2868 | + |
2869 | + if (l1tf == VMENTER_L1D_FLUSH_COND) |
2870 | + static_branch_enable(&vmx_l1d_flush_cond); |
2871 | + else |
2872 | + static_branch_disable(&vmx_l1d_flush_cond); |
2873 | + return 0; |
2874 | +} |
2875 | + |
2876 | +static int vmentry_l1d_flush_parse(const char *s) |
2877 | +{ |
2878 | + unsigned int i; |
2879 | + |
2880 | + if (s) { |
2881 | + for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) { |
2882 | + if (sysfs_streq(s, vmentry_l1d_param[i].option)) |
2883 | + return vmentry_l1d_param[i].cmd; |
2884 | + } |
2885 | + } |
2886 | + return -EINVAL; |
2887 | +} |
2888 | + |
2889 | +static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp) |
2890 | +{ |
2891 | + int l1tf, ret; |
2892 | + |
2893 | + if (!boot_cpu_has(X86_BUG_L1TF)) |
2894 | + return 0; |
2895 | + |
2896 | + l1tf = vmentry_l1d_flush_parse(s); |
2897 | + if (l1tf < 0) |
2898 | + return l1tf; |
2899 | + |
2900 | + /* |
2901 | + * Has vmx_init() run already? If not then this is the pre init |
2902 | + * parameter parsing. In that case just store the value and let |
2903 | + * vmx_init() do the proper setup after enable_ept has been |
2904 | + * established. |
2905 | + */ |
2906 | + if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) { |
2907 | + vmentry_l1d_flush_param = l1tf; |
2908 | + return 0; |
2909 | + } |
2910 | + |
2911 | + mutex_lock(&vmx_l1d_flush_mutex); |
2912 | + ret = vmx_setup_l1d_flush(l1tf); |
2913 | + mutex_unlock(&vmx_l1d_flush_mutex); |
2914 | + return ret; |
2915 | +} |
2916 | + |
2917 | +static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) |
2918 | +{ |
2919 | + return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); |
2920 | +} |
2921 | + |
2922 | +static const struct kernel_param_ops vmentry_l1d_flush_ops = { |
2923 | + .set = vmentry_l1d_flush_set, |
2924 | + .get = vmentry_l1d_flush_get, |
2925 | +}; |
2926 | +module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644); |
2927 | + |
2928 | #define NR_AUTOLOAD_MSRS 8 |
2929 | |
2930 | struct vmcs { |
2931 | @@ -567,6 +711,11 @@ static inline int pi_test_sn(struct pi_desc *pi_desc) |
2932 | (unsigned long *)&pi_desc->control); |
2933 | } |
2934 | |
2935 | +struct vmx_msrs { |
2936 | + unsigned int nr; |
2937 | + struct vmx_msr_entry val[NR_AUTOLOAD_MSRS]; |
2938 | +}; |
2939 | + |
2940 | struct vcpu_vmx { |
2941 | struct kvm_vcpu vcpu; |
2942 | unsigned long host_rsp; |
2943 | @@ -600,9 +749,8 @@ struct vcpu_vmx { |
2944 | struct loaded_vmcs *loaded_vmcs; |
2945 | bool __launched; /* temporary, used in vmx_vcpu_run */ |
2946 | struct msr_autoload { |
2947 | - unsigned nr; |
2948 | - struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS]; |
2949 | - struct vmx_msr_entry host[NR_AUTOLOAD_MSRS]; |
2950 | + struct vmx_msrs guest; |
2951 | + struct vmx_msrs host; |
2952 | } msr_autoload; |
2953 | struct { |
2954 | int loaded; |
2955 | @@ -1967,9 +2115,20 @@ static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, |
2956 | vm_exit_controls_clearbit(vmx, exit); |
2957 | } |
2958 | |
2959 | +static int find_msr(struct vmx_msrs *m, unsigned int msr) |
2960 | +{ |
2961 | + unsigned int i; |
2962 | + |
2963 | + for (i = 0; i < m->nr; ++i) { |
2964 | + if (m->val[i].index == msr) |
2965 | + return i; |
2966 | + } |
2967 | + return -ENOENT; |
2968 | +} |
2969 | + |
2970 | static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) |
2971 | { |
2972 | - unsigned i; |
2973 | + int i; |
2974 | struct msr_autoload *m = &vmx->msr_autoload; |
2975 | |
2976 | switch (msr) { |
2977 | @@ -1990,18 +2149,21 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) |
2978 | } |
2979 | break; |
2980 | } |
2981 | + i = find_msr(&m->guest, msr); |
2982 | + if (i < 0) |
2983 | + goto skip_guest; |
2984 | + --m->guest.nr; |
2985 | + m->guest.val[i] = m->guest.val[m->guest.nr]; |
2986 | + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); |
2987 | |
2988 | - for (i = 0; i < m->nr; ++i) |
2989 | - if (m->guest[i].index == msr) |
2990 | - break; |
2991 | - |
2992 | - if (i == m->nr) |
2993 | +skip_guest: |
2994 | + i = find_msr(&m->host, msr); |
2995 | + if (i < 0) |
2996 | return; |
2997 | - --m->nr; |
2998 | - m->guest[i] = m->guest[m->nr]; |
2999 | - m->host[i] = m->host[m->nr]; |
3000 | - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); |
3001 | - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); |
3002 | + |
3003 | + --m->host.nr; |
3004 | + m->host.val[i] = m->host.val[m->host.nr]; |
3005 | + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); |
3006 | } |
3007 | |
3008 | static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, |
3009 | @@ -2016,9 +2178,9 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, |
3010 | } |
3011 | |
3012 | static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, |
3013 | - u64 guest_val, u64 host_val) |
3014 | + u64 guest_val, u64 host_val, bool entry_only) |
3015 | { |
3016 | - unsigned i; |
3017 | + int i, j = 0; |
3018 | struct msr_autoload *m = &vmx->msr_autoload; |
3019 | |
3020 | switch (msr) { |
3021 | @@ -2053,24 +2215,31 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, |
3022 | wrmsrl(MSR_IA32_PEBS_ENABLE, 0); |
3023 | } |
3024 | |
3025 | - for (i = 0; i < m->nr; ++i) |
3026 | - if (m->guest[i].index == msr) |
3027 | - break; |
3028 | + i = find_msr(&m->guest, msr); |
3029 | + if (!entry_only) |
3030 | + j = find_msr(&m->host, msr); |
3031 | |
3032 | - if (i == NR_AUTOLOAD_MSRS) { |
3033 | + if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { |
3034 | printk_once(KERN_WARNING "Not enough msr switch entries. " |
3035 | "Can't add msr %x\n", msr); |
3036 | return; |
3037 | - } else if (i == m->nr) { |
3038 | - ++m->nr; |
3039 | - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); |
3040 | - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); |
3041 | } |
3042 | + if (i < 0) { |
3043 | + i = m->guest.nr++; |
3044 | + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); |
3045 | + } |
3046 | + m->guest.val[i].index = msr; |
3047 | + m->guest.val[i].value = guest_val; |
3048 | |
3049 | - m->guest[i].index = msr; |
3050 | - m->guest[i].value = guest_val; |
3051 | - m->host[i].index = msr; |
3052 | - m->host[i].value = host_val; |
3053 | + if (entry_only) |
3054 | + return; |
3055 | + |
3056 | + if (j < 0) { |
3057 | + j = m->host.nr++; |
3058 | + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); |
3059 | + } |
3060 | + m->host.val[j].index = msr; |
3061 | + m->host.val[j].value = host_val; |
3062 | } |
3063 | |
3064 | static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) |
3065 | @@ -2114,7 +2283,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) |
3066 | guest_efer &= ~EFER_LME; |
3067 | if (guest_efer != host_efer) |
3068 | add_atomic_switch_msr(vmx, MSR_EFER, |
3069 | - guest_efer, host_efer); |
3070 | + guest_efer, host_efer, false); |
3071 | return false; |
3072 | } else { |
3073 | guest_efer &= ~ignore_bits; |
3074 | @@ -3266,6 +3435,11 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, |
3075 | return !(val & ~valid_bits); |
3076 | } |
3077 | |
3078 | +static int vmx_get_msr_feature(struct kvm_msr_entry *msr) |
3079 | +{ |
3080 | + return 1; |
3081 | +} |
3082 | + |
3083 | /* |
3084 | * Reads an msr value (of 'msr_index') into 'pdata'. |
3085 | * Returns 0 on success, non-0 otherwise. |
3086 | @@ -3523,7 +3697,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
3087 | vcpu->arch.ia32_xss = data; |
3088 | if (vcpu->arch.ia32_xss != host_xss) |
3089 | add_atomic_switch_msr(vmx, MSR_IA32_XSS, |
3090 | - vcpu->arch.ia32_xss, host_xss); |
3091 | + vcpu->arch.ia32_xss, host_xss, false); |
3092 | else |
3093 | clear_atomic_switch_msr(vmx, MSR_IA32_XSS); |
3094 | break; |
3095 | @@ -5714,9 +5888,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) |
3096 | |
3097 | vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); |
3098 | vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); |
3099 | - vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); |
3100 | + vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); |
3101 | vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); |
3102 | - vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); |
3103 | + vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); |
3104 | |
3105 | if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) |
3106 | vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); |
3107 | @@ -5736,8 +5910,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) |
3108 | ++vmx->nmsrs; |
3109 | } |
3110 | |
3111 | - if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) |
3112 | - rdmsrl(MSR_IA32_ARCH_CAPABILITIES, vmx->arch_capabilities); |
3113 | + vmx->arch_capabilities = kvm_get_arch_capabilities(); |
3114 | |
3115 | vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); |
3116 | |
3117 | @@ -5770,6 +5943,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) |
3118 | vmx->rmode.vm86_active = 0; |
3119 | vmx->spec_ctrl = 0; |
3120 | |
3121 | + vcpu->arch.microcode_version = 0x100000000ULL; |
3122 | vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); |
3123 | kvm_set_cr8(vcpu, 0); |
3124 | |
3125 | @@ -8987,6 +9161,79 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) |
3126 | } |
3127 | } |
3128 | |
3129 | +/* |
3130 | + * Software based L1D cache flush which is used when microcode providing |
3131 | + * the cache control MSR is not loaded. |
3132 | + * |
3133 | + * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but |
3134 | + * flushing it requires reading in 64 KiB because the replacement algorithm |
3135 | + * is not exactly LRU. This could be sized at runtime via topology |
3136 | + * information, but as all relevant affected CPUs have a 32 KiB L1D cache |
3137 | + * there is no point in doing so. |
3138 | + */ |
3139 | +#define L1D_CACHE_ORDER 4 |
3140 | +static void *vmx_l1d_flush_pages; |
3141 | + |
3142 | +static void vmx_l1d_flush(struct kvm_vcpu *vcpu) |
3143 | +{ |
3144 | + int size = PAGE_SIZE << L1D_CACHE_ORDER; |
3145 | + |
3146 | + /* |
3147 | +	 * This code is only executed when the flush mode is 'cond' or |
3148 | +	 * 'always'. |
3149 | + */ |
3150 | + if (static_branch_likely(&vmx_l1d_flush_cond)) { |
3151 | + bool flush_l1d; |
3152 | + |
3153 | + /* |
3154 | + * Clear the per-vcpu flush bit, it gets set again |
3155 | + * either from vcpu_run() or from one of the unsafe |
3156 | + * VMEXIT handlers. |
3157 | + */ |
3158 | + flush_l1d = vcpu->arch.l1tf_flush_l1d; |
3159 | + vcpu->arch.l1tf_flush_l1d = false; |
3160 | + |
3161 | + /* |
3162 | + * Clear the per-cpu flush bit, it gets set again from |
3163 | + * the interrupt handlers. |
3164 | + */ |
3165 | + flush_l1d |= kvm_get_cpu_l1tf_flush_l1d(); |
3166 | + kvm_clear_cpu_l1tf_flush_l1d(); |
3167 | + |
3168 | + if (!flush_l1d) |
3169 | + return; |
3170 | + } |
3171 | + |
3172 | + vcpu->stat.l1d_flush++; |
3173 | + |
3174 | + if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) { |
3175 | + wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); |
3176 | + return; |
3177 | + } |
3178 | + |
3179 | + asm volatile( |
3180 | + /* First ensure the pages are in the TLB */ |
3181 | + "xorl %%eax, %%eax\n" |
3182 | + ".Lpopulate_tlb:\n\t" |
3183 | + "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" |
3184 | + "addl $4096, %%eax\n\t" |
3185 | + "cmpl %%eax, %[size]\n\t" |
3186 | + "jne .Lpopulate_tlb\n\t" |
3187 | + "xorl %%eax, %%eax\n\t" |
3188 | + "cpuid\n\t" |
3189 | + /* Now fill the cache */ |
3190 | + "xorl %%eax, %%eax\n" |
3191 | + ".Lfill_cache:\n" |
3192 | + "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" |
3193 | + "addl $64, %%eax\n\t" |
3194 | + "cmpl %%eax, %[size]\n\t" |
3195 | + "jne .Lfill_cache\n\t" |
3196 | + "lfence\n" |
3197 | + :: [flush_pages] "r" (vmx_l1d_flush_pages), |
3198 | + [size] "r" (size) |
3199 | + : "eax", "ebx", "ecx", "edx"); |
3200 | +} |
3201 | + |
3202 | static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) |
3203 | { |
3204 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
3205 | @@ -9390,7 +9637,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) |
3206 | clear_atomic_switch_msr(vmx, msrs[i].msr); |
3207 | else |
3208 | add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, |
3209 | - msrs[i].host); |
3210 | + msrs[i].host, false); |
3211 | } |
3212 | |
3213 | static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu) |
3214 | @@ -9483,6 +9730,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) |
3215 | |
3216 | vmx->__launched = vmx->loaded_vmcs->launched; |
3217 | |
3218 | + if (static_branch_unlikely(&vmx_l1d_should_flush)) |
3219 | + vmx_l1d_flush(vcpu); |
3220 | + |
3221 | asm( |
3222 | /* Store host registers */ |
3223 | "push %%" _ASM_DX "; push %%" _ASM_BP ";" |
3224 | @@ -9835,6 +10085,37 @@ free_vcpu: |
3225 | return ERR_PTR(err); |
3226 | } |
3227 | |
3228 | +#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" |
3229 | +#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" |
3230 | + |
3231 | +static int vmx_vm_init(struct kvm *kvm) |
3232 | +{ |
3233 | + if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) { |
3234 | + switch (l1tf_mitigation) { |
3235 | + case L1TF_MITIGATION_OFF: |
3236 | + case L1TF_MITIGATION_FLUSH_NOWARN: |
3237 | + /* 'I explicitly don't care' is set */ |
3238 | + break; |
3239 | + case L1TF_MITIGATION_FLUSH: |
3240 | + case L1TF_MITIGATION_FLUSH_NOSMT: |
3241 | + case L1TF_MITIGATION_FULL: |
3242 | + /* |
3243 | + * Warn upon starting the first VM in a potentially |
3244 | + * insecure environment. |
3245 | + */ |
3246 | + if (cpu_smt_control == CPU_SMT_ENABLED) |
3247 | + pr_warn_once(L1TF_MSG_SMT); |
3248 | + if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) |
3249 | + pr_warn_once(L1TF_MSG_L1D); |
3250 | + break; |
3251 | + case L1TF_MITIGATION_FULL_FORCE: |
3252 | + /* Flush is enforced */ |
3253 | + break; |
3254 | + } |
3255 | + } |
3256 | + return 0; |
3257 | +} |
3258 | + |
3259 | static void __init vmx_check_processor_compat(void *rtn) |
3260 | { |
3261 | struct vmcs_config vmcs_conf; |
3262 | @@ -10774,10 +11055,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, |
3263 | * Set the MSR load/store lists to match L0's settings. |
3264 | */ |
3265 | vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); |
3266 | - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); |
3267 | - vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); |
3268 | - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); |
3269 | - vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); |
3270 | + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); |
3271 | + vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); |
3272 | + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); |
3273 | + vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); |
3274 | |
3275 | /* |
3276 | * HOST_RSP is normally set correctly in vmx_vcpu_run() just before |
3277 | @@ -11202,6 +11483,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) |
3278 | if (ret) |
3279 | return ret; |
3280 | |
3281 | + /* Hide L1D cache contents from the nested guest. */ |
3282 | + vmx->vcpu.arch.l1tf_flush_l1d = true; |
3283 | + |
3284 | /* |
3285 | * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken |
3286 | * by event injection, halt vcpu. |
3287 | @@ -11712,8 +11996,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, |
3288 | vmx_segment_cache_clear(vmx); |
3289 | |
3290 | /* Update any VMCS fields that might have changed while L2 ran */ |
3291 | - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); |
3292 | - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); |
3293 | + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); |
3294 | + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); |
3295 | vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); |
3296 | if (vmx->hv_deadline_tsc == -1) |
3297 | vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, |
3298 | @@ -12225,6 +12509,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { |
3299 | .cpu_has_accelerated_tpr = report_flexpriority, |
3300 | .has_emulated_msr = vmx_has_emulated_msr, |
3301 | |
3302 | + .vm_init = vmx_vm_init, |
3303 | + |
3304 | .vcpu_create = vmx_create_vcpu, |
3305 | .vcpu_free = vmx_free_vcpu, |
3306 | .vcpu_reset = vmx_vcpu_reset, |
3307 | @@ -12234,6 +12520,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { |
3308 | .vcpu_put = vmx_vcpu_put, |
3309 | |
3310 | .update_bp_intercept = update_exception_bitmap, |
3311 | + .get_msr_feature = vmx_get_msr_feature, |
3312 | .get_msr = vmx_get_msr, |
3313 | .set_msr = vmx_set_msr, |
3314 | .get_segment_base = vmx_get_segment_base, |
3315 | @@ -12341,22 +12628,18 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { |
3316 | .setup_mce = vmx_setup_mce, |
3317 | }; |
3318 | |
3319 | -static int __init vmx_init(void) |
3320 | +static void vmx_cleanup_l1d_flush(void) |
3321 | { |
3322 | - int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), |
3323 | - __alignof__(struct vcpu_vmx), THIS_MODULE); |
3324 | - if (r) |
3325 | - return r; |
3326 | - |
3327 | -#ifdef CONFIG_KEXEC_CORE |
3328 | - rcu_assign_pointer(crash_vmclear_loaded_vmcss, |
3329 | - crash_vmclear_local_loaded_vmcss); |
3330 | -#endif |
3331 | - |
3332 | - return 0; |
3333 | + if (vmx_l1d_flush_pages) { |
3334 | + free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER); |
3335 | + vmx_l1d_flush_pages = NULL; |
3336 | + } |
3337 | + /* Restore state so sysfs ignores VMX */ |
3338 | + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; |
3339 | } |
3340 | |
3341 | -static void __exit vmx_exit(void) |
3342 | + |
3343 | +static void vmx_exit(void) |
3344 | { |
3345 | #ifdef CONFIG_KEXEC_CORE |
3346 | RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); |
3347 | @@ -12364,7 +12647,40 @@ static void __exit vmx_exit(void) |
3348 | #endif |
3349 | |
3350 | kvm_exit(); |
3351 | + |
3352 | + vmx_cleanup_l1d_flush(); |
3353 | } |
3354 | +module_exit(vmx_exit) |
3355 | |
3356 | +static int __init vmx_init(void) |
3357 | +{ |
3358 | + int r; |
3359 | + |
3360 | + r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), |
3361 | + __alignof__(struct vcpu_vmx), THIS_MODULE); |
3362 | + if (r) |
3363 | + return r; |
3364 | + |
3365 | + /* |
3366 | + * Must be called after kvm_init() so enable_ept is properly set |
3367 | +	 * up. Hand in the mitigation parameter value that was stored by |
3368 | +	 * the pre-module-init parser. If no parameter was given, it will |
3369 | + * contain 'auto' which will be turned into the default 'cond' |
3370 | + * mitigation mode. |
3371 | + */ |
3372 | + if (boot_cpu_has(X86_BUG_L1TF)) { |
3373 | + r = vmx_setup_l1d_flush(vmentry_l1d_flush_param); |
3374 | + if (r) { |
3375 | + vmx_exit(); |
3376 | + return r; |
3377 | + } |
3378 | + } |
3379 | + |
3380 | +#ifdef CONFIG_KEXEC_CORE |
3381 | + rcu_assign_pointer(crash_vmclear_loaded_vmcss, |
3382 | + crash_vmclear_local_loaded_vmcss); |
3383 | +#endif |
3384 | + |
3385 | + return 0; |
3386 | +} |
3387 | module_init(vmx_init) |
3388 | -module_exit(vmx_exit) |
3389 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
3390 | index 2f3fe25639b3..5c2c09f6c1c3 100644 |
3391 | --- a/arch/x86/kvm/x86.c |
3392 | +++ b/arch/x86/kvm/x86.c |
3393 | @@ -181,6 +181,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { |
3394 | { "irq_injections", VCPU_STAT(irq_injections) }, |
3395 | { "nmi_injections", VCPU_STAT(nmi_injections) }, |
3396 | { "req_event", VCPU_STAT(req_event) }, |
3397 | + { "l1d_flush", VCPU_STAT(l1d_flush) }, |
3398 | { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, |
3399 | { "mmu_pte_write", VM_STAT(mmu_pte_write) }, |
3400 | { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, |
3401 | @@ -1041,6 +1042,71 @@ static u32 emulated_msrs[] = { |
3402 | |
3403 | static unsigned num_emulated_msrs; |
3404 | |
3405 | +/* |
3406 | + * List of msr numbers which are used to expose MSR-based features that |
3407 | + * can be used by a hypervisor to validate requested CPU features. |
3408 | + */ |
3409 | +static u32 msr_based_features[] = { |
3410 | + MSR_F10H_DECFG, |
3411 | + MSR_IA32_UCODE_REV, |
3412 | + MSR_IA32_ARCH_CAPABILITIES, |
3413 | +}; |
3414 | + |
3415 | +static unsigned int num_msr_based_features; |
3416 | + |
3417 | +u64 kvm_get_arch_capabilities(void) |
3418 | +{ |
3419 | + u64 data; |
3420 | + |
3421 | + rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data); |
3422 | + |
3423 | + /* |
3424 | + * If we're doing cache flushes (either "always" or "cond") |
3425 | + * we will do one whenever the guest does a vmlaunch/vmresume. |
3426 | + * If an outer hypervisor is doing the cache flush for us |
3427 | + * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that |
3428 | + * capability to the guest too, and if EPT is disabled we're not |
3429 | + * vulnerable. Overall, only VMENTER_L1D_FLUSH_NEVER will |
3430 | + * require a nested hypervisor to do a flush of its own. |
3431 | + */ |
3432 | + if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER) |
3433 | + data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH; |
3434 | + |
3435 | + return data; |
3436 | +} |
3437 | +EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities); |
3438 | + |
3439 | +static int kvm_get_msr_feature(struct kvm_msr_entry *msr) |
3440 | +{ |
3441 | + switch (msr->index) { |
3442 | + case MSR_IA32_ARCH_CAPABILITIES: |
3443 | + msr->data = kvm_get_arch_capabilities(); |
3444 | + break; |
3445 | + case MSR_IA32_UCODE_REV: |
3446 | + rdmsrl_safe(msr->index, &msr->data); |
3447 | + break; |
3448 | + default: |
3449 | + if (kvm_x86_ops->get_msr_feature(msr)) |
3450 | + return 1; |
3451 | + } |
3452 | + return 0; |
3453 | +} |
3454 | + |
3455 | +static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) |
3456 | +{ |
3457 | + struct kvm_msr_entry msr; |
3458 | + int r; |
3459 | + |
3460 | + msr.index = index; |
3461 | + r = kvm_get_msr_feature(&msr); |
3462 | + if (r) |
3463 | + return r; |
3464 | + |
3465 | + *data = msr.data; |
3466 | + |
3467 | + return 0; |
3468 | +} |
3469 | + |
3470 | bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) |
3471 | { |
3472 | if (efer & efer_reserved_bits) |
3473 | @@ -2156,7 +2222,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
3474 | |
3475 | switch (msr) { |
3476 | case MSR_AMD64_NB_CFG: |
3477 | - case MSR_IA32_UCODE_REV: |
3478 | case MSR_IA32_UCODE_WRITE: |
3479 | case MSR_VM_HSAVE_PA: |
3480 | case MSR_AMD64_PATCH_LOADER: |
3481 | @@ -2164,6 +2229,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
3482 | case MSR_AMD64_DC_CFG: |
3483 | break; |
3484 | |
3485 | + case MSR_IA32_UCODE_REV: |
3486 | + if (msr_info->host_initiated) |
3487 | + vcpu->arch.microcode_version = data; |
3488 | + break; |
3489 | case MSR_EFER: |
3490 | return set_efer(vcpu, data); |
3491 | case MSR_K7_HWCR: |
3492 | @@ -2450,7 +2519,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
3493 | msr_info->data = 0; |
3494 | break; |
3495 | case MSR_IA32_UCODE_REV: |
3496 | - msr_info->data = 0x100000000ULL; |
3497 | + msr_info->data = vcpu->arch.microcode_version; |
3498 | break; |
3499 | case MSR_MTRRcap: |
3500 | case 0x200 ... 0x2ff: |
3501 | @@ -2600,13 +2669,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, |
3502 | int (*do_msr)(struct kvm_vcpu *vcpu, |
3503 | unsigned index, u64 *data)) |
3504 | { |
3505 | - int i, idx; |
3506 | + int i; |
3507 | |
3508 | - idx = srcu_read_lock(&vcpu->kvm->srcu); |
3509 | for (i = 0; i < msrs->nmsrs; ++i) |
3510 | if (do_msr(vcpu, entries[i].index, &entries[i].data)) |
3511 | break; |
3512 | - srcu_read_unlock(&vcpu->kvm->srcu, idx); |
3513 | |
3514 | return i; |
3515 | } |
3516 | @@ -2705,6 +2772,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) |
3517 | case KVM_CAP_SET_BOOT_CPU_ID: |
3518 | case KVM_CAP_SPLIT_IRQCHIP: |
3519 | case KVM_CAP_IMMEDIATE_EXIT: |
3520 | + case KVM_CAP_GET_MSR_FEATURES: |
3521 | r = 1; |
3522 | break; |
3523 | case KVM_CAP_ADJUST_CLOCK: |
3524 | @@ -2819,6 +2887,31 @@ long kvm_arch_dev_ioctl(struct file *filp, |
3525 | goto out; |
3526 | r = 0; |
3527 | break; |
3528 | + case KVM_GET_MSR_FEATURE_INDEX_LIST: { |
3529 | + struct kvm_msr_list __user *user_msr_list = argp; |
3530 | + struct kvm_msr_list msr_list; |
3531 | + unsigned int n; |
3532 | + |
3533 | + r = -EFAULT; |
3534 | + if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) |
3535 | + goto out; |
3536 | + n = msr_list.nmsrs; |
3537 | + msr_list.nmsrs = num_msr_based_features; |
3538 | + if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) |
3539 | + goto out; |
3540 | + r = -E2BIG; |
3541 | + if (n < msr_list.nmsrs) |
3542 | + goto out; |
3543 | + r = -EFAULT; |
3544 | + if (copy_to_user(user_msr_list->indices, &msr_based_features, |
3545 | + num_msr_based_features * sizeof(u32))) |
3546 | + goto out; |
3547 | + r = 0; |
3548 | + break; |
3549 | + } |
3550 | + case KVM_GET_MSRS: |
3551 | + r = msr_io(NULL, argp, do_get_msr_feature, 1); |
3552 | + break; |
3553 | } |
3554 | default: |
3555 | r = -EINVAL; |
3556 | @@ -3553,12 +3646,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp, |
3557 | r = 0; |
3558 | break; |
3559 | } |
3560 | - case KVM_GET_MSRS: |
3561 | + case KVM_GET_MSRS: { |
3562 | + int idx = srcu_read_lock(&vcpu->kvm->srcu); |
3563 | r = msr_io(vcpu, argp, do_get_msr, 1); |
3564 | + srcu_read_unlock(&vcpu->kvm->srcu, idx); |
3565 | break; |
3566 | - case KVM_SET_MSRS: |
3567 | + } |
3568 | + case KVM_SET_MSRS: { |
3569 | + int idx = srcu_read_lock(&vcpu->kvm->srcu); |
3570 | r = msr_io(vcpu, argp, do_set_msr, 0); |
3571 | + srcu_read_unlock(&vcpu->kvm->srcu, idx); |
3572 | break; |
3573 | + } |
3574 | case KVM_TPR_ACCESS_REPORTING: { |
3575 | struct kvm_tpr_access_ctl tac; |
3576 | |
3577 | @@ -4333,6 +4432,19 @@ static void kvm_init_msr_list(void) |
3578 | j++; |
3579 | } |
3580 | num_emulated_msrs = j; |
3581 | + |
3582 | + for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) { |
3583 | + struct kvm_msr_entry msr; |
3584 | + |
3585 | + msr.index = msr_based_features[i]; |
3586 | + if (kvm_get_msr_feature(&msr)) |
3587 | + continue; |
3588 | + |
3589 | + if (j < i) |
3590 | + msr_based_features[j] = msr_based_features[i]; |
3591 | + j++; |
3592 | + } |
3593 | + num_msr_based_features = j; |
3594 | } |
3595 | |
3596 | static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, |
3597 | @@ -4573,6 +4685,9 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v |
3598 | int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, |
3599 | unsigned int bytes, struct x86_exception *exception) |
3600 | { |
3601 | + /* kvm_write_guest_virt_system can pull in tons of pages. */ |
3602 | + vcpu->arch.l1tf_flush_l1d = true; |
3603 | + |
3604 | return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, |
3605 | PFERR_WRITE_MASK, exception); |
3606 | } |
3607 | @@ -5701,6 +5816,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, |
3608 | bool writeback = true; |
3609 | bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; |
3610 | |
3611 | + vcpu->arch.l1tf_flush_l1d = true; |
3612 | + |
3613 | /* |
3614 | * Clear write_fault_to_shadow_pgtable here to ensure it is |
3615 | * never reused. |
3616 | @@ -7146,6 +7263,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu) |
3617 | struct kvm *kvm = vcpu->kvm; |
3618 | |
3619 | vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); |
3620 | + vcpu->arch.l1tf_flush_l1d = true; |
3621 | |
3622 | for (;;) { |
3623 | if (kvm_vcpu_running(vcpu)) { |
3624 | @@ -8153,6 +8271,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) |
3625 | |
3626 | void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) |
3627 | { |
3628 | + vcpu->arch.l1tf_flush_l1d = true; |
3629 | kvm_x86_ops->sched_in(vcpu, cpu); |
3630 | } |
3631 | |
3632 | diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c |
3633 | index 0133d26f16be..c2faff548f59 100644 |
3634 | --- a/arch/x86/mm/fault.c |
3635 | +++ b/arch/x86/mm/fault.c |
3636 | @@ -24,6 +24,7 @@ |
3637 | #include <asm/vsyscall.h> /* emulate_vsyscall */ |
3638 | #include <asm/vm86.h> /* struct vm86 */ |
3639 | #include <asm/mmu_context.h> /* vma_pkey() */ |
3640 | +#include <asm/sections.h> |
3641 | |
3642 | #define CREATE_TRACE_POINTS |
3643 | #include <asm/trace/exceptions.h> |
3644 | diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c |
3645 | index 071cbbbb60d9..37f60dfd7e4e 100644 |
3646 | --- a/arch/x86/mm/init.c |
3647 | +++ b/arch/x86/mm/init.c |
3648 | @@ -4,6 +4,8 @@ |
3649 | #include <linux/swap.h> |
3650 | #include <linux/memblock.h> |
3651 | #include <linux/bootmem.h> /* for max_low_pfn */ |
3652 | +#include <linux/swapfile.h> |
3653 | +#include <linux/swapops.h> |
3654 | |
3655 | #include <asm/set_memory.h> |
3656 | #include <asm/e820/api.h> |
3657 | @@ -880,3 +882,26 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) |
3658 | __cachemode2pte_tbl[cache] = __cm_idx2pte(entry); |
3659 | __pte2cachemode_tbl[entry] = cache; |
3660 | } |
3661 | + |
3662 | +#ifdef CONFIG_SWAP |
3663 | +unsigned long max_swapfile_size(void) |
3664 | +{ |
3665 | + unsigned long pages; |
3666 | + |
3667 | + pages = generic_max_swapfile_size(); |
3668 | + |
3669 | + if (boot_cpu_has_bug(X86_BUG_L1TF)) { |
3670 | + /* Limit the swap file size to MAX_PA/2 for L1TF workaround */ |
3671 | + unsigned long l1tf_limit = l1tf_pfn_limit() + 1; |
3672 | + /* |
3673 | +		 * Swap offsets are also encoded in 3 bits below those used for |
3674 | +		 * the pfn, which makes the usable limit higher. |
3675 | + */ |
3676 | +#if CONFIG_PGTABLE_LEVELS > 2 |
3677 | + l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT; |
3678 | +#endif |
3679 | + pages = min_t(unsigned long, l1tf_limit, pages); |
3680 | + } |
3681 | + return pages; |
3682 | +} |
3683 | +#endif |
3684 | diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c |
3685 | index 7c8686709636..79eb55ce69a9 100644 |
3686 | --- a/arch/x86/mm/kmmio.c |
3687 | +++ b/arch/x86/mm/kmmio.c |
3688 | @@ -126,24 +126,29 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr) |
3689 | |
3690 | static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old) |
3691 | { |
3692 | + pmd_t new_pmd; |
3693 | pmdval_t v = pmd_val(*pmd); |
3694 | if (clear) { |
3695 | - *old = v & _PAGE_PRESENT; |
3696 | - v &= ~_PAGE_PRESENT; |
3697 | - } else /* presume this has been called with clear==true previously */ |
3698 | - v |= *old; |
3699 | - set_pmd(pmd, __pmd(v)); |
3700 | + *old = v; |
3701 | + new_pmd = pmd_mknotpresent(*pmd); |
3702 | + } else { |
3703 | + /* Presume this has been called with clear==true previously */ |
3704 | + new_pmd = __pmd(*old); |
3705 | + } |
3706 | + set_pmd(pmd, new_pmd); |
3707 | } |
3708 | |
3709 | static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old) |
3710 | { |
3711 | pteval_t v = pte_val(*pte); |
3712 | if (clear) { |
3713 | - *old = v & _PAGE_PRESENT; |
3714 | - v &= ~_PAGE_PRESENT; |
3715 | - } else /* presume this has been called with clear==true previously */ |
3716 | - v |= *old; |
3717 | - set_pte_atomic(pte, __pte(v)); |
3718 | + *old = v; |
3719 | + /* Nothing should care about address */ |
3720 | + pte_clear(&init_mm, 0, pte); |
3721 | + } else { |
3722 | + /* Presume this has been called with clear==true previously */ |
3723 | + set_pte_atomic(pte, __pte(*old)); |
3724 | + } |
3725 | } |
3726 | |
3727 | static int clear_page_presence(struct kmmio_fault_page *f, bool clear) |
3728 | diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c |
3729 | index a99679826846..5f4805d69aab 100644 |
3730 | --- a/arch/x86/mm/mmap.c |
3731 | +++ b/arch/x86/mm/mmap.c |
3732 | @@ -174,3 +174,24 @@ const char *arch_vma_name(struct vm_area_struct *vma) |
3733 | return "[mpx]"; |
3734 | return NULL; |
3735 | } |
3736 | + |
3737 | +/* |
3738 | + * Only allow root to set high MMIO mappings to PROT_NONE. |
3739 | + * This prevents an unprivileged user from setting them to PROT_NONE and |
3740 | + * inverting them, then pointing them at valid memory for L1TF speculation. |
3741 | + * |
3742 | + * Note: locked-down kernels may want to disable the root override. |
3743 | + */ |
3744 | +bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) |
3745 | +{ |
3746 | + if (!boot_cpu_has_bug(X86_BUG_L1TF)) |
3747 | + return true; |
3748 | + if (!__pte_needs_invert(pgprot_val(prot))) |
3749 | + return true; |
3750 | + /* If it's real memory always allow */ |
3751 | + if (pfn_valid(pfn)) |
3752 | + return true; |
3753 | + if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN)) |
3754 | + return false; |
3755 | + return true; |
3756 | +} |
3757 | diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c |
3758 | index 4085897fef64..464f53da3a6f 100644 |
3759 | --- a/arch/x86/mm/pageattr.c |
3760 | +++ b/arch/x86/mm/pageattr.c |
3761 | @@ -1006,8 +1006,8 @@ static long populate_pmd(struct cpa_data *cpa, |
3762 | |
3763 | pmd = pmd_offset(pud, start); |
3764 | |
3765 | - set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | |
3766 | - massage_pgprot(pmd_pgprot))); |
3767 | + set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn, |
3768 | + canon_pgprot(pmd_pgprot)))); |
3769 | |
3770 | start += PMD_SIZE; |
3771 | cpa->pfn += PMD_SIZE >> PAGE_SHIFT; |
3772 | @@ -1079,8 +1079,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d, |
3773 | * Map everything starting from the Gb boundary, possibly with 1G pages |
3774 | */ |
3775 | while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) { |
3776 | - set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | |
3777 | - massage_pgprot(pud_pgprot))); |
3778 | + set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn, |
3779 | + canon_pgprot(pud_pgprot)))); |
3780 | |
3781 | start += PUD_SIZE; |
3782 | cpa->pfn += PUD_SIZE >> PAGE_SHIFT; |
3783 | diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c |
3784 | index ce38f165489b..d6f11accd37a 100644 |
3785 | --- a/arch/x86/mm/pti.c |
3786 | +++ b/arch/x86/mm/pti.c |
3787 | @@ -45,6 +45,7 @@ |
3788 | #include <asm/pgalloc.h> |
3789 | #include <asm/tlbflush.h> |
3790 | #include <asm/desc.h> |
3791 | +#include <asm/sections.h> |
3792 | |
3793 | #undef pr_fmt |
3794 | #define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt |
3795 | diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c |
3796 | index 4f5fa65a1011..2acd6be13375 100644 |
3797 | --- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c |
3798 | +++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c |
3799 | @@ -18,6 +18,7 @@ |
3800 | #include <asm/intel-mid.h> |
3801 | #include <asm/intel_scu_ipc.h> |
3802 | #include <asm/io_apic.h> |
3803 | +#include <asm/hw_irq.h> |
3804 | |
3805 | #define TANGIER_EXT_TIMER0_MSI 12 |
3806 | |
3807 | diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c |
3808 | index 0b530c53de1f..34f9a9ce6236 100644 |
3809 | --- a/arch/x86/platform/uv/tlb_uv.c |
3810 | +++ b/arch/x86/platform/uv/tlb_uv.c |
3811 | @@ -1285,6 +1285,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs) |
3812 | struct msg_desc msgdesc; |
3813 | |
3814 | ack_APIC_irq(); |
3815 | + kvm_set_cpu_l1tf_flush_l1d(); |
3816 | time_start = get_cycles(); |
3817 | |
3818 | bcp = &per_cpu(bau_control, smp_processor_id()); |
3819 | diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c |
3820 | index c9081c6671f0..df208af3cd74 100644 |
3821 | --- a/arch/x86/xen/enlighten.c |
3822 | +++ b/arch/x86/xen/enlighten.c |
3823 | @@ -3,6 +3,7 @@ |
3824 | #endif |
3825 | #include <linux/cpu.h> |
3826 | #include <linux/kexec.h> |
3827 | +#include <linux/slab.h> |
3828 | |
3829 | #include <xen/features.h> |
3830 | #include <xen/page.h> |
3831 | diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c |
3832 | index 433f14bcab15..93758b528d8f 100644 |
3833 | --- a/drivers/base/cpu.c |
3834 | +++ b/drivers/base/cpu.c |
3835 | @@ -527,16 +527,24 @@ ssize_t __weak cpu_show_spec_store_bypass(struct device *dev, |
3836 | return sprintf(buf, "Not affected\n"); |
3837 | } |
3838 | |
3839 | +ssize_t __weak cpu_show_l1tf(struct device *dev, |
3840 | + struct device_attribute *attr, char *buf) |
3841 | +{ |
3842 | + return sprintf(buf, "Not affected\n"); |
3843 | +} |
3844 | + |
3845 | static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); |
3846 | static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); |
3847 | static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); |
3848 | static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL); |
3849 | +static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL); |
3850 | |
3851 | static struct attribute *cpu_root_vulnerabilities_attrs[] = { |
3852 | &dev_attr_meltdown.attr, |
3853 | &dev_attr_spectre_v1.attr, |
3854 | &dev_attr_spectre_v2.attr, |
3855 | &dev_attr_spec_store_bypass.attr, |
3856 | + &dev_attr_l1tf.attr, |
3857 | NULL |
3858 | }; |
3859 | |
3860 | diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c |
3861 | index 6aef3bde10d7..c823914b3a80 100644 |
3862 | --- a/drivers/bluetooth/hci_ldisc.c |
3863 | +++ b/drivers/bluetooth/hci_ldisc.c |
3864 | @@ -115,12 +115,12 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) |
3865 | struct sk_buff *skb = hu->tx_skb; |
3866 | |
3867 | if (!skb) { |
3868 | - read_lock(&hu->proto_lock); |
3869 | + percpu_down_read(&hu->proto_lock); |
3870 | |
3871 | if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) |
3872 | skb = hu->proto->dequeue(hu); |
3873 | |
3874 | - read_unlock(&hu->proto_lock); |
3875 | + percpu_up_read(&hu->proto_lock); |
3876 | } else { |
3877 | hu->tx_skb = NULL; |
3878 | } |
3879 | @@ -130,7 +130,14 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) |
3880 | |
3881 | int hci_uart_tx_wakeup(struct hci_uart *hu) |
3882 | { |
3883 | - read_lock(&hu->proto_lock); |
3884 | + /* This may be called in an IRQ context, so we can't sleep. Therefore |
3885 | +	 * we only try to acquire the lock, and if that fails we assume the |
3886 | + * tty is being closed because that is the only time the write lock is |
3887 | + * acquired. If, however, at some point in the future the write lock |
3888 | + * is also acquired in other situations, then this must be revisited. |
3889 | + */ |
3890 | + if (!percpu_down_read_trylock(&hu->proto_lock)) |
3891 | + return 0; |
3892 | |
3893 | if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) |
3894 | goto no_schedule; |
3895 | @@ -145,7 +152,7 @@ int hci_uart_tx_wakeup(struct hci_uart *hu) |
3896 | schedule_work(&hu->write_work); |
3897 | |
3898 | no_schedule: |
3899 | - read_unlock(&hu->proto_lock); |
3900 | + percpu_up_read(&hu->proto_lock); |
3901 | |
3902 | return 0; |
3903 | } |
3904 | @@ -247,12 +254,12 @@ static int hci_uart_flush(struct hci_dev *hdev) |
3905 | tty_ldisc_flush(tty); |
3906 | tty_driver_flush_buffer(tty); |
3907 | |
3908 | - read_lock(&hu->proto_lock); |
3909 | + percpu_down_read(&hu->proto_lock); |
3910 | |
3911 | if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) |
3912 | hu->proto->flush(hu); |
3913 | |
3914 | - read_unlock(&hu->proto_lock); |
3915 | + percpu_up_read(&hu->proto_lock); |
3916 | |
3917 | return 0; |
3918 | } |
3919 | @@ -275,15 +282,15 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) |
3920 | BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb), |
3921 | skb->len); |
3922 | |
3923 | - read_lock(&hu->proto_lock); |
3924 | + percpu_down_read(&hu->proto_lock); |
3925 | |
3926 | if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { |
3927 | - read_unlock(&hu->proto_lock); |
3928 | + percpu_up_read(&hu->proto_lock); |
3929 | return -EUNATCH; |
3930 | } |
3931 | |
3932 | hu->proto->enqueue(hu, skb); |
3933 | - read_unlock(&hu->proto_lock); |
3934 | + percpu_up_read(&hu->proto_lock); |
3935 | |
3936 | hci_uart_tx_wakeup(hu); |
3937 | |
3938 | @@ -486,7 +493,7 @@ static int hci_uart_tty_open(struct tty_struct *tty) |
3939 | INIT_WORK(&hu->init_ready, hci_uart_init_work); |
3940 | INIT_WORK(&hu->write_work, hci_uart_write_work); |
3941 | |
3942 | - rwlock_init(&hu->proto_lock); |
3943 | + percpu_init_rwsem(&hu->proto_lock); |
3944 | |
3945 | /* Flush any pending characters in the driver */ |
3946 | tty_driver_flush_buffer(tty); |
3947 | @@ -503,7 +510,6 @@ static void hci_uart_tty_close(struct tty_struct *tty) |
3948 | { |
3949 | struct hci_uart *hu = tty->disc_data; |
3950 | struct hci_dev *hdev; |
3951 | - unsigned long flags; |
3952 | |
3953 | BT_DBG("tty %p", tty); |
3954 | |
3955 | @@ -518,9 +524,9 @@ static void hci_uart_tty_close(struct tty_struct *tty) |
3956 | hci_uart_close(hdev); |
3957 | |
3958 | if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) { |
3959 | - write_lock_irqsave(&hu->proto_lock, flags); |
3960 | + percpu_down_write(&hu->proto_lock); |
3961 | clear_bit(HCI_UART_PROTO_READY, &hu->flags); |
3962 | - write_unlock_irqrestore(&hu->proto_lock, flags); |
3963 | + percpu_up_write(&hu->proto_lock); |
3964 | |
3965 | cancel_work_sync(&hu->write_work); |
3966 | |
3967 | @@ -582,10 +588,10 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, |
3968 | if (!hu || tty != hu->tty) |
3969 | return; |
3970 | |
3971 | - read_lock(&hu->proto_lock); |
3972 | + percpu_down_read(&hu->proto_lock); |
3973 | |
3974 | if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { |
3975 | - read_unlock(&hu->proto_lock); |
3976 | + percpu_up_read(&hu->proto_lock); |
3977 | return; |
3978 | } |
3979 | |
3980 | @@ -593,7 +599,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, |
3981 | * tty caller |
3982 | */ |
3983 | hu->proto->recv(hu, data, count); |
3984 | - read_unlock(&hu->proto_lock); |
3985 | + percpu_up_read(&hu->proto_lock); |
3986 | |
3987 | if (hu->hdev) |
3988 | hu->hdev->stat.byte_rx += count; |
3989 | diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c |
3990 | index b725ac4f7ff6..52e6d4d1608e 100644 |
3991 | --- a/drivers/bluetooth/hci_serdev.c |
3992 | +++ b/drivers/bluetooth/hci_serdev.c |
3993 | @@ -304,6 +304,7 @@ int hci_uart_register_device(struct hci_uart *hu, |
3994 | hci_set_drvdata(hdev, hu); |
3995 | |
3996 | INIT_WORK(&hu->write_work, hci_uart_write_work); |
3997 | + percpu_init_rwsem(&hu->proto_lock); |
3998 | |
3999 | /* Only when vendor specific setup callback is provided, consider |
4000 | * the manufacturer information valid. This avoids filling in the |
4001 | diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h |
4002 | index d9cd95d81149..66e8c68e4607 100644 |
4003 | --- a/drivers/bluetooth/hci_uart.h |
4004 | +++ b/drivers/bluetooth/hci_uart.h |
4005 | @@ -87,7 +87,7 @@ struct hci_uart { |
4006 | struct work_struct write_work; |
4007 | |
4008 | const struct hci_uart_proto *proto; |
4009 | - rwlock_t proto_lock; /* Stop work for proto close */ |
4010 | + struct percpu_rw_semaphore proto_lock; /* Stop work for proto close */ |
4011 | void *priv; |
4012 | |
4013 | struct sk_buff *tx_skb; |
4014 | diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c |
4015 | index 3bf65288ffff..2fdf302ebdad 100644 |
4016 | --- a/drivers/gpu/drm/i915/intel_lpe_audio.c |
4017 | +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c |
4018 | @@ -62,6 +62,7 @@ |
4019 | |
4020 | #include <linux/acpi.h> |
4021 | #include <linux/device.h> |
4022 | +#include <linux/irq.h> |
4023 | #include <linux/pci.h> |
4024 | #include <linux/pm_runtime.h> |
4025 | |
4026 | diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c |
4027 | index 3baddfc997d1..b49ca02b399d 100644 |
4028 | --- a/drivers/mtd/nand/qcom_nandc.c |
4029 | +++ b/drivers/mtd/nand/qcom_nandc.c |
4030 | @@ -2544,6 +2544,9 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc, |
4031 | |
4032 | nand_set_flash_node(chip, dn); |
4033 | mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs); |
4034 | + if (!mtd->name) |
4035 | + return -ENOMEM; |
4036 | + |
4037 | mtd->owner = THIS_MODULE; |
4038 | mtd->dev.parent = dev; |
4039 | |
4040 | diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c |
4041 | index dfc076f9ee4b..d5e790dd589a 100644 |
4042 | --- a/drivers/net/xen-netfront.c |
4043 | +++ b/drivers/net/xen-netfront.c |
4044 | @@ -894,7 +894,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, |
4045 | struct sk_buff *skb, |
4046 | struct sk_buff_head *list) |
4047 | { |
4048 | - struct skb_shared_info *shinfo = skb_shinfo(skb); |
4049 | RING_IDX cons = queue->rx.rsp_cons; |
4050 | struct sk_buff *nskb; |
4051 | |
4052 | @@ -903,15 +902,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, |
4053 | RING_GET_RESPONSE(&queue->rx, ++cons); |
4054 | skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; |
4055 | |
4056 | - if (shinfo->nr_frags == MAX_SKB_FRAGS) { |
4057 | + if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) { |
4058 | unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; |
4059 | |
4060 | BUG_ON(pull_to <= skb_headlen(skb)); |
4061 | __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); |
4062 | } |
4063 | - BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); |
4064 | + BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); |
4065 | |
4066 | - skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag), |
4067 | + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, |
4068 | + skb_frag_page(nfrag), |
4069 | rx->offset, rx->status, PAGE_SIZE); |
4070 | |
4071 | skb_shinfo(nskb)->nr_frags = 0; |
4072 | diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c |
4073 | index 4523d7e1bcb9..ffc87a956d97 100644 |
4074 | --- a/drivers/pci/host/pci-hyperv.c |
4075 | +++ b/drivers/pci/host/pci-hyperv.c |
4076 | @@ -53,6 +53,8 @@ |
4077 | #include <linux/delay.h> |
4078 | #include <linux/semaphore.h> |
4079 | #include <linux/irqdomain.h> |
4080 | +#include <linux/irq.h> |
4081 | + |
4082 | #include <asm/irqdomain.h> |
4083 | #include <asm/apic.h> |
4084 | #include <linux/msi.h> |
4085 | diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c |
4086 | index 721a2a1c97ef..a63bba12aee4 100644 |
4087 | --- a/drivers/phy/mediatek/phy-mtk-tphy.c |
4088 | +++ b/drivers/phy/mediatek/phy-mtk-tphy.c |
4089 | @@ -438,9 +438,9 @@ static void u2_phy_instance_init(struct mtk_tphy *tphy, |
4090 | u32 index = instance->index; |
4091 | u32 tmp; |
4092 | |
4093 | - /* switch to USB function. (system register, force ip into usb mode) */ |
4094 | + /* switch to USB function, and enable usb pll */ |
4095 | tmp = readl(com + U3P_U2PHYDTM0); |
4096 | - tmp &= ~P2C_FORCE_UART_EN; |
4097 | + tmp &= ~(P2C_FORCE_UART_EN | P2C_FORCE_SUSPENDM); |
4098 | tmp |= P2C_RG_XCVRSEL_VAL(1) | P2C_RG_DATAIN_VAL(0); |
4099 | writel(tmp, com + U3P_U2PHYDTM0); |
4100 | |
4101 | @@ -500,10 +500,8 @@ static void u2_phy_instance_power_on(struct mtk_tphy *tphy, |
4102 | u32 index = instance->index; |
4103 | u32 tmp; |
4104 | |
4105 | - /* (force_suspendm=0) (let suspendm=1, enable usb 480MHz pll) */ |
4106 | tmp = readl(com + U3P_U2PHYDTM0); |
4107 | - tmp &= ~(P2C_FORCE_SUSPENDM | P2C_RG_XCVRSEL); |
4108 | - tmp &= ~(P2C_RG_DATAIN | P2C_DTM0_PART_MASK); |
4109 | + tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN | P2C_DTM0_PART_MASK); |
4110 | writel(tmp, com + U3P_U2PHYDTM0); |
4111 | |
4112 | /* OTG Enable */ |
4113 | @@ -538,7 +536,6 @@ static void u2_phy_instance_power_off(struct mtk_tphy *tphy, |
4114 | |
4115 | tmp = readl(com + U3P_U2PHYDTM0); |
4116 | tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN); |
4117 | - tmp |= P2C_FORCE_SUSPENDM; |
4118 | writel(tmp, com + U3P_U2PHYDTM0); |
4119 | |
4120 | /* OTG Disable */ |
4121 | @@ -546,18 +543,16 @@ static void u2_phy_instance_power_off(struct mtk_tphy *tphy, |
4122 | tmp &= ~PA6_RG_U2_OTG_VBUSCMP_EN; |
4123 | writel(tmp, com + U3P_USBPHYACR6); |
4124 | |
4125 | - /* let suspendm=0, set utmi into analog power down */ |
4126 | - tmp = readl(com + U3P_U2PHYDTM0); |
4127 | - tmp &= ~P2C_RG_SUSPENDM; |
4128 | - writel(tmp, com + U3P_U2PHYDTM0); |
4129 | - udelay(1); |
4130 | - |
4131 | tmp = readl(com + U3P_U2PHYDTM1); |
4132 | tmp &= ~(P2C_RG_VBUSVALID | P2C_RG_AVALID); |
4133 | tmp |= P2C_RG_SESSEND; |
4134 | writel(tmp, com + U3P_U2PHYDTM1); |
4135 | |
4136 | if (tphy->pdata->avoid_rx_sen_degradation && index) { |
4137 | + tmp = readl(com + U3P_U2PHYDTM0); |
4138 | + tmp &= ~(P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM); |
4139 | + writel(tmp, com + U3P_U2PHYDTM0); |
4140 | + |
4141 | tmp = readl(com + U3D_U2PHYDCR0); |
4142 | tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON; |
4143 | writel(tmp, com + U3D_U2PHYDCR0); |
4144 | diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c |
4145 | index dd9464920456..ef22b275d050 100644 |
4146 | --- a/drivers/scsi/hosts.c |
4147 | +++ b/drivers/scsi/hosts.c |
4148 | @@ -474,6 +474,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) |
4149 | shost->dma_boundary = 0xffffffff; |
4150 | |
4151 | shost->use_blk_mq = scsi_use_blk_mq; |
4152 | + shost->use_blk_mq = scsi_use_blk_mq || shost->hostt->force_blk_mq; |
4153 | |
4154 | device_initialize(&shost->shost_gendev); |
4155 | dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); |
4156 | diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c |
4157 | index 604a39dba5d0..5b4b7f9be2d7 100644 |
4158 | --- a/drivers/scsi/hpsa.c |
4159 | +++ b/drivers/scsi/hpsa.c |
4160 | @@ -1040,11 +1040,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c, |
4161 | c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); |
4162 | if (unlikely(!h->msix_vectors)) |
4163 | return; |
4164 | - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) |
4165 | - c->Header.ReplyQueue = |
4166 | - raw_smp_processor_id() % h->nreply_queues; |
4167 | - else |
4168 | - c->Header.ReplyQueue = reply_queue % h->nreply_queues; |
4169 | + c->Header.ReplyQueue = reply_queue; |
4170 | } |
4171 | } |
4172 | |
4173 | @@ -1058,10 +1054,7 @@ static void set_ioaccel1_performant_mode(struct ctlr_info *h, |
4174 | * Tell the controller to post the reply to the queue for this |
4175 | * processor. This seems to give the best I/O throughput. |
4176 | */ |
4177 | - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) |
4178 | - cp->ReplyQueue = smp_processor_id() % h->nreply_queues; |
4179 | - else |
4180 | - cp->ReplyQueue = reply_queue % h->nreply_queues; |
4181 | + cp->ReplyQueue = reply_queue; |
4182 | /* |
4183 | * Set the bits in the address sent down to include: |
4184 | * - performant mode bit (bit 0) |
4185 | @@ -1082,10 +1075,7 @@ static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h, |
4186 | /* Tell the controller to post the reply to the queue for this |
4187 | * processor. This seems to give the best I/O throughput. |
4188 | */ |
4189 | - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) |
4190 | - cp->reply_queue = smp_processor_id() % h->nreply_queues; |
4191 | - else |
4192 | - cp->reply_queue = reply_queue % h->nreply_queues; |
4193 | + cp->reply_queue = reply_queue; |
4194 | /* Set the bits in the address sent down to include: |
4195 | * - performant mode bit not used in ioaccel mode 2 |
4196 | * - pull count (bits 0-3) |
4197 | @@ -1104,10 +1094,7 @@ static void set_ioaccel2_performant_mode(struct ctlr_info *h, |
4198 | * Tell the controller to post the reply to the queue for this |
4199 | * processor. This seems to give the best I/O throughput. |
4200 | */ |
4201 | - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) |
4202 | - cp->reply_queue = smp_processor_id() % h->nreply_queues; |
4203 | - else |
4204 | - cp->reply_queue = reply_queue % h->nreply_queues; |
4205 | + cp->reply_queue = reply_queue; |
4206 | /* |
4207 | * Set the bits in the address sent down to include: |
4208 | * - performant mode bit not used in ioaccel mode 2 |
4209 | @@ -1152,6 +1139,8 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h, |
4210 | { |
4211 | dial_down_lockup_detection_during_fw_flash(h, c); |
4212 | atomic_inc(&h->commands_outstanding); |
4213 | + |
4214 | + reply_queue = h->reply_map[raw_smp_processor_id()]; |
4215 | switch (c->cmd_type) { |
4216 | case CMD_IOACCEL1: |
4217 | set_ioaccel1_performant_mode(h, c, reply_queue); |
4218 | @@ -7244,6 +7233,26 @@ static void hpsa_disable_interrupt_mode(struct ctlr_info *h) |
4219 | h->msix_vectors = 0; |
4220 | } |
4221 | |
4222 | +static void hpsa_setup_reply_map(struct ctlr_info *h) |
4223 | +{ |
4224 | + const struct cpumask *mask; |
4225 | + unsigned int queue, cpu; |
4226 | + |
4227 | + for (queue = 0; queue < h->msix_vectors; queue++) { |
4228 | + mask = pci_irq_get_affinity(h->pdev, queue); |
4229 | + if (!mask) |
4230 | + goto fallback; |
4231 | + |
4232 | + for_each_cpu(cpu, mask) |
4233 | + h->reply_map[cpu] = queue; |
4234 | + } |
4235 | + return; |
4236 | + |
4237 | +fallback: |
4238 | + for_each_possible_cpu(cpu) |
4239 | + h->reply_map[cpu] = 0; |
4240 | +} |
4241 | + |
4242 | /* If MSI/MSI-X is supported by the kernel we will try to enable it on |
4243 | * controllers that are capable. If not, we use legacy INTx mode. |
4244 | */ |
4245 | @@ -7639,6 +7648,10 @@ static int hpsa_pci_init(struct ctlr_info *h) |
4246 | err = hpsa_interrupt_mode(h); |
4247 | if (err) |
4248 | goto clean1; |
4249 | + |
4250 | + /* setup mapping between CPU and reply queue */ |
4251 | + hpsa_setup_reply_map(h); |
4252 | + |
4253 | err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); |
4254 | if (err) |
4255 | goto clean2; /* intmode+region, pci */ |
4256 | @@ -8284,6 +8297,28 @@ static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, |
4257 | return wq; |
4258 | } |
4259 | |
4260 | +static void hpda_free_ctlr_info(struct ctlr_info *h) |
4261 | +{ |
4262 | + kfree(h->reply_map); |
4263 | + kfree(h); |
4264 | +} |
4265 | + |
4266 | +static struct ctlr_info *hpda_alloc_ctlr_info(void) |
4267 | +{ |
4268 | + struct ctlr_info *h; |
4269 | + |
4270 | + h = kzalloc(sizeof(*h), GFP_KERNEL); |
4271 | + if (!h) |
4272 | + return NULL; |
4273 | + |
4274 | + h->reply_map = kzalloc(sizeof(*h->reply_map) * nr_cpu_ids, GFP_KERNEL); |
4275 | + if (!h->reply_map) { |
4276 | + kfree(h); |
4277 | + return NULL; |
4278 | + } |
4279 | + return h; |
4280 | +} |
4281 | + |
4282 | static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
4283 | { |
4284 | int dac, rc; |
4285 | @@ -8321,7 +8356,7 @@ reinit_after_soft_reset: |
4286 | * the driver. See comments in hpsa.h for more info. |
4287 | */ |
4288 | BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); |
4289 | - h = kzalloc(sizeof(*h), GFP_KERNEL); |
4290 | + h = hpda_alloc_ctlr_info(); |
4291 | if (!h) { |
4292 | dev_err(&pdev->dev, "Failed to allocate controller head\n"); |
4293 | return -ENOMEM; |
4294 | @@ -8726,7 +8761,7 @@ static void hpsa_remove_one(struct pci_dev *pdev) |
4295 | h->lockup_detected = NULL; /* init_one 2 */ |
4296 | /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */ |
4297 | |
4298 | - kfree(h); /* init_one 1 */ |
4299 | + hpda_free_ctlr_info(h); /* init_one 1 */ |
4300 | } |
4301 | |
4302 | static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, |
4303 | diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h |
4304 | index 018f980a701c..fb9f5e7f8209 100644 |
4305 | --- a/drivers/scsi/hpsa.h |
4306 | +++ b/drivers/scsi/hpsa.h |
4307 | @@ -158,6 +158,7 @@ struct bmic_controller_parameters { |
4308 | #pragma pack() |
4309 | |
4310 | struct ctlr_info { |
4311 | + unsigned int *reply_map; |
4312 | int ctlr; |
4313 | char devname[8]; |
4314 | char *product_name; |
4315 | diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c |
4316 | index 63bea6a65d51..8d579bf0fc81 100644 |
4317 | --- a/drivers/scsi/qla2xxx/qla_iocb.c |
4318 | +++ b/drivers/scsi/qla2xxx/qla_iocb.c |
4319 | @@ -2128,34 +2128,11 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) |
4320 | req_cnt = 1; |
4321 | handle = 0; |
4322 | |
4323 | - if (!sp) |
4324 | - goto skip_cmd_array; |
4325 | - |
4326 | - /* Check for room in outstanding command list. */ |
4327 | - handle = req->current_outstanding_cmd; |
4328 | - for (index = 1; index < req->num_outstanding_cmds; index++) { |
4329 | - handle++; |
4330 | - if (handle == req->num_outstanding_cmds) |
4331 | - handle = 1; |
4332 | - if (!req->outstanding_cmds[handle]) |
4333 | - break; |
4334 | - } |
4335 | - if (index == req->num_outstanding_cmds) { |
4336 | - ql_log(ql_log_warn, vha, 0x700b, |
4337 | - "No room on outstanding cmd array.\n"); |
4338 | - goto queuing_error; |
4339 | - } |
4340 | - |
4341 | - /* Prep command array. */ |
4342 | - req->current_outstanding_cmd = handle; |
4343 | - req->outstanding_cmds[handle] = sp; |
4344 | - sp->handle = handle; |
4345 | - |
4346 | - /* Adjust entry-counts as needed. */ |
4347 | - if (sp->type != SRB_SCSI_CMD) |
4348 | + if (sp && (sp->type != SRB_SCSI_CMD)) { |
4349 | + /* Adjust entry-counts as needed. */ |
4350 | req_cnt = sp->iocbs; |
4351 | + } |
4352 | |
4353 | -skip_cmd_array: |
4354 | /* Check for room on request queue. */ |
4355 | if (req->cnt < req_cnt + 2) { |
4356 | if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) |
4357 | @@ -2179,6 +2156,28 @@ skip_cmd_array: |
4358 | if (req->cnt < req_cnt + 2) |
4359 | goto queuing_error; |
4360 | |
4361 | + if (sp) { |
4362 | + /* Check for room in outstanding command list. */ |
4363 | + handle = req->current_outstanding_cmd; |
4364 | + for (index = 1; index < req->num_outstanding_cmds; index++) { |
4365 | + handle++; |
4366 | + if (handle == req->num_outstanding_cmds) |
4367 | + handle = 1; |
4368 | + if (!req->outstanding_cmds[handle]) |
4369 | + break; |
4370 | + } |
4371 | + if (index == req->num_outstanding_cmds) { |
4372 | + ql_log(ql_log_warn, vha, 0x700b, |
4373 | + "No room on outstanding cmd array.\n"); |
4374 | + goto queuing_error; |
4375 | + } |
4376 | + |
4377 | + /* Prep command array. */ |
4378 | + req->current_outstanding_cmd = handle; |
4379 | + req->outstanding_cmds[handle] = sp; |
4380 | + sp->handle = handle; |
4381 | + } |
4382 | + |
4383 | /* Prep packet */ |
4384 | req->cnt -= req_cnt; |
4385 | pkt = req->ring_ptr; |
4386 | @@ -2191,6 +2190,8 @@ skip_cmd_array: |
4387 | pkt->handle = handle; |
4388 | } |
4389 | |
4390 | + return pkt; |
4391 | + |
4392 | queuing_error: |
4393 | qpair->tgt_counters.num_alloc_iocb_failed++; |
4394 | return pkt; |
4395 | diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c |
4396 | index 3f3cb72e0c0c..d0389b20574d 100644 |
4397 | --- a/drivers/scsi/sr.c |
4398 | +++ b/drivers/scsi/sr.c |
4399 | @@ -523,18 +523,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt) |
4400 | static int sr_block_open(struct block_device *bdev, fmode_t mode) |
4401 | { |
4402 | struct scsi_cd *cd; |
4403 | + struct scsi_device *sdev; |
4404 | int ret = -ENXIO; |
4405 | |
4406 | + cd = scsi_cd_get(bdev->bd_disk); |
4407 | + if (!cd) |
4408 | + goto out; |
4409 | + |
4410 | + sdev = cd->device; |
4411 | + scsi_autopm_get_device(sdev); |
4412 | check_disk_change(bdev); |
4413 | |
4414 | mutex_lock(&sr_mutex); |
4415 | - cd = scsi_cd_get(bdev->bd_disk); |
4416 | - if (cd) { |
4417 | - ret = cdrom_open(&cd->cdi, bdev, mode); |
4418 | - if (ret) |
4419 | - scsi_cd_put(cd); |
4420 | - } |
4421 | + ret = cdrom_open(&cd->cdi, bdev, mode); |
4422 | mutex_unlock(&sr_mutex); |
4423 | + |
4424 | + scsi_autopm_put_device(sdev); |
4425 | + if (ret) |
4426 | + scsi_cd_put(cd); |
4427 | + |
4428 | +out: |
4429 | return ret; |
4430 | } |
4431 | |
4432 | @@ -562,6 +570,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, |
4433 | if (ret) |
4434 | goto out; |
4435 | |
4436 | + scsi_autopm_get_device(sdev); |
4437 | + |
4438 | /* |
4439 | * Send SCSI addressing ioctls directly to mid level, send other |
4440 | * ioctls to cdrom/block level. |
4441 | @@ -570,15 +580,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, |
4442 | case SCSI_IOCTL_GET_IDLUN: |
4443 | case SCSI_IOCTL_GET_BUS_NUMBER: |
4444 | ret = scsi_ioctl(sdev, cmd, argp); |
4445 | - goto out; |
4446 | + goto put; |
4447 | } |
4448 | |
4449 | ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg); |
4450 | if (ret != -ENOSYS) |
4451 | - goto out; |
4452 | + goto put; |
4453 | |
4454 | ret = scsi_ioctl(sdev, cmd, argp); |
4455 | |
4456 | +put: |
4457 | + scsi_autopm_put_device(sdev); |
4458 | + |
4459 | out: |
4460 | mutex_unlock(&sr_mutex); |
4461 | return ret; |
4462 | diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c |
4463 | index 7c28e8d4955a..54e3a0f6844c 100644 |
4464 | --- a/drivers/scsi/virtio_scsi.c |
4465 | +++ b/drivers/scsi/virtio_scsi.c |
4466 | @@ -91,9 +91,6 @@ struct virtio_scsi_vq { |
4467 | struct virtio_scsi_target_state { |
4468 | seqcount_t tgt_seq; |
4469 | |
4470 | - /* Count of outstanding requests. */ |
4471 | - atomic_t reqs; |
4472 | - |
4473 | /* Currently active virtqueue for requests sent to this target. */ |
4474 | struct virtio_scsi_vq *req_vq; |
4475 | }; |
4476 | @@ -152,8 +149,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) |
4477 | struct virtio_scsi_cmd *cmd = buf; |
4478 | struct scsi_cmnd *sc = cmd->sc; |
4479 | struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd; |
4480 | - struct virtio_scsi_target_state *tgt = |
4481 | - scsi_target(sc->device)->hostdata; |
4482 | |
4483 | dev_dbg(&sc->device->sdev_gendev, |
4484 | "cmd %p response %u status %#02x sense_len %u\n", |
4485 | @@ -210,8 +205,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) |
4486 | } |
4487 | |
4488 | sc->scsi_done(sc); |
4489 | - |
4490 | - atomic_dec(&tgt->reqs); |
4491 | } |
4492 | |
4493 | static void virtscsi_vq_done(struct virtio_scsi *vscsi, |
4494 | @@ -580,10 +573,7 @@ static int virtscsi_queuecommand_single(struct Scsi_Host *sh, |
4495 | struct scsi_cmnd *sc) |
4496 | { |
4497 | struct virtio_scsi *vscsi = shost_priv(sh); |
4498 | - struct virtio_scsi_target_state *tgt = |
4499 | - scsi_target(sc->device)->hostdata; |
4500 | |
4501 | - atomic_inc(&tgt->reqs); |
4502 | return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc); |
4503 | } |
4504 | |
4505 | @@ -596,55 +586,11 @@ static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi, |
4506 | return &vscsi->req_vqs[hwq]; |
4507 | } |
4508 | |
4509 | -static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi, |
4510 | - struct virtio_scsi_target_state *tgt) |
4511 | -{ |
4512 | - struct virtio_scsi_vq *vq; |
4513 | - unsigned long flags; |
4514 | - u32 queue_num; |
4515 | - |
4516 | - local_irq_save(flags); |
4517 | - if (atomic_inc_return(&tgt->reqs) > 1) { |
4518 | - unsigned long seq; |
4519 | - |
4520 | - do { |
4521 | - seq = read_seqcount_begin(&tgt->tgt_seq); |
4522 | - vq = tgt->req_vq; |
4523 | - } while (read_seqcount_retry(&tgt->tgt_seq, seq)); |
4524 | - } else { |
4525 | - /* no writes can be concurrent because of atomic_t */ |
4526 | - write_seqcount_begin(&tgt->tgt_seq); |
4527 | - |
4528 | - /* keep previous req_vq if a reader just arrived */ |
4529 | - if (unlikely(atomic_read(&tgt->reqs) > 1)) { |
4530 | - vq = tgt->req_vq; |
4531 | - goto unlock; |
4532 | - } |
4533 | - |
4534 | - queue_num = smp_processor_id(); |
4535 | - while (unlikely(queue_num >= vscsi->num_queues)) |
4536 | - queue_num -= vscsi->num_queues; |
4537 | - tgt->req_vq = vq = &vscsi->req_vqs[queue_num]; |
4538 | - unlock: |
4539 | - write_seqcount_end(&tgt->tgt_seq); |
4540 | - } |
4541 | - local_irq_restore(flags); |
4542 | - |
4543 | - return vq; |
4544 | -} |
4545 | - |
4546 | static int virtscsi_queuecommand_multi(struct Scsi_Host *sh, |
4547 | struct scsi_cmnd *sc) |
4548 | { |
4549 | struct virtio_scsi *vscsi = shost_priv(sh); |
4550 | - struct virtio_scsi_target_state *tgt = |
4551 | - scsi_target(sc->device)->hostdata; |
4552 | - struct virtio_scsi_vq *req_vq; |
4553 | - |
4554 | - if (shost_use_blk_mq(sh)) |
4555 | - req_vq = virtscsi_pick_vq_mq(vscsi, sc); |
4556 | - else |
4557 | - req_vq = virtscsi_pick_vq(vscsi, tgt); |
4558 | + struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc); |
4559 | |
4560 | return virtscsi_queuecommand(vscsi, req_vq, sc); |
4561 | } |
4562 | @@ -775,7 +721,6 @@ static int virtscsi_target_alloc(struct scsi_target *starget) |
4563 | return -ENOMEM; |
4564 | |
4565 | seqcount_init(&tgt->tgt_seq); |
4566 | - atomic_set(&tgt->reqs, 0); |
4567 | tgt->req_vq = &vscsi->req_vqs[0]; |
4568 | |
4569 | starget->hostdata = tgt; |
4570 | @@ -823,6 +768,7 @@ static struct scsi_host_template virtscsi_host_template_single = { |
4571 | .target_alloc = virtscsi_target_alloc, |
4572 | .target_destroy = virtscsi_target_destroy, |
4573 | .track_queue_depth = 1, |
4574 | + .force_blk_mq = 1, |
4575 | }; |
4576 | |
4577 | static struct scsi_host_template virtscsi_host_template_multi = { |
4578 | @@ -844,6 +790,7 @@ static struct scsi_host_template virtscsi_host_template_multi = { |
4579 | .target_destroy = virtscsi_target_destroy, |
4580 | .map_queues = virtscsi_map_queues, |
4581 | .track_queue_depth = 1, |
4582 | + .force_blk_mq = 1, |
4583 | }; |
4584 | |
4585 | #define virtscsi_config_get(vdev, fld) \ |
4586 | diff --git a/fs/dcache.c b/fs/dcache.c |
4587 | index 5f31a93150d1..8d4935978fec 100644 |
4588 | --- a/fs/dcache.c |
4589 | +++ b/fs/dcache.c |
4590 | @@ -357,14 +357,11 @@ static void dentry_unlink_inode(struct dentry * dentry) |
4591 | __releases(dentry->d_inode->i_lock) |
4592 | { |
4593 | struct inode *inode = dentry->d_inode; |
4594 | - bool hashed = !d_unhashed(dentry); |
4595 | |
4596 | - if (hashed) |
4597 | - raw_write_seqcount_begin(&dentry->d_seq); |
4598 | + raw_write_seqcount_begin(&dentry->d_seq); |
4599 | __d_clear_type_and_inode(dentry); |
4600 | hlist_del_init(&dentry->d_u.d_alias); |
4601 | - if (hashed) |
4602 | - raw_write_seqcount_end(&dentry->d_seq); |
4603 | + raw_write_seqcount_end(&dentry->d_seq); |
4604 | spin_unlock(&dentry->d_lock); |
4605 | spin_unlock(&inode->i_lock); |
4606 | if (!inode->i_nlink) |
4607 | @@ -1922,10 +1919,12 @@ struct dentry *d_make_root(struct inode *root_inode) |
4608 | |
4609 | if (root_inode) { |
4610 | res = __d_alloc(root_inode->i_sb, NULL); |
4611 | - if (res) |
4612 | + if (res) { |
4613 | + res->d_flags |= DCACHE_RCUACCESS; |
4614 | d_instantiate(res, root_inode); |
4615 | - else |
4616 | + } else { |
4617 | iput(root_inode); |
4618 | + } |
4619 | } |
4620 | return res; |
4621 | } |
4622 | diff --git a/fs/namespace.c b/fs/namespace.c |
4623 | index 1eb3bfd8be5a..9dc146e7b5e0 100644 |
4624 | --- a/fs/namespace.c |
4625 | +++ b/fs/namespace.c |
4626 | @@ -659,12 +659,21 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq) |
4627 | return 0; |
4628 | mnt = real_mount(bastard); |
4629 | mnt_add_count(mnt, 1); |
4630 | + smp_mb(); // see mntput_no_expire() |
4631 | if (likely(!read_seqretry(&mount_lock, seq))) |
4632 | return 0; |
4633 | if (bastard->mnt_flags & MNT_SYNC_UMOUNT) { |
4634 | mnt_add_count(mnt, -1); |
4635 | return 1; |
4636 | } |
4637 | + lock_mount_hash(); |
4638 | + if (unlikely(bastard->mnt_flags & MNT_DOOMED)) { |
4639 | + mnt_add_count(mnt, -1); |
4640 | + unlock_mount_hash(); |
4641 | + return 1; |
4642 | + } |
4643 | + unlock_mount_hash(); |
4644 | + /* caller will mntput() */ |
4645 | return -1; |
4646 | } |
4647 | |
4648 | @@ -1195,12 +1204,27 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput); |
4649 | static void mntput_no_expire(struct mount *mnt) |
4650 | { |
4651 | rcu_read_lock(); |
4652 | - mnt_add_count(mnt, -1); |
4653 | - if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */ |
4654 | + if (likely(READ_ONCE(mnt->mnt_ns))) { |
4655 | + /* |
4656 | + * Since we don't do lock_mount_hash() here, |
4657 | + * ->mnt_ns can change under us. However, if it's |
4658 | + * non-NULL, then there's a reference that won't |
4659 | + * be dropped until after an RCU delay done after |
4660 | + * turning ->mnt_ns NULL. So if we observe it |
4661 | + * non-NULL under rcu_read_lock(), the reference |
4662 | + * we are dropping is not the final one. |
4663 | + */ |
4664 | + mnt_add_count(mnt, -1); |
4665 | rcu_read_unlock(); |
4666 | return; |
4667 | } |
4668 | lock_mount_hash(); |
4669 | + /* |
4670 | + * make sure that if __legitimize_mnt() has not seen us grab |
4671 | + * mount_lock, we'll see their refcount increment here. |
4672 | + */ |
4673 | + smp_mb(); |
4674 | + mnt_add_count(mnt, -1); |
4675 | if (mnt_get_count(mnt)) { |
4676 | rcu_read_unlock(); |
4677 | unlock_mount_hash(); |
4678 | diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h |
4679 | index 2142bceaeb75..46a2f5d9aa25 100644 |
4680 | --- a/include/asm-generic/pgtable.h |
4681 | +++ b/include/asm-generic/pgtable.h |
4682 | @@ -1055,6 +1055,18 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, |
4683 | static inline void init_espfix_bsp(void) { } |
4684 | #endif |
4685 | |
4686 | +#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED |
4687 | +static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) |
4688 | +{ |
4689 | + return true; |
4690 | +} |
4691 | + |
4692 | +static inline bool arch_has_pfn_modify_check(void) |
4693 | +{ |
4694 | + return false; |
4695 | +} |
4696 | +#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */ |
4697 | + |
4698 | #endif /* !__ASSEMBLY__ */ |
4699 | |
4700 | #ifndef io_remap_pfn_range |
4701 | diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h |
4702 | index 070f85d92c15..28b76f0894d4 100644 |
4703 | --- a/include/linux/compiler-clang.h |
4704 | +++ b/include/linux/compiler-clang.h |
4705 | @@ -17,6 +17,9 @@ |
4706 | */ |
4707 | #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) |
4708 | |
4709 | +#undef __no_sanitize_address |
4710 | +#define __no_sanitize_address __attribute__((no_sanitize("address"))) |
4711 | + |
4712 | /* Clang doesn't have a way to turn it off per-function, yet. */ |
4713 | #ifdef __noretpoline |
4714 | #undef __noretpoline |
4715 | diff --git a/include/linux/cpu.h b/include/linux/cpu.h |
4716 | index 9546bf2fe310..2a378d261914 100644 |
4717 | --- a/include/linux/cpu.h |
4718 | +++ b/include/linux/cpu.h |
4719 | @@ -30,7 +30,7 @@ struct cpu { |
4720 | }; |
4721 | |
4722 | extern void boot_cpu_init(void); |
4723 | -extern void boot_cpu_state_init(void); |
4724 | +extern void boot_cpu_hotplug_init(void); |
4725 | extern void cpu_init(void); |
4726 | extern void trap_init(void); |
4727 | |
4728 | @@ -55,6 +55,8 @@ extern ssize_t cpu_show_spectre_v2(struct device *dev, |
4729 | struct device_attribute *attr, char *buf); |
4730 | extern ssize_t cpu_show_spec_store_bypass(struct device *dev, |
4731 | struct device_attribute *attr, char *buf); |
4732 | +extern ssize_t cpu_show_l1tf(struct device *dev, |
4733 | + struct device_attribute *attr, char *buf); |
4734 | |
4735 | extern __printf(4, 5) |
4736 | struct device *cpu_device_create(struct device *parent, void *drvdata, |
4737 | @@ -176,4 +178,23 @@ void cpuhp_report_idle_dead(void); |
4738 | static inline void cpuhp_report_idle_dead(void) { } |
4739 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ |
4740 | |
4741 | +enum cpuhp_smt_control { |
4742 | + CPU_SMT_ENABLED, |
4743 | + CPU_SMT_DISABLED, |
4744 | + CPU_SMT_FORCE_DISABLED, |
4745 | + CPU_SMT_NOT_SUPPORTED, |
4746 | +}; |
4747 | + |
4748 | +#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) |
4749 | +extern enum cpuhp_smt_control cpu_smt_control; |
4750 | +extern void cpu_smt_disable(bool force); |
4751 | +extern void cpu_smt_check_topology_early(void); |
4752 | +extern void cpu_smt_check_topology(void); |
4753 | +#else |
4754 | +# define cpu_smt_control (CPU_SMT_ENABLED) |
4755 | +static inline void cpu_smt_disable(bool force) { } |
4756 | +static inline void cpu_smt_check_topology_early(void) { } |
4757 | +static inline void cpu_smt_check_topology(void) { } |
4758 | +#endif |
4759 | + |
4760 | #endif /* _LINUX_CPU_H_ */ |
4761 | diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h |
4762 | index 06bd7b096167..e06febf62978 100644 |
4763 | --- a/include/linux/swapfile.h |
4764 | +++ b/include/linux/swapfile.h |
4765 | @@ -10,5 +10,7 @@ extern spinlock_t swap_lock; |
4766 | extern struct plist_head swap_active_head; |
4767 | extern struct swap_info_struct *swap_info[]; |
4768 | extern int try_to_unuse(unsigned int, bool, unsigned long); |
4769 | +extern unsigned long generic_max_swapfile_size(void); |
4770 | +extern unsigned long max_swapfile_size(void); |
4771 | |
4772 | #endif /* _LINUX_SWAPFILE_H */ |
4773 | diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h |
4774 | index a8b7bf879ced..9c1e4bad6581 100644 |
4775 | --- a/include/scsi/scsi_host.h |
4776 | +++ b/include/scsi/scsi_host.h |
4777 | @@ -452,6 +452,9 @@ struct scsi_host_template { |
4778 | /* True if the controller does not support WRITE SAME */ |
4779 | unsigned no_write_same:1; |
4780 | |
4781 | + /* True if the low-level driver supports blk-mq only */ |
4782 | + unsigned force_blk_mq:1; |
4783 | + |
4784 | /* |
4785 | * Countdown for host blocking with no commands outstanding. |
4786 | */ |
4787 | diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h |
4788 | index 857bad91c454..27c62abb6c9e 100644 |
4789 | --- a/include/uapi/linux/kvm.h |
4790 | +++ b/include/uapi/linux/kvm.h |
4791 | @@ -761,6 +761,7 @@ struct kvm_ppc_resize_hpt { |
4792 | #define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07 |
4793 | #define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08 |
4794 | #define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2) |
4795 | +#define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list) |
4796 | |
4797 | /* |
4798 | * Extension capability list. |
4799 | @@ -932,6 +933,7 @@ struct kvm_ppc_resize_hpt { |
4800 | #define KVM_CAP_HYPERV_SYNIC2 148 |
4801 | #define KVM_CAP_HYPERV_VP_INDEX 149 |
4802 | #define KVM_CAP_S390_BPB 152 |
4803 | +#define KVM_CAP_GET_MSR_FEATURES 153 |
4804 | |
4805 | #ifdef KVM_CAP_IRQ_ROUTING |
4806 | |
4807 | diff --git a/init/main.c b/init/main.c |
4808 | index 0d88f37febcb..c4a45145e102 100644 |
4809 | --- a/init/main.c |
4810 | +++ b/init/main.c |
4811 | @@ -543,8 +543,8 @@ asmlinkage __visible void __init start_kernel(void) |
4812 | setup_command_line(command_line); |
4813 | setup_nr_cpu_ids(); |
4814 | setup_per_cpu_areas(); |
4815 | - boot_cpu_state_init(); |
4816 | smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ |
4817 | + boot_cpu_hotplug_init(); |
4818 | |
4819 | build_all_zonelists(NULL); |
4820 | page_alloc_init(); |
4821 | diff --git a/kernel/cpu.c b/kernel/cpu.c |
4822 | index f21bfa3172d8..8f02f9b6e046 100644 |
4823 | --- a/kernel/cpu.c |
4824 | +++ b/kernel/cpu.c |
4825 | @@ -60,6 +60,7 @@ struct cpuhp_cpu_state { |
4826 | bool rollback; |
4827 | bool single; |
4828 | bool bringup; |
4829 | + bool booted_once; |
4830 | struct hlist_node *node; |
4831 | struct hlist_node *last; |
4832 | enum cpuhp_state cb_state; |
4833 | @@ -346,6 +347,85 @@ void cpu_hotplug_enable(void) |
4834 | EXPORT_SYMBOL_GPL(cpu_hotplug_enable); |
4835 | #endif /* CONFIG_HOTPLUG_CPU */ |
4836 | |
4837 | +#ifdef CONFIG_HOTPLUG_SMT |
4838 | +enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED; |
4839 | +EXPORT_SYMBOL_GPL(cpu_smt_control); |
4840 | + |
4841 | +static bool cpu_smt_available __read_mostly; |
4842 | + |
4843 | +void __init cpu_smt_disable(bool force) |
4844 | +{ |
4845 | + if (cpu_smt_control == CPU_SMT_FORCE_DISABLED || |
4846 | + cpu_smt_control == CPU_SMT_NOT_SUPPORTED) |
4847 | + return; |
4848 | + |
4849 | + if (force) { |
4850 | + pr_info("SMT: Force disabled\n"); |
4851 | + cpu_smt_control = CPU_SMT_FORCE_DISABLED; |
4852 | + } else { |
4853 | + cpu_smt_control = CPU_SMT_DISABLED; |
4854 | + } |
4855 | +} |
4856 | + |
4857 | +/* |
4858 | + * The decision whether SMT is supported can only be made after the full |
4859 | + * CPU identification. Called from architecture code before non-boot CPUs |
4860 | + * are brought up. |
4861 | + */ |
4862 | +void __init cpu_smt_check_topology_early(void) |
4863 | +{ |
4864 | + if (!topology_smt_supported()) |
4865 | + cpu_smt_control = CPU_SMT_NOT_SUPPORTED; |
4866 | +} |
4867 | + |
4868 | +/* |
4869 | + * If SMT was disabled by BIOS, detect it here, after the CPUs have been |
4870 | + * brought online. This ensures the smt/l1tf sysfs entries are consistent |
4871 | + * with reality. cpu_smt_available is set to true during the bringup of |
4872 | + * non-boot CPUs when an SMT sibling is detected. Note that this may overwrite |
4873 | + * cpu_smt_control's previous setting. |
4874 | + */ |
4875 | +void __init cpu_smt_check_topology(void) |
4876 | +{ |
4877 | + if (!cpu_smt_available) |
4878 | + cpu_smt_control = CPU_SMT_NOT_SUPPORTED; |
4879 | +} |
4880 | + |
4881 | +static int __init smt_cmdline_disable(char *str) |
4882 | +{ |
4883 | + cpu_smt_disable(str && !strcmp(str, "force")); |
4884 | + return 0; |
4885 | +} |
4886 | +early_param("nosmt", smt_cmdline_disable); |
4887 | + |
4888 | +static inline bool cpu_smt_allowed(unsigned int cpu) |
4889 | +{ |
4890 | + if (topology_is_primary_thread(cpu)) |
4891 | + return true; |
4892 | + |
4893 | + /* |
4894 | + * If the CPU is not a 'primary' thread and the booted_once bit is |
4895 | + * set then the processor has SMT support. Store this information |
4896 | + * for the late check of SMT support in cpu_smt_check_topology(). |
4897 | + */ |
4898 | + if (per_cpu(cpuhp_state, cpu).booted_once) |
4899 | + cpu_smt_available = true; |
4900 | + |
4901 | + if (cpu_smt_control == CPU_SMT_ENABLED) |
4902 | + return true; |
4903 | + |
4904 | + /* |
4905 | + * On x86 it's required to boot all logical CPUs at least once so |
4906 | + * that the init code can get a chance to set CR4.MCE on each |
4907 | + * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any |
4908 | + * core will shut down the machine. |
4909 | + */ |
4910 | + return !per_cpu(cpuhp_state, cpu).booted_once; |
4911 | +} |
4912 | +#else |
4913 | +static inline bool cpu_smt_allowed(unsigned int cpu) { return true; } |
4914 | +#endif |
4915 | + |
4916 | static inline enum cpuhp_state |
4917 | cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target) |
4918 | { |
4919 | @@ -426,6 +506,16 @@ static int bringup_wait_for_ap(unsigned int cpu) |
4920 | stop_machine_unpark(cpu); |
4921 | kthread_unpark(st->thread); |
4922 | |
4923 | + /* |
4924 | + * SMT soft disabling on X86 requires bringing the CPU out of the |
4925 | + * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The |
4926 | + * CPU marked itself as booted_once in notify_cpu_starting() so the |
4927 | + * cpu_smt_allowed() check will now return false if this is not the |
4928 | + * primary sibling. |
4929 | + */ |
4930 | + if (!cpu_smt_allowed(cpu)) |
4931 | + return -ECANCELED; |
4932 | + |
4933 | if (st->target <= CPUHP_AP_ONLINE_IDLE) |
4934 | return 0; |
4935 | |
4936 | @@ -758,7 +848,6 @@ static int takedown_cpu(unsigned int cpu) |
4937 | |
4938 | /* Park the smpboot threads */ |
4939 | kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread); |
4940 | - smpboot_park_threads(cpu); |
4941 | |
4942 | /* |
4943 | * Prevent irq alloc/free while the dying cpu reorganizes the |
4944 | @@ -911,20 +1000,19 @@ out: |
4945 | return ret; |
4946 | } |
4947 | |
4948 | +static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target) |
4949 | +{ |
4950 | + if (cpu_hotplug_disabled) |
4951 | + return -EBUSY; |
4952 | + return _cpu_down(cpu, 0, target); |
4953 | +} |
4954 | + |
4955 | static int do_cpu_down(unsigned int cpu, enum cpuhp_state target) |
4956 | { |
4957 | int err; |
4958 | |
4959 | cpu_maps_update_begin(); |
4960 | - |
4961 | - if (cpu_hotplug_disabled) { |
4962 | - err = -EBUSY; |
4963 | - goto out; |
4964 | - } |
4965 | - |
4966 | - err = _cpu_down(cpu, 0, target); |
4967 | - |
4968 | -out: |
4969 | + err = cpu_down_maps_locked(cpu, target); |
4970 | cpu_maps_update_done(); |
4971 | return err; |
4972 | } |
4973 | @@ -953,6 +1041,7 @@ void notify_cpu_starting(unsigned int cpu) |
4974 | int ret; |
4975 | |
4976 | rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */ |
4977 | + st->booted_once = true; |
4978 | while (st->state < target) { |
4979 | st->state++; |
4980 | ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); |
4981 | @@ -1062,6 +1151,10 @@ static int do_cpu_up(unsigned int cpu, enum cpuhp_state target) |
4982 | err = -EBUSY; |
4983 | goto out; |
4984 | } |
4985 | + if (!cpu_smt_allowed(cpu)) { |
4986 | + err = -EPERM; |
4987 | + goto out; |
4988 | + } |
4989 | |
4990 | err = _cpu_up(cpu, 0, target); |
4991 | out: |
4992 | @@ -1344,7 +1437,7 @@ static struct cpuhp_step cpuhp_ap_states[] = { |
4993 | [CPUHP_AP_SMPBOOT_THREADS] = { |
4994 | .name = "smpboot/threads:online", |
4995 | .startup.single = smpboot_unpark_threads, |
4996 | - .teardown.single = NULL, |
4997 | + .teardown.single = smpboot_park_threads, |
4998 | }, |
4999 | [CPUHP_AP_IRQ_AFFINITY_ONLINE] = { |
5000 | .name = "irq/affinity:online", |
5001 | @@ -1918,10 +2011,172 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = { |
5002 | NULL |
5003 | }; |
5004 | |
5005 | +#ifdef CONFIG_HOTPLUG_SMT |
5006 | + |
5007 | +static const char *smt_states[] = { |
5008 | + [CPU_SMT_ENABLED] = "on", |
5009 | + [CPU_SMT_DISABLED] = "off", |
5010 | + [CPU_SMT_FORCE_DISABLED] = "forceoff", |
5011 | + [CPU_SMT_NOT_SUPPORTED] = "notsupported", |
5012 | +}; |
5013 | + |
5014 | +static ssize_t |
5015 | +show_smt_control(struct device *dev, struct device_attribute *attr, char *buf) |
5016 | +{ |
5017 | + return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]); |
5018 | +} |
5019 | + |
5020 | +static void cpuhp_offline_cpu_device(unsigned int cpu) |
5021 | +{ |
5022 | + struct device *dev = get_cpu_device(cpu); |
5023 | + |
5024 | + dev->offline = true; |
5025 | + /* Tell user space about the state change */ |
5026 | + kobject_uevent(&dev->kobj, KOBJ_OFFLINE); |
5027 | +} |
5028 | + |
5029 | +static void cpuhp_online_cpu_device(unsigned int cpu) |
5030 | +{ |
5031 | + struct device *dev = get_cpu_device(cpu); |
5032 | + |
5033 | + dev->offline = false; |
5034 | + /* Tell user space about the state change */ |
5035 | + kobject_uevent(&dev->kobj, KOBJ_ONLINE); |
5036 | +} |
5037 | + |
5038 | +static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) |
5039 | +{ |
5040 | + int cpu, ret = 0; |
5041 | + |
5042 | + cpu_maps_update_begin(); |
5043 | + for_each_online_cpu(cpu) { |
5044 | + if (topology_is_primary_thread(cpu)) |
5045 | + continue; |
5046 | + ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); |
5047 | + if (ret) |
5048 | + break; |
5049 | + /* |
5050 | + * As this needs to hold the cpu maps lock it's impossible |
5051 | + * to call device_offline() because that ends up calling |
5052 | + * cpu_down() which takes cpu maps lock. cpu maps lock |
5053 | + * needs to be held as this might race against in kernel |
5054 | + * abusers of the hotplug machinery (thermal management). |
5055 | + * |
5056 | + * So nothing would update device:offline state. That would |
5057 | + * leave the sysfs entry stale and prevent onlining after |
5058 | + * smt control has been changed to 'off' again. This is |
5059 | + * called under the sysfs hotplug lock, so it is properly |
5060 | + * serialized against the regular offline usage. |
5061 | + */ |
5062 | + cpuhp_offline_cpu_device(cpu); |
5063 | + } |
5064 | + if (!ret) |
5065 | + cpu_smt_control = ctrlval; |
5066 | + cpu_maps_update_done(); |
5067 | + return ret; |
5068 | +} |
5069 | + |
5070 | +static int cpuhp_smt_enable(void) |
5071 | +{ |
5072 | + int cpu, ret = 0; |
5073 | + |
5074 | + cpu_maps_update_begin(); |
5075 | + cpu_smt_control = CPU_SMT_ENABLED; |
5076 | + for_each_present_cpu(cpu) { |
5077 | + /* Skip online CPUs and CPUs on offline nodes */ |
5078 | + if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) |
5079 | + continue; |
5080 | + ret = _cpu_up(cpu, 0, CPUHP_ONLINE); |
5081 | + if (ret) |
5082 | + break; |
5083 | + /* See comment in cpuhp_smt_disable() */ |
5084 | + cpuhp_online_cpu_device(cpu); |
5085 | + } |
5086 | + cpu_maps_update_done(); |
5087 | + return ret; |
5088 | +} |
5089 | + |
5090 | +static ssize_t |
5091 | +store_smt_control(struct device *dev, struct device_attribute *attr, |
5092 | + const char *buf, size_t count) |
5093 | +{ |
5094 | + int ctrlval, ret; |
5095 | + |
5096 | + if (sysfs_streq(buf, "on")) |
5097 | + ctrlval = CPU_SMT_ENABLED; |
5098 | + else if (sysfs_streq(buf, "off")) |
5099 | + ctrlval = CPU_SMT_DISABLED; |
5100 | + else if (sysfs_streq(buf, "forceoff")) |
5101 | + ctrlval = CPU_SMT_FORCE_DISABLED; |
5102 | + else |
5103 | + return -EINVAL; |
5104 | + |
5105 | + if (cpu_smt_control == CPU_SMT_FORCE_DISABLED) |
5106 | + return -EPERM; |
5107 | + |
5108 | + if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED) |
5109 | + return -ENODEV; |
5110 | + |
5111 | + ret = lock_device_hotplug_sysfs(); |
5112 | + if (ret) |
5113 | + return ret; |
5114 | + |
5115 | + if (ctrlval != cpu_smt_control) { |
5116 | + switch (ctrlval) { |
5117 | + case CPU_SMT_ENABLED: |
5118 | + ret = cpuhp_smt_enable(); |
5119 | + break; |
5120 | + case CPU_SMT_DISABLED: |
5121 | + case CPU_SMT_FORCE_DISABLED: |
5122 | + ret = cpuhp_smt_disable(ctrlval); |
5123 | + break; |
5124 | + } |
5125 | + } |
5126 | + |
5127 | + unlock_device_hotplug(); |
5128 | + return ret ? ret : count; |
5129 | +} |
5130 | +static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control); |
5131 | + |
5132 | +static ssize_t |
5133 | +show_smt_active(struct device *dev, struct device_attribute *attr, char *buf) |
5134 | +{ |
5135 | + bool active = topology_max_smt_threads() > 1; |
5136 | + |
5137 | + return snprintf(buf, PAGE_SIZE - 2, "%d\n", active); |
5138 | +} |
5139 | +static DEVICE_ATTR(active, 0444, show_smt_active, NULL); |
5140 | + |
5141 | +static struct attribute *cpuhp_smt_attrs[] = { |
5142 | + &dev_attr_control.attr, |
5143 | + &dev_attr_active.attr, |
5144 | + NULL |
5145 | +}; |
5146 | + |
5147 | +static const struct attribute_group cpuhp_smt_attr_group = { |
5148 | + .attrs = cpuhp_smt_attrs, |
5149 | + .name = "smt", |
5150 | + NULL |
5151 | +}; |
5152 | + |
5153 | +static int __init cpu_smt_state_init(void) |
5154 | +{ |
5155 | + return sysfs_create_group(&cpu_subsys.dev_root->kobj, |
5156 | + &cpuhp_smt_attr_group); |
5157 | +} |
5158 | + |
5159 | +#else |
5160 | +static inline int cpu_smt_state_init(void) { return 0; } |
5161 | +#endif |
5162 | + |
5163 | static int __init cpuhp_sysfs_init(void) |
5164 | { |
5165 | int cpu, ret; |
5166 | |
5167 | + ret = cpu_smt_state_init(); |
5168 | + if (ret) |
5169 | + return ret; |
5170 | + |
5171 | ret = sysfs_create_group(&cpu_subsys.dev_root->kobj, |
5172 | &cpuhp_cpu_root_attr_group); |
5173 | if (ret) |
5174 | @@ -2022,7 +2277,10 @@ void __init boot_cpu_init(void) |
5175 | /* |
5176 | * Must be called _AFTER_ setting up the per_cpu areas |
5177 | */ |
5178 | -void __init boot_cpu_state_init(void) |
5179 | +void __init boot_cpu_hotplug_init(void) |
5180 | { |
5181 | - per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE; |
5182 | +#ifdef CONFIG_SMP |
5183 | + this_cpu_write(cpuhp_state.booted_once, true); |
5184 | +#endif |
5185 | + this_cpu_write(cpuhp_state.state, CPUHP_ONLINE); |
5186 | } |
5187 | diff --git a/kernel/sched/core.c b/kernel/sched/core.c |
5188 | index 31615d1ae44c..4e89ed8a0fb2 100644 |
5189 | --- a/kernel/sched/core.c |
5190 | +++ b/kernel/sched/core.c |
5191 | @@ -5615,6 +5615,18 @@ int sched_cpu_activate(unsigned int cpu) |
5192 | struct rq *rq = cpu_rq(cpu); |
5193 | struct rq_flags rf; |
5194 | |
5195 | +#ifdef CONFIG_SCHED_SMT |
5196 | + /* |
5197 | + * The sched_smt_present static key needs to be evaluated on every |
5198 | + * hotplug event because at boot time SMT might be disabled when |
5199 | + * the number of booted CPUs is limited. |
5200 | + * |
5201 | + * If a sibling gets hotplugged later, the key would stay |
5202 | + * off and SMT scheduling would never be functional. |
5203 | + */ |
5204 | + if (cpumask_weight(cpu_smt_mask(cpu)) > 1) |
5205 | + static_branch_enable_cpuslocked(&sched_smt_present); |
5206 | +#endif |
5207 | set_cpu_active(cpu, true); |
5208 | |
5209 | if (sched_smp_initialized) { |
5210 | @@ -5710,22 +5722,6 @@ int sched_cpu_dying(unsigned int cpu) |
5211 | } |
5212 | #endif |
5213 | |
5214 | -#ifdef CONFIG_SCHED_SMT |
5215 | -DEFINE_STATIC_KEY_FALSE(sched_smt_present); |
5216 | - |
5217 | -static void sched_init_smt(void) |
5218 | -{ |
5219 | - /* |
5220 | - * We've enumerated all CPUs and will assume that if any CPU |
5221 | - * has SMT siblings, CPU0 will too. |
5222 | - */ |
5223 | - if (cpumask_weight(cpu_smt_mask(0)) > 1) |
5224 | - static_branch_enable(&sched_smt_present); |
5225 | -} |
5226 | -#else |
5227 | -static inline void sched_init_smt(void) { } |
5228 | -#endif |
5229 | - |
5230 | void __init sched_init_smp(void) |
5231 | { |
5232 | cpumask_var_t non_isolated_cpus; |
5233 | @@ -5755,8 +5751,6 @@ void __init sched_init_smp(void) |
5234 | init_sched_rt_class(); |
5235 | init_sched_dl_class(); |
5236 | |
5237 | - sched_init_smt(); |
5238 | - |
5239 | sched_smp_initialized = true; |
5240 | } |
5241 | |
5242 | diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c |
5243 | index 5c09ddf8c832..0cc7098c6dfd 100644 |
5244 | --- a/kernel/sched/fair.c |
5245 | +++ b/kernel/sched/fair.c |
5246 | @@ -5631,6 +5631,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) |
5247 | } |
5248 | |
5249 | #ifdef CONFIG_SCHED_SMT |
5250 | +DEFINE_STATIC_KEY_FALSE(sched_smt_present); |
5251 | |
5252 | static inline void set_idle_cores(int cpu, int val) |
5253 | { |
5254 | diff --git a/kernel/smp.c b/kernel/smp.c |
5255 | index c94dd85c8d41..2d1da290f144 100644 |
5256 | --- a/kernel/smp.c |
5257 | +++ b/kernel/smp.c |
5258 | @@ -584,6 +584,8 @@ void __init smp_init(void) |
5259 | num_nodes, (num_nodes > 1 ? "s" : ""), |
5260 | num_cpus, (num_cpus > 1 ? "s" : "")); |
5261 | |
5262 | + /* Final decision about SMT support */ |
5263 | + cpu_smt_check_topology(); |
5264 | /* Any cleanup work */ |
5265 | smp_cpus_done(setup_max_cpus); |
5266 | } |
5267 | diff --git a/kernel/softirq.c b/kernel/softirq.c |
5268 | index f40ac7191257..a4c87cf27f9d 100644 |
5269 | --- a/kernel/softirq.c |
5270 | +++ b/kernel/softirq.c |
5271 | @@ -79,12 +79,16 @@ static void wakeup_softirqd(void) |
5272 | |
5273 | /* |
5274 | * If ksoftirqd is scheduled, we do not want to process pending softirqs |
5275 | - * right now. Let ksoftirqd handle this at its own rate, to get fairness. |
5276 | + * right now. Let ksoftirqd handle this at its own rate, to get fairness, |
5277 | + * unless we're doing some of the synchronous softirqs. |
5278 | */ |
5279 | -static bool ksoftirqd_running(void) |
5280 | +#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ)) |
5281 | +static bool ksoftirqd_running(unsigned long pending) |
5282 | { |
5283 | struct task_struct *tsk = __this_cpu_read(ksoftirqd); |
5284 | |
5285 | + if (pending & SOFTIRQ_NOW_MASK) |
5286 | + return false; |
5287 | return tsk && (tsk->state == TASK_RUNNING); |
5288 | } |
5289 | |
5290 | @@ -324,7 +328,7 @@ asmlinkage __visible void do_softirq(void) |
5291 | |
5292 | pending = local_softirq_pending(); |
5293 | |
5294 | - if (pending && !ksoftirqd_running()) |
5295 | + if (pending && !ksoftirqd_running(pending)) |
5296 | do_softirq_own_stack(); |
5297 | |
5298 | local_irq_restore(flags); |
5299 | @@ -351,7 +355,7 @@ void irq_enter(void) |
5300 | |
5301 | static inline void invoke_softirq(void) |
5302 | { |
5303 | - if (ksoftirqd_running()) |
5304 | + if (ksoftirqd_running(local_softirq_pending())) |
5305 | return; |
5306 | |
5307 | if (!force_irqthreads) { |
5308 | diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c |
5309 | index 1ff523dae6e2..e190d1ef3a23 100644 |
5310 | --- a/kernel/stop_machine.c |
5311 | +++ b/kernel/stop_machine.c |
5312 | @@ -260,6 +260,15 @@ retry: |
5313 | err = 0; |
5314 | __cpu_stop_queue_work(stopper1, work1, &wakeq); |
5315 | __cpu_stop_queue_work(stopper2, work2, &wakeq); |
5316 | + /* |
5317 | + * The waking up of stopper threads has to happen |
5318 | + * in the same scheduling context as the queueing. |
5319 | + * Otherwise, there is a possibility of one of the |
5320 | + * above stoppers being woken up by another CPU, |
5321 | + * and preempting us. This will cause us to not |
5322 | + * wake up the other stopper forever. |
5323 | + */ |
5324 | + preempt_disable(); |
5325 | unlock: |
5326 | raw_spin_unlock(&stopper2->lock); |
5327 | raw_spin_unlock_irq(&stopper1->lock); |
5328 | @@ -271,7 +280,6 @@ unlock: |
5329 | } |
5330 | |
5331 | if (!err) { |
5332 | - preempt_disable(); |
5333 | wake_up_q(&wakeq); |
5334 | preempt_enable(); |
5335 | } |
5336 | diff --git a/mm/memory.c b/mm/memory.c |
5337 | index fc7779165dcf..5539b1975091 100644 |
5338 | --- a/mm/memory.c |
5339 | +++ b/mm/memory.c |
5340 | @@ -1887,6 +1887,9 @@ int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, |
5341 | if (addr < vma->vm_start || addr >= vma->vm_end) |
5342 | return -EFAULT; |
5343 | |
5344 | + if (!pfn_modify_allowed(pfn, pgprot)) |
5345 | + return -EACCES; |
5346 | + |
5347 | track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); |
5348 | |
5349 | ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, |
5350 | @@ -1908,6 +1911,9 @@ static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, |
5351 | |
5352 | track_pfn_insert(vma, &pgprot, pfn); |
5353 | |
5354 | + if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot)) |
5355 | + return -EACCES; |
5356 | + |
5357 | /* |
5358 | * If we don't have pte special, then we have to use the pfn_valid() |
5359 | * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* |
5360 | @@ -1955,6 +1961,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, |
5361 | { |
5362 | pte_t *pte; |
5363 | spinlock_t *ptl; |
5364 | + int err = 0; |
5365 | |
5366 | pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); |
5367 | if (!pte) |
5368 | @@ -1962,12 +1969,16 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, |
5369 | arch_enter_lazy_mmu_mode(); |
5370 | do { |
5371 | BUG_ON(!pte_none(*pte)); |
5372 | + if (!pfn_modify_allowed(pfn, prot)) { |
5373 | + err = -EACCES; |
5374 | + break; |
5375 | + } |
5376 | set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); |
5377 | pfn++; |
5378 | } while (pte++, addr += PAGE_SIZE, addr != end); |
5379 | arch_leave_lazy_mmu_mode(); |
5380 | pte_unmap_unlock(pte - 1, ptl); |
5381 | - return 0; |
5382 | + return err; |
5383 | } |
5384 | |
5385 | static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, |
5386 | @@ -1976,6 +1987,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, |
5387 | { |
5388 | pmd_t *pmd; |
5389 | unsigned long next; |
5390 | + int err; |
5391 | |
5392 | pfn -= addr >> PAGE_SHIFT; |
5393 | pmd = pmd_alloc(mm, pud, addr); |
5394 | @@ -1984,9 +1996,10 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, |
5395 | VM_BUG_ON(pmd_trans_huge(*pmd)); |
5396 | do { |
5397 | next = pmd_addr_end(addr, end); |
5398 | - if (remap_pte_range(mm, pmd, addr, next, |
5399 | - pfn + (addr >> PAGE_SHIFT), prot)) |
5400 | - return -ENOMEM; |
5401 | + err = remap_pte_range(mm, pmd, addr, next, |
5402 | + pfn + (addr >> PAGE_SHIFT), prot); |
5403 | + if (err) |
5404 | + return err; |
5405 | } while (pmd++, addr = next, addr != end); |
5406 | return 0; |
5407 | } |
5408 | @@ -1997,6 +2010,7 @@ static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, |
5409 | { |
5410 | pud_t *pud; |
5411 | unsigned long next; |
5412 | + int err; |
5413 | |
5414 | pfn -= addr >> PAGE_SHIFT; |
5415 | pud = pud_alloc(mm, p4d, addr); |
5416 | @@ -2004,9 +2018,10 @@ static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, |
5417 | return -ENOMEM; |
5418 | do { |
5419 | next = pud_addr_end(addr, end); |
5420 | - if (remap_pmd_range(mm, pud, addr, next, |
5421 | - pfn + (addr >> PAGE_SHIFT), prot)) |
5422 | - return -ENOMEM; |
5423 | + err = remap_pmd_range(mm, pud, addr, next, |
5424 | + pfn + (addr >> PAGE_SHIFT), prot); |
5425 | + if (err) |
5426 | + return err; |
5427 | } while (pud++, addr = next, addr != end); |
5428 | return 0; |
5429 | } |
5430 | @@ -2017,6 +2032,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, |
5431 | { |
5432 | p4d_t *p4d; |
5433 | unsigned long next; |
5434 | + int err; |
5435 | |
5436 | pfn -= addr >> PAGE_SHIFT; |
5437 | p4d = p4d_alloc(mm, pgd, addr); |
5438 | @@ -2024,9 +2040,10 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, |
5439 | return -ENOMEM; |
5440 | do { |
5441 | next = p4d_addr_end(addr, end); |
5442 | - if (remap_pud_range(mm, p4d, addr, next, |
5443 | - pfn + (addr >> PAGE_SHIFT), prot)) |
5444 | - return -ENOMEM; |
5445 | + err = remap_pud_range(mm, p4d, addr, next, |
5446 | + pfn + (addr >> PAGE_SHIFT), prot); |
5447 | + if (err) |
5448 | + return err; |
5449 | } while (p4d++, addr = next, addr != end); |
5450 | return 0; |
5451 | } |
5452 | diff --git a/mm/mprotect.c b/mm/mprotect.c |
5453 | index 58b629bb70de..60864e19421e 100644 |
5454 | --- a/mm/mprotect.c |
5455 | +++ b/mm/mprotect.c |
5456 | @@ -292,6 +292,42 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, |
5457 | return pages; |
5458 | } |
5459 | |
5460 | +static int prot_none_pte_entry(pte_t *pte, unsigned long addr, |
5461 | + unsigned long next, struct mm_walk *walk) |
5462 | +{ |
5463 | + return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ? |
5464 | + 0 : -EACCES; |
5465 | +} |
5466 | + |
5467 | +static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask, |
5468 | + unsigned long addr, unsigned long next, |
5469 | + struct mm_walk *walk) |
5470 | +{ |
5471 | + return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ? |
5472 | + 0 : -EACCES; |
5473 | +} |
5474 | + |
5475 | +static int prot_none_test(unsigned long addr, unsigned long next, |
5476 | + struct mm_walk *walk) |
5477 | +{ |
5478 | + return 0; |
5479 | +} |
5480 | + |
5481 | +static int prot_none_walk(struct vm_area_struct *vma, unsigned long start, |
5482 | + unsigned long end, unsigned long newflags) |
5483 | +{ |
5484 | + pgprot_t new_pgprot = vm_get_page_prot(newflags); |
5485 | + struct mm_walk prot_none_walk = { |
5486 | + .pte_entry = prot_none_pte_entry, |
5487 | + .hugetlb_entry = prot_none_hugetlb_entry, |
5488 | + .test_walk = prot_none_test, |
5489 | + .mm = current->mm, |
5490 | + .private = &new_pgprot, |
5491 | + }; |
5492 | + |
5493 | + return walk_page_range(start, end, &prot_none_walk); |
5494 | +} |
5495 | + |
5496 | int |
5497 | mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, |
5498 | unsigned long start, unsigned long end, unsigned long newflags) |
5499 | @@ -309,6 +345,19 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, |
5500 | return 0; |
5501 | } |
5502 | |
5503 | + /* |
5504 | + * Do PROT_NONE PFN permission checks here when we can still |
5505 | + * bail out without undoing a lot of state. This is a rather |
5506 | + * uncommon case, so doesn't need to be very optimized. |
5507 | + */ |
5508 | + if (arch_has_pfn_modify_check() && |
5509 | + (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && |
5510 | + (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) { |
5511 | + error = prot_none_walk(vma, start, end, newflags); |
5512 | + if (error) |
5513 | + return error; |
5514 | + } |
5515 | + |
5516 | /* |
5517 | * If we make a private mapping writable we increase our commit; |
5518 | * but (without finer accounting) cannot reduce our commit if we |
5519 | diff --git a/mm/swapfile.c b/mm/swapfile.c |
5520 | index 03d2ce288d83..8cbc7d6fd52e 100644 |
5521 | --- a/mm/swapfile.c |
5522 | +++ b/mm/swapfile.c |
5523 | @@ -2902,6 +2902,35 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) |
5524 | return 0; |
5525 | } |
5526 | |
5527 | + |
5528 | +/* |
5529 | + * Find out how many pages are allowed for a single swap device. There |
5530 | + * are two limiting factors: |
5531 | + * 1) the number of bits for the swap offset in the swp_entry_t type, and |
5532 | + * 2) the number of bits in the swap pte, as defined by the different |
5533 | + * architectures. |
5534 | + * |
5535 | + * In order to find the largest possible bit mask, a swap entry with |
5536 | + * swap type 0 and swap offset ~0UL is created, encoded to a swap pte, |
5537 | + * decoded to a swp_entry_t again, and finally the swap offset is |
5538 | + * extracted. |
5539 | + * |
5540 | + * This will mask all the bits from the initial ~0UL mask that can't |
5541 | + * be encoded in either the swp_entry_t or the architecture definition |
5542 | + * of a swap pte. |
5543 | + */ |
5544 | +unsigned long generic_max_swapfile_size(void) |
5545 | +{ |
5546 | + return swp_offset(pte_to_swp_entry( |
5547 | + swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; |
5548 | +} |
5549 | + |
5550 | +/* Can be overridden by an architecture for additional checks. */ |
5551 | +__weak unsigned long max_swapfile_size(void) |
5552 | +{ |
5553 | + return generic_max_swapfile_size(); |
5554 | +} |
5555 | + |
5556 | static unsigned long read_swap_header(struct swap_info_struct *p, |
5557 | union swap_header *swap_header, |
5558 | struct inode *inode) |
5559 | @@ -2937,22 +2966,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p, |
5560 | p->cluster_next = 1; |
5561 | p->cluster_nr = 0; |
5562 | |
5563 | - /* |
5564 | - * Find out how many pages are allowed for a single swap |
5565 | - * device. There are two limiting factors: 1) the number |
5566 | - * of bits for the swap offset in the swp_entry_t type, and |
5567 | - * 2) the number of bits in the swap pte as defined by the |
5568 | - * different architectures. In order to find the |
5569 | - * largest possible bit mask, a swap entry with swap type 0 |
5570 | - * and swap offset ~0UL is created, encoded to a swap pte, |
5571 | - * decoded to a swp_entry_t again, and finally the swap |
5572 | - * offset is extracted. This will mask all the bits from |
5573 | - * the initial ~0UL mask that can't be encoded in either |
5574 | - * the swp_entry_t or the architecture definition of a |
5575 | - * swap pte. |
5576 | - */ |
5577 | - maxpages = swp_offset(pte_to_swp_entry( |
5578 | - swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; |
5579 | + maxpages = max_swapfile_size(); |
5580 | last_page = swap_header->info.last_page; |
5581 | if (!last_page) { |
5582 | pr_warn("Empty swap-file\n"); |
5583 | diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h |
5584 | index 403e97d5e243..8418462298e7 100644 |
5585 | --- a/tools/arch/x86/include/asm/cpufeatures.h |
5586 | +++ b/tools/arch/x86/include/asm/cpufeatures.h |
5587 | @@ -219,6 +219,7 @@ |
5588 | #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ |
5589 | #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ |
5590 | #define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */ |
5591 | +#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */ |
5592 | |
5593 | /* Virtualization flags: Linux defined, word 8 */ |
5594 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
5595 | @@ -338,6 +339,7 @@ |
5596 | #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ |
5597 | #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ |
5598 | #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ |
5599 | +#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */ |
5600 | #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ |
5601 | #define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ |
5602 | |
5603 | @@ -370,5 +372,6 @@ |
5604 | #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ |
5605 | #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ |
5606 | #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ |
5607 | +#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ |
5608 | |
5609 | #endif /* _ASM_X86_CPUFEATURES_H */ |