Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0120-4.14.21-all-fixes.patch



Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 377487 bytes
-added up to patches-4.14.79
1 niro 3238 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
2     index c76afdcafbef..fb385af482ff 100644
3     --- a/Documentation/admin-guide/kernel-parameters.txt
4     +++ b/Documentation/admin-guide/kernel-parameters.txt
5     @@ -1841,13 +1841,6 @@
6     Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
7     the default is off.
8    
9     - kmemcheck= [X86] Boot-time kmemcheck enable/disable/one-shot mode
10     - Valid arguments: 0, 1, 2
11     - kmemcheck=0 (disabled)
12     - kmemcheck=1 (enabled)
13     - kmemcheck=2 (one-shot mode)
14     - Default: 2 (one-shot mode)
15     -
16     kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
17     Default is 0 (don't ignore, but inject #GP)
18    
19     diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst
20     index a81787cd47d7..e313925fb0fa 100644
21     --- a/Documentation/dev-tools/index.rst
22     +++ b/Documentation/dev-tools/index.rst
23     @@ -21,7 +21,6 @@ whole; patches welcome!
24     kasan
25     ubsan
26     kmemleak
27     - kmemcheck
28     gdb-kernel-debugging
29     kgdb
30     kselftest
31     diff --git a/Documentation/dev-tools/kmemcheck.rst b/Documentation/dev-tools/kmemcheck.rst
32     deleted file mode 100644
33     index 7f3d1985de74..000000000000
34     --- a/Documentation/dev-tools/kmemcheck.rst
35     +++ /dev/null
36     @@ -1,733 +0,0 @@
37     -Getting started with kmemcheck
38     -==============================
39     -
40     -Vegard Nossum <vegardno@ifi.uio.no>
41     -
42     -
43     -Introduction
44     -------------
45     -
46     -kmemcheck is a debugging feature for the Linux Kernel. More specifically, it
47     -is a dynamic checker that detects and warns about some uses of uninitialized
48     -memory.
49     -
50     -Userspace programmers might be familiar with Valgrind's memcheck. The main
51     -difference between memcheck and kmemcheck is that memcheck works for userspace
52     -programs only, and kmemcheck works for the kernel only. The implementations
53     -are of course vastly different. Because of this, kmemcheck is not as accurate
54     -as memcheck, but it turns out to be good enough in practice to discover real
55     -programmer errors that the compiler is not able to find through static
56     -analysis.
57     -
58     -Enabling kmemcheck on a kernel will probably slow it down to the extent that
59     -the machine will not be usable for normal workloads such as an
60     -interactive desktop. kmemcheck will also cause the kernel to use about twice
61     -as much memory as normal. For this reason, kmemcheck is strictly a debugging
62     -feature.
63     -
64     -
65     -Downloading
66     ------------
67     -
68     -As of version 2.6.31-rc1, kmemcheck is included in the mainline kernel.
69     -
70     -
71     -Configuring and compiling
72     --------------------------
73     -
74     -kmemcheck only works for the x86 (both 32- and 64-bit) platform. A number of
75     -configuration variables must have specific settings in order for the kmemcheck
76     -menu to even appear in "menuconfig". These are:
77     -
78     -- ``CONFIG_CC_OPTIMIZE_FOR_SIZE=n``
79     - This option is located under "General setup" / "Optimize for size".
80     -
81     - Without this, gcc will use certain optimizations that usually lead to
82     - false positive warnings from kmemcheck. An example of this is a 16-bit
83     - field in a struct, where gcc may load 32 bits, then discard the upper
84     - 16 bits. kmemcheck sees only the 32-bit load, and may trigger a
85     - warning for the upper 16 bits (if they're uninitialized).
86     -
87     -- ``CONFIG_SLAB=y`` or ``CONFIG_SLUB=y``
88     - This option is located under "General setup" / "Choose SLAB
89     - allocator".
90     -
91     -- ``CONFIG_FUNCTION_TRACER=n``
92     - This option is located under "Kernel hacking" / "Tracers" / "Kernel
93     - Function Tracer"
94     -
95     - When function tracing is compiled in, gcc emits a call to another
96     - function at the beginning of every function. This means that when the
97     - page fault handler is called, the ftrace framework will be called
98     - before kmemcheck has had a chance to handle the fault. If ftrace then
99     - modifies memory that was tracked by kmemcheck, the result is an
100     - endless recursive page fault.
101     -
102     -- ``CONFIG_DEBUG_PAGEALLOC=n``
103     - This option is located under "Kernel hacking" / "Memory Debugging"
104     - / "Debug page memory allocations".
105     -
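Aside (not part of the original patch): the 16-bit-field false positive
described for ``CONFIG_CC_OPTIMIZE_FOR_SIZE`` above can be sketched in
plain C; the struct and values here are hypothetical::

    #include <stdint.h>

    struct msg {
            uint16_t type;   /* written by the caller */
            uint16_t flags;  /* deliberately left uninitialized */
    };

    int main(void)
    {
            struct msg m;

            m.type = 7;
            /*
             * To test `type`, an optimizing compiler may emit one 32-bit
             * load covering both fields and discard the upper half.
             * kmemcheck only sees the 32-bit read, so the uninitialized
             * `flags` bytes trigger a false warning unless
             * CONFIG_KMEMCHECK_PARTIAL_OK compensates.
             */
            return m.type == 7 ? 0 : 1;
    }
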
106     -In addition, I highly recommend turning on ``CONFIG_DEBUG_INFO=y``. This is also
107     -located under "Kernel hacking". With this, you will be able to get line number
108     -information from the kmemcheck warnings, which is extremely valuable in
109     -debugging a problem. This option is not mandatory, however, because it slows
110     -down the compilation process and produces a much bigger kernel image.
111     -
112     -Now the kmemcheck menu should be visible (under "Kernel hacking" / "Memory
113     -Debugging" / "kmemcheck: trap use of uninitialized memory"). Here follows
114     -a description of the kmemcheck configuration variables:
115     -
116     -- ``CONFIG_KMEMCHECK``
117     - This must be enabled in order to use kmemcheck at all...
118     -
119     -- ``CONFIG_KMEMCHECK_``[``DISABLED`` | ``ENABLED`` | ``ONESHOT``]``_BY_DEFAULT``
120     - This option controls the status of kmemcheck at boot-time. "Enabled"
121     - will enable kmemcheck right from the start, "disabled" will boot the
122     - kernel as normal (but with the kmemcheck code compiled in, so it can
123     - be enabled at run-time after the kernel has booted), and "one-shot" is
124     - a special mode which will turn kmemcheck off automatically after
125     - detecting the first use of uninitialized memory.
126     -
127     - If you are using kmemcheck to actively debug a problem, then you
128     - probably want to choose "enabled" here.
129     -
130     - The one-shot mode is mostly useful in automated test setups because it
131     - can prevent floods of warnings and increase the chances of the machine
132     - surviving in case something is really wrong. In other cases, the one-
133     - shot mode could actually be counter-productive because it would turn
134     - itself off at the very first error -- in the case of a false positive
135     - too -- and this would get in the way of debugging the specific
136     - problem you were interested in.
137     -
138     - If you would like to use your kernel as normal, but with a chance to
139     - enable kmemcheck in case of some problem, it might be a good idea to
140     - choose "disabled" here. When kmemcheck is disabled, most of the run-
141     - time overhead is not incurred, and the kernel will be almost as fast
142     - as normal.
143     -
144     -- ``CONFIG_KMEMCHECK_QUEUE_SIZE``
145     - Select the maximum number of error reports to store in an internal
146     - (fixed-size) buffer. Since errors can occur virtually anywhere and in
147     - any context, we need a temporary storage area which is guaranteed not
148     - to generate any other page faults when accessed. The queue will be
149     - emptied as soon as a tasklet may be scheduled. If the queue is full,
150     - new error reports will be lost.
151     -
152     - The default value of 64 is probably fine. If some code produces more
153     - than 64 errors within an irqs-off section, then the code is likely to
154     - produce many, many more, too, and these additional reports seldom give
155     - any more information (the first report is usually the most valuable
156     - anyway).
157     -
158     - This number might have to be adjusted if you are not using serial
159     - console or similar to capture the kernel log. If you are using the
160     - "dmesg" command to save the log, then getting a lot of kmemcheck
161     - warnings might overflow the kernel log itself, and the earlier reports
162     - will get lost in that way instead. Try setting this to 10 or so on
163     - such a setup.
164     -
165     -- ``CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT``
166     - Select the number of shadow bytes to save along with each entry of the
167     - error-report queue. These bytes indicate what parts of an allocation
168     - are initialized, uninitialized, etc. and will be displayed when an
169     - error is detected to help the debugging of a particular problem.
170     -
171     - The number entered here is actually the base-2 logarithm of the number
172     - of bytes that will be saved. So if you pick, for example, 5 here,
173     - kmemcheck will save 2^5 = 32 bytes.
174     -
175     - The default value should be fine for debugging most problems. It also
176     - fits nicely within 80 columns.
177     -
178     -- ``CONFIG_KMEMCHECK_PARTIAL_OK``
179     - This option (when enabled) works around certain GCC optimizations that
180     - produce 32-bit reads from 16-bit variables where the upper 16 bits are
181     - thrown away afterwards.
182     -
183     - The default value (enabled) is recommended. This may of course hide
184     - some real errors, but disabling it would probably produce a lot of
185     - false positives.
186     -
187     -- ``CONFIG_KMEMCHECK_BITOPS_OK``
188     - This option silences warnings that would be generated for bit-field
189     - accesses where not all the bits are initialized at the same time. This
190     - may also hide some real bugs.
191     -
192     - This option is probably obsolete, or it should be replaced with
193     - the kmemcheck-/bitfield-annotations for the code in question. The
194     - default value is therefore fine.
195     -
196     -Now compile the kernel as usual.
197     -
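Aside (not part of the original patch): a minimal user-space model of the
fixed-size report queue described under ``CONFIG_KMEMCHECK_QUEUE_SIZE``
above -- reports are dropped once the buffer fills and drained later from
a safe context::

    #include <stdio.h>

    #define QUEUE_SIZE 64                 /* the default discussed above */

    struct report {
            char msg[64];
    };

    static struct report queue[QUEUE_SIZE];
    static int n_queued;

    static void queue_report(const char *msg)
    {
            if (n_queued == QUEUE_SIZE)
                    return;               /* full: new reports are lost */
            snprintf(queue[n_queued++].msg, sizeof(queue[0].msg), "%s", msg);
    }

    static void drain_queue(void)         /* kmemcheck: from a tasklet */
    {
            for (int i = 0; i < n_queued; i++)
                    puts(queue[i].msg);
            n_queued = 0;
    }

    int main(void)
    {
            for (int i = 0; i < 100; i++)
                    queue_report("kmemcheck: uninitialized read");
            drain_queue();                /* prints at most QUEUE_SIZE lines */
            return 0;
    }
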
198     -
199     -How to use
200     -----------
201     -
202     -Booting
203     -~~~~~~~
204     -
205     -First some information about the command-line options. There is only one
206     -option specific to kmemcheck, and this is called "kmemcheck". It can be used
207     -to override the default mode as chosen by the ``CONFIG_KMEMCHECK_*_BY_DEFAULT``
208     -option. Its possible settings are:
209     -
210     -- ``kmemcheck=0`` (disabled)
211     -- ``kmemcheck=1`` (enabled)
212     -- ``kmemcheck=2`` (one-shot mode)
213     -
214     -If SLUB debugging has been enabled in the kernel, it may take precedence over
215     -kmemcheck in such a way that the slab caches which are under SLUB debugging
216     -will not be tracked by kmemcheck. In order to ensure that this doesn't happen
217     -(even though it shouldn't by default), use SLUB's boot option ``slub_debug``,
218     -like this: ``slub_debug=-``
219     -
220     -In fact, this option may also be used for fine-grained control over SLUB vs.
221     -kmemcheck. For example, if the command line includes
222     -``kmemcheck=1 slub_debug=,dentry``, then SLUB debugging will be used only
223     -for the "dentry" slab cache, and with kmemcheck tracking all the other
224     -caches. This is advanced usage, however, and is not generally recommended.
225     -
226     -
227     -Run-time enable/disable
228     -~~~~~~~~~~~~~~~~~~~~~~~
229     -
230     -When the kernel has booted, it is possible to enable or disable kmemcheck at
231     -run-time. WARNING: This feature is still experimental and may cause false
232     -positive warnings to appear. Therefore, try not to use this. If you find that
233     -it doesn't work properly (e.g. you see an unreasonable amount of warnings), I
234     -will be happy to take bug reports.
235     -
236     -Use the file ``/proc/sys/kernel/kmemcheck`` for this purpose, e.g.::
237     -
238     - $ echo 0 > /proc/sys/kernel/kmemcheck # disables kmemcheck
239     -
240     -The numbers are the same as for the ``kmemcheck=`` command-line option.
241     -
242     -
243     -Debugging
244     -~~~~~~~~~
245     -
246     -A typical report will look something like this::
247     -
248     - WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
249     - 80000000000000000000000000000000000000000088ffff0000000000000000
250     - i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
251     - ^
252     -
253     - Pid: 1856, comm: ntpdate Not tainted 2.6.29-rc5 #264 945P-A
254     - RIP: 0010:[<ffffffff8104ede8>] [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190
255     - RSP: 0018:ffff88003cdf7d98 EFLAGS: 00210002
256     - RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009
257     - RDX: ffff88003e5d6018 RSI: ffff88003e5d6024 RDI: ffff88003cdf7e84
258     - RBP: ffff88003cdf7db8 R08: ffff88003e5d6000 R09: 0000000000000000
259     - R10: 0000000000000080 R11: 0000000000000000 R12: 000000000000000e
260     - R13: ffff88003cdf7e78 R14: ffff88003d530710 R15: ffff88003d5a98c8
261     - FS: 0000000000000000(0000) GS:ffff880001982000(0063) knlGS:00000
262     - CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033
263     - CR2: ffff88003f806ea0 CR3: 000000003c036000 CR4: 00000000000006a0
264     - DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
265     - DR3: 0000000000000000 DR6: 00000000ffff4ff0 DR7: 0000000000000400
266     - [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
267     - [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
268     - [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
269     - [<ffffffff8100c7b5>] int_signal+0x12/0x17
270     - [<ffffffffffffffff>] 0xffffffffffffffff
271     -
272     -The single most valuable piece of information in this report is the RIP
273     -(or EIP on 32-bit) value. This will help us pinpoint exactly which
274     -instruction caused the warning.
275     -
276     -If your kernel was compiled with ``CONFIG_DEBUG_INFO=y``, then all we have to do
277     -is give this address to the addr2line program, like this::
278     -
279     - $ addr2line -e vmlinux -i ffffffff8104ede8
280     - arch/x86/include/asm/string_64.h:12
281     - include/asm-generic/siginfo.h:287
282     - kernel/signal.c:380
283     - kernel/signal.c:410
284     -
285     -The "``-e vmlinux``" tells addr2line which file to look in. **IMPORTANT:**
286     -This must be the vmlinux of the kernel that produced the warning in the
287     -first place! If not, the line number information will almost certainly be
288     -wrong.
289     -
290     -The "``-i``" tells addr2line to also print the line numbers of inlined
291     -functions. In this case, the flag was very important, because otherwise,
292     -it would only have printed the first line, which is just a call to
293     -``memcpy()``, which could be called from a thousand places in the kernel, and
294     -is therefore not very useful. These inlined functions would not show up in
295     -the stack trace above, simply because the kernel doesn't load the extra
296     -debugging information. This technique can of course be used with ordinary
297     -kernel oopses as well.
298     -
299     -In this case, it's the caller of ``memcpy()`` that is interesting, and it can be
300     -found in ``include/asm-generic/siginfo.h``, line 287::
301     -
302     - 281 static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
303     - 282 {
304     - 283 if (from->si_code < 0)
305     - 284 memcpy(to, from, sizeof(*to));
306     - 285 else
307     - 286 /* _sigchld is currently the largest know union member */
308     - 287 memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
309     - 288 }
310     -
311     -Since this was a read (kmemcheck usually warns about reads only, though it can
312     -warn about writes to unallocated or freed memory as well), it was probably the
313     -"from" argument which contained some uninitialized bytes. Following the chain
314     -of calls, we move upwards to see where "from" was allocated or initialized,
315     -``kernel/signal.c``, line 380::
316     -
317     - 359 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
318     - 360 {
319     - ...
320     - 367 list_for_each_entry(q, &list->list, list) {
321     - 368 if (q->info.si_signo == sig) {
322     - 369 if (first)
323     - 370 goto still_pending;
324     - 371 first = q;
325     - ...
326     - 377 if (first) {
327     - 378 still_pending:
328     - 379 list_del_init(&first->list);
329     - 380 copy_siginfo(info, &first->info);
330     - 381 __sigqueue_free(first);
331     - ...
332     - 392 }
333     - 393 }
334     -
335     -Here, it is ``&first->info`` that is being passed on to ``copy_siginfo()``. The
336     -variable ``first`` was found on a list -- passed in as the second argument to
337     -``collect_signal()``. We continue our journey through the stack, to figure out
338     -where the item on "list" was allocated or initialized. We move to line 410::
339     -
340     - 395 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
341     - 396 siginfo_t *info)
342     - 397 {
343     - ...
344     - 410 collect_signal(sig, pending, info);
345     - ...
346     - 414 }
347     -
348     -Now we need to follow the ``pending`` pointer, since that is being passed on to
349     -``collect_signal()`` as ``list``. At this point, we've run out of lines from the
350     -"addr2line" output. Not to worry, we just paste the next addresses from the
351     -kmemcheck stack dump, i.e.::
352     -
353     - [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
354     - [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
355     - [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
356     - [<ffffffff8100c7b5>] int_signal+0x12/0x17
357     -
358     - $ addr2line -e vmlinux -i ffffffff8104f04e ffffffff81050bd8 \
359     - ffffffff8100b87d ffffffff8100c7b5
360     - kernel/signal.c:446
361     - kernel/signal.c:1806
362     - arch/x86/kernel/signal.c:805
363     - arch/x86/kernel/signal.c:871
364     - arch/x86/kernel/entry_64.S:694
365     -
366     -Remember that since these addresses were found on the stack and not as the
367     -RIP value, they actually point to the _next_ instruction (they are return
368     -addresses). This becomes obvious when we look at the code for line 446::
369     -
370     - 422 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
371     - 423 {
372     - ...
373     - 431 signr = __dequeue_signal(&tsk->signal->shared_pending,
374     - 432 mask, info);
375     - 433 /*
376     - 434 * itimer signal ?
377     - 435 *
378     - 436 * itimers are process shared and we restart periodic
379     - 437 * itimers in the signal delivery path to prevent DoS
380     - 438 * attacks in the high resolution timer case. This is
381     - 439 * compliant with the old way of self restarting
382     - 440 * itimers, as the SIGALRM is a legacy signal and only
383     - 441 * queued once. Changing the restart behaviour to
384     - 442 * restart the timer in the signal dequeue path is
385     - 443 * reducing the timer noise on heavy loaded !highres
386     - 444 * systems too.
387     - 445 */
388     - 446 if (unlikely(signr == SIGALRM)) {
389     - ...
390     - 489 }
391     -
392     -So instead of looking at 446, we should be looking at 431, which is the line
393     -that executes just before 446. Here we see that what we are looking for is
394     -``&tsk->signal->shared_pending``.
395     -
396     -Our next task is to figure out which function puts items on this
397     -``shared_pending`` list. A crude but efficient tool is ``git grep``::
398     -
399     - $ git grep -n 'shared_pending' kernel/
400     - ...
401     - kernel/signal.c:828: pending = group ? &t->signal->shared_pending : &t->pending;
402     - kernel/signal.c:1339: pending = group ? &t->signal->shared_pending : &t->pending;
403     - ...
404     -
405     -There were more results, but none of them were related to list operations,
406     -and these were the only assignments. We inspect the line numbers more closely
407     -and find that this is indeed where items are being added to the list::
408     -
409     - 816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
410     - 817 int group)
411     - 818 {
412     - ...
413     - 828 pending = group ? &t->signal->shared_pending : &t->pending;
414     - ...
415     - 851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
416     - 852 (is_si_special(info) ||
417     - 853 info->si_code >= 0)));
418     - 854 if (q) {
419     - 855 list_add_tail(&q->list, &pending->list);
420     - ...
421     - 890 }
422     -
423     -and::
424     -
425     - 1309 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
426     - 1310 {
427     - ....
428     - 1339 pending = group ? &t->signal->shared_pending : &t->pending;
429     - 1340 list_add_tail(&q->list, &pending->list);
430     - ....
431     - 1347 }
432     -
433     -In the first case, the list element we are looking for, ``q``, is being
434     -returned from the function ``__sigqueue_alloc()``, which looks like an
435     -allocation function. Let's take a look at it::
436     -
437     - 187 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
438     - 188 int override_rlimit)
439     - 189 {
440     - 190 struct sigqueue *q = NULL;
441     - 191 struct user_struct *user;
442     - 192
443     - 193 /*
444     - 194 * We won't get problems with the target's UID changing under us
445     - 195 * because changing it requires RCU be used, and if t != current, the
446     - 196 * caller must be holding the RCU readlock (by way of a spinlock) and
447     - 197 * we use RCU protection here
448     - 198 */
449     - 199 user = get_uid(__task_cred(t)->user);
450     - 200 atomic_inc(&user->sigpending);
451     - 201 if (override_rlimit ||
452     - 202 atomic_read(&user->sigpending) <=
453     - 203 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
454     - 204 q = kmem_cache_alloc(sigqueue_cachep, flags);
455     - 205 if (unlikely(q == NULL)) {
456     - 206 atomic_dec(&user->sigpending);
457     - 207 free_uid(user);
458     - 208 } else {
459     - 209 INIT_LIST_HEAD(&q->list);
460     - 210 q->flags = 0;
461     - 211 q->user = user;
462     - 212 }
463     - 213
464     - 214 return q;
465     - 215 }
466     -
467     -We see that this function initializes ``q->list``, ``q->flags``, and
468     -``q->user``. It seems that now is the time to look at the definition of
469     -``struct sigqueue``, e.g.::
470     -
471     - 14 struct sigqueue {
472     - 15 struct list_head list;
473     - 16 int flags;
474     - 17 siginfo_t info;
475     - 18 struct user_struct *user;
476     - 19 };
477     -
478     -And, you might remember, it was a ``memcpy()`` on ``&first->info`` that
479     -caused the warning, so this makes perfect sense. It also seems reasonable
480     -to assume that it is the caller of ``__sigqueue_alloc()`` that has the
481     -responsibility of filling out (initializing) this member.
482     -
483     -But just which fields of the struct were uninitialized? Let's look at
484     -kmemcheck's report again::
485     -
486     - WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
487     - 80000000000000000000000000000000000000000088ffff0000000000000000
488     - i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
489     - ^
490     -
491     -These first two lines are the memory dump of the memory object itself, and
492     -the shadow bytemap, respectively. The memory object itself is in this case
493     -``&first->info``. Just beware that the start of this dump is NOT the start
494     -of the object itself! The position of the caret (^) corresponds with the
495     -address of the read (ffff88003e4a2024).
496     -
497     -The shadow bytemap dump legend is as follows:
498     -
499     -- i: initialized
500     -- u: uninitialized
501     -- a: unallocated (memory has been allocated by the slab layer, but has not
502     - yet been handed off to anybody)
503     -- f: freed (memory has been allocated by the slab layer, but has been freed
504     - by the previous owner)
505     -
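Aside (not part of the original patch): a toy model of the shadow bytemap
semantics in the legend above, with one state byte per tracked data byte;
the offsets mirror the report being analyzed::

    #include <stdio.h>

    /* one state byte per tracked data byte, as in the legend above */
    enum shadow_state {
            SHADOW_UNALLOCATED   = 'a',
            SHADOW_UNINITIALIZED = 'u',
            SHADOW_INITIALIZED   = 'i',
            SHADOW_FREED         = 'f',
    };

    static char shadow[32];

    static void track_alloc(int off, int len)   /* object handed out */
    {
            for (int i = off; i < off + len; i++)
                    shadow[i] = SHADOW_UNINITIALIZED;
    }

    static void track_write(int off, int len)   /* bytes become valid */
    {
            for (int i = off; i < off + len; i++)
                    shadow[i] = SHADOW_INITIALIZED;
    }

    static void check_read(int off, int len)    /* warn on non-'i' bytes */
    {
            for (int i = off; i < off + len; i++)
                    if (shadow[i] != SHADOW_INITIALIZED)
                            printf("warning: read of '%c' byte at offset %d\n",
                                   shadow[i], i);
    }

    int main(void)
    {
            track_alloc(0, 32);   /* like kmem_cache_alloc() */
            track_write(0, 12);   /* caller fills in the first fields only */
            check_read(8, 4);     /* silent: fully initialized */
            check_read(12, 4);    /* warns: matches the 12-byte offset below */
            return 0;
    }
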
506     -In order to figure out where (relative to the start of the object) the
507     -uninitialized memory was located, we have to look at the disassembly. For
508     -that, we'll need the RIP address again::
509     -
510     - RIP: 0010:[<ffffffff8104ede8>] [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190
511     -
512     - $ objdump -d --no-show-raw-insn vmlinux | grep -C 8 ffffffff8104ede8:
513     - ffffffff8104edc8: mov %r8,0x8(%r8)
514     - ffffffff8104edcc: test %r10d,%r10d
515     - ffffffff8104edcf: js ffffffff8104ee88 <__dequeue_signal+0x168>
516     - ffffffff8104edd5: mov %rax,%rdx
517     - ffffffff8104edd8: mov $0xc,%ecx
518     - ffffffff8104eddd: mov %r13,%rdi
519     - ffffffff8104ede0: mov $0x30,%eax
520     - ffffffff8104ede5: mov %rdx,%rsi
521     - ffffffff8104ede8: rep movsl %ds:(%rsi),%es:(%rdi)
522     - ffffffff8104edea: test $0x2,%al
523     - ffffffff8104edec: je ffffffff8104edf0 <__dequeue_signal+0xd0>
524     - ffffffff8104edee: movsw %ds:(%rsi),%es:(%rdi)
525     - ffffffff8104edf0: test $0x1,%al
526     - ffffffff8104edf2: je ffffffff8104edf5 <__dequeue_signal+0xd5>
527     - ffffffff8104edf4: movsb %ds:(%rsi),%es:(%rdi)
528     - ffffffff8104edf5: mov %r8,%rdi
529     - ffffffff8104edf8: callq ffffffff8104de60 <__sigqueue_free>
530     -
531     -As expected, it's the "``rep movsl``" instruction from the ``memcpy()``
532     -that causes the warning. We know that ``REP MOVSL`` uses the register
533     -``RCX`` to count the number of remaining iterations. By taking a look at the
534     -register dump again (from the kmemcheck report), we can figure out how many
535     -bytes were left to copy::
536     -
537     - RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009
538     -
539     -By looking at the disassembly, we also see that ``%ecx`` is being loaded
540     -with the value ``$0xc`` just before (ffffffff8104edd8), so we are very
541     -lucky. Keep in mind that this is the number of iterations, not bytes. And
542     -since this is a "long" operation, we need to multiply by 4 to get the
543     -number of bytes. So this means that the uninitialized value was encountered
544     -at 4 * (0xc - 0x9) = 12 bytes from the start of the object.
545     -
546     -We can now try to figure out which field of the "``struct siginfo``" was
547     -not initialized. This is the beginning of the struct::
548     -
549     - 40 typedef struct siginfo {
550     - 41 int si_signo;
551     - 42 int si_errno;
552     - 43 int si_code;
553     - 44
554     - 45 union {
555     - ..
556     - 92 } _sifields;
557     - 93 } siginfo_t;
558     -
559     -On 64-bit, an int is 4 bytes long, so it must be the union member that has
560     -not been initialized. We can verify this using gdb::
561     -
562     - $ gdb vmlinux
563     - ...
564     - (gdb) p &((struct siginfo *) 0)->_sifields
565     - $1 = (union {...} *) 0x10
566     -
567     -Actually, it seems that the union member is located at offset 0x10 -- which
568     -means that gcc has inserted 4 bytes of padding between the members ``si_code``
569     -and ``_sifields``. We can now get a fuller picture of the memory dump::
570     -
571     - _----------------------------=> si_code
572     - / _--------------------=> (padding)
573     - | / _------------=> _sifields(._kill._pid)
574     - | | / _----=> _sifields(._kill._uid)
575     - | | | /
576     - -------|-------|-------|-------|
577     - 80000000000000000000000000000000000000000088ffff0000000000000000
578     - i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
579     -
580     -This allows us to realize another important fact: ``si_code`` contains the
581     -value 0x80. Remember that x86 is little endian, so the first 4 bytes
582     -"80000000" are really the number 0x00000080. With a bit of research, we
583     -find that this is actually the constant ``SI_KERNEL`` defined in
584     -``include/asm-generic/siginfo.h``::
585     -
586     - 144 #define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
587     -
588     -This macro is used in exactly one place in the x86 kernel: In ``send_signal()``
589     -in ``kernel/signal.c``::
590     -
591     - 816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
592     - 817 int group)
593     - 818 {
594     - ...
595     - 828 pending = group ? &t->signal->shared_pending : &t->pending;
596     - ...
597     - 851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
598     - 852 (is_si_special(info) ||
599     - 853 info->si_code >= 0)));
600     - 854 if (q) {
601     - 855 list_add_tail(&q->list, &pending->list);
602     - 856 switch ((unsigned long) info) {
603     - ...
604     - 865 case (unsigned long) SEND_SIG_PRIV:
605     - 866 q->info.si_signo = sig;
606     - 867 q->info.si_errno = 0;
607     - 868 q->info.si_code = SI_KERNEL;
608     - 869 q->info.si_pid = 0;
609     - 870 q->info.si_uid = 0;
610     - 871 break;
611     - ...
612     - 890 }
613     -
614     -Not only does this match with the ``.si_code`` member, it also matches the place
615     -we found earlier when looking for where siginfo_t objects are enqueued on the
616     -``shared_pending`` list.
617     -
618     -So to sum up: It seems that it is the padding introduced by the compiler
619     -between two struct fields that is uninitialized, and this gets reported when
620     -we do a ``memcpy()`` on the struct. This means that we have identified a false
621     -positive warning.
622     -
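Aside (not part of the original patch): the padding false positive summed
up above can be reproduced stand-alone; ``struct siginfo_like`` is a
hypothetical stand-in for the real ``siginfo_t``::

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct siginfo_like {
            int si_signo;
            int si_errno;
            int si_code;
            /* 4 bytes of compiler padding here on x86-64, so that the
             * 8-byte-aligned union below starts at offset 0x10 */
            union {
                    long pid;
            } sifields;
    };

    int main(void)
    {
            struct siginfo_like from, to;

            from.si_signo = 14;
            from.si_errno = 0;
            from.si_code  = 0x80;         /* SI_KERNEL */
            from.sifields.pid = 0;

            /* The padding bytes were never written; copying the whole
             * struct reads them, which is exactly the kind of access a
             * byte-granular checker reports as uninitialized. */
            memcpy(&to, &from, sizeof(to));

            printf("union offset: %#zx\n",
                   offsetof(struct siginfo_like, sifields));
            return 0;
    }
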
623     -Normally, kmemcheck will not report uninitialized accesses in ``memcpy()`` calls
624     -when both the source and destination addresses are tracked. (Instead, we copy
625     -the shadow bytemap as well). In this case, the destination address clearly
626     -was not tracked. We can dig a little deeper into the stack trace from above::
627     -
628     - arch/x86/kernel/signal.c:805
629     - arch/x86/kernel/signal.c:871
630     - arch/x86/kernel/entry_64.S:694
631     -
632     -And we clearly see that the destination siginfo object is located on the
633     -stack::
634     -
635     - 782 static void do_signal(struct pt_regs *regs)
636     - 783 {
637     - 784 struct k_sigaction ka;
638     - 785 siginfo_t info;
639     - ...
640     - 804 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
641     - ...
642     - 854 }
643     -
644     -And this ``&info`` is what eventually gets passed to ``copy_siginfo()`` as the
645     -destination argument.
646     -
647     -Now, even though we didn't find an actual error here, the example is still a
648     -good one, because it shows how one would go about finding out what the report
649     -was all about.
650     -
651     -
652     -Annotating false positives
653     -~~~~~~~~~~~~~~~~~~~~~~~~~~
654     -
655     -There are a few different ways to make annotations in the source code that
656     -will keep kmemcheck from checking and reporting certain allocations. Here
657     -they are:
658     -
659     -- ``__GFP_NOTRACK_FALSE_POSITIVE``
660     - This flag can be passed to ``kmalloc()`` or ``kmem_cache_alloc()``
661     - (therefore also to other functions that end up calling one of
662     - these) to indicate that the allocation should not be tracked
663     - because it would lead to a false positive report. This is a "big
664     - hammer" way of silencing kmemcheck; after all, even if the false
665     - positive pertains to a particular field in a struct, for example, we
666     - will now lose the ability to find (real) errors in other parts of
667     - the same struct.
668     -
669     - Example::
670     -
671     - /* No warnings will ever trigger on accessing any part of x */
672     - x = kmalloc(sizeof *x, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE);
673     -
674     -- ``kmemcheck_bitfield_begin(name)``/``kmemcheck_bitfield_end(name)`` and
675     - ``kmemcheck_annotate_bitfield(ptr, name)``
676     - The first two of these three macros can be used inside struct
677     - definitions to signal, respectively, the beginning and end of a
678     - bitfield. Additionally, this will assign the bitfield a name, which
679     - is given as an argument to the macros.
680     -
681     - Having used these markers, one can later use
682     - kmemcheck_annotate_bitfield() at the point of allocation, to indicate
683     - which parts of the allocation are part of a bitfield.
684     -
685     - Example::
686     -
687     - struct foo {
688     - int x;
689     -
690     - kmemcheck_bitfield_begin(flags);
691     - int flag_a:1;
692     - int flag_b:1;
693     - kmemcheck_bitfield_end(flags);
694     -
695     - int y;
696     - };
697     -
698     - struct foo *x = kmalloc(sizeof *x, GFP_KERNEL);
699     -
700     - /* No warnings will trigger on accessing the bitfield of x */
701     - kmemcheck_annotate_bitfield(x, flags);
702     -
703     - Note that ``kmemcheck_annotate_bitfield()`` can be used even before the
704     - return value of ``kmalloc()`` is checked -- in other words, passing NULL
705     - as the first argument is legal (and will do nothing).
706     -
707     -
708     -Reporting errors
709     -----------------
710     -
711     -As we have seen, kmemcheck will produce false positive reports. Therefore, it
712     -is not very wise to blindly post kmemcheck warnings to mailing lists and
713     -maintainers. Instead, I encourage maintainers and developers to find errors
714     -in their own code. If you get a warning, you can try to work around it, try
715     -to figure out if it's a real error or not, or simply ignore it. Most
716     -developers know their own code and will quickly and efficiently determine the
717     -root cause of a kmemcheck report. This is therefore also the most efficient
718     -way to work with kmemcheck.
719     -
720     -That said, we (the kmemcheck maintainers) will always be on the lookout for
721     -false positives that we can annotate and silence. So whatever you find,
722     -please drop us a note privately! Kernel configs and steps to reproduce (if
723     -available) are of course a great help too.
724     -
725     -Happy hacking!
726     -
727     -
728     -Technical description
729     ----------------------
730     -
731     -kmemcheck works by marking memory pages non-present. This means that whenever
732     -somebody attempts to access the page, a page fault is generated. The page
733     -fault handler notices that the page was in fact only hidden, and so it calls
734     -on the kmemcheck code to make further investigations.
735     -
736     -When the investigations are completed, kmemcheck "shows" the page by marking
737     -it present (as it would be under normal circumstances). This way, the
738     -interrupted code can continue as usual.
739     -
740     -But after the instruction has been executed, we should hide the page again, so
741     -that we can catch the next access too! Now kmemcheck makes use of a debugging
742     -feature of the processor, namely single-stepping. When the processor has
743     -finished the one instruction that generated the memory access, a debug
744     -exception is raised. From here, we simply hide the page again and continue
745     -execution, this time with the single-stepping feature turned off.
746     -
747     -kmemcheck requires some assistance from the memory allocator in order to work.
748     -The memory allocator needs to
749     -
750     - 1. Tell kmemcheck about newly allocated pages and pages that are about to
751     - be freed. This allows kmemcheck to set up and tear down the shadow memory
752     - for the pages in question. The shadow memory stores the status of each
753     - byte in the allocation proper, e.g. whether it is initialized or
754     - uninitialized.
755     -
756     - 2. Tell kmemcheck which parts of memory should be marked uninitialized.
757     - There are actually a few more states, such as "not yet allocated" and
758     - "recently freed".
759     -
760     -If a slab cache is set up using the SLAB_NOTRACK flag, it will never return
761     -memory that can take page faults because of kmemcheck.
762     -
763     -If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still
764     -request memory with the __GFP_NOTRACK or __GFP_NOTRACK_FALSE_POSITIVE flags.
765     -This does not prevent the page faults from occurring, however, but marks the
766     -object in question as being initialized so that no warnings will ever be
767     -produced for this object.
768     -
769     -Currently, the SLAB and SLUB allocators are supported by kmemcheck.
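Aside (not part of the original patch): the hide/trap/show cycle described
above can be imitated from user space with ``mprotect()`` and a ``SIGSEGV``
handler. This heavily simplified sketch assumes 4 KiB pages and omits the
single-step re-hide step::

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define PAGE 4096                     /* assumes 4 KiB pages */

    static char *page;

    static void on_fault(int sig, siginfo_t *si, void *ctx)
    {
            (void)sig; (void)si; (void)ctx;
            /* "Show" the page so the faulting instruction can be retried.
             * kmemcheck would also single-step it and hide the page again. */
            mprotect(page, PAGE, PROT_READ | PROT_WRITE);
            write(STDERR_FILENO, "caught hidden-page access\n", 26);
    }

    int main(void)
    {
            struct sigaction sa = { 0 };

            sa.sa_sigaction = on_fault;
            sa.sa_flags = SA_SIGINFO;
            sigaction(SIGSEGV, &sa, NULL);

            page = mmap(NULL, PAGE, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            /* "Hide" the page, as kmemcheck does by clearing the present
             * bit in the page table entry. */
            mprotect(page, PAGE, PROT_NONE);

            page[0] = 'x';                /* faults; the handler shows the page */
            printf("read back: %c\n", page[0]);
            return 0;
    }
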
770     diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
771     index a122723907ac..99acc712f83a 100644
772     --- a/Documentation/devicetree/bindings/dma/snps-dma.txt
773     +++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
774     @@ -64,6 +64,6 @@ Example:
775     reg = <0xe0000000 0x1000>;
776     interrupts = <0 35 0x4>;
777     dmas = <&dmahost 12 0 1>,
778     - <&dmahost 13 0 1 0>;
779     + <&dmahost 13 1 0>;
780     dma-names = "rx", "rx";
781     };
782     diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
783     index 5a8f7f4d2bca..7449893dc039 100644
784     --- a/Documentation/filesystems/ext4.txt
785     +++ b/Documentation/filesystems/ext4.txt
786     @@ -233,7 +233,7 @@ data_err=ignore(*) Just print an error message if an error occurs
787     data_err=abort Abort the journal if an error occurs in a file
788     data buffer in ordered mode.
789    
790     -grpid Give objects the same group ID as their creator.
791     +grpid New objects have the group ID of their parent.
792     bsdgroups
793    
794     nogrpid (*) New objects have the group ID of their creator.
795     diff --git a/MAINTAINERS b/MAINTAINERS
796     index 2811a211632c..76ea063d8083 100644
797     --- a/MAINTAINERS
798     +++ b/MAINTAINERS
799     @@ -7670,16 +7670,6 @@ F: include/linux/kdb.h
800     F: include/linux/kgdb.h
801     F: kernel/debug/
802    
803     -KMEMCHECK
804     -M: Vegard Nossum <vegardno@ifi.uio.no>
805     -M: Pekka Enberg <penberg@kernel.org>
806     -S: Maintained
807     -F: Documentation/dev-tools/kmemcheck.rst
808     -F: arch/x86/include/asm/kmemcheck.h
809     -F: arch/x86/mm/kmemcheck/
810     -F: include/linux/kmemcheck.h
811     -F: mm/kmemcheck.c
812     -
813     KMEMLEAK
814     M: Catalin Marinas <catalin.marinas@arm.com>
815     S: Maintained
816     diff --git a/Makefile b/Makefile
817     index 33176140f133..68d70485b088 100644
818     --- a/Makefile
819     +++ b/Makefile
820     @@ -1,7 +1,7 @@
821     # SPDX-License-Identifier: GPL-2.0
822     VERSION = 4
823     PATCHLEVEL = 14
824     -SUBLEVEL = 20
825     +SUBLEVEL = 21
826     EXTRAVERSION =
827     NAME = Petit Gorille
828    
829     diff --git a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
830     index 7b8d90b7aeea..29b636fce23f 100644
831     --- a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
832     +++ b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
833     @@ -150,11 +150,6 @@
834     interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
835     };
836    
837     -&charlcd {
838     - interrupt-parent = <&intc>;
839     - interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
840     -};
841     -
842     &serial0 {
843     interrupt-parent = <&intc>;
844     interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
845     diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi
846     index 7eab4bc07cec..7628bbb02324 100644
847     --- a/arch/arm/boot/dts/exynos5410.dtsi
848     +++ b/arch/arm/boot/dts/exynos5410.dtsi
849     @@ -333,7 +333,6 @@
850     &rtc {
851     clocks = <&clock CLK_RTC>;
852     clock-names = "rtc";
853     - interrupt-parent = <&pmu_system_controller>;
854     status = "disabled";
855     };
856    
857     diff --git a/arch/arm/boot/dts/lpc3250-ea3250.dts b/arch/arm/boot/dts/lpc3250-ea3250.dts
858     index 52b3ed10283a..e2bc731079be 100644
859     --- a/arch/arm/boot/dts/lpc3250-ea3250.dts
860     +++ b/arch/arm/boot/dts/lpc3250-ea3250.dts
861     @@ -156,8 +156,8 @@
862     uda1380: uda1380@18 {
863     compatible = "nxp,uda1380";
864     reg = <0x18>;
865     - power-gpio = <&gpio 0x59 0>;
866     - reset-gpio = <&gpio 0x51 0>;
867     + power-gpio = <&gpio 3 10 0>;
868     + reset-gpio = <&gpio 3 2 0>;
869     dac-clk = "wspll";
870     };
871    
872     diff --git a/arch/arm/boot/dts/lpc3250-phy3250.dts b/arch/arm/boot/dts/lpc3250-phy3250.dts
873     index fd95e2b10357..b7bd3a110a8d 100644
874     --- a/arch/arm/boot/dts/lpc3250-phy3250.dts
875     +++ b/arch/arm/boot/dts/lpc3250-phy3250.dts
876     @@ -81,8 +81,8 @@
877     uda1380: uda1380@18 {
878     compatible = "nxp,uda1380";
879     reg = <0x18>;
880     - power-gpio = <&gpio 0x59 0>;
881     - reset-gpio = <&gpio 0x51 0>;
882     + power-gpio = <&gpio 3 10 0>;
883     + reset-gpio = <&gpio 3 2 0>;
884     dac-clk = "wspll";
885     };
886    
887     diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi
888     index afe12e5b51f9..f936000f0699 100644
889     --- a/arch/arm/boot/dts/mt2701.dtsi
890     +++ b/arch/arm/boot/dts/mt2701.dtsi
891     @@ -593,6 +593,7 @@
892     compatible = "mediatek,mt2701-hifsys", "syscon";
893     reg = <0 0x1a000000 0 0x1000>;
894     #clock-cells = <1>;
895     + #reset-cells = <1>;
896     };
897    
898     usb0: usb@1a1c0000 {
899     @@ -677,6 +678,7 @@
900     compatible = "mediatek,mt2701-ethsys", "syscon";
901     reg = <0 0x1b000000 0 0x1000>;
902     #clock-cells = <1>;
903     + #reset-cells = <1>;
904     };
905    
906     eth: ethernet@1b100000 {
907     diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi
908     index ec8a07415cb3..36983a7d7cfd 100644
909     --- a/arch/arm/boot/dts/mt7623.dtsi
910     +++ b/arch/arm/boot/dts/mt7623.dtsi
911     @@ -753,6 +753,7 @@
912     "syscon";
913     reg = <0 0x1b000000 0 0x1000>;
914     #clock-cells = <1>;
915     + #reset-cells = <1>;
916     };
917    
918     eth: ethernet@1b100000 {
919     diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
920     index 688a86378cee..7bf5aa2237c9 100644
921     --- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
922     +++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
923     @@ -204,7 +204,7 @@
924     bus-width = <4>;
925     max-frequency = <50000000>;
926     cap-sd-highspeed;
927     - cd-gpios = <&pio 261 0>;
928     + cd-gpios = <&pio 261 GPIO_ACTIVE_LOW>;
929     vmmc-supply = <&mt6323_vmch_reg>;
930     vqmmc-supply = <&mt6323_vio18_reg>;
931     };
932     diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
933     index 726c5d0dbd5b..b290a5abb901 100644
934     --- a/arch/arm/boot/dts/s5pv210.dtsi
935     +++ b/arch/arm/boot/dts/s5pv210.dtsi
936     @@ -463,6 +463,7 @@
937     compatible = "samsung,exynos4210-ohci";
938     reg = <0xec300000 0x100>;
939     interrupts = <23>;
940     + interrupt-parent = <&vic1>;
941     clocks = <&clocks CLK_USB_HOST>;
942     clock-names = "usbhost";
943     #address-cells = <1>;
944     diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
945     index 84101e4eebbf..0f5f379323a8 100644
946     --- a/arch/arm/boot/dts/spear1310-evb.dts
947     +++ b/arch/arm/boot/dts/spear1310-evb.dts
948     @@ -349,7 +349,7 @@
949     spi0: spi@e0100000 {
950     status = "okay";
951     num-cs = <3>;
952     - cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>;
953     + cs-gpios = <&gpio1 7 0>, <&spics 0 0>, <&spics 1 0>;
954    
955     stmpe610@0 {
956     compatible = "st,stmpe610";
957     diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
958     index 5f347054527d..d4dbc4098653 100644
959     --- a/arch/arm/boot/dts/spear1340.dtsi
960     +++ b/arch/arm/boot/dts/spear1340.dtsi
961     @@ -142,8 +142,8 @@
962     reg = <0xb4100000 0x1000>;
963     interrupts = <0 105 0x4>;
964     status = "disabled";
965     - dmas = <&dwdma0 0x600 0 0 1>, /* 0xC << 11 */
966     - <&dwdma0 0x680 0 1 0>; /* 0xD << 7 */
967     + dmas = <&dwdma0 12 0 1>,
968     + <&dwdma0 13 1 0>;
969     dma-names = "tx", "rx";
970     };
971    
972     diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
973     index 17ea0abcdbd7..086b4b333249 100644
974     --- a/arch/arm/boot/dts/spear13xx.dtsi
975     +++ b/arch/arm/boot/dts/spear13xx.dtsi
976     @@ -100,7 +100,7 @@
977     reg = <0xb2800000 0x1000>;
978     interrupts = <0 29 0x4>;
979     status = "disabled";
980     - dmas = <&dwdma0 0 0 0 0>;
981     + dmas = <&dwdma0 0 0 0>;
982     dma-names = "data";
983     };
984    
985     @@ -290,8 +290,8 @@
986     #size-cells = <0>;
987     interrupts = <0 31 0x4>;
988     status = "disabled";
989     - dmas = <&dwdma0 0x2000 0 0 0>, /* 0x4 << 11 */
990     - <&dwdma0 0x0280 0 0 0>; /* 0x5 << 7 */
991     + dmas = <&dwdma0 4 0 0>,
992     + <&dwdma0 5 0 0>;
993     dma-names = "tx", "rx";
994     };
995    
996     diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
997     index 6b32d20acc9f..00166eb9be86 100644
998     --- a/arch/arm/boot/dts/spear600.dtsi
999     +++ b/arch/arm/boot/dts/spear600.dtsi
1000     @@ -194,6 +194,7 @@
1001     rtc: rtc@fc900000 {
1002     compatible = "st,spear600-rtc";
1003     reg = <0xfc900000 0x1000>;
1004     + interrupt-parent = <&vic0>;
1005     interrupts = <10>;
1006     status = "disabled";
1007     };
1008     diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
1009     index 68aab50a73ab..733678b75b88 100644
1010     --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
1011     +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
1012     @@ -750,6 +750,7 @@
1013     reg = <0x10120000 0x1000>;
1014     interrupt-names = "combined";
1015     interrupts = <14>;
1016     + interrupt-parent = <&vica>;
1017     clocks = <&clcdclk>, <&hclkclcd>;
1018     clock-names = "clcdclk", "apb_pclk";
1019     status = "disabled";
1020     diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi
1021     index fa149837df14..11fdecd9312e 100644
1022     --- a/arch/arm/boot/dts/stih407.dtsi
1023     +++ b/arch/arm/boot/dts/stih407.dtsi
1024     @@ -8,6 +8,7 @@
1025     */
1026     #include "stih407-clock.dtsi"
1027     #include "stih407-family.dtsi"
1028     +#include <dt-bindings/gpio/gpio.h>
1029     / {
1030     soc {
1031     sti-display-subsystem {
1032     @@ -122,7 +123,7 @@
1033     <&clk_s_d2_quadfs 0>,
1034     <&clk_s_d2_quadfs 1>;
1035    
1036     - hdmi,hpd-gpio = <&pio5 3>;
1037     + hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
1038     reset-names = "hdmi";
1039     resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
1040     ddc = <&hdmiddc>;
1041     diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
1042     index 21fe72b183d8..96eed0dc08b8 100644
1043     --- a/arch/arm/boot/dts/stih410.dtsi
1044     +++ b/arch/arm/boot/dts/stih410.dtsi
1045     @@ -9,6 +9,7 @@
1046     #include "stih410-clock.dtsi"
1047     #include "stih407-family.dtsi"
1048     #include "stih410-pinctrl.dtsi"
1049     +#include <dt-bindings/gpio/gpio.h>
1050     / {
1051     aliases {
1052     bdisp0 = &bdisp0;
1053     @@ -213,7 +214,7 @@
1054     <&clk_s_d2_quadfs 0>,
1055     <&clk_s_d2_quadfs 1>;
1056    
1057     - hdmi,hpd-gpio = <&pio5 3>;
1058     + hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
1059     reset-names = "hdmi";
1060     resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
1061     ddc = <&hdmiddc>;
1062     diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
1063     index 0722ec6be692..6821f1249300 100644
1064     --- a/arch/arm/include/asm/dma-iommu.h
1065     +++ b/arch/arm/include/asm/dma-iommu.h
1066     @@ -7,7 +7,6 @@
1067     #include <linux/mm_types.h>
1068     #include <linux/scatterlist.h>
1069     #include <linux/dma-debug.h>
1070     -#include <linux/kmemcheck.h>
1071     #include <linux/kref.h>
1072    
1073     #define ARM_MAPPING_ERROR (~(dma_addr_t)0x0)
1074     diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1075     index b2902a5cd780..2d7344f0e208 100644
1076     --- a/arch/arm/include/asm/pgalloc.h
1077     +++ b/arch/arm/include/asm/pgalloc.h
1078     @@ -57,7 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1079     extern pgd_t *pgd_alloc(struct mm_struct *mm);
1080     extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
1081    
1082     -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
1083     +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
1084    
1085     static inline void clean_pte_table(pte_t *pte)
1086     {
1087     diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c
1088     index 107f37210fb9..83606087edc7 100644
1089     --- a/arch/arm/mach-pxa/tosa-bt.c
1090     +++ b/arch/arm/mach-pxa/tosa-bt.c
1091     @@ -132,3 +132,7 @@ static struct platform_driver tosa_bt_driver = {
1092     },
1093     };
1094     module_platform_driver(tosa_bt_driver);
1095     +
1096     +MODULE_LICENSE("GPL");
1097     +MODULE_AUTHOR("Dmitry Baryshkov");
1098     +MODULE_DESCRIPTION("Bluetooth built-in chip control");
1099     diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
1100     index dc3817593e14..61da6e65900b 100644
1101     --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
1102     +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
1103     @@ -901,6 +901,7 @@
1104     "dsi_phy_regulator";
1105    
1106     #clock-cells = <1>;
1107     + #phy-cells = <0>;
1108    
1109     clocks = <&gcc GCC_MDSS_AHB_CLK>;
1110     clock-names = "iface_clk";
1111     @@ -1430,8 +1431,8 @@
1112     #address-cells = <1>;
1113     #size-cells = <0>;
1114    
1115     - qcom,ipc-1 = <&apcs 0 13>;
1116     - qcom,ipc-6 = <&apcs 0 19>;
1117     + qcom,ipc-1 = <&apcs 8 13>;
1118     + qcom,ipc-3 = <&apcs 8 19>;
1119    
1120     apps_smsm: apps@0 {
1121     reg = <0>;
1122     diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
1123     index d25f4f137c2a..5ca6a573a701 100644
1124     --- a/arch/arm64/include/asm/pgalloc.h
1125     +++ b/arch/arm64/include/asm/pgalloc.h
1126     @@ -26,7 +26,7 @@
1127    
1128     #define check_pgt_cache() do { } while (0)
1129    
1130     -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
1131     +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
1132     #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
1133    
1134     #if CONFIG_PGTABLE_LEVELS > 2
1135     diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
1136     index 07823595b7f0..52f15cd896e1 100644
1137     --- a/arch/arm64/kernel/cpu_errata.c
1138     +++ b/arch/arm64/kernel/cpu_errata.c
1139     @@ -406,6 +406,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1140     .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
1141     MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
1142     },
1143     + {
1144     + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1145     + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
1146     + .enable = qcom_enable_link_stack_sanitization,
1147     + },
1148     + {
1149     + .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
1150     + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
1151     + },
1152     {
1153     .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1154     MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
1155     diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
1156     index 79364d3455c0..e08ae6b6b63e 100644
1157     --- a/arch/arm64/kvm/hyp/switch.c
1158     +++ b/arch/arm64/kvm/hyp/switch.c
1159     @@ -371,8 +371,10 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
1160     u32 midr = read_cpuid_id();
1161    
1162     /* Apply BTAC predictors mitigation to all Falkor chips */
1163     - if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)
1164     + if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
1165     + ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) {
1166     __qcom_hyp_sanitize_btac_predictors();
1167     + }
1168     }
1169    
1170     fp_enabled = __fpsimd_enabled();
1171     diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
1172     index 27058f3fd132..329a1c43365e 100644
1173     --- a/arch/arm64/mm/proc.S
1174     +++ b/arch/arm64/mm/proc.S
1175     @@ -190,7 +190,8 @@ ENDPROC(idmap_cpu_replace_ttbr1)
1176     dc cvac, cur_\()\type\()p // Ensure any existing dirty
1177     dmb sy // lines are written back before
1178     ldr \type, [cur_\()\type\()p] // loading the entry
1179     - tbz \type, #0, next_\()\type // Skip invalid entries
1180     + tbz \type, #0, skip_\()\type // Skip invalid and
1181     + tbnz \type, #11, skip_\()\type // non-global entries
1182     .endm
1183    
1184     .macro __idmap_kpti_put_pgtable_ent_ng, type
1185     @@ -249,8 +250,9 @@ ENTRY(idmap_kpti_install_ng_mappings)
1186     add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
1187     do_pgd: __idmap_kpti_get_pgtable_ent pgd
1188     tbnz pgd, #1, walk_puds
1189     - __idmap_kpti_put_pgtable_ent_ng pgd
1190     next_pgd:
1191     + __idmap_kpti_put_pgtable_ent_ng pgd
1192     +skip_pgd:
1193     add cur_pgdp, cur_pgdp, #8
1194     cmp cur_pgdp, end_pgdp
1195     b.ne do_pgd
1196     @@ -278,8 +280,9 @@ walk_puds:
1197     add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
1198     do_pud: __idmap_kpti_get_pgtable_ent pud
1199     tbnz pud, #1, walk_pmds
1200     - __idmap_kpti_put_pgtable_ent_ng pud
1201     next_pud:
1202     + __idmap_kpti_put_pgtable_ent_ng pud
1203     +skip_pud:
1204     add cur_pudp, cur_pudp, 8
1205     cmp cur_pudp, end_pudp
1206     b.ne do_pud
1207     @@ -298,8 +301,9 @@ walk_pmds:
1208     add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
1209     do_pmd: __idmap_kpti_get_pgtable_ent pmd
1210     tbnz pmd, #1, walk_ptes
1211     - __idmap_kpti_put_pgtable_ent_ng pmd
1212     next_pmd:
1213     + __idmap_kpti_put_pgtable_ent_ng pmd
1214     +skip_pmd:
1215     add cur_pmdp, cur_pmdp, #8
1216     cmp cur_pmdp, end_pmdp
1217     b.ne do_pmd
1218     @@ -317,7 +321,7 @@ walk_ptes:
1219     add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
1220     do_pte: __idmap_kpti_get_pgtable_ent pte
1221     __idmap_kpti_put_pgtable_ent_ng pte
1222     -next_pte:
1223     +skip_pte:
1224     add cur_ptep, cur_ptep, #8
1225     cmp cur_ptep, end_ptep
1226     b.ne do_pte
1227     diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
1228     index c3d798b44030..c82457b0e733 100644
1229     --- a/arch/mips/Kconfig
1230     +++ b/arch/mips/Kconfig
1231     @@ -119,12 +119,12 @@ config MIPS_GENERIC
1232     select SYS_SUPPORTS_MULTITHREADING
1233     select SYS_SUPPORTS_RELOCATABLE
1234     select SYS_SUPPORTS_SMARTMIPS
1235     - select USB_EHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
1236     - select USB_EHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
1237     - select USB_OHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
1238     - select USB_OHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
1239     - select USB_UHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
1240     - select USB_UHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
1241     + select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
1242     + select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
1243     + select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
1244     + select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
1245     + select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
1246     + select USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
1247     select USE_OF
1248     help
1249     Select this to build a kernel which aims to support multiple boards,
1250     diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
1251     index fe3939726765..795caa763da3 100644
1252     --- a/arch/mips/kernel/setup.c
1253     +++ b/arch/mips/kernel/setup.c
1254     @@ -374,6 +374,7 @@ static void __init bootmem_init(void)
1255     unsigned long reserved_end;
1256     unsigned long mapstart = ~0UL;
1257     unsigned long bootmap_size;
1258     + phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX;
1259     bool bootmap_valid = false;
1260     int i;
1261    
1262     @@ -394,7 +395,8 @@ static void __init bootmem_init(void)
1263     max_low_pfn = 0;
1264    
1265     /*
1266     - * Find the highest page frame number we have available.
1267     + * Find the highest page frame number we have available
1268     + * and the lowest used RAM address
1269     */
1270     for (i = 0; i < boot_mem_map.nr_map; i++) {
1271     unsigned long start, end;
1272     @@ -406,6 +408,8 @@ static void __init bootmem_init(void)
1273     end = PFN_DOWN(boot_mem_map.map[i].addr
1274     + boot_mem_map.map[i].size);
1275    
1276     + ramstart = min(ramstart, boot_mem_map.map[i].addr);
1277     +
1278     #ifndef CONFIG_HIGHMEM
1279     /*
1280     * Skip highmem here so we get an accurate max_low_pfn if low
1281     @@ -435,6 +439,13 @@ static void __init bootmem_init(void)
1282     mapstart = max(reserved_end, start);
1283     }
1284    
1285     + /*
1286     + * Reserve any memory between the start of RAM and PHYS_OFFSET
1287     + */
1288     + if (ramstart > PHYS_OFFSET)
1289     + add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
1290     + BOOT_MEM_RESERVED);
1291     +
1292     if (min_low_pfn >= max_low_pfn)
1293     panic("Incorrect memory mapping !!!");
1294     if (min_low_pfn > ARCH_PFN_OFFSET) {
1295     @@ -663,9 +674,6 @@ static int __init early_parse_mem(char *p)
1296    
1297     add_memory_region(start, size, BOOT_MEM_RAM);
1298    
1299     - if (start && start > PHYS_OFFSET)
1300     - add_memory_region(PHYS_OFFSET, start - PHYS_OFFSET,
1301     - BOOT_MEM_RESERVED);
1302     return 0;
1303     }
1304     early_param("mem", early_parse_mem);
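
The net effect of the two setup.c hunks: instead of reserving memory below each "mem=" chunk as the parameter is parsed, bootmem_init() now scans the whole boot memory map for the lowest used RAM address and reserves the [PHYS_OFFSET, ramstart) gap exactly once. A simplified C model of the scan (struct and function names are ours):

    #include <stdint.h>

    typedef uint64_t phys_addr_t;
    struct region { phys_addr_t addr, size; };

    /* One pass over all regions; returns the lowest used RAM address. */
    static phys_addr_t lowest_ram_addr(const struct region *map, int nr)
    {
            phys_addr_t ramstart = (phys_addr_t)-1;
            int i;

            for (i = 0; i < nr; i++)
                    if (map[i].addr < ramstart)
                            ramstart = map[i].addr;
            return ramstart;
    }
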
1305     diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
1306     index f41bd3cb76d9..e212a1f0b6d2 100644
1307     --- a/arch/openrisc/include/asm/dma-mapping.h
1308     +++ b/arch/openrisc/include/asm/dma-mapping.h
1309     @@ -23,7 +23,6 @@
1310     */
1311    
1312     #include <linux/dma-debug.h>
1313     -#include <linux/kmemcheck.h>
1314     #include <linux/dma-mapping.h>
1315    
1316     extern const struct dma_map_ops or1k_dma_map_ops;
1317     diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
1318     index a14203c005f1..e11f03007b57 100644
1319     --- a/arch/powerpc/include/asm/pgalloc.h
1320     +++ b/arch/powerpc/include/asm/pgalloc.h
1321     @@ -18,7 +18,7 @@ static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
1322     }
1323     #endif /* MODULE */
1324    
1325     -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
1326     +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
1327    
1328     #ifdef CONFIG_PPC_BOOK3S
1329     #include <asm/book3s/pgalloc.h>
1330     diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
1331     index 023ff9f17501..d5f2ee882f74 100644
1332     --- a/arch/powerpc/include/asm/topology.h
1333     +++ b/arch/powerpc/include/asm/topology.h
1334     @@ -44,6 +44,11 @@ extern int sysfs_add_device_to_node(struct device *dev, int nid);
1335     extern void sysfs_remove_device_from_node(struct device *dev, int nid);
1336     extern int numa_update_cpu_topology(bool cpus_locked);
1337    
1338     +static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
1339     +{
1340     + numa_cpu_lookup_table[cpu] = node;
1341     +}
1342     +
1343     static inline int early_cpu_to_node(int cpu)
1344     {
1345     int nid;
1346     diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
1347     index e9f72abc52b7..e91b40aa5417 100644
1348     --- a/arch/powerpc/kernel/exceptions-64s.S
1349     +++ b/arch/powerpc/kernel/exceptions-64s.S
1350     @@ -1617,7 +1617,7 @@ USE_TEXT_SECTION()
1351     .balign IFETCH_ALIGN_BYTES
1352     do_hash_page:
1353     #ifdef CONFIG_PPC_STD_MMU_64
1354     - lis r0,DSISR_BAD_FAULT_64S@h
1355     + lis r0,(DSISR_BAD_FAULT_64S|DSISR_DABRMATCH)@h
1356     ori r0,r0,DSISR_BAD_FAULT_64S@l
1357     and. r0,r4,r0 /* weird error? */
1358     bne- handle_page_fault /* if not, try to insert a HPTE */
1359     diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
1360     index 8c54166491e7..29b2fed93289 100644
1361     --- a/arch/powerpc/kernel/head_32.S
1362     +++ b/arch/powerpc/kernel/head_32.S
1363     @@ -388,7 +388,7 @@ DataAccess:
1364     EXCEPTION_PROLOG
1365     mfspr r10,SPRN_DSISR
1366     stw r10,_DSISR(r11)
1367     - andis. r0,r10,DSISR_BAD_FAULT_32S@h
1368     + andis. r0,r10,(DSISR_BAD_FAULT_32S|DSISR_DABRMATCH)@h
1369     bne 1f /* if not, try to put a PTE */
1370     mfspr r4,SPRN_DAR /* into the hash table */
1371     rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */
1372     diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
1373     index a51df9ef529d..a81279249bfb 100644
1374     --- a/arch/powerpc/mm/numa.c
1375     +++ b/arch/powerpc/mm/numa.c
1376     @@ -142,11 +142,6 @@ static void reset_numa_cpu_lookup_table(void)
1377     numa_cpu_lookup_table[cpu] = -1;
1378     }
1379    
1380     -static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
1381     -{
1382     - numa_cpu_lookup_table[cpu] = node;
1383     -}
1384     -
1385     static void map_cpu_to_node(int cpu, int node)
1386     {
1387     update_numa_cpu_lookup_table(cpu, node);
1388     diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
1389     index cfbbee941a76..17ae5c15a9e0 100644
1390     --- a/arch/powerpc/mm/pgtable-radix.c
1391     +++ b/arch/powerpc/mm/pgtable-radix.c
1392     @@ -17,6 +17,7 @@
1393     #include <linux/of_fdt.h>
1394     #include <linux/mm.h>
1395     #include <linux/string_helpers.h>
1396     +#include <linux/stop_machine.h>
1397    
1398     #include <asm/pgtable.h>
1399     #include <asm/pgalloc.h>
1400     @@ -671,6 +672,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
1401     pud_clear(pud);
1402     }
1403    
1404     +struct change_mapping_params {
1405     + pte_t *pte;
1406     + unsigned long start;
1407     + unsigned long end;
1408     + unsigned long aligned_start;
1409     + unsigned long aligned_end;
1410     +};
1411     +
1412     +static int stop_machine_change_mapping(void *data)
1413     +{
1414     + struct change_mapping_params *params =
1415     + (struct change_mapping_params *)data;
1416     +
1417     + if (!data)
1418     + return -1;
1419     +
1420     + spin_unlock(&init_mm.page_table_lock);
1421     + pte_clear(&init_mm, params->aligned_start, params->pte);
1422     + create_physical_mapping(params->aligned_start, params->start);
1423     + create_physical_mapping(params->end, params->aligned_end);
1424     + spin_lock(&init_mm.page_table_lock);
1425     + return 0;
1426     +}
1427     +
1428     static void remove_pte_table(pte_t *pte_start, unsigned long addr,
1429     unsigned long end)
1430     {
1431     @@ -699,6 +724,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
1432     }
1433     }
1434    
1435     +/*
1436     + * Helper that clears the PTE and potentially splits the mapping
1437     + */
1438     +static void split_kernel_mapping(unsigned long addr, unsigned long end,
1439     + unsigned long size, pte_t *pte)
1440     +{
1441     + unsigned long mask = ~(size - 1);
1442     + unsigned long aligned_start = addr & mask;
1443     + unsigned long aligned_end = addr + size;
1444     + struct change_mapping_params params;
1445     + bool split_region = false;
1446     +
1447     + if ((end - addr) < size) {
1448     + /*
1449     + * We are going to clear the PTE, but have not yet
1450     + * flushed the mapping; time to remap and flush. If
1451     + * the stale translation is visible outside this
1452     + * processor, or if we are running in code close to
1453     + * the mapping we cleared, we are in trouble.
1454     + */
1455     + if (overlaps_kernel_text(aligned_start, addr) ||
1456     + overlaps_kernel_text(end, aligned_end)) {
1457     + /*
1458     + * Hack, just return, don't pte_clear
1459     + */
1460     + WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
1461     + "text, not splitting\n", addr, end);
1462     + return;
1463     + }
1464     + split_region = true;
1465     + }
1466     +
1467     + if (split_region) {
1468     + params.pte = pte;
1469     + params.start = addr;
1470     + params.end = end;
1471     + params.aligned_start = addr & ~(size - 1);
1472     + params.aligned_end = min_t(unsigned long, aligned_end,
1473     + (unsigned long)__va(memblock_end_of_DRAM()));
1474     + stop_machine(stop_machine_change_mapping, &params, NULL);
1475     + return;
1476     + }
1477     +
1478     + pte_clear(&init_mm, addr, pte);
1479     +}
1480     +
1481     static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
1482     unsigned long end)
1483     {
1484     @@ -714,13 +785,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
1485     continue;
1486    
1487     if (pmd_huge(*pmd)) {
1488     - if (!IS_ALIGNED(addr, PMD_SIZE) ||
1489     - !IS_ALIGNED(next, PMD_SIZE)) {
1490     - WARN_ONCE(1, "%s: unaligned range\n", __func__);
1491     - continue;
1492     - }
1493     -
1494     - pte_clear(&init_mm, addr, (pte_t *)pmd);
1495     + split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
1496     continue;
1497     }
1498    
1499     @@ -745,13 +810,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
1500     continue;
1501    
1502     if (pud_huge(*pud)) {
1503     - if (!IS_ALIGNED(addr, PUD_SIZE) ||
1504     - !IS_ALIGNED(next, PUD_SIZE)) {
1505     - WARN_ONCE(1, "%s: unaligned range\n", __func__);
1506     - continue;
1507     - }
1508     -
1509     - pte_clear(&init_mm, addr, (pte_t *)pud);
1510     + split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
1511     continue;
1512     }
1513    
1514     @@ -777,13 +836,7 @@ static void remove_pagetable(unsigned long start, unsigned long end)
1515     continue;
1516    
1517     if (pgd_huge(*pgd)) {
1518     - if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
1519     - !IS_ALIGNED(next, PGDIR_SIZE)) {
1520     - WARN_ONCE(1, "%s: unaligned range\n", __func__);
1521     - continue;
1522     - }
1523     -
1524     - pte_clear(&init_mm, addr, (pte_t *)pgd);
1525     + split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
1526     continue;
1527     }
1528    
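
The split decision in split_kernel_mapping() reduces to a single comparison; only a partial removal forces the remap, and that remap must run under stop_machine() because other CPUs cannot be allowed to use the translation while it is being rebuilt. A sketch of the predicate (helper name is ours):

    #include <stdbool.h>

    /*
     * A huge mapping must be split only when the range being removed is
     * smaller than the mapping that covers it; otherwise the whole
     * entry can simply be cleared in place.
     */
    static bool needs_split(unsigned long addr, unsigned long end,
                            unsigned long size)
    {
            return (end - addr) < size;
    }
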
1529     diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
1530     index ac0717a90ca6..12f95b1f7d07 100644
1531     --- a/arch/powerpc/mm/pgtable_64.c
1532     +++ b/arch/powerpc/mm/pgtable_64.c
1533     @@ -483,6 +483,8 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
1534     if (old & PATB_HR) {
1535     asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
1536     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
1537     + asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
1538     + "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
1539     trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
1540     } else {
1541     asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
1542     diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
1543     index d304028641a2..4b295cfd5f7e 100644
1544     --- a/arch/powerpc/mm/tlb-radix.c
1545     +++ b/arch/powerpc/mm/tlb-radix.c
1546     @@ -453,14 +453,12 @@ void radix__flush_tlb_all(void)
1547     */
1548     asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
1549     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
1550     - trace_tlbie(0, 0, rb, rs, ric, prs, r);
1551     /*
1552     * now flush host entries by passing PRS = 0 and LPID == 0
1553     */
1554     asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
1555     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
1556     asm volatile("eieio; tlbsync; ptesync": : :"memory");
1557     - trace_tlbie(0, 0, rb, 0, ric, prs, r);
1558     }
1559    
1560     void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
1561     diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
1562     index fadb95efbb9e..b1ac8ac38434 100644
1563     --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
1564     +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
1565     @@ -36,6 +36,7 @@
1566     #include <asm/xics.h>
1567     #include <asm/xive.h>
1568     #include <asm/plpar_wrappers.h>
1569     +#include <asm/topology.h>
1570    
1571     #include "pseries.h"
1572     #include "offline_states.h"
1573     @@ -331,6 +332,7 @@ static void pseries_remove_processor(struct device_node *np)
1574     BUG_ON(cpu_online(cpu));
1575     set_cpu_present(cpu, false);
1576     set_hard_smp_processor_id(cpu, -1);
1577     + update_numa_cpu_lookup_table(cpu, -1);
1578     break;
1579     }
1580     if (cpu >= nr_cpu_ids)
1581     diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
1582     index d9c4c9366049..091f1d0d0af1 100644
1583     --- a/arch/powerpc/sysdev/xive/spapr.c
1584     +++ b/arch/powerpc/sysdev/xive/spapr.c
1585     @@ -356,7 +356,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
1586    
1587     rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
1588     if (rc) {
1589     - pr_err("Error %lld getting queue info prio %d\n", rc, prio);
1590     + pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
1591     + target, prio);
1592     rc = -EIO;
1593     goto fail;
1594     }
1595     @@ -370,7 +371,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
1596     /* Configure and enable the queue in HW */
1597     rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
1598     if (rc) {
1599     - pr_err("Error %lld setting queue for prio %d\n", rc, prio);
1600     + pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
1601     + target, prio);
1602     rc = -EIO;
1603     } else {
1604     q->qpage = qpage;
1605     @@ -389,8 +391,8 @@ static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
1606     if (IS_ERR(qpage))
1607     return PTR_ERR(qpage);
1608    
1609     - return xive_spapr_configure_queue(cpu, q, prio, qpage,
1610     - xive_queue_shift);
1611     + return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
1612     + q, prio, qpage, xive_queue_shift);
1613     }
1614    
1615     static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
1616     @@ -399,10 +401,12 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
1617     struct xive_q *q = &xc->queue[prio];
1618     unsigned int alloc_order;
1619     long rc;
1620     + int hw_cpu = get_hard_smp_processor_id(cpu);
1621    
1622     - rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0);
1623     + rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
1624     if (rc)
1625     - pr_err("Error %ld setting queue for prio %d\n", rc, prio);
1626     + pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
1627     + hw_cpu, prio);
1628    
1629     alloc_order = xive_alloc_order(xive_queue_shift);
1630     free_pages((unsigned long)q->qpage, alloc_order);
1631     diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
1632     index 59eea9c65d3e..79b7a3438d54 100644
1633     --- a/arch/s390/kernel/compat_linux.c
1634     +++ b/arch/s390/kernel/compat_linux.c
1635     @@ -110,7 +110,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid)
1636    
1637     COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid)
1638     {
1639     - return sys_setgid((gid_t)gid);
1640     + return sys_setgid(low2highgid(gid));
1641     }
1642    
1643     COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
1644     @@ -120,7 +120,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
1645    
1646     COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid)
1647     {
1648     - return sys_setuid((uid_t)uid);
1649     + return sys_setuid(low2highuid(uid));
1650     }
1651    
1652     COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid)
1653     @@ -173,12 +173,12 @@ COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp,
1654    
1655     COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid)
1656     {
1657     - return sys_setfsuid((uid_t)uid);
1658     + return sys_setfsuid(low2highuid(uid));
1659     }
1660    
1661     COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid)
1662     {
1663     - return sys_setfsgid((gid_t)gid);
1664     + return sys_setfsgid(low2highgid(gid));
1665     }
1666    
1667     static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
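
The s390 compat fixes above are about sentinel preservation: a 16-bit ID of -1 must widen to the 32-bit -1 so it keeps its special meaning, rather than becoming the perfectly ordinary ID 65535 that a plain (uid_t) cast produces. A standalone model of the conversion (typedef names are ours, to avoid clashing with libc):

    #include <stdint.h>

    typedef uint16_t old_uid_t;
    typedef uint32_t new_uid_t;

    static new_uid_t low2high(old_uid_t uid)
    {
            /* -1 must survive the widening as -1, not as 65535 */
            return (uid == (old_uid_t)-1) ? (new_uid_t)-1 : (new_uid_t)uid;
    }
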
1668     diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
1669     index e1d751ae2498..1a2526676a87 100644
1670     --- a/arch/sh/kernel/dwarf.c
1671     +++ b/arch/sh/kernel/dwarf.c
1672     @@ -1172,11 +1172,11 @@ static int __init dwarf_unwinder_init(void)
1673    
1674     dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
1675     sizeof(struct dwarf_frame), 0,
1676     - SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
1677     + SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
1678    
1679     dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
1680     sizeof(struct dwarf_reg), 0,
1681     - SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
1682     + SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
1683    
1684     dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ,
1685     dwarf_frame_cachep);
1686     diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
1687     index b2d9963d5978..68b1a67533ce 100644
1688     --- a/arch/sh/kernel/process.c
1689     +++ b/arch/sh/kernel/process.c
1690     @@ -59,7 +59,7 @@ void arch_task_cache_init(void)
1691    
1692     task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
1693     __alignof__(union thread_xstate),
1694     - SLAB_PANIC | SLAB_NOTRACK, NULL);
1695     + SLAB_PANIC, NULL);
1696     }
1697    
1698     #ifdef CONFIG_SH_FPU_EMU
1699     diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
1700     index a0cc1be767c8..984e9d65ea0d 100644
1701     --- a/arch/sparc/mm/init_64.c
1702     +++ b/arch/sparc/mm/init_64.c
1703     @@ -2934,7 +2934,7 @@ void __flush_tlb_all(void)
1704     pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
1705     unsigned long address)
1706     {
1707     - struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
1708     + struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1709     pte_t *pte = NULL;
1710    
1711     if (page)
1712     @@ -2946,7 +2946,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
1713     pgtable_t pte_alloc_one(struct mm_struct *mm,
1714     unsigned long address)
1715     {
1716     - struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
1717     + struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1718     if (!page)
1719     return NULL;
1720     if (!pgtable_page_ctor(page)) {
1721     diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h
1722     index 26775793c204..f0fdb268f8f2 100644
1723     --- a/arch/unicore32/include/asm/pgalloc.h
1724     +++ b/arch/unicore32/include/asm/pgalloc.h
1725     @@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
1726     #define pgd_alloc(mm) get_pgd_slow(mm)
1727     #define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)
1728    
1729     -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
1730     +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
1731    
1732     /*
1733     * Allocate one PTE table.
1734     diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
1735     index 17de6acc0eab..559b37bf5a2e 100644
1736     --- a/arch/x86/Kconfig
1737     +++ b/arch/x86/Kconfig
1738     @@ -111,7 +111,6 @@ config X86
1739     select HAVE_ARCH_JUMP_LABEL
1740     select HAVE_ARCH_KASAN if X86_64
1741     select HAVE_ARCH_KGDB
1742     - select HAVE_ARCH_KMEMCHECK
1743     select HAVE_ARCH_MMAP_RND_BITS if MMU
1744     select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
1745     select HAVE_ARCH_COMPAT_MMAP_BASES if MMU && COMPAT
1746     @@ -1443,7 +1442,7 @@ config ARCH_DMA_ADDR_T_64BIT
1747    
1748     config X86_DIRECT_GBPAGES
1749     def_bool y
1750     - depends on X86_64 && !DEBUG_PAGEALLOC && !KMEMCHECK
1751     + depends on X86_64 && !DEBUG_PAGEALLOC
1752     ---help---
1753     Certain kernel features effectively disable kernel
1754     linear 1 GB mappings (even if the CPU otherwise
1755     diff --git a/arch/x86/Makefile b/arch/x86/Makefile
1756     index 504b1a4535ac..fad55160dcb9 100644
1757     --- a/arch/x86/Makefile
1758     +++ b/arch/x86/Makefile
1759     @@ -158,11 +158,6 @@ ifdef CONFIG_X86_X32
1760     endif
1761     export CONFIG_X86_X32_ABI
1762    
1763     -# Don't unroll struct assignments with kmemcheck enabled
1764     -ifeq ($(CONFIG_KMEMCHECK),y)
1765     - KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
1766     -endif
1767     -
1768     #
1769     # If the function graph tracer is used with mcount instead of fentry,
1770     # '-maccumulate-outgoing-args' is needed to prevent a GCC bug
1771     diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
1772     index 3f48f695d5e6..dce7092ab24a 100644
1773     --- a/arch/x86/entry/calling.h
1774     +++ b/arch/x86/entry/calling.h
1775     @@ -97,80 +97,69 @@ For 32-bit we have the following conventions - kernel is built with
1776    
1777     #define SIZEOF_PTREGS 21*8
1778    
1779     - .macro ALLOC_PT_GPREGS_ON_STACK
1780     - addq $-(15*8), %rsp
1781     - .endm
1782     +.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
1783     + /*
1784     + * Push registers and sanitize registers of values that a
1785     + * speculation attack might otherwise want to exploit. The
1786     + * lower registers are likely clobbered well before they
1787     + * could be put to use in a speculative execution gadget.
1788     + * Interleave XOR with PUSH for better uop scheduling:
1789     + */
1790     + pushq %rdi /* pt_regs->di */
1791     + pushq %rsi /* pt_regs->si */
1792     + pushq \rdx /* pt_regs->dx */
1793     + pushq %rcx /* pt_regs->cx */
1794     + pushq \rax /* pt_regs->ax */
1795     + pushq %r8 /* pt_regs->r8 */
1796     + xorq %r8, %r8 /* nospec r8 */
1797     + pushq %r9 /* pt_regs->r9 */
1798     + xorq %r9, %r9 /* nospec r9 */
1799     + pushq %r10 /* pt_regs->r10 */
1800     + xorq %r10, %r10 /* nospec r10 */
1801     + pushq %r11 /* pt_regs->r11 */
1802     + xorq %r11, %r11 /* nospec r11 */
1803     + pushq %rbx /* pt_regs->rbx */
1804     + xorl %ebx, %ebx /* nospec rbx */
1805     + pushq %rbp /* pt_regs->rbp */
1806     + xorl %ebp, %ebp /* nospec rbp */
1807     + pushq %r12 /* pt_regs->r12 */
1808     + xorq %r12, %r12 /* nospec r12 */
1809     + pushq %r13 /* pt_regs->r13 */
1810     + xorq %r13, %r13 /* nospec r13 */
1811     + pushq %r14 /* pt_regs->r14 */
1812     + xorq %r14, %r14 /* nospec r14 */
1813     + pushq %r15 /* pt_regs->r15 */
1814     + xorq %r15, %r15 /* nospec r15 */
1815     + UNWIND_HINT_REGS
1816     +.endm
1817    
1818     - .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
1819     - .if \r11
1820     - movq %r11, 6*8+\offset(%rsp)
1821     - .endif
1822     - .if \r8910
1823     - movq %r10, 7*8+\offset(%rsp)
1824     - movq %r9, 8*8+\offset(%rsp)
1825     - movq %r8, 9*8+\offset(%rsp)
1826     - .endif
1827     - .if \rax
1828     - movq %rax, 10*8+\offset(%rsp)
1829     - .endif
1830     - .if \rcx
1831     - movq %rcx, 11*8+\offset(%rsp)
1832     - .endif
1833     - movq %rdx, 12*8+\offset(%rsp)
1834     - movq %rsi, 13*8+\offset(%rsp)
1835     - movq %rdi, 14*8+\offset(%rsp)
1836     - UNWIND_HINT_REGS offset=\offset extra=0
1837     - .endm
1838     - .macro SAVE_C_REGS offset=0
1839     - SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
1840     - .endm
1841     - .macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
1842     - SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
1843     - .endm
1844     - .macro SAVE_C_REGS_EXCEPT_R891011
1845     - SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
1846     - .endm
1847     - .macro SAVE_C_REGS_EXCEPT_RCX_R891011
1848     - SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
1849     - .endm
1850     - .macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
1851     - SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
1852     - .endm
1853     -
1854     - .macro SAVE_EXTRA_REGS offset=0
1855     - movq %r15, 0*8+\offset(%rsp)
1856     - movq %r14, 1*8+\offset(%rsp)
1857     - movq %r13, 2*8+\offset(%rsp)
1858     - movq %r12, 3*8+\offset(%rsp)
1859     - movq %rbp, 4*8+\offset(%rsp)
1860     - movq %rbx, 5*8+\offset(%rsp)
1861     - UNWIND_HINT_REGS offset=\offset
1862     - .endm
1863     -
1864     - .macro POP_EXTRA_REGS
1865     +.macro POP_REGS pop_rdi=1 skip_r11rcx=0
1866     popq %r15
1867     popq %r14
1868     popq %r13
1869     popq %r12
1870     popq %rbp
1871     popq %rbx
1872     - .endm
1873     -
1874     - .macro POP_C_REGS
1875     + .if \skip_r11rcx
1876     + popq %rsi
1877     + .else
1878     popq %r11
1879     + .endif
1880     popq %r10
1881     popq %r9
1882     popq %r8
1883     popq %rax
1884     + .if \skip_r11rcx
1885     + popq %rsi
1886     + .else
1887     popq %rcx
1888     + .endif
1889     popq %rdx
1890     popq %rsi
1891     + .if \pop_rdi
1892     popq %rdi
1893     - .endm
1894     -
1895     - .macro icebp
1896     - .byte 0xf1
1897     - .endm
1898     + .endif
1899     +.endm
1900    
1901     /*
1902     * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
1903     @@ -178,7 +167,7 @@ For 32-bit we have the following conventions - kernel is built with
1904     * is just setting the LSB, which makes it an invalid stack address and is also
1905     * a signal to the unwinder that it's a pt_regs pointer in disguise.
1906     *
1907     - * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts
1908     + * NOTE: This macro must be used *after* PUSH_AND_CLEAR_REGS because it corrupts
1909     * the original rbp.
1910     */
1911     .macro ENCODE_FRAME_POINTER ptregs_offset=0
1912     diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
1913     index 16e2d72e79a0..68a2d76e4f8f 100644
1914     --- a/arch/x86/entry/entry_64.S
1915     +++ b/arch/x86/entry/entry_64.S
1916     @@ -209,7 +209,7 @@ ENTRY(entry_SYSCALL_64)
1917    
1918     swapgs
1919     /*
1920     - * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it
1921     + * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it
1922     * is not required to switch CR3.
1923     */
1924     movq %rsp, PER_CPU_VAR(rsp_scratch)
1925     @@ -223,22 +223,8 @@ ENTRY(entry_SYSCALL_64)
1926     pushq %rcx /* pt_regs->ip */
1927     GLOBAL(entry_SYSCALL_64_after_hwframe)
1928     pushq %rax /* pt_regs->orig_ax */
1929     - pushq %rdi /* pt_regs->di */
1930     - pushq %rsi /* pt_regs->si */
1931     - pushq %rdx /* pt_regs->dx */
1932     - pushq %rcx /* pt_regs->cx */
1933     - pushq $-ENOSYS /* pt_regs->ax */
1934     - pushq %r8 /* pt_regs->r8 */
1935     - pushq %r9 /* pt_regs->r9 */
1936     - pushq %r10 /* pt_regs->r10 */
1937     - pushq %r11 /* pt_regs->r11 */
1938     - pushq %rbx /* pt_regs->rbx */
1939     - pushq %rbp /* pt_regs->rbp */
1940     - pushq %r12 /* pt_regs->r12 */
1941     - pushq %r13 /* pt_regs->r13 */
1942     - pushq %r14 /* pt_regs->r14 */
1943     - pushq %r15 /* pt_regs->r15 */
1944     - UNWIND_HINT_REGS
1945     +
1946     + PUSH_AND_CLEAR_REGS rax=$-ENOSYS
1947    
1948     TRACE_IRQS_OFF
1949    
1950     @@ -317,15 +303,7 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
1951     syscall_return_via_sysret:
1952     /* rcx and r11 are already restored (see code above) */
1953     UNWIND_HINT_EMPTY
1954     - POP_EXTRA_REGS
1955     - popq %rsi /* skip r11 */
1956     - popq %r10
1957     - popq %r9
1958     - popq %r8
1959     - popq %rax
1960     - popq %rsi /* skip rcx */
1961     - popq %rdx
1962     - popq %rsi
1963     + POP_REGS pop_rdi=0 skip_r11rcx=1
1964    
1965     /*
1966     * Now all regs are restored except RSP and RDI.
1967     @@ -555,9 +533,7 @@ END(irq_entries_start)
1968     call switch_to_thread_stack
1969     1:
1970    
1971     - ALLOC_PT_GPREGS_ON_STACK
1972     - SAVE_C_REGS
1973     - SAVE_EXTRA_REGS
1974     + PUSH_AND_CLEAR_REGS
1975     ENCODE_FRAME_POINTER
1976    
1977     testb $3, CS(%rsp)
1978     @@ -618,15 +594,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
1979     ud2
1980     1:
1981     #endif
1982     - POP_EXTRA_REGS
1983     - popq %r11
1984     - popq %r10
1985     - popq %r9
1986     - popq %r8
1987     - popq %rax
1988     - popq %rcx
1989     - popq %rdx
1990     - popq %rsi
1991     + POP_REGS pop_rdi=0
1992    
1993     /*
1994     * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
1995     @@ -684,8 +652,7 @@ GLOBAL(restore_regs_and_return_to_kernel)
1996     ud2
1997     1:
1998     #endif
1999     - POP_EXTRA_REGS
2000     - POP_C_REGS
2001     + POP_REGS
2002     addq $8, %rsp /* skip regs->orig_ax */
2003     INTERRUPT_RETURN
2004    
2005     @@ -900,7 +867,9 @@ ENTRY(\sym)
2006     pushq $-1 /* ORIG_RAX: no syscall to restart */
2007     .endif
2008    
2009     - ALLOC_PT_GPREGS_ON_STACK
2010     + /* Save all registers in pt_regs */
2011     + PUSH_AND_CLEAR_REGS
2012     + ENCODE_FRAME_POINTER
2013    
2014     .if \paranoid < 2
2015     testb $3, CS(%rsp) /* If coming from userspace, switch stacks */
2016     @@ -1111,9 +1080,7 @@ ENTRY(xen_failsafe_callback)
2017     addq $0x30, %rsp
2018     UNWIND_HINT_IRET_REGS
2019     pushq $-1 /* orig_ax = -1 => not a system call */
2020     - ALLOC_PT_GPREGS_ON_STACK
2021     - SAVE_C_REGS
2022     - SAVE_EXTRA_REGS
2023     + PUSH_AND_CLEAR_REGS
2024     ENCODE_FRAME_POINTER
2025     jmp error_exit
2026     END(xen_failsafe_callback)
2027     @@ -1150,16 +1117,13 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
2028     #endif
2029    
2030     /*
2031     - * Save all registers in pt_regs, and switch gs if needed.
2032     + * Switch gs if needed.
2033     * Use slow, but surefire "are we in kernel?" check.
2034     * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
2035     */
2036     ENTRY(paranoid_entry)
2037     UNWIND_HINT_FUNC
2038     cld
2039     - SAVE_C_REGS 8
2040     - SAVE_EXTRA_REGS 8
2041     - ENCODE_FRAME_POINTER 8
2042     movl $1, %ebx
2043     movl $MSR_GS_BASE, %ecx
2044     rdmsr
2045     @@ -1198,21 +1162,18 @@ ENTRY(paranoid_exit)
2046     jmp .Lparanoid_exit_restore
2047     .Lparanoid_exit_no_swapgs:
2048     TRACE_IRQS_IRETQ_DEBUG
2049     + RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
2050     .Lparanoid_exit_restore:
2051     jmp restore_regs_and_return_to_kernel
2052     END(paranoid_exit)
2053    
2054     /*
2055     - * Save all registers in pt_regs, and switch gs if needed.
2056     + * Switch gs if needed.
2057     * Return: EBX=0: came from user mode; EBX=1: otherwise
2058     */
2059     ENTRY(error_entry)
2060     - UNWIND_HINT_FUNC
2061     + UNWIND_HINT_REGS offset=8
2062     cld
2063     - SAVE_C_REGS 8
2064     - SAVE_EXTRA_REGS 8
2065     - ENCODE_FRAME_POINTER 8
2066     - xorl %ebx, %ebx
2067     testb $3, CS+8(%rsp)
2068     jz .Lerror_kernelspace
2069    
2070     @@ -1393,22 +1354,7 @@ ENTRY(nmi)
2071     pushq 1*8(%rdx) /* pt_regs->rip */
2072     UNWIND_HINT_IRET_REGS
2073     pushq $-1 /* pt_regs->orig_ax */
2074     - pushq %rdi /* pt_regs->di */
2075     - pushq %rsi /* pt_regs->si */
2076     - pushq (%rdx) /* pt_regs->dx */
2077     - pushq %rcx /* pt_regs->cx */
2078     - pushq %rax /* pt_regs->ax */
2079     - pushq %r8 /* pt_regs->r8 */
2080     - pushq %r9 /* pt_regs->r9 */
2081     - pushq %r10 /* pt_regs->r10 */
2082     - pushq %r11 /* pt_regs->r11 */
2083     - pushq %rbx /* pt_regs->rbx */
2084     - pushq %rbp /* pt_regs->rbp */
2085     - pushq %r12 /* pt_regs->r12 */
2086     - pushq %r13 /* pt_regs->r13 */
2087     - pushq %r14 /* pt_regs->r14 */
2088     - pushq %r15 /* pt_regs->r15 */
2089     - UNWIND_HINT_REGS
2090     + PUSH_AND_CLEAR_REGS rdx=(%rdx)
2091     ENCODE_FRAME_POINTER
2092    
2093     /*
2094     @@ -1618,7 +1564,8 @@ end_repeat_nmi:
2095     * frame to point back to repeat_nmi.
2096     */
2097     pushq $-1 /* ORIG_RAX: no syscall to restart */
2098     - ALLOC_PT_GPREGS_ON_STACK
2099     + PUSH_AND_CLEAR_REGS
2100     + ENCODE_FRAME_POINTER
2101    
2102     /*
2103     * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
2104     @@ -1642,8 +1589,7 @@ end_repeat_nmi:
2105     nmi_swapgs:
2106     SWAPGS_UNSAFE_STACK
2107     nmi_restore:
2108     - POP_EXTRA_REGS
2109     - POP_C_REGS
2110     + POP_REGS
2111    
2112     /*
2113     * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
2114     diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
2115     index 98d5358e4041..fd65e016e413 100644
2116     --- a/arch/x86/entry/entry_64_compat.S
2117     +++ b/arch/x86/entry/entry_64_compat.S
2118     @@ -85,15 +85,25 @@ ENTRY(entry_SYSENTER_compat)
2119     pushq %rcx /* pt_regs->cx */
2120     pushq $-ENOSYS /* pt_regs->ax */
2121     pushq $0 /* pt_regs->r8 = 0 */
2122     + xorq %r8, %r8 /* nospec r8 */
2123     pushq $0 /* pt_regs->r9 = 0 */
2124     + xorq %r9, %r9 /* nospec r9 */
2125     pushq $0 /* pt_regs->r10 = 0 */
2126     + xorq %r10, %r10 /* nospec r10 */
2127     pushq $0 /* pt_regs->r11 = 0 */
2128     + xorq %r11, %r11 /* nospec r11 */
2129     pushq %rbx /* pt_regs->rbx */
2130     + xorl %ebx, %ebx /* nospec rbx */
2131     pushq %rbp /* pt_regs->rbp (will be overwritten) */
2132     + xorl %ebp, %ebp /* nospec rbp */
2133     pushq $0 /* pt_regs->r12 = 0 */
2134     + xorq %r12, %r12 /* nospec r12 */
2135     pushq $0 /* pt_regs->r13 = 0 */
2136     + xorq %r13, %r13 /* nospec r13 */
2137     pushq $0 /* pt_regs->r14 = 0 */
2138     + xorq %r14, %r14 /* nospec r14 */
2139     pushq $0 /* pt_regs->r15 = 0 */
2140     + xorq %r15, %r15 /* nospec r15 */
2141     cld
2142    
2143     /*
2144     @@ -214,15 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
2145     pushq %rbp /* pt_regs->cx (stashed in bp) */
2146     pushq $-ENOSYS /* pt_regs->ax */
2147     pushq $0 /* pt_regs->r8 = 0 */
2148     + xorq %r8, %r8 /* nospec r8 */
2149     pushq $0 /* pt_regs->r9 = 0 */
2150     + xorq %r9, %r9 /* nospec r9 */
2151     pushq $0 /* pt_regs->r10 = 0 */
2152     + xorq %r10, %r10 /* nospec r10 */
2153     pushq $0 /* pt_regs->r11 = 0 */
2154     + xorq %r11, %r11 /* nospec r11 */
2155     pushq %rbx /* pt_regs->rbx */
2156     + xorl %ebx, %ebx /* nospec rbx */
2157     pushq %rbp /* pt_regs->rbp (will be overwritten) */
2158     + xorl %ebp, %ebp /* nospec rbp */
2159     pushq $0 /* pt_regs->r12 = 0 */
2160     + xorq %r12, %r12 /* nospec r12 */
2161     pushq $0 /* pt_regs->r13 = 0 */
2162     + xorq %r13, %r13 /* nospec r13 */
2163     pushq $0 /* pt_regs->r14 = 0 */
2164     + xorq %r14, %r14 /* nospec r14 */
2165     pushq $0 /* pt_regs->r15 = 0 */
2166     + xorq %r15, %r15 /* nospec r15 */
2167    
2168     /*
2169     * User mode is traced as though IRQs are on, and SYSENTER
2170     @@ -338,15 +358,25 @@ ENTRY(entry_INT80_compat)
2171     pushq %rcx /* pt_regs->cx */
2172     pushq $-ENOSYS /* pt_regs->ax */
2173     pushq $0 /* pt_regs->r8 = 0 */
2174     + xorq %r8, %r8 /* nospec r8 */
2175     pushq $0 /* pt_regs->r9 = 0 */
2176     + xorq %r9, %r9 /* nospec r9 */
2177     pushq $0 /* pt_regs->r10 = 0 */
2178     + xorq %r10, %r10 /* nospec r10 */
2179     pushq $0 /* pt_regs->r11 = 0 */
2180     + xorq %r11, %r11 /* nospec r11 */
2181     pushq %rbx /* pt_regs->rbx */
2182     + xorl %ebx, %ebx /* nospec rbx */
2183     pushq %rbp /* pt_regs->rbp */
2184     + xorl %ebp, %ebp /* nospec rbp */
2185     pushq %r12 /* pt_regs->r12 */
2186     + xorq %r12, %r12 /* nospec r12 */
2187     pushq %r13 /* pt_regs->r13 */
2188     + xorq %r13, %r13 /* nospec r13 */
2189     pushq %r14 /* pt_regs->r14 */
2190     + xorq %r14, %r14 /* nospec r14 */
2191     pushq %r15 /* pt_regs->r15 */
2192     + xorq %r15, %r15 /* nospec r15 */
2193     cld
2194    
2195     /*
2196     diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
2197     index 09c26a4f139c..1c2558430cf0 100644
2198     --- a/arch/x86/events/intel/core.c
2199     +++ b/arch/x86/events/intel/core.c
2200     @@ -3559,7 +3559,7 @@ static int intel_snb_pebs_broken(int cpu)
2201     break;
2202    
2203     case INTEL_FAM6_SANDYBRIDGE_X:
2204     - switch (cpu_data(cpu).x86_mask) {
2205     + switch (cpu_data(cpu).x86_stepping) {
2206     case 6: rev = 0x618; break;
2207     case 7: rev = 0x70c; break;
2208     }
2209     diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
2210     index ae64d0b69729..cf372b90557e 100644
2211     --- a/arch/x86/events/intel/lbr.c
2212     +++ b/arch/x86/events/intel/lbr.c
2213     @@ -1186,7 +1186,7 @@ void __init intel_pmu_lbr_init_atom(void)
2214     * on PMU interrupt
2215     */
2216     if (boot_cpu_data.x86_model == 28
2217     - && boot_cpu_data.x86_mask < 10) {
2218     + && boot_cpu_data.x86_stepping < 10) {
2219     pr_cont("LBR disabled due to erratum");
2220     return;
2221     }
2222     diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
2223     index a5604c352930..408879b0c0d4 100644
2224     --- a/arch/x86/events/intel/p6.c
2225     +++ b/arch/x86/events/intel/p6.c
2226     @@ -234,7 +234,7 @@ static __initconst const struct x86_pmu p6_pmu = {
2227    
2228     static __init void p6_pmu_rdpmc_quirk(void)
2229     {
2230     - if (boot_cpu_data.x86_mask < 9) {
2231     + if (boot_cpu_data.x86_stepping < 9) {
2232     /*
2233     * PPro erratum 26; fixed in stepping 9 and above.
2234     */
2235     diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
2236     index 8d0ec9df1cbe..f077401869ee 100644
2237     --- a/arch/x86/include/asm/acpi.h
2238     +++ b/arch/x86/include/asm/acpi.h
2239     @@ -94,7 +94,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
2240     if (boot_cpu_data.x86 == 0x0F &&
2241     boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
2242     boot_cpu_data.x86_model <= 0x05 &&
2243     - boot_cpu_data.x86_mask < 0x0A)
2244     + boot_cpu_data.x86_stepping < 0x0A)
2245     return 1;
2246     else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
2247     return 1;
2248     diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
2249     index 1e7c955b6303..4db77731e130 100644
2250     --- a/arch/x86/include/asm/barrier.h
2251     +++ b/arch/x86/include/asm/barrier.h
2252     @@ -40,7 +40,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
2253    
2254     asm ("cmp %1,%2; sbb %0,%0;"
2255     :"=r" (mask)
2256     - :"r"(size),"r" (index)
2257     + :"g"(size),"r" (index)
2258     :"cc");
2259     return mask;
2260     }
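
For readers new to the idiom: "cmp %1,%2" sets the carry flag exactly when index < size, and "sbb %0,%0" smears that borrow into an all-ones or all-zero mask, so no conditional branch exists for the CPU to mispredict; the "g" constraint merely lets the compiler also pass size from memory or as an immediate. A C model of the resulting values (the inline asm, not this sketch, is what guarantees branchless code generation):

    #include <stdio.h>

    static unsigned long mask_nospec(unsigned long index, unsigned long size)
    {
            /* all-ones when index < size, zero otherwise */
            return 0UL - (unsigned long)(index < size);
    }

    int main(void)
    {
            printf("%lx\n", mask_nospec(3, 8));  /* ffffffffffffffff */
            printf("%lx\n", mask_nospec(9, 8));  /* 0 */
            return 0;
    }
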
2261     diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
2262     index 34d99af43994..6804d6642767 100644
2263     --- a/arch/x86/include/asm/bug.h
2264     +++ b/arch/x86/include/asm/bug.h
2265     @@ -5,23 +5,20 @@
2266     #include <linux/stringify.h>
2267    
2268     /*
2269     - * Since some emulators terminate on UD2, we cannot use it for WARN.
2270     - * Since various instruction decoders disagree on the length of UD1,
2271     - * we cannot use it either. So use UD0 for WARN.
2272     + * Even though some emulators terminate on UD2, we use it for WARN().
2273     *
2274     - * (binutils knows about "ud1" but {en,de}codes it as 2 bytes, whereas
2275     - * our kernel decoder thinks it takes a ModRM byte, which seems consistent
2276     - * with various things like the Intel SDM instruction encoding rules)
2277     + * This is because various instruction decoders/specs disagree on
2278     + * the encoding of UD0/UD1.
2279     */
2280    
2281     -#define ASM_UD0 ".byte 0x0f, 0xff"
2282     +#define ASM_UD0 ".byte 0x0f, 0xff" /* + ModRM (for Intel) */
2283     #define ASM_UD1 ".byte 0x0f, 0xb9" /* + ModRM */
2284     #define ASM_UD2 ".byte 0x0f, 0x0b"
2285    
2286     #define INSN_UD0 0xff0f
2287     #define INSN_UD2 0x0b0f
2288    
2289     -#define LEN_UD0 2
2290     +#define LEN_UD2 2
2291    
2292     #ifdef CONFIG_GENERIC_BUG
2293    
2294     @@ -77,7 +74,11 @@ do { \
2295     unreachable(); \
2296     } while (0)
2297    
2298     -#define __WARN_FLAGS(flags) _BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags))
2299     +#define __WARN_FLAGS(flags) \
2300     +do { \
2301     + _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \
2302     + annotate_reachable(); \
2303     +} while (0)
2304    
2305     #include <asm-generic/bug.h>
2306    
2307     diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
2308     index 836ca1178a6a..69f16f0729d0 100644
2309     --- a/arch/x86/include/asm/dma-mapping.h
2310     +++ b/arch/x86/include/asm/dma-mapping.h
2311     @@ -7,7 +7,6 @@
2312     * Documentation/DMA-API.txt for documentation.
2313     */
2314    
2315     -#include <linux/kmemcheck.h>
2316     #include <linux/scatterlist.h>
2317     #include <linux/dma-debug.h>
2318     #include <asm/io.h>
2319     diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h
2320     deleted file mode 100644
2321     index 945a0337fbcf..000000000000
2322     --- a/arch/x86/include/asm/kmemcheck.h
2323     +++ /dev/null
2324     @@ -1,43 +0,0 @@
2325     -/* SPDX-License-Identifier: GPL-2.0 */
2326     -#ifndef ASM_X86_KMEMCHECK_H
2327     -#define ASM_X86_KMEMCHECK_H
2328     -
2329     -#include <linux/types.h>
2330     -#include <asm/ptrace.h>
2331     -
2332     -#ifdef CONFIG_KMEMCHECK
2333     -bool kmemcheck_active(struct pt_regs *regs);
2334     -
2335     -void kmemcheck_show(struct pt_regs *regs);
2336     -void kmemcheck_hide(struct pt_regs *regs);
2337     -
2338     -bool kmemcheck_fault(struct pt_regs *regs,
2339     - unsigned long address, unsigned long error_code);
2340     -bool kmemcheck_trap(struct pt_regs *regs);
2341     -#else
2342     -static inline bool kmemcheck_active(struct pt_regs *regs)
2343     -{
2344     - return false;
2345     -}
2346     -
2347     -static inline void kmemcheck_show(struct pt_regs *regs)
2348     -{
2349     -}
2350     -
2351     -static inline void kmemcheck_hide(struct pt_regs *regs)
2352     -{
2353     -}
2354     -
2355     -static inline bool kmemcheck_fault(struct pt_regs *regs,
2356     - unsigned long address, unsigned long error_code)
2357     -{
2358     - return false;
2359     -}
2360     -
2361     -static inline bool kmemcheck_trap(struct pt_regs *regs)
2362     -{
2363     - return false;
2364     -}
2365     -#endif /* CONFIG_KMEMCHECK */
2366     -
2367     -#endif
2368     diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
2369     index 4d57894635f2..76b058533e47 100644
2370     --- a/arch/x86/include/asm/nospec-branch.h
2371     +++ b/arch/x86/include/asm/nospec-branch.h
2372     @@ -6,6 +6,7 @@
2373     #include <asm/alternative.h>
2374     #include <asm/alternative-asm.h>
2375     #include <asm/cpufeatures.h>
2376     +#include <asm/msr-index.h>
2377    
2378     #ifdef __ASSEMBLY__
2379    
2380     @@ -164,10 +165,15 @@ static inline void vmexit_fill_RSB(void)
2381    
2382     static inline void indirect_branch_prediction_barrier(void)
2383     {
2384     - alternative_input("",
2385     - "call __ibp_barrier",
2386     - X86_FEATURE_USE_IBPB,
2387     - ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory"));
2388     + asm volatile(ALTERNATIVE("",
2389     + "movl %[msr], %%ecx\n\t"
2390     + "movl %[val], %%eax\n\t"
2391     + "movl $0, %%edx\n\t"
2392     + "wrmsr",
2393     + X86_FEATURE_USE_IBPB)
2394     + : : [msr] "i" (MSR_IA32_PRED_CMD),
2395     + [val] "i" (PRED_CMD_IBPB)
2396     + : "eax", "ecx", "edx", "memory");
2397     }
2398    
2399     #endif /* __ASSEMBLY__ */
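
Expanded, the alternative above is a single WRMSR to the prediction-command MSR, replacing the out-of-line __ibp_barrier() call. A minimal kernel-mode sketch of the same write (constant values as architecturally defined; wrapper names are ours):

    #include <stdint.h>

    #define MSR_IA32_PRED_CMD 0x00000049
    #define PRED_CMD_IBPB     (1U << 0)

    static inline void wrmsr32(uint32_t msr, uint32_t lo, uint32_t hi)
    {
            __asm__ __volatile__("wrmsr" : : "c"(msr), "a"(lo), "d"(hi)
                                 : "memory");
    }

    /* Publish an Indirect Branch Prediction Barrier. */
    static inline void ibpb(void)
    {
            wrmsr32(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
    }
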
2400     diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
2401     index 4baa6bceb232..d652a3808065 100644
2402     --- a/arch/x86/include/asm/page_64.h
2403     +++ b/arch/x86/include/asm/page_64.h
2404     @@ -52,10 +52,6 @@ static inline void clear_page(void *page)
2405    
2406     void copy_page(void *to, void *from);
2407    
2408     -#ifdef CONFIG_X86_MCE
2409     -#define arch_unmap_kpfn arch_unmap_kpfn
2410     -#endif
2411     -
2412     #endif /* !__ASSEMBLY__ */
2413    
2414     #ifdef CONFIG_X86_VSYSCALL_EMULATION
2415     diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
2416     index 892df375b615..554841fab717 100644
2417     --- a/arch/x86/include/asm/paravirt.h
2418     +++ b/arch/x86/include/asm/paravirt.h
2419     @@ -297,9 +297,9 @@ static inline void __flush_tlb_global(void)
2420     {
2421     PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
2422     }
2423     -static inline void __flush_tlb_single(unsigned long addr)
2424     +static inline void __flush_tlb_one_user(unsigned long addr)
2425     {
2426     - PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
2427     + PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr);
2428     }
2429    
2430     static inline void flush_tlb_others(const struct cpumask *cpumask,
2431     diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
2432     index 6ec54d01972d..f624f1f10316 100644
2433     --- a/arch/x86/include/asm/paravirt_types.h
2434     +++ b/arch/x86/include/asm/paravirt_types.h
2435     @@ -217,7 +217,7 @@ struct pv_mmu_ops {
2436     /* TLB operations */
2437     void (*flush_tlb_user)(void);
2438     void (*flush_tlb_kernel)(void);
2439     - void (*flush_tlb_single)(unsigned long addr);
2440     + void (*flush_tlb_one_user)(unsigned long addr);
2441     void (*flush_tlb_others)(const struct cpumask *cpus,
2442     const struct flush_tlb_info *info);
2443    
2444     diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
2445     index 211368922cad..8b8f1f14a0bf 100644
2446     --- a/arch/x86/include/asm/pgtable.h
2447     +++ b/arch/x86/include/asm/pgtable.h
2448     @@ -668,11 +668,6 @@ static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
2449     return false;
2450     }
2451    
2452     -static inline int pte_hidden(pte_t pte)
2453     -{
2454     - return pte_flags(pte) & _PAGE_HIDDEN;
2455     -}
2456     -
2457     static inline int pmd_present(pmd_t pmd)
2458     {
2459     /*
2460     diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
2461     index e67c0620aec2..e55466760ff8 100644
2462     --- a/arch/x86/include/asm/pgtable_32.h
2463     +++ b/arch/x86/include/asm/pgtable_32.h
2464     @@ -61,7 +61,7 @@ void paging_init(void);
2465     #define kpte_clear_flush(ptep, vaddr) \
2466     do { \
2467     pte_clear(&init_mm, (vaddr), (ptep)); \
2468     - __flush_tlb_one((vaddr)); \
2469     + __flush_tlb_one_kernel((vaddr)); \
2470     } while (0)
2471    
2472     #endif /* !__ASSEMBLY__ */
2473     diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
2474     index 9e9b05fc4860..3696398a9475 100644
2475     --- a/arch/x86/include/asm/pgtable_types.h
2476     +++ b/arch/x86/include/asm/pgtable_types.h
2477     @@ -32,7 +32,6 @@
2478    
2479     #define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1
2480     #define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1
2481     -#define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
2482     #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
2483     #define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
2484    
2485     @@ -79,18 +78,6 @@
2486     #define _PAGE_KNL_ERRATUM_MASK 0
2487     #endif
2488    
2489     -#ifdef CONFIG_KMEMCHECK
2490     -#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
2491     -#else
2492     -#define _PAGE_HIDDEN (_AT(pteval_t, 0))
2493     -#endif
2494     -
2495     -/*
2496     - * The same hidden bit is used by kmemcheck, but since kmemcheck
2497     - * works on kernel pages while soft-dirty engine on user space,
2498     - * they do not conflict with each other.
2499     - */
2500     -
2501     #ifdef CONFIG_MEM_SOFT_DIRTY
2502     #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
2503     #else
2504     diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
2505     index c57c6e77c29f..15fc074bd628 100644
2506     --- a/arch/x86/include/asm/processor.h
2507     +++ b/arch/x86/include/asm/processor.h
2508     @@ -91,7 +91,7 @@ struct cpuinfo_x86 {
2509     __u8 x86; /* CPU family */
2510     __u8 x86_vendor; /* CPU vendor */
2511     __u8 x86_model;
2512     - __u8 x86_mask;
2513     + __u8 x86_stepping;
2514     #ifdef CONFIG_X86_64
2515     /* Number of 4K pages in DTLB/ITLB combined(in pages): */
2516     int x86_tlbsize;
2517     @@ -109,7 +109,7 @@ struct cpuinfo_x86 {
2518     char x86_vendor_id[16];
2519     char x86_model_id[64];
2520     /* in KB - valid for CPUS which support this call: */
2521     - int x86_cache_size;
2522     + unsigned int x86_cache_size;
2523     int x86_cache_alignment; /* In bytes */
2524     /* Cache QoS architectural values: */
2525     int x86_cache_max_rmid; /* max index */
2526     @@ -968,7 +968,4 @@ bool xen_set_default_idle(void);
2527    
2528     void stop_this_cpu(void *dummy);
2529     void df_debug(struct pt_regs *regs, long error_code);
2530     -
2531     -void __ibp_barrier(void);
2532     -
2533     #endif /* _ASM_X86_PROCESSOR_H */
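
x86_stepping is a straight rename of the old x86_mask field, and the many x86_mask -> x86_stepping hunks throughout this patch follow mechanically from it. The value itself is CPUID.1:EAX[3:0]; a quick userspace decode using the GCC/Clang <cpuid.h> helper:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return 1;
            printf("stepping: %u\n", eax & 0xf);  /* CPUID.1:EAX[3:0] */
            return 0;
    }
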
2534     diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
2535     index 076502241eae..55d392c6bd29 100644
2536     --- a/arch/x86/include/asm/string_32.h
2537     +++ b/arch/x86/include/asm/string_32.h
2538     @@ -179,8 +179,6 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
2539     * No 3D Now!
2540     */
2541    
2542     -#ifndef CONFIG_KMEMCHECK
2543     -
2544     #if (__GNUC__ >= 4)
2545     #define memcpy(t, f, n) __builtin_memcpy(t, f, n)
2546     #else
2547     @@ -189,13 +187,6 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
2548     ? __constant_memcpy((t), (f), (n)) \
2549     : __memcpy((t), (f), (n)))
2550     #endif
2551     -#else
2552     -/*
2553     - * kmemcheck becomes very happy if we use the REP instructions unconditionally,
2554     - * because it means that we know both memory operands in advance.
2555     - */
2556     -#define memcpy(t, f, n) __memcpy((t), (f), (n))
2557     -#endif
2558    
2559     #endif
2560     #endif /* !CONFIG_FORTIFY_SOURCE */
2561     diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
2562     index 0b1b4445f4c5..533f74c300c2 100644
2563     --- a/arch/x86/include/asm/string_64.h
2564     +++ b/arch/x86/include/asm/string_64.h
2565     @@ -33,7 +33,6 @@ extern void *memcpy(void *to, const void *from, size_t len);
2566     extern void *__memcpy(void *to, const void *from, size_t len);
2567    
2568     #ifndef CONFIG_FORTIFY_SOURCE
2569     -#ifndef CONFIG_KMEMCHECK
2570     #if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
2571     #define memcpy(dst, src, len) \
2572     ({ \
2573     @@ -46,13 +45,6 @@ extern void *__memcpy(void *to, const void *from, size_t len);
2574     __ret; \
2575     })
2576     #endif
2577     -#else
2578     -/*
2579     - * kmemcheck becomes very happy if we use the REP instructions unconditionally,
2580     - * because it means that we know both memory operands in advance.
2581     - */
2582     -#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
2583     -#endif
2584     #endif /* !CONFIG_FORTIFY_SOURCE */
2585    
2586     #define __HAVE_ARCH_MEMSET
2587     diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
2588     index 4405c4b308e8..704f31315dde 100644
2589     --- a/arch/x86/include/asm/tlbflush.h
2590     +++ b/arch/x86/include/asm/tlbflush.h
2591     @@ -140,7 +140,7 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
2592     #else
2593     #define __flush_tlb() __native_flush_tlb()
2594     #define __flush_tlb_global() __native_flush_tlb_global()
2595     -#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
2596     +#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
2597     #endif
2598    
2599     static inline bool tlb_defer_switch_to_init_mm(void)
2600     @@ -397,7 +397,7 @@ static inline void __native_flush_tlb_global(void)
2601     /*
2602     * flush one page in the user mapping
2603     */
2604     -static inline void __native_flush_tlb_single(unsigned long addr)
2605     +static inline void __native_flush_tlb_one_user(unsigned long addr)
2606     {
2607     u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
2608    
2609     @@ -434,18 +434,31 @@ static inline void __flush_tlb_all(void)
2610     /*
2611     * flush one page in the kernel mapping
2612     */
2613     -static inline void __flush_tlb_one(unsigned long addr)
2614     +static inline void __flush_tlb_one_kernel(unsigned long addr)
2615     {
2616     count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
2617     - __flush_tlb_single(addr);
2618     +
2619     + /*
2620     + * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
2621     + * paravirt equivalent. Even with PCID, this is sufficient: we only
2622     + * use PCID if we also use global PTEs for the kernel mapping, and
2623     + * INVLPG flushes global translations across all address spaces.
2624     + *
2625     + * If PTI is on, then the kernel is mapped with non-global PTEs, and
2626     + * __flush_tlb_one_user() will flush the given address for the current
2627     + * kernel address space and for its usermode counterpart, but it does
2628     + * not flush it for other address spaces.
2629     + */
2630     + __flush_tlb_one_user(addr);
2631    
2632     if (!static_cpu_has(X86_FEATURE_PTI))
2633     return;
2634    
2635     /*
2636     - * __flush_tlb_single() will have cleared the TLB entry for this ASID,
2637     - * but since kernel space is replicated across all, we must also
2638     - * invalidate all others.
2639     + * See above. We need to propagate the flush to all other address
2640     + * spaces. In principle, we only need to propagate it to kernelmode
2641     + * address spaces, but the extra bookkeeping we would need is not
2642     + * worth it.
2643     */
2644     invalidate_other_asid();
2645     }
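
On native hardware the one-user flush underneath this is a single INVLPG; the PTI branch above then repeats the invalidation for the other address-space IDs because, with PTI on, kernel PTEs are no longer global. A minimal model of the primitive (kernel-mode only; name is ours):

    /* One-page TLB invalidation for the current address space. */
    static inline void invlpg(unsigned long addr)
    {
            __asm__ __volatile__("invlpg (%0)" : : "r"(addr) : "memory");
    }
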
2646     diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h
2647     index 1f5c5161ead6..45c8605467f1 100644
2648     --- a/arch/x86/include/asm/xor.h
2649     +++ b/arch/x86/include/asm/xor.h
2650     @@ -1,7 +1,4 @@
2651     -#ifdef CONFIG_KMEMCHECK
2652     -/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
2653     -# include <asm-generic/xor.h>
2654     -#elif !defined(_ASM_X86_XOR_H)
2655     +#ifndef _ASM_X86_XOR_H
2656     #define _ASM_X86_XOR_H
2657    
2658     /*
2659     diff --git a/arch/x86/kernel/acpi/apei.c b/arch/x86/kernel/acpi/apei.c
2660     index ea3046e0b0cf..28d70ac93faf 100644
2661     --- a/arch/x86/kernel/acpi/apei.c
2662     +++ b/arch/x86/kernel/acpi/apei.c
2663     @@ -55,5 +55,5 @@ void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
2664    
2665     void arch_apei_flush_tlb_one(unsigned long addr)
2666     {
2667     - __flush_tlb_one(addr);
2668     + __flush_tlb_one_kernel(addr);
2669     }
2670     diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
2671     index 6db28f17ff28..c88e0b127810 100644
2672     --- a/arch/x86/kernel/amd_nb.c
2673     +++ b/arch/x86/kernel/amd_nb.c
2674     @@ -235,7 +235,7 @@ int amd_cache_northbridges(void)
2675     if (boot_cpu_data.x86 == 0x10 &&
2676     boot_cpu_data.x86_model >= 0x8 &&
2677     (boot_cpu_data.x86_model > 0x9 ||
2678     - boot_cpu_data.x86_mask >= 0x1))
2679     + boot_cpu_data.x86_stepping >= 0x1))
2680     amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
2681    
2682     if (boot_cpu_data.x86 == 0x15)
2683     diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
2684     index 89c7c8569e5e..5942aa5f569b 100644
2685     --- a/arch/x86/kernel/apic/apic.c
2686     +++ b/arch/x86/kernel/apic/apic.c
2687     @@ -553,7 +553,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
2688    
2689     static u32 hsx_deadline_rev(void)
2690     {
2691     - switch (boot_cpu_data.x86_mask) {
2692     + switch (boot_cpu_data.x86_stepping) {
2693     case 0x02: return 0x3a; /* EP */
2694     case 0x04: return 0x0f; /* EX */
2695     }
2696     @@ -563,7 +563,7 @@ static u32 hsx_deadline_rev(void)
2697    
2698     static u32 bdx_deadline_rev(void)
2699     {
2700     - switch (boot_cpu_data.x86_mask) {
2701     + switch (boot_cpu_data.x86_stepping) {
2702     case 0x02: return 0x00000011;
2703     case 0x03: return 0x0700000e;
2704     case 0x04: return 0x0f00000c;
2705     @@ -575,7 +575,7 @@ static u32 bdx_deadline_rev(void)
2706    
2707     static u32 skx_deadline_rev(void)
2708     {
2709     - switch (boot_cpu_data.x86_mask) {
2710     + switch (boot_cpu_data.x86_stepping) {
2711     case 0x03: return 0x01000136;
2712     case 0x04: return 0x02000014;
2713     }
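
The three *_deadline_rev() helpers above are lookup tables keyed on the freshly renamed x86_stepping field; the caller disables the TSC-deadline timer when the loaded microcode is older than the returned revision. A hypothetical standalone rendering of the same pattern (struct and function names invented for illustration):

    #include <stdint.h>

    struct deadline_rev {
    	uint8_t  stepping;
    	uint32_t min_microcode;
    };

    static const struct deadline_rev hsx_revs[] = {
    	{ 0x02, 0x3a },	/* EP */
    	{ 0x04, 0x0f },	/* EX */
    };

    static uint32_t min_rev_for_stepping(uint8_t stepping)
    {
    	unsigned int i;

    	for (i = 0; i < sizeof(hsx_revs) / sizeof(hsx_revs[0]); i++)
    		if (hsx_revs[i].stepping == stepping)
    			return hsx_revs[i].min_microcode;
    	return ~0u;	/* unknown stepping: never trust the deadline timer */
    }
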
2714     diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
2715     index e4b0d92b3ae0..2a7fd56e67b3 100644
2716     --- a/arch/x86/kernel/apm_32.c
2717     +++ b/arch/x86/kernel/apm_32.c
2718     @@ -2389,6 +2389,7 @@ static int __init apm_init(void)
2719     if (HZ != 100)
2720     idle_period = (idle_period * HZ) / 100;
2721     if (idle_threshold < 100) {
2722     + cpuidle_poll_state_init(&apm_idle_driver);
2723     if (!cpuidle_register_driver(&apm_idle_driver))
2724     if (cpuidle_register_device(&apm_cpuidle_device))
2725     cpuidle_unregister_driver(&apm_idle_driver);
2726     diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
2727     index fa1261eefa16..f91ba53e06c8 100644
2728     --- a/arch/x86/kernel/asm-offsets_32.c
2729     +++ b/arch/x86/kernel/asm-offsets_32.c
2730     @@ -18,7 +18,7 @@ void foo(void)
2731     OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
2732     OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
2733     OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
2734     - OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
2735     + OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
2736     OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
2737     OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
2738     OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
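
asm-offsets_32.c exists to hand structure offsets to assembly: OFFSET() makes the compiler emit the offsetof() value into its assembly output, which the build scrapes into a generated header that head_32.S consumes as CPUINFO_x86_stepping later in this patch. A simplified sketch of the mechanism (condensed from include/linux/kbuild.h; the struct is trimmed to the fields shown here):

    #include <stddef.h>

    struct cpuinfo_x86 {
    	unsigned char x86, x86_vendor, x86_model, x86_stepping;
    };

    /* The compiler embeds the constant in its .s output; the build
     * greps it back out into a header of #defines. */
    #define OFFSET(sym, str, mem) \
    	asm volatile("\n.ascii \"->" #sym " %0 " #mem "\"" \
    		     : : "i" (offsetof(struct str, mem)))

    void foo(void)
    {
    	OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
    }
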
2739     diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
2740     index ea831c858195..e7d5a7883632 100644
2741     --- a/arch/x86/kernel/cpu/amd.c
2742     +++ b/arch/x86/kernel/cpu/amd.c
2743     @@ -119,7 +119,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
2744     return;
2745     }
2746    
2747     - if (c->x86_model == 6 && c->x86_mask == 1) {
2748     + if (c->x86_model == 6 && c->x86_stepping == 1) {
2749     const int K6_BUG_LOOP = 1000000;
2750     int n;
2751     void (*f_vide)(void);
2752     @@ -149,7 +149,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
2753    
2754     /* K6 with old style WHCR */
2755     if (c->x86_model < 8 ||
2756     - (c->x86_model == 8 && c->x86_mask < 8)) {
2757     + (c->x86_model == 8 && c->x86_stepping < 8)) {
2758     /* We can only write allocate on the low 508Mb */
2759     if (mbytes > 508)
2760     mbytes = 508;
2761     @@ -168,7 +168,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
2762     return;
2763     }
2764    
2765     - if ((c->x86_model == 8 && c->x86_mask > 7) ||
2766     + if ((c->x86_model == 8 && c->x86_stepping > 7) ||
2767     c->x86_model == 9 || c->x86_model == 13) {
2768     /* The more serious chips .. */
2769    
2770     @@ -221,7 +221,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
2771     * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
2772     * As per AMD technical note 27212 0.2
2773     */
2774     - if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
2775     + if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
2776     rdmsr(MSR_K7_CLK_CTL, l, h);
2777     if ((l & 0xfff00000) != 0x20000000) {
2778     pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
2779     @@ -241,12 +241,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
2780     * but they are not certified as MP capable.
2781     */
2782     /* Athlon 660/661 is valid. */
2783     - if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
2784     - (c->x86_mask == 1)))
2785     + if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
2786     + (c->x86_stepping == 1)))
2787     return;
2788    
2789     /* Duron 670 is valid */
2790     - if ((c->x86_model == 7) && (c->x86_mask == 0))
2791     + if ((c->x86_model == 7) && (c->x86_stepping == 0))
2792     return;
2793    
2794     /*
2795     @@ -256,8 +256,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
2796     * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
2797     * more.
2798     */
2799     - if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
2800     - ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
2801     + if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
2802     + ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
2803     (c->x86_model > 7))
2804     if (cpu_has(c, X86_FEATURE_MP))
2805     return;
2806     @@ -583,7 +583,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
2807     /* Set MTRR capability flag if appropriate */
2808     if (c->x86 == 5)
2809     if (c->x86_model == 13 || c->x86_model == 9 ||
2810     - (c->x86_model == 8 && c->x86_mask >= 8))
2811     + (c->x86_model == 8 && c->x86_stepping >= 8))
2812     set_cpu_cap(c, X86_FEATURE_K6_MTRR);
2813     #endif
2814     #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
2815     @@ -769,7 +769,7 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
2816     * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
2817     * all up to and including B1.
2818     */
2819     - if (c->x86_model <= 1 && c->x86_mask <= 1)
2820     + if (c->x86_model <= 1 && c->x86_stepping <= 1)
2821     set_cpu_cap(c, X86_FEATURE_CPB);
2822     }
2823    
2824     @@ -880,11 +880,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
2825     /* AMD errata T13 (order #21922) */
2826     if ((c->x86 == 6)) {
2827     /* Duron Rev A0 */
2828     - if (c->x86_model == 3 && c->x86_mask == 0)
2829     + if (c->x86_model == 3 && c->x86_stepping == 0)
2830     size = 64;
2831     /* Tbird rev A1/A2 */
2832     if (c->x86_model == 4 &&
2833     - (c->x86_mask == 0 || c->x86_mask == 1))
2834     + (c->x86_stepping == 0 || c->x86_stepping == 1))
2835     size = 256;
2836     }
2837     return size;
2838     @@ -1021,7 +1021,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
2839     }
2840    
2841     /* OSVW unavailable or ID unknown, match family-model-stepping range */
2842     - ms = (cpu->x86_model << 4) | cpu->x86_mask;
2843     + ms = (cpu->x86_model << 4) | cpu->x86_stepping;
2844     while ((range = *erratum++))
2845     if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
2846     (ms >= AMD_MODEL_RANGE_START(range)) &&
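
The erratum matcher at the end of this file packs model and stepping into one value precisely so that a single numeric range can describe a span of family/model/stepping triples. A worked miniature (pack_ms() is hypothetical; the real code compares against the AMD_MODEL_RANGE_START/END bounds of an OSVW range):

    #include <stdio.h>

    /* Same packing as cpu_has_amd_erratum(): model and stepping
     * share one byte. */
    static unsigned int pack_ms(unsigned int model, unsigned int stepping)
    {
    	return (model << 4) | stepping;
    }

    int main(void)
    {
    	/* A range of 0x80..0x8f means "model 8, any stepping". */
    	unsigned int ms = pack_ms(0x8, 0x1);

    	printf("ms=0x%02x in range: %d\n", ms, ms >= 0x80 && ms <= 0x8f);
    	return 0;
    }
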
2847     diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
2848     index 71949bf2de5a..d71c8b54b696 100644
2849     --- a/arch/x86/kernel/cpu/bugs.c
2850     +++ b/arch/x86/kernel/cpu/bugs.c
2851     @@ -162,8 +162,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
2852     if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
2853     return SPECTRE_V2_CMD_NONE;
2854     else {
2855     - ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
2856     - sizeof(arg));
2857     + ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
2858     if (ret < 0)
2859     return SPECTRE_V2_CMD_AUTO;
2860    
2861     @@ -175,8 +174,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
2862     }
2863    
2864     if (i >= ARRAY_SIZE(mitigation_options)) {
2865     - pr_err("unknown option (%s). Switching to AUTO select\n",
2866     - mitigation_options[i].option);
2867     + pr_err("unknown option (%s). Switching to AUTO select\n", arg);
2868     return SPECTRE_V2_CMD_AUTO;
2869     }
2870     }
2871     @@ -185,8 +183,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
2872     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
2873     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
2874     !IS_ENABLED(CONFIG_RETPOLINE)) {
2875     - pr_err("%s selected but not compiled in. Switching to AUTO select\n",
2876     - mitigation_options[i].option);
2877     + pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
2878     return SPECTRE_V2_CMD_AUTO;
2879     }
2880    
2881     @@ -256,14 +253,14 @@ static void __init spectre_v2_select_mitigation(void)
2882     goto retpoline_auto;
2883     break;
2884     }
2885     - pr_err("kernel not compiled with retpoline; no mitigation available!");
2886     + pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
2887     return;
2888    
2889     retpoline_auto:
2890     if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
2891     retpoline_amd:
2892     if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
2893     - pr_err("LFENCE not serializing. Switching to generic retpoline\n");
2894     + pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
2895     goto retpoline_generic;
2896     }
2897     mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
2898     @@ -281,7 +278,7 @@ static void __init spectre_v2_select_mitigation(void)
2899     pr_info("%s\n", spectre_v2_strings[mode]);
2900    
2901     /*
2902     - * If neither SMEP or KPTI are available, there is a risk of
2903     + * If neither SMEP nor PTI are available, there is a risk of
2904     * hitting userspace addresses in the RSB after a context switch
2905     * from a shallow call stack to a deeper one. To prevent this fill
2906     * the entire RSB, even when using IBRS.
2907     @@ -295,21 +292,20 @@ static void __init spectre_v2_select_mitigation(void)
2908     if ((!boot_cpu_has(X86_FEATURE_PTI) &&
2909     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
2910     setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
2911     - pr_info("Filling RSB on context switch\n");
2912     + pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
2913     }
2914    
2915     /* Initialize Indirect Branch Prediction Barrier if supported */
2916     if (boot_cpu_has(X86_FEATURE_IBPB)) {
2917     setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
2918     - pr_info("Enabling Indirect Branch Prediction Barrier\n");
2919     + pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
2920     }
2921     }
2922    
2923     #undef pr_fmt
2924    
2925     #ifdef CONFIG_SYSFS
2926     -ssize_t cpu_show_meltdown(struct device *dev,
2927     - struct device_attribute *attr, char *buf)
2928     +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
2929     {
2930     if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
2931     return sprintf(buf, "Not affected\n");
2932     @@ -318,16 +314,14 @@ ssize_t cpu_show_meltdown(struct device *dev,
2933     return sprintf(buf, "Vulnerable\n");
2934     }
2935    
2936     -ssize_t cpu_show_spectre_v1(struct device *dev,
2937     - struct device_attribute *attr, char *buf)
2938     +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
2939     {
2940     if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
2941     return sprintf(buf, "Not affected\n");
2942     return sprintf(buf, "Mitigation: __user pointer sanitization\n");
2943     }
2944    
2945     -ssize_t cpu_show_spectre_v2(struct device *dev,
2946     - struct device_attribute *attr, char *buf)
2947     +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
2948     {
2949     if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2950     return sprintf(buf, "Not affected\n");
2951     @@ -337,9 +331,3 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
2952     spectre_v2_module_string());
2953     }
2954     #endif
2955     -
2956     -void __ibp_barrier(void)
2957     -{
2958     - __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
2959     -}
2960     -EXPORT_SYMBOL_GPL(__ibp_barrier);
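
The message changes above hard-code a "Spectre v2 mitigation:" prefix into each string. The same effect is more commonly obtained with the pr_fmt convention, sketched below for contrast (generic kernel idiom, not part of this patch; note the file above instead does #undef pr_fmt before the sysfs handlers):

    /* pr_fmt must be defined before the pr_*() macros are used,
     * conventionally above the #includes. */
    #define pr_fmt(fmt) "Spectre V2 : " fmt
    #include <linux/printk.h>

    static void example(void)
    {
    	/* Logs: "Spectre V2 : Filling RSB on context switch" */
    	pr_info("Filling RSB on context switch\n");
    }
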
2961     diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
2962     index 68bc6d9b3132..595be776727d 100644
2963     --- a/arch/x86/kernel/cpu/centaur.c
2964     +++ b/arch/x86/kernel/cpu/centaur.c
2965     @@ -136,7 +136,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
2966     clear_cpu_cap(c, X86_FEATURE_TSC);
2967     break;
2968     case 8:
2969     - switch (c->x86_mask) {
2970     + switch (c->x86_stepping) {
2971     default:
2972     name = "2";
2973     break;
2974     @@ -211,7 +211,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
2975     * - Note, it seems this may only be in engineering samples.
2976     */
2977     if ((c->x86 == 6) && (c->x86_model == 9) &&
2978     - (c->x86_mask == 1) && (size == 65))
2979     + (c->x86_stepping == 1) && (size == 65))
2980     size -= 1;
2981     return size;
2982     }
2983     diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
2984     index 92b66e21bae5..651b7afed4da 100644
2985     --- a/arch/x86/kernel/cpu/common.c
2986     +++ b/arch/x86/kernel/cpu/common.c
2987     @@ -707,7 +707,7 @@ void cpu_detect(struct cpuinfo_x86 *c)
2988     cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
2989     c->x86 = x86_family(tfms);
2990     c->x86_model = x86_model(tfms);
2991     - c->x86_mask = x86_stepping(tfms);
2992     + c->x86_stepping = x86_stepping(tfms);
2993    
2994     if (cap0 & (1<<19)) {
2995     c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
2996     @@ -1160,9 +1160,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
2997     int i;
2998    
2999     c->loops_per_jiffy = loops_per_jiffy;
3000     - c->x86_cache_size = -1;
3001     + c->x86_cache_size = 0;
3002     c->x86_vendor = X86_VENDOR_UNKNOWN;
3003     - c->x86_model = c->x86_mask = 0; /* So far unknown... */
3004     + c->x86_model = c->x86_stepping = 0; /* So far unknown... */
3005     c->x86_vendor_id[0] = '\0'; /* Unset */
3006     c->x86_model_id[0] = '\0'; /* Unset */
3007     c->x86_max_cores = 1;
3008     @@ -1353,8 +1353,8 @@ void print_cpu_info(struct cpuinfo_x86 *c)
3009    
3010     pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
3011    
3012     - if (c->x86_mask || c->cpuid_level >= 0)
3013     - pr_cont(", stepping: 0x%x)\n", c->x86_mask);
3014     + if (c->x86_stepping || c->cpuid_level >= 0)
3015     + pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
3016     else
3017     pr_cont(")\n");
3018     }
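
cpu_detect() above fills the renamed field from the CPUID leaf 1 signature. How family, model, and stepping are carved out of that 32-bit value (mirroring x86_family()/x86_model()/x86_stepping() from arch/x86/lib/cpu.c, which this patch also touches):

    #include <stdio.h>

    static unsigned int sig_family(unsigned int sig)
    {
    	unsigned int fam = (sig >> 8) & 0xf;

    	return fam == 0xf ? fam + ((sig >> 20) & 0xff) : fam;
    }

    static unsigned int sig_model(unsigned int sig)
    {
    	unsigned int model = (sig >> 4) & 0xf;

    	if (sig_family(sig) >= 0x6)	/* extended model bits apply */
    		model += ((sig >> 16) & 0xf) << 4;
    	return model;
    }

    int main(void)
    {
    	unsigned int sig = 0x000906e9;	/* a Kaby Lake signature */

    	printf("family %x model %x stepping %x\n",
    	       sig_family(sig), sig_model(sig), sig & 0xf);
    	/* prints: family 6 model 9e stepping 9 */
    	return 0;
    }
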
3019     diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
3020     index 6b4bb335641f..8949b7ae6d92 100644
3021     --- a/arch/x86/kernel/cpu/cyrix.c
3022     +++ b/arch/x86/kernel/cpu/cyrix.c
3023     @@ -215,7 +215,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
3024    
3025     /* common case step number/rev -- exceptions handled below */
3026     c->x86_model = (dir1 >> 4) + 1;
3027     - c->x86_mask = dir1 & 0xf;
3028     + c->x86_stepping = dir1 & 0xf;
3029    
3030     /* Now cook; the original recipe is by Channing Corn, from Cyrix.
3031     * We do the same thing for each generation: we work out
3032     diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
3033     index 4cf4f8cbc69d..d19e903214b4 100644
3034     --- a/arch/x86/kernel/cpu/intel.c
3035     +++ b/arch/x86/kernel/cpu/intel.c
3036     @@ -116,14 +116,13 @@ struct sku_microcode {
3037     u32 microcode;
3038     };
3039     static const struct sku_microcode spectre_bad_microcodes[] = {
3040     - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 },
3041     - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 },
3042     - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 },
3043     - { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 },
3044     - { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 },
3045     + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 },
3046     + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 },
3047     + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 },
3048     + { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 },
3049     + { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
3050     { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
3051     { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
3052     - { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 },
3053     { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
3054     { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
3055     { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
3056     @@ -136,8 +135,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
3057     { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
3058     { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
3059     { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
3060     - /* Updated in the 20180108 release; blacklist until we know otherwise */
3061     - { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 },
3062     /* Observed in the wild */
3063     { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
3064     { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
3065     @@ -149,7 +146,7 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
3066    
3067     for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
3068     if (c->x86_model == spectre_bad_microcodes[i].model &&
3069     - c->x86_mask == spectre_bad_microcodes[i].stepping)
3070     + c->x86_stepping == spectre_bad_microcodes[i].stepping)
3071     return (c->microcode <= spectre_bad_microcodes[i].microcode);
3072     }
3073     return false;
3074     @@ -196,7 +193,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
3075     * need the microcode to have already been loaded... so if it is
3076     * not, recommend a BIOS update and disable large pages.
3077     */
3078     - if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
3079     + if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
3080     c->microcode < 0x20e) {
3081     pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
3082     clear_cpu_cap(c, X86_FEATURE_PSE);
3083     @@ -212,7 +209,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
3084    
3085     /* CPUID workaround for 0F33/0F34 CPU */
3086     if (c->x86 == 0xF && c->x86_model == 0x3
3087     - && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
3088     + && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
3089     c->x86_phys_bits = 36;
3090    
3091     /*
3092     @@ -253,21 +250,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
3093     if (c->x86 == 6 && c->x86_model < 15)
3094     clear_cpu_cap(c, X86_FEATURE_PAT);
3095    
3096     -#ifdef CONFIG_KMEMCHECK
3097     - /*
3098     - * P4s have a "fast strings" feature which causes single-
3099     - * stepping REP instructions to only generate a #DB on
3100     - * cache-line boundaries.
3101     - *
3102     - * Ingo Molnar reported a Pentium D (model 6) and a Xeon
3103     - * (model 2) with the same problem.
3104     - */
3105     - if (c->x86 == 15)
3106     - if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
3107     - MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
3108     - pr_info("kmemcheck: Disabling fast string operations\n");
3109     -#endif
3110     -
3111     /*
3112     * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
3113     * clear the fast string and enhanced fast string CPU capabilities.
3114     @@ -325,7 +307,7 @@ int ppro_with_ram_bug(void)
3115     if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
3116     boot_cpu_data.x86 == 6 &&
3117     boot_cpu_data.x86_model == 1 &&
3118     - boot_cpu_data.x86_mask < 8) {
3119     + boot_cpu_data.x86_stepping < 8) {
3120     pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
3121     return 1;
3122     }
3123     @@ -342,7 +324,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
3124     * Mask B, Pentium, but not Pentium MMX
3125     */
3126     if (c->x86 == 5 &&
3127     - c->x86_mask >= 1 && c->x86_mask <= 4 &&
3128     + c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
3129     c->x86_model <= 3) {
3130     /*
3131     * Remember we have B step Pentia with bugs
3132     @@ -385,7 +367,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
3133     * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
3134     * model 3 mask 3
3135     */
3136     - if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
3137     + if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
3138     clear_cpu_cap(c, X86_FEATURE_SEP);
3139    
3140     /*
3141     @@ -403,7 +385,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
3142     * P4 Xeon erratum 037 workaround.
3143     * Hardware prefetcher may cause stale data to be loaded into the cache.
3144     */
3145     - if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
3146     + if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
3147     if (msr_set_bit(MSR_IA32_MISC_ENABLE,
3148     MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
3149     pr_info("CPU: C0 stepping P4 Xeon detected.\n");
3150     @@ -418,7 +400,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
3151     * Specification Update").
3152     */
3153     if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
3154     - (c->x86_mask < 0x6 || c->x86_mask == 0xb))
3155     + (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
3156     set_cpu_bug(c, X86_BUG_11AP);
3157    
3158    
3159     @@ -665,7 +647,7 @@ static void init_intel(struct cpuinfo_x86 *c)
3160     case 6:
3161     if (l2 == 128)
3162     p = "Celeron (Mendocino)";
3163     - else if (c->x86_mask == 0 || c->x86_mask == 5)
3164     + else if (c->x86_stepping == 0 || c->x86_stepping == 5)
3165     p = "Celeron-A";
3166     break;
3167    
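The revised spectre_bad_microcodes[] table above drops entries that turned out to be fine (Gemini Lake, Skylake Mobile) and lowers the Kaby Lake cutoff to 0x80; bad_spectre_microcode() is then a straight linear scan keyed on model and the renamed stepping field, declaring the loaded revision bad if it is at or below the blacklisted one. The same shape, standalone (names hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    struct sku_microcode { uint8_t model, stepping; uint32_t microcode; };

    static bool microcode_is_bad(const struct sku_microcode *tbl,
    			     unsigned int n, uint8_t model,
    			     uint8_t stepping, uint32_t rev)
    {
    	unsigned int i;

    	for (i = 0; i < n; i++)
    		if (tbl[i].model == model && tbl[i].stepping == stepping)
    			return rev <= tbl[i].microcode;
    	return false;	/* unknown SKU: assume the microcode is fine */
    }
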
3168     diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
3169     index 99442370de40..18dd8f22e353 100644
3170     --- a/arch/x86/kernel/cpu/intel_rdt.c
3171     +++ b/arch/x86/kernel/cpu/intel_rdt.c
3172     @@ -771,7 +771,7 @@ static __init void rdt_quirks(void)
3173     cache_alloc_hsw_probe();
3174     break;
3175     case INTEL_FAM6_SKYLAKE_X:
3176     - if (boot_cpu_data.x86_mask <= 4)
3177     + if (boot_cpu_data.x86_stepping <= 4)
3178     set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
3179     }
3180     }
3181     diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
3182     index aa0d5df9dc60..e956eb267061 100644
3183     --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
3184     +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
3185     @@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
3186    
3187     extern struct mca_config mca_cfg;
3188    
3189     +#ifndef CONFIG_X86_64
3190     +/*
3191     + * On 32-bit systems it would be difficult to safely unmap a poison page
3192     + * from the kernel 1:1 map because there are no non-canonical addresses that
3193     + * we can use to refer to the address without risking a speculative access.
3194     + * However, this isn't much of an issue because:
3195     + * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which
3196     + * are only mapped into the kernel as needed
3197     + * 2) Few people would run a 32-bit kernel on a machine that supports
3198     + * recoverable errors because they have too much memory to boot 32-bit.
3199     + */
3200     +static inline void mce_unmap_kpfn(unsigned long pfn) {}
3201     +#define mce_unmap_kpfn mce_unmap_kpfn
3202     +#endif
3203     +
3204     #endif /* __X86_MCE_INTERNAL_H__ */
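
The stub above uses a small preprocessor idiom: the header defines a macro with the same name as the function it just provided, and mce.c (next hunk) wraps both its forward declaration and its real definition in #ifndef mce_unmap_kpfn, so the heavyweight 64-bit implementation is compiled only when no arch stub exists. The idiom in isolation (names hypothetical):

    /* In an arch header: provide a no-op and mark the symbol taken. */
    static inline void frob_page(unsigned long pfn) { }
    #define frob_page frob_page

    /* In generic code: compile the real version only if nobody did. */
    #ifndef frob_page
    static void frob_page(unsigned long pfn)
    {
    	/* ...expensive generic implementation... */
    }
    #endif
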
3205     diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
3206     index a9e898b71208..73237aa271ea 100644
3207     --- a/arch/x86/kernel/cpu/mcheck/mce.c
3208     +++ b/arch/x86/kernel/cpu/mcheck/mce.c
3209     @@ -106,6 +106,10 @@ static struct irq_work mce_irq_work;
3210    
3211     static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
3212    
3213     +#ifndef mce_unmap_kpfn
3214     +static void mce_unmap_kpfn(unsigned long pfn);
3215     +#endif
3216     +
3217     /*
3218     * CPU/chipset specific EDAC code can register a notifier call here to print
3219     * MCE errors in a human-readable form.
3220     @@ -582,7 +586,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
3221    
3222     if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
3223     pfn = mce->addr >> PAGE_SHIFT;
3224     - memory_failure(pfn, MCE_VECTOR, 0);
3225     + if (memory_failure(pfn, MCE_VECTOR, 0))
3226     + mce_unmap_kpfn(pfn);
3227     }
3228    
3229     return NOTIFY_OK;
3230     @@ -1049,12 +1054,13 @@ static int do_memory_failure(struct mce *m)
3231     ret = memory_failure(m->addr >> PAGE_SHIFT, MCE_VECTOR, flags);
3232     if (ret)
3233     pr_err("Memory error not recovered");
3234     + else
3235     + mce_unmap_kpfn(m->addr >> PAGE_SHIFT);
3236     return ret;
3237     }
3238    
3239     -#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE)
3240     -
3241     -void arch_unmap_kpfn(unsigned long pfn)
3242     +#ifndef mce_unmap_kpfn
3243     +static void mce_unmap_kpfn(unsigned long pfn)
3244     {
3245     unsigned long decoy_addr;
3246    
3247     @@ -1065,7 +1071,7 @@ void arch_unmap_kpfn(unsigned long pfn)
3248     * We would like to just call:
3249     * set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
3250     * but doing that would radically increase the odds of a
3251     - * speculative access to the posion page because we'd have
3252     + * speculative access to the poison page because we'd have
3253     * the virtual address of the kernel 1:1 mapping sitting
3254     * around in registers.
3255     * Instead we get tricky. We create a non-canonical address
3256     @@ -1090,7 +1096,6 @@ void arch_unmap_kpfn(unsigned long pfn)
3257    
3258     if (set_memory_np(decoy_addr, 1))
3259     pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
3260     -
3261     }
3262     #endif
3263    
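The comment above explains why set_memory_np() is not handed the plain 1:1-map address of the poison page: merely having that canonical address live in a register invites a speculative load. Instead the code builds a non-canonical alias. A sketch of the transformation (on x86_64 the code flips bit 63, making the address non-canonical while the low bits still select the right page-table entry; a PAGE_SHIFT of 12 is assumed here):

    #include <stdint.h>

    static uint64_t decoy_address(uint64_t pfn, uint64_t page_offset)
    {
    	/* Same pfn and offset bits, but bit 63 flipped: the CPU will
    	 * not even speculatively dereference a non-canonical address. */
    	return (pfn << 12) + (page_offset ^ (1ULL << 63));
    }
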
3264     diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
3265     index f7c55b0e753a..a15db2b4e0d6 100644
3266     --- a/arch/x86/kernel/cpu/microcode/intel.c
3267     +++ b/arch/x86/kernel/cpu/microcode/intel.c
3268     @@ -921,7 +921,7 @@ static bool is_blacklisted(unsigned int cpu)
3269     */
3270     if (c->x86 == 6 &&
3271     c->x86_model == INTEL_FAM6_BROADWELL_X &&
3272     - c->x86_mask == 0x01 &&
3273     + c->x86_stepping == 0x01 &&
3274     llc_size_per_core > 2621440 &&
3275     c->microcode < 0x0b000021) {
3276     pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
3277     @@ -944,7 +944,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
3278     return UCODE_NFOUND;
3279    
3280     sprintf(name, "intel-ucode/%02x-%02x-%02x",
3281     - c->x86, c->x86_model, c->x86_mask);
3282     + c->x86, c->x86_model, c->x86_stepping);
3283    
3284     if (request_firmware_direct(&firmware, name, device)) {
3285     pr_debug("data file %s load failed\n", name);
3286     @@ -982,7 +982,7 @@ static struct microcode_ops microcode_intel_ops = {
3287    
3288     static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
3289     {
3290     - u64 llc_size = c->x86_cache_size * 1024;
3291     + u64 llc_size = c->x86_cache_size * 1024ULL;
3292    
3293     do_div(llc_size, c->x86_max_cores);
3294    
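Two details above are easy to miss. The request_microcode_fw() hunk means a Broadwell-X stepping 1 part, for example, asks for the file intel-ucode/06-4f-01. And the 1024ULL in calc_llc_size_per_core() forces the multiply into 64 bits: x86_cache_size is now an unsigned int holding KB, so with a plain int constant the product would be computed in 32 bits and could wrap. A contrived demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned int cache_kb = 5u * 1024 * 1024;	/* 5 GB, contrived */

    	uint64_t wraps = cache_kb * 1024;	/* 32-bit multiply */
    	uint64_t exact = cache_kb * 1024ULL;	/* promoted to 64-bit */

    	printf("wraps=%llu exact=%llu\n",
    	       (unsigned long long)wraps, (unsigned long long)exact);
    	/* wraps=1073741824 exact=5368709120 */
    	return 0;
    }
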
3295     diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
3296     index fdc55215d44d..e12ee86906c6 100644
3297     --- a/arch/x86/kernel/cpu/mtrr/generic.c
3298     +++ b/arch/x86/kernel/cpu/mtrr/generic.c
3299     @@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
3300     */
3301     if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
3302     boot_cpu_data.x86_model == 1 &&
3303     - boot_cpu_data.x86_mask <= 7) {
3304     + boot_cpu_data.x86_stepping <= 7) {
3305     if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
3306     pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
3307     return -EINVAL;
3308     diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
3309     index 40d5a8a75212..7468de429087 100644
3310     --- a/arch/x86/kernel/cpu/mtrr/main.c
3311     +++ b/arch/x86/kernel/cpu/mtrr/main.c
3312     @@ -711,8 +711,8 @@ void __init mtrr_bp_init(void)
3313     if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
3314     boot_cpu_data.x86 == 0xF &&
3315     boot_cpu_data.x86_model == 0x3 &&
3316     - (boot_cpu_data.x86_mask == 0x3 ||
3317     - boot_cpu_data.x86_mask == 0x4))
3318     + (boot_cpu_data.x86_stepping == 0x3 ||
3319     + boot_cpu_data.x86_stepping == 0x4))
3320     phys_addr = 36;
3321    
3322     size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
3323     diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
3324     index e7ecedafa1c8..2c8522a39ed5 100644
3325     --- a/arch/x86/kernel/cpu/proc.c
3326     +++ b/arch/x86/kernel/cpu/proc.c
3327     @@ -72,8 +72,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
3328     c->x86_model,
3329     c->x86_model_id[0] ? c->x86_model_id : "unknown");
3330    
3331     - if (c->x86_mask || c->cpuid_level >= 0)
3332     - seq_printf(m, "stepping\t: %d\n", c->x86_mask);
3333     + if (c->x86_stepping || c->cpuid_level >= 0)
3334     + seq_printf(m, "stepping\t: %d\n", c->x86_stepping);
3335     else
3336     seq_puts(m, "stepping\t: unknown\n");
3337     if (c->microcode)
3338     @@ -91,8 +91,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
3339     }
3340    
3341     /* Cache size */
3342     - if (c->x86_cache_size >= 0)
3343     - seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
3344     + if (c->x86_cache_size)
3345     + seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size);
3346    
3347     show_cpuinfo_core(m, c, cpu);
3348     show_cpuinfo_misc(m, c);
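The /proc/cpuinfo hunks complete the sentinel change made in common.c earlier: x86_cache_size is now unsigned with 0 meaning "unknown" (it used to be a signed int with -1 as the sentinel), so the guard collapses to a truth test and the format specifier becomes %u. In miniature:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int cache_kb = 0;	/* 0 now means "size unknown" */

    	if (cache_kb)	/* the line is simply omitted when unknown */
    		printf("cache size\t: %u KB\n", cache_kb);
    	return 0;
    }
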
3349     diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
3350     index 1e82f787c160..c87560e1e3ef 100644
3351     --- a/arch/x86/kernel/early-quirks.c
3352     +++ b/arch/x86/kernel/early-quirks.c
3353     @@ -527,6 +527,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
3354     INTEL_SKL_IDS(&gen9_early_ops),
3355     INTEL_BXT_IDS(&gen9_early_ops),
3356     INTEL_KBL_IDS(&gen9_early_ops),
3357     + INTEL_CFL_IDS(&gen9_early_ops),
3358     INTEL_GLK_IDS(&gen9_early_ops),
3359     INTEL_CNL_IDS(&gen9_early_ops),
3360     };
3361     diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
3362     index 9c4e7ba6870c..cbded50ee601 100644
3363     --- a/arch/x86/kernel/espfix_64.c
3364     +++ b/arch/x86/kernel/espfix_64.c
3365     @@ -57,7 +57,7 @@
3366     # error "Need more virtual address space for the ESPFIX hack"
3367     #endif
3368    
3369     -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
3370     +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
3371    
3372     /* This contains the *bottom* address of the espfix stack */
3373     DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
3374     diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
3375     index c29020907886..b59e4fb40fd9 100644
3376     --- a/arch/x86/kernel/head_32.S
3377     +++ b/arch/x86/kernel/head_32.S
3378     @@ -37,7 +37,7 @@
3379     #define X86 new_cpu_data+CPUINFO_x86
3380     #define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
3381     #define X86_MODEL new_cpu_data+CPUINFO_x86_model
3382     -#define X86_MASK new_cpu_data+CPUINFO_x86_mask
3383     +#define X86_STEPPING new_cpu_data+CPUINFO_x86_stepping
3384     #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
3385     #define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
3386     #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
3387     @@ -332,7 +332,7 @@ ENTRY(startup_32_smp)
3388     shrb $4,%al
3389     movb %al,X86_MODEL
3390     andb $0x0f,%cl # mask mask revision
3391     - movb %cl,X86_MASK
3392     + movb %cl,X86_STEPPING
3393     movl %edx,X86_CAPABILITY
3394    
3395     .Lis486:
3396     diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
3397     index 3a4b12809ab5..bc6bc6689e68 100644
3398     --- a/arch/x86/kernel/mpparse.c
3399     +++ b/arch/x86/kernel/mpparse.c
3400     @@ -407,7 +407,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
3401     processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
3402     processor.cpuflag = CPU_ENABLED;
3403     processor.cpufeature = (boot_cpu_data.x86 << 8) |
3404     - (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
3405     + (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
3406     processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
3407     processor.reserved[0] = 0;
3408     processor.reserved[1] = 0;
3409     diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
3410     index 19a3e8f961c7..e1df9ef5d78c 100644
3411     --- a/arch/x86/kernel/paravirt.c
3412     +++ b/arch/x86/kernel/paravirt.c
3413     @@ -190,9 +190,9 @@ static void native_flush_tlb_global(void)
3414     __native_flush_tlb_global();
3415     }
3416    
3417     -static void native_flush_tlb_single(unsigned long addr)
3418     +static void native_flush_tlb_one_user(unsigned long addr)
3419     {
3420     - __native_flush_tlb_single(addr);
3421     + __native_flush_tlb_one_user(addr);
3422     }
3423    
3424     struct static_key paravirt_steal_enabled;
3425     @@ -391,7 +391,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
3426    
3427     .flush_tlb_user = native_flush_tlb,
3428     .flush_tlb_kernel = native_flush_tlb_global,
3429     - .flush_tlb_single = native_flush_tlb_single,
3430     + .flush_tlb_one_user = native_flush_tlb_one_user,
3431     .flush_tlb_others = native_flush_tlb_others,
3432    
3433     .pgd_alloc = __paravirt_pgd_alloc,
3434     diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
3435     index 307d3bac5f04..11eda21eb697 100644
3436     --- a/arch/x86/kernel/relocate_kernel_64.S
3437     +++ b/arch/x86/kernel/relocate_kernel_64.S
3438     @@ -68,6 +68,9 @@ relocate_kernel:
3439     movq %cr4, %rax
3440     movq %rax, CR4(%r11)
3441    
3442     + /* Save CR4. Required to enable the right paging mode later. */
3443     + movq %rax, %r13
3444     +
3445     /* zero out flags, and disable interrupts */
3446     pushq $0
3447     popfq
3448     @@ -126,8 +129,13 @@ identity_mapped:
3449     /*
3450     * Set cr4 to a known state:
3451     * - physical address extension enabled
3452     + * - 5-level paging, if it was enabled before
3453     */
3454     movl $X86_CR4_PAE, %eax
3455     + testq $X86_CR4_LA57, %r13
3456     + jz 1f
3457     + orl $X86_CR4_LA57, %eax
3458     +1:
3459     movq %rax, %cr4
3460    
3461     jmp 1f
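
The relocate_kernel change fixes kexec on 5-level-paging machines: CR4 is saved into %r13 early, and when the identity-mapped trampoline later resets CR4 to a minimal known state it carries the LA57 bit over, since dropping from 5-level to 4-level paging while paging is active would fault. The assembly, re-expressed as C pseudocode (read_cr4()/write_cr4() stand in for the MOV from/to CR4 instructions):

    unsigned long saved = read_cr4();	/* kept in %r13 across the jump */
    unsigned long cr4 = X86_CR4_PAE;	/* minimal known-good state */

    if (saved & X86_CR4_LA57)	/* booted with 5-level paging? */
    	cr4 |= X86_CR4_LA57;	/* then it must stay enabled */
    write_cr4(cr4);
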
3462     diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
3463     index b33e860d32fe..a66428dc92ae 100644
3464     --- a/arch/x86/kernel/traps.c
3465     +++ b/arch/x86/kernel/traps.c
3466     @@ -42,7 +42,6 @@
3467     #include <linux/edac.h>
3468     #endif
3469    
3470     -#include <asm/kmemcheck.h>
3471     #include <asm/stacktrace.h>
3472     #include <asm/processor.h>
3473     #include <asm/debugreg.h>
3474     @@ -181,7 +180,7 @@ int fixup_bug(struct pt_regs *regs, int trapnr)
3475     break;
3476    
3477     case BUG_TRAP_TYPE_WARN:
3478     - regs->ip += LEN_UD0;
3479     + regs->ip += LEN_UD2;
3480     return 1;
3481     }
3482    
3483     @@ -764,10 +763,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
3484     if (!dr6 && user_mode(regs))
3485     user_icebp = 1;
3486    
3487     - /* Catch kmemcheck conditions! */
3488     - if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
3489     - goto exit;
3490     -
3491     /* Store the virtualized DR6 value */
3492     tsk->thread.debugreg6 = dr6;
3493    
3494     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
3495     index beb7f8795bc1..ca000fc644bc 100644
3496     --- a/arch/x86/kvm/mmu.c
3497     +++ b/arch/x86/kvm/mmu.c
3498     @@ -5063,7 +5063,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
3499     typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
3500    
3501     /* The caller should hold mmu-lock before calling this function. */
3502     -static bool
3503     +static __always_inline bool
3504     slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
3505     slot_level_handler fn, int start_level, int end_level,
3506     gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
3507     @@ -5093,7 +5093,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
3508     return flush;
3509     }
3510    
3511     -static bool
3512     +static __always_inline bool
3513     slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
3514     slot_level_handler fn, int start_level, int end_level,
3515     bool lock_flush_tlb)
3516     @@ -5104,7 +5104,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
3517     lock_flush_tlb);
3518     }
3519    
3520     -static bool
3521     +static __always_inline bool
3522     slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
3523     slot_level_handler fn, bool lock_flush_tlb)
3524     {
3525     @@ -5112,7 +5112,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
3526     PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
3527     }
3528    
3529     -static bool
3530     +static __always_inline bool
3531     slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
3532     slot_level_handler fn, bool lock_flush_tlb)
3533     {
3534     @@ -5120,7 +5120,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
3535     PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
3536     }
3537    
3538     -static bool
3539     +static __always_inline bool
3540     slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
3541     slot_level_handler fn, bool lock_flush_tlb)
3542     {
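
The __always_inline annotations above are a retpoline-era performance fix rather than a functional one: once each slot_handle_*() wrapper is inlined into its caller, the fn callback is a known function and the compiler can emit a direct call, keeping the retpoline-thunked indirect branch out of the per-slot loop. A generic illustration of the idea (not the kvm code):

    typedef bool (*handler_t)(int level);

    static __always_inline bool walk_levels(handler_t fn, int lo, int hi)
    {
    	bool flush = false;
    	int level;

    	/* Inlined into a caller that passes a concrete function,
    	 * fn(level) compiles to a direct call. */
    	for (level = lo; level <= hi; level++)
    		flush |= fn(level);
    	return flush;
    }
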
3543     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
3544     index 0ea909ca45c2..dd35c6c50516 100644
3545     --- a/arch/x86/kvm/vmx.c
3546     +++ b/arch/x86/kvm/vmx.c
3547     @@ -10127,7 +10127,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
3548     if (cpu_has_vmx_msr_bitmap() &&
3549     nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS) &&
3550     nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
3551     - ;
3552     + vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
3553     + CPU_BASED_USE_MSR_BITMAPS);
3554     else
3555     vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
3556     CPU_BASED_USE_MSR_BITMAPS);
3557     @@ -10216,8 +10217,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
3558     * updated to reflect this when L1 (or its L2s) actually write to
3559     * the MSR.
3560     */
3561     - bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
3562     - bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
3563     + bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
3564     + bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
3565    
3566     if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
3567     !pred_cmd && !spec_ctrl)
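
Both vmx.c hunks repair the nested MSR-bitmap path. msr_write_intercepted_l01() returns true when L0 intercepts the write, but merging L1's bitmap only matters when the MSR is passed through to the guest, hence the negations added above; and on a successful merge, nested_get_vmcs12_pages() previously fell through an empty statement where it now explicitly enables the bitmap. The resulting shape, condensed from the hunks:

    /* Sketch of the fixed control flow. */
    if (merge_succeeded)
    	vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
    		      CPU_BASED_USE_MSR_BITMAPS);
    else
    	vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
    			CPU_BASED_USE_MSR_BITMAPS);
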
3568     diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
3569     index d6f848d1211d..2dd1fe13a37b 100644
3570     --- a/arch/x86/lib/cpu.c
3571     +++ b/arch/x86/lib/cpu.c
3572     @@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig)
3573     {
3574     unsigned int fam, model;
3575    
3576     - fam = x86_family(sig);
3577     + fam = x86_family(sig);
3578    
3579     model = (sig >> 4) & 0xf;
3580    
3581     diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
3582     index 52906808e277..27e9e90a8d35 100644
3583     --- a/arch/x86/mm/Makefile
3584     +++ b/arch/x86/mm/Makefile
3585     @@ -29,8 +29,6 @@ obj-$(CONFIG_X86_PTDUMP) += debug_pagetables.o
3586    
3587     obj-$(CONFIG_HIGHMEM) += highmem_32.o
3588    
3589     -obj-$(CONFIG_KMEMCHECK) += kmemcheck/
3590     -
3591     KASAN_SANITIZE_kasan_init_$(BITS).o := n
3592     obj-$(CONFIG_KASAN) += kasan_init_$(BITS).o
3593    
3594     diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
3595     index b264b590eeec..9150fe2c9b26 100644
3596     --- a/arch/x86/mm/fault.c
3597     +++ b/arch/x86/mm/fault.c
3598     @@ -20,7 +20,6 @@
3599     #include <asm/cpufeature.h> /* boot_cpu_has, ... */
3600     #include <asm/traps.h> /* dotraplinkage, ... */
3601     #include <asm/pgalloc.h> /* pgd_*(), ... */
3602     -#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
3603     #include <asm/fixmap.h> /* VSYSCALL_ADDR */
3604     #include <asm/vsyscall.h> /* emulate_vsyscall */
3605     #include <asm/vm86.h> /* struct vm86 */
3606     @@ -1257,8 +1256,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
3607     * Detect and handle instructions that would cause a page fault for
3608     * both a tracked kernel page and a userspace page.
3609     */
3610     - if (kmemcheck_active(regs))
3611     - kmemcheck_hide(regs);
3612     prefetchw(&mm->mmap_sem);
3613    
3614     if (unlikely(kmmio_fault(regs, address)))
3615     @@ -1281,9 +1278,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
3616     if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
3617     if (vmalloc_fault(address) >= 0)
3618     return;
3619     -
3620     - if (kmemcheck_fault(regs, address, error_code))
3621     - return;
3622     }
3623    
3624     /* Can handle a stale RO->RW TLB: */
3625     diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
3626     index 6b462a472a7b..82f5252c723a 100644
3627     --- a/arch/x86/mm/init.c
3628     +++ b/arch/x86/mm/init.c
3629     @@ -93,8 +93,7 @@ __ref void *alloc_low_pages(unsigned int num)
3630     unsigned int order;
3631    
3632     order = get_order((unsigned long)num << PAGE_SHIFT);
3633     - return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
3634     - __GFP_ZERO, order);
3635     + return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
3636     }
3637    
3638     if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
3639     @@ -171,12 +170,11 @@ static void enable_global_pages(void)
3640     static void __init probe_page_size_mask(void)
3641     {
3642     /*
3643     - * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
3644     - * use small pages.
3645     + * For pagealloc debugging, identity mapping will use small pages.
3646     * This will simplify cpa(), which otherwise needs to support splitting
3647     * large pages into small in interrupt context, etc.
3648     */
3649     - if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK))
3650     + if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
3651     page_size_mask |= 1 << PG_LEVEL_2M;
3652     else
3653     direct_gbpages = 0;
3654     diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
3655     index adcea90a2046..fe85d1204db8 100644
3656     --- a/arch/x86/mm/init_64.c
3657     +++ b/arch/x86/mm/init_64.c
3658     @@ -184,7 +184,7 @@ static __ref void *spp_getpage(void)
3659     void *ptr;
3660    
3661     if (after_bootmem)
3662     - ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
3663     + ptr = (void *) get_zeroed_page(GFP_ATOMIC);
3664     else
3665     ptr = alloc_bootmem_pages(PAGE_SIZE);
3666    
3667     @@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
3668     * It's enough to flush this one mapping.
3669     * (PGE mappings get flushed as well)
3670     */
3671     - __flush_tlb_one(vaddr);
3672     + __flush_tlb_one_kernel(vaddr);
3673     }
3674    
3675     void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
3676     diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
3677     index 34f0e1847dd6..bb120e59c597 100644
3678     --- a/arch/x86/mm/ioremap.c
3679     +++ b/arch/x86/mm/ioremap.c
3680     @@ -749,5 +749,5 @@ void __init __early_set_fixmap(enum fixed_addresses idx,
3681     set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
3682     else
3683     pte_clear(&init_mm, addr, pte);
3684     - __flush_tlb_one(addr);
3685     + __flush_tlb_one_kernel(addr);
3686     }
3687     diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile
3688     deleted file mode 100644
3689     index 520b3bce4095..000000000000
3690     --- a/arch/x86/mm/kmemcheck/Makefile
3691     +++ /dev/null
3692     @@ -1 +0,0 @@
3693     -obj-y := error.o kmemcheck.o opcode.o pte.o selftest.o shadow.o
3694     diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
3695     deleted file mode 100644
3696     index 872ec4159a68..000000000000
3697     --- a/arch/x86/mm/kmemcheck/error.c
3698     +++ /dev/null
3699     @@ -1,228 +0,0 @@
3700     -// SPDX-License-Identifier: GPL-2.0
3701     -#include <linux/interrupt.h>
3702     -#include <linux/kdebug.h>
3703     -#include <linux/kmemcheck.h>
3704     -#include <linux/kernel.h>
3705     -#include <linux/types.h>
3706     -#include <linux/ptrace.h>
3707     -#include <linux/stacktrace.h>
3708     -#include <linux/string.h>
3709     -
3710     -#include "error.h"
3711     -#include "shadow.h"
3712     -
3713     -enum kmemcheck_error_type {
3714     - KMEMCHECK_ERROR_INVALID_ACCESS,
3715     - KMEMCHECK_ERROR_BUG,
3716     -};
3717     -
3718     -#define SHADOW_COPY_SIZE (1 << CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT)
3719     -
3720     -struct kmemcheck_error {
3721     - enum kmemcheck_error_type type;
3722     -
3723     - union {
3724     - /* KMEMCHECK_ERROR_INVALID_ACCESS */
3725     - struct {
3726     - /* Kind of access that caused the error */
3727     - enum kmemcheck_shadow state;
3728     - /* Address and size of the erroneous read */
3729     - unsigned long address;
3730     - unsigned int size;
3731     - };
3732     - };
3733     -
3734     - struct pt_regs regs;
3735     - struct stack_trace trace;
3736     - unsigned long trace_entries[32];
3737     -
3738     - /* We compress it to a char. */
3739     - unsigned char shadow_copy[SHADOW_COPY_SIZE];
3740     - unsigned char memory_copy[SHADOW_COPY_SIZE];
3741     -};
3742     -
3743     -/*
3744     - * Create a ring queue of errors to output. We can't call printk() directly
3745     - * from the kmemcheck traps, since this may call the console drivers and
3746     - * result in a recursive fault.
3747     - */
3748     -static struct kmemcheck_error error_fifo[CONFIG_KMEMCHECK_QUEUE_SIZE];
3749     -static unsigned int error_count;
3750     -static unsigned int error_rd;
3751     -static unsigned int error_wr;
3752     -static unsigned int error_missed_count;
3753     -
3754     -static struct kmemcheck_error *error_next_wr(void)
3755     -{
3756     - struct kmemcheck_error *e;
3757     -
3758     - if (error_count == ARRAY_SIZE(error_fifo)) {
3759     - ++error_missed_count;
3760     - return NULL;
3761     - }
3762     -
3763     - e = &error_fifo[error_wr];
3764     - if (++error_wr == ARRAY_SIZE(error_fifo))
3765     - error_wr = 0;
3766     - ++error_count;
3767     - return e;
3768     -}
3769     -
3770     -static struct kmemcheck_error *error_next_rd(void)
3771     -{
3772     - struct kmemcheck_error *e;
3773     -
3774     - if (error_count == 0)
3775     - return NULL;
3776     -
3777     - e = &error_fifo[error_rd];
3778     - if (++error_rd == ARRAY_SIZE(error_fifo))
3779     - error_rd = 0;
3780     - --error_count;
3781     - return e;
3782     -}
3783     -
3784     -void kmemcheck_error_recall(void)
3785     -{
3786     - static const char *desc[] = {
3787     - [KMEMCHECK_SHADOW_UNALLOCATED] = "unallocated",
3788     - [KMEMCHECK_SHADOW_UNINITIALIZED] = "uninitialized",
3789     - [KMEMCHECK_SHADOW_INITIALIZED] = "initialized",
3790     - [KMEMCHECK_SHADOW_FREED] = "freed",
3791     - };
3792     -
3793     - static const char short_desc[] = {
3794     - [KMEMCHECK_SHADOW_UNALLOCATED] = 'a',
3795     - [KMEMCHECK_SHADOW_UNINITIALIZED] = 'u',
3796     - [KMEMCHECK_SHADOW_INITIALIZED] = 'i',
3797     - [KMEMCHECK_SHADOW_FREED] = 'f',
3798     - };
3799     -
3800     - struct kmemcheck_error *e;
3801     - unsigned int i;
3802     -
3803     - e = error_next_rd();
3804     - if (!e)
3805     - return;
3806     -
3807     - switch (e->type) {
3808     - case KMEMCHECK_ERROR_INVALID_ACCESS:
3809     - printk(KERN_WARNING "WARNING: kmemcheck: Caught %d-bit read from %s memory (%p)\n",
3810     - 8 * e->size, e->state < ARRAY_SIZE(desc) ?
3811     - desc[e->state] : "(invalid shadow state)",
3812     - (void *) e->address);
3813     -
3814     - printk(KERN_WARNING);
3815     - for (i = 0; i < SHADOW_COPY_SIZE; ++i)
3816     - printk(KERN_CONT "%02x", e->memory_copy[i]);
3817     - printk(KERN_CONT "\n");
3818     -
3819     - printk(KERN_WARNING);
3820     - for (i = 0; i < SHADOW_COPY_SIZE; ++i) {
3821     - if (e->shadow_copy[i] < ARRAY_SIZE(short_desc))
3822     - printk(KERN_CONT " %c", short_desc[e->shadow_copy[i]]);
3823     - else
3824     - printk(KERN_CONT " ?");
3825     - }
3826     - printk(KERN_CONT "\n");
3827     - printk(KERN_WARNING "%*c\n", 2 + 2
3828     - * (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^');
3829     - break;
3830     - case KMEMCHECK_ERROR_BUG:
3831     - printk(KERN_EMERG "ERROR: kmemcheck: Fatal error\n");
3832     - break;
3833     - }
3834     -
3835     - __show_regs(&e->regs, 1);
3836     - print_stack_trace(&e->trace, 0);
3837     -}
3838     -
3839     -static void do_wakeup(unsigned long data)
3840     -{
3841     - while (error_count > 0)
3842     - kmemcheck_error_recall();
3843     -
3844     - if (error_missed_count > 0) {
3845     - printk(KERN_WARNING "kmemcheck: Lost %d error reports because "
3846     - "the queue was too small\n", error_missed_count);
3847     - error_missed_count = 0;
3848     - }
3849     -}
3850     -
3851     -static DECLARE_TASKLET(kmemcheck_tasklet, &do_wakeup, 0);
3852     -
3853     -/*
3854     - * Save the context of an error report.
3855     - */
3856     -void kmemcheck_error_save(enum kmemcheck_shadow state,
3857     - unsigned long address, unsigned int size, struct pt_regs *regs)
3858     -{
3859     - static unsigned long prev_ip;
3860     -
3861     - struct kmemcheck_error *e;
3862     - void *shadow_copy;
3863     - void *memory_copy;
3864     -
3865     - /* Don't report several adjacent errors from the same EIP. */
3866     - if (regs->ip == prev_ip)
3867     - return;
3868     - prev_ip = regs->ip;
3869     -
3870     - e = error_next_wr();
3871     - if (!e)
3872     - return;
3873     -
3874     - e->type = KMEMCHECK_ERROR_INVALID_ACCESS;
3875     -
3876     - e->state = state;
3877     - e->address = address;
3878     - e->size = size;
3879     -
3880     - /* Save regs */
3881     - memcpy(&e->regs, regs, sizeof(*regs));
3882     -
3883     - /* Save stack trace */
3884     - e->trace.nr_entries = 0;
3885     - e->trace.entries = e->trace_entries;
3886     - e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
3887     - e->trace.skip = 0;
3888     - save_stack_trace_regs(regs, &e->trace);
3889     -
3890     - /* Round address down to nearest 16 bytes */
3891     - shadow_copy = kmemcheck_shadow_lookup(address
3892     - & ~(SHADOW_COPY_SIZE - 1));
3893     - BUG_ON(!shadow_copy);
3894     -
3895     - memcpy(e->shadow_copy, shadow_copy, SHADOW_COPY_SIZE);
3896     -
3897     - kmemcheck_show_addr(address);
3898     - memory_copy = (void *) (address & ~(SHADOW_COPY_SIZE - 1));
3899     - memcpy(e->memory_copy, memory_copy, SHADOW_COPY_SIZE);
3900     - kmemcheck_hide_addr(address);
3901     -
3902     - tasklet_hi_schedule_first(&kmemcheck_tasklet);
3903     -}
3904     -
3905     -/*
3906     - * Save the context of a kmemcheck bug.
3907     - */
3908     -void kmemcheck_error_save_bug(struct pt_regs *regs)
3909     -{
3910     - struct kmemcheck_error *e;
3911     -
3912     - e = error_next_wr();
3913     - if (!e)
3914     - return;
3915     -
3916     - e->type = KMEMCHECK_ERROR_BUG;
3917     -
3918     - memcpy(&e->regs, regs, sizeof(*regs));
3919     -
3920     - e->trace.nr_entries = 0;
3921     - e->trace.entries = e->trace_entries;
3922     - e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
3923     - e->trace.skip = 1;
3924     - save_stack_trace(&e->trace);
3925     -
3926     - tasklet_hi_schedule_first(&kmemcheck_tasklet);
3927     -}
3928     diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h
3929     deleted file mode 100644
3930     index 39f80d7a874d..000000000000
3931     --- a/arch/x86/mm/kmemcheck/error.h
3932     +++ /dev/null
3933     @@ -1,16 +0,0 @@
3934     -/* SPDX-License-Identifier: GPL-2.0 */
3935     -#ifndef ARCH__X86__MM__KMEMCHECK__ERROR_H
3936     -#define ARCH__X86__MM__KMEMCHECK__ERROR_H
3937     -
3938     -#include <linux/ptrace.h>
3939     -
3940     -#include "shadow.h"
3941     -
3942     -void kmemcheck_error_save(enum kmemcheck_shadow state,
3943     - unsigned long address, unsigned int size, struct pt_regs *regs);
3944     -
3945     -void kmemcheck_error_save_bug(struct pt_regs *regs);
3946     -
3947     -void kmemcheck_error_recall(void);
3948     -
3949     -#endif
3950     diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
3951     deleted file mode 100644
3952     index 4515bae36bbe..000000000000
3953     --- a/arch/x86/mm/kmemcheck/kmemcheck.c
3954     +++ /dev/null
3955     @@ -1,658 +0,0 @@
3956     -/**
3957     - * kmemcheck - a heavyweight memory checker for the linux kernel
3958     - * Copyright (C) 2007, 2008 Vegard Nossum <vegardno@ifi.uio.no>
3959     - * (With a lot of help from Ingo Molnar and Pekka Enberg.)
3960     - *
3961     - * This program is free software; you can redistribute it and/or modify
3962     - * it under the terms of the GNU General Public License (version 2) as
3963     - * published by the Free Software Foundation.
3964     - */
3965     -
3966     -#include <linux/init.h>
3967     -#include <linux/interrupt.h>
3968     -#include <linux/kallsyms.h>
3969     -#include <linux/kernel.h>
3970     -#include <linux/kmemcheck.h>
3971     -#include <linux/mm.h>
3972     -#include <linux/page-flags.h>
3973     -#include <linux/percpu.h>
3974     -#include <linux/ptrace.h>
3975     -#include <linux/string.h>
3976     -#include <linux/types.h>
3977     -
3978     -#include <asm/cacheflush.h>
3979     -#include <asm/kmemcheck.h>
3980     -#include <asm/pgtable.h>
3981     -#include <asm/tlbflush.h>
3982     -
3983     -#include "error.h"
3984     -#include "opcode.h"
3985     -#include "pte.h"
3986     -#include "selftest.h"
3987     -#include "shadow.h"
3988     -
3989     -
3990     -#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT
3991     -# define KMEMCHECK_ENABLED 0
3992     -#endif
3993     -
3994     -#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT
3995     -# define KMEMCHECK_ENABLED 1
3996     -#endif
3997     -
3998     -#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT
3999     -# define KMEMCHECK_ENABLED 2
4000     -#endif
4001     -
4002     -int kmemcheck_enabled = KMEMCHECK_ENABLED;
4003     -
4004     -int __init kmemcheck_init(void)
4005     -{
4006     -#ifdef CONFIG_SMP
4007     - /*
4008     - * Limit SMP to use a single CPU. We rely on the fact that this code
4009     - * runs before SMP is set up.
4010     - */
4011     - if (setup_max_cpus > 1) {
4012     - printk(KERN_INFO
4013     - "kmemcheck: Limiting number of CPUs to 1.\n");
4014     - setup_max_cpus = 1;
4015     - }
4016     -#endif
4017     -
4018     - if (!kmemcheck_selftest()) {
4019     - printk(KERN_INFO "kmemcheck: self-tests failed; disabling\n");
4020     - kmemcheck_enabled = 0;
4021     - return -EINVAL;
4022     - }
4023     -
4024     - printk(KERN_INFO "kmemcheck: Initialized\n");
4025     - return 0;
4026     -}
4027     -
4028     -early_initcall(kmemcheck_init);
4029     -
4030     -/*
4031     - * We need to parse the kmemcheck= option before any memory is allocated.
4032     - */
4033     -static int __init param_kmemcheck(char *str)
4034     -{
4035     - int val;
4036     - int ret;
4037     -
4038     - if (!str)
4039     - return -EINVAL;
4040     -
4041     - ret = kstrtoint(str, 0, &val);
4042     - if (ret)
4043     - return ret;
4044     - kmemcheck_enabled = val;
4045     - return 0;
4046     -}
4047     -
4048     -early_param("kmemcheck", param_kmemcheck);
4049     -
4050     -int kmemcheck_show_addr(unsigned long address)
4051     -{
4052     - pte_t *pte;
4053     -
4054     - pte = kmemcheck_pte_lookup(address);
4055     - if (!pte)
4056     - return 0;
4057     -
4058     - set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
4059     - __flush_tlb_one(address);
4060     - return 1;
4061     -}
4062     -
4063     -int kmemcheck_hide_addr(unsigned long address)
4064     -{
4065     - pte_t *pte;
4066     -
4067     - pte = kmemcheck_pte_lookup(address);
4068     - if (!pte)
4069     - return 0;
4070     -
4071     - set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
4072     - __flush_tlb_one(address);
4073     - return 1;
4074     -}
4075     -
4076     -struct kmemcheck_context {
4077     - bool busy;
4078     - int balance;
4079     -
4080     - /*
4081     - * There can be at most two memory operands to an instruction, but
4082     - * each address can cross a page boundary -- so we may need up to
4083     - * four addresses that must be hidden/revealed for each fault.
4084     - */
4085     - unsigned long addr[4];
4086     - unsigned long n_addrs;
4087     - unsigned long flags;
4088     -
4089     - /* Data size of the instruction that caused a fault. */
4090     - unsigned int size;
4091     -};
4092     -
4093     -static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context);
4094     -
4095     -bool kmemcheck_active(struct pt_regs *regs)
4096     -{
4097     - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
4098     -
4099     - return data->balance > 0;
4100     -}
4101     -
4102     -/* Save an address that needs to be shown/hidden */
4103     -static void kmemcheck_save_addr(unsigned long addr)
4104     -{
4105     - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
4106     -
4107     - BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr));
4108     - data->addr[data->n_addrs++] = addr;
4109     -}
4110     -
4111     -static unsigned int kmemcheck_show_all(void)
4112     -{
4113     - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
4114     - unsigned int i;
4115     - unsigned int n;
4116     -
4117     - n = 0;
4118     - for (i = 0; i < data->n_addrs; ++i)
4119     - n += kmemcheck_show_addr(data->addr[i]);
4120     -
4121     - return n;
4122     -}
4123     -
4124     -static unsigned int kmemcheck_hide_all(void)
4125     -{
4126     - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
4127     - unsigned int i;
4128     - unsigned int n;
4129     -
4130     - n = 0;
4131     - for (i = 0; i < data->n_addrs; ++i)
4132     - n += kmemcheck_hide_addr(data->addr[i]);
4133     -
4134     - return n;
4135     -}
4136     -
4137     -/*
4138     - * Called from the #PF handler.
4139     - */
4140     -void kmemcheck_show(struct pt_regs *regs)
4141     -{
4142     - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
4143     -
4144     - BUG_ON(!irqs_disabled());
4145     -
4146     - if (unlikely(data->balance != 0)) {
4147     - kmemcheck_show_all();
4148     - kmemcheck_error_save_bug(regs);
4149     - data->balance = 0;
4150     - return;
4151     - }
4152     -
4153     - /*
4154     - * None of the addresses actually belonged to kmemcheck. Note that
4155     - * this is not an error.
4156     - */
4157     - if (kmemcheck_show_all() == 0)
4158     - return;
4159     -
4160     - ++data->balance;
4161     -
4162     - /*
4163     - * The IF needs to be cleared as well, so that the faulting
4164     - * instruction can run "uninterrupted". Otherwise, we might take
4165     - * an interrupt and start executing that before we've had a chance
4166     - * to hide the page again.
4167     - *
4168     - * NOTE: In the rare case of multiple faults, we must not override
4169     - * the original flags:
4170     - */
4171     - if (!(regs->flags & X86_EFLAGS_TF))
4172     - data->flags = regs->flags;
4173     -
4174     - regs->flags |= X86_EFLAGS_TF;
4175     - regs->flags &= ~X86_EFLAGS_IF;
4176     -}
4177     -
4178     -/*
4179     - * Called from the #DB handler.
4180     - */
4181     -void kmemcheck_hide(struct pt_regs *regs)
4182     -{
4183     - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
4184     - int n;
4185     -
4186     - BUG_ON(!irqs_disabled());
4187     -
4188     - if (unlikely(data->balance != 1)) {
4189     - kmemcheck_show_all();
4190     - kmemcheck_error_save_bug(regs);
4191     - data->n_addrs = 0;
4192     - data->balance = 0;
4193     -
4194     - if (!(data->flags & X86_EFLAGS_TF))
4195     - regs->flags &= ~X86_EFLAGS_TF;
4196     - if (data->flags & X86_EFLAGS_IF)
4197     - regs->flags |= X86_EFLAGS_IF;
4198     - return;
4199     - }
4200     -
4201     - if (kmemcheck_enabled)
4202     - n = kmemcheck_hide_all();
4203     - else
4204     - n = kmemcheck_show_all();
4205     -
4206     - if (n == 0)
4207     - return;
4208     -
4209     - --data->balance;
4210     -
4211     - data->n_addrs = 0;
4212     -
4213     - if (!(data->flags & X86_EFLAGS_TF))
4214     - regs->flags &= ~X86_EFLAGS_TF;
4215     - if (data->flags & X86_EFLAGS_IF)
4216     - regs->flags |= X86_EFLAGS_IF;
4217     -}
4218     -
4219     -void kmemcheck_show_pages(struct page *p, unsigned int n)
4220     -{
4221     - unsigned int i;
4222     -
4223     - for (i = 0; i < n; ++i) {
4224     - unsigned long address;
4225     - pte_t *pte;
4226     - unsigned int level;
4227     -
4228     - address = (unsigned long) page_address(&p[i]);
4229     - pte = lookup_address(address, &level);
4230     - BUG_ON(!pte);
4231     - BUG_ON(level != PG_LEVEL_4K);
4232     -
4233     - set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
4234     - set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN));
4235     - __flush_tlb_one(address);
4236     - }
4237     -}
4238     -
4239     -bool kmemcheck_page_is_tracked(struct page *p)
4240     -{
4241     - /* This will also check the "hidden" flag of the PTE. */
4242     - return kmemcheck_pte_lookup((unsigned long) page_address(p));
4243     -}
4244     -
4245     -void kmemcheck_hide_pages(struct page *p, unsigned int n)
4246     -{
4247     - unsigned int i;
4248     -
4249     - for (i = 0; i < n; ++i) {
4250     - unsigned long address;
4251     - pte_t *pte;
4252     - unsigned int level;
4253     -
4254     - address = (unsigned long) page_address(&p[i]);
4255     - pte = lookup_address(address, &level);
4256     - BUG_ON(!pte);
4257     - BUG_ON(level != PG_LEVEL_4K);
4258     -
4259     - set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
4260     - set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN));
4261     - __flush_tlb_one(address);
4262     - }
4263     -}
4264     -
4265     -/* Access may NOT cross page boundary */
4266     -static void kmemcheck_read_strict(struct pt_regs *regs,
4267     - unsigned long addr, unsigned int size)
4268     -{
4269     - void *shadow;
4270     - enum kmemcheck_shadow status;
4271     -
4272     - shadow = kmemcheck_shadow_lookup(addr);
4273     - if (!shadow)
4274     - return;
4275     -
4276     - kmemcheck_save_addr(addr);
4277     - status = kmemcheck_shadow_test(shadow, size);
4278     - if (status == KMEMCHECK_SHADOW_INITIALIZED)
4279     - return;
4280     -
4281     - if (kmemcheck_enabled)
4282     - kmemcheck_error_save(status, addr, size, regs);
4283     -
4284     - if (kmemcheck_enabled == 2)
4285     - kmemcheck_enabled = 0;
4286     -
4287     - /* Don't warn about it again. */
4288     - kmemcheck_shadow_set(shadow, size);
4289     -}
4290     -
4291     -bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
4292     -{
4293     - enum kmemcheck_shadow status;
4294     - void *shadow;
4295     -
4296     - shadow = kmemcheck_shadow_lookup(addr);
4297     - if (!shadow)
4298     - return true;
4299     -
4300     - status = kmemcheck_shadow_test_all(shadow, size);
4301     -
4302     - return status == KMEMCHECK_SHADOW_INITIALIZED;
4303     -}
4304     -
4305     -/* Access may cross page boundary */
4306     -static void kmemcheck_read(struct pt_regs *regs,
4307     - unsigned long addr, unsigned int size)
4308     -{
4309     - unsigned long page = addr & PAGE_MASK;
4310     - unsigned long next_addr = addr + size - 1;
4311     - unsigned long next_page = next_addr & PAGE_MASK;
4312     -
4313     - if (likely(page == next_page)) {
4314     - kmemcheck_read_strict(regs, addr, size);
4315     - return;
4316     - }
4317     -
4318     - /*
4319     - * What we do is basically to split the access across the
4320     - * two pages and handle each part separately. Yes, this means
4321     - * that we may now see reads that are 3 + 5 bytes, for
4322     - * example (and if both are uninitialized, there will be two
4323     - * reports), but it makes the code a lot simpler.
4324     - */
4325     - kmemcheck_read_strict(regs, addr, next_page - addr);
4326     - kmemcheck_read_strict(regs, next_page, next_addr - next_page);
4327     -}
4328     -
4329     -static void kmemcheck_write_strict(struct pt_regs *regs,
4330     - unsigned long addr, unsigned int size)
4331     -{
4332     - void *shadow;
4333     -
4334     - shadow = kmemcheck_shadow_lookup(addr);
4335     - if (!shadow)
4336     - return;
4337     -
4338     - kmemcheck_save_addr(addr);
4339     - kmemcheck_shadow_set(shadow, size);
4340     -}
4341     -
4342     -static void kmemcheck_write(struct pt_regs *regs,
4343     - unsigned long addr, unsigned int size)
4344     -{
4345     - unsigned long page = addr & PAGE_MASK;
4346     - unsigned long next_addr = addr + size - 1;
4347     - unsigned long next_page = next_addr & PAGE_MASK;
4348     -
4349     - if (likely(page == next_page)) {
4350     - kmemcheck_write_strict(regs, addr, size);
4351     - return;
4352     - }
4353     -
4354     - /* See comment in kmemcheck_read(). */
4355     - kmemcheck_write_strict(regs, addr, next_page - addr);
4356     - kmemcheck_write_strict(regs, next_page, next_addr - next_page);
4357     -}
4358     -
4359     -/*
4360     - * Copying is hard. We have two addresses, each of which may be split across
4361     - * a page (and each page will have different shadow addresses).
4362     - */
4363     -static void kmemcheck_copy(struct pt_regs *regs,
4364     - unsigned long src_addr, unsigned long dst_addr, unsigned int size)
4365     -{
4366     - uint8_t shadow[8];
4367     - enum kmemcheck_shadow status;
4368     -
4369     - unsigned long page;
4370     - unsigned long next_addr;
4371     - unsigned long next_page;
4372     -
4373     - uint8_t *x;
4374     - unsigned int i;
4375     - unsigned int n;
4376     -
4377     - BUG_ON(size > sizeof(shadow));
4378     -
4379     - page = src_addr & PAGE_MASK;
4380     - next_addr = src_addr + size - 1;
4381     - next_page = next_addr & PAGE_MASK;
4382     -
4383     - if (likely(page == next_page)) {
4384     - /* Same page */
4385     - x = kmemcheck_shadow_lookup(src_addr);
4386     - if (x) {
4387     - kmemcheck_save_addr(src_addr);
4388     - for (i = 0; i < size; ++i)
4389     - shadow[i] = x[i];
4390     - } else {
4391     - for (i = 0; i < size; ++i)
4392     - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
4393     - }
4394     - } else {
4395     - n = next_page - src_addr;
4396     - BUG_ON(n > sizeof(shadow));
4397     -
4398     - /* First page */
4399     - x = kmemcheck_shadow_lookup(src_addr);
4400     - if (x) {
4401     - kmemcheck_save_addr(src_addr);
4402     - for (i = 0; i < n; ++i)
4403     - shadow[i] = x[i];
4404     - } else {
4405     - /* Not tracked */
4406     - for (i = 0; i < n; ++i)
4407     - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
4408     - }
4409     -
4410     - /* Second page */
4411     - x = kmemcheck_shadow_lookup(next_page);
4412     - if (x) {
4413     - kmemcheck_save_addr(next_page);
4414     - for (i = n; i < size; ++i)
4415     - shadow[i] = x[i - n];
4416     - } else {
4417     - /* Not tracked */
4418     - for (i = n; i < size; ++i)
4419     - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
4420     - }
4421     - }
4422     -
4423     - page = dst_addr & PAGE_MASK;
4424     - next_addr = dst_addr + size - 1;
4425     - next_page = next_addr & PAGE_MASK;
4426     -
4427     - if (likely(page == next_page)) {
4428     - /* Same page */
4429     - x = kmemcheck_shadow_lookup(dst_addr);
4430     - if (x) {
4431     - kmemcheck_save_addr(dst_addr);
4432     - for (i = 0; i < size; ++i) {
4433     - x[i] = shadow[i];
4434     - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
4435     - }
4436     - }
4437     - } else {
4438     - n = next_page - dst_addr;
4439     - BUG_ON(n > sizeof(shadow));
4440     -
4441     - /* First page */
4442     - x = kmemcheck_shadow_lookup(dst_addr);
4443     - if (x) {
4444     - kmemcheck_save_addr(dst_addr);
4445     - for (i = 0; i < n; ++i) {
4446     - x[i] = shadow[i];
4447     - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
4448     - }
4449     - }
4450     -
4451     - /* Second page */
4452     - x = kmemcheck_shadow_lookup(next_page);
4453     - if (x) {
4454     - kmemcheck_save_addr(next_page);
4455     - for (i = n; i < size; ++i) {
4456     - x[i - n] = shadow[i];
4457     - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
4458     - }
4459     - }
4460     - }
4461     -
4462     - status = kmemcheck_shadow_test(shadow, size);
4463     - if (status == KMEMCHECK_SHADOW_INITIALIZED)
4464     - return;
4465     -
4466     - if (kmemcheck_enabled)
4467     - kmemcheck_error_save(status, src_addr, size, regs);
4468     -
4469     - if (kmemcheck_enabled == 2)
4470     - kmemcheck_enabled = 0;
4471     -}
4472     -
4473     -enum kmemcheck_method {
4474     - KMEMCHECK_READ,
4475     - KMEMCHECK_WRITE,
4476     -};
4477     -
4478     -static void kmemcheck_access(struct pt_regs *regs,
4479     - unsigned long fallback_address, enum kmemcheck_method fallback_method)
4480     -{
4481     - const uint8_t *insn;
4482     - const uint8_t *insn_primary;
4483     - unsigned int size;
4484     -
4485     - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
4486     -
4487     - /* Recursive fault -- ouch. */
4488     - if (data->busy) {
4489     - kmemcheck_show_addr(fallback_address);
4490     - kmemcheck_error_save_bug(regs);
4491     - return;
4492     - }
4493     -
4494     - data->busy = true;
4495     -
4496     - insn = (const uint8_t *) regs->ip;
4497     - insn_primary = kmemcheck_opcode_get_primary(insn);
4498     -
4499     - kmemcheck_opcode_decode(insn, &size);
4500     -
4501     - switch (insn_primary[0]) {
4502     -#ifdef CONFIG_KMEMCHECK_BITOPS_OK
4503     - /* AND, OR, XOR */
4504     - /*
4505     - * Unfortunately, these instructions have to be excluded from
4506     - * our regular checking since they access only some (and not
4507     - * all) bits. This clears out "bogus" bitfield-access warnings.
4508     - */
4509     - case 0x80:
4510     - case 0x81:
4511     - case 0x82:
4512     - case 0x83:
4513     - switch ((insn_primary[1] >> 3) & 7) {
4514     - /* OR */
4515     - case 1:
4516     - /* AND */
4517     - case 4:
4518     - /* XOR */
4519     - case 6:
4520     - kmemcheck_write(regs, fallback_address, size);
4521     - goto out;
4522     -
4523     - /* ADD */
4524     - case 0:
4525     - /* ADC */
4526     - case 2:
4527     - /* SBB */
4528     - case 3:
4529     - /* SUB */
4530     - case 5:
4531     - /* CMP */
4532     - case 7:
4533     - break;
4534     - }
4535     - break;
4536     -#endif
4537     -
4538     - /* MOVS, MOVSB, MOVSW, MOVSD */
4539     - case 0xa4:
4540     - case 0xa5:
4541     - /*
4542     - * These instructions are special because they take two
4543     - * addresses, but we only get one page fault.
4544     - */
4545     - kmemcheck_copy(regs, regs->si, regs->di, size);
4546     - goto out;
4547     -
4548     - /* CMPS, CMPSB, CMPSW, CMPSD */
4549     - case 0xa6:
4550     - case 0xa7:
4551     - kmemcheck_read(regs, regs->si, size);
4552     - kmemcheck_read(regs, regs->di, size);
4553     - goto out;
4554     - }
4555     -
4556     - /*
4557     - * If the opcode isn't special in any way, we use the data from the
4558     - * page fault handler to determine the address and type of memory
4559     - * access.
4560     - */
4561     - switch (fallback_method) {
4562     - case KMEMCHECK_READ:
4563     - kmemcheck_read(regs, fallback_address, size);
4564     - goto out;
4565     - case KMEMCHECK_WRITE:
4566     - kmemcheck_write(regs, fallback_address, size);
4567     - goto out;
4568     - }
4569     -
4570     -out:
4571     - data->busy = false;
4572     -}
4573     -
4574     -bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
4575     - unsigned long error_code)
4576     -{
4577     - pte_t *pte;
4578     -
4579     - /*
4580     - * XXX: Is it safe to assume that memory accesses from virtual 86
4581     - * mode or non-kernel code segments will _never_ access kernel
4582     - * memory (e.g. tracked pages)? For now, we need this to avoid
4583     - * invoking kmemcheck for PnP BIOS calls.
4584     - */
4585     - if (regs->flags & X86_VM_MASK)
4586     - return false;
4587     - if (regs->cs != __KERNEL_CS)
4588     - return false;
4589     -
4590     - pte = kmemcheck_pte_lookup(address);
4591     - if (!pte)
4592     - return false;
4593     -
4594     - WARN_ON_ONCE(in_nmi());
4595     -
4596     - if (error_code & 2)
4597     - kmemcheck_access(regs, address, KMEMCHECK_WRITE);
4598     - else
4599     - kmemcheck_access(regs, address, KMEMCHECK_READ);
4600     -
4601     - kmemcheck_show(regs);
4602     - return true;
4603     -}
4604     -
4605     -bool kmemcheck_trap(struct pt_regs *regs)
4606     -{
4607     - if (!kmemcheck_active(regs))
4608     - return false;
4609     -
4610     - /* We're done. */
4611     - kmemcheck_hide(regs);
4612     - return true;
4613     -}
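
The page-boundary handling removed above (kmemcheck_read()/kmemcheck_write()) is a self-contained trick: take the page of the first byte and of the last byte, and if they differ, process the two halves separately. A minimal user-space sketch of the same idea, assuming 4 KiB pages; check_part() is a hypothetical stand-in for kmemcheck_read_strict(), and the tail length is written as the remainder of size so the two parts cover the whole access:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Hypothetical stand-in for kmemcheck_read_strict(): the part it is
 * given is guaranteed not to cross a page boundary. */
static void check_part(unsigned long addr, unsigned long size)
{
	printf("check %#lx..%#lx (%lu bytes)\n",
	       addr, addr + size - 1, size);
}

/* Same split as kmemcheck_read(): if first and last byte live on
 * different pages, handle the two halves separately. */
static void check_access(unsigned long addr, unsigned long size)
{
	unsigned long next_page = (addr + size - 1) & PAGE_MASK;

	if ((addr & PAGE_MASK) == next_page) {
		check_part(addr, size);
		return;
	}
	check_part(addr, next_page - addr);		  /* head */
	check_part(next_page, size - (next_page - addr)); /* tail */
}

int main(void)
{
	check_access(0x2ffd, 8);	/* crosses 0x3000 */
	return 0;
}

With addr = 0x2ffd and size = 8 the access crosses 0x3000 and splits into 3 + 5 bytes, matching the "3 + 5 bytes" example in the comment in kmemcheck_read().
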
4614     diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c
4615     deleted file mode 100644
4616     index df8109ddf7fe..000000000000
4617     --- a/arch/x86/mm/kmemcheck/opcode.c
4618     +++ /dev/null
4619     @@ -1,107 +0,0 @@
4620     -// SPDX-License-Identifier: GPL-2.0
4621     -#include <linux/types.h>
4622     -
4623     -#include "opcode.h"
4624     -
4625     -static bool opcode_is_prefix(uint8_t b)
4626     -{
4627     - return
4628     - /* Group 1 */
4629     - b == 0xf0 || b == 0xf2 || b == 0xf3
4630     - /* Group 2 */
4631     - || b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26
4632     - || b == 0x64 || b == 0x65
4633     - /* Group 3 */
4634     - || b == 0x66
4635     - /* Group 4 */
4636     - || b == 0x67;
4637     -}
4638     -
4639     -#ifdef CONFIG_X86_64
4640     -static bool opcode_is_rex_prefix(uint8_t b)
4641     -{
4642     - return (b & 0xf0) == 0x40;
4643     -}
4644     -#else
4645     -static bool opcode_is_rex_prefix(uint8_t b)
4646     -{
4647     - return false;
4648     -}
4649     -#endif
4650     -
4651     -#define REX_W (1 << 3)
4652     -
4653     -/*
4654     - * This is a VERY crude opcode decoder. We only need to find the size of the
4655     - * load/store that caused our #PF and this should work for all the opcodes
4656     - * that we care about. Moreover, the ones who invented this instruction set
4657     - * should be shot.
4658     - */
4659     -void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size)
4660     -{
4661     - /* Default operand size */
4662     - int operand_size_override = 4;
4663     -
4664     - /* prefixes */
4665     - for (; opcode_is_prefix(*op); ++op) {
4666     - if (*op == 0x66)
4667     - operand_size_override = 2;
4668     - }
4669     -
4670     - /* REX prefix */
4671     - if (opcode_is_rex_prefix(*op)) {
4672     - uint8_t rex = *op;
4673     -
4674     - ++op;
4675     - if (rex & REX_W) {
4676     - switch (*op) {
4677     - case 0x63:
4678     - *size = 4;
4679     - return;
4680     - case 0x0f:
4681     - ++op;
4682     -
4683     - switch (*op) {
4684     - case 0xb6:
4685     - case 0xbe:
4686     - *size = 1;
4687     - return;
4688     - case 0xb7:
4689     - case 0xbf:
4690     - *size = 2;
4691     - return;
4692     - }
4693     -
4694     - break;
4695     - }
4696     -
4697     - *size = 8;
4698     - return;
4699     - }
4700     - }
4701     -
4702     - /* escape opcode */
4703     - if (*op == 0x0f) {
4704     - ++op;
4705     -
4706     - /*
4707     - * This is move with zero-extend and sign-extend, respectively;
4708     - * we don't have to think about 0xb6/0xbe, because this is
4709     - * already handled in the conditional below.
4710     - */
4711     - if (*op == 0xb7 || *op == 0xbf)
4712     - operand_size_override = 2;
4713     - }
4714     -
4715     - *size = (*op & 1) ? operand_size_override : 1;
4716     -}
4717     -
4718     -const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op)
4719     -{
4720     - /* skip prefixes */
4721     - while (opcode_is_prefix(*op))
4722     - ++op;
4723     - if (opcode_is_rex_prefix(*op))
4724     - ++op;
4725     - return op;
4726     -}
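
The heart of the decoder deleted above is its last line: the low bit of the primary opcode selects a byte-sized access, otherwise the (possibly prefix-overridden) operand size applies. A toy cut-down version, assuming only the 0x66 operand-size prefix and ignoring the REX and 0x0f escape handling shown above:

#include <assert.h>
#include <stdint.h>

/* Hypothetical reduced decoder: low opcode bit means a byte-sized
 * access; otherwise use the default size 4, or 2 after 0x66. */
static unsigned int insn_size(const uint8_t *op)
{
	unsigned int operand_size = 4;

	if (*op == 0x66) {		/* operand-size override prefix */
		operand_size = 2;
		++op;
	}
	return (*op & 1) ? operand_size : 1;
}

int main(void)
{
	assert(insn_size((const uint8_t *)"\x88") == 1);     /* mov r/m8, r8   */
	assert(insn_size((const uint8_t *)"\x89") == 4);     /* mov r/m32, r32 */
	assert(insn_size((const uint8_t *)"\x66\x89") == 2); /* 16-bit mov     */
	return 0;
}

Anything beyond this is, as the original comment admits, crude pattern matching over the handful of opcodes kmemcheck actually faults on.
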
4727     diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h
4728     deleted file mode 100644
4729     index 51a1ce94c24a..000000000000
4730     --- a/arch/x86/mm/kmemcheck/opcode.h
4731     +++ /dev/null
4732     @@ -1,10 +0,0 @@
4733     -/* SPDX-License-Identifier: GPL-2.0 */
4734     -#ifndef ARCH__X86__MM__KMEMCHECK__OPCODE_H
4735     -#define ARCH__X86__MM__KMEMCHECK__OPCODE_H
4736     -
4737     -#include <linux/types.h>
4738     -
4739     -void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size);
4740     -const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op);
4741     -
4742     -#endif
4743     diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c
4744     deleted file mode 100644
4745     index 8a03be90272a..000000000000
4746     --- a/arch/x86/mm/kmemcheck/pte.c
4747     +++ /dev/null
4748     @@ -1,23 +0,0 @@
4749     -// SPDX-License-Identifier: GPL-2.0
4750     -#include <linux/mm.h>
4751     -
4752     -#include <asm/pgtable.h>
4753     -
4754     -#include "pte.h"
4755     -
4756     -pte_t *kmemcheck_pte_lookup(unsigned long address)
4757     -{
4758     - pte_t *pte;
4759     - unsigned int level;
4760     -
4761     - pte = lookup_address(address, &level);
4762     - if (!pte)
4763     - return NULL;
4764     - if (level != PG_LEVEL_4K)
4765     - return NULL;
4766     - if (!pte_hidden(*pte))
4767     - return NULL;
4768     -
4769     - return pte;
4770     -}
4771     -
4772     diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h
4773     deleted file mode 100644
4774     index b595612382c2..000000000000
4775     --- a/arch/x86/mm/kmemcheck/pte.h
4776     +++ /dev/null
4777     @@ -1,11 +0,0 @@
4778     -/* SPDX-License-Identifier: GPL-2.0 */
4779     -#ifndef ARCH__X86__MM__KMEMCHECK__PTE_H
4780     -#define ARCH__X86__MM__KMEMCHECK__PTE_H
4781     -
4782     -#include <linux/mm.h>
4783     -
4784     -#include <asm/pgtable.h>
4785     -
4786     -pte_t *kmemcheck_pte_lookup(unsigned long address);
4787     -
4788     -#endif
4789     diff --git a/arch/x86/mm/kmemcheck/selftest.c b/arch/x86/mm/kmemcheck/selftest.c
4790     deleted file mode 100644
4791     index 7ce0be1f99eb..000000000000
4792     --- a/arch/x86/mm/kmemcheck/selftest.c
4793     +++ /dev/null
4794     @@ -1,71 +0,0 @@
4795     -// SPDX-License-Identifier: GPL-2.0
4796     -#include <linux/bug.h>
4797     -#include <linux/kernel.h>
4798     -
4799     -#include "opcode.h"
4800     -#include "selftest.h"
4801     -
4802     -struct selftest_opcode {
4803     - unsigned int expected_size;
4804     - const uint8_t *insn;
4805     - const char *desc;
4806     -};
4807     -
4808     -static const struct selftest_opcode selftest_opcodes[] = {
4809     - /* REP MOVS */
4810     - {1, "\xf3\xa4", "rep movsb <mem8>, <mem8>"},
4811     - {4, "\xf3\xa5", "rep movsl <mem32>, <mem32>"},
4812     -
4813     - /* MOVZX / MOVZXD */
4814     - {1, "\x66\x0f\xb6\x51\xf8", "movzwq <mem8>, <reg16>"},
4815     - {1, "\x0f\xb6\x51\xf8", "movzwq <mem8>, <reg32>"},
4816     -
4817     - /* MOVSX / MOVSXD */
4818     - {1, "\x66\x0f\xbe\x51\xf8", "movswq <mem8>, <reg16>"},
4819     - {1, "\x0f\xbe\x51\xf8", "movswq <mem8>, <reg32>"},
4820     -
4821     -#ifdef CONFIG_X86_64
4822     - /* MOVZX / MOVZXD */
4823     - {1, "\x49\x0f\xb6\x51\xf8", "movzbq <mem8>, <reg64>"},
4824     - {2, "\x49\x0f\xb7\x51\xf8", "movzbq <mem16>, <reg64>"},
4825     -
4826     - /* MOVSX / MOVSXD */
4827     - {1, "\x49\x0f\xbe\x51\xf8", "movsbq <mem8>, <reg64>"},
4828     - {2, "\x49\x0f\xbf\x51\xf8", "movsbq <mem16>, <reg64>"},
4829     - {4, "\x49\x63\x51\xf8", "movslq <mem32>, <reg64>"},
4830     -#endif
4831     -};
4832     -
4833     -static bool selftest_opcode_one(const struct selftest_opcode *op)
4834     -{
4835     - unsigned size;
4836     -
4837     - kmemcheck_opcode_decode(op->insn, &size);
4838     -
4839     - if (size == op->expected_size)
4840     - return true;
4841     -
4842     - printk(KERN_WARNING "kmemcheck: opcode %s: expected size %d, got %d\n",
4843     - op->desc, op->expected_size, size);
4844     - return false;
4845     -}
4846     -
4847     -static bool selftest_opcodes_all(void)
4848     -{
4849     - bool pass = true;
4850     - unsigned int i;
4851     -
4852     - for (i = 0; i < ARRAY_SIZE(selftest_opcodes); ++i)
4853     - pass = pass && selftest_opcode_one(&selftest_opcodes[i]);
4854     -
4855     - return pass;
4856     -}
4857     -
4858     -bool kmemcheck_selftest(void)
4859     -{
4860     - bool pass = true;
4861     -
4862     - pass = pass && selftest_opcodes_all();
4863     -
4864     - return pass;
4865     -}
4866     diff --git a/arch/x86/mm/kmemcheck/selftest.h b/arch/x86/mm/kmemcheck/selftest.h
4867     deleted file mode 100644
4868     index 8d759aae453d..000000000000
4869     --- a/arch/x86/mm/kmemcheck/selftest.h
4870     +++ /dev/null
4871     @@ -1,7 +0,0 @@
4872     -/* SPDX-License-Identifier: GPL-2.0 */
4873     -#ifndef ARCH_X86_MM_KMEMCHECK_SELFTEST_H
4874     -#define ARCH_X86_MM_KMEMCHECK_SELFTEST_H
4875     -
4876     -bool kmemcheck_selftest(void);
4877     -
4878     -#endif
4879     diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c
4880     deleted file mode 100644
4881     index c2638a7d2c10..000000000000
4882     --- a/arch/x86/mm/kmemcheck/shadow.c
4883     +++ /dev/null
4884     @@ -1,173 +0,0 @@
4885     -#include <linux/kmemcheck.h>
4886     -#include <linux/export.h>
4887     -#include <linux/mm.h>
4888     -
4889     -#include <asm/page.h>
4890     -#include <asm/pgtable.h>
4891     -
4892     -#include "pte.h"
4893     -#include "shadow.h"
4894     -
4895     -/*
4896     - * Return the shadow address for the given address. Returns NULL if the
4897     - * address is not tracked.
4898     - *
4899     - * We need to be extremely careful not to follow any invalid pointers,
4900     - * because this function can be called for *any* possible address.
4901     - */
4902     -void *kmemcheck_shadow_lookup(unsigned long address)
4903     -{
4904     - pte_t *pte;
4905     - struct page *page;
4906     -
4907     - if (!virt_addr_valid(address))
4908     - return NULL;
4909     -
4910     - pte = kmemcheck_pte_lookup(address);
4911     - if (!pte)
4912     - return NULL;
4913     -
4914     - page = virt_to_page(address);
4915     - if (!page->shadow)
4916     - return NULL;
4917     - return page->shadow + (address & (PAGE_SIZE - 1));
4918     -}
4919     -
4920     -static void mark_shadow(void *address, unsigned int n,
4921     - enum kmemcheck_shadow status)
4922     -{
4923     - unsigned long addr = (unsigned long) address;
4924     - unsigned long last_addr = addr + n - 1;
4925     - unsigned long page = addr & PAGE_MASK;
4926     - unsigned long last_page = last_addr & PAGE_MASK;
4927     - unsigned int first_n;
4928     - void *shadow;
4929     -
4930     - /* If the memory range crosses a page boundary, stop there. */
4931     - if (page == last_page)
4932     - first_n = n;
4933     - else
4934     - first_n = page + PAGE_SIZE - addr;
4935     -
4936     - shadow = kmemcheck_shadow_lookup(addr);
4937     - if (shadow)
4938     - memset(shadow, status, first_n);
4939     -
4940     - addr += first_n;
4941     - n -= first_n;
4942     -
4943     - /* Do full-page memset()s. */
4944     - while (n >= PAGE_SIZE) {
4945     - shadow = kmemcheck_shadow_lookup(addr);
4946     - if (shadow)
4947     - memset(shadow, status, PAGE_SIZE);
4948     -
4949     - addr += PAGE_SIZE;
4950     - n -= PAGE_SIZE;
4951     - }
4952     -
4953     - /* Do the remaining page, if any. */
4954     - if (n > 0) {
4955     - shadow = kmemcheck_shadow_lookup(addr);
4956     - if (shadow)
4957     - memset(shadow, status, n);
4958     - }
4959     -}
4960     -
4961     -void kmemcheck_mark_unallocated(void *address, unsigned int n)
4962     -{
4963     - mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED);
4964     -}
4965     -
4966     -void kmemcheck_mark_uninitialized(void *address, unsigned int n)
4967     -{
4968     - mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED);
4969     -}
4970     -
4971     -/*
4972     - * Fill the shadow memory of the given address such that the memory at that
4973     - * address is marked as being initialized.
4974     - */
4975     -void kmemcheck_mark_initialized(void *address, unsigned int n)
4976     -{
4977     - mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED);
4978     -}
4979     -EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized);
4980     -
4981     -void kmemcheck_mark_freed(void *address, unsigned int n)
4982     -{
4983     - mark_shadow(address, n, KMEMCHECK_SHADOW_FREED);
4984     -}
4985     -
4986     -void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n)
4987     -{
4988     - unsigned int i;
4989     -
4990     - for (i = 0; i < n; ++i)
4991     - kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE);
4992     -}
4993     -
4994     -void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
4995     -{
4996     - unsigned int i;
4997     -
4998     - for (i = 0; i < n; ++i)
4999     - kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
5000     -}
5001     -
5002     -void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n)
5003     -{
5004     - unsigned int i;
5005     -
5006     - for (i = 0; i < n; ++i)
5007     - kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE);
5008     -}
5009     -
5010     -enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
5011     -{
5012     -#ifdef CONFIG_KMEMCHECK_PARTIAL_OK
5013     - uint8_t *x;
5014     - unsigned int i;
5015     -
5016     - x = shadow;
5017     -
5018     - /*
5019     - * Make sure _some_ bytes are initialized. Gcc frequently generates
5020     - * code to access neighboring bytes.
5021     - */
5022     - for (i = 0; i < size; ++i) {
5023     - if (x[i] == KMEMCHECK_SHADOW_INITIALIZED)
5024     - return x[i];
5025     - }
5026     -
5027     - return x[0];
5028     -#else
5029     - return kmemcheck_shadow_test_all(shadow, size);
5030     -#endif
5031     -}
5032     -
5033     -enum kmemcheck_shadow kmemcheck_shadow_test_all(void *shadow, unsigned int size)
5034     -{
5035     - uint8_t *x;
5036     - unsigned int i;
5037     -
5038     - x = shadow;
5039     -
5040     - /* All bytes must be initialized. */
5041     - for (i = 0; i < size; ++i) {
5042     - if (x[i] != KMEMCHECK_SHADOW_INITIALIZED)
5043     - return x[i];
5044     - }
5045     -
5046     - return x[0];
5047     -}
5048     -
5049     -void kmemcheck_shadow_set(void *shadow, unsigned int size)
5050     -{
5051     - uint8_t *x;
5052     - unsigned int i;
5053     -
5054     - x = shadow;
5055     - for (i = 0; i < size; ++i)
5056     - x[i] = KMEMCHECK_SHADOW_INITIALIZED;
5057     -}
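
mark_shadow() above uses a head / whole-pages / tail walk so that every memset() stays within a single page, since each page may have its own shadow mapping or none at all. A user-space sketch of the same walk, with a hypothetical mark_chunk() in place of the per-page shadow memset():

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Hypothetical per-page step; the real code first looks up the
 * page's shadow and silently skips pages that have none. */
static void mark_chunk(unsigned long addr, size_t n)
{
	printf("memset shadow of %#lx, %zu bytes\n", addr, n);
}

static void mark_range(unsigned long addr, size_t n)
{
	unsigned long last = addr + n - 1;
	size_t head;

	/* If the range crosses a page boundary, stop there. */
	if ((addr & PAGE_MASK) == (last & PAGE_MASK))
		head = n;
	else
		head = (addr & PAGE_MASK) + PAGE_SIZE - addr;

	mark_chunk(addr, head);
	addr += head;
	n -= head;

	while (n >= PAGE_SIZE) {	/* full-page chunks */
		mark_chunk(addr, PAGE_SIZE);
		addr += PAGE_SIZE;
		n -= PAGE_SIZE;
	}
	if (n > 0)			/* partial tail page */
		mark_chunk(addr, n);
}

int main(void)
{
	mark_range(0x1ff0, 0x2030);
	return 0;
}

For mark_range(0x1ff0, 0x2030) this produces a 16-byte head, two full pages and a 32-byte tail.
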
5058     diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h
5059     deleted file mode 100644
5060     index 49768dc18664..000000000000
5061     --- a/arch/x86/mm/kmemcheck/shadow.h
5062     +++ /dev/null
5063     @@ -1,19 +0,0 @@
5064     -/* SPDX-License-Identifier: GPL-2.0 */
5065     -#ifndef ARCH__X86__MM__KMEMCHECK__SHADOW_H
5066     -#define ARCH__X86__MM__KMEMCHECK__SHADOW_H
5067     -
5068     -enum kmemcheck_shadow {
5069     - KMEMCHECK_SHADOW_UNALLOCATED,
5070     - KMEMCHECK_SHADOW_UNINITIALIZED,
5071     - KMEMCHECK_SHADOW_INITIALIZED,
5072     - KMEMCHECK_SHADOW_FREED,
5073     -};
5074     -
5075     -void *kmemcheck_shadow_lookup(unsigned long address);
5076     -
5077     -enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size);
5078     -enum kmemcheck_shadow kmemcheck_shadow_test_all(void *shadow,
5079     - unsigned int size);
5080     -void kmemcheck_shadow_set(void *shadow, unsigned int size);
5081     -
5082     -#endif
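
Taken together, shadow.c and shadow.h implement the bookkeeping side of kmemcheck: one shadow byte per tracked data byte, and a read is accepted only when its shadow bytes say initialized. A toy user-space analogue of that contract, with names chosen to mirror the kernel's (the actual trapping of reads via hidden PTEs is of course not modelled here):

#include <assert.h>
#include <stdint.h>
#include <string.h>

enum kmemcheck_shadow {
	SHADOW_UNALLOCATED,
	SHADOW_UNINITIALIZED,
	SHADOW_INITIALIZED,
	SHADOW_FREED,
};

static uint8_t mem[64];
static uint8_t shadow[64];	/* one shadow byte per data byte */

static void track_alloc(size_t off, size_t n)
{
	memset(&shadow[off], SHADOW_UNINITIALIZED, n);
}

static void track_write(size_t off, size_t n, uint8_t val)
{
	memset(&mem[off], val, n);
	memset(&shadow[off], SHADOW_INITIALIZED, n); /* shadow_set() */
}

/* Like kmemcheck_shadow_test_all(): every byte must be initialized. */
static int read_ok(size_t off, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (shadow[off + i] != SHADOW_INITIALIZED)
			return 0;
	return 1;
}

int main(void)
{
	track_alloc(0, 16);
	track_write(0, 8, 0xab);
	assert(read_ok(0, 8));		/* fully written: no report */
	assert(!read_ok(0, 16));	/* bytes 8..15 untouched: report */
	return 0;
}
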
5083     diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
5084     index c21c2ed04612..aa44c3aa4cd5 100644
5085     --- a/arch/x86/mm/kmmio.c
5086     +++ b/arch/x86/mm/kmmio.c
5087     @@ -168,7 +168,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
5088     return -1;
5089     }
5090    
5091     - __flush_tlb_one(f->addr);
5092     + __flush_tlb_one_kernel(f->addr);
5093     return 0;
5094     }
5095    
5096     diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
5097     index dfb7d657cf43..3ed9a08885c5 100644
5098     --- a/arch/x86/mm/pageattr.c
5099     +++ b/arch/x86/mm/pageattr.c
5100     @@ -753,7 +753,7 @@ static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
5101    
5102     if (!debug_pagealloc_enabled())
5103     spin_unlock(&cpa_lock);
5104     - base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
5105     + base = alloc_pages(GFP_KERNEL, 0);
5106     if (!debug_pagealloc_enabled())
5107     spin_lock(&cpa_lock);
5108     if (!base)
5109     @@ -904,7 +904,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
5110    
5111     static int alloc_pte_page(pmd_t *pmd)
5112     {
5113     - pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
5114     + pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
5115     if (!pte)
5116     return -1;
5117    
5118     @@ -914,7 +914,7 @@ static int alloc_pte_page(pmd_t *pmd)
5119    
5120     static int alloc_pmd_page(pud_t *pud)
5121     {
5122     - pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
5123     + pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
5124     if (!pmd)
5125     return -1;
5126    
5127     @@ -1120,7 +1120,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
5128     pgd_entry = cpa->pgd + pgd_index(addr);
5129    
5130     if (pgd_none(*pgd_entry)) {
5131     - p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
5132     + p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
5133     if (!p4d)
5134     return -1;
5135    
5136     @@ -1132,7 +1132,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
5137     */
5138     p4d = p4d_offset(pgd_entry, addr);
5139     if (p4d_none(*p4d)) {
5140     - pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
5141     + pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
5142     if (!pud)
5143     return -1;
5144    
5145     diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
5146     index 9b7bcbd33cc2..004abf9ebf12 100644
5147     --- a/arch/x86/mm/pgtable.c
5148     +++ b/arch/x86/mm/pgtable.c
5149     @@ -7,7 +7,7 @@
5150     #include <asm/fixmap.h>
5151     #include <asm/mtrr.h>
5152    
5153     -#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)
5154     +#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
5155    
5156     #ifdef CONFIG_HIGHPTE
5157     #define PGALLOC_USER_GFP __GFP_HIGHMEM
5158     diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
5159     index c3c5274410a9..9bb7f0ab9fe6 100644
5160     --- a/arch/x86/mm/pgtable_32.c
5161     +++ b/arch/x86/mm/pgtable_32.c
5162     @@ -63,7 +63,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
5163     * It's enough to flush this one mapping.
5164     * (PGE mappings get flushed as well)
5165     */
5166     - __flush_tlb_one(vaddr);
5167     + __flush_tlb_one_kernel(vaddr);
5168     }
5169    
5170     unsigned long __FIXADDR_TOP = 0xfffff000;
5171     diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
5172     index 012d02624848..0c936435ea93 100644
5173     --- a/arch/x86/mm/tlb.c
5174     +++ b/arch/x86/mm/tlb.c
5175     @@ -492,7 +492,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
5176     * flush that changes context.tlb_gen from 2 to 3. If they get
5177     * processed on this CPU in reverse order, we'll see
5178     * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
5179     - * If we were to use __flush_tlb_single() and set local_tlb_gen to
5180     + * If we were to use __flush_tlb_one_user() and set local_tlb_gen to
5181     * 3, we'd be break the invariant: we'd update local_tlb_gen above
5182     * 1 without the full flush that's needed for tlb_gen 2.
5183     *
5184     @@ -513,7 +513,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
5185    
5186     addr = f->start;
5187     while (addr < f->end) {
5188     - __flush_tlb_single(addr);
5189     + __flush_tlb_one_user(addr);
5190     addr += PAGE_SIZE;
5191     }
5192     if (local)
5193     @@ -660,7 +660,7 @@ static void do_kernel_range_flush(void *info)
5194    
5195     /* flush range by one by one 'invlpg' */
5196     for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
5197     - __flush_tlb_one(addr);
5198     + __flush_tlb_one_kernel(addr);
5199     }
5200    
5201     void flush_tlb_kernel_range(unsigned long start, unsigned long end)
5202     diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
5203     index 61975b6bcb1a..ad5d9538f0f9 100644
5204     --- a/arch/x86/platform/efi/efi_64.c
5205     +++ b/arch/x86/platform/efi/efi_64.c
5206     @@ -211,7 +211,7 @@ int __init efi_alloc_page_tables(void)
5207     if (efi_enabled(EFI_OLD_MEMMAP))
5208     return 0;
5209    
5210     - gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
5211     + gfp_mask = GFP_KERNEL | __GFP_ZERO;
5212     efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
5213     if (!efi_pgd)
5214     return -ENOMEM;
5215     diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
5216     index 8538a6723171..7d5d53f36a7a 100644
5217     --- a/arch/x86/platform/uv/tlb_uv.c
5218     +++ b/arch/x86/platform/uv/tlb_uv.c
5219     @@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
5220     local_flush_tlb();
5221     stat->d_alltlb++;
5222     } else {
5223     - __flush_tlb_single(msg->address);
5224     + __flush_tlb_one_user(msg->address);
5225     stat->d_onetlb++;
5226     }
5227     stat->d_requestee++;
5228     diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
5229     index a0e2b8c6e5c7..d0b943a6b117 100644
5230     --- a/arch/x86/xen/mmu_pv.c
5231     +++ b/arch/x86/xen/mmu_pv.c
5232     @@ -1300,12 +1300,12 @@ static void xen_flush_tlb(void)
5233     preempt_enable();
5234     }
5235    
5236     -static void xen_flush_tlb_single(unsigned long addr)
5237     +static void xen_flush_tlb_one_user(unsigned long addr)
5238     {
5239     struct mmuext_op *op;
5240     struct multicall_space mcs;
5241    
5242     - trace_xen_mmu_flush_tlb_single(addr);
5243     + trace_xen_mmu_flush_tlb_one_user(addr);
5244    
5245     preempt_disable();
5246    
5247     @@ -2360,7 +2360,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
5248    
5249     .flush_tlb_user = xen_flush_tlb,
5250     .flush_tlb_kernel = xen_flush_tlb,
5251     - .flush_tlb_single = xen_flush_tlb_single,
5252     + .flush_tlb_one_user = xen_flush_tlb_one_user,
5253     .flush_tlb_others = xen_flush_tlb_others,
5254    
5255     .pgd_alloc = xen_pgd_alloc,
5256     diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
5257     index 6083ba462f35..15812e553b95 100644
5258     --- a/arch/x86/xen/p2m.c
5259     +++ b/arch/x86/xen/p2m.c
5260     @@ -694,6 +694,9 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
5261     int i, ret = 0;
5262     pte_t *pte;
5263    
5264     + if (xen_feature(XENFEAT_auto_translated_physmap))
5265     + return 0;
5266     +
5267     if (kmap_ops) {
5268     ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
5269     kmap_ops, count);
5270     @@ -736,6 +739,9 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
5271     {
5272     int i, ret = 0;
5273    
5274     + if (xen_feature(XENFEAT_auto_translated_physmap))
5275     + return 0;
5276     +
5277     for (i = 0; i < count; i++) {
5278     unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
5279     unsigned long pfn = page_to_pfn(pages[i]);
5280     diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
5281     index 497cc55a0c16..96f26e026783 100644
5282     --- a/arch/x86/xen/xen-head.S
5283     +++ b/arch/x86/xen/xen-head.S
5284     @@ -9,7 +9,9 @@
5285    
5286     #include <asm/boot.h>
5287     #include <asm/asm.h>
5288     +#include <asm/msr.h>
5289     #include <asm/page_types.h>
5290     +#include <asm/percpu.h>
5291     #include <asm/unwind_hints.h>
5292    
5293     #include <xen/interface/elfnote.h>
5294     @@ -35,6 +37,20 @@ ENTRY(startup_xen)
5295     mov %_ASM_SI, xen_start_info
5296     mov $init_thread_union+THREAD_SIZE, %_ASM_SP
5297    
5298     +#ifdef CONFIG_X86_64
5299     + /* Set up %gs.
5300     + *
5301     + * The base of %gs always points to the bottom of the irqstack
5302     + * union. If the stack protector canary is enabled, it is
5303     + * located at %gs:40. Note that, on SMP, the boot cpu uses
5304     + * init data section till per cpu areas are set up.
5305     + */
5306     + movl $MSR_GS_BASE,%ecx
5307     + movq $INIT_PER_CPU_VAR(irq_stack_union),%rax
5308     + cdq
5309     + wrmsr
5310     +#endif
5311     +
5312     jmp xen_start_kernel
5313     END(startup_xen)
5314     __FINIT
5315     diff --git a/block/blk-wbt.c b/block/blk-wbt.c
5316     index e59d59c11ebb..5c105514bca7 100644
5317     --- a/block/blk-wbt.c
5318     +++ b/block/blk-wbt.c
5319     @@ -698,7 +698,15 @@ u64 wbt_default_latency_nsec(struct request_queue *q)
5320    
5321     static int wbt_data_dir(const struct request *rq)
5322     {
5323     - return rq_data_dir(rq);
5324     + const int op = req_op(rq);
5325     +
5326     + if (op == REQ_OP_READ)
5327     + return READ;
5328     + else if (op == REQ_OP_WRITE || op == REQ_OP_FLUSH)
5329     + return WRITE;
5330     +
5331     + /* don't account */
5332     + return -1;
5333     }
5334    
5335     int wbt_init(struct request_queue *q)
5336     diff --git a/crypto/xor.c b/crypto/xor.c
5337     index 263af9fb45ea..bce9fe7af40a 100644
5338     --- a/crypto/xor.c
5339     +++ b/crypto/xor.c
5340     @@ -122,12 +122,7 @@ calibrate_xor_blocks(void)
5341     goto out;
5342     }
5343    
5344     - /*
5345     - * Note: Since the memory is not actually used for _anything_ but to
5346     - * test the XOR speed, we don't really want kmemcheck to warn about
5347     - * reading uninitialized bytes here.
5348     - */
5349     - b1 = (void *) __get_free_pages(GFP_KERNEL | __GFP_NOTRACK, 2);
5350     + b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
5351     if (!b1) {
5352     printk(KERN_WARNING "xor: Yikes! No memory available.\n");
5353     return -ENOMEM;
5354     diff --git a/drivers/base/core.c b/drivers/base/core.c
5355     index 12ebd055724c..c8501cdb95f4 100644
5356     --- a/drivers/base/core.c
5357     +++ b/drivers/base/core.c
5358     @@ -313,6 +313,9 @@ static void __device_link_del(struct device_link *link)
5359     dev_info(link->consumer, "Dropping the link to %s\n",
5360     dev_name(link->supplier));
5361    
5362     + if (link->flags & DL_FLAG_PM_RUNTIME)
5363     + pm_runtime_drop_link(link->consumer);
5364     +
5365     list_del(&link->s_node);
5366     list_del(&link->c_node);
5367     device_link_free(link);
5368     diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
5369     index 609227211295..fe4fd8aee19f 100644
5370     --- a/drivers/block/rbd.c
5371     +++ b/drivers/block/rbd.c
5372     @@ -124,11 +124,13 @@ static int atomic_dec_return_safe(atomic_t *v)
5373     #define RBD_FEATURE_STRIPINGV2 (1ULL<<1)
5374     #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2)
5375     #define RBD_FEATURE_DATA_POOL (1ULL<<7)
5376     +#define RBD_FEATURE_OPERATIONS (1ULL<<8)
5377    
5378     #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
5379     RBD_FEATURE_STRIPINGV2 | \
5380     RBD_FEATURE_EXCLUSIVE_LOCK | \
5381     - RBD_FEATURE_DATA_POOL)
5382     + RBD_FEATURE_DATA_POOL | \
5383     + RBD_FEATURE_OPERATIONS)
5384    
5385     /* Features supported by this (client software) implementation. */
5386    
5387     diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
5388     index 98a60db8e5d1..b33c8d6eb8c7 100644
5389     --- a/drivers/bluetooth/Kconfig
5390     +++ b/drivers/bluetooth/Kconfig
5391     @@ -66,6 +66,7 @@ config BT_HCIBTSDIO
5392    
5393     config BT_HCIUART
5394     tristate "HCI UART driver"
5395     + depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS
5396     depends on TTY
5397     help
5398     Bluetooth HCI UART driver.
5399     @@ -80,7 +81,6 @@ config BT_HCIUART
5400     config BT_HCIUART_SERDEV
5401     bool
5402     depends on SERIAL_DEV_BUS && BT_HCIUART
5403     - depends on SERIAL_DEV_BUS=y || SERIAL_DEV_BUS=BT_HCIUART
5404     default y
5405    
5406     config BT_HCIUART_H4
5407     diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
5408     index d1f5bb534e0e..6e9df558325b 100644
5409     --- a/drivers/char/hw_random/via-rng.c
5410     +++ b/drivers/char/hw_random/via-rng.c
5411     @@ -162,7 +162,7 @@ static int via_rng_init(struct hwrng *rng)
5412     /* Enable secondary noise source on CPUs where it is present. */
5413    
5414     /* Nehemiah stepping 8 and higher */
5415     - if ((c->x86_model == 9) && (c->x86_mask > 7))
5416     + if ((c->x86_model == 9) && (c->x86_stepping > 7))
5417     lo |= VIA_NOISESRC2;
5418    
5419     /* Esther */
5420     diff --git a/drivers/char/random.c b/drivers/char/random.c
5421     index 8ad92707e45f..ea0115cf5fc0 100644
5422     --- a/drivers/char/random.c
5423     +++ b/drivers/char/random.c
5424     @@ -259,7 +259,6 @@
5425     #include <linux/cryptohash.h>
5426     #include <linux/fips.h>
5427     #include <linux/ptrace.h>
5428     -#include <linux/kmemcheck.h>
5429     #include <linux/workqueue.h>
5430     #include <linux/irq.h>
5431     #include <linux/syscalls.h>
5432     diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
5433     index 3a2ca0f79daf..d0c34df0529c 100644
5434     --- a/drivers/cpufreq/acpi-cpufreq.c
5435     +++ b/drivers/cpufreq/acpi-cpufreq.c
5436     @@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
5437     if (c->x86_vendor == X86_VENDOR_INTEL) {
5438     if ((c->x86 == 15) &&
5439     (c->x86_model == 6) &&
5440     - (c->x86_mask == 8)) {
5441     + (c->x86_stepping == 8)) {
5442     pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
5443     return -ENODEV;
5444     }
5445     diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
5446     index c46a12df40dd..d5e27bc7585a 100644
5447     --- a/drivers/cpufreq/longhaul.c
5448     +++ b/drivers/cpufreq/longhaul.c
5449     @@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
5450     break;
5451    
5452     case 7:
5453     - switch (c->x86_mask) {
5454     + switch (c->x86_stepping) {
5455     case 0:
5456     longhaul_version = TYPE_LONGHAUL_V1;
5457     cpu_model = CPU_SAMUEL2;
5458     @@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
5459     break;
5460     case 1 ... 15:
5461     longhaul_version = TYPE_LONGHAUL_V2;
5462     - if (c->x86_mask < 8) {
5463     + if (c->x86_stepping < 8) {
5464     cpu_model = CPU_SAMUEL2;
5465     cpuname = "C3 'Samuel 2' [C5B]";
5466     } else {
5467     @@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
5468     numscales = 32;
5469     memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
5470     memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
5471     - switch (c->x86_mask) {
5472     + switch (c->x86_stepping) {
5473     case 0 ... 1:
5474     cpu_model = CPU_NEHEMIAH;
5475     cpuname = "C3 'Nehemiah A' [C5XLOE]";
5476     diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
5477     index fd77812313f3..a25741b1281b 100644
5478     --- a/drivers/cpufreq/p4-clockmod.c
5479     +++ b/drivers/cpufreq/p4-clockmod.c
5480     @@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
5481     #endif
5482    
5483     /* Errata workaround */
5484     - cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
5485     + cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
5486     switch (cpuid) {
5487     case 0x0f07:
5488     case 0x0f0a:
5489     diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
5490     index 80ac313e6c59..302e9ce793a0 100644
5491     --- a/drivers/cpufreq/powernow-k7.c
5492     +++ b/drivers/cpufreq/powernow-k7.c
5493     @@ -131,7 +131,7 @@ static int check_powernow(void)
5494     return 0;
5495     }
5496    
5497     - if ((c->x86_model == 6) && (c->x86_mask == 0)) {
5498     + if ((c->x86_model == 6) && (c->x86_stepping == 0)) {
5499     pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
5500     have_a0 = 1;
5501     }
5502     diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
5503     index 3ff5160451b4..7e1e5bbcf430 100644
5504     --- a/drivers/cpufreq/powernv-cpufreq.c
5505     +++ b/drivers/cpufreq/powernv-cpufreq.c
5506     @@ -287,9 +287,9 @@ static int init_powernv_pstates(void)
5507    
5508     if (id == pstate_max)
5509     powernv_pstate_info.max = i;
5510     - else if (id == pstate_nominal)
5511     + if (id == pstate_nominal)
5512     powernv_pstate_info.nominal = i;
5513     - else if (id == pstate_min)
5514     + if (id == pstate_min)
5515     powernv_pstate_info.min = i;
5516    
5517     if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
5518     diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
5519     index 41bc5397f4bb..4fa5adf16c70 100644
5520     --- a/drivers/cpufreq/speedstep-centrino.c
5521     +++ b/drivers/cpufreq/speedstep-centrino.c
5522     @@ -37,7 +37,7 @@ struct cpu_id
5523     {
5524     __u8 x86; /* CPU family */
5525     __u8 x86_model; /* model */
5526     - __u8 x86_mask; /* stepping */
5527     + __u8 x86_stepping; /* stepping */
5528     };
5529    
5530     enum {
5531     @@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
5532     {
5533     if ((c->x86 == x->x86) &&
5534     (c->x86_model == x->x86_model) &&
5535     - (c->x86_mask == x->x86_mask))
5536     + (c->x86_stepping == x->x86_stepping))
5537     return 1;
5538     return 0;
5539     }
5540     diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
5541     index ccab452a4ef5..dd7bb00991f4 100644
5542     --- a/drivers/cpufreq/speedstep-lib.c
5543     +++ b/drivers/cpufreq/speedstep-lib.c
5544     @@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void)
5545     ebx = cpuid_ebx(0x00000001);
5546     ebx &= 0x000000FF;
5547    
5548     - pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
5549     + pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping);
5550    
5551     - switch (c->x86_mask) {
5552     + switch (c->x86_stepping) {
5553     case 4:
5554     /*
5555     * B-stepping [M-P4-M]
5556     @@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void)
5557     msr_lo, msr_hi);
5558     if ((msr_hi & (1<<18)) &&
5559     (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
5560     - if (c->x86_mask == 0x01) {
5561     + if (c->x86_stepping == 0x01) {
5562     pr_debug("early PIII version\n");
5563     return SPEEDSTEP_CPU_PIII_C_EARLY;
5564     } else
5565     diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
5566     index b3869748cc6b..c939f18f70cc 100644
5567     --- a/drivers/crypto/padlock-aes.c
5568     +++ b/drivers/crypto/padlock-aes.c
5569     @@ -512,7 +512,7 @@ static int __init padlock_init(void)
5570    
5571     printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
5572    
5573     - if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
5574     + if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
5575     ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
5576     cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
5577     printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
5578     diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
5579     index 0d01d1624252..63d636424161 100644
5580     --- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
5581     +++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
5582     @@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
5583     algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
5584     ss = algt->ss;
5585    
5586     - spin_lock(&ss->slock);
5587     + spin_lock_bh(&ss->slock);
5588    
5589     writel(mode, ss->base + SS_CTL);
5590    
5591     @@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
5592     }
5593    
5594     writel(0, ss->base + SS_CTL);
5595     - spin_unlock(&ss->slock);
5596     - return dlen;
5597     + spin_unlock_bh(&ss->slock);
5598     + return 0;
5599     }
5600     diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
5601     index a1c4ee818614..202476fbbc4c 100644
5602     --- a/drivers/devfreq/devfreq.c
5603     +++ b/drivers/devfreq/devfreq.c
5604     @@ -676,7 +676,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
5605     devfreq = devfreq_add_device(dev, profile, governor_name, data);
5606     if (IS_ERR(devfreq)) {
5607     devres_free(ptr);
5608     - return ERR_PTR(-ENOMEM);
5609     + return devfreq;
5610     }
5611    
5612     *ptr = devfreq;
5613     diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
5614     index b44d9d7db347..012fa3d1f407 100644
5615     --- a/drivers/dma-buf/reservation.c
5616     +++ b/drivers/dma-buf/reservation.c
5617     @@ -455,13 +455,15 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
5618     unsigned long timeout)
5619     {
5620     struct dma_fence *fence;
5621     - unsigned seq, shared_count, i = 0;
5622     + unsigned seq, shared_count;
5623     long ret = timeout ? timeout : 1;
5624     + int i;
5625    
5626     retry:
5627     shared_count = 0;
5628     seq = read_seqcount_begin(&obj->seq);
5629     rcu_read_lock();
5630     + i = -1;
5631    
5632     fence = rcu_dereference(obj->fence_excl);
5633     if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
5634     @@ -477,14 +479,14 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
5635     fence = NULL;
5636     }
5637    
5638     - if (!fence && wait_all) {
5639     + if (wait_all) {
5640     struct reservation_object_list *fobj =
5641     rcu_dereference(obj->fence);
5642    
5643     if (fobj)
5644     shared_count = fobj->shared_count;
5645    
5646     - for (i = 0; i < shared_count; ++i) {
5647     + for (i = 0; !fence && i < shared_count; ++i) {
5648     struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
5649    
5650     if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
5651     diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
5652     index ac2f30295efe..59ce32e405ac 100644
5653     --- a/drivers/edac/amd64_edac.c
5654     +++ b/drivers/edac/amd64_edac.c
5655     @@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
5656     struct amd64_family_type *fam_type = NULL;
5657    
5658     pvt->ext_model = boot_cpu_data.x86_model >> 4;
5659     - pvt->stepping = boot_cpu_data.x86_mask;
5660     + pvt->stepping = boot_cpu_data.x86_stepping;
5661     pvt->model = boot_cpu_data.x86_model;
5662     pvt->fam = boot_cpu_data.x86;
5663    
5664     diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
5665     index 262c8ded87c0..dafc9c4b1e6f 100644
5666     --- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
5667     +++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
5668     @@ -40,7 +40,7 @@ struct smu_table_entry {
5669     uint32_t table_addr_high;
5670     uint32_t table_addr_low;
5671     uint8_t *table;
5672     - uint32_t handle;
5673     + unsigned long handle;
5674     };
5675    
5676     struct smu_table_array {
5677     diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
5678     index 6f3849ec0c1d..e9f1e6fe7b94 100644
5679     --- a/drivers/gpu/drm/ast/ast_mode.c
5680     +++ b/drivers/gpu/drm/ast/ast_mode.c
5681     @@ -644,6 +644,7 @@ static void ast_crtc_commit(struct drm_crtc *crtc)
5682     {
5683     struct ast_private *ast = crtc->dev->dev_private;
5684     ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
5685     + ast_crtc_load_lut(crtc);
5686     }
5687    
5688    
5689     diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
5690     index 18d9da53282b..3f818412765c 100644
5691     --- a/drivers/gpu/drm/i915/i915_drv.h
5692     +++ b/drivers/gpu/drm/i915/i915_drv.h
5693     @@ -842,6 +842,7 @@ struct intel_device_info {
5694     u8 gen;
5695     u16 gen_mask;
5696     enum intel_platform platform;
5697     + u8 gt; /* GT number, 0 if undefined */
5698     u8 ring_mask; /* Rings supported by the HW */
5699     u8 num_rings;
5700     #define DEFINE_FLAG(name) u8 name:1
5701     diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
5702     index 09d97e0990b7..2985f1e418ad 100644
5703     --- a/drivers/gpu/drm/i915/i915_pci.c
5704     +++ b/drivers/gpu/drm/i915/i915_pci.c
5705     @@ -224,15 +224,34 @@ static const struct intel_device_info intel_ironlake_m_info = {
5706     GEN_DEFAULT_PIPEOFFSETS, \
5707     CURSOR_OFFSETS
5708    
5709     -static const struct intel_device_info intel_sandybridge_d_info = {
5710     - GEN6_FEATURES,
5711     - .platform = INTEL_SANDYBRIDGE,
5712     +#define SNB_D_PLATFORM \
5713     + GEN6_FEATURES, \
5714     + .platform = INTEL_SANDYBRIDGE
5715     +
5716     +static const struct intel_device_info intel_sandybridge_d_gt1_info = {
5717     + SNB_D_PLATFORM,
5718     + .gt = 1,
5719     };
5720    
5721     -static const struct intel_device_info intel_sandybridge_m_info = {
5722     - GEN6_FEATURES,
5723     - .platform = INTEL_SANDYBRIDGE,
5724     - .is_mobile = 1,
5725     +static const struct intel_device_info intel_sandybridge_d_gt2_info = {
5726     + SNB_D_PLATFORM,
5727     + .gt = 2,
5728     +};
5729     +
5730     +#define SNB_M_PLATFORM \
5731     + GEN6_FEATURES, \
5732     + .platform = INTEL_SANDYBRIDGE, \
5733     + .is_mobile = 1
5734     +
5735     +
5736     +static const struct intel_device_info intel_sandybridge_m_gt1_info = {
5737     + SNB_M_PLATFORM,
5738     + .gt = 1,
5739     +};
5740     +
5741     +static const struct intel_device_info intel_sandybridge_m_gt2_info = {
5742     + SNB_M_PLATFORM,
5743     + .gt = 2,
5744     };
5745    
5746     #define GEN7_FEATURES \
5747     @@ -249,22 +268,41 @@ static const struct intel_device_info intel_sandybridge_m_info = {
5748     GEN_DEFAULT_PIPEOFFSETS, \
5749     IVB_CURSOR_OFFSETS
5750    
5751     -static const struct intel_device_info intel_ivybridge_d_info = {
5752     - GEN7_FEATURES,
5753     - .platform = INTEL_IVYBRIDGE,
5754     - .has_l3_dpf = 1,
5755     +#define IVB_D_PLATFORM \
5756     + GEN7_FEATURES, \
5757     + .platform = INTEL_IVYBRIDGE, \
5758     + .has_l3_dpf = 1
5759     +
5760     +static const struct intel_device_info intel_ivybridge_d_gt1_info = {
5761     + IVB_D_PLATFORM,
5762     + .gt = 1,
5763     };
5764    
5765     -static const struct intel_device_info intel_ivybridge_m_info = {
5766     - GEN7_FEATURES,
5767     - .platform = INTEL_IVYBRIDGE,
5768     - .is_mobile = 1,
5769     - .has_l3_dpf = 1,
5770     +static const struct intel_device_info intel_ivybridge_d_gt2_info = {
5771     + IVB_D_PLATFORM,
5772     + .gt = 2,
5773     +};
5774     +
5775     +#define IVB_M_PLATFORM \
5776     + GEN7_FEATURES, \
5777     + .platform = INTEL_IVYBRIDGE, \
5778     + .is_mobile = 1, \
5779     + .has_l3_dpf = 1
5780     +
5781     +static const struct intel_device_info intel_ivybridge_m_gt1_info = {
5782     + IVB_M_PLATFORM,
5783     + .gt = 1,
5784     +};
5785     +
5786     +static const struct intel_device_info intel_ivybridge_m_gt2_info = {
5787     + IVB_M_PLATFORM,
5788     + .gt = 2,
5789     };
5790    
5791     static const struct intel_device_info intel_ivybridge_q_info = {
5792     GEN7_FEATURES,
5793     .platform = INTEL_IVYBRIDGE,
5794     + .gt = 2,
5795     .num_pipes = 0, /* legal, last one wins */
5796     .has_l3_dpf = 1,
5797     };
5798     @@ -299,10 +337,24 @@ static const struct intel_device_info intel_valleyview_info = {
5799     .has_rc6p = 0 /* RC6p removed-by HSW */, \
5800     .has_runtime_pm = 1
5801    
5802     -static const struct intel_device_info intel_haswell_info = {
5803     - HSW_FEATURES,
5804     - .platform = INTEL_HASWELL,
5805     - .has_l3_dpf = 1,
5806     +#define HSW_PLATFORM \
5807     + HSW_FEATURES, \
5808     + .platform = INTEL_HASWELL, \
5809     + .has_l3_dpf = 1
5810     +
5811     +static const struct intel_device_info intel_haswell_gt1_info = {
5812     + HSW_PLATFORM,
5813     + .gt = 1,
5814     +};
5815     +
5816     +static const struct intel_device_info intel_haswell_gt2_info = {
5817     + HSW_PLATFORM,
5818     + .gt = 2,
5819     +};
5820     +
5821     +static const struct intel_device_info intel_haswell_gt3_info = {
5822     + HSW_PLATFORM,
5823     + .gt = 3,
5824     };
5825    
5826     #define BDW_FEATURES \
5827     @@ -318,12 +370,27 @@ static const struct intel_device_info intel_haswell_info = {
5828     .gen = 8, \
5829     .platform = INTEL_BROADWELL
5830    
5831     -static const struct intel_device_info intel_broadwell_info = {
5832     +static const struct intel_device_info intel_broadwell_gt1_info = {
5833     + BDW_PLATFORM,
5834     + .gt = 1,
5835     +};
5836     +
5837     +static const struct intel_device_info intel_broadwell_gt2_info = {
5838     BDW_PLATFORM,
5839     + .gt = 2,
5840     +};
5841     +
5842     +static const struct intel_device_info intel_broadwell_rsvd_info = {
5843     + BDW_PLATFORM,
5844     + .gt = 3,
5845     + /* According to the device ID those devices are GT3, they were
5846     + * previously treated as not GT3, keep it like that.
5847     + */
5848     };
5849    
5850     static const struct intel_device_info intel_broadwell_gt3_info = {
5851     BDW_PLATFORM,
5852     + .gt = 3,
5853     .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
5854     };
5855    
5856     @@ -358,13 +425,29 @@ static const struct intel_device_info intel_cherryview_info = {
5857     .has_guc = 1, \
5858     .ddb_size = 896
5859    
5860     -static const struct intel_device_info intel_skylake_info = {
5861     +static const struct intel_device_info intel_skylake_gt1_info = {
5862     SKL_PLATFORM,
5863     + .gt = 1,
5864     };
5865    
5866     -static const struct intel_device_info intel_skylake_gt3_info = {
5867     +static const struct intel_device_info intel_skylake_gt2_info = {
5868     SKL_PLATFORM,
5869     - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
5870     + .gt = 2,
5871     +};
5872     +
5873     +#define SKL_GT3_PLUS_PLATFORM \
5874     + SKL_PLATFORM, \
5875     + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING
5876     +
5877     +
5878     +static const struct intel_device_info intel_skylake_gt3_info = {
5879     + SKL_GT3_PLUS_PLATFORM,
5880     + .gt = 3,
5881     +};
5882     +
5883     +static const struct intel_device_info intel_skylake_gt4_info = {
5884     + SKL_GT3_PLUS_PLATFORM,
5885     + .gt = 4,
5886     };
5887    
5888     #define GEN9_LP_FEATURES \
5889     @@ -416,12 +499,19 @@ static const struct intel_device_info intel_geminilake_info = {
5890     .has_guc = 1, \
5891     .ddb_size = 896
5892    
5893     -static const struct intel_device_info intel_kabylake_info = {
5894     +static const struct intel_device_info intel_kabylake_gt1_info = {
5895     KBL_PLATFORM,
5896     + .gt = 1,
5897     +};
5898     +
5899     +static const struct intel_device_info intel_kabylake_gt2_info = {
5900     + KBL_PLATFORM,
5901     + .gt = 2,
5902     };
5903    
5904     static const struct intel_device_info intel_kabylake_gt3_info = {
5905     KBL_PLATFORM,
5906     + .gt = 3,
5907     .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
5908     };
5909    
5910     @@ -434,20 +524,28 @@ static const struct intel_device_info intel_kabylake_gt3_info = {
5911     .has_guc = 1, \
5912     .ddb_size = 896
5913    
5914     -static const struct intel_device_info intel_coffeelake_info = {
5915     +static const struct intel_device_info intel_coffeelake_gt1_info = {
5916     + CFL_PLATFORM,
5917     + .gt = 1,
5918     +};
5919     +
5920     +static const struct intel_device_info intel_coffeelake_gt2_info = {
5921     CFL_PLATFORM,
5922     + .gt = 2,
5923     };
5924    
5925     static const struct intel_device_info intel_coffeelake_gt3_info = {
5926     CFL_PLATFORM,
5927     + .gt = 3,
5928     .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
5929     };
5930    
5931     -static const struct intel_device_info intel_cannonlake_info = {
5932     +static const struct intel_device_info intel_cannonlake_gt2_info = {
5933     BDW_FEATURES,
5934     .is_alpha_support = 1,
5935     .platform = INTEL_CANNONLAKE,
5936     .gen = 10,
5937     + .gt = 2,
5938     .ddb_size = 1024,
5939     .has_csr = 1,
5940     .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
5941     @@ -476,31 +574,40 @@ static const struct pci_device_id pciidlist[] = {
5942     INTEL_PINEVIEW_IDS(&intel_pineview_info),
5943     INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
5944     INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
5945     - INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
5946     - INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
5947     + INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info),
5948     + INTEL_SNB_D_GT2_IDS(&intel_sandybridge_d_gt2_info),
5949     + INTEL_SNB_M_GT1_IDS(&intel_sandybridge_m_gt1_info),
5950     + INTEL_SNB_M_GT2_IDS(&intel_sandybridge_m_gt2_info),
5951     INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
5952     - INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
5953     - INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
5954     - INTEL_HSW_IDS(&intel_haswell_info),
5955     + INTEL_IVB_M_GT1_IDS(&intel_ivybridge_m_gt1_info),
5956     + INTEL_IVB_M_GT2_IDS(&intel_ivybridge_m_gt2_info),
5957     + INTEL_IVB_D_GT1_IDS(&intel_ivybridge_d_gt1_info),
5958     + INTEL_IVB_D_GT2_IDS(&intel_ivybridge_d_gt2_info),
5959     + INTEL_HSW_GT1_IDS(&intel_haswell_gt1_info),
5960     + INTEL_HSW_GT2_IDS(&intel_haswell_gt2_info),
5961     + INTEL_HSW_GT3_IDS(&intel_haswell_gt3_info),
5962     INTEL_VLV_IDS(&intel_valleyview_info),
5963     - INTEL_BDW_GT12_IDS(&intel_broadwell_info),
5964     + INTEL_BDW_GT1_IDS(&intel_broadwell_gt1_info),
5965     + INTEL_BDW_GT2_IDS(&intel_broadwell_gt2_info),
5966     INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
5967     - INTEL_BDW_RSVD_IDS(&intel_broadwell_info),
5968     + INTEL_BDW_RSVD_IDS(&intel_broadwell_rsvd_info),
5969     INTEL_CHV_IDS(&intel_cherryview_info),
5970     - INTEL_SKL_GT1_IDS(&intel_skylake_info),
5971     - INTEL_SKL_GT2_IDS(&intel_skylake_info),
5972     + INTEL_SKL_GT1_IDS(&intel_skylake_gt1_info),
5973     + INTEL_SKL_GT2_IDS(&intel_skylake_gt2_info),
5974     INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
5975     - INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
5976     + INTEL_SKL_GT4_IDS(&intel_skylake_gt4_info),
5977     INTEL_BXT_IDS(&intel_broxton_info),
5978     INTEL_GLK_IDS(&intel_geminilake_info),
5979     - INTEL_KBL_GT1_IDS(&intel_kabylake_info),
5980     - INTEL_KBL_GT2_IDS(&intel_kabylake_info),
5981     + INTEL_KBL_GT1_IDS(&intel_kabylake_gt1_info),
5982     + INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
5983     INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
5984     INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
5985     - INTEL_CFL_S_IDS(&intel_coffeelake_info),
5986     - INTEL_CFL_H_IDS(&intel_coffeelake_info),
5987     - INTEL_CFL_U_IDS(&intel_coffeelake_gt3_info),
5988     - INTEL_CNL_IDS(&intel_cannonlake_info),
5989     + INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
5990     + INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
5991     + INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
5992     + INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
5993     + INTEL_CNL_U_GT2_IDS(&intel_cannonlake_gt2_info),
5994     + INTEL_CNL_Y_GT2_IDS(&intel_cannonlake_gt2_info),
5995     {0, 0, 0}
5996     };
5997     MODULE_DEVICE_TABLE(pci, pciidlist);
5998     diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
5999     index 74fc9362ecf9..3eb920851141 100644
6000     --- a/drivers/gpu/drm/qxl/qxl_cmd.c
6001     +++ b/drivers/gpu/drm/qxl/qxl_cmd.c
6002     @@ -388,7 +388,11 @@ void qxl_io_create_primary(struct qxl_device *qdev,
6003     create->width = bo->surf.width;
6004     create->height = bo->surf.height;
6005     create->stride = bo->surf.stride;
6006     - create->mem = qxl_bo_physical_address(qdev, bo, offset);
6007     + if (bo->shadow) {
6008     + create->mem = qxl_bo_physical_address(qdev, bo->shadow, offset);
6009     + } else {
6010     + create->mem = qxl_bo_physical_address(qdev, bo, offset);
6011     + }
6012    
6013     QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
6014     bo->kptr);
6015     diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
6016     index afbf50d0c08f..9a9214ae0fb5 100644
6017     --- a/drivers/gpu/drm/qxl/qxl_display.c
6018     +++ b/drivers/gpu/drm/qxl/qxl_display.c
6019     @@ -289,6 +289,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
6020     {
6021     struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
6022    
6023     + qxl_bo_unref(&qxl_crtc->cursor_bo);
6024     drm_crtc_cleanup(crtc);
6025     kfree(qxl_crtc);
6026     }
6027     @@ -305,7 +306,9 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = {
6028     void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
6029     {
6030     struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
6031     + struct qxl_bo *bo = gem_to_qxl_bo(qxl_fb->obj);
6032    
6033     + WARN_ON(bo->shadow);
6034     drm_gem_object_unreference_unlocked(qxl_fb->obj);
6035     drm_framebuffer_cleanup(fb);
6036     kfree(qxl_fb);
6037     @@ -493,6 +496,53 @@ static int qxl_primary_atomic_check(struct drm_plane *plane,
6038     return 0;
6039     }
6040    
6041     +static int qxl_primary_apply_cursor(struct drm_plane *plane)
6042     +{
6043     + struct drm_device *dev = plane->dev;
6044     + struct qxl_device *qdev = dev->dev_private;
6045     + struct drm_framebuffer *fb = plane->state->fb;
6046     + struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
6047     + struct qxl_cursor_cmd *cmd;
6048     + struct qxl_release *release;
6049     + int ret = 0;
6050     +
6051     + if (!qcrtc->cursor_bo)
6052     + return 0;
6053     +
6054     + ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
6055     + QXL_RELEASE_CURSOR_CMD,
6056     + &release, NULL);
6057     + if (ret)
6058     + return ret;
6059     +
6060     + ret = qxl_release_list_add(release, qcrtc->cursor_bo);
6061     + if (ret)
6062     + goto out_free_release;
6063     +
6064     + ret = qxl_release_reserve_list(release, false);
6065     + if (ret)
6066     + goto out_free_release;
6067     +
6068     + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
6069     + cmd->type = QXL_CURSOR_SET;
6070     + cmd->u.set.position.x = plane->state->crtc_x + fb->hot_x;
6071     + cmd->u.set.position.y = plane->state->crtc_y + fb->hot_y;
6072     +
6073     + cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);
6074     +
6075     + cmd->u.set.visible = 1;
6076     + qxl_release_unmap(qdev, release, &cmd->release_info);
6077     +
6078     + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
6079     + qxl_release_fence_buffer_objects(release);
6080     +
6081     + return ret;
6082     +
6083     +out_free_release:
6084     + qxl_release_free(qdev, release);
6085     + return ret;
6086     +}
6087     +
6088     static void qxl_primary_atomic_update(struct drm_plane *plane,
6089     struct drm_plane_state *old_state)
6090     {
6091     @@ -508,6 +558,8 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
6092     .x2 = qfb->base.width,
6093     .y2 = qfb->base.height
6094     };
6095     + int ret;
6096     + bool same_shadow = false;
6097    
6098     if (old_state->fb) {
6099     qfb_old = to_qxl_framebuffer(old_state->fb);
6100     @@ -519,15 +571,28 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
6101     if (bo == bo_old)
6102     return;
6103    
6104     + if (bo_old && bo_old->shadow && bo->shadow &&
6105     + bo_old->shadow == bo->shadow) {
6106     + same_shadow = true;
6107     + }
6108     +
6109     if (bo_old && bo_old->is_primary) {
6110     - qxl_io_destroy_primary(qdev);
6111     + if (!same_shadow)
6112     + qxl_io_destroy_primary(qdev);
6113     bo_old->is_primary = false;
6114     +
6115     + ret = qxl_primary_apply_cursor(plane);
6116     + if (ret)
6117     + DRM_ERROR(
6118     + "could not set cursor after creating primary");
6119     }
6120    
6121     if (!bo->is_primary) {
6122     - qxl_io_create_primary(qdev, 0, bo);
6123     + if (!same_shadow)
6124     + qxl_io_create_primary(qdev, 0, bo);
6125     bo->is_primary = true;
6126     }
6127     +
6128     qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1);
6129     }
6130    
6131     @@ -560,11 +625,12 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
6132     struct drm_device *dev = plane->dev;
6133     struct qxl_device *qdev = dev->dev_private;
6134     struct drm_framebuffer *fb = plane->state->fb;
6135     + struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
6136     struct qxl_release *release;
6137     struct qxl_cursor_cmd *cmd;
6138     struct qxl_cursor *cursor;
6139     struct drm_gem_object *obj;
6140     - struct qxl_bo *cursor_bo, *user_bo = NULL;
6141     + struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
6142     int ret;
6143     void *user_ptr;
6144     int size = 64*64*4;
6145     @@ -617,6 +683,10 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
6146     cmd->u.set.shape = qxl_bo_physical_address(qdev,
6147     cursor_bo, 0);
6148     cmd->type = QXL_CURSOR_SET;
6149     +
6150     + qxl_bo_unref(&qcrtc->cursor_bo);
6151     + qcrtc->cursor_bo = cursor_bo;
6152     + cursor_bo = NULL;
6153     } else {
6154    
6155     ret = qxl_release_reserve_list(release, true);
6156     @@ -634,6 +704,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
6157     qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
6158     qxl_release_fence_buffer_objects(release);
6159    
6160     + qxl_bo_unref(&cursor_bo);
6161     +
6162     return;
6163    
6164     out_backoff:
6165     @@ -679,8 +751,9 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
6166     static int qxl_plane_prepare_fb(struct drm_plane *plane,
6167     struct drm_plane_state *new_state)
6168     {
6169     + struct qxl_device *qdev = plane->dev->dev_private;
6170     struct drm_gem_object *obj;
6171     - struct qxl_bo *user_bo;
6172     + struct qxl_bo *user_bo, *old_bo = NULL;
6173     int ret;
6174    
6175     if (!new_state->fb)
6176     @@ -689,6 +762,32 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
6177     obj = to_qxl_framebuffer(new_state->fb)->obj;
6178     user_bo = gem_to_qxl_bo(obj);
6179    
6180     + if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6181     + user_bo->is_dumb && !user_bo->shadow) {
6182     + if (plane->state->fb) {
6183     + obj = to_qxl_framebuffer(plane->state->fb)->obj;
6184     + old_bo = gem_to_qxl_bo(obj);
6185     + }
6186     + if (old_bo && old_bo->shadow &&
6187     + user_bo->gem_base.size == old_bo->gem_base.size &&
6188     + plane->state->crtc == new_state->crtc &&
6189     + plane->state->crtc_w == new_state->crtc_w &&
6190     + plane->state->crtc_h == new_state->crtc_h &&
6191     + plane->state->src_x == new_state->src_x &&
6192     + plane->state->src_y == new_state->src_y &&
6193     + plane->state->src_w == new_state->src_w &&
6194     + plane->state->src_h == new_state->src_h &&
6195     + plane->state->rotation == new_state->rotation &&
6196     + plane->state->zpos == new_state->zpos) {
6197     + drm_gem_object_get(&old_bo->shadow->gem_base);
6198     + user_bo->shadow = old_bo->shadow;
6199     + } else {
6200     + qxl_bo_create(qdev, user_bo->gem_base.size,
6201     + true, true, QXL_GEM_DOMAIN_VRAM, NULL,
6202     + &user_bo->shadow);
6203     + }
6204     + }
6205     +
6206     ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
6207     if (ret)
6208     return ret;
6209     @@ -713,6 +812,11 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
6210     obj = to_qxl_framebuffer(old_state->fb)->obj;
6211     user_bo = gem_to_qxl_bo(obj);
6212     qxl_bo_unpin(user_bo);
6213     +
6214     + if (user_bo->shadow && !user_bo->is_primary) {
6215     + drm_gem_object_put_unlocked(&user_bo->shadow->gem_base);
6216     + user_bo->shadow = NULL;
6217     + }
6218     }
6219    
6220     static const uint32_t qxl_cursor_plane_formats[] = {
6221     diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
6222     index 3397a1907336..c0a927efa653 100644
6223     --- a/drivers/gpu/drm/qxl/qxl_drv.h
6224     +++ b/drivers/gpu/drm/qxl/qxl_drv.h
6225     @@ -113,6 +113,8 @@ struct qxl_bo {
6226     /* Constant after initialization */
6227     struct drm_gem_object gem_base;
6228     bool is_primary; /* is this now a primary surface */
6229     + bool is_dumb;
6230     + struct qxl_bo *shadow;
6231     bool hw_surf_alloc;
6232     struct qxl_surface surf;
6233     uint32_t surface_id;
6234     @@ -133,6 +135,8 @@ struct qxl_bo_list {
6235     struct qxl_crtc {
6236     struct drm_crtc base;
6237     int index;
6238     +
6239     + struct qxl_bo *cursor_bo;
6240     };
6241    
6242     struct qxl_output {
6243     diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
6244     index 5e65d5d2d937..11085ab01374 100644
6245     --- a/drivers/gpu/drm/qxl/qxl_dumb.c
6246     +++ b/drivers/gpu/drm/qxl/qxl_dumb.c
6247     @@ -63,6 +63,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
6248     &handle);
6249     if (r)
6250     return r;
6251     + qobj->is_dumb = true;
6252     args->pitch = pitch;
6253     args->handle = handle;
6254     return 0;
6255     diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
6256     index d34d1cf33895..95f4db70dd22 100644
6257     --- a/drivers/gpu/drm/radeon/radeon_uvd.c
6258     +++ b/drivers/gpu/drm/radeon/radeon_uvd.c
6259     @@ -995,7 +995,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
6260     /* calc dclk divider with current vco freq */
6261     dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
6262     pd_min, pd_even);
6263     - if (vclk_div > pd_max)
6264     + if (dclk_div > pd_max)
6265     break; /* vco is too big, it has to stop */
6266    
6267     /* calc score with current vco freq */
6268     diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
6269     index ee3e74266a13..97a0a639dad9 100644
6270     --- a/drivers/gpu/drm/radeon/si_dpm.c
6271     +++ b/drivers/gpu/drm/radeon/si_dpm.c
6272     @@ -2984,6 +2984,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
6273     (rdev->pdev->device == 0x6667)) {
6274     max_sclk = 75000;
6275     }
6276     + if ((rdev->pdev->revision == 0xC3) ||
6277     + (rdev->pdev->device == 0x6665)) {
6278     + max_sclk = 60000;
6279     + max_mclk = 80000;
6280     + }
6281     } else if (rdev->family == CHIP_OLAND) {
6282     if ((rdev->pdev->revision == 0xC7) ||
6283     (rdev->pdev->revision == 0x80) ||
6284     diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
6285     index c088703777e2..68eed684dff5 100644
6286     --- a/drivers/gpu/drm/ttm/ttm_bo.c
6287     +++ b/drivers/gpu/drm/ttm/ttm_bo.c
6288     @@ -175,7 +175,8 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
6289     list_add_tail(&bo->lru, &man->lru[bo->priority]);
6290     kref_get(&bo->list_kref);
6291    
6292     - if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
6293     + if (bo->ttm && !(bo->ttm->page_flags &
6294     + (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
6295     list_add_tail(&bo->swap,
6296     &bo->glob->swap_lru[bo->priority]);
6297     kref_get(&bo->list_kref);
6298     diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
6299     index c8ebb757e36b..b17d0d38f290 100644
6300     --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
6301     +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
6302     @@ -299,7 +299,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
6303    
6304     static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
6305     unsigned long offset,
6306     - void *buf, int len, int write)
6307     + uint8_t *buf, int len, int write)
6308     {
6309     unsigned long page = offset >> PAGE_SHIFT;
6310     unsigned long bytes_left = len;
6311     @@ -328,6 +328,7 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
6312     ttm_bo_kunmap(&map);
6313    
6314     page++;
6315     + buf += bytes;
6316     bytes_left -= bytes;
6317     offset = 0;
6318     } while (bytes_left);
6319     diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
6320     index c13a4fd86b3c..a42744c7665b 100644
6321     --- a/drivers/hwmon/coretemp.c
6322     +++ b/drivers/hwmon/coretemp.c
6323     @@ -268,13 +268,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
6324     for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
6325     const struct tjmax_model *tm = &tjmax_model_table[i];
6326     if (c->x86_model == tm->model &&
6327     - (tm->mask == ANY || c->x86_mask == tm->mask))
6328     + (tm->mask == ANY || c->x86_stepping == tm->mask))
6329     return tm->tjmax;
6330     }
6331    
6332     /* Early chips have no MSR for TjMax */
6333    
6334     - if (c->x86_model == 0xf && c->x86_mask < 4)
6335     + if (c->x86_model == 0xf && c->x86_stepping < 4)
6336     usemsr_ee = 0;
6337    
6338     if (c->x86_model > 0xe && usemsr_ee) {
6339     @@ -425,7 +425,7 @@ static int chk_ucode_version(unsigned int cpu)
6340     * Readings might stop update when processor visited too deep sleep,
6341     * fixed for stepping D0 (6EC).
6342     */
6343     - if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
6344     + if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
6345     pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
6346     return -ENODEV;
6347     }
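
The s/x86_mask/x86_stepping/ changes in these hwmon hunks are mechanical renames: the field has always held the CPUID stepping ID, and the old name was simply misleading. For reference, a small sketch of how family, model, and stepping fall out of the CPUID leaf-1 signature the kernel stores; the layout below follows the documented encoding, and the sample value 0x306c3 is just an example (a Haswell signature, matching the i915 GT tables earlier in this patch):

    #include <stdio.h>
    #include <stdint.h>

    /* Decode a CPUID leaf 1 signature (EAX) into family/model/stepping. */
    static void decode_signature(uint32_t eax)
    {
            unsigned stepping = eax & 0xf;
            unsigned model = (eax >> 4) & 0xf;
            unsigned family = (eax >> 8) & 0xf;

            if (family == 0xf)                      /* extended family */
                    family += (eax >> 20) & 0xff;
            if (family >= 0x6)                      /* extended model */
                    model += ((eax >> 16) & 0xf) << 4;

            printf("family 0x%x model 0x%x stepping 0x%x\n",
                   family, model, stepping);
    }

    int main(void)
    {
            decode_signature(0x306c3);  /* -> family 0x6 model 0x3c stepping 0x3 */
            return 0;
    }
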
6348     diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
6349     index ef91b8a67549..84e91286fc4f 100644
6350     --- a/drivers/hwmon/hwmon-vid.c
6351     +++ b/drivers/hwmon/hwmon-vid.c
6352     @@ -293,7 +293,7 @@ u8 vid_which_vrm(void)
6353     if (c->x86 < 6) /* Any CPU with family lower than 6 */
6354     return 0; /* doesn't have VID */
6355    
6356     - vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor);
6357     + vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor);
6358     if (vrm_ret == 134)
6359     vrm_ret = get_via_model_d_vrm();
6360     if (vrm_ret == 0)
6361     diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
6362     index ce3b91f22e30..5c740996aa62 100644
6363     --- a/drivers/hwmon/k10temp.c
6364     +++ b/drivers/hwmon/k10temp.c
6365     @@ -179,7 +179,7 @@ static bool has_erratum_319(struct pci_dev *pdev)
6366     * and AM3 formats, but that's the best we can do.
6367     */
6368     return boot_cpu_data.x86_model < 4 ||
6369     - (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
6370     + (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
6371     }
6372    
6373     static int k10temp_probe(struct pci_dev *pdev,
6374     diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
6375     index 5a632bcf869b..e59f9113fb93 100644
6376     --- a/drivers/hwmon/k8temp.c
6377     +++ b/drivers/hwmon/k8temp.c
6378     @@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev,
6379     return -ENOMEM;
6380    
6381     model = boot_cpu_data.x86_model;
6382     - stepping = boot_cpu_data.x86_mask;
6383     + stepping = boot_cpu_data.x86_stepping;
6384    
6385     /* feature available since SH-C0, exclude older revisions */
6386     if ((model == 4 && stepping == 0) ||
6387     diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
6388     index 84fc32a2c8b3..ebfdb5503701 100644
6389     --- a/drivers/infiniband/core/device.c
6390     +++ b/drivers/infiniband/core/device.c
6391     @@ -446,7 +446,6 @@ int ib_register_device(struct ib_device *device,
6392     struct ib_udata uhw = {.outlen = 0, .inlen = 0};
6393     struct device *parent = device->dev.parent;
6394    
6395     - WARN_ON_ONCE(!parent);
6396     WARN_ON_ONCE(device->dma_device);
6397     if (device->dev.dma_ops) {
6398     /*
6399     @@ -455,16 +454,25 @@ int ib_register_device(struct ib_device *device,
6400     * into device->dev.
6401     */
6402     device->dma_device = &device->dev;
6403     - if (!device->dev.dma_mask)
6404     - device->dev.dma_mask = parent->dma_mask;
6405     - if (!device->dev.coherent_dma_mask)
6406     - device->dev.coherent_dma_mask =
6407     - parent->coherent_dma_mask;
6408     + if (!device->dev.dma_mask) {
6409     + if (parent)
6410     + device->dev.dma_mask = parent->dma_mask;
6411     + else
6412     + WARN_ON_ONCE(true);
6413     + }
6414     + if (!device->dev.coherent_dma_mask) {
6415     + if (parent)
6416     + device->dev.coherent_dma_mask =
6417     + parent->coherent_dma_mask;
6418     + else
6419     + WARN_ON_ONCE(true);
6420     + }
6421     } else {
6422     /*
6423     * The caller did not provide custom DMA operations. Use the
6424     * DMA mapping operations of the parent device.
6425     */
6426     + WARN_ON_ONCE(!parent);
6427     device->dma_device = parent;
6428     }
6429    
6430     diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
6431     index abc5ab581f82..0a1e96c25ca3 100644
6432     --- a/drivers/infiniband/core/sysfs.c
6433     +++ b/drivers/infiniband/core/sysfs.c
6434     @@ -1262,7 +1262,6 @@ int ib_device_register_sysfs(struct ib_device *device,
6435     int ret;
6436     int i;
6437    
6438     - WARN_ON_ONCE(!device->dev.parent);
6439     ret = dev_set_name(class_dev, "%s", device->name);
6440     if (ret)
6441     return ret;
6442     diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
6443     index 603acaf91828..6511cb21f6e2 100644
6444     --- a/drivers/infiniband/core/user_mad.c
6445     +++ b/drivers/infiniband/core/user_mad.c
6446     @@ -500,7 +500,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
6447     }
6448    
6449     memset(&ah_attr, 0, sizeof ah_attr);
6450     - ah_attr.type = rdma_ah_find_type(file->port->ib_dev,
6451     + ah_attr.type = rdma_ah_find_type(agent->device,
6452     file->port->port_num);
6453     rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid));
6454     rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl);
6455     diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
6456     index 0a98579700ec..5f9321eda1b7 100644
6457     --- a/drivers/infiniband/core/uverbs_std_types.c
6458     +++ b/drivers/infiniband/core/uverbs_std_types.c
6459     @@ -315,7 +315,7 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev,
6460     cq->uobject = &obj->uobject;
6461     cq->comp_handler = ib_uverbs_comp_handler;
6462     cq->event_handler = ib_uverbs_cq_event_handler;
6463     - cq->cq_context = &ev_file->ev_queue;
6464     + cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
6465     obj->uobject.object = cq;
6466     obj->uobject.user_handle = user_handle;
6467     atomic_set(&cq->usecnt, 0);
6468     diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
6469     index c636842c5be0..8c681a36e6c7 100644
6470     --- a/drivers/infiniband/hw/mlx4/main.c
6471     +++ b/drivers/infiniband/hw/mlx4/main.c
6472     @@ -2972,9 +2972,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
6473     kfree(ibdev->ib_uc_qpns_bitmap);
6474    
6475     err_steer_qp_release:
6476     - if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
6477     - mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
6478     - ibdev->steer_qpn_count);
6479     + mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
6480     + ibdev->steer_qpn_count);
6481     err_counter:
6482     for (i = 0; i < ibdev->num_ports; ++i)
6483     mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
6484     @@ -3079,11 +3078,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
6485     ibdev->iboe.nb.notifier_call = NULL;
6486     }
6487    
6488     - if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
6489     - mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
6490     - ibdev->steer_qpn_count);
6491     - kfree(ibdev->ib_uc_qpns_bitmap);
6492     - }
6493     + mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
6494     + ibdev->steer_qpn_count);
6495     + kfree(ibdev->ib_uc_qpns_bitmap);
6496    
6497     iounmap(ibdev->uar_map);
6498     for (p = 0; p < ibdev->num_ports; ++p)
6499     diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
6500     index e9a91736b12d..d80b61a71eb8 100644
6501     --- a/drivers/infiniband/hw/qib/qib_rc.c
6502     +++ b/drivers/infiniband/hw/qib/qib_rc.c
6503     @@ -434,13 +434,13 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
6504     qp->s_state = OP(COMPARE_SWAP);
6505     put_ib_ateth_swap(wqe->atomic_wr.swap,
6506     &ohdr->u.atomic_eth);
6507     - put_ib_ateth_swap(wqe->atomic_wr.compare_add,
6508     - &ohdr->u.atomic_eth);
6509     + put_ib_ateth_compare(wqe->atomic_wr.compare_add,
6510     + &ohdr->u.atomic_eth);
6511     } else {
6512     qp->s_state = OP(FETCH_ADD);
6513     put_ib_ateth_swap(wqe->atomic_wr.compare_add,
6514     &ohdr->u.atomic_eth);
6515     - put_ib_ateth_swap(0, &ohdr->u.atomic_eth);
6516     + put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
6517     }
6518     put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
6519     &ohdr->u.atomic_eth);
6520     diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
6521     index 77b3ed0df936..7f945f65d8cd 100644
6522     --- a/drivers/infiniband/sw/rxe/rxe_loc.h
6523     +++ b/drivers/infiniband/sw/rxe/rxe_loc.h
6524     @@ -237,7 +237,6 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
6525    
6526     void rxe_release(struct kref *kref);
6527    
6528     -void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify);
6529     int rxe_completer(void *arg);
6530     int rxe_requester(void *arg);
6531     int rxe_responder(void *arg);
6532     diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
6533     index 00bda9380a2e..aeea994b04c4 100644
6534     --- a/drivers/infiniband/sw/rxe/rxe_qp.c
6535     +++ b/drivers/infiniband/sw/rxe/rxe_qp.c
6536     @@ -824,9 +824,9 @@ void rxe_qp_destroy(struct rxe_qp *qp)
6537     }
6538    
6539     /* called when the last reference to the qp is dropped */
6540     -void rxe_qp_cleanup(struct rxe_pool_entry *arg)
6541     +static void rxe_qp_do_cleanup(struct work_struct *work)
6542     {
6543     - struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
6544     + struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
6545    
6546     rxe_drop_all_mcast_groups(qp);
6547    
6548     @@ -859,3 +859,11 @@ void rxe_qp_cleanup(struct rxe_pool_entry *arg)
6549     kernel_sock_shutdown(qp->sk, SHUT_RDWR);
6550     sock_release(qp->sk);
6551     }
6552     +
6553     +/* called when the last reference to the qp is dropped */
6554     +void rxe_qp_cleanup(struct rxe_pool_entry *arg)
6555     +{
6556     + struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
6557     +
6558     + execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
6559     +}
6560     diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
6561     index d84222f9d5d2..44b838ec9420 100644
6562     --- a/drivers/infiniband/sw/rxe/rxe_req.c
6563     +++ b/drivers/infiniband/sw/rxe/rxe_req.c
6564     @@ -594,15 +594,8 @@ int rxe_requester(void *arg)
6565     rxe_add_ref(qp);
6566    
6567     next_wqe:
6568     - if (unlikely(!qp->valid)) {
6569     - rxe_drain_req_pkts(qp, true);
6570     + if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
6571     goto exit;
6572     - }
6573     -
6574     - if (unlikely(qp->req.state == QP_STATE_ERROR)) {
6575     - rxe_drain_req_pkts(qp, true);
6576     - goto exit;
6577     - }
6578    
6579     if (unlikely(qp->req.state == QP_STATE_RESET)) {
6580     qp->req.wqe_index = consumer_index(qp->sq.queue);
6581     diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
6582     index 4240866a5331..01f926fd9029 100644
6583     --- a/drivers/infiniband/sw/rxe/rxe_resp.c
6584     +++ b/drivers/infiniband/sw/rxe/rxe_resp.c
6585     @@ -1210,7 +1210,7 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
6586     }
6587     }
6588    
6589     -void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
6590     +static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
6591     {
6592     struct sk_buff *skb;
6593    
6594     diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
6595     index 0b362f49a10a..afbf701dc9a7 100644
6596     --- a/drivers/infiniband/sw/rxe/rxe_verbs.c
6597     +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
6598     @@ -813,6 +813,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
6599     (queue_count(qp->sq.queue) > 1);
6600    
6601     rxe_run_task(&qp->req.task, must_sched);
6602     + if (unlikely(qp->req.state == QP_STATE_ERROR))
6603     + rxe_run_task(&qp->comp.task, 1);
6604    
6605     return err;
6606     }
6607     diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
6608     index 0c2dbe45c729..1019f5e7dbdd 100644
6609     --- a/drivers/infiniband/sw/rxe/rxe_verbs.h
6610     +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
6611     @@ -35,6 +35,7 @@
6612     #define RXE_VERBS_H
6613    
6614     #include <linux/interrupt.h>
6615     +#include <linux/workqueue.h>
6616     #include <rdma/rdma_user_rxe.h>
6617     #include "rxe_pool.h"
6618     #include "rxe_task.h"
6619     @@ -281,6 +282,8 @@ struct rxe_qp {
6620     struct timer_list rnr_nak_timer;
6621    
6622     spinlock_t state_lock; /* guard requester and completer */
6623     +
6624     + struct execute_work cleanup_work;
6625     };
6626    
6627     enum rxe_mem_state {
6628     diff --git a/drivers/md/dm.c b/drivers/md/dm.c
6629     index 804419635cc7..1dfc855ac708 100644
6630     --- a/drivers/md/dm.c
6631     +++ b/drivers/md/dm.c
6632     @@ -815,7 +815,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
6633     queue_io(md, bio);
6634     } else {
6635     /* done with normal IO or empty flush */
6636     - bio->bi_status = io_error;
6637     + if (io_error)
6638     + bio->bi_status = io_error;
6639     bio_endio(bio);
6640     }
6641     }
6642     diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
6643     index ba80376a3b86..d097eb04a0e9 100644
6644     --- a/drivers/media/tuners/r820t.c
6645     +++ b/drivers/media/tuners/r820t.c
6646     @@ -396,9 +396,11 @@ static int r820t_write(struct r820t_priv *priv, u8 reg, const u8 *val,
6647     return 0;
6648     }
6649    
6650     -static int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val)
6651     +static inline int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val)
6652     {
6653     - return r820t_write(priv, reg, &val, 1);
6654     + u8 tmp = val; /* work around GCC PR81715 with asan-stack=1 */
6655     +
6656     + return r820t_write(priv, reg, &tmp, 1);
6657     }
6658    
6659     static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
6660     @@ -411,17 +413,18 @@ static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
6661     return -EINVAL;
6662     }
6663    
6664     -static int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val,
6665     +static inline int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val,
6666     u8 bit_mask)
6667     {
6668     + u8 tmp = val;
6669     int rc = r820t_read_cache_reg(priv, reg);
6670    
6671     if (rc < 0)
6672     return rc;
6673    
6674     - val = (rc & ~bit_mask) | (val & bit_mask);
6675     + tmp = (rc & ~bit_mask) | (tmp & bit_mask);
6676    
6677     - return r820t_write(priv, reg, &val, 1);
6678     + return r820t_write(priv, reg, &tmp, 1);
6679     }
6680    
6681     static int r820t_read(struct r820t_priv *priv, u8 reg, u8 *val, int len)
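
The r820t hunk above is a code-size workaround, as its comment says: with -fsanitize=address and asan-stack=1, GCC (PR81715) creates a separate instrumented stack slot each time the address of a by-value parameter is taken, which bloats every caller once these small helpers are inlined. Copying the parameter into one named local caps that at a single slot. A minimal standalone illustration of the pattern (the bus write is a stub, not the driver's I/O path):

    #include <stdio.h>
    #include <stdint.h>

    /* Stub standing in for a bus write that needs a pointer to the data. */
    static int bus_write(uint8_t reg, const uint8_t *buf, int len)
    {
            printf("reg 0x%02x <- 0x%02x (%d byte%s)\n",
                   reg, buf[0], len, len == 1 ? "" : "s");
            return 0;
    }

    /* Workaround pattern: take the address of a named local copy, not of
     * the by-value parameter itself, so an inlining compiler with stack
     * sanitizer instrumentation reuses one slot per caller.
     */
    static inline int write_reg(uint8_t reg, uint8_t val)
    {
            uint8_t tmp = val;

            return bus_write(reg, &tmp, 1);
    }

    int main(void)
    {
            return write_reg(0x05, 0xa0);
    }
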
6682     diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
6683     index 1922cb8f6b88..1c5b7aec13d4 100644
6684     --- a/drivers/misc/c2port/core.c
6685     +++ b/drivers/misc/c2port/core.c
6686     @@ -15,7 +15,6 @@
6687     #include <linux/errno.h>
6688     #include <linux/err.h>
6689     #include <linux/kernel.h>
6690     -#include <linux/kmemcheck.h>
6691     #include <linux/ctype.h>
6692     #include <linux/delay.h>
6693     #include <linux/idr.h>
6694     @@ -904,7 +903,6 @@ struct c2port_device *c2port_device_register(char *name,
6695     return ERR_PTR(-EINVAL);
6696    
6697     c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
6698     - kmemcheck_annotate_bitfield(c2dev, flags);
6699     if (unlikely(!c2dev))
6700     return ERR_PTR(-ENOMEM);
6701    
6702     diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
6703     index 229dc18f0581..768972af8b85 100644
6704     --- a/drivers/mmc/host/bcm2835.c
6705     +++ b/drivers/mmc/host/bcm2835.c
6706     @@ -1265,7 +1265,8 @@ static int bcm2835_add_host(struct bcm2835_host *host)
6707     char pio_limit_string[20];
6708     int ret;
6709    
6710     - mmc->f_max = host->max_clk;
6711     + if (!mmc->f_max || mmc->f_max > host->max_clk)
6712     + mmc->f_max = host->max_clk;
6713     mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV;
6714    
6715     mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000);
6716     diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
6717     index 85745ef179e2..08a55c2e96e1 100644
6718     --- a/drivers/mmc/host/meson-gx-mmc.c
6719     +++ b/drivers/mmc/host/meson-gx-mmc.c
6720     @@ -716,22 +716,6 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
6721     static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
6722     {
6723     struct meson_host *host = mmc_priv(mmc);
6724     - int ret;
6725     -
6726     - /*
6727     - * If this is the initial tuning, try to get a sane Rx starting
6728     - * phase before doing the actual tuning.
6729     - */
6730     - if (!mmc->doing_retune) {
6731     - ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
6732     -
6733     - if (ret)
6734     - return ret;
6735     - }
6736     -
6737     - ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
6738     - if (ret)
6739     - return ret;
6740    
6741     return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
6742     }
6743     @@ -762,9 +746,8 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
6744     if (!IS_ERR(mmc->supply.vmmc))
6745     mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
6746    
6747     - /* Reset phases */
6748     + /* Reset rx phase */
6749     clk_set_phase(host->rx_clk, 0);
6750     - clk_set_phase(host->tx_clk, 270);
6751    
6752     break;
6753    
6754     diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
6755     index d96a057a7db8..4ffa6b173a21 100644
6756     --- a/drivers/mmc/host/sdhci-of-esdhc.c
6757     +++ b/drivers/mmc/host/sdhci-of-esdhc.c
6758     @@ -458,6 +458,33 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
6759     return clock / 256 / 16;
6760     }
6761    
6762     +static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
6763     +{
6764     + u32 val;
6765     + ktime_t timeout;
6766     +
6767     + val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
6768     +
6769     + if (enable)
6770     + val |= ESDHC_CLOCK_SDCLKEN;
6771     + else
6772     + val &= ~ESDHC_CLOCK_SDCLKEN;
6773     +
6774     + sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
6775     +
6776     + /* Wait max 20 ms */
6777     + timeout = ktime_add_ms(ktime_get(), 20);
6778     + val = ESDHC_CLOCK_STABLE;
6779     + while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
6780     + if (ktime_after(ktime_get(), timeout)) {
6781     + pr_err("%s: Internal clock never stabilised.\n",
6782     + mmc_hostname(host->mmc));
6783     + break;
6784     + }
6785     + udelay(10);
6786     + }
6787     +}
6788     +
6789     static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
6790     {
6791     struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
6792     @@ -469,8 +496,10 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
6793    
6794     host->mmc->actual_clock = 0;
6795    
6796     - if (clock == 0)
6797     + if (clock == 0) {
6798     + esdhc_clock_enable(host, false);
6799     return;
6800     + }
6801    
6802     /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
6803     if (esdhc->vendor_ver < VENDOR_V_23)
6804     @@ -558,39 +587,20 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
6805     sdhci_writel(host, ctrl, ESDHC_PROCTL);
6806     }
6807    
6808     -static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
6809     +static void esdhc_reset(struct sdhci_host *host, u8 mask)
6810     {
6811     u32 val;
6812     - ktime_t timeout;
6813     -
6814     - val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
6815    
6816     - if (enable)
6817     - val |= ESDHC_CLOCK_SDCLKEN;
6818     - else
6819     - val &= ~ESDHC_CLOCK_SDCLKEN;
6820     -
6821     - sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
6822     -
6823     - /* Wait max 20 ms */
6824     - timeout = ktime_add_ms(ktime_get(), 20);
6825     - val = ESDHC_CLOCK_STABLE;
6826     - while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
6827     - if (ktime_after(ktime_get(), timeout)) {
6828     - pr_err("%s: Internal clock never stabilised.\n",
6829     - mmc_hostname(host->mmc));
6830     - break;
6831     - }
6832     - udelay(10);
6833     - }
6834     -}
6835     -
6836     -static void esdhc_reset(struct sdhci_host *host, u8 mask)
6837     -{
6838     sdhci_reset(host, mask);
6839    
6840     sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
6841     sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
6842     +
6843     + if (mask & SDHCI_RESET_ALL) {
6844     + val = sdhci_readl(host, ESDHC_TBCTL);
6845     + val &= ~ESDHC_TB_EN;
6846     + sdhci_writel(host, val, ESDHC_TBCTL);
6847     + }
6848     }
6849    
6850     /* The SCFG, Supplemental Configuration Unit, provides SoC specific
6851     diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
6852     index 6152e83ff935..90cc1977b792 100644
6853     --- a/drivers/mmc/host/sdhci.c
6854     +++ b/drivers/mmc/host/sdhci.c
6855     @@ -21,6 +21,7 @@
6856     #include <linux/dma-mapping.h>
6857     #include <linux/slab.h>
6858     #include <linux/scatterlist.h>
6859     +#include <linux/sizes.h>
6860     #include <linux/swiotlb.h>
6861     #include <linux/regulator/consumer.h>
6862     #include <linux/pm_runtime.h>
6863     @@ -502,8 +503,35 @@ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
6864     if (data->host_cookie == COOKIE_PRE_MAPPED)
6865     return data->sg_count;
6866    
6867     - sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
6868     - mmc_get_dma_dir(data));
6869     + /* Bounce write requests to the bounce buffer */
6870     + if (host->bounce_buffer) {
6871     + unsigned int length = data->blksz * data->blocks;
6872     +
6873     + if (length > host->bounce_buffer_size) {
6874     + pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
6875     + mmc_hostname(host->mmc), length,
6876     + host->bounce_buffer_size);
6877     + return -EIO;
6878     + }
6879     + if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
6880     + /* Copy the data to the bounce buffer */
6881     + sg_copy_to_buffer(data->sg, data->sg_len,
6882     + host->bounce_buffer,
6883     + length);
6884     + }
6885     + /* Switch ownership to the DMA */
6886     + dma_sync_single_for_device(host->mmc->parent,
6887     + host->bounce_addr,
6888     + host->bounce_buffer_size,
6889     + mmc_get_dma_dir(data));
6890     + /* Just a dummy value */
6891     + sg_count = 1;
6892     + } else {
6893     + /* Just access the data directly from memory */
6894     + sg_count = dma_map_sg(mmc_dev(host->mmc),
6895     + data->sg, data->sg_len,
6896     + mmc_get_dma_dir(data));
6897     + }
6898    
6899     if (sg_count == 0)
6900     return -ENOSPC;
6901     @@ -673,6 +701,14 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
6902     }
6903     }
6904    
6905     +static u32 sdhci_sdma_address(struct sdhci_host *host)
6906     +{
6907     + if (host->bounce_buffer)
6908     + return host->bounce_addr;
6909     + else
6910     + return sg_dma_address(host->data->sg);
6911     +}
6912     +
6913     static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
6914     {
6915     u8 count;
6916     @@ -858,8 +894,8 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
6917     SDHCI_ADMA_ADDRESS_HI);
6918     } else {
6919     WARN_ON(sg_cnt != 1);
6920     - sdhci_writel(host, sg_dma_address(data->sg),
6921     - SDHCI_DMA_ADDRESS);
6922     + sdhci_writel(host, sdhci_sdma_address(host),
6923     + SDHCI_DMA_ADDRESS);
6924     }
6925     }
6926    
6927     @@ -2248,7 +2284,12 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
6928    
6929     mrq->data->host_cookie = COOKIE_UNMAPPED;
6930    
6931     - if (host->flags & SDHCI_REQ_USE_DMA)
6932     + /*
6933     + * No pre-mapping in the pre hook if we're using the bounce buffer,
6934     + * for that we would need two bounce buffers since one buffer is
6935     + * in flight when this is getting called.
6936     + */
6937     + if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
6938     sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
6939     }
6940    
6941     @@ -2352,8 +2393,45 @@ static bool sdhci_request_done(struct sdhci_host *host)
6942     struct mmc_data *data = mrq->data;
6943    
6944     if (data && data->host_cookie == COOKIE_MAPPED) {
6945     - dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
6946     - mmc_get_dma_dir(data));
6947     + if (host->bounce_buffer) {
6948     + /*
6949     + * On reads, copy the bounced data into the
6950     + * sglist
6951     + */
6952     + if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
6953     + unsigned int length = data->bytes_xfered;
6954     +
6955     + if (length > host->bounce_buffer_size) {
6956     + pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
6957     + mmc_hostname(host->mmc),
6958     + host->bounce_buffer_size,
6959     + data->bytes_xfered);
6960     + /* Cap it down and continue */
6961     + length = host->bounce_buffer_size;
6962     + }
6963     + dma_sync_single_for_cpu(
6964     + host->mmc->parent,
6965     + host->bounce_addr,
6966     + host->bounce_buffer_size,
6967     + DMA_FROM_DEVICE);
6968     + sg_copy_from_buffer(data->sg,
6969     + data->sg_len,
6970     + host->bounce_buffer,
6971     + length);
6972     + } else {
6973     + /* No copying, just switch ownership */
6974     + dma_sync_single_for_cpu(
6975     + host->mmc->parent,
6976     + host->bounce_addr,
6977     + host->bounce_buffer_size,
6978     + mmc_get_dma_dir(data));
6979     + }
6980     + } else {
6981     + /* Unmap the raw data */
6982     + dma_unmap_sg(mmc_dev(host->mmc), data->sg,
6983     + data->sg_len,
6984     + mmc_get_dma_dir(data));
6985     + }
6986     data->host_cookie = COOKIE_UNMAPPED;
6987     }
6988     }
6989     @@ -2636,7 +2714,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
6990     */
6991     if (intmask & SDHCI_INT_DMA_END) {
6992     u32 dmastart, dmanow;
6993     - dmastart = sg_dma_address(host->data->sg);
6994     +
6995     + dmastart = sdhci_sdma_address(host);
6996     dmanow = dmastart + host->data->bytes_xfered;
6997     /*
6998     * Force update to the next DMA block boundary.
6999     @@ -3217,6 +3296,68 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
7000     }
7001     EXPORT_SYMBOL_GPL(__sdhci_read_caps);
7002    
7003     +static int sdhci_allocate_bounce_buffer(struct sdhci_host *host)
7004     +{
7005     + struct mmc_host *mmc = host->mmc;
7006     + unsigned int max_blocks;
7007     + unsigned int bounce_size;
7008     + int ret;
7009     +
7010     + /*
7011     + * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
7012     + * has diminishing returns, this is probably because SD/MMC
7013     + * cards are usually optimized to handle this size of requests.
7014     + */
7015     + bounce_size = SZ_64K;
7016     + /*
7017     + * Adjust downwards to maximum request size if this is less
7018     + * than our segment size, else hammer down the maximum
7019     + * request size to the maximum buffer size.
7020     + */
7021     + if (mmc->max_req_size < bounce_size)
7022     + bounce_size = mmc->max_req_size;
7023     + max_blocks = bounce_size / 512;
7024     +
7025     + /*
7026     + * When we just support one segment, we can get significant
7027     + * speedups by the help of a bounce buffer to group scattered
7028     + * reads/writes together.
7029     + */
7030     + host->bounce_buffer = devm_kmalloc(mmc->parent,
7031     + bounce_size,
7032     + GFP_KERNEL);
7033     + if (!host->bounce_buffer) {
7034     + pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
7035     + mmc_hostname(mmc),
7036     + bounce_size);
7037     + /*
7038     + * Exiting with zero here makes sure we proceed with
7039     + * mmc->max_segs == 1.
7040     + */
7041     + return 0;
7042     + }
7043     +
7044     + host->bounce_addr = dma_map_single(mmc->parent,
7045     + host->bounce_buffer,
7046     + bounce_size,
7047     + DMA_BIDIRECTIONAL);
7048     + ret = dma_mapping_error(mmc->parent, host->bounce_addr);
7049     + if (ret)
7050     + /* Again fall back to max_segs == 1 */
7051     + return 0;
7052     + host->bounce_buffer_size = bounce_size;
7053     +
7054     + /* Lie about this since we're bouncing */
7055     + mmc->max_segs = max_blocks;
7056     + mmc->max_seg_size = bounce_size;
7057     + mmc->max_req_size = bounce_size;
7058     +
7059     + pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
7060     + mmc_hostname(mmc), max_blocks, bounce_size);
7061     +
7062     + return 0;
7063     +}
7064     +
7065     int sdhci_setup_host(struct sdhci_host *host)
7066     {
7067     struct mmc_host *mmc;
7068     @@ -3713,6 +3854,13 @@ int sdhci_setup_host(struct sdhci_host *host)
7069     */
7070     mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
7071    
7072     + if (mmc->max_segs == 1) {
7073     + /* This may alter mmc->*_blk_* parameters */
7074     + ret = sdhci_allocate_bounce_buffer(host);
7075     + if (ret)
7076     + return ret;
7077     + }
7078     +
7079     return 0;
7080    
7081     unreg:
7082     diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
7083     index 54bc444c317f..1d7d61e25dbf 100644
7084     --- a/drivers/mmc/host/sdhci.h
7085     +++ b/drivers/mmc/host/sdhci.h
7086     @@ -440,6 +440,9 @@ struct sdhci_host {
7087    
7088     int irq; /* Device IRQ */
7089     void __iomem *ioaddr; /* Mapped address */
7090     + char *bounce_buffer; /* For packing SDMA reads/writes */
7091     + dma_addr_t bounce_addr;
7092     + unsigned int bounce_buffer_size;
7093    
7094     const struct sdhci_ops *ops; /* Low level hw interface */
7095    
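
The sdhci additions above all serve one idea: when the controller can only do single-segment SDMA, gather a scattered request into one contiguous, DMA-mapped bounce buffer before the transfer and scatter the data back afterwards. A userspace sketch of the gather step, with plain memcpy standing in for sg_copy_to_buffer() and the dma_sync_*() ownership handoff (the segment struct is an illustrative stand-in for a scatterlist entry):

    #include <stdio.h>
    #include <string.h>

    struct segment {                /* stand-in for one scatterlist entry */
            void *buf;
            size_t len;
    };

    /* Gather scattered segments into the bounce buffer before a "DMA" write;
     * returns the single contiguous length handed to the controller, or 0
     * if the request would overflow the bounce buffer.
     */
    static size_t bounce_gather(char *bounce, size_t bounce_size,
                                const struct segment *sg, int nsegs)
    {
            size_t off = 0;
            int i;

            for (i = 0; i < nsegs; i++) {
                    if (off + sg[i].len > bounce_size)
                            return 0;
                    memcpy(bounce + off, sg[i].buf, sg[i].len);
                    off += sg[i].len;
            }
            return off;
    }

    int main(void)
    {
            char a[] = "scattered ", b[] = "request";
            struct segment sg[] = { { a, 10 }, { b, 7 } };
            char bounce[64];
            size_t len = bounce_gather(bounce, sizeof(bounce), sg, 2);

            printf("%zu bytes: %.*s\n", len, (int)len, bounce);
            return 0;
    }

Reads run the same shape in reverse, which is why the sdhci_request_done() hunk copies from the bounce buffer back into the sglist and caps the length at bounce_buffer_size first.
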
7096     diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
7097     index 8037d4b48a05..e2583a539b41 100644
7098     --- a/drivers/mtd/nand/vf610_nfc.c
7099     +++ b/drivers/mtd/nand/vf610_nfc.c
7100     @@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev)
7101     if (mtd->oobsize > 64)
7102     mtd->oobsize = 64;
7103    
7104     - /*
7105     - * mtd->ecclayout is not specified here because we're using the
7106     - * default large page ECC layout defined in NAND core.
7107     - */
7108     + /* Use default large page ECC layout defined in NAND core */
7109     + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
7110     if (chip->ecc.strength == 32) {
7111     nfc->ecc_mode = ECC_60_BYTE;
7112     chip->ecc.bytes = 60;
7113     diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
7114     index 1dd3a1264a53..06f3fe429d82 100644
7115     --- a/drivers/net/ethernet/marvell/mvpp2.c
7116     +++ b/drivers/net/ethernet/marvell/mvpp2.c
7117     @@ -6888,6 +6888,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
7118     int id = port->id;
7119     bool allmulti = dev->flags & IFF_ALLMULTI;
7120    
7121     +retry:
7122     mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
7123     mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
7124     mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
7125     @@ -6895,9 +6896,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
7126     /* Remove all port->id's mcast entries */
7127     mvpp2_prs_mcast_del_all(priv, id);
7128    
7129     - if (allmulti && !netdev_mc_empty(dev)) {
7130     - netdev_for_each_mc_addr(ha, dev)
7131     - mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
7132     + if (!allmulti) {
7133     + netdev_for_each_mc_addr(ha, dev) {
7134     + if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
7135     + allmulti = true;
7136     + goto retry;
7137     + }
7138     + }
7139     }
7140     }
7141    
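
The mvpp2 hunk above changes the policy from "program filters only when not
in allmulti" to "try to program per-address filters and fall back to
accepting all multicast if any entry fails". A sketch of that control flow,
with hypothetical names (accept_addr() standing in for
mvpp2_prs_mac_da_accept()):

    #include <stdbool.h>
    #include <stdio.h>

    /* Pretend the third filter slot cannot be programmed. */
    static int accept_addr(int idx) { return idx == 2 ? -1 : 0; }

    static void set_accept_all_multicast(bool on)
    {
            printf("accept-all multicast: %s\n", on ? "on" : "off");
    }

    static void set_rx_mode(int naddrs, bool allmulti)
    {
    retry:
            set_accept_all_multicast(allmulti);
            if (!allmulti) {
                    for (int i = 0; i < naddrs; i++) {
                            if (accept_addr(i)) {
                                    /* Filter entry failed: fall back to
                                     * accepting all multicast traffic. */
                                    allmulti = true;
                                    goto retry;
                            }
                    }
            }
    }

    int main(void) { set_rx_mode(4, false); return 0; }
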
7142     diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
7143     index 728a2fb1f5c0..22a3bfe1ed8f 100644
7144     --- a/drivers/net/ethernet/mellanox/mlx4/qp.c
7145     +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
7146     @@ -287,6 +287,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
7147     u64 in_param = 0;
7148     int err;
7149    
7150     + if (!cnt)
7151     + return;
7152     +
7153     if (mlx4_is_mfunc(dev)) {
7154     set_param_l(&in_param, base_qpn);
7155     set_param_h(&in_param, cnt);
7156     diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
7157     index cd314946452c..9511f5fe62f4 100644
7158     --- a/drivers/net/wireless/marvell/mwifiex/pcie.c
7159     +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
7160     @@ -2781,7 +2781,10 @@ static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter)
7161     {
7162     struct pcie_service_card *card = adapter->card;
7163    
7164     - pci_reset_function(card->dev);
7165     + /* We can't afford to wait here; remove() might be waiting on us. If we
7166     + * can't grab the device lock, maybe we'll get another chance later.
7167     + */
7168     + pci_try_reset_function(card->dev);
7169     }
7170    
7171     static void mwifiex_pcie_work(struct work_struct *work)
7172     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
7173     index 9ac1511de7ba..b82e5b363c05 100644
7174     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
7175     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
7176     @@ -1122,7 +1122,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
7177     }
7178     if (0 == tmp) {
7179     read_addr = REG_DBI_RDATA + addr % 4;
7180     - ret = rtl_read_word(rtlpriv, read_addr);
7181     + ret = rtl_read_byte(rtlpriv, read_addr);
7182     }
7183     return ret;
7184     }
7185     @@ -1164,7 +1164,8 @@ static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw)
7186     }
7187    
7188     tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f);
7189     - _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7));
7190     + _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7) |
7191     + ASPM_L1_LATENCY << 3);
7192    
7193     tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719);
7194     _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4));
7195     diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
7196     index 1ab1024330fb..25c4e3e55921 100644
7197     --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
7198     +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
7199     @@ -99,6 +99,7 @@
7200     #define RTL_USB_MAX_RX_COUNT 100
7201     #define QBSS_LOAD_SIZE 5
7202     #define MAX_WMMELE_LENGTH 64
7203     +#define ASPM_L1_LATENCY 7
7204    
7205     #define TOTAL_CAM_ENTRY 32
7206    
7207     diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
7208     index 5bee3af47588..39405598b22d 100644
7209     --- a/drivers/pci/dwc/pci-keystone.c
7210     +++ b/drivers/pci/dwc/pci-keystone.c
7211     @@ -178,7 +178,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
7212     }
7213    
7214     /* interrupt controller is in a child node */
7215     - *np_temp = of_find_node_by_name(np_pcie, controller);
7216     + *np_temp = of_get_child_by_name(np_pcie, controller);
7217     if (!(*np_temp)) {
7218     dev_err(dev, "Node for %s is absent\n", controller);
7219     return -EINVAL;
7220     @@ -187,6 +187,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
7221     temp = of_irq_count(*np_temp);
7222     if (!temp) {
7223     dev_err(dev, "No IRQ entries in %s\n", controller);
7224     + of_node_put(*np_temp);
7225     return -EINVAL;
7226     }
7227    
7228     @@ -204,6 +205,8 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
7229     break;
7230     }
7231    
7232     + of_node_put(*np_temp);
7233     +
7234     if (temp) {
7235     *num_irqs = temp;
7236     return 0;
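
Two things happen in the pci-keystone hunk: of_get_child_by_name() replaces
of_find_node_by_name(), which searches the whole tree rather than just the
children and also drops a reference on its starting node, and the reference
returned for the child is now released with of_node_put() on both the error
and the success path. The refcount discipline, modelled generically
(node_get()/node_put() are stand-ins, not the OF API):

    #include <stdio.h>

    struct node { int refcount; };

    static struct node *node_get(struct node *n) { n->refcount++; return n; }
    static void node_put(struct node *n) { n->refcount--; }

    /* Every path that obtained a reference must drop it, including the
     * early-error return -- the leak the hunk fixes. */
    static int count_irqs(struct node *parent, int nirqs)
    {
            struct node *child = node_get(parent); /* like of_get_child_by_name() */

            if (nirqs == 0) {
                    node_put(child);  /* error path */
                    return -1;
            }
            /* ... walk the child's interrupt entries ... */
            node_put(child);          /* success path */
            return nirqs;
    }

    int main(void)
    {
            struct node n = { .refcount = 1 };
            count_irqs(&n, 0);
            count_irqs(&n, 3);
            printf("refcount back to %d\n", n.refcount); /* 1: balanced */
            return 0;
    }
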
7237     diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
7238     index a5073a921a04..32228d41f746 100644
7239     --- a/drivers/pci/host/pcie-iproc-platform.c
7240     +++ b/drivers/pci/host/pcie-iproc-platform.c
7241     @@ -92,6 +92,13 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
7242     pcie->need_ob_cfg = true;
7243     }
7244    
7245     + /*
7246     + * DT nodes are not used by all platforms that use the iProc PCIe
7247     + * core driver. For platforms that require explicit inbound mapping
7248     + * configuration, "dma-ranges" would have been present in DT.
7249     + */
7250     + pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges");
7251     +
7252     /* PHY use is optional */
7253     pcie->phy = devm_phy_get(dev, "pcie-phy");
7254     if (IS_ERR(pcie->phy)) {
7255     diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
7256     index 3a8b9d20ee57..c0ecc9f35667 100644
7257     --- a/drivers/pci/host/pcie-iproc.c
7258     +++ b/drivers/pci/host/pcie-iproc.c
7259     @@ -1396,9 +1396,11 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
7260     }
7261     }
7262    
7263     - ret = iproc_pcie_map_dma_ranges(pcie);
7264     - if (ret && ret != -ENOENT)
7265     - goto err_power_off_phy;
7266     + if (pcie->need_ib_cfg) {
7267     + ret = iproc_pcie_map_dma_ranges(pcie);
7268     + if (ret && ret != -ENOENT)
7269     + goto err_power_off_phy;
7270     + }
7271    
7272     #ifdef CONFIG_ARM
7273     pcie->sysdata.private_data = pcie;
7274     diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
7275     index a6b55cec9a66..4ac6282f2bfd 100644
7276     --- a/drivers/pci/host/pcie-iproc.h
7277     +++ b/drivers/pci/host/pcie-iproc.h
7278     @@ -74,6 +74,7 @@ struct iproc_msi;
7279     * @ob: outbound mapping related parameters
7280     * @ob_map: outbound mapping related parameters specific to the controller
7281     *
7282     + * @need_ib_cfg: indicates SW needs to configure the inbound mapping window
7283     * @ib: inbound mapping related parameters
7284     * @ib_map: inbound mapping region related parameters
7285     *
7286     @@ -101,6 +102,7 @@ struct iproc_pcie {
7287     struct iproc_pcie_ob ob;
7288     const struct iproc_pcie_ob_map *ob_map;
7289    
7290     + bool need_ib_cfg;
7291     struct iproc_pcie_ib ib;
7292     const struct iproc_pcie_ib_map *ib_map;
7293    
7294     diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
7295     index f66f9375177c..4c3feb96f391 100644
7296     --- a/drivers/pci/quirks.c
7297     +++ b/drivers/pci/quirks.c
7298     @@ -1636,8 +1636,8 @@ static void quirk_pcie_mch(struct pci_dev *pdev)
7299     DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
7300     DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
7301     DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);
7302     -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, quirk_pcie_mch);
7303    
7304     +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
7305    
7306     /*
7307     * It's possible for the MSI to get corrupted if shpc and acpi
7308     diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
7309     index 623d322447a2..7c4eb86c851e 100644
7310     --- a/drivers/platform/x86/apple-gmux.c
7311     +++ b/drivers/platform/x86/apple-gmux.c
7312     @@ -24,7 +24,6 @@
7313     #include <linux/delay.h>
7314     #include <linux/pci.h>
7315     #include <linux/vga_switcheroo.h>
7316     -#include <linux/vgaarb.h>
7317     #include <acpi/video.h>
7318     #include <asm/io.h>
7319    
7320     @@ -54,7 +53,6 @@ struct apple_gmux_data {
7321     bool indexed;
7322     struct mutex index_lock;
7323    
7324     - struct pci_dev *pdev;
7325     struct backlight_device *bdev;
7326    
7327     /* switcheroo data */
7328     @@ -599,23 +597,6 @@ static int gmux_resume(struct device *dev)
7329     return 0;
7330     }
7331    
7332     -static struct pci_dev *gmux_get_io_pdev(void)
7333     -{
7334     - struct pci_dev *pdev = NULL;
7335     -
7336     - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) {
7337     - u16 cmd;
7338     -
7339     - pci_read_config_word(pdev, PCI_COMMAND, &cmd);
7340     - if (!(cmd & PCI_COMMAND_IO))
7341     - continue;
7342     -
7343     - return pdev;
7344     - }
7345     -
7346     - return NULL;
7347     -}
7348     -
7349     static int is_thunderbolt(struct device *dev, void *data)
7350     {
7351     return to_pci_dev(dev)->is_thunderbolt;
7352     @@ -631,7 +612,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
7353     int ret = -ENXIO;
7354     acpi_status status;
7355     unsigned long long gpe;
7356     - struct pci_dev *pdev = NULL;
7357    
7358     if (apple_gmux_data)
7359     return -EBUSY;
7360     @@ -682,7 +662,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
7361     ver_minor = (version >> 16) & 0xff;
7362     ver_release = (version >> 8) & 0xff;
7363     } else {
7364     - pr_info("gmux device not present or IO disabled\n");
7365     + pr_info("gmux device not present\n");
7366     ret = -ENODEV;
7367     goto err_release;
7368     }
7369     @@ -690,23 +670,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
7370     pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor,
7371     ver_release, (gmux_data->indexed ? "indexed" : "classic"));
7372    
7373     - /*
7374     - * Apple systems with gmux are EFI based and normally don't use
7375     - * VGA. In addition changing IO+MEM ownership between IGP and dGPU
7376     - * disables IO/MEM used for backlight control on some systems.
7377     - * Lock IO+MEM to GPU with active IO to prevent switch.
7378     - */
7379     - pdev = gmux_get_io_pdev();
7380     - if (pdev && vga_tryget(pdev,
7381     - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM)) {
7382     - pr_err("IO+MEM vgaarb-locking for PCI:%s failed\n",
7383     - pci_name(pdev));
7384     - ret = -EBUSY;
7385     - goto err_release;
7386     - } else if (pdev)
7387     - pr_info("locked IO for PCI:%s\n", pci_name(pdev));
7388     - gmux_data->pdev = pdev;
7389     -
7390     memset(&props, 0, sizeof(props));
7391     props.type = BACKLIGHT_PLATFORM;
7392     props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
7393     @@ -822,10 +785,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
7394     err_notify:
7395     backlight_device_unregister(bdev);
7396     err_release:
7397     - if (gmux_data->pdev)
7398     - vga_put(gmux_data->pdev,
7399     - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM);
7400     - pci_dev_put(pdev);
7401     release_region(gmux_data->iostart, gmux_data->iolen);
7402     err_free:
7403     kfree(gmux_data);
7404     @@ -845,11 +804,6 @@ static void gmux_remove(struct pnp_dev *pnp)
7405     &gmux_notify_handler);
7406     }
7407    
7408     - if (gmux_data->pdev) {
7409     - vga_put(gmux_data->pdev,
7410     - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM);
7411     - pci_dev_put(gmux_data->pdev);
7412     - }
7413     backlight_device_unregister(gmux_data->bdev);
7414    
7415     release_region(gmux_data->iostart, gmux_data->iolen);
7416     diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
7417     index e2a946c0e667..304e891e35fc 100644
7418     --- a/drivers/rtc/rtc-opal.c
7419     +++ b/drivers/rtc/rtc-opal.c
7420     @@ -58,6 +58,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
7421     static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
7422     {
7423     long rc = OPAL_BUSY;
7424     + int retries = 10;
7425     u32 y_m_d;
7426     u64 h_m_s_ms;
7427     __be32 __y_m_d;
7428     @@ -67,8 +68,11 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
7429     rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
7430     if (rc == OPAL_BUSY_EVENT)
7431     opal_poll_events(NULL);
7432     - else
7433     + else if (retries-- && (rc == OPAL_HARDWARE
7434     + || rc == OPAL_INTERNAL_ERROR))
7435     msleep(10);
7436     + else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
7437     + break;
7438     }
7439    
7440     if (rc != OPAL_SUCCESS)
7441     @@ -84,6 +88,7 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
7442     static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
7443     {
7444     long rc = OPAL_BUSY;
7445     + int retries = 10;
7446     u32 y_m_d = 0;
7447     u64 h_m_s_ms = 0;
7448    
7449     @@ -92,8 +97,11 @@ static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
7450     rc = opal_rtc_write(y_m_d, h_m_s_ms);
7451     if (rc == OPAL_BUSY_EVENT)
7452     opal_poll_events(NULL);
7453     - else
7454     + else if (retries-- && (rc == OPAL_HARDWARE
7455     + || rc == OPAL_INTERNAL_ERROR))
7456     msleep(10);
7457     + else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
7458     + break;
7459     }
7460    
7461     return rc == OPAL_SUCCESS ? 0 : -EIO;
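
The rtc-opal change bounds what used to be an unbounded wait: OPAL_HARDWARE
and OPAL_INTERNAL_ERROR are treated as transient and retried up to ten times
with a 10ms sleep, while any other non-busy status terminates the loop. The
shape of that policy, as a small self-contained sketch (dev_read() and the
status codes are hypothetical):

    #include <stdio.h>
    #include <unistd.h>

    enum status { ST_OK, ST_BUSY, ST_HW_ERR, ST_FATAL };

    static int calls;
    /* Fails transiently twice, then succeeds. */
    static enum status dev_read(void) { return ++calls < 3 ? ST_HW_ERR : ST_OK; }

    static int read_with_retries(void)
    {
            int retries = 10;
            enum status rc;

            for (;;) {
                    rc = dev_read();
                    if (rc == ST_BUSY)
                            continue;          /* busy: retry immediately */
                    if (rc == ST_HW_ERR && retries-- > 0) {
                            usleep(10 * 1000); /* transient: wait 10ms */
                            continue;
                    }
                    break;  /* success, fatal error, or retries exhausted */
            }
            return rc == ST_OK ? 0 : -1;
    }

    int main(void) { printf("%d\n", read_with_retries()); return 0; }
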
7462     diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
7463     index f796bd61f3f0..40406c162d0d 100644
7464     --- a/drivers/scsi/scsi_sysfs.c
7465     +++ b/drivers/scsi/scsi_sysfs.c
7466     @@ -1383,7 +1383,10 @@ static void __scsi_remove_target(struct scsi_target *starget)
7467     * check.
7468     */
7469     if (sdev->channel != starget->channel ||
7470     - sdev->id != starget->id ||
7471     + sdev->id != starget->id)
7472     + continue;
7473     + if (sdev->sdev_state == SDEV_DEL ||
7474     + sdev->sdev_state == SDEV_CANCEL ||
7475     !get_device(&sdev->sdev_gendev))
7476     continue;
7477     spin_unlock_irqrestore(shost->host_lock, flags);
7478     diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
7479     index 0f42a225a664..e6b779930230 100644
7480     --- a/drivers/scsi/smartpqi/Makefile
7481     +++ b/drivers/scsi/smartpqi/Makefile
7482     @@ -1,3 +1,3 @@
7483     ccflags-y += -I.
7484     -obj-m += smartpqi.o
7485     +obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o
7486     smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
7487     diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
7488     index f9bc8ec6fb6b..9518ffd8b8ba 100644
7489     --- a/drivers/target/iscsi/iscsi_target_auth.c
7490     +++ b/drivers/target/iscsi/iscsi_target_auth.c
7491     @@ -421,7 +421,8 @@ static int chap_server_compute_md5(
7492     auth_ret = 0;
7493     out:
7494     kzfree(desc);
7495     - crypto_free_shash(tfm);
7496     + if (tfm)
7497     + crypto_free_shash(tfm);
7498     kfree(challenge);
7499     kfree(challenge_binhex);
7500     return auth_ret;
7501     diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
7502     index 7a6751fecd32..87248a2512e5 100644
7503     --- a/drivers/target/iscsi/iscsi_target_nego.c
7504     +++ b/drivers/target/iscsi/iscsi_target_nego.c
7505     @@ -432,6 +432,9 @@ static void iscsi_target_sk_data_ready(struct sock *sk)
7506     if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
7507     write_unlock_bh(&sk->sk_callback_lock);
7508     pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn);
7509     + if (iscsi_target_sk_data_ready == conn->orig_data_ready)
7510     + return;
7511     + conn->orig_data_ready(sk);
7512     return;
7513     }
7514    
7515     diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
7516     index 939a63bca82f..72eb3e41e3b6 100644
7517     --- a/drivers/usb/Kconfig
7518     +++ b/drivers/usb/Kconfig
7519     @@ -19,6 +19,14 @@ config USB_EHCI_BIG_ENDIAN_MMIO
7520     config USB_EHCI_BIG_ENDIAN_DESC
7521     bool
7522    
7523     +config USB_UHCI_BIG_ENDIAN_MMIO
7524     + bool
7525     + default y if SPARC_LEON
7526     +
7527     +config USB_UHCI_BIG_ENDIAN_DESC
7528     + bool
7529     + default y if SPARC_LEON
7530     +
7531     menuconfig USB_SUPPORT
7532     bool "USB support"
7533     depends on HAS_IOMEM
7534     diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
7535     index fa5692dec832..92b19721b595 100644
7536     --- a/drivers/usb/host/Kconfig
7537     +++ b/drivers/usb/host/Kconfig
7538     @@ -637,14 +637,6 @@ config USB_UHCI_ASPEED
7539     bool
7540     default y if ARCH_ASPEED
7541    
7542     -config USB_UHCI_BIG_ENDIAN_MMIO
7543     - bool
7544     - default y if SPARC_LEON
7545     -
7546     -config USB_UHCI_BIG_ENDIAN_DESC
7547     - bool
7548     - default y if SPARC_LEON
7549     -
7550     config USB_FHCI_HCD
7551     tristate "Freescale QE USB Host Controller support"
7552     depends on OF_GPIO && QE_GPIO && QUICC_ENGINE
7553     diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
7554     index 9269d5685239..b90ef96e43d6 100644
7555     --- a/drivers/video/console/dummycon.c
7556     +++ b/drivers/video/console/dummycon.c
7557     @@ -67,7 +67,6 @@ const struct consw dummy_con = {
7558     .con_switch = DUMMY,
7559     .con_blank = DUMMY,
7560     .con_font_set = DUMMY,
7561     - .con_font_get = DUMMY,
7562     .con_font_default = DUMMY,
7563     .con_font_copy = DUMMY,
7564     };
7565     diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
7566     index e06358da4b99..3dee267d7c75 100644
7567     --- a/drivers/video/fbdev/atmel_lcdfb.c
7568     +++ b/drivers/video/fbdev/atmel_lcdfb.c
7569     @@ -1119,7 +1119,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
7570     goto put_display_node;
7571     }
7572    
7573     - timings_np = of_find_node_by_name(display_np, "display-timings");
7574     + timings_np = of_get_child_by_name(display_np, "display-timings");
7575     if (!timings_np) {
7576     dev_err(dev, "failed to find display-timings node\n");
7577     ret = -ENODEV;
7578     @@ -1140,6 +1140,12 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
7579     fb_add_videomode(&fb_vm, &info->modelist);
7580     }
7581    
7582     + /*
7583     + * FIXME: Make sure we are not referencing any fields in display_np
7584     + * and timings_np and drop our references to them before returning to
7585     + * avoid leaking the nodes on probe deferral and driver unbind.
7586     + */
7587     +
7588     return 0;
7589    
7590     put_timings_node:
7591     diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c
7592     index 6082f653c68a..67773e8bbb95 100644
7593     --- a/drivers/video/fbdev/geode/video_gx.c
7594     +++ b/drivers/video/fbdev/geode/video_gx.c
7595     @@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info)
7596     int timeout = 1000;
7597    
7598     /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
7599     - if (cpu_data(0).x86_mask == 1) {
7600     + if (cpu_data(0).x86_stepping == 1) {
7601     pll_table = gx_pll_table_14MHz;
7602     pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);
7603     } else {
7604     diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
7605     index 149c5e7efc89..092981171df1 100644
7606     --- a/drivers/xen/xenbus/xenbus.h
7607     +++ b/drivers/xen/xenbus/xenbus.h
7608     @@ -76,6 +76,7 @@ struct xb_req_data {
7609     struct list_head list;
7610     wait_queue_head_t wq;
7611     struct xsd_sockmsg msg;
7612     + uint32_t caller_req_id;
7613     enum xsd_sockmsg_type type;
7614     char *body;
7615     const struct kvec *vec;
7616     diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
7617     index 5b081a01779d..d239fc3c5e3d 100644
7618     --- a/drivers/xen/xenbus/xenbus_comms.c
7619     +++ b/drivers/xen/xenbus/xenbus_comms.c
7620     @@ -309,6 +309,7 @@ static int process_msg(void)
7621     goto out;
7622    
7623     if (req->state == xb_req_state_wait_reply) {
7624     + req->msg.req_id = req->caller_req_id;
7625     req->msg.type = state.msg.type;
7626     req->msg.len = state.msg.len;
7627     req->body = state.body;
7628     diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
7629     index 3e59590c7254..3f3b29398ab8 100644
7630     --- a/drivers/xen/xenbus/xenbus_xs.c
7631     +++ b/drivers/xen/xenbus/xenbus_xs.c
7632     @@ -227,6 +227,8 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)
7633     req->state = xb_req_state_queued;
7634     init_waitqueue_head(&req->wq);
7635    
7636     + /* Save the caller's req_id and restore it later in the reply */
7637     + req->caller_req_id = req->msg.req_id;
7638     req->msg.req_id = xs_request_enter(req);
7639    
7640     mutex_lock(&xb_write_mutex);
7641     @@ -310,6 +312,7 @@ static void *xs_talkv(struct xenbus_transaction t,
7642     req->num_vecs = num_vecs;
7643     req->cb = xs_wake_up;
7644    
7645     + msg.req_id = 0;
7646     msg.tx_id = t.id;
7647     msg.type = type;
7648     msg.len = 0;
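
Taken together, the three xenbus hunks preserve the caller's request id
across the store transaction: xs_send() saves msg.req_id into caller_req_id
before replacing it with an internally allocated id, and process_msg()
restores it when filling in the reply (xs_talkv() also zeroes msg.req_id so
the saved value is never uninitialized). A minimal model of that
save/restore (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct msg { uint32_t req_id; };

    struct req {
            struct msg msg;
            uint32_t caller_req_id; /* caller's id, saved for the reply */
    };

    static uint32_t next_internal_id = 100;

    static void send_req(struct req *r)
    {
            r->caller_req_id = r->msg.req_id;   /* save the caller's id */
            r->msg.req_id = next_internal_id++; /* the wire uses our id */
    }

    static void complete_req(struct req *r)
    {
            r->msg.req_id = r->caller_req_id;   /* restore before handing back */
    }

    int main(void)
    {
            struct req r = { .msg = { .req_id = 7 } };
            send_req(&r);
            complete_req(&r);
            printf("caller sees id %u\n", r.msg.req_id); /* prints 7 */
            return 0;
    }
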
7649     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
7650     index 5eaedff28a32..1ae61f82e54b 100644
7651     --- a/fs/btrfs/inode.c
7652     +++ b/fs/btrfs/inode.c
7653     @@ -1330,8 +1330,11 @@ static noinline int run_delalloc_nocow(struct inode *inode,
7654     leaf = path->nodes[0];
7655     if (path->slots[0] >= btrfs_header_nritems(leaf)) {
7656     ret = btrfs_next_leaf(root, path);
7657     - if (ret < 0)
7658     + if (ret < 0) {
7659     + if (cow_start != (u64)-1)
7660     + cur_offset = cow_start;
7661     goto error;
7662     + }
7663     if (ret > 0)
7664     break;
7665     leaf = path->nodes[0];
7666     @@ -3368,6 +3371,11 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans,
7667     ret = btrfs_orphan_reserve_metadata(trans, inode);
7668     ASSERT(!ret);
7669     if (ret) {
7670     + /*
7671     + * The decrement doesn't need the spin_lock, as
7672     + * ->orphan_block_rsv would be released only if
7673     + * ->orphan_inodes is zero.
7674     + */
7675     atomic_dec(&root->orphan_inodes);
7676     clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
7677     &inode->runtime_flags);
7678     @@ -3382,12 +3390,17 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans,
7679     if (insert >= 1) {
7680     ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
7681     if (ret) {
7682     - atomic_dec(&root->orphan_inodes);
7683     if (reserve) {
7684     clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
7685     &inode->runtime_flags);
7686     btrfs_orphan_release_metadata(inode);
7687     }
7688     + /*
7689     + * btrfs_orphan_commit_root may race with us and set
7690     + * ->orphan_block_rsv to zero; in order to avoid that,
7691     + * decrease ->orphan_inodes after everything is done.
7692     + */
7693     + atomic_dec(&root->orphan_inodes);
7694     if (ret != -EEXIST) {
7695     clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
7696     &inode->runtime_flags);
7697     @@ -3419,28 +3432,26 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
7698     {
7699     struct btrfs_root *root = inode->root;
7700     int delete_item = 0;
7701     - int release_rsv = 0;
7702     int ret = 0;
7703    
7704     - spin_lock(&root->orphan_lock);
7705     if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
7706     &inode->runtime_flags))
7707     delete_item = 1;
7708    
7709     + if (delete_item && trans)
7710     + ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
7711     +
7712     if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
7713     &inode->runtime_flags))
7714     - release_rsv = 1;
7715     - spin_unlock(&root->orphan_lock);
7716     + btrfs_orphan_release_metadata(inode);
7717    
7718     - if (delete_item) {
7719     + /*
7720     + * btrfs_orphan_commit_root may race with us and set ->orphan_block_rsv
7721     + * to zero; in order to avoid that, decrease ->orphan_inodes after
7722     + * everything is done.
7723     + */
7724     + if (delete_item)
7725     atomic_dec(&root->orphan_inodes);
7726     - if (trans)
7727     - ret = btrfs_del_orphan_item(trans, root,
7728     - btrfs_ino(inode));
7729     - }
7730     -
7731     - if (release_rsv)
7732     - btrfs_orphan_release_metadata(inode);
7733    
7734     return ret;
7735     }
7736     @@ -5315,7 +5326,7 @@ void btrfs_evict_inode(struct inode *inode)
7737     trace_btrfs_inode_evict(inode);
7738    
7739     if (!root) {
7740     - kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
7741     + clear_inode(inode);
7742     return;
7743     }
7744    
7745     diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
7746     index d3002842d7f6..b6dfe7af7a1f 100644
7747     --- a/fs/btrfs/tree-log.c
7748     +++ b/fs/btrfs/tree-log.c
7749     @@ -28,6 +28,7 @@
7750     #include "hash.h"
7751     #include "compression.h"
7752     #include "qgroup.h"
7753     +#include "inode-map.h"
7754    
7755     /* magic values for the inode_only field in btrfs_log_inode:
7756     *
7757     @@ -2494,6 +2495,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
7758     clean_tree_block(fs_info, next);
7759     btrfs_wait_tree_block_writeback(next);
7760     btrfs_tree_unlock(next);
7761     + } else {
7762     + if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
7763     + clear_extent_buffer_dirty(next);
7764     }
7765    
7766     WARN_ON(root_owner !=
7767     @@ -2574,6 +2578,9 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
7768     clean_tree_block(fs_info, next);
7769     btrfs_wait_tree_block_writeback(next);
7770     btrfs_tree_unlock(next);
7771     + } else {
7772     + if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
7773     + clear_extent_buffer_dirty(next);
7774     }
7775    
7776     WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
7777     @@ -2652,6 +2659,9 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
7778     clean_tree_block(fs_info, next);
7779     btrfs_wait_tree_block_writeback(next);
7780     btrfs_tree_unlock(next);
7781     + } else {
7782     + if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
7783     + clear_extent_buffer_dirty(next);
7784     }
7785    
7786     WARN_ON(log->root_key.objectid !=
7787     @@ -3038,13 +3048,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
7788    
7789     while (1) {
7790     ret = find_first_extent_bit(&log->dirty_log_pages,
7791     - 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
7792     + 0, &start, &end,
7793     + EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT,
7794     NULL);
7795     if (ret)
7796     break;
7797    
7798     clear_extent_bits(&log->dirty_log_pages, start, end,
7799     - EXTENT_DIRTY | EXTENT_NEW);
7800     + EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
7801     }
7802    
7803     /*
7804     @@ -5705,6 +5716,23 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
7805     path);
7806     }
7807    
7808     + if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
7809     + struct btrfs_root *root = wc.replay_dest;
7810     +
7811     + btrfs_release_path(path);
7812     +
7813     + /*
7814     + * We have just replayed everything, and the highest
7815     + * objectid of fs roots probably has changed in case
7816     + * some inode_items got replayed.
7817     + *
7818     + * root->objectid_mutex is not acquired as log replay
7819     + * could only happen during mount.
7820     + */
7821     + ret = btrfs_find_highest_objectid(root,
7822     + &root->highest_objectid);
7823     + }
7824     +
7825     key.offset = found_key.offset - 1;
7826     wc.replay_dest->log_root = NULL;
7827     free_extent_buffer(log->node);
7828     diff --git a/fs/dcache.c b/fs/dcache.c
7829     index 34c852af215c..b8d999a5768b 100644
7830     --- a/fs/dcache.c
7831     +++ b/fs/dcache.c
7832     @@ -2705,8 +2705,6 @@ static void swap_names(struct dentry *dentry, struct dentry *target)
7833     */
7834     unsigned int i;
7835     BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
7836     - kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
7837     - kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
7838     for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
7839     swap(((long *) &dentry->d_iname)[i],
7840     ((long *) &target->d_iname)[i]);
7841     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
7842     index ea2ccc524bd9..0b9f3f284799 100644
7843     --- a/fs/ext4/inode.c
7844     +++ b/fs/ext4/inode.c
7845     @@ -3724,10 +3724,18 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
7846     /* Credits for sb + inode write */
7847     handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
7848     if (IS_ERR(handle)) {
7849     - /* This is really bad luck. We've written the data
7850     - * but cannot extend i_size. Bail out and pretend
7851     - * the write failed... */
7852     - ret = PTR_ERR(handle);
7853     + /*
7854     + * We wrote the data but cannot extend
7855     + * i_size. Bail out. In the async I/O case, we do
7856     + * not return an error here because we have
7857     + * already submitted the corresponding
7858     + * bio. Returning an error here makes the caller
7859     + * think that this I/O is done and failed,
7860     + * resulting in a race with the bio's completion
7861     + * handler.
7862     + */
7863     + if (!ret)
7864     + ret = PTR_ERR(handle);
7865     if (inode->i_nlink)
7866     ext4_orphan_del(NULL, inode);
7867    
7868     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
7869     index f29351c66610..16d247f056e2 100644
7870     --- a/fs/ext4/super.c
7871     +++ b/fs/ext4/super.c
7872     @@ -742,6 +742,7 @@ __acquires(bitlock)
7873     }
7874    
7875     ext4_unlock_group(sb, grp);
7876     + ext4_commit_super(sb, 1);
7877     ext4_handle_error(sb);
7878     /*
7879     * We only get here in the ERRORS_RO case; relocking the group
7880     diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
7881     index 8b08044b3120..c0681814c379 100644
7882     --- a/fs/jbd2/transaction.c
7883     +++ b/fs/jbd2/transaction.c
7884     @@ -495,8 +495,10 @@ void jbd2_journal_free_reserved(handle_t *handle)
7885     EXPORT_SYMBOL(jbd2_journal_free_reserved);
7886    
7887     /**
7888     - * int jbd2_journal_start_reserved(handle_t *handle) - start reserved handle
7889     + * int jbd2_journal_start_reserved() - start reserved handle
7890     * @handle: handle to start
7891     + * @type: for handle statistics
7892     + * @line_no: for handle statistics
7893     *
7894     * Start handle that has been previously reserved with jbd2_journal_reserve().
7895     * This attaches @handle to the running transaction (or creates one if there's
7896     @@ -626,6 +628,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
7897     * int jbd2_journal_restart() - restart a handle .
7898     * @handle: handle to restart
7899     * @nblocks: nr credits requested
7900     + * @gfp_mask: memory allocation flags (for start_this_handle)
7901     *
7902     * Restart a handle for a multi-transaction filesystem
7903     * operation.
7904     diff --git a/fs/mbcache.c b/fs/mbcache.c
7905     index d818fd236787..49c5b25bfa8c 100644
7906     --- a/fs/mbcache.c
7907     +++ b/fs/mbcache.c
7908     @@ -94,6 +94,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
7909     entry->e_key = key;
7910     entry->e_value = value;
7911     entry->e_reusable = reusable;
7912     + entry->e_referenced = 0;
7913     head = mb_cache_entry_head(cache, key);
7914     hlist_bl_lock(head);
7915     hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
7916     diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
7917     index 4689940a953c..5193218f5889 100644
7918     --- a/fs/ocfs2/dlmglue.c
7919     +++ b/fs/ocfs2/dlmglue.c
7920     @@ -2486,6 +2486,15 @@ int ocfs2_inode_lock_with_page(struct inode *inode,
7921     ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
7922     if (ret == -EAGAIN) {
7923     unlock_page(page);
7924     + /*
7925     + * If we can't get the inode lock immediately, we should not
7926     + * return directly here, since that would lead to a softlockup.
7927     + * Instead, take a blocking lock and immediately unlock it before
7928     + * returning; this avoids wasting CPU on lots of retries and
7929     + * improves fairness in acquiring the lock.
7930     + */
7931     + if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
7932     + ocfs2_inode_unlock(inode, ex);
7933     ret = AOP_TRUNCATED_PAGE;
7934     }
7935    
7936     diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
7937     index 321511ed8c42..d60900b615f9 100644
7938     --- a/fs/overlayfs/inode.c
7939     +++ b/fs/overlayfs/inode.c
7940     @@ -579,6 +579,16 @@ static int ovl_inode_set(struct inode *inode, void *data)
7941     static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
7942     struct dentry *upperdentry)
7943     {
7944     + if (S_ISDIR(inode->i_mode)) {
7945     + /* Real lower dir moved to upper layer under us? */
7946     + if (!lowerdentry && ovl_inode_lower(inode))
7947     + return false;
7948     +
7949     + /* Lookup of an uncovered redirect origin? */
7950     + if (!upperdentry && ovl_inode_upper(inode))
7951     + return false;
7952     + }
7953     +
7954     /*
7955     * Allow non-NULL lower inode in ovl_inode even if lowerdentry is NULL.
7956     * This happens when finding a copied up overlay inode for a renamed
7957     @@ -606,6 +616,8 @@ struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
7958     struct inode *inode;
7959     /* Already indexed or could be indexed on copy up? */
7960     bool indexed = (index || (ovl_indexdir(dentry->d_sb) && !upperdentry));
7961     + struct dentry *origin = indexed ? lowerdentry : NULL;
7962     + bool is_dir;
7963    
7964     if (WARN_ON(upperdentry && indexed && !lowerdentry))
7965     return ERR_PTR(-EIO);
7966     @@ -614,15 +626,19 @@ struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
7967     realinode = d_inode(lowerdentry);
7968    
7969     /*
7970     - * Copy up origin (lower) may exist for non-indexed upper, but we must
7971     - * not use lower as hash key in that case.
7972     - * Hash inodes that are or could be indexed by origin inode and
7973     - * non-indexed upper inodes that could be hard linked by upper inode.
7974     + * Copy up origin (lower) may exist for non-indexed non-dir upper, but
7975     + * we must not use lower as hash key in that case.
7976     + * Hash non-dir that is or could be indexed by origin inode.
7977     + * Hash dir that is or could be merged by origin inode.
7978     + * Hash pure upper and non-indexed non-dir by upper inode.
7979     */
7980     - if (!S_ISDIR(realinode->i_mode) && (upperdentry || indexed)) {
7981     - struct inode *key = d_inode(indexed ? lowerdentry :
7982     - upperdentry);
7983     - unsigned int nlink;
7984     + is_dir = S_ISDIR(realinode->i_mode);
7985     + if (is_dir)
7986     + origin = lowerdentry;
7987     +
7988     + if (upperdentry || origin) {
7989     + struct inode *key = d_inode(origin ?: upperdentry);
7990     + unsigned int nlink = is_dir ? 1 : realinode->i_nlink;
7991    
7992     inode = iget5_locked(dentry->d_sb, (unsigned long) key,
7993     ovl_inode_test, ovl_inode_set, key);
7994     @@ -643,8 +659,9 @@ struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
7995     goto out;
7996     }
7997    
7998     - nlink = ovl_get_nlink(lowerdentry, upperdentry,
7999     - realinode->i_nlink);
8000     + /* Recalculate nlink for non-dir due to indexing */
8001     + if (!is_dir)
8002     + nlink = ovl_get_nlink(lowerdentry, upperdentry, nlink);
8003     set_nlink(inode, nlink);
8004     } else {
8005     inode = new_inode(dentry->d_sb);
8006     diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
8007     index f5738e96a052..b8f8d666e8d4 100644
8008     --- a/fs/overlayfs/super.c
8009     +++ b/fs/overlayfs/super.c
8010     @@ -200,6 +200,7 @@ static void ovl_destroy_inode(struct inode *inode)
8011     struct ovl_inode *oi = OVL_I(inode);
8012    
8013     dput(oi->__upperdentry);
8014     + iput(oi->lower);
8015     kfree(oi->redirect);
8016     ovl_dir_cache_free(inode);
8017     mutex_destroy(&oi->lock);
8018     diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
8019     index b9b239fa5cfd..f60ce2e04df0 100644
8020     --- a/fs/overlayfs/util.c
8021     +++ b/fs/overlayfs/util.c
8022     @@ -253,7 +253,7 @@ void ovl_inode_init(struct inode *inode, struct dentry *upperdentry,
8023     if (upperdentry)
8024     OVL_I(inode)->__upperdentry = upperdentry;
8025     if (lowerdentry)
8026     - OVL_I(inode)->lower = d_inode(lowerdentry);
8027     + OVL_I(inode)->lower = igrab(d_inode(lowerdentry));
8028    
8029     ovl_copyattr(d_inode(upperdentry ?: lowerdentry), inode);
8030     }
8031     @@ -269,7 +269,7 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
8032     */
8033     smp_wmb();
8034     OVL_I(inode)->__upperdentry = upperdentry;
8035     - if (!S_ISDIR(upperinode->i_mode) && inode_unhashed(inode)) {
8036     + if (inode_unhashed(inode)) {
8037     inode->i_private = upperinode;
8038     __insert_inode_hash(inode, (unsigned long) upperinode);
8039     }
8040     diff --git a/fs/seq_file.c b/fs/seq_file.c
8041     index 4be761c1a03d..eea09f6d8830 100644
8042     --- a/fs/seq_file.c
8043     +++ b/fs/seq_file.c
8044     @@ -181,8 +181,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
8045     * if request is to read from zero offset, reset iterator to first
8046     * record as it might have been already advanced by previous requests
8047     */
8048     - if (*ppos == 0)
8049     + if (*ppos == 0) {
8050     m->index = 0;
8051     + m->version = 0;
8052     + m->count = 0;
8053     + }
8054    
8055     /* Don't assume *ppos is where we left it */
8056     if (unlikely(*ppos != m->read_pos)) {
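
The seq_file fix resets the cached buffer state (m->version and m->count)
along with the record index when a read restarts at offset zero; clearing
only m->index could otherwise let leftover buffered bytes from a previous
read be returned again. The rewind pattern in isolation (the struct and
field names are a simplified stand-in for struct seq_file):

    struct iter {
            unsigned long index;   /* which record we are on             */
            unsigned long count;   /* bytes still buffered, not yet read */
            unsigned version;      /* tracks staleness of buffered data  */
    };

    /* On a rewind to offset 0, clear *all* cached state, not just the
     * position, so nothing stale from the previous pass leaks through. */
    static void iter_rewind(struct iter *it)
    {
            it->index = 0;
            it->count = 0;
            it->version = 0;
    }

    int main(void)
    {
            struct iter it = { .index = 3, .count = 12, .version = 2 };
            iter_rewind(&it);
            return (int)(it.index + it.count + it.version); /* 0 */
    }
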
8057     diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
8058     index 34c8f5600ce0..c65e4489006d 100644
8059     --- a/include/drm/i915_pciids.h
8060     +++ b/include/drm/i915_pciids.h
8061     @@ -118,92 +118,125 @@
8062     #define INTEL_IRONLAKE_M_IDS(info) \
8063     INTEL_VGA_DEVICE(0x0046, info)
8064    
8065     -#define INTEL_SNB_D_IDS(info) \
8066     +#define INTEL_SNB_D_GT1_IDS(info) \
8067     INTEL_VGA_DEVICE(0x0102, info), \
8068     - INTEL_VGA_DEVICE(0x0112, info), \
8069     - INTEL_VGA_DEVICE(0x0122, info), \
8070     INTEL_VGA_DEVICE(0x010A, info)
8071    
8072     -#define INTEL_SNB_M_IDS(info) \
8073     - INTEL_VGA_DEVICE(0x0106, info), \
8074     +#define INTEL_SNB_D_GT2_IDS(info) \
8075     + INTEL_VGA_DEVICE(0x0112, info), \
8076     + INTEL_VGA_DEVICE(0x0122, info)
8077     +
8078     +#define INTEL_SNB_D_IDS(info) \
8079     + INTEL_SNB_D_GT1_IDS(info), \
8080     + INTEL_SNB_D_GT2_IDS(info)
8081     +
8082     +#define INTEL_SNB_M_GT1_IDS(info) \
8083     + INTEL_VGA_DEVICE(0x0106, info)
8084     +
8085     +#define INTEL_SNB_M_GT2_IDS(info) \
8086     INTEL_VGA_DEVICE(0x0116, info), \
8087     INTEL_VGA_DEVICE(0x0126, info)
8088    
8089     +#define INTEL_SNB_M_IDS(info) \
8090     + INTEL_SNB_M_GT1_IDS(info), \
8091     + INTEL_SNB_M_GT2_IDS(info)
8092     +
8093     +#define INTEL_IVB_M_GT1_IDS(info) \
8094     + INTEL_VGA_DEVICE(0x0156, info) /* GT1 mobile */
8095     +
8096     +#define INTEL_IVB_M_GT2_IDS(info) \
8097     + INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */
8098     +
8099     #define INTEL_IVB_M_IDS(info) \
8100     - INTEL_VGA_DEVICE(0x0156, info), /* GT1 mobile */ \
8101     - INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */
8102     + INTEL_IVB_M_GT1_IDS(info), \
8103     + INTEL_IVB_M_GT2_IDS(info)
8104    
8105     -#define INTEL_IVB_D_IDS(info) \
8106     +#define INTEL_IVB_D_GT1_IDS(info) \
8107     INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
8108     + INTEL_VGA_DEVICE(0x015a, info) /* GT1 server */
8109     +
8110     +#define INTEL_IVB_D_GT2_IDS(info) \
8111     INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
8112     - INTEL_VGA_DEVICE(0x015a, info), /* GT1 server */ \
8113     INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */
8114    
8115     +#define INTEL_IVB_D_IDS(info) \
8116     + INTEL_IVB_D_GT1_IDS(info), \
8117     + INTEL_IVB_D_GT2_IDS(info)
8118     +
8119     #define INTEL_IVB_Q_IDS(info) \
8120     INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
8121    
8122     -#define INTEL_HSW_IDS(info) \
8123     +#define INTEL_HSW_GT1_IDS(info) \
8124     INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
8125     - INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
8126     - INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
8127     INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
8128     - INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
8129     - INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
8130     INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
8131     - INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
8132     - INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
8133     INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
8134     - INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
8135     - INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
8136     INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
8137     - INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
8138     - INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
8139     INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
8140     - INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
8141     - INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
8142     INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
8143     - INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
8144     - INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
8145     INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
8146     - INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
8147     - INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
8148     INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
8149     - INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
8150     - INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
8151     INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
8152     - INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
8153     - INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
8154     INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
8155     - INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
8156     - INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
8157     INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
8158     - INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
8159     - INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
8160     INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
8161     - INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
8162     - INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
8163     INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
8164     - INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
8165     - INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
8166     INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
8167     - INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
8168     - INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \
8169     INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
8170     + INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
8171     + INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
8172     + INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \
8173     + INTEL_VGA_DEVICE(0x0D06, info) /* CRW GT1 mobile */
8174     +
8175     +#define INTEL_HSW_GT2_IDS(info) \
8176     + INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
8177     + INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
8178     + INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
8179     + INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
8180     + INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
8181     + INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
8182     + INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
8183     + INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
8184     + INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
8185     + INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
8186     + INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
8187     + INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
8188     + INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
8189     + INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
8190     + INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
8191     INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
8192     INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
8193     - INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
8194     INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
8195     - INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
8196     - INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
8197     INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
8198     - INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
8199     - INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \
8200     INTEL_VGA_DEVICE(0x0A1E, info), /* ULX GT2 mobile */ \
8201     + INTEL_VGA_DEVICE(0x0D16, info) /* CRW GT2 mobile */
8202     +
8203     +#define INTEL_HSW_GT3_IDS(info) \
8204     + INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
8205     + INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
8206     + INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
8207     + INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
8208     + INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
8209     + INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
8210     + INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
8211     + INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
8212     + INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
8213     + INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
8214     + INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
8215     + INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
8216     + INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
8217     + INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
8218     + INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \
8219     + INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
8220     + INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
8221     INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
8222     - INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
8223     - INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
8224     INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */
8225    
8226     +#define INTEL_HSW_IDS(info) \
8227     + INTEL_HSW_GT1_IDS(info), \
8228     + INTEL_HSW_GT2_IDS(info), \
8229     + INTEL_HSW_GT3_IDS(info)
8230     +
8231     #define INTEL_VLV_IDS(info) \
8232     INTEL_VGA_DEVICE(0x0f30, info), \
8233     INTEL_VGA_DEVICE(0x0f31, info), \
8234     @@ -212,17 +245,19 @@
8235     INTEL_VGA_DEVICE(0x0157, info), \
8236     INTEL_VGA_DEVICE(0x0155, info)
8237    
8238     -#define INTEL_BDW_GT12_IDS(info) \
8239     +#define INTEL_BDW_GT1_IDS(info) \
8240     INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
8241     INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
8242     INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
8243     INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \
8244     - INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
8245     + INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
8246     + INTEL_VGA_DEVICE(0x160D, info) /* GT1 Workstation */
8247     +
8248     +#define INTEL_BDW_GT2_IDS(info) \
8249     + INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
8250     INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
8251     INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
8252     - INTEL_VGA_DEVICE(0x161E, info), /* GT2 ULX */ \
8253     - INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
8254     - INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \
8255     + INTEL_VGA_DEVICE(0x161E, info), /* GT2 ULX */ \
8256     INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
8257     INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
8258    
8259     @@ -243,7 +278,8 @@
8260     INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
8261    
8262     #define INTEL_BDW_IDS(info) \
8263     - INTEL_BDW_GT12_IDS(info), \
8264     + INTEL_BDW_GT1_IDS(info), \
8265     + INTEL_BDW_GT2_IDS(info), \
8266     INTEL_BDW_GT3_IDS(info), \
8267     INTEL_BDW_RSVD_IDS(info)
8268    
8269     @@ -303,7 +339,6 @@
8270     #define INTEL_KBL_GT1_IDS(info) \
8271     INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
8272     INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \
8273     - INTEL_VGA_DEVICE(0x5917, info), /* DT GT1.5 */ \
8274     INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
8275     INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
8276     INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
8277     @@ -313,6 +348,7 @@
8278    
8279     #define INTEL_KBL_GT2_IDS(info) \
8280     INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
8281     + INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
8282     INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
8283     INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
8284     INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
8285     @@ -335,25 +371,33 @@
8286     INTEL_KBL_GT4_IDS(info)
8287    
8288     /* CFL S */
8289     -#define INTEL_CFL_S_IDS(info) \
8290     +#define INTEL_CFL_S_GT1_IDS(info) \
8291     INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \
8292     - INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \
8293     + INTEL_VGA_DEVICE(0x3E93, info) /* SRV GT1 */
8294     +
8295     +#define INTEL_CFL_S_GT2_IDS(info) \
8296     INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
8297     INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
8298     INTEL_VGA_DEVICE(0x3E96, info) /* SRV GT2 */
8299    
8300     /* CFL H */
8301     -#define INTEL_CFL_H_IDS(info) \
8302     +#define INTEL_CFL_H_GT2_IDS(info) \
8303     INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
8304     INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */
8305    
8306     /* CFL U */
8307     -#define INTEL_CFL_U_IDS(info) \
8308     +#define INTEL_CFL_U_GT3_IDS(info) \
8309     INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
8310     INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
8311     INTEL_VGA_DEVICE(0x3EA8, info), /* ULT GT3 */ \
8312     INTEL_VGA_DEVICE(0x3EA5, info) /* ULT GT3 */
8313    
8314     +#define INTEL_CFL_IDS(info) \
8315     + INTEL_CFL_S_GT1_IDS(info), \
8316     + INTEL_CFL_S_GT2_IDS(info), \
8317     + INTEL_CFL_H_GT2_IDS(info), \
8318     + INTEL_CFL_U_GT3_IDS(info)
8319     +
8320     /* CNL U 2+2 */
8321     #define INTEL_CNL_U_GT2_IDS(info) \
8322     INTEL_VGA_DEVICE(0x5A52, info), \
8323     diff --git a/include/linux/c2port.h b/include/linux/c2port.h
8324     index 4efabcb51347..f2736348ca26 100644
8325     --- a/include/linux/c2port.h
8326     +++ b/include/linux/c2port.h
8327     @@ -9,8 +9,6 @@
8328     * the Free Software Foundation
8329     */
8330    
8331     -#include <linux/kmemcheck.h>
8332     -
8333     #define C2PORT_NAME_LEN 32
8334    
8335     struct device;
8336     @@ -22,10 +20,8 @@ struct device;
8337     /* Main struct */
8338     struct c2port_ops;
8339     struct c2port_device {
8340     - kmemcheck_bitfield_begin(flags);
8341     unsigned int access:1;
8342     unsigned int flash_access:1;
8343     - kmemcheck_bitfield_end(flags);
8344    
8345     int id;
8346     char name[C2PORT_NAME_LEN];
8347     diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
8348     index 2272ded07496..bf09213895f7 100644
8349     --- a/include/linux/compiler-gcc.h
8350     +++ b/include/linux/compiler-gcc.h
8351     @@ -167,8 +167,6 @@
8352    
8353     #if GCC_VERSION >= 40100
8354     # define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
8355     -
8356     -#define __nostackprotector __attribute__((__optimize__("no-stack-protector")))
8357     #endif
8358    
8359     #if GCC_VERSION >= 40300
8360     @@ -196,6 +194,11 @@
8361     #endif /* __CHECKER__ */
8362     #endif /* GCC_VERSION >= 40300 */
8363    
8364     +#if GCC_VERSION >= 40400
8365     +#define __optimize(level) __attribute__((__optimize__(level)))
8366     +#define __nostackprotector __optimize("no-stack-protector")
8367     +#endif /* GCC_VERSION >= 40400 */
8368     +
8369     #if GCC_VERSION >= 40500
8370    
8371     #ifndef __CHECKER__
8372     diff --git a/include/linux/compiler.h b/include/linux/compiler.h
8373     index fab5dc250c61..e8c9cd18bb05 100644
8374     --- a/include/linux/compiler.h
8375     +++ b/include/linux/compiler.h
8376     @@ -266,6 +266,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
8377    
8378     #endif /* __ASSEMBLY__ */
8379    
8380     +#ifndef __optimize
8381     +# define __optimize(level)
8382     +#endif
8383     +
8384     /* Compile time object size, -1 for unknown */
8385     #ifndef __compiletime_object_size
8386     # define __compiletime_object_size(obj) -1
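
The compiler changes gate __nostackprotector on GCC >= 4.4 (the optimize
attribute is only available from GCC 4.4) by routing it through a generic
__optimize(level) helper, with an empty fallback in compiler.h for other
compilers. A standalone sketch of how the pair composes (simplified version
check, not the kernel headers):

    /* Simplified versions of the macros added above. */
    #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
    #define __optimize(level) __attribute__((__optimize__(level)))
    #else
    #define __optimize(level)
    #endif
    #define __nostackprotector __optimize("no-stack-protector")

    /* A function compiled without stack-protector instrumentation. */
    static int __nostackprotector add(int a, int b) { return a + b; }

    int main(void) { return add(2, 3) == 5 ? 0 : 1; }
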
8387     diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
8388     index 8f7788d23b57..a6989e02d0a0 100644
8389     --- a/include/linux/cpuidle.h
8390     +++ b/include/linux/cpuidle.h
8391     @@ -225,7 +225,7 @@ static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev,
8392     }
8393     #endif
8394    
8395     -#ifdef CONFIG_ARCH_HAS_CPU_RELAX
8396     +#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX)
8397     void cpuidle_poll_state_init(struct cpuidle_driver *drv);
8398     #else
8399     static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
8400     diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
8401     index 46930f82a988..7bf3b99e6fbb 100644
8402     --- a/include/linux/dma-mapping.h
8403     +++ b/include/linux/dma-mapping.h
8404     @@ -9,7 +9,6 @@
8405     #include <linux/dma-debug.h>
8406     #include <linux/dma-direction.h>
8407     #include <linux/scatterlist.h>
8408     -#include <linux/kmemcheck.h>
8409     #include <linux/bug.h>
8410     #include <linux/mem_encrypt.h>
8411    
8412     @@ -230,7 +229,6 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
8413     const struct dma_map_ops *ops = get_dma_ops(dev);
8414     dma_addr_t addr;
8415    
8416     - kmemcheck_mark_initialized(ptr, size);
8417     BUG_ON(!valid_dma_direction(dir));
8418     addr = ops->map_page(dev, virt_to_page(ptr),
8419     offset_in_page(ptr), size,
8420     @@ -263,11 +261,8 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
8421     unsigned long attrs)
8422     {
8423     const struct dma_map_ops *ops = get_dma_ops(dev);
8424     - int i, ents;
8425     - struct scatterlist *s;
8426     + int ents;
8427    
8428     - for_each_sg(sg, s, nents, i)
8429     - kmemcheck_mark_initialized(sg_virt(s), s->length);
8430     BUG_ON(!valid_dma_direction(dir));
8431     ents = ops->map_sg(dev, sg, nents, dir, attrs);
8432     BUG_ON(ents < 0);
8433     @@ -297,7 +292,6 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
8434     const struct dma_map_ops *ops = get_dma_ops(dev);
8435     dma_addr_t addr;
8436    
8437     - kmemcheck_mark_initialized(page_address(page) + offset, size);
8438     BUG_ON(!valid_dma_direction(dir));
8439     addr = ops->map_page(dev, page, offset, size, dir, attrs);
8440     debug_dma_map_page(dev, page, offset, size, dir, addr, false);
8441     diff --git a/include/linux/filter.h b/include/linux/filter.h
8442     index 48ec57e70f9f..42197b16dd78 100644
8443     --- a/include/linux/filter.h
8444     +++ b/include/linux/filter.h
8445     @@ -454,13 +454,11 @@ struct bpf_binary_header {
8446    
8447     struct bpf_prog {
8448     u16 pages; /* Number of allocated pages */
8449     - kmemcheck_bitfield_begin(meta);
8450     u16 jited:1, /* Is our filter JIT'ed? */
8451     locked:1, /* Program image locked? */
8452     gpl_compatible:1, /* Is filter GPL compatible? */
8453     cb_access:1, /* Is control block accessed? */
8454     dst_needed:1; /* Do we need dst entry? */
8455     - kmemcheck_bitfield_end(meta);
8456     enum bpf_prog_type type; /* Type of BPF program */
8457     u32 len; /* Number of filter blocks */
8458     u32 jited_len; /* Size of jited insns in bytes */
8459     diff --git a/include/linux/gfp.h b/include/linux/gfp.h
8460     index 710143741eb5..b041f94678de 100644
8461     --- a/include/linux/gfp.h
8462     +++ b/include/linux/gfp.h
8463     @@ -37,7 +37,6 @@ struct vm_area_struct;
8464     #define ___GFP_THISNODE 0x40000u
8465     #define ___GFP_ATOMIC 0x80000u
8466     #define ___GFP_ACCOUNT 0x100000u
8467     -#define ___GFP_NOTRACK 0x200000u
8468     #define ___GFP_DIRECT_RECLAIM 0x400000u
8469     #define ___GFP_WRITE 0x800000u
8470     #define ___GFP_KSWAPD_RECLAIM 0x1000000u
8471     @@ -201,19 +200,11 @@ struct vm_area_struct;
8472     * __GFP_COMP address compound page metadata.
8473     *
8474     * __GFP_ZERO returns a zeroed page on success.
8475     - *
8476     - * __GFP_NOTRACK avoids tracking with kmemcheck.
8477     - *
8478     - * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
8479     - * distinguishing in the source between false positives and allocations that
8480     - * cannot be supported (e.g. page tables).
8481     */
8482     #define __GFP_COLD ((__force gfp_t)___GFP_COLD)
8483     #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
8484     #define __GFP_COMP ((__force gfp_t)___GFP_COMP)
8485     #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
8486     -#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
8487     -#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
8488    
8489     /* Disable lockdep for GFP context tracking */
8490     #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
8491     diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
8492     index baeb872283d9..69c238210325 100644
8493     --- a/include/linux/interrupt.h
8494     +++ b/include/linux/interrupt.h
8495     @@ -594,21 +594,6 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t)
8496     __tasklet_hi_schedule(t);
8497     }
8498    
8499     -extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
8500     -
8501     -/*
8502     - * This version avoids touching any other tasklets. Needed for kmemcheck
8503     - * in order not to take any page faults while enqueueing this tasklet;
8504     - * consider VERY carefully whether you really need this or
8505     - * tasklet_hi_schedule()...
8506     - */
8507     -static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
8508     -{
8509     - if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
8510     - __tasklet_hi_schedule_first(t);
8511     -}
8512     -
8513     -
8514     static inline void tasklet_disable_nosync(struct tasklet_struct *t)
8515     {
8516     atomic_inc(&t->count);
8517     diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
8518     index 606b6bce3a5b..29290bfb94a8 100644
8519     --- a/include/linux/jbd2.h
8520     +++ b/include/linux/jbd2.h
8521     @@ -418,26 +418,41 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
8522     #define JI_WAIT_DATA (1 << __JI_WAIT_DATA)
8523    
8524     /**
8525     - * struct jbd_inode is the structure linking inodes in ordered mode
8526     - * present in a transaction so that we can sync them during commit.
8527     + * struct jbd2_inode - The jbd2_inode type is the structure linking inodes in
8528     + * ordered mode present in a transaction so that we can sync them during commit.
8529     */
8530     struct jbd2_inode {
8531     - /* Which transaction does this inode belong to? Either the running
8532     - * transaction or the committing one. [j_list_lock] */
8533     + /**
8534     + * @i_transaction:
8535     + *
8536     + * Which transaction does this inode belong to? Either the running
8537     + * transaction or the committing one. [j_list_lock]
8538     + */
8539     transaction_t *i_transaction;
8540    
8541     - /* Pointer to the running transaction modifying inode's data in case
8542     - * there is already a committing transaction touching it. [j_list_lock] */
8543     + /**
8544     + * @i_next_transaction:
8545     + *
8546     + * Pointer to the running transaction modifying inode's data in case
8547     + * there is already a committing transaction touching it. [j_list_lock]
8548     + */
8549     transaction_t *i_next_transaction;
8550    
8551     - /* List of inodes in the i_transaction [j_list_lock] */
8552     + /**
8553     + * @i_list: List of inodes in the i_transaction [j_list_lock]
8554     + */
8555     struct list_head i_list;
8556    
8557     - /* VFS inode this inode belongs to [constant during the lifetime
8558     - * of the structure] */
8559     + /**
8560     + * @i_vfs_inode:
8561     + *
8562     + * VFS inode this inode belongs to [constant for lifetime of structure]
8563     + */
8564     struct inode *i_vfs_inode;
8565    
8566     - /* Flags of inode [j_list_lock] */
8567     + /**
8568     + * @i_flags: Flags of inode [j_list_lock]
8569     + */
8570     unsigned long i_flags;
8571     };
8572    
8573     @@ -447,12 +462,20 @@ struct jbd2_revoke_table_s;
8574     * struct handle_s - The handle_s type is the concrete type associated with
8575     * handle_t.
8576     * @h_transaction: Which compound transaction is this update a part of?
8577     + * @h_journal: Which journal the handle belongs to - used iff h_reserved is set.
8578     + * @h_rsv_handle: Handle reserved for finishing the logical operation.
8579     * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
8580     - * @h_ref: Reference count on this handle
8581     - * @h_err: Field for caller's use to track errors through large fs operations
8582     - * @h_sync: flag for sync-on-close
8583     - * @h_jdata: flag to force data journaling
8584     - * @h_aborted: flag indicating fatal error on handle
8585     + * @h_ref: Reference count on this handle.
8586     + * @h_err: Field for caller's use to track errors through large fs operations.
8587     + * @h_sync: Flag for sync-on-close.
8588     + * @h_jdata: Flag to force data journaling.
8589     + * @h_reserved: Flag for a handle with reserved credits.
8590     + * @h_aborted: Flag indicating fatal error on handle.
8591     + * @h_type: For handle statistics.
8592     + * @h_line_no: For handle statistics.
8593     + * @h_start_jiffies: Handle start time.
8594     + * @h_requested_credits: Holds @h_buffer_credits after handle is started.
8595     + * @saved_alloc_context: Saved context while transaction is open.
8596     **/
8597    
8598     /* Docbook can't yet cope with the bit fields, but will leave the documentation
8599     @@ -462,32 +485,23 @@ struct jbd2_revoke_table_s;
8600     struct jbd2_journal_handle
8601     {
8602     union {
8603     - /* Which compound transaction is this update a part of? */
8604     transaction_t *h_transaction;
8605     /* Which journal handle belongs to - used iff h_reserved set */
8606     journal_t *h_journal;
8607     };
8608    
8609     - /* Handle reserved for finishing the logical operation */
8610     handle_t *h_rsv_handle;
8611     -
8612     - /* Number of remaining buffers we are allowed to dirty: */
8613     int h_buffer_credits;
8614     -
8615     - /* Reference count on this handle */
8616     int h_ref;
8617     -
8618     - /* Field for caller's use to track errors through large fs */
8619     - /* operations */
8620     int h_err;
8621    
8622     /* Flags [no locking] */
8623     - unsigned int h_sync: 1; /* sync-on-close */
8624     - unsigned int h_jdata: 1; /* force data journaling */
8625     - unsigned int h_reserved: 1; /* handle with reserved credits */
8626     - unsigned int h_aborted: 1; /* fatal error on handle */
8627     - unsigned int h_type: 8; /* for handle statistics */
8628     - unsigned int h_line_no: 16; /* for handle statistics */
8629     + unsigned int h_sync: 1;
8630     + unsigned int h_jdata: 1;
8631     + unsigned int h_reserved: 1;
8632     + unsigned int h_aborted: 1;
8633     + unsigned int h_type: 8;
8634     + unsigned int h_line_no: 16;
8635    
8636     unsigned long h_start_jiffies;
8637     unsigned int h_requested_credits;
8638     @@ -729,228 +743,253 @@ jbd2_time_diff(unsigned long start, unsigned long end)
8639     /**
8640     * struct journal_s - The journal_s type is the concrete type associated with
8641     * journal_t.
8642     - * @j_flags: General journaling state flags
8643     - * @j_errno: Is there an outstanding uncleared error on the journal (from a
8644     - * prior abort)?
8645     - * @j_sb_buffer: First part of superblock buffer
8646     - * @j_superblock: Second part of superblock buffer
8647     - * @j_format_version: Version of the superblock format
8648     - * @j_state_lock: Protect the various scalars in the journal
8649     - * @j_barrier_count: Number of processes waiting to create a barrier lock
8650     - * @j_barrier: The barrier lock itself
8651     - * @j_running_transaction: The current running transaction..
8652     - * @j_committing_transaction: the transaction we are pushing to disk
8653     - * @j_checkpoint_transactions: a linked circular list of all transactions
8654     - * waiting for checkpointing
8655     - * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
8656     - * to start committing, or for a barrier lock to be released
8657     - * @j_wait_done_commit: Wait queue for waiting for commit to complete
8658     - * @j_wait_commit: Wait queue to trigger commit
8659     - * @j_wait_updates: Wait queue to wait for updates to complete
8660     - * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop
8661     - * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
8662     - * @j_head: Journal head - identifies the first unused block in the journal
8663     - * @j_tail: Journal tail - identifies the oldest still-used block in the
8664     - * journal.
8665     - * @j_free: Journal free - how many free blocks are there in the journal?
8666     - * @j_first: The block number of the first usable block
8667     - * @j_last: The block number one beyond the last usable block
8668     - * @j_dev: Device where we store the journal
8669     - * @j_blocksize: blocksize for the location where we store the journal.
8670     - * @j_blk_offset: starting block offset for into the device where we store the
8671     - * journal
8672     - * @j_fs_dev: Device which holds the client fs. For internal journal this will
8673     - * be equal to j_dev
8674     - * @j_reserved_credits: Number of buffers reserved from the running transaction
8675     - * @j_maxlen: Total maximum capacity of the journal region on disk.
8676     - * @j_list_lock: Protects the buffer lists and internal buffer state.
8677     - * @j_inode: Optional inode where we store the journal. If present, all journal
8678     - * block numbers are mapped into this inode via bmap().
8679     - * @j_tail_sequence: Sequence number of the oldest transaction in the log
8680     - * @j_transaction_sequence: Sequence number of the next transaction to grant
8681     - * @j_commit_sequence: Sequence number of the most recently committed
8682     - * transaction
8683     - * @j_commit_request: Sequence number of the most recent transaction wanting
8684     - * commit
8685     - * @j_uuid: Uuid of client object.
8686     - * @j_task: Pointer to the current commit thread for this journal
8687     - * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
8688     - * single compound commit transaction
8689     - * @j_commit_interval: What is the maximum transaction lifetime before we begin
8690     - * a commit?
8691     - * @j_commit_timer: The timer used to wakeup the commit thread
8692     - * @j_revoke_lock: Protect the revoke table
8693     - * @j_revoke: The revoke table - maintains the list of revoked blocks in the
8694     - * current transaction.
8695     - * @j_revoke_table: alternate revoke tables for j_revoke
8696     - * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction
8697     - * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
8698     - * number that will fit in j_blocksize
8699     - * @j_last_sync_writer: most recent pid which did a synchronous write
8700     - * @j_history_lock: Protect the transactions statistics history
8701     - * @j_proc_entry: procfs entry for the jbd statistics directory
8702     - * @j_stats: Overall statistics
8703     - * @j_private: An opaque pointer to fs-private information.
8704     - * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies
8705     */
8706     -
8707     struct journal_s
8708     {
8709     - /* General journaling state flags [j_state_lock] */
8710     + /**
8711     + * @j_flags: General journaling state flags [j_state_lock]
8712     + */
8713     unsigned long j_flags;
8714    
8715     - /*
8716     + /**
8717     + * @j_errno:
8718     + *
8719     * Is there an outstanding uncleared error on the journal (from a prior
8720     * abort)? [j_state_lock]
8721     */
8722     int j_errno;
8723    
8724     - /* The superblock buffer */
8725     + /**
8726     + * @j_sb_buffer: The first part of the superblock buffer.
8727     + */
8728     struct buffer_head *j_sb_buffer;
8729     +
8730     + /**
8731     + * @j_superblock: The second part of the superblock buffer.
8732     + */
8733     journal_superblock_t *j_superblock;
8734    
8735     - /* Version of the superblock format */
8736     + /**
8737     + * @j_format_version: Version of the superblock format.
8738     + */
8739     int j_format_version;
8740    
8741     - /*
8742     - * Protect the various scalars in the journal
8743     + /**
8744     + * @j_state_lock: Protect the various scalars in the journal.
8745     */
8746     rwlock_t j_state_lock;
8747    
8748     - /*
8749     + /**
8750     + * @j_barrier_count:
8751     + *
8752     * Number of processes waiting to create a barrier lock [j_state_lock]
8753     */
8754     int j_barrier_count;
8755    
8756     - /* The barrier lock itself */
8757     + /**
8758     + * @j_barrier: The barrier lock itself.
8759     + */
8760     struct mutex j_barrier;
8761    
8762     - /*
8763     + /**
8764     + * @j_running_transaction:
8765     + *
8766     * Transactions: The current running transaction...
8767     * [j_state_lock] [caller holding open handle]
8768     */
8769     transaction_t *j_running_transaction;
8770    
8771     - /*
8772     + /**
8773     + * @j_committing_transaction:
8774     + *
8775     * the transaction we are pushing to disk
8776     * [j_state_lock] [caller holding open handle]
8777     */
8778     transaction_t *j_committing_transaction;
8779    
8780     - /*
8781     + /**
8782     + * @j_checkpoint_transactions:
8783     + *
8784     * ... and a linked circular list of all transactions waiting for
8785     * checkpointing. [j_list_lock]
8786     */
8787     transaction_t *j_checkpoint_transactions;
8788    
8789     - /*
8790     + /**
8791     + * @j_wait_transaction_locked:
8792     + *
8793     * Wait queue for waiting for a locked transaction to start committing,
8794     - * or for a barrier lock to be released
8795     + * or for a barrier lock to be released.
8796     */
8797     wait_queue_head_t j_wait_transaction_locked;
8798    
8799     - /* Wait queue for waiting for commit to complete */
8800     + /**
8801     + * @j_wait_done_commit: Wait queue for waiting for commit to complete.
8802     + */
8803     wait_queue_head_t j_wait_done_commit;
8804    
8805     - /* Wait queue to trigger commit */
8806     + /**
8807     + * @j_wait_commit: Wait queue to trigger commit.
8808     + */
8809     wait_queue_head_t j_wait_commit;
8810    
8811     - /* Wait queue to wait for updates to complete */
8812     + /**
8813     + * @j_wait_updates: Wait queue to wait for updates to complete.
8814     + */
8815     wait_queue_head_t j_wait_updates;
8816    
8817     - /* Wait queue to wait for reserved buffer credits to drop */
8818     + /**
8819     + * @j_wait_reserved:
8820     + *
8821     + * Wait queue to wait for reserved buffer credits to drop.
8822     + */
8823     wait_queue_head_t j_wait_reserved;
8824    
8825     - /* Semaphore for locking against concurrent checkpoints */
8826     + /**
8827     + * @j_checkpoint_mutex:
8828     + *
8829     + * Semaphore for locking against concurrent checkpoints.
8830     + */
8831     struct mutex j_checkpoint_mutex;
8832    
8833     - /*
8834     + /**
8835     + * @j_chkpt_bhs:
8836     + *
8837     * List of buffer heads used by the checkpoint routine. This
8838     * was moved from jbd2_log_do_checkpoint() to reduce stack
8839     * usage. Access to this array is controlled by the
8840     - * j_checkpoint_mutex. [j_checkpoint_mutex]
8841     + * @j_checkpoint_mutex. [j_checkpoint_mutex]
8842     */
8843     struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH];
8844     -
8845     - /*
8846     +
8847     + /**
8848     + * @j_head:
8849     + *
8850     * Journal head: identifies the first unused block in the journal.
8851     * [j_state_lock]
8852     */
8853     unsigned long j_head;
8854    
8855     - /*
8856     + /**
8857     + * @j_tail:
8858     + *
8859     * Journal tail: identifies the oldest still-used block in the journal.
8860     * [j_state_lock]
8861     */
8862     unsigned long j_tail;
8863    
8864     - /*
8865     + /**
8866     + * @j_free:
8867     + *
8868     * Journal free: how many free blocks are there in the journal?
8869     * [j_state_lock]
8870     */
8871     unsigned long j_free;
8872    
8873     - /*
8874     - * Journal start and end: the block numbers of the first usable block
8875     - * and one beyond the last usable block in the journal. [j_state_lock]
8876     + /**
8877     + * @j_first:
8878     + *
8879     + * The block number of the first usable block in the journal
8880     + * [j_state_lock].
8881     */
8882     unsigned long j_first;
8883     +
8884     + /**
8885     + * @j_last:
8886     + *
8887     + * The block number one beyond the last usable block in the journal
8888     + * [j_state_lock].
8889     + */
8890     unsigned long j_last;
8891    
8892     - /*
8893     - * Device, blocksize and starting block offset for the location where we
8894     - * store the journal.
8895     + /**
8896     + * @j_dev: Device where we store the journal.
8897     */
8898     struct block_device *j_dev;
8899     +
8900     + /**
8901     + * @j_blocksize: Block size for the location where we store the journal.
8902     + */
8903     int j_blocksize;
8904     +
8905     + /**
8906     + * @j_blk_offset:
8907     + *
8908     + * Starting block offset into the device where we store the journal.
8909     + */
8910     unsigned long long j_blk_offset;
8911     +
8912     + /**
8913     + * @j_devname: Journal device name.
8914     + */
8915     char j_devname[BDEVNAME_SIZE+24];
8916    
8917     - /*
8918     + /**
8919     + * @j_fs_dev:
8920     + *
8921     * Device which holds the client fs. For internal journal this will be
8922     * equal to j_dev.
8923     */
8924     struct block_device *j_fs_dev;
8925    
8926     - /* Total maximum capacity of the journal region on disk. */
8927     + /**
8928     + * @j_maxlen: Total maximum capacity of the journal region on disk.
8929     + */
8930     unsigned int j_maxlen;
8931    
8932     - /* Number of buffers reserved from the running transaction */
8933     + /**
8934     + * @j_reserved_credits:
8935     + *
8936     + * Number of buffers reserved from the running transaction.
8937     + */
8938     atomic_t j_reserved_credits;
8939    
8940     - /*
8941     - * Protects the buffer lists and internal buffer state.
8942     + /**
8943     + * @j_list_lock: Protects the buffer lists and internal buffer state.
8944     */
8945     spinlock_t j_list_lock;
8946    
8947     - /* Optional inode where we store the journal. If present, all */
8948     - /* journal block numbers are mapped into this inode via */
8949     - /* bmap(). */
8950     + /**
8951     + * @j_inode:
8952     + *
8953     + * Optional inode where we store the journal. If present, all
8954     + * journal block numbers are mapped into this inode via bmap().
8955     + */
8956     struct inode *j_inode;
8957    
8958     - /*
8959     + /**
8960     + * @j_tail_sequence:
8961     + *
8962     * Sequence number of the oldest transaction in the log [j_state_lock]
8963     */
8964     tid_t j_tail_sequence;
8965    
8966     - /*
8967     + /**
8968     + * @j_transaction_sequence:
8969     + *
8970     * Sequence number of the next transaction to grant [j_state_lock]
8971     */
8972     tid_t j_transaction_sequence;
8973    
8974     - /*
8975     + /**
8976     + * @j_commit_sequence:
8977     + *
8978     * Sequence number of the most recently committed transaction
8979     * [j_state_lock].
8980     */
8981     tid_t j_commit_sequence;
8982    
8983     - /*
8984     + /**
8985     + * @j_commit_request:
8986     + *
8987     * Sequence number of the most recent transaction wanting commit
8988     * [j_state_lock]
8989     */
8990     tid_t j_commit_request;
8991    
8992     - /*
8993     + /**
8994     + * @j_uuid:
8995     + *
8996     * Journal uuid: identifies the object (filesystem, LVM volume etc)
8997     * backed by this journal. This will eventually be replaced by an array
8998     * of uuids, allowing us to index multiple devices within a single
8999     @@ -958,85 +997,151 @@ struct journal_s
9000     */
9001     __u8 j_uuid[16];
9002    
9003     - /* Pointer to the current commit thread for this journal */
9004     + /**
9005     + * @j_task: Pointer to the current commit thread for this journal.
9006     + */
9007     struct task_struct *j_task;
9008    
9009     - /*
9010     + /**
9011     + * @j_max_transaction_buffers:
9012     + *
9013     * Maximum number of metadata buffers to allow in a single compound
9014     - * commit transaction
9015     + * commit transaction.
9016     */
9017     int j_max_transaction_buffers;
9018    
9019     - /*
9020     + /**
9021     + * @j_commit_interval:
9022     + *
9023     * What is the maximum transaction lifetime before we begin a commit?
9024     */
9025     unsigned long j_commit_interval;
9026    
9027     - /* The timer used to wakeup the commit thread: */
9028     + /**
9029     + * @j_commit_timer: The timer used to wakeup the commit thread.
9030     + */
9031     struct timer_list j_commit_timer;
9032    
9033     - /*
9034     - * The revoke table: maintains the list of revoked blocks in the
9035     - * current transaction. [j_revoke_lock]
9036     + /**
9037     + * @j_revoke_lock: Protect the revoke table.
9038     */
9039     spinlock_t j_revoke_lock;
9040     +
9041     + /**
9042     + * @j_revoke:
9043     + *
9044     + * The revoke table - maintains the list of revoked blocks in the
9045     + * current transaction.
9046     + */
9047     struct jbd2_revoke_table_s *j_revoke;
9048     +
9049     + /**
9050     + * @j_revoke_table: Alternate revoke tables for j_revoke.
9051     + */
9052     struct jbd2_revoke_table_s *j_revoke_table[2];
9053    
9054     - /*
9055     - * array of bhs for jbd2_journal_commit_transaction
9056     + /**
9057     + * @j_wbuf: Array of bhs for jbd2_journal_commit_transaction.
9058     */
9059     struct buffer_head **j_wbuf;
9060     +
9061     + /**
9062     + * @j_wbufsize:
9063     + *
9064     + * Size of @j_wbuf array.
9065     + */
9066     int j_wbufsize;
9067    
9068     - /*
9069     - * this is the pid of hte last person to run a synchronous operation
9070     - * through the journal
9071     + /**
9072     + * @j_last_sync_writer:
9073     + *
9074     + * The pid of the last person to run a synchronous operation
9075     + * through the journal.
9076     */
9077     pid_t j_last_sync_writer;
9078    
9079     - /*
9080     - * the average amount of time in nanoseconds it takes to commit a
9081     + /**
9082     + * @j_average_commit_time:
9083     + *
9084     + * The average amount of time in nanoseconds it takes to commit a
9085     * transaction to disk. [j_state_lock]
9086     */
9087     u64 j_average_commit_time;
9088    
9089     - /*
9090     - * minimum and maximum times that we should wait for
9091     - * additional filesystem operations to get batched into a
9092     - * synchronous handle in microseconds
9093     + /**
9094     + * @j_min_batch_time:
9095     + *
9096     + * Minimum time that we should wait for additional filesystem operations
9097     + * to get batched into a synchronous handle in microseconds.
9098     */
9099     u32 j_min_batch_time;
9100     +
9101     + /**
9102     + * @j_max_batch_time:
9103     + *
9104     + * Maximum time that we should wait for additional filesystem operations
9105     + * to get batched into a synchronous handle in microseconds.
9106     + */
9107     u32 j_max_batch_time;
9108    
9109     - /* This function is called when a transaction is closed */
9110     + /**
9111     + * @j_commit_callback:
9112     + *
9113     + * This function is called when a transaction is closed.
9114     + */
9115     void (*j_commit_callback)(journal_t *,
9116     transaction_t *);
9117    
9118     /*
9119     * Journal statistics
9120     */
9121     +
9122     + /**
9123     + * @j_history_lock: Protect the transactions statistics history.
9124     + */
9125     spinlock_t j_history_lock;
9126     +
9127     + /**
9128     + * @j_proc_entry: procfs entry for the jbd statistics directory.
9129     + */
9130     struct proc_dir_entry *j_proc_entry;
9131     +
9132     + /**
9133     + * @j_stats: Overall statistics.
9134     + */
9135     struct transaction_stats_s j_stats;
9136    
9137     - /* Failed journal commit ID */
9138     + /**
9139     + * @j_failed_commit: Failed journal commit ID.
9140     + */
9141     unsigned int j_failed_commit;
9142    
9143     - /*
9144     + /**
9145     + * @j_private:
9146     + *
9147     * An opaque pointer to fs-private information. ext3 puts its
9148     - * superblock pointer here
9149     + * superblock pointer here.
9150     */
9151     void *j_private;
9152    
9153     - /* Reference to checksum algorithm driver via cryptoapi */
9154     + /**
9155     + * @j_chksum_driver:
9156     + *
9157     + * Reference to checksum algorithm driver via cryptoapi.
9158     + */
9159     struct crypto_shash *j_chksum_driver;
9160    
9161     - /* Precomputed journal UUID checksum for seeding other checksums */
9162     + /**
9163     + * @j_csum_seed:
9164     + *
9165     + * Precomputed journal UUID checksum for seeding other checksums.
9166     + */
9167     __u32 j_csum_seed;
9168    
9169     #ifdef CONFIG_DEBUG_LOCK_ALLOC
9170     - /*
9171     + /**
9172     + * @j_trans_commit_map:
9173     + *
9174     * Lockdep entity to track transaction commit dependencies. Handles
9175     * hold this "lock" for read, when we wait for commit, we acquire the
9176     * "lock" for writing. This matches the properties of jbd2 journalling
9177     diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
9178     deleted file mode 100644
9179     index 7b1d7bead7d9..000000000000
9180     --- a/include/linux/kmemcheck.h
9181     +++ /dev/null
9182     @@ -1,172 +0,0 @@
9183     -/* SPDX-License-Identifier: GPL-2.0 */
9184     -#ifndef LINUX_KMEMCHECK_H
9185     -#define LINUX_KMEMCHECK_H
9186     -
9187     -#include <linux/mm_types.h>
9188     -#include <linux/types.h>
9189     -
9190     -#ifdef CONFIG_KMEMCHECK
9191     -extern int kmemcheck_enabled;
9192     -
9193     -/* The slab-related functions. */
9194     -void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
9195     -void kmemcheck_free_shadow(struct page *page, int order);
9196     -void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
9197     - size_t size);
9198     -void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
9199     -
9200     -void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
9201     - gfp_t gfpflags);
9202     -
9203     -void kmemcheck_show_pages(struct page *p, unsigned int n);
9204     -void kmemcheck_hide_pages(struct page *p, unsigned int n);
9205     -
9206     -bool kmemcheck_page_is_tracked(struct page *p);
9207     -
9208     -void kmemcheck_mark_unallocated(void *address, unsigned int n);
9209     -void kmemcheck_mark_uninitialized(void *address, unsigned int n);
9210     -void kmemcheck_mark_initialized(void *address, unsigned int n);
9211     -void kmemcheck_mark_freed(void *address, unsigned int n);
9212     -
9213     -void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
9214     -void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
9215     -void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
9216     -
9217     -int kmemcheck_show_addr(unsigned long address);
9218     -int kmemcheck_hide_addr(unsigned long address);
9219     -
9220     -bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
9221     -
9222     -/*
9223     - * Bitfield annotations
9224     - *
9225     - * How to use: If you have a struct using bitfields, for example
9226     - *
9227     - * struct a {
9228     - * int x:8, y:8;
9229     - * };
9230     - *
9231     - * then this should be rewritten as
9232     - *
9233     - * struct a {
9234     - * kmemcheck_bitfield_begin(flags);
9235     - * int x:8, y:8;
9236     - * kmemcheck_bitfield_end(flags);
9237     - * };
9238     - *
9239     - * Now the "flags_begin" and "flags_end" members may be used to refer to the
9240     - * beginning and end, respectively, of the bitfield (and things like
9241     - * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
9242     - * fields should be annotated:
9243     - *
9244     - * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
9245     - * kmemcheck_annotate_bitfield(a, flags);
9246     - */
9247     -#define kmemcheck_bitfield_begin(name) \
9248     - int name##_begin[0];
9249     -
9250     -#define kmemcheck_bitfield_end(name) \
9251     - int name##_end[0];
9252     -
9253     -#define kmemcheck_annotate_bitfield(ptr, name) \
9254     - do { \
9255     - int _n; \
9256     - \
9257     - if (!ptr) \
9258     - break; \
9259     - \
9260     - _n = (long) &((ptr)->name##_end) \
9261     - - (long) &((ptr)->name##_begin); \
9262     - BUILD_BUG_ON(_n < 0); \
9263     - \
9264     - kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
9265     - } while (0)
9266     -
9267     -#define kmemcheck_annotate_variable(var) \
9268     - do { \
9269     - kmemcheck_mark_initialized(&(var), sizeof(var)); \
9270     - } while (0) \
9271     -
9272     -#else
9273     -#define kmemcheck_enabled 0
9274     -
9275     -static inline void
9276     -kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
9277     -{
9278     -}
9279     -
9280     -static inline void
9281     -kmemcheck_free_shadow(struct page *page, int order)
9282     -{
9283     -}
9284     -
9285     -static inline void
9286     -kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
9287     - size_t size)
9288     -{
9289     -}
9290     -
9291     -static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
9292     - size_t size)
9293     -{
9294     -}
9295     -
9296     -static inline void kmemcheck_pagealloc_alloc(struct page *p,
9297     - unsigned int order, gfp_t gfpflags)
9298     -{
9299     -}
9300     -
9301     -static inline bool kmemcheck_page_is_tracked(struct page *p)
9302     -{
9303     - return false;
9304     -}
9305     -
9306     -static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
9307     -{
9308     -}
9309     -
9310     -static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
9311     -{
9312     -}
9313     -
9314     -static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
9315     -{
9316     -}
9317     -
9318     -static inline void kmemcheck_mark_freed(void *address, unsigned int n)
9319     -{
9320     -}
9321     -
9322     -static inline void kmemcheck_mark_unallocated_pages(struct page *p,
9323     - unsigned int n)
9324     -{
9325     -}
9326     -
9327     -static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
9328     - unsigned int n)
9329     -{
9330     -}
9331     -
9332     -static inline void kmemcheck_mark_initialized_pages(struct page *p,
9333     - unsigned int n)
9334     -{
9335     -}
9336     -
9337     -static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
9338     -{
9339     - return true;
9340     -}
9341     -
9342     -#define kmemcheck_bitfield_begin(name)
9343     -#define kmemcheck_bitfield_end(name)
9344     -#define kmemcheck_annotate_bitfield(ptr, name) \
9345     - do { \
9346     - } while (0)
9347     -
9348     -#define kmemcheck_annotate_variable(var) \
9349     - do { \
9350     - } while (0)
9351     -
9352     -#endif /* CONFIG_KMEMCHECK */
9353     -
9354     -#endif /* LINUX_KMEMCHECK_H */
9355     diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
9356     index a13525daf09b..ae15864c8708 100644
9357     --- a/include/linux/mlx5/driver.h
9358     +++ b/include/linux/mlx5/driver.h
9359     @@ -1201,7 +1201,7 @@ mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
9360     int eqn;
9361     int err;
9362    
9363     - err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
9364     + err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq);
9365     if (err)
9366     return NULL;
9367    
9368     diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
9369     index c30b32e3c862..10191c28fc04 100644
9370     --- a/include/linux/mm_inline.h
9371     +++ b/include/linux/mm_inline.h
9372     @@ -127,10 +127,4 @@ static __always_inline enum lru_list page_lru(struct page *page)
9373    
9374     #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
9375    
9376     -#ifdef arch_unmap_kpfn
9377     -extern void arch_unmap_kpfn(unsigned long pfn);
9378     -#else
9379     -static __always_inline void arch_unmap_kpfn(unsigned long pfn) { }
9380     -#endif
9381     -
9382     #endif
9383     diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
9384     index c85f11dafd56..9f0bb908e2b5 100644
9385     --- a/include/linux/mm_types.h
9386     +++ b/include/linux/mm_types.h
9387     @@ -207,14 +207,6 @@ struct page {
9388     not kmapped, ie. highmem) */
9389     #endif /* WANT_PAGE_VIRTUAL */
9390    
9391     -#ifdef CONFIG_KMEMCHECK
9392     - /*
9393     - * kmemcheck wants to track the status of each byte in a page; this
9394     - * is a pointer to such a status block. NULL if not tracked.
9395     - */
9396     - void *shadow;
9397     -#endif
9398     -
9399     #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
9400     int _last_cpupid;
9401     #endif
9402     diff --git a/include/linux/net.h b/include/linux/net.h
9403     index d97d80d7fdf8..caeb159abda5 100644
9404     --- a/include/linux/net.h
9405     +++ b/include/linux/net.h
9406     @@ -22,7 +22,6 @@
9407     #include <linux/random.h>
9408     #include <linux/wait.h>
9409     #include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
9410     -#include <linux/kmemcheck.h>
9411     #include <linux/rcupdate.h>
9412     #include <linux/once.h>
9413     #include <linux/fs.h>
9414     @@ -111,9 +110,7 @@ struct socket_wq {
9415     struct socket {
9416     socket_state state;
9417    
9418     - kmemcheck_bitfield_begin(type);
9419     short type;
9420     - kmemcheck_bitfield_end(type);
9421    
9422     unsigned long flags;
9423    
9424     diff --git a/include/linux/nospec.h b/include/linux/nospec.h
9425     index b99bced39ac2..fbc98e2c8228 100644
9426     --- a/include/linux/nospec.h
9427     +++ b/include/linux/nospec.h
9428     @@ -19,20 +19,6 @@
9429     static inline unsigned long array_index_mask_nospec(unsigned long index,
9430     unsigned long size)
9431     {
9432     - /*
9433     - * Warn developers about inappropriate array_index_nospec() usage.
9434     - *
9435     - * Even if the CPU speculates past the WARN_ONCE branch, the
9436     - * sign bit of @index is taken into account when generating the
9437     - * mask.
9438     - *
9439     - * This warning is compiled out when the compiler can infer that
9440     - * @index and @size are less than LONG_MAX.
9441     - */
9442     - if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,
9443     - "array_index_nospec() limited to range of [0, LONG_MAX]\n"))
9444     - return 0;
9445     -
9446     /*
9447     * Always calculate and emit the mask even if the compiler
9448     * thinks the mask is not needed. The compiler does not take
9449     @@ -43,6 +29,26 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
9450     }
9451     #endif
9452    
9453     +/*
9454     + * Warn developers about inappropriate array_index_nospec() usage.
9455     + *
9456     + * Even if the CPU speculates past the WARN_ONCE branch, the
9457     + * sign bit of @index is taken into account when generating the
9458     + * mask.
9459     + *
9460     + * This warning is compiled out when the compiler can infer that
9461     + * @index and @size are less than LONG_MAX.
9462     + */
9463     +#define array_index_mask_nospec_check(index, size) \
9464     +({ \
9465     + if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \
9466     + "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \
9467     + _mask = 0; \
9468     + else \
9469     + _mask = array_index_mask_nospec(index, size); \
9470     + _mask; \
9471     +})
9472     +
9473     /*
9474     * array_index_nospec - sanitize an array index after a bounds check
9475     *
9476     @@ -61,7 +67,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
9477     ({ \
9478     typeof(index) _i = (index); \
9479     typeof(size) _s = (size); \
9480     - unsigned long _mask = array_index_mask_nospec(_i, _s); \
9481     + unsigned long _mask = array_index_mask_nospec_check(_i, _s); \
9482     \
9483     BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
9484     BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
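
The nospec.h hunk above lifts the WARN_ONCE range check out of the generic array_index_mask_nospec() and into an array_index_mask_nospec_check() wrapper that array_index_nospec() always calls, so the check also covers architectures that supply their own mask implementation. A usage sketch of the caller-facing macro; the lookup table and function are hypothetical:

	static int lut[16];

	int lut_read(unsigned long idx)
	{
		if (idx >= ARRAY_SIZE(lut))
			return -EINVAL;
		/* Clamp idx under speculation so the bounds check above
		 * cannot be bypassed transiently (Spectre v1). */
		idx = array_index_nospec(idx, ARRAY_SIZE(lut));
		return lut[idx];
	}
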
9485     diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
9486     index fa6ace66fea5..289e4d54e3e0 100644
9487     --- a/include/linux/ring_buffer.h
9488     +++ b/include/linux/ring_buffer.h
9489     @@ -2,7 +2,6 @@
9490     #ifndef _LINUX_RING_BUFFER_H
9491     #define _LINUX_RING_BUFFER_H
9492    
9493     -#include <linux/kmemcheck.h>
9494     #include <linux/mm.h>
9495     #include <linux/seq_file.h>
9496     #include <linux/poll.h>
9497     @@ -14,9 +13,7 @@ struct ring_buffer_iter;
9498     * Don't refer to this struct directly, use functions below.
9499     */
9500     struct ring_buffer_event {
9501     - kmemcheck_bitfield_begin(bitfield);
9502     u32 type_len:5, time_delta:27;
9503     - kmemcheck_bitfield_end(bitfield);
9504    
9505     u32 array[];
9506     };
9507     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
9508     index 051e0939ec19..be45224b01d7 100644
9509     --- a/include/linux/skbuff.h
9510     +++ b/include/linux/skbuff.h
9511     @@ -15,7 +15,6 @@
9512     #define _LINUX_SKBUFF_H
9513    
9514     #include <linux/kernel.h>
9515     -#include <linux/kmemcheck.h>
9516     #include <linux/compiler.h>
9517     #include <linux/time.h>
9518     #include <linux/bug.h>
9519     @@ -706,7 +705,6 @@ struct sk_buff {
9520     /* Following fields are _not_ copied in __copy_skb_header()
9521     * Note that queue_mapping is here mostly to fill a hole.
9522     */
9523     - kmemcheck_bitfield_begin(flags1);
9524     __u16 queue_mapping;
9525    
9526     /* if you move cloned around you also must adapt those constants */
9527     @@ -725,7 +723,6 @@ struct sk_buff {
9528     head_frag:1,
9529     xmit_more:1,
9530     __unused:1; /* one bit hole */
9531     - kmemcheck_bitfield_end(flags1);
9532    
9533     /* fields enclosed in headers_start/headers_end are copied
9534     * using a single memcpy() in __copy_skb_header()
9535     diff --git a/include/linux/slab.h b/include/linux/slab.h
9536     index af5aa65c7c18..ae5ed6492d54 100644
9537     --- a/include/linux/slab.h
9538     +++ b/include/linux/slab.h
9539     @@ -78,12 +78,6 @@
9540    
9541     #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
9542    
9543     -/* Don't track use of uninitialized memory */
9544     -#ifdef CONFIG_KMEMCHECK
9545     -# define SLAB_NOTRACK 0x01000000UL
9546     -#else
9547     -# define SLAB_NOTRACK 0x00000000UL
9548     -#endif
9549     #ifdef CONFIG_FAILSLAB
9550     # define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */
9551     #else
9552     diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
9553     index 4bcdf00c110f..34f053a150a9 100644
9554     --- a/include/linux/thread_info.h
9555     +++ b/include/linux/thread_info.h
9556     @@ -44,10 +44,9 @@ enum {
9557     #endif
9558    
9559     #if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
9560     -# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
9561     - __GFP_ZERO)
9562     +# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
9563     #else
9564     -# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK)
9565     +# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT)
9566     #endif
9567    
9568     /*
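
With __GFP_NOTRACK gone, THREADINFO_GFP reduces to GFP_KERNEL_ACCOUNT, plus __GFP_ZERO when stack-usage or kmemleak debugging is enabled. For context, a sketch of roughly where the flag set is consumed, modeled on the thread-stack allocator; details vary by configuration:

	static void *alloc_stack(int node)
	{
		struct page *page = alloc_pages_node(node, THREADINFO_GFP,
						     THREAD_SIZE_ORDER);

		return page ? page_address(page) : NULL;
	}
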
9569     diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
9570     index db8162dd8c0b..8e51b4a69088 100644
9571     --- a/include/net/inet_sock.h
9572     +++ b/include/net/inet_sock.h
9573     @@ -17,7 +17,6 @@
9574     #define _INET_SOCK_H
9575    
9576     #include <linux/bitops.h>
9577     -#include <linux/kmemcheck.h>
9578     #include <linux/string.h>
9579     #include <linux/types.h>
9580     #include <linux/jhash.h>
9581     @@ -84,7 +83,6 @@ struct inet_request_sock {
9582     #define ireq_state req.__req_common.skc_state
9583     #define ireq_family req.__req_common.skc_family
9584    
9585     - kmemcheck_bitfield_begin(flags);
9586     u16 snd_wscale : 4,
9587     rcv_wscale : 4,
9588     tstamp_ok : 1,
9589     @@ -93,7 +91,6 @@ struct inet_request_sock {
9590     ecn_ok : 1,
9591     acked : 1,
9592     no_srccheck: 1;
9593     - kmemcheck_bitfield_end(flags);
9594     u32 ir_mark;
9595     union {
9596     struct ip_options_rcu __rcu *ireq_opt;
9597     diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
9598     index 6a75d67a30fd..1356fa6a7566 100644
9599     --- a/include/net/inet_timewait_sock.h
9600     +++ b/include/net/inet_timewait_sock.h
9601     @@ -15,8 +15,6 @@
9602     #ifndef _INET_TIMEWAIT_SOCK_
9603     #define _INET_TIMEWAIT_SOCK_
9604    
9605     -
9606     -#include <linux/kmemcheck.h>
9607     #include <linux/list.h>
9608     #include <linux/timer.h>
9609     #include <linux/types.h>
9610     @@ -69,14 +67,12 @@ struct inet_timewait_sock {
9611     /* Socket demultiplex comparisons on incoming packets. */
9612     /* these three are in inet_sock */
9613     __be16 tw_sport;
9614     - kmemcheck_bitfield_begin(flags);
9615     /* And these are ours. */
9616     unsigned int tw_kill : 1,
9617     tw_transparent : 1,
9618     tw_flowlabel : 20,
9619     tw_pad : 2, /* 2 bits hole */
9620     tw_tos : 8;
9621     - kmemcheck_bitfield_end(flags);
9622     struct timer_list tw_timer;
9623     struct inet_bind_bucket *tw_tb;
9624     };
9625     diff --git a/include/net/sock.h b/include/net/sock.h
9626     index 006580155a87..9bd5d68076d9 100644
9627     --- a/include/net/sock.h
9628     +++ b/include/net/sock.h
9629     @@ -436,7 +436,6 @@ struct sock {
9630     #define SK_FL_TYPE_MASK 0xffff0000
9631     #endif
9632    
9633     - kmemcheck_bitfield_begin(flags);
9634     unsigned int sk_padding : 1,
9635     sk_kern_sock : 1,
9636     sk_no_check_tx : 1,
9637     @@ -445,8 +444,6 @@ struct sock {
9638     sk_protocol : 8,
9639     sk_type : 16;
9640     #define SK_PROTOCOL_MAX U8_MAX
9641     - kmemcheck_bitfield_end(flags);
9642     -
9643     u16 sk_gso_max_segs;
9644     unsigned long sk_lingertime;
9645     struct proto *sk_prot_creator;
9646     diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
9647     index e8608b2dc844..6533aa64f009 100644
9648     --- a/include/rdma/ib_verbs.h
9649     +++ b/include/rdma/ib_verbs.h
9650     @@ -971,9 +971,9 @@ struct ib_wc {
9651     u32 invalidate_rkey;
9652     } ex;
9653     u32 src_qp;
9654     + u32 slid;
9655     int wc_flags;
9656     u16 pkey_index;
9657     - u32 slid;
9658     u8 sl;
9659     u8 dlid_path_bits;
9660     u8 port_num; /* valid only for DR SMPs on switches */
9661     diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
9662     index 648cbf603736..72162f3a03fa 100644
9663     --- a/include/trace/events/mmflags.h
9664     +++ b/include/trace/events/mmflags.h
9665     @@ -46,7 +46,6 @@
9666     {(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
9667     {(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
9668     {(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
9669     - {(unsigned long)__GFP_NOTRACK, "__GFP_NOTRACK"}, \
9670     {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
9671     {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
9672     {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
9673     diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
9674     index a7c8b452aab9..d791863b62fc 100644
9675     --- a/include/trace/events/xen.h
9676     +++ b/include/trace/events/xen.h
9677     @@ -365,7 +365,7 @@ TRACE_EVENT(xen_mmu_flush_tlb,
9678     TP_printk("%s", "")
9679     );
9680    
9681     -TRACE_EVENT(xen_mmu_flush_tlb_single,
9682     +TRACE_EVENT(xen_mmu_flush_tlb_one_user,
9683     TP_PROTO(unsigned long addr),
9684     TP_ARGS(addr),
9685     TP_STRUCT__entry(
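
Because TRACE_EVENT(name, ...) also generates the trace_<name>() emission helper, renaming the event renames its call sites; after this hunk the Xen MMU code would emit the event as in this sketch:

	trace_xen_mmu_flush_tlb_one_user(addr);	/* was trace_xen_mmu_flush_tlb_single(addr) */
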
9686     diff --git a/init/do_mounts.c b/init/do_mounts.c
9687     index f6d4dd764a52..7cf4f6dafd5f 100644
9688     --- a/init/do_mounts.c
9689     +++ b/init/do_mounts.c
9690     @@ -380,8 +380,7 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
9691    
9692     void __init mount_block_root(char *name, int flags)
9693     {
9694     - struct page *page = alloc_page(GFP_KERNEL |
9695     - __GFP_NOTRACK_FALSE_POSITIVE);
9696     + struct page *page = alloc_page(GFP_KERNEL);
9697     char *fs_names = page_address(page);
9698     char *p;
9699     #ifdef CONFIG_BLOCK
9700     diff --git a/init/main.c b/init/main.c
9701     index b32ec72cdf3d..2d355a61dfc5 100644
9702     --- a/init/main.c
9703     +++ b/init/main.c
9704     @@ -69,7 +69,6 @@
9705     #include <linux/kgdb.h>
9706     #include <linux/ftrace.h>
9707     #include <linux/async.h>
9708     -#include <linux/kmemcheck.h>
9709     #include <linux/sfi.h>
9710     #include <linux/shmem_fs.h>
9711     #include <linux/slab.h>
9712     diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
9713     index 2246115365d9..d203a5d6b726 100644
9714     --- a/kernel/bpf/core.c
9715     +++ b/kernel/bpf/core.c
9716     @@ -85,8 +85,6 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
9717     if (fp == NULL)
9718     return NULL;
9719    
9720     - kmemcheck_annotate_bitfield(fp, meta);
9721     -
9722     aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
9723     if (aux == NULL) {
9724     vfree(fp);
9725     @@ -127,8 +125,6 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
9726     if (fp == NULL) {
9727     __bpf_prog_uncharge(fp_old->aux->user, delta);
9728     } else {
9729     - kmemcheck_annotate_bitfield(fp, meta);
9730     -
9731     memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
9732     fp->pages = pages;
9733     fp->aux->prog = fp;
9734     @@ -662,8 +658,6 @@ static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
9735    
9736     fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
9737     if (fp != NULL) {
9738     - kmemcheck_annotate_bitfield(fp, meta);
9739     -
9740     /* aux->prog still points to the fp_other one, so
9741     * when promoting the clone to the real program,
9742     * this still needs to be adapted.
9743     diff --git a/kernel/fork.c b/kernel/fork.c
9744     index 500ce64517d9..98c91bd341b4 100644
9745     --- a/kernel/fork.c
9746     +++ b/kernel/fork.c
9747     @@ -469,7 +469,7 @@ void __init fork_init(void)
9748     /* create a slab on which task_structs can be allocated */
9749     task_struct_cachep = kmem_cache_create("task_struct",
9750     arch_task_struct_size, align,
9751     - SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL);
9752     + SLAB_PANIC|SLAB_ACCOUNT, NULL);
9753     #endif
9754    
9755     /* do the arch specific task caches init */
9756     @@ -2208,18 +2208,18 @@ void __init proc_caches_init(void)
9757     sighand_cachep = kmem_cache_create("sighand_cache",
9758     sizeof(struct sighand_struct), 0,
9759     SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
9760     - SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
9761     + SLAB_ACCOUNT, sighand_ctor);
9762     signal_cachep = kmem_cache_create("signal_cache",
9763     sizeof(struct signal_struct), 0,
9764     - SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
9765     + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
9766     NULL);
9767     files_cachep = kmem_cache_create("files_cache",
9768     sizeof(struct files_struct), 0,
9769     - SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
9770     + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
9771     NULL);
9772     fs_cachep = kmem_cache_create("fs_cache",
9773     sizeof(struct fs_struct), 0,
9774     - SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
9775     + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
9776     NULL);
9777     /*
9778     * FIXME! The "sizeof(struct mm_struct)" currently includes the
9779     @@ -2230,7 +2230,7 @@ void __init proc_caches_init(void)
9780     */
9781     mm_cachep = kmem_cache_create("mm_struct",
9782     sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
9783     - SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
9784     + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
9785     NULL);
9786     vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
9787     mmap_init();
9788     diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
9789     index e36e652d996f..4d362d3e4571 100644
9790     --- a/kernel/locking/lockdep.c
9791     +++ b/kernel/locking/lockdep.c
9792     @@ -47,7 +47,6 @@
9793     #include <linux/stringify.h>
9794     #include <linux/bitops.h>
9795     #include <linux/gfp.h>
9796     -#include <linux/kmemcheck.h>
9797     #include <linux/random.h>
9798     #include <linux/jhash.h>
9799    
9800     @@ -3225,8 +3224,6 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
9801     {
9802     int i;
9803    
9804     - kmemcheck_mark_initialized(lock, sizeof(*lock));
9805     -
9806     for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
9807     lock->class_cache[i] = NULL;
9808    
9809     diff --git a/kernel/memremap.c b/kernel/memremap.c
9810     index 403ab9cdb949..4712ce646e04 100644
9811     --- a/kernel/memremap.c
9812     +++ b/kernel/memremap.c
9813     @@ -301,7 +301,8 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
9814    
9815     /* pages are dead and unused, undo the arch mapping */
9816     align_start = res->start & ~(SECTION_SIZE - 1);
9817     - align_size = ALIGN(resource_size(res), SECTION_SIZE);
9818     + align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
9819     + - align_start;
9820    
9821     mem_hotplug_begin();
9822     arch_remove_memory(align_start, align_size);
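
The old expression rounded only the resource size up to a section, so a resource whose start is not section-aligned could straddle one more section than was unmapped; the new expression aligns the end address and subtracts the aligned start. A worked example with hypothetical numbers, taking SECTION_SIZE = 0x8000000 (128 MiB):

	/*
	 *   res->start         = 0x09000000
	 *   resource_size(res) = 0x08000000  (resource ends at 0x11000000)
	 *   align_start        = 0x08000000
	 *
	 * old: align_size = ALIGN(0x08000000, SECTION_SIZE) = 0x08000000,
	 *      tearing down [0x08000000, 0x10000000) and leaving the tail
	 *      section mapped.
	 * new: align_size = ALIGN(0x11000000, SECTION_SIZE) - 0x08000000
	 *                 = 0x18000000 - 0x08000000 = 0x10000000,
	 *      covering both sections the resource touches.
	 */
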
9823     diff --git a/kernel/signal.c b/kernel/signal.c
9824     index 1facff1dbbae..6895f6bb98a7 100644
9825     --- a/kernel/signal.c
9826     +++ b/kernel/signal.c
9827     @@ -1038,8 +1038,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
9828     else
9829     override_rlimit = 0;
9830    
9831     - q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
9832     - override_rlimit);
9833     + q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
9834     if (q) {
9835     list_add_tail(&q->list, &pending->list);
9836     switch ((unsigned long) info) {
9837     diff --git a/kernel/softirq.c b/kernel/softirq.c
9838     index 4e09821f9d9e..e89c3b0cff6d 100644
9839     --- a/kernel/softirq.c
9840     +++ b/kernel/softirq.c
9841     @@ -486,16 +486,6 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
9842     }
9843     EXPORT_SYMBOL(__tasklet_hi_schedule);
9844    
9845     -void __tasklet_hi_schedule_first(struct tasklet_struct *t)
9846     -{
9847     - BUG_ON(!irqs_disabled());
9848     -
9849     - t->next = __this_cpu_read(tasklet_hi_vec.head);
9850     - __this_cpu_write(tasklet_hi_vec.head, t);
9851     - __raise_softirq_irqoff(HI_SOFTIRQ);
9852     -}
9853     -EXPORT_SYMBOL(__tasklet_hi_schedule_first);
9854     -
9855     static __latent_entropy void tasklet_action(struct softirq_action *a)
9856     {
9857     struct tasklet_struct *list;
9858     diff --git a/kernel/sysctl.c b/kernel/sysctl.c
9859     index 56aca862c4f5..069550540a39 100644
9860     --- a/kernel/sysctl.c
9861     +++ b/kernel/sysctl.c
9862     @@ -30,7 +30,6 @@
9863     #include <linux/proc_fs.h>
9864     #include <linux/security.h>
9865     #include <linux/ctype.h>
9866     -#include <linux/kmemcheck.h>
9867     #include <linux/kmemleak.h>
9868     #include <linux/fs.h>
9869     #include <linux/init.h>
9870     @@ -1173,15 +1172,6 @@ static struct ctl_table kern_table[] = {
9871     .extra1 = &zero,
9872     .extra2 = &one_thousand,
9873     },
9874     -#endif
9875     -#ifdef CONFIG_KMEMCHECK
9876     - {
9877     - .procname = "kmemcheck",
9878     - .data = &kmemcheck_enabled,
9879     - .maxlen = sizeof(int),
9880     - .mode = 0644,
9881     - .proc_handler = proc_dointvec,
9882     - },
9883     #endif
9884     {
9885     .procname = "panic_on_warn",
9886     diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
9887     index 434c840e2d82..4ad6f6ca18c1 100644
9888     --- a/kernel/trace/Kconfig
9889     +++ b/kernel/trace/Kconfig
9890     @@ -343,7 +343,7 @@ config PROFILE_ANNOTATED_BRANCHES
9891     on if you need to profile the system's use of these macros.
9892    
9893     config PROFILE_ALL_BRANCHES
9894     - bool "Profile all if conditionals"
9895     + bool "Profile all if conditionals" if !FORTIFY_SOURCE
9896     select TRACE_BRANCH_PROFILING
9897     help
9898     This tracer profiles all branch conditions. Every if ()
9899     diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
9900     index 0476a9372014..39c221454186 100644
9901     --- a/kernel/trace/ring_buffer.c
9902     +++ b/kernel/trace/ring_buffer.c
9903     @@ -13,7 +13,6 @@
9904     #include <linux/uaccess.h>
9905     #include <linux/hardirq.h>
9906     #include <linux/kthread.h> /* for self test */
9907     -#include <linux/kmemcheck.h>
9908     #include <linux/module.h>
9909     #include <linux/percpu.h>
9910     #include <linux/mutex.h>
9911     @@ -2059,7 +2058,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
9912     }
9913    
9914     event = __rb_page_index(tail_page, tail);
9915     - kmemcheck_annotate_bitfield(event, bitfield);
9916    
9917     /* account for padding bytes */
9918     local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
9919     @@ -2690,7 +2688,6 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
9920     /* We reserved something on the buffer */
9921    
9922     event = __rb_page_index(tail_page, tail);
9923     - kmemcheck_annotate_bitfield(event, bitfield);
9924     rb_update_event(cpu_buffer, event, info);
9925    
9926     local_inc(&tail_page->entries);
9927     diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
9928     index 61e7f0678d33..a764aec3c9a1 100644
9929     --- a/kernel/trace/trace_events_filter.c
9930     +++ b/kernel/trace/trace_events_filter.c
9931     @@ -400,7 +400,6 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
9932     for (i = 0; i < len; i++) {
9933     if (buff[i] == '*') {
9934     if (!i) {
9935     - *search = buff + 1;
9936     type = MATCH_END_ONLY;
9937     } else if (i == len - 1) {
9938     if (type == MATCH_END_ONLY)
9939     @@ -410,14 +409,14 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
9940     buff[i] = 0;
9941     break;
9942     } else { /* pattern continues, use full glob */
9943     - type = MATCH_GLOB;
9944     - break;
9945     + return MATCH_GLOB;
9946     }
9947     } else if (strchr("[?\\", buff[i])) {
9948     - type = MATCH_GLOB;
9949     - break;
9950     + return MATCH_GLOB;
9951     }
9952     }
9953     + if (buff[0] == '*')
9954     + *search = buff + 1;
9955    
9956     return type;
9957     }
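
The filter hunk above reorders the glob classification: previously *search was advanced past a leading '*' the moment it was seen, but a pattern such as "*foo*bar" is later reclassified as MATCH_GLOB and must be matched against the full buffer. Returning MATCH_GLOB immediately and doing the *search adjustment once at the end keeps the two consistent. A simplified userspace sketch of the fixed control flow (the harness and reduced argument list are ours; the kernel version also handles '!' negation and length bookkeeping):

#include <stdio.h>
#include <string.h>

enum regex_type { MATCH_FULL, MATCH_FRONT_ONLY, MATCH_MIDDLE_ONLY,
                  MATCH_END_ONLY, MATCH_GLOB };

static enum regex_type classify(char *buff, int len, char **search)
{
    enum regex_type type = MATCH_FULL;
    int i;

    *search = buff;
    for (i = 0; i < len; i++) {
        if (buff[i] == '*') {
            if (!i) {
                type = MATCH_END_ONLY;   /* tentative: may become a glob */
            } else if (i == len - 1) {
                type = (type == MATCH_END_ONLY) ? MATCH_MIDDLE_ONLY
                                                : MATCH_FRONT_ONLY;
                buff[i] = 0;
                break;
            } else {
                return MATCH_GLOB;       /* *search keeps the full pattern */
            }
        } else if (strchr("[?\\", buff[i])) {
            return MATCH_GLOB;
        }
    }
    if (buff[0] == '*')                  /* END/MIDDLE cases skip the '*' */
        *search = buff + 1;
    return type;
}

int main(void)
{
    char pat[] = "*foo*bar";
    char *search;
    enum regex_type t = classify(pat, (int)strlen(pat), &search);

    printf("type=%d search=%s\n", t, search); /* 4 (glob), full pattern */
    return 0;
}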
9958     diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
9959     index 00cb02daeddd..62d0e25c054c 100644
9960     --- a/lib/Kconfig.debug
9961     +++ b/lib/Kconfig.debug
9962     @@ -504,7 +504,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
9963    
9964     config DEBUG_SLAB
9965     bool "Debug slab memory allocations"
9966     - depends on DEBUG_KERNEL && SLAB && !KMEMCHECK
9967     + depends on DEBUG_KERNEL && SLAB
9968     help
9969     Say Y here to have the kernel do limited verification on memory
9970     allocation as well as poisoning memory on free to catch use of freed
9971     @@ -516,7 +516,7 @@ config DEBUG_SLAB_LEAK
9972    
9973     config SLUB_DEBUG_ON
9974     bool "SLUB debugging on by default"
9975     - depends on SLUB && SLUB_DEBUG && !KMEMCHECK
9976     + depends on SLUB && SLUB_DEBUG
9977     default n
9978     help
9979     Boot with debugging on by default. SLUB boots by default with
9980     @@ -730,8 +730,6 @@ config DEBUG_STACKOVERFLOW
9981    
9982     If in doubt, say "N".
9983    
9984     -source "lib/Kconfig.kmemcheck"
9985     -
9986     source "lib/Kconfig.kasan"
9987    
9988     endmenu # "Memory Debugging"
9989     diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
9990     deleted file mode 100644
9991     index 846e039a86b4..000000000000
9992     --- a/lib/Kconfig.kmemcheck
9993     +++ /dev/null
9994     @@ -1,94 +0,0 @@
9995     -config HAVE_ARCH_KMEMCHECK
9996     - bool
9997     -
9998     -if HAVE_ARCH_KMEMCHECK
9999     -
10000     -menuconfig KMEMCHECK
10001     - bool "kmemcheck: trap use of uninitialized memory"
10002     - depends on DEBUG_KERNEL
10003     - depends on !X86_USE_3DNOW
10004     - depends on SLUB || SLAB
10005     - depends on !CC_OPTIMIZE_FOR_SIZE
10006     - depends on !FUNCTION_TRACER
10007     - select FRAME_POINTER
10008     - select STACKTRACE
10009     - default n
10010     - help
10011     - This option enables tracing of dynamically allocated kernel memory
10012     - to see if memory is used before it has been given an initial value.
10013     - Be aware that this requires half of your memory for bookkeeping and
10014     - will insert extra code at *every* read and write to tracked memory
10015     - thus slow down the kernel code (but user code is unaffected).
10016     -
10017     - The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
10018     - or enable kmemcheck at boot-time. If the kernel is started with
10019     - kmemcheck=0, the large memory and CPU overhead is not incurred.
10020     -
10021     -choice
10022     - prompt "kmemcheck: default mode at boot"
10023     - depends on KMEMCHECK
10024     - default KMEMCHECK_ONESHOT_BY_DEFAULT
10025     - help
10026     - This option controls the default behaviour of kmemcheck when the
10027     - kernel boots and no kmemcheck= parameter is given.
10028     -
10029     -config KMEMCHECK_DISABLED_BY_DEFAULT
10030     - bool "disabled"
10031     - depends on KMEMCHECK
10032     -
10033     -config KMEMCHECK_ENABLED_BY_DEFAULT
10034     - bool "enabled"
10035     - depends on KMEMCHECK
10036     -
10037     -config KMEMCHECK_ONESHOT_BY_DEFAULT
10038     - bool "one-shot"
10039     - depends on KMEMCHECK
10040     - help
10041     - In one-shot mode, only the first error detected is reported before
10042     - kmemcheck is disabled.
10043     -
10044     -endchoice
10045     -
10046     -config KMEMCHECK_QUEUE_SIZE
10047     - int "kmemcheck: error queue size"
10048     - depends on KMEMCHECK
10049     - default 64
10050     - help
10051     - Select the maximum number of errors to store in the queue. Since
10052     - errors can occur virtually anywhere and in any context, we need a
10053     - temporary storage area which is guaranteed not to generate any
10054     - other faults. The queue will be emptied as soon as a tasklet may
10055     - be scheduled. If the queue is full, new error reports will be
10056     - lost.
10057     -
10058     -config KMEMCHECK_SHADOW_COPY_SHIFT
10059     - int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
10060     - depends on KMEMCHECK
10061     - range 2 8
10062     - default 5
10063     - help
10064     - Select the number of shadow bytes to save along with each entry of
10065     - the queue. These bytes indicate what parts of an allocation are
10066     - initialized, uninitialized, etc. and will be displayed when an
10067     - error is detected to help the debugging of a particular problem.
10068     -
10069     -config KMEMCHECK_PARTIAL_OK
10070     - bool "kmemcheck: allow partially uninitialized memory"
10071     - depends on KMEMCHECK
10072     - default y
10073     - help
10074     - This option works around certain GCC optimizations that produce
10075     - 32-bit reads from 16-bit variables where the upper 16 bits are
10076     - thrown away afterwards. This may of course also hide some real
10077     - bugs.
10078     -
10079     -config KMEMCHECK_BITOPS_OK
10080     - bool "kmemcheck: allow bit-field manipulation"
10081     - depends on KMEMCHECK
10082     - default n
10083     - help
10084     - This option silences warnings that would be generated for bit-field
10085     - accesses where not all the bits are initialized at the same time.
10086     - This may also hide some real bugs.
10087     -
10088     -endif
10089     diff --git a/lib/swiotlb.c b/lib/swiotlb.c
10090     index 8c6c83ef57a4..20df2fd9b150 100644
10091     --- a/lib/swiotlb.c
10092     +++ b/lib/swiotlb.c
10093     @@ -585,7 +585,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
10094    
10095     not_found:
10096     spin_unlock_irqrestore(&io_tlb_lock, flags);
10097     - if (printk_ratelimit())
10098     + if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
10099     dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
10100     return SWIOTLB_MAP_ERROR;
10101     found:
10102     @@ -712,6 +712,7 @@ void *
10103     swiotlb_alloc_coherent(struct device *hwdev, size_t size,
10104     dma_addr_t *dma_handle, gfp_t flags)
10105     {
10106     + bool warn = !(flags & __GFP_NOWARN);
10107     dma_addr_t dev_addr;
10108     void *ret;
10109     int order = get_order(size);
10110     @@ -737,8 +738,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
10111     * GFP_DMA memory; fall back on map_single(), which
10112     * will grab memory from the lowest available address range.
10113     */
10114     - phys_addr_t paddr = map_single(hwdev, 0, size,
10115     - DMA_FROM_DEVICE, 0);
10116     + phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE,
10117     + warn ? 0 : DMA_ATTR_NO_WARN);
10118     if (paddr == SWIOTLB_MAP_ERROR)
10119     goto err_warn;
10120    
10121     @@ -768,9 +769,11 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
10122     return ret;
10123    
10124     err_warn:
10125     - pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
10126     - dev_name(hwdev), size);
10127     - dump_stack();
10128     + if (warn && printk_ratelimit()) {
10129     + pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
10130     + dev_name(hwdev), size);
10131     + dump_stack();
10132     + }
10133    
10134     return NULL;
10135     }
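
The swiotlb hunks above thread a caller's __GFP_NOWARN through to the bounce-buffer mapping as DMA_ATTR_NO_WARN, and rate-limit the coherent-allocation warning, so drivers that expect and handle allocation failure no longer flood the log. A toy illustration of the flag translation (the bit values are made up; only the names come from the kernel):

#include <stdio.h>

#define __GFP_NOWARN     0x200u      /* illustrative bit values only */
#define DMA_ATTR_NO_WARN 0x100u

static unsigned int gfp_to_dma_attrs(unsigned int gfp_flags)
{
    /* quiet allocations stay quiet across the map_single() fallback */
    return (gfp_flags & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0u;
}

int main(void)
{
    printf("%#x %#x\n", gfp_to_dma_attrs(0), gfp_to_dma_attrs(__GFP_NOWARN));
    return 0;
}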
10136     diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
10137     index 5b0adf1435de..e5e606ee5f71 100644
10138     --- a/mm/Kconfig.debug
10139     +++ b/mm/Kconfig.debug
10140     @@ -11,7 +11,6 @@ config DEBUG_PAGEALLOC
10141     bool "Debug page memory allocations"
10142     depends on DEBUG_KERNEL
10143     depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
10144     - depends on !KMEMCHECK
10145     select PAGE_EXTENSION
10146     select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
10147     ---help---
10148     diff --git a/mm/Makefile b/mm/Makefile
10149     index 4659b93cba43..e7ebd176fb93 100644
10150     --- a/mm/Makefile
10151     +++ b/mm/Makefile
10152     @@ -17,7 +17,6 @@ KCOV_INSTRUMENT_slub.o := n
10153     KCOV_INSTRUMENT_page_alloc.o := n
10154     KCOV_INSTRUMENT_debug-pagealloc.o := n
10155     KCOV_INSTRUMENT_kmemleak.o := n
10156     -KCOV_INSTRUMENT_kmemcheck.o := n
10157     KCOV_INSTRUMENT_memcontrol.o := n
10158     KCOV_INSTRUMENT_mmzone.o := n
10159     KCOV_INSTRUMENT_vmstat.o := n
10160     @@ -70,7 +69,6 @@ obj-$(CONFIG_KSM) += ksm.o
10161     obj-$(CONFIG_PAGE_POISONING) += page_poison.o
10162     obj-$(CONFIG_SLAB) += slab.o
10163     obj-$(CONFIG_SLUB) += slub.o
10164     -obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
10165     obj-$(CONFIG_KASAN) += kasan/
10166     obj-$(CONFIG_FAILSLAB) += failslab.o
10167     obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
10168     diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
10169     deleted file mode 100644
10170     index 800d64b854ea..000000000000
10171     --- a/mm/kmemcheck.c
10172     +++ /dev/null
10173     @@ -1,126 +0,0 @@
10174     -// SPDX-License-Identifier: GPL-2.0
10175     -#include <linux/gfp.h>
10176     -#include <linux/mm_types.h>
10177     -#include <linux/mm.h>
10178     -#include <linux/slab.h>
10179     -#include "slab.h"
10180     -#include <linux/kmemcheck.h>
10181     -
10182     -void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
10183     -{
10184     - struct page *shadow;
10185     - int pages;
10186     - int i;
10187     -
10188     - pages = 1 << order;
10189     -
10190     - /*
10191     - * With kmemcheck enabled, we need to allocate a memory area for the
10192     - * shadow bits as well.
10193     - */
10194     - shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
10195     - if (!shadow) {
10196     - if (printk_ratelimit())
10197     - pr_err("kmemcheck: failed to allocate shadow bitmap\n");
10198     - return;
10199     - }
10200     -
10201     - for(i = 0; i < pages; ++i)
10202     - page[i].shadow = page_address(&shadow[i]);
10203     -
10204     - /*
10205     - * Mark it as non-present for the MMU so that our accesses to
10206     - * this memory will trigger a page fault and let us analyze
10207     - * the memory accesses.
10208     - */
10209     - kmemcheck_hide_pages(page, pages);
10210     -}
10211     -
10212     -void kmemcheck_free_shadow(struct page *page, int order)
10213     -{
10214     - struct page *shadow;
10215     - int pages;
10216     - int i;
10217     -
10218     - if (!kmemcheck_page_is_tracked(page))
10219     - return;
10220     -
10221     - pages = 1 << order;
10222     -
10223     - kmemcheck_show_pages(page, pages);
10224     -
10225     - shadow = virt_to_page(page[0].shadow);
10226     -
10227     - for(i = 0; i < pages; ++i)
10228     - page[i].shadow = NULL;
10229     -
10230     - __free_pages(shadow, order);
10231     -}
10232     -
10233     -void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
10234     - size_t size)
10235     -{
10236     - if (unlikely(!object)) /* Skip object if allocation failed */
10237     - return;
10238     -
10239     - /*
10240     - * Has already been memset(), which initializes the shadow for us
10241     - * as well.
10242     - */
10243     - if (gfpflags & __GFP_ZERO)
10244     - return;
10245     -
10246     - /* No need to initialize the shadow of a non-tracked slab. */
10247     - if (s->flags & SLAB_NOTRACK)
10248     - return;
10249     -
10250     - if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
10251     - /*
10252     - * Allow notracked objects to be allocated from
10253     - * tracked caches. Note however that these objects
10254     - * will still get page faults on access, they just
10255     - * won't ever be flagged as uninitialized. If page
10256     - * faults are not acceptable, the slab cache itself
10257     - * should be marked NOTRACK.
10258     - */
10259     - kmemcheck_mark_initialized(object, size);
10260     - } else if (!s->ctor) {
10261     - /*
10262     - * New objects should be marked uninitialized before
10263     - * they're returned to the caller.
10264     - */
10265     - kmemcheck_mark_uninitialized(object, size);
10266     - }
10267     -}
10268     -
10269     -void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
10270     -{
10271     - /* TODO: RCU freeing is unsupported for now; hide false positives. */
10272     - if (!s->ctor && !(s->flags & SLAB_TYPESAFE_BY_RCU))
10273     - kmemcheck_mark_freed(object, size);
10274     -}
10275     -
10276     -void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
10277     - gfp_t gfpflags)
10278     -{
10279     - int pages;
10280     -
10281     - if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
10282     - return;
10283     -
10284     - pages = 1 << order;
10285     -
10286     - /*
10287     - * NOTE: We choose to track GFP_ZERO pages too; in fact, they
10288     - * can become uninitialized by copying uninitialized memory
10289     - * into them.
10290     - */
10291     -
10292     - /* XXX: Can use zone->node for node? */
10293     - kmemcheck_alloc_shadow(page, order, gfpflags, -1);
10294     -
10295     - if (gfpflags & __GFP_ZERO)
10296     - kmemcheck_mark_initialized_pages(page, pages);
10297     - else
10298     - kmemcheck_mark_uninitialized_pages(page, pages);
10299     -}
10300     diff --git a/mm/kmemleak.c b/mm/kmemleak.c
10301     index a1ba553816eb..bd1374f402cd 100644
10302     --- a/mm/kmemleak.c
10303     +++ b/mm/kmemleak.c
10304     @@ -110,7 +110,6 @@
10305     #include <linux/atomic.h>
10306    
10307     #include <linux/kasan.h>
10308     -#include <linux/kmemcheck.h>
10309     #include <linux/kmemleak.h>
10310     #include <linux/memory_hotplug.h>
10311    
10312     @@ -1238,9 +1237,6 @@ static bool update_checksum(struct kmemleak_object *object)
10313     {
10314     u32 old_csum = object->checksum;
10315    
10316     - if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
10317     - return false;
10318     -
10319     kasan_disable_current();
10320     object->checksum = crc32(0, (void *)object->pointer, object->size);
10321     kasan_enable_current();
10322     @@ -1314,11 +1310,6 @@ static void scan_block(void *_start, void *_end,
10323     if (scan_should_stop())
10324     break;
10325    
10326     - /* don't scan uninitialized memory */
10327     - if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
10328     - BYTES_PER_POINTER))
10329     - continue;
10330     -
10331     kasan_disable_current();
10332     pointer = *ptr;
10333     kasan_enable_current();
10334     diff --git a/mm/memory-failure.c b/mm/memory-failure.c
10335     index 88366626c0b7..1cd3b3569af8 100644
10336     --- a/mm/memory-failure.c
10337     +++ b/mm/memory-failure.c
10338     @@ -1146,8 +1146,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
10339     return 0;
10340     }
10341    
10342     - arch_unmap_kpfn(pfn);
10343     -
10344     orig_head = hpage = compound_head(p);
10345     num_poisoned_pages_inc();
10346    
10347     diff --git a/mm/memory.c b/mm/memory.c
10348     index a728bed16c20..fc7779165dcf 100644
10349     --- a/mm/memory.c
10350     +++ b/mm/memory.c
10351     @@ -81,7 +81,7 @@
10352    
10353     #include "internal.h"
10354    
10355     -#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
10356     +#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
10357     #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
10358     #endif
10359    
10360     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
10361     index 2de080003693..6627caeeaf82 100644
10362     --- a/mm/page_alloc.c
10363     +++ b/mm/page_alloc.c
10364     @@ -24,7 +24,6 @@
10365     #include <linux/memblock.h>
10366     #include <linux/compiler.h>
10367     #include <linux/kernel.h>
10368     -#include <linux/kmemcheck.h>
10369     #include <linux/kasan.h>
10370     #include <linux/module.h>
10371     #include <linux/suspend.h>
10372     @@ -1022,7 +1021,6 @@ static __always_inline bool free_pages_prepare(struct page *page,
10373     VM_BUG_ON_PAGE(PageTail(page), page);
10374    
10375     trace_mm_page_free(page, order);
10376     - kmemcheck_free_shadow(page, order);
10377    
10378     /*
10379     * Check tail pages before head page information is cleared to
10380     @@ -2674,15 +2672,6 @@ void split_page(struct page *page, unsigned int order)
10381     VM_BUG_ON_PAGE(PageCompound(page), page);
10382     VM_BUG_ON_PAGE(!page_count(page), page);
10383    
10384     -#ifdef CONFIG_KMEMCHECK
10385     - /*
10386     - * Split shadow pages too, because free(page[0]) would
10387     - * otherwise free the whole shadow.
10388     - */
10389     - if (kmemcheck_page_is_tracked(page))
10390     - split_page(virt_to_page(page[0].shadow), order);
10391     -#endif
10392     -
10393     for (i = 1; i < (1 << order); i++)
10394     set_page_refcounted(page + i);
10395     split_page_owner(page, order);
10396     @@ -4228,9 +4217,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
10397     page = NULL;
10398     }
10399    
10400     - if (kmemcheck_enabled && page)
10401     - kmemcheck_pagealloc_alloc(page, order, gfp_mask);
10402     -
10403     trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
10404    
10405     return page;
10406     diff --git a/mm/slab.c b/mm/slab.c
10407     index b7095884fd93..966839a1ac2c 100644
10408     --- a/mm/slab.c
10409     +++ b/mm/slab.c
10410     @@ -114,7 +114,6 @@
10411     #include <linux/rtmutex.h>
10412     #include <linux/reciprocal_div.h>
10413     #include <linux/debugobjects.h>
10414     -#include <linux/kmemcheck.h>
10415     #include <linux/memory.h>
10416     #include <linux/prefetch.h>
10417     #include <linux/sched/task_stack.h>
10418     @@ -1413,7 +1412,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
10419     if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
10420     flags |= __GFP_RECLAIMABLE;
10421    
10422     - page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
10423     + page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
10424     if (!page) {
10425     slab_out_of_memory(cachep, flags, nodeid);
10426     return NULL;
10427     @@ -1435,15 +1434,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
10428     if (sk_memalloc_socks() && page_is_pfmemalloc(page))
10429     SetPageSlabPfmemalloc(page);
10430    
10431     - if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
10432     - kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
10433     -
10434     - if (cachep->ctor)
10435     - kmemcheck_mark_uninitialized_pages(page, nr_pages);
10436     - else
10437     - kmemcheck_mark_unallocated_pages(page, nr_pages);
10438     - }
10439     -
10440     return page;
10441     }
10442    
10443     @@ -1455,8 +1445,6 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
10444     int order = cachep->gfporder;
10445     unsigned long nr_freed = (1 << order);
10446    
10447     - kmemcheck_free_shadow(page, order);
10448     -
10449     if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
10450     mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
10451     else
10452     @@ -3516,8 +3504,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
10453     kmemleak_free_recursive(objp, cachep->flags);
10454     objp = cache_free_debugcheck(cachep, objp, caller);
10455    
10456     - kmemcheck_slab_free(cachep, objp, cachep->object_size);
10457     -
10458     /*
10459     * Skip calling cache_free_alien() when the platform is not numa.
10460     * This will avoid cache misses that happen while accessing slabp (which
10461     diff --git a/mm/slab.h b/mm/slab.h
10462     index 86d7c7d860f9..485d9fbb8802 100644
10463     --- a/mm/slab.h
10464     +++ b/mm/slab.h
10465     @@ -40,7 +40,6 @@ struct kmem_cache {
10466    
10467     #include <linux/memcontrol.h>
10468     #include <linux/fault-inject.h>
10469     -#include <linux/kmemcheck.h>
10470     #include <linux/kasan.h>
10471     #include <linux/kmemleak.h>
10472     #include <linux/random.h>
10473     @@ -142,10 +141,10 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
10474     #if defined(CONFIG_SLAB)
10475     #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
10476     SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
10477     - SLAB_NOTRACK | SLAB_ACCOUNT)
10478     + SLAB_ACCOUNT)
10479     #elif defined(CONFIG_SLUB)
10480     #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
10481     - SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
10482     + SLAB_TEMPORARY | SLAB_ACCOUNT)
10483     #else
10484     #define SLAB_CACHE_FLAGS (0)
10485     #endif
10486     @@ -164,7 +163,6 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
10487     SLAB_NOLEAKTRACE | \
10488     SLAB_RECLAIM_ACCOUNT | \
10489     SLAB_TEMPORARY | \
10490     - SLAB_NOTRACK | \
10491     SLAB_ACCOUNT)
10492    
10493     int __kmem_cache_shutdown(struct kmem_cache *);
10494     @@ -439,7 +437,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
10495     for (i = 0; i < size; i++) {
10496     void *object = p[i];
10497    
10498     - kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
10499     kmemleak_alloc_recursive(object, s->object_size, 1,
10500     s->flags, flags);
10501     kasan_slab_alloc(s, object, flags);
10502     diff --git a/mm/slab_common.c b/mm/slab_common.c
10503     index 0d7fe71ff5e4..65212caa1f2a 100644
10504     --- a/mm/slab_common.c
10505     +++ b/mm/slab_common.c
10506     @@ -44,7 +44,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
10507     SLAB_FAILSLAB | SLAB_KASAN)
10508    
10509     #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
10510     - SLAB_NOTRACK | SLAB_ACCOUNT)
10511     + SLAB_ACCOUNT)
10512    
10513     /*
10514     * Merge control. If this is set then no merging of slab caches will occur.
10515     diff --git a/mm/slub.c b/mm/slub.c
10516     index 8e1c027a30f4..41c01690d116 100644
10517     --- a/mm/slub.c
10518     +++ b/mm/slub.c
10519     @@ -22,7 +22,6 @@
10520     #include <linux/notifier.h>
10521     #include <linux/seq_file.h>
10522     #include <linux/kasan.h>
10523     -#include <linux/kmemcheck.h>
10524     #include <linux/cpu.h>
10525     #include <linux/cpuset.h>
10526     #include <linux/mempolicy.h>
10527     @@ -1370,12 +1369,11 @@ static inline void *slab_free_hook(struct kmem_cache *s, void *x)
10528     * So in order to make the debug calls that expect irqs to be
10529     * disabled we need to disable interrupts temporarily.
10530     */
10531     -#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
10532     +#ifdef CONFIG_LOCKDEP
10533     {
10534     unsigned long flags;
10535    
10536     local_irq_save(flags);
10537     - kmemcheck_slab_free(s, x, s->object_size);
10538     debug_check_no_locks_freed(x, s->object_size);
10539     local_irq_restore(flags);
10540     }
10541     @@ -1399,8 +1397,7 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s,
10542     * Compiler cannot detect this function can be removed if slab_free_hook()
10543     * evaluates to nothing. Thus, catch all relevant config debug options here.
10544     */
10545     -#if defined(CONFIG_KMEMCHECK) || \
10546     - defined(CONFIG_LOCKDEP) || \
10547     +#if defined(CONFIG_LOCKDEP) || \
10548     defined(CONFIG_DEBUG_KMEMLEAK) || \
10549     defined(CONFIG_DEBUG_OBJECTS_FREE) || \
10550     defined(CONFIG_KASAN)
10551     @@ -1436,8 +1433,6 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
10552     struct page *page;
10553     int order = oo_order(oo);
10554    
10555     - flags |= __GFP_NOTRACK;
10556     -
10557     if (node == NUMA_NO_NODE)
10558     page = alloc_pages(flags, order);
10559     else
10560     @@ -1596,22 +1591,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
10561     stat(s, ORDER_FALLBACK);
10562     }
10563    
10564     - if (kmemcheck_enabled &&
10565     - !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
10566     - int pages = 1 << oo_order(oo);
10567     -
10568     - kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
10569     -
10570     - /*
10571     - * Objects from caches that have a constructor don't get
10572     - * cleared when they're allocated, so we need to do it here.
10573     - */
10574     - if (s->ctor)
10575     - kmemcheck_mark_uninitialized_pages(page, pages);
10576     - else
10577     - kmemcheck_mark_unallocated_pages(page, pages);
10578     - }
10579     -
10580     page->objects = oo_objects(oo);
10581    
10582     order = compound_order(page);
10583     @@ -1687,8 +1666,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
10584     check_object(s, page, p, SLUB_RED_INACTIVE);
10585     }
10586    
10587     - kmemcheck_free_shadow(page, compound_order(page));
10588     -
10589     mod_lruvec_page_state(page,
10590     (s->flags & SLAB_RECLAIM_ACCOUNT) ?
10591     NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
10592     @@ -3792,7 +3769,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
10593     struct page *page;
10594     void *ptr = NULL;
10595    
10596     - flags |= __GFP_COMP | __GFP_NOTRACK;
10597     + flags |= __GFP_COMP;
10598     page = alloc_pages_node(node, flags, get_order(size));
10599     if (page)
10600     ptr = page_address(page);
10601     @@ -5655,8 +5632,6 @@ static char *create_unique_id(struct kmem_cache *s)
10602     *p++ = 'a';
10603     if (s->flags & SLAB_CONSISTENCY_CHECKS)
10604     *p++ = 'F';
10605     - if (!(s->flags & SLAB_NOTRACK))
10606     - *p++ = 't';
10607     if (s->flags & SLAB_ACCOUNT)
10608     *p++ = 'A';
10609     if (p != name + 1)
10610     diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
10611     index f3a4efcf1456..3aa5a93ad107 100644
10612     --- a/net/9p/trans_virtio.c
10613     +++ b/net/9p/trans_virtio.c
10614     @@ -160,7 +160,8 @@ static void req_done(struct virtqueue *vq)
10615     spin_unlock_irqrestore(&chan->lock, flags);
10616     /* Wakeup if anyone waiting for VirtIO ring space. */
10617     wake_up(chan->vc_wq);
10618     - p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
10619     + if (len)
10620     + p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
10621     }
10622     }
10623    
10624     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
10625     index 15fa5baa8fae..cc811add68c6 100644
10626     --- a/net/core/skbuff.c
10627     +++ b/net/core/skbuff.c
10628     @@ -41,7 +41,6 @@
10629     #include <linux/module.h>
10630     #include <linux/types.h>
10631     #include <linux/kernel.h>
10632     -#include <linux/kmemcheck.h>
10633     #include <linux/mm.h>
10634     #include <linux/interrupt.h>
10635     #include <linux/in.h>
10636     @@ -234,14 +233,12 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
10637     shinfo = skb_shinfo(skb);
10638     memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
10639     atomic_set(&shinfo->dataref, 1);
10640     - kmemcheck_annotate_variable(shinfo->destructor_arg);
10641    
10642     if (flags & SKB_ALLOC_FCLONE) {
10643     struct sk_buff_fclones *fclones;
10644    
10645     fclones = container_of(skb, struct sk_buff_fclones, skb1);
10646    
10647     - kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
10648     skb->fclone = SKB_FCLONE_ORIG;
10649     refcount_set(&fclones->fclone_ref, 1);
10650    
10651     @@ -301,7 +298,6 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
10652     shinfo = skb_shinfo(skb);
10653     memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
10654     atomic_set(&shinfo->dataref, 1);
10655     - kmemcheck_annotate_variable(shinfo->destructor_arg);
10656    
10657     return skb;
10658     }
10659     @@ -1284,7 +1280,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
10660     if (!n)
10661     return NULL;
10662    
10663     - kmemcheck_annotate_bitfield(n, flags1);
10664     n->fclone = SKB_FCLONE_UNAVAILABLE;
10665     }
10666    
10667     diff --git a/net/core/sock.c b/net/core/sock.c
10668     index beb1e299fed3..ec6eb546b228 100644
10669     --- a/net/core/sock.c
10670     +++ b/net/core/sock.c
10671     @@ -1469,8 +1469,6 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
10672     sk = kmalloc(prot->obj_size, priority);
10673    
10674     if (sk != NULL) {
10675     - kmemcheck_annotate_bitfield(sk, flags);
10676     -
10677     if (security_sk_alloc(sk, family, priority))
10678     goto out_free;
10679    
10680     diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
10681     index 5b039159e67a..d451b9f19b59 100644
10682     --- a/net/ipv4/inet_timewait_sock.c
10683     +++ b/net/ipv4/inet_timewait_sock.c
10684     @@ -9,7 +9,6 @@
10685     */
10686    
10687     #include <linux/kernel.h>
10688     -#include <linux/kmemcheck.h>
10689     #include <linux/slab.h>
10690     #include <linux/module.h>
10691     #include <net/inet_hashtables.h>
10692     @@ -167,8 +166,6 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
10693     if (tw) {
10694     const struct inet_sock *inet = inet_sk(sk);
10695    
10696     - kmemcheck_annotate_bitfield(tw, flags);
10697     -
10698     tw->tw_dr = dr;
10699     /* Give us an identity. */
10700     tw->tw_daddr = inet->inet_daddr;
10701     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
10702     index ff48ac654e5a..d9d215e27b8a 100644
10703     --- a/net/ipv4/tcp_input.c
10704     +++ b/net/ipv4/tcp_input.c
10705     @@ -6204,7 +6204,6 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
10706     if (req) {
10707     struct inet_request_sock *ireq = inet_rsk(req);
10708    
10709     - kmemcheck_annotate_bitfield(ireq, flags);
10710     ireq->ireq_opt = NULL;
10711     #if IS_ENABLED(CONFIG_IPV6)
10712     ireq->pktopts = NULL;
10713     diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
10714     index c5b9ce41d66f..aee385eb72e7 100644
10715     --- a/net/mpls/af_mpls.c
10716     +++ b/net/mpls/af_mpls.c
10717     @@ -8,6 +8,7 @@
10718     #include <linux/ipv6.h>
10719     #include <linux/mpls.h>
10720     #include <linux/netconf.h>
10721     +#include <linux/nospec.h>
10722     #include <linux/vmalloc.h>
10723     #include <linux/percpu.h>
10724     #include <net/ip.h>
10725     @@ -904,24 +905,27 @@ static int mpls_nh_build_multi(struct mpls_route_config *cfg,
10726     return err;
10727     }
10728    
10729     -static bool mpls_label_ok(struct net *net, unsigned int index,
10730     +static bool mpls_label_ok(struct net *net, unsigned int *index,
10731     struct netlink_ext_ack *extack)
10732     {
10733     + bool is_ok = true;
10734     +
10735     /* Reserved labels may not be set */
10736     - if (index < MPLS_LABEL_FIRST_UNRESERVED) {
10737     + if (*index < MPLS_LABEL_FIRST_UNRESERVED) {
10738     NL_SET_ERR_MSG(extack,
10739     "Invalid label - must be MPLS_LABEL_FIRST_UNRESERVED or higher");
10740     - return false;
10741     + is_ok = false;
10742     }
10743    
10744     /* The full 20 bit range may not be supported. */
10745     - if (index >= net->mpls.platform_labels) {
10746     + if (is_ok && *index >= net->mpls.platform_labels) {
10747     NL_SET_ERR_MSG(extack,
10748     "Label >= configured maximum in platform_labels");
10749     - return false;
10750     + is_ok = false;
10751     }
10752    
10753     - return true;
10754     + *index = array_index_nospec(*index, net->mpls.platform_labels);
10755     + return is_ok;
10756     }
10757    
10758     static int mpls_route_add(struct mpls_route_config *cfg,
10759     @@ -944,7 +948,7 @@ static int mpls_route_add(struct mpls_route_config *cfg,
10760     index = find_free_label(net);
10761     }
10762    
10763     - if (!mpls_label_ok(net, index, extack))
10764     + if (!mpls_label_ok(net, &index, extack))
10765     goto errout;
10766    
10767     /* Append makes no sense with mpls */
10768     @@ -1021,7 +1025,7 @@ static int mpls_route_del(struct mpls_route_config *cfg,
10769    
10770     index = cfg->rc_label;
10771    
10772     - if (!mpls_label_ok(net, index, extack))
10773     + if (!mpls_label_ok(net, &index, extack))
10774     goto errout;
10775    
10776     mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
10777     @@ -1779,7 +1783,7 @@ static int rtm_to_route_config(struct sk_buff *skb,
10778     goto errout;
10779    
10780     if (!mpls_label_ok(cfg->rc_nlinfo.nl_net,
10781     - cfg->rc_label, extack))
10782     + &cfg->rc_label, extack))
10783     goto errout;
10784     break;
10785     }
10786     @@ -2106,7 +2110,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
10787     goto errout;
10788     }
10789    
10790     - if (!mpls_label_ok(net, in_label, extack)) {
10791     + if (!mpls_label_ok(net, &in_label, extack)) {
10792     err = -EINVAL;
10793     goto errout;
10794     }
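
The MPLS hunks above are a Spectre-v1 hardening: the label index arrives from netlink, and the bounds check in mpls_label_ok() can be bypassed speculatively, so the index is now passed by pointer and clamped with array_index_nospec() before any table access. A sketch of the check-then-clamp pattern; the stand-in helper below only mimics the architectural result (the real kernel helper is branchless, using a mask the CPU cannot mispredict):

#include <stddef.h>

/* stand-in for the kernel's branchless helper: out of range maps to 0 */
static size_t array_index_nospec(size_t index, size_t size)
{
    return index < size ? index : 0;
}

static int lookup(const int *table, size_t nr_entries, size_t index)
{
    if (index >= nr_entries)
        return -1;                                 /* architectural reject */
    index = array_index_nospec(index, nr_entries); /* speculative clamp */
    return table[index];
}

int main(void)
{
    static const int table[] = { 10, 20, 30 };
    return lookup(table, 3, 7) == -1 ? 0 : 1;
}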
10795     diff --git a/net/socket.c b/net/socket.c
10796     index d894c7c5fa54..43d2f17f5eea 100644
10797     --- a/net/socket.c
10798     +++ b/net/socket.c
10799     @@ -568,7 +568,6 @@ struct socket *sock_alloc(void)
10800    
10801     sock = SOCKET_I(inode);
10802    
10803     - kmemcheck_annotate_bitfield(sock, type);
10804     inode->i_ino = get_next_ino();
10805     inode->i_mode = S_IFSOCK | S_IRWXUGO;
10806     inode->i_uid = current_fsuid();
10807     diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
10808     index f1889f4d4803..491ae9fc561f 100644
10809     --- a/net/sunrpc/xprtrdma/rpc_rdma.c
10810     +++ b/net/sunrpc/xprtrdma/rpc_rdma.c
10811     @@ -142,7 +142,7 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
10812     if (xdr->page_len) {
10813     remaining = xdr->page_len;
10814     offset = offset_in_page(xdr->page_base);
10815     - count = 0;
10816     + count = RPCRDMA_MIN_SEND_SGES;
10817     while (remaining) {
10818     remaining -= min_t(unsigned int,
10819     PAGE_SIZE - offset, remaining);
10820     diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
10821     index 11a1fbf7e59e..9e8e1de19b2e 100644
10822     --- a/net/sunrpc/xprtrdma/verbs.c
10823     +++ b/net/sunrpc/xprtrdma/verbs.c
10824     @@ -523,7 +523,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
10825     pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
10826     return -ENOMEM;
10827     }
10828     - ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES;
10829     + ia->ri_max_send_sges = max_sge;
10830    
10831     if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
10832     dprintk("RPC: %s: insufficient wqe's available\n",
10833     @@ -1331,6 +1331,9 @@ __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
10834     static void
10835     rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
10836     {
10837     + if (!rb)
10838     + return;
10839     +
10840     if (!rpcrdma_regbuf_is_mapped(rb))
10841     return;
10842    
10843     @@ -1346,9 +1349,6 @@ rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
10844     void
10845     rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
10846     {
10847     - if (!rb)
10848     - return;
10849     -
10850     rpcrdma_dma_unmap_regbuf(rb);
10851     kfree(rb);
10852     }
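
The xprtrdma hunks above rebalance the send-SGE accounting: the inline-eligibility estimate now starts from RPCRDMA_MIN_SEND_SGES instead of zero, and ri_max_send_sges stores the device limit directly, so both sides compare the same budget; the regbuf change simply moves the NULL guard into the unmap helper so every caller is covered. A rough sketch of the budgeting comparison (the RPCRDMA_MIN_SEND_SGES value and the device numbers are assumptions for illustration):

#include <stdio.h>

#define RPCRDMA_MIN_SEND_SGES 3u  /* assumed: header plus head/tail iovecs */

int main(void)
{
    unsigned int max_sge = 6;     /* what the HCA advertises (made up) */
    unsigned int page_sges = 2;   /* SGEs needed for the page list */
    unsigned int need = RPCRDMA_MIN_SEND_SGES + page_sges;

    printf("inline send %s\n", need <= max_sge ? "fits" : "needs chunking");
    return 0;
}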
10853     diff --git a/scripts/kernel-doc b/scripts/kernel-doc
10854     index 9d3eafea58f0..8323ff9dec71 100755
10855     --- a/scripts/kernel-doc
10856     +++ b/scripts/kernel-doc
10857     @@ -2182,8 +2182,6 @@ sub dump_struct($$) {
10858     # strip comments:
10859     $members =~ s/\/\*.*?\*\///gos;
10860     $nested =~ s/\/\*.*?\*\///gos;
10861     - # strip kmemcheck_bitfield_{begin,end}.*;
10862     - $members =~ s/kmemcheck_bitfield_.*?;//gos;
10863     # strip attributes
10864     $members =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i;
10865     $members =~ s/__aligned\s*\([^;]*\)//gos;
10866     diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
10867     index ac30fc1ab98b..dea11d1babf5 100644
10868     --- a/sound/core/seq/seq_clientmgr.c
10869     +++ b/sound/core/seq/seq_clientmgr.c
10870     @@ -999,7 +999,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
10871     {
10872     struct snd_seq_client *client = file->private_data;
10873     int written = 0, len;
10874     - int err = -EINVAL;
10875     + int err;
10876     struct snd_seq_event event;
10877    
10878     if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
10879     @@ -1014,11 +1014,15 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
10880    
10881     /* allocate the pool now if the pool is not allocated yet */
10882     if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
10883     - if (snd_seq_pool_init(client->pool) < 0)
10884     + mutex_lock(&client->ioctl_mutex);
10885     + err = snd_seq_pool_init(client->pool);
10886     + mutex_unlock(&client->ioctl_mutex);
10887     + if (err < 0)
10888     return -ENOMEM;
10889     }
10890    
10891     /* only process whole events */
10892     + err = -EINVAL;
10893     while (count >= sizeof(struct snd_seq_event)) {
10894     /* Read in the event header from the user */
10895     len = sizeof(event);
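
The sequencer hunk above closes a race on the client pool: snd_seq_pool_init() was called without a lock, so a write() racing an ioctl that resizes the pool could initialize it concurrently; taking client->ioctl_mutex around the lazy init serializes the two paths (err now gets its -EINVAL default right before the event loop instead of at declaration, since the pool-init path reuses the variable). The shape of the fix, sketched with pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t ioctl_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *pool;

static int pool_init_once(size_t size)
{
    int err = 0;

    pthread_mutex_lock(&ioctl_mutex);
    if (!pool) {                     /* re-check under the lock */
        pool = malloc(size);
        if (!pool)
            err = -1;
    }
    pthread_mutex_unlock(&ioctl_mutex);
    return err;
}

int main(void)
{
    return pool_init_once(4096);
}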
10896     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
10897     index b2d039537d5e..b7acffdf16a4 100644
10898     --- a/sound/pci/hda/patch_realtek.c
10899     +++ b/sound/pci/hda/patch_realtek.c
10900     @@ -3355,6 +3355,19 @@ static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
10901     spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
10902     }
10903    
10904     +static void alc269_fixup_pincfg_U7x7_headset_mic(struct hda_codec *codec,
10905     + const struct hda_fixup *fix,
10906     + int action)
10907     +{
10908     + unsigned int cfg_headphone = snd_hda_codec_get_pincfg(codec, 0x21);
10909     + unsigned int cfg_headset_mic = snd_hda_codec_get_pincfg(codec, 0x19);
10910     +
10911     + if (cfg_headphone && cfg_headset_mic == 0x411111f0)
10912     + snd_hda_codec_set_pincfg(codec, 0x19,
10913     + (cfg_headphone & ~AC_DEFCFG_DEVICE) |
10914     + (AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT));
10915     +}
10916     +
10917     static void alc269_fixup_hweq(struct hda_codec *codec,
10918     const struct hda_fixup *fix, int action)
10919     {
10920     @@ -4827,6 +4840,28 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
10921     }
10922     }
10923    
10924     +static void alc_fixup_tpt470_dock(struct hda_codec *codec,
10925     + const struct hda_fixup *fix, int action)
10926     +{
10927     + static const struct hda_pintbl pincfgs[] = {
10928     + { 0x17, 0x21211010 }, /* dock headphone */
10929     + { 0x19, 0x21a11010 }, /* dock mic */
10930     + { }
10931     + };
10932     + struct alc_spec *spec = codec->spec;
10933     +
10934     + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
10935     + spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
10936     + /* Enable DOCK device */
10937     + snd_hda_codec_write(codec, 0x17, 0,
10938     + AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
10939     + /* Enable DOCK device */
10940     + snd_hda_codec_write(codec, 0x19, 0,
10941     + AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
10942     + snd_hda_apply_pincfgs(codec, pincfgs);
10943     + }
10944     +}
10945     +
10946     static void alc_shutup_dell_xps13(struct hda_codec *codec)
10947     {
10948     struct alc_spec *spec = codec->spec;
10949     @@ -5206,6 +5241,7 @@ enum {
10950     ALC269_FIXUP_LIFEBOOK_EXTMIC,
10951     ALC269_FIXUP_LIFEBOOK_HP_PIN,
10952     ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
10953     + ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC,
10954     ALC269_FIXUP_AMIC,
10955     ALC269_FIXUP_DMIC,
10956     ALC269VB_FIXUP_AMIC,
10957     @@ -5301,6 +5337,7 @@ enum {
10958     ALC700_FIXUP_INTEL_REFERENCE,
10959     ALC274_FIXUP_DELL_BIND_DACS,
10960     ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
10961     + ALC298_FIXUP_TPT470_DOCK,
10962     };
10963    
10964     static const struct hda_fixup alc269_fixups[] = {
10965     @@ -5411,6 +5448,10 @@ static const struct hda_fixup alc269_fixups[] = {
10966     .type = HDA_FIXUP_FUNC,
10967     .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
10968     },
10969     + [ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC] = {
10970     + .type = HDA_FIXUP_FUNC,
10971     + .v.func = alc269_fixup_pincfg_U7x7_headset_mic,
10972     + },
10973     [ALC269_FIXUP_AMIC] = {
10974     .type = HDA_FIXUP_PINS,
10975     .v.pins = (const struct hda_pintbl[]) {
10976     @@ -6126,6 +6167,12 @@ static const struct hda_fixup alc269_fixups[] = {
10977     .chained = true,
10978     .chain_id = ALC274_FIXUP_DELL_BIND_DACS
10979     },
10980     + [ALC298_FIXUP_TPT470_DOCK] = {
10981     + .type = HDA_FIXUP_FUNC,
10982     + .v.func = alc_fixup_tpt470_dock,
10983     + .chained = true,
10984     + .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
10985     + },
10986     };
10987    
10988     static const struct snd_pci_quirk alc269_fixup_tbl[] = {
10989     @@ -6176,6 +6223,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
10990     SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
10991     SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
10992     SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
10993     + SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
10994     + SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
10995     SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
10996     SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
10997     SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
10998     @@ -6277,6 +6326,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
10999     SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
11000     SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
11001     SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
11002     + SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
11003     SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
11004     SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
11005     SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
11006     @@ -6305,8 +6355,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
11007     SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
11008     SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
11009     SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
11010     + SND_PCI_QUIRK(0x17aa, 0x222d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
11011     + SND_PCI_QUIRK(0x17aa, 0x222e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
11012     SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
11013     SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
11014     + SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
11015     + SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
11016     + SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
11017     + SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
11018     + SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
11019     + SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
11020     SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
11021     SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
11022     SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
11023     @@ -6327,7 +6385,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
11024     SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
11025     SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
11026     SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
11027     + SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
11028     + SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
11029     + SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
11030     SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
11031     + SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
11032     + SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
11033     SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
11034     SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
11035     SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
11036     @@ -6584,6 +6647,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
11037     {0x12, 0xb7a60130},
11038     {0x14, 0x90170110},
11039     {0x21, 0x02211020}),
11040     + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
11041     + {0x12, 0x90a60130},
11042     + {0x14, 0x90170110},
11043     + {0x14, 0x01011020},
11044     + {0x21, 0x0221101f}),
11045     SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
11046     ALC256_STANDARD_PINS),
11047     SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC,
11048     @@ -6653,6 +6721,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
11049     {0x12, 0x90a60120},
11050     {0x14, 0x90170110},
11051     {0x21, 0x0321101f}),
11052     + SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
11053     + {0x12, 0xb7a60130},
11054     + {0x14, 0x90170110},
11055     + {0x21, 0x04211020}),
11056     SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
11057     ALC290_STANDARD_PINS,
11058     {0x15, 0x04211040},
11059     diff --git a/sound/soc/intel/common/sst-match-acpi.c b/sound/soc/intel/common/sst-match-acpi.c
11060     index 56d26f36a3cb..b4a929562218 100644
11061     --- a/sound/soc/intel/common/sst-match-acpi.c
11062     +++ b/sound/soc/intel/common/sst-match-acpi.c
11063     @@ -83,11 +83,9 @@ struct sst_acpi_mach *sst_acpi_find_machine(struct sst_acpi_mach *machines)
11064    
11065     for (mach = machines; mach->id[0]; mach++) {
11066     if (sst_acpi_check_hid(mach->id) == true) {
11067     - if (mach->machine_quirk == NULL)
11068     - return mach;
11069     -
11070     - if (mach->machine_quirk(mach) != NULL)
11071     - return mach;
11072     + if (mach->machine_quirk)
11073     + mach = mach->machine_quirk(mach);
11074     + return mach;
11075     }
11076     }
11077     return NULL;
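
The ACPI-match hunk above changes the quirk semantics: previously a quirk returning NULL made the loop fall through and keep scanning the table, whereas now the first HID match is authoritative and the quirk's verdict, including NULL, is returned as-is. A reduced sketch of the new lookup (the struct, harness, and HID string are ours):

#include <stddef.h>

struct mach {
    const char *id;
    struct mach *(*machine_quirk)(struct mach *);
};

static struct mach *find(struct mach *machines,
                         int (*hid_present)(const char *))
{
    struct mach *mach;

    for (mach = machines; mach->id; mach++) {
        if (!hid_present(mach->id))
            continue;
        if (mach->machine_quirk)
            mach = mach->machine_quirk(mach);
        return mach;          /* may be NULL: the quirk rejected the board */
    }
    return NULL;
}

static int always(const char *id) { (void)id; return 1; }

int main(void)
{
    struct mach table[] = { { "INT343A", NULL }, { NULL, NULL } };
    return find(table, always) == &table[0] ? 0 : 1;
}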
11078     diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
11079     index 75bce127d768..89efec891e68 100644
11080     --- a/sound/usb/mixer.c
11081     +++ b/sound/usb/mixer.c
11082     @@ -347,17 +347,20 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
11083     int validx, int *value_ret)
11084     {
11085     struct snd_usb_audio *chip = cval->head.mixer->chip;
11086     - unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */
11087     + /* enough space for one range */
11088     + unsigned char buf[sizeof(__u16) + 3 * sizeof(__u32)];
11089     unsigned char *val;
11090     - int idx = 0, ret, size;
11091     + int idx = 0, ret, val_size, size;
11092     __u8 bRequest;
11093    
11094     + val_size = uac2_ctl_value_size(cval->val_type);
11095     +
11096     if (request == UAC_GET_CUR) {
11097     bRequest = UAC2_CS_CUR;
11098     - size = uac2_ctl_value_size(cval->val_type);
11099     + size = val_size;
11100     } else {
11101     bRequest = UAC2_CS_RANGE;
11102     - size = sizeof(buf);
11103     + size = sizeof(__u16) + 3 * val_size;
11104     }
11105    
11106     memset(buf, 0, sizeof(buf));
11107     @@ -390,16 +393,17 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
11108     val = buf + sizeof(__u16);
11109     break;
11110     case UAC_GET_MAX:
11111     - val = buf + sizeof(__u16) * 2;
11112     + val = buf + sizeof(__u16) + val_size;
11113     break;
11114     case UAC_GET_RES:
11115     - val = buf + sizeof(__u16) * 3;
11116     + val = buf + sizeof(__u16) + val_size * 2;
11117     break;
11118     default:
11119     return -EINVAL;
11120     }
11121    
11122     - *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16)));
11123     + *value_ret = convert_signed_value(cval,
11124     + snd_usb_combine_bytes(val, val_size));
11125    
11126     return 0;
11127     }
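
The USB-audio hunk above sizes the UAC2 GET_RANGE reply by the control's value type rather than assuming 16-bit fields: the reply is a __u16 sub-range count followed by MIN/MAX/RES, each val_size bytes wide, so a 32-bit control needs offsets 2, 2+4 and 2+8, not 2, 4 and 6. The offset arithmetic on its own:

#include <stdio.h>

int main(void)
{
    unsigned int val_size = 4;           /* e.g. a 32-bit volume control */
    unsigned int off_min  = 2;           /* right after the __u16 count */
    unsigned int off_max  = 2 + val_size;
    unsigned int off_res  = 2 + 2 * val_size;

    printf("min@%u max@%u res@%u\n", off_min, off_max, off_res);
    return 0;
}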
11128     diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
11129     index b9c9a19f9588..3cbfae6604f9 100644
11130     --- a/sound/usb/pcm.c
11131     +++ b/sound/usb/pcm.c
11132     @@ -352,6 +352,15 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
11133     ep = 0x86;
11134     iface = usb_ifnum_to_if(dev, 2);
11135    
11136     + if (!iface || iface->num_altsetting == 0)
11137     + return -EINVAL;
11138     +
11139     + alts = &iface->altsetting[1];
11140     + goto add_sync_ep;
11141     + case USB_ID(0x1397, 0x0002):
11142     + ep = 0x81;
11143     + iface = usb_ifnum_to_if(dev, 1);
11144     +
11145     if (!iface || iface->num_altsetting == 0)
11146     return -EINVAL;
11147    
11148     diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
11149     index 8d7db7cd4f88..ed56cd307059 100644
11150     --- a/sound/usb/quirks.c
11151     +++ b/sound/usb/quirks.c
11152     @@ -1369,8 +1369,11 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
11153     return SNDRV_PCM_FMTBIT_DSD_U32_BE;
11154     break;
11155    
11156     - /* Amanero Combo384 USB interface with native DSD support */
11157     - case USB_ID(0x16d0, 0x071a):
11158     + /* Amanero Combo384 USB based DACs with native DSD support */
11159     + case USB_ID(0x16d0, 0x071a): /* Amanero - Combo384 */
11160     + case USB_ID(0x2ab6, 0x0004): /* T+A DAC8DSD-V2.0, MP1000E-V2.0, MP2000R-V2.0, MP2500R-V2.0, MP3100HV-V2.0 */
11161     + case USB_ID(0x2ab6, 0x0005): /* T+A USB HD Audio 1 */
11162     + case USB_ID(0x2ab6, 0x0006): /* T+A USB HD Audio 2 */
11163     if (fp->altsetting == 2) {
11164     switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) {
11165     case 0x199:
11166     diff --git a/tools/include/linux/kmemcheck.h b/tools/include/linux/kmemcheck.h
11167     deleted file mode 100644
11168     index 2bccd2c7b897..000000000000
11169     --- a/tools/include/linux/kmemcheck.h
11170     +++ /dev/null
11171     @@ -1,9 +0,0 @@
11172     -/* SPDX-License-Identifier: GPL-2.0 */
11173     -#ifndef _LIBLOCKDEP_LINUX_KMEMCHECK_H_
11174     -#define _LIBLOCKDEP_LINUX_KMEMCHECK_H_
11175     -
11176     -static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
11177     -{
11178     -}
11179     -
11180     -#endif
11181     diff --git a/tools/objtool/check.c b/tools/objtool/check.c
11182     index 2e458eb45586..c7fb5c2392ee 100644
11183     --- a/tools/objtool/check.c
11184     +++ b/tools/objtool/check.c
11185     @@ -1935,13 +1935,19 @@ static bool ignore_unreachable_insn(struct instruction *insn)
11186     if (is_kasan_insn(insn) || is_ubsan_insn(insn))
11187     return true;
11188    
11189     - if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) {
11190     - insn = insn->jump_dest;
11191     - continue;
11192     + if (insn->type == INSN_JUMP_UNCONDITIONAL) {
11193     + if (insn->jump_dest &&
11194     + insn->jump_dest->func == insn->func) {
11195     + insn = insn->jump_dest;
11196     + continue;
11197     + }
11198     +
11199     + break;
11200     }
11201    
11202     if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
11203     break;
11204     +
11205     insn = list_next_entry(insn, list);
11206     }
11207    
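
The objtool hunk above tightens the dead-code walk: when scanning past a potentially unreachable instruction it now follows an unconditional jump only if the destination lies in the same function, and otherwise stops, so a tail-call out of the function no longer drags the scan into unrelated code. A stripped-down sketch of the walk (the struct is reduced to the fields the loop needs):

#include <stddef.h>

struct insn {
    struct insn *jump_dest;
    struct insn *next;
    void *func;
    int uncond_jump;
};

static struct insn *walk(struct insn *insn)
{
    while (insn) {
        if (insn->uncond_jump) {
            if (insn->jump_dest && insn->jump_dest->func == insn->func) {
                insn = insn->jump_dest;   /* stay inside the function */
                continue;
            }
            break;                        /* jump leaves the function */
        }
        insn = insn->next;
    }
    return insn;
}

int main(void)
{
    struct insn a = {0}, b = {0};

    a.uncond_jump = 1; a.jump_dest = &b;  /* a jumps to b, same function */
    b.uncond_jump = 1;                    /* b tail-calls out: walk stops */
    a.func = b.func = &a;
    return walk(&a) == &b ? 0 : 1;
}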
11208     diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
11209     index 35d4b9c9a9e8..9e693ce4b73b 100644
11210     --- a/tools/perf/builtin-kmem.c
11211     +++ b/tools/perf/builtin-kmem.c
11212     @@ -655,7 +655,6 @@ static const struct {
11213     { "__GFP_RECLAIMABLE", "RC" },
11214     { "__GFP_MOVABLE", "M" },
11215     { "__GFP_ACCOUNT", "AC" },
11216     - { "__GFP_NOTRACK", "NT" },
11217     { "__GFP_WRITE", "WR" },
11218     { "__GFP_RECLAIM", "R" },
11219     { "__GFP_DIRECT_RECLAIM", "DR" },
11220     diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
11221     index 24dbf634e2dd..0b457e8e0f0c 100644
11222     --- a/tools/testing/selftests/seccomp/seccomp_bpf.c
11223     +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
11224     @@ -1717,7 +1717,7 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
11225    
11226     if (nr == __NR_getpid)
11227     change_syscall(_metadata, tracee, __NR_getppid);
11228     - if (nr == __NR_open)
11229     + if (nr == __NR_openat)
11230     change_syscall(_metadata, tracee, -1);
11231     }
11232    
11233     @@ -1792,7 +1792,7 @@ TEST_F(TRACE_syscall, ptrace_syscall_dropped)
11234     true);
11235    
11236     /* Tracer should skip the open syscall, resulting in EPERM. */
11237     - EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_open));
11238     + EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat));
11239     }
11240    
11241     TEST_F(TRACE_syscall, syscall_allowed)
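
The seccomp-selftest hunk above swaps __NR_open for __NR_openat: modern C libraries (and architectures without a legacy open syscall) issue openat() underneath open(), so tracing the legacy number never fired. For reference, the openat() form of the same call, issued directly:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
    /* equivalent of open("/dev/null", O_RDONLY), but via the syscall
     * number the tracer actually observes on current libcs */
    long fd = syscall(SYS_openat, AT_FDCWD, "/dev/null", O_RDONLY);

    return fd < 0;
}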
11242     diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c
11243     index a65b016d4c13..1097f04e4d80 100644
11244     --- a/tools/testing/selftests/vm/compaction_test.c
11245     +++ b/tools/testing/selftests/vm/compaction_test.c
11246     @@ -137,6 +137,8 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
11247     printf("No of huge pages allocated = %d\n",
11248     (atoi(nr_hugepages)));
11249    
11250     + lseek(fd, 0, SEEK_SET);
11251     +
11252     if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
11253     != strlen(initial_nr_hugepages)) {
11254     perror("Failed to write value to /proc/sys/vm/nr_hugepages\n");
11255     diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
11256     index 91fbfa8fdc15..aa6e2d7f6a1f 100644
11257     --- a/tools/testing/selftests/x86/Makefile
11258     +++ b/tools/testing/selftests/x86/Makefile
11259     @@ -5,16 +5,26 @@ include ../lib.mk
11260    
11261     .PHONY: all all_32 all_64 warn_32bit_failure clean
11262    
11263     -TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
11264     - check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \
11265     +UNAME_M := $(shell uname -m)
11266     +CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
11267     +CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
11268     +
11269     +TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \
11270     + check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \
11271     protection_keys test_vdso test_vsyscall
11272     TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
11273     test_FCMOV test_FCOMI test_FISTTP \
11274     vdso_restorer
11275     TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip
11276     +# Some selftests require 32-bit support to be enabled even on 64-bit systems
11277     +TARGETS_C_32BIT_NEEDED := ldt_gdt ptrace_syscall
11278    
11279     -TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
11280     +TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) $(TARGETS_C_32BIT_NEEDED)
11281     TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
11282     +ifeq ($(CAN_BUILD_I386)$(CAN_BUILD_X86_64),11)
11283     +TARGETS_C_64BIT_ALL += $(TARGETS_C_32BIT_NEEDED)
11284     +endif
11285     +
11286     BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
11287     BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
11288    
11289     @@ -23,18 +33,16 @@ BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
11290    
11291     CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie
11292    
11293     -UNAME_M := $(shell uname -m)
11294     -CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
11295     -CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
11296     -
11297     ifeq ($(CAN_BUILD_I386),1)
11298     all: all_32
11299     TEST_PROGS += $(BINARIES_32)
11300     +EXTRA_CFLAGS += -DCAN_BUILD_32
11301     endif
11302    
11303     ifeq ($(CAN_BUILD_X86_64),1)
11304     all: all_64
11305     TEST_PROGS += $(BINARIES_64)
11306     +EXTRA_CFLAGS += -DCAN_BUILD_64
11307     endif
11308    
11309     all_32: $(BINARIES_32)
11310     diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
11311     index ec0f6b45ce8b..9c0325e1ea68 100644
11312     --- a/tools/testing/selftests/x86/mpx-mini-test.c
11313     +++ b/tools/testing/selftests/x86/mpx-mini-test.c
11314     @@ -315,11 +315,39 @@ static inline void *__si_bounds_upper(siginfo_t *si)
11315     return si->si_upper;
11316     }
11317     #else
11318     +
11319     +/*
11320     + * This deals with old versions of _sigfault in some distros:
11321     + *
11322     +
11323     +old _sigfault:
11324     + struct {
11325     + void *si_addr;
11326     + } _sigfault;
11327     +
11328     +new _sigfault:
11329     + struct {
11330     + void __user *_addr;
11331     + int _trapno;
11332     + short _addr_lsb;
11333     + union {
11334     + struct {
11335     + void __user *_lower;
11336     + void __user *_upper;
11337     + } _addr_bnd;
11338     + __u32 _pkey;
11339     + };
11340     + } _sigfault;
11341     + *
11342     + */
11343     +
11344     static inline void **__si_bounds_hack(siginfo_t *si)
11345     {
11346     void *sigfault = &si->_sifields._sigfault;
11347     void *end_sigfault = sigfault + sizeof(si->_sifields._sigfault);
11348     - void **__si_lower = end_sigfault;
11349     + int *trapno = (int *)end_sigfault;
11350     + /* skip _trapno and _addr_lsb */
11351     + void **__si_lower = (void **)(trapno + 2);
11352    
11353     return __si_lower;
11354     }
11355     @@ -331,7 +359,7 @@ static inline void *__si_bounds_lower(siginfo_t *si)
11356    
11357     static inline void *__si_bounds_upper(siginfo_t *si)
11358     {
11359     - return (*__si_bounds_hack(si)) + sizeof(void *);
11360     + return *(__si_bounds_hack(si) + 1);
11361     }
11362     #endif
11363    
11364     diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
11365     index 7a1cc0e56d2d..6cbb83b47150 100644
11366     --- a/tools/testing/selftests/x86/protection_keys.c
11367     +++ b/tools/testing/selftests/x86/protection_keys.c
11368     @@ -393,34 +393,6 @@ pid_t fork_lazy_child(void)
11369     return forkret;
11370     }
11371    
11372     -void davecmp(void *_a, void *_b, int len)
11373     -{
11374     - int i;
11375     - unsigned long *a = _a;
11376     - unsigned long *b = _b;
11377     -
11378     - for (i = 0; i < len / sizeof(*a); i++) {
11379     - if (a[i] == b[i])
11380     - continue;
11381     -
11382     - dprintf3("[%3d]: a: %016lx b: %016lx\n", i, a[i], b[i]);
11383     - }
11384     -}
11385     -
11386     -void dumpit(char *f)
11387     -{
11388     - int fd = open(f, O_RDONLY);
11389     - char buf[100];
11390     - int nr_read;
11391     -
11392     - dprintf2("maps fd: %d\n", fd);
11393     - do {
11394     - nr_read = read(fd, &buf[0], sizeof(buf));
11395     - write(1, buf, nr_read);
11396     - } while (nr_read > 0);
11397     - close(fd);
11398     -}
11399     -
11400     #define PKEY_DISABLE_ACCESS 0x1
11401     #define PKEY_DISABLE_WRITE 0x2
11402    
11403     diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
11404     index a48da95c18fd..ddfdd635de16 100644
11405     --- a/tools/testing/selftests/x86/single_step_syscall.c
11406     +++ b/tools/testing/selftests/x86/single_step_syscall.c
11407     @@ -119,7 +119,9 @@ static void check_result(void)
11408    
11409     int main()
11410     {
11411     +#ifdef CAN_BUILD_32
11412     int tmp;
11413     +#endif
11414    
11415     sethandler(SIGTRAP, sigtrap, 0);
11416    
11417     @@ -139,12 +141,13 @@ int main()
11418     : : "c" (post_nop) : "r11");
11419     check_result();
11420     #endif
11421     -
11422     +#ifdef CAN_BUILD_32
11423     printf("[RUN]\tSet TF and check int80\n");
11424     set_eflags(get_eflags() | X86_EFLAGS_TF);
11425     asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
11426     : INT80_CLOBBERS);
11427     check_result();
11428     +#endif
11429    
11430     /*
11431     * This test is particularly interesting if fast syscalls use
11432     diff --git a/tools/testing/selftests/x86/test_mremap_vdso.c b/tools/testing/selftests/x86/test_mremap_vdso.c
11433     index bf0d687c7db7..64f11c8d9b76 100644
11434     --- a/tools/testing/selftests/x86/test_mremap_vdso.c
11435     +++ b/tools/testing/selftests/x86/test_mremap_vdso.c
11436     @@ -90,8 +90,12 @@ int main(int argc, char **argv, char **envp)
11437     vdso_size += PAGE_SIZE;
11438     }
11439    
11440     +#ifdef __i386__
11441     /* Glibc is likely to explode now - exit with raw syscall */
11442     asm volatile ("int $0x80" : : "a" (__NR_exit), "b" (!!ret));
11443     +#else /* __x86_64__ */
11444     + syscall(SYS_exit, ret);
11445     +#endif
11446     } else {
11447     int status;
11448    
11449     diff --git a/tools/testing/selftests/x86/test_vdso.c b/tools/testing/selftests/x86/test_vdso.c
11450     index 29973cde06d3..235259011704 100644
11451     --- a/tools/testing/selftests/x86/test_vdso.c
11452     +++ b/tools/testing/selftests/x86/test_vdso.c
11453     @@ -26,20 +26,59 @@
11454     # endif
11455     #endif
11456    
11457     +/* max length of lines in /proc/self/maps - anything longer is skipped here */
11458     +#define MAPS_LINE_LEN 128
11459     +
11460     int nerrs = 0;
11461    
11462     +typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
11463     +
11464     +getcpu_t vgetcpu;
11465     +getcpu_t vdso_getcpu;
11466     +
11467     +static void *vsyscall_getcpu(void)
11468     +{
11469     #ifdef __x86_64__
11470     -# define VSYS(x) (x)
11471     + FILE *maps;
11472     + char line[MAPS_LINE_LEN];
11473     + bool found = false;
11474     +
11475     + maps = fopen("/proc/self/maps", "r");
11476     + if (!maps) /* the vsyscall page might still be present, but ignore it here, as we test the vDSO, not vsyscall */
11477     + return NULL;
11478     +
11479     + while (fgets(line, MAPS_LINE_LEN, maps)) {
11480     + char r, x;
11481     + void *start, *end;
11482     + char name[MAPS_LINE_LEN];
11483     +
11484     + /* sscanf() is safe here as sizeof(name) >= sizeof(line) */
11485     + if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
11486     + &start, &end, &r, &x, name) != 5)
11487     + continue;
11488     +
11489     + if (strcmp(name, "[vsyscall]"))
11490     + continue;
11491     +
11492     + /* assume the entry is OK, as we test the vDSO here, not vsyscall */
11493     + found = true;
11494     + break;
11495     + }
11496     +
11497     + fclose(maps);
11498     +
11499     + if (!found) {
11500     + printf("Warning: failed to find vsyscall getcpu\n");
11501     + return NULL;
11502     + }
11503     + return (void *) (0xffffffffff600800);
11504     #else
11505     -# define VSYS(x) 0
11506     + return NULL;
11507     #endif
11508     +}
11509    
11510     -typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
11511     -
11512     -const getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
11513     -getcpu_t vdso_getcpu;
11514    
11515     -void fill_function_pointers()
11516     +static void fill_function_pointers()
11517     {
11518     void *vdso = dlopen("linux-vdso.so.1",
11519     RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
11520     @@ -54,6 +93,8 @@ void fill_function_pointers()
11521     vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
11522     if (!vdso_getcpu)
11523     printf("Warning: failed to find getcpu in vDSO\n");
11524     +
11525     + vgetcpu = (getcpu_t) vsyscall_getcpu();
11526     }
11527    
11528     static long sys_getcpu(unsigned * cpu, unsigned * node,
11529     diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
11530     index 6e0bd52ad53d..003b6c55b10e 100644
11531     --- a/tools/testing/selftests/x86/test_vsyscall.c
11532     +++ b/tools/testing/selftests/x86/test_vsyscall.c
11533     @@ -33,6 +33,9 @@
11534     # endif
11535     #endif
11536    
11537     +/* max length of lines in /proc/self/maps - anything longer is skipped here */
11538     +#define MAPS_LINE_LEN 128
11539     +
11540     static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
11541     int flags)
11542     {
11543     @@ -98,7 +101,7 @@ static int init_vsys(void)
11544     #ifdef __x86_64__
11545     int nerrs = 0;
11546     FILE *maps;
11547     - char line[128];
11548     + char line[MAPS_LINE_LEN];
11549     bool found = false;
11550    
11551     maps = fopen("/proc/self/maps", "r");
11552     @@ -108,10 +111,12 @@ static int init_vsys(void)
11553     return 0;
11554     }
11555    
11556     - while (fgets(line, sizeof(line), maps)) {
11557     + while (fgets(line, MAPS_LINE_LEN, maps)) {
11558     char r, x;
11559     void *start, *end;
11560     - char name[128];
11561     + char name[MAPS_LINE_LEN];
11562     +
11563     + /* sscanf() is safe here as sizeof(name) >= sizeof(line) */
11564     if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
11565     &start, &end, &r, &x, name) != 5)
11566     continue;