Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0225-4.9.126-all-fixes.patch



Revision 3224
Tue Sep 11 10:04:15 2018 UTC by niro
File size: 75480 bytes
-linux-4.9.126
1 diff --git a/Makefile b/Makefile
2 index aef09ca7a924..b26481fef3f0 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 125
9 +SUBLEVEL = 126
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
14 index 4f95577b0180..6e0d549eb3bb 100644
15 --- a/arch/alpha/kernel/osf_sys.c
16 +++ b/arch/alpha/kernel/osf_sys.c
17 @@ -526,24 +526,19 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path,
18 SYSCALL_DEFINE1(osf_utsname, char __user *, name)
19 {
20 int error;
21 + char tmp[5 * 32];
22
23 down_read(&uts_sem);
24 - error = -EFAULT;
25 - if (copy_to_user(name + 0, utsname()->sysname, 32))
26 - goto out;
27 - if (copy_to_user(name + 32, utsname()->nodename, 32))
28 - goto out;
29 - if (copy_to_user(name + 64, utsname()->release, 32))
30 - goto out;
31 - if (copy_to_user(name + 96, utsname()->version, 32))
32 - goto out;
33 - if (copy_to_user(name + 128, utsname()->machine, 32))
34 - goto out;
35 + memcpy(tmp + 0 * 32, utsname()->sysname, 32);
36 + memcpy(tmp + 1 * 32, utsname()->nodename, 32);
37 + memcpy(tmp + 2 * 32, utsname()->release, 32);
38 + memcpy(tmp + 3 * 32, utsname()->version, 32);
39 + memcpy(tmp + 4 * 32, utsname()->machine, 32);
40 + up_read(&uts_sem);
41
42 - error = 0;
43 - out:
44 - up_read(&uts_sem);
45 - return error;
46 + if (copy_to_user(name, tmp, sizeof(tmp)))
47 + return -EFAULT;
48 + return 0;
49 }
50
51 SYSCALL_DEFINE0(getpagesize)
52 @@ -561,24 +556,22 @@ SYSCALL_DEFINE0(getdtablesize)
53 */
54 SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
55 {
56 - unsigned len;
57 - int i;
58 + int len, err = 0;
59 + char *kname;
60 + char tmp[32];
61
62 - if (!access_ok(VERIFY_WRITE, name, namelen))
63 - return -EFAULT;
64 -
65 - len = namelen;
66 - if (len > 32)
67 - len = 32;
68 + if (namelen < 0 || namelen > 32)
69 + namelen = 32;
70
71 down_read(&uts_sem);
72 - for (i = 0; i < len; ++i) {
73 - __put_user(utsname()->domainname[i], name + i);
74 - if (utsname()->domainname[i] == '\0')
75 - break;
76 - }
77 + kname = utsname()->domainname;
78 + len = strnlen(kname, namelen);
79 + len = min(len + 1, namelen);
80 + memcpy(tmp, kname, len);
81 up_read(&uts_sem);
82
83 + if (copy_to_user(name, tmp, len))
84 + return -EFAULT;
85 return 0;
86 }
87
88 @@ -741,13 +734,14 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
89 };
90 unsigned long offset;
91 const char *res;
92 - long len, err = -EINVAL;
93 + long len;
94 + char tmp[__NEW_UTS_LEN + 1];
95
96 offset = command-1;
97 if (offset >= ARRAY_SIZE(sysinfo_table)) {
98 /* Digital UNIX has a few unpublished interfaces here */
99 printk("sysinfo(%d)", command);
100 - goto out;
101 + return -EINVAL;
102 }
103
104 down_read(&uts_sem);
105 @@ -755,13 +749,11 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
106 len = strlen(res)+1;
107 if ((unsigned long)len > (unsigned long)count)
108 len = count;
109 - if (copy_to_user(buf, res, len))
110 - err = -EFAULT;
111 - else
112 - err = 0;
113 + memcpy(tmp, res, len);
114 up_read(&uts_sem);
115 - out:
116 - return err;
117 + if (copy_to_user(buf, tmp, len))
118 + return -EFAULT;
119 + return 0;
120 }
121
122 SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
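
The three alpha hunks above, like the sparc changes further down, apply one pattern: snapshot the utsname fields into a stack buffer while holding uts_sem, then drop the lock before touching userspace, since copy_to_user() can fault and block for an arbitrarily long time (for instance under userfaultfd), stalling any writer of the semaphore. A minimal sketch of the pattern, with a hypothetical helper name:

    static long copy_uts_field(char __user *dst)
    {
            char tmp[32];

            /* Read side: copy under the lock, into kernel memory only. */
            down_read(&uts_sem);
            memcpy(tmp, utsname()->sysname, sizeof(tmp));
            up_read(&uts_sem);

            /* User copy happens with no locks held. */
            return copy_to_user(dst, tmp, sizeof(tmp)) ? -EFAULT : 0;
    }
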
123 diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
124 index f11012bb58cc..cfcf5dcb51a8 100644
125 --- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
126 +++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
127 @@ -205,6 +205,7 @@
128 #address-cells = <1>;
129 #size-cells = <0>;
130 reg = <0x70>;
131 + reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>;
132 };
133 };
134
135 diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
136 index 0031806475f0..f93238ad06bd 100644
137 --- a/arch/powerpc/include/asm/fadump.h
138 +++ b/arch/powerpc/include/asm/fadump.h
139 @@ -190,9 +190,6 @@ struct fadump_crash_info_header {
140 struct cpumask online_mask;
141 };
142
143 -/* Crash memory ranges */
144 -#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
145 -
146 struct fad_crash_memory_ranges {
147 unsigned long long base;
148 unsigned long long size;
149 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
150 index 93a6eeba3ace..e3acf5c3480e 100644
151 --- a/arch/powerpc/kernel/fadump.c
152 +++ b/arch/powerpc/kernel/fadump.c
153 @@ -35,6 +35,7 @@
154 #include <linux/crash_dump.h>
155 #include <linux/kobject.h>
156 #include <linux/sysfs.h>
157 +#include <linux/slab.h>
158
159 #include <asm/page.h>
160 #include <asm/prom.h>
161 @@ -48,8 +49,10 @@ static struct fadump_mem_struct fdm;
162 static const struct fadump_mem_struct *fdm_active;
163
164 static DEFINE_MUTEX(fadump_mutex);
165 -struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
166 +struct fad_crash_memory_ranges *crash_memory_ranges;
167 +int crash_memory_ranges_size;
168 int crash_mem_ranges;
169 +int max_crash_mem_ranges;
170
171 /* Scan the Firmware Assisted dump configuration details. */
172 int __init early_init_dt_scan_fw_dump(unsigned long node,
173 @@ -731,38 +734,88 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
174 return 0;
175 }
176
177 -static inline void fadump_add_crash_memory(unsigned long long base,
178 - unsigned long long end)
179 +static void free_crash_memory_ranges(void)
180 +{
181 + kfree(crash_memory_ranges);
182 + crash_memory_ranges = NULL;
183 + crash_memory_ranges_size = 0;
184 + max_crash_mem_ranges = 0;
185 +}
186 +
187 +/*
188 + * Allocate or reallocate crash memory ranges array in incremental units
189 + * of PAGE_SIZE.
190 + */
191 +static int allocate_crash_memory_ranges(void)
192 +{
193 + struct fad_crash_memory_ranges *new_array;
194 + u64 new_size;
195 +
196 + new_size = crash_memory_ranges_size + PAGE_SIZE;
197 + pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
198 + new_size);
199 +
200 + new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
201 + if (new_array == NULL) {
202 + pr_err("Insufficient memory for setting up crash memory ranges\n");
203 + free_crash_memory_ranges();
204 + return -ENOMEM;
205 + }
206 +
207 + crash_memory_ranges = new_array;
208 + crash_memory_ranges_size = new_size;
209 + max_crash_mem_ranges = (new_size /
210 + sizeof(struct fad_crash_memory_ranges));
211 + return 0;
212 +}
213 +
214 +static inline int fadump_add_crash_memory(unsigned long long base,
215 + unsigned long long end)
216 {
217 if (base == end)
218 - return;
219 + return 0;
220 +
221 + if (crash_mem_ranges == max_crash_mem_ranges) {
222 + int ret;
223 +
224 + ret = allocate_crash_memory_ranges();
225 + if (ret)
226 + return ret;
227 + }
228
229 pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
230 crash_mem_ranges, base, end - 1, (end - base));
231 crash_memory_ranges[crash_mem_ranges].base = base;
232 crash_memory_ranges[crash_mem_ranges].size = end - base;
233 crash_mem_ranges++;
234 + return 0;
235 }
236
237 -static void fadump_exclude_reserved_area(unsigned long long start,
238 +static int fadump_exclude_reserved_area(unsigned long long start,
239 unsigned long long end)
240 {
241 unsigned long long ra_start, ra_end;
242 + int ret = 0;
243
244 ra_start = fw_dump.reserve_dump_area_start;
245 ra_end = ra_start + fw_dump.reserve_dump_area_size;
246
247 if ((ra_start < end) && (ra_end > start)) {
248 if ((start < ra_start) && (end > ra_end)) {
249 - fadump_add_crash_memory(start, ra_start);
250 - fadump_add_crash_memory(ra_end, end);
251 + ret = fadump_add_crash_memory(start, ra_start);
252 + if (ret)
253 + return ret;
254 +
255 + ret = fadump_add_crash_memory(ra_end, end);
256 } else if (start < ra_start) {
257 - fadump_add_crash_memory(start, ra_start);
258 + ret = fadump_add_crash_memory(start, ra_start);
259 } else if (ra_end < end) {
260 - fadump_add_crash_memory(ra_end, end);
261 + ret = fadump_add_crash_memory(ra_end, end);
262 }
263 } else
264 - fadump_add_crash_memory(start, end);
265 + ret = fadump_add_crash_memory(start, end);
266 +
267 + return ret;
268 }
269
270 static int fadump_init_elfcore_header(char *bufp)
271 @@ -802,10 +855,11 @@ static int fadump_init_elfcore_header(char *bufp)
272 * Traverse through memblock structure and setup crash memory ranges. These
273 * ranges will be used create PT_LOAD program headers in elfcore header.
274 */
275 -static void fadump_setup_crash_memory_ranges(void)
276 +static int fadump_setup_crash_memory_ranges(void)
277 {
278 struct memblock_region *reg;
279 unsigned long long start, end;
280 + int ret;
281
282 pr_debug("Setup crash memory ranges.\n");
283 crash_mem_ranges = 0;
284 @@ -816,7 +870,9 @@ static void fadump_setup_crash_memory_ranges(void)
285 * specified during fadump registration. We need to create a separate
286 * program header for this chunk with the correct offset.
287 */
288 - fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
289 + ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
290 + if (ret)
291 + return ret;
292
293 for_each_memblock(memory, reg) {
294 start = (unsigned long long)reg->base;
295 @@ -825,8 +881,12 @@ static void fadump_setup_crash_memory_ranges(void)
296 start = fw_dump.boot_memory_size;
297
298 /* add this range excluding the reserved dump area. */
299 - fadump_exclude_reserved_area(start, end);
300 + ret = fadump_exclude_reserved_area(start, end);
301 + if (ret)
302 + return ret;
303 }
304 +
305 + return 0;
306 }
307
308 /*
309 @@ -950,6 +1010,7 @@ static void register_fadump(void)
310 {
311 unsigned long addr;
312 void *vaddr;
313 + int ret;
314
315 /*
316 * If no memory is reserved then we can not register for firmware-
317 @@ -958,7 +1019,9 @@ static void register_fadump(void)
318 if (!fw_dump.reserve_dump_area_size)
319 return;
320
321 - fadump_setup_crash_memory_ranges();
322 + ret = fadump_setup_crash_memory_ranges();
323 + if (ret)
324 + return ret;
325
326 addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
327 /* Initialize fadump crash info header. */
328 @@ -1036,6 +1099,7 @@ void fadump_cleanup(void)
329 } else if (fw_dump.dump_registered) {
330 /* Un-register Firmware-assisted dump if it was registered. */
331 fadump_unregister_dump(&fdm);
332 + free_crash_memory_ranges();
333 }
334 }
335
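
The fadump hunks above replace the fixed INIT_CRASHMEM_RANGES array with one grown on demand, a page at a time. A reduced sketch of that growth step, with a hypothetical helper name and the same PAGE_SIZE increment:

    static int grow_ranges(struct fad_crash_memory_ranges **ranges,
                           int *size, int *max)
    {
            /* krealloc(NULL, ...) acts like kmalloc(), so this also
             * covers the first allocation. */
            void *p = krealloc(*ranges, *size + PAGE_SIZE, GFP_KERNEL);

            if (!p)
                    return -ENOMEM;
            *ranges = p;
            *size += PAGE_SIZE;
            *max = *size / sizeof(struct fad_crash_memory_ranges);
            return 0;
    }
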
336 diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
337 index 9ed90c502005..f52cc6fd4290 100644
338 --- a/arch/powerpc/platforms/powernv/pci-ioda.c
339 +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
340 @@ -3124,12 +3124,49 @@ static void pnv_pci_ioda_create_dbgfs(void)
341 #endif /* CONFIG_DEBUG_FS */
342 }
343
344 +static void pnv_pci_enable_bridge(struct pci_bus *bus)
345 +{
346 + struct pci_dev *dev = bus->self;
347 + struct pci_bus *child;
348 +
349 + /* Empty bus ? bail */
350 + if (list_empty(&bus->devices))
351 + return;
352 +
353 + /*
354 + * If there's a bridge associated with that bus enable it. This works
355 + * around races in the generic code if the enabling is done during
356 + * parallel probing. This can be removed once those races have been
357 + * fixed.
358 + */
359 + if (dev) {
360 + int rc = pci_enable_device(dev);
361 + if (rc)
362 + pci_err(dev, "Error enabling bridge (%d)\n", rc);
363 + pci_set_master(dev);
364 + }
365 +
366 + /* Perform the same to child busses */
367 + list_for_each_entry(child, &bus->children, node)
368 + pnv_pci_enable_bridge(child);
369 +}
370 +
371 +static void pnv_pci_enable_bridges(void)
372 +{
373 + struct pci_controller *hose;
374 +
375 + list_for_each_entry(hose, &hose_list, list_node)
376 + pnv_pci_enable_bridge(hose->bus);
377 +}
378 +
379 static void pnv_pci_ioda_fixup(void)
380 {
381 pnv_pci_ioda_setup_PEs();
382 pnv_pci_ioda_setup_iommu_api();
383 pnv_pci_ioda_create_dbgfs();
384
385 + pnv_pci_enable_bridges();
386 +
387 #ifdef CONFIG_EEH
388 eeh_init();
389 eeh_addr_cache_build();
390 diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
391 index 904a677208d1..34989ce43147 100644
392 --- a/arch/powerpc/platforms/pseries/ras.c
393 +++ b/arch/powerpc/platforms/pseries/ras.c
394 @@ -346,7 +346,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
395 }
396
397 savep = __va(regs->gpr[3]);
398 - regs->gpr[3] = savep[0]; /* restore original r3 */
399 + regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
400
401 /* If it isn't an extended log we can use the per cpu 64bit buffer */
402 h = (struct rtas_error_log *)&savep[1];
403 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
404 index 646988d4c1a3..740f43b9b541 100644
405 --- a/arch/sparc/kernel/sys_sparc_32.c
406 +++ b/arch/sparc/kernel/sys_sparc_32.c
407 @@ -201,23 +201,27 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
408
409 asmlinkage long sys_getdomainname(char __user *name, int len)
410 {
411 - int nlen, err;
412 -
413 + int nlen, err;
414 + char tmp[__NEW_UTS_LEN + 1];
415 +
416 if (len < 0)
417 return -EINVAL;
418
419 - down_read(&uts_sem);
420 -
421 + down_read(&uts_sem);
422 +
423 nlen = strlen(utsname()->domainname) + 1;
424 err = -EINVAL;
425 if (nlen > len)
426 - goto out;
427 + goto out_unlock;
428 + memcpy(tmp, utsname()->domainname, nlen);
429
430 - err = -EFAULT;
431 - if (!copy_to_user(name, utsname()->domainname, nlen))
432 - err = 0;
433 + up_read(&uts_sem);
434
435 -out:
436 + if (copy_to_user(name, tmp, nlen))
437 + return -EFAULT;
438 + return 0;
439 +
440 +out_unlock:
441 up_read(&uts_sem);
442 return err;
443 }
444 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
445 index 02e05e221b94..ebecbc927460 100644
446 --- a/arch/sparc/kernel/sys_sparc_64.c
447 +++ b/arch/sparc/kernel/sys_sparc_64.c
448 @@ -524,23 +524,27 @@ extern void check_pending(int signum);
449
450 SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
451 {
452 - int nlen, err;
453 + int nlen, err;
454 + char tmp[__NEW_UTS_LEN + 1];
455
456 if (len < 0)
457 return -EINVAL;
458
459 - down_read(&uts_sem);
460 -
461 + down_read(&uts_sem);
462 +
463 nlen = strlen(utsname()->domainname) + 1;
464 err = -EINVAL;
465 if (nlen > len)
466 - goto out;
467 + goto out_unlock;
468 + memcpy(tmp, utsname()->domainname, nlen);
469 +
470 + up_read(&uts_sem);
471
472 - err = -EFAULT;
473 - if (!copy_to_user(name, utsname()->domainname, nlen))
474 - err = 0;
475 + if (copy_to_user(name, tmp, nlen))
476 + return -EFAULT;
477 + return 0;
478
479 -out:
480 +out_unlock:
481 up_read(&uts_sem);
482 return err;
483 }
484 diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
485 index 3407b148c240..490f9be3fda2 100644
486 --- a/arch/x86/kernel/kexec-bzimage64.c
487 +++ b/arch/x86/kernel/kexec-bzimage64.c
488 @@ -529,7 +529,7 @@ static int bzImage64_cleanup(void *loader_data)
489 static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
490 {
491 return verify_pefile_signature(kernel, kernel_len,
492 - NULL,
493 + VERIFY_USE_SECONDARY_KEYRING,
494 VERIFYING_KEXEC_PE_SIGNATURE);
495 }
496 #endif
497 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
498 index 8e4ac0a91309..8888d894bf39 100644
499 --- a/arch/x86/kvm/vmx.c
500 +++ b/arch/x86/kvm/vmx.c
501 @@ -198,12 +198,14 @@ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_
502
503 static const struct {
504 const char *option;
505 - enum vmx_l1d_flush_state cmd;
506 + bool for_parse;
507 } vmentry_l1d_param[] = {
508 - {"auto", VMENTER_L1D_FLUSH_AUTO},
509 - {"never", VMENTER_L1D_FLUSH_NEVER},
510 - {"cond", VMENTER_L1D_FLUSH_COND},
511 - {"always", VMENTER_L1D_FLUSH_ALWAYS},
512 + [VMENTER_L1D_FLUSH_AUTO] = {"auto", true},
513 + [VMENTER_L1D_FLUSH_NEVER] = {"never", true},
514 + [VMENTER_L1D_FLUSH_COND] = {"cond", true},
515 + [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true},
516 + [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
517 + [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
518 };
519
520 #define L1D_CACHE_ORDER 4
521 @@ -287,8 +289,9 @@ static int vmentry_l1d_flush_parse(const char *s)
522
523 if (s) {
524 for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
525 - if (sysfs_streq(s, vmentry_l1d_param[i].option))
526 - return vmentry_l1d_param[i].cmd;
527 + if (vmentry_l1d_param[i].for_parse &&
528 + sysfs_streq(s, vmentry_l1d_param[i].option))
529 + return i;
530 }
531 }
532 return -EINVAL;
533 @@ -298,13 +301,13 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
534 {
535 int l1tf, ret;
536
537 - if (!boot_cpu_has(X86_BUG_L1TF))
538 - return 0;
539 -
540 l1tf = vmentry_l1d_flush_parse(s);
541 if (l1tf < 0)
542 return l1tf;
543
544 + if (!boot_cpu_has(X86_BUG_L1TF))
545 + return 0;
546 +
547 /*
548 * Has vmx_init() run already? If not then this is the pre init
549 * parameter parsing. In that case just store the value and let
550 @@ -324,6 +327,9 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
551
552 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
553 {
554 + if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
555 + return sprintf(s, "???\n");
556 +
557 return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
558 }
559
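
The vmx.c hunks turn the option list into a table indexed by the enum itself, so one array serves both directions: parsing accepts only the for_parse entries, while the get() handler can print the name of any mitigation state directly. A reduced sketch of the idiom, with hypothetical names:

    enum mode { MODE_AUTO, MODE_NEVER, MODE_DISABLED, NR_MODES };

    static const struct {
            const char *name;
            bool for_parse;         /* settable via the parameter? */
    } modes[NR_MODES] = {
            [MODE_AUTO]     = { "auto",     true  },
            [MODE_NEVER]    = { "never",    true  },
            [MODE_DISABLED] = { "disabled", false }, /* report-only */
    };
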
560 diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
561 index 2041abb10a23..34545ecfdd6b 100644
562 --- a/arch/xtensa/include/asm/cacheasm.h
563 +++ b/arch/xtensa/include/asm/cacheasm.h
564 @@ -31,16 +31,32 @@
565 *
566 */
567
568 - .macro __loop_cache_all ar at insn size line_width
569
570 - movi \ar, 0
571 + .macro __loop_cache_unroll ar at insn size line_width max_immed
572 +
573 + .if (1 << (\line_width)) > (\max_immed)
574 + .set _reps, 1
575 + .elseif (2 << (\line_width)) > (\max_immed)
576 + .set _reps, 2
577 + .else
578 + .set _reps, 4
579 + .endif
580 +
581 + __loopi \ar, \at, \size, (_reps << (\line_width))
582 + .set _index, 0
583 + .rep _reps
584 + \insn \ar, _index << (\line_width)
585 + .set _index, _index + 1
586 + .endr
587 + __endla \ar, \at, _reps << (\line_width)
588 +
589 + .endm
590 +
591
592 - __loopi \ar, \at, \size, (4 << (\line_width))
593 - \insn \ar, 0 << (\line_width)
594 - \insn \ar, 1 << (\line_width)
595 - \insn \ar, 2 << (\line_width)
596 - \insn \ar, 3 << (\line_width)
597 - __endla \ar, \at, 4 << (\line_width)
598 + .macro __loop_cache_all ar at insn size line_width max_immed
599 +
600 + movi \ar, 0
601 + __loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
602
603 .endm
604
605 @@ -57,14 +73,9 @@
606 .endm
607
608
609 - .macro __loop_cache_page ar at insn line_width
610 + .macro __loop_cache_page ar at insn line_width max_immed
611
612 - __loopi \ar, \at, PAGE_SIZE, 4 << (\line_width)
613 - \insn \ar, 0 << (\line_width)
614 - \insn \ar, 1 << (\line_width)
615 - \insn \ar, 2 << (\line_width)
616 - \insn \ar, 3 << (\line_width)
617 - __endla \ar, \at, 4 << (\line_width)
618 + __loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
619
620 .endm
621
622 @@ -72,7 +83,8 @@
623 .macro ___unlock_dcache_all ar at
624
625 #if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
626 - __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
627 + __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
628 + XCHAL_DCACHE_LINEWIDTH 240
629 #endif
630
631 .endm
632 @@ -81,7 +93,8 @@
633 .macro ___unlock_icache_all ar at
634
635 #if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
636 - __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
637 + __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
638 + XCHAL_ICACHE_LINEWIDTH 240
639 #endif
640
641 .endm
642 @@ -90,7 +103,8 @@
643 .macro ___flush_invalidate_dcache_all ar at
644
645 #if XCHAL_DCACHE_SIZE
646 - __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
647 + __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
648 + XCHAL_DCACHE_LINEWIDTH 240
649 #endif
650
651 .endm
652 @@ -99,7 +113,8 @@
653 .macro ___flush_dcache_all ar at
654
655 #if XCHAL_DCACHE_SIZE
656 - __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
657 + __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
658 + XCHAL_DCACHE_LINEWIDTH 240
659 #endif
660
661 .endm
662 @@ -108,8 +123,8 @@
663 .macro ___invalidate_dcache_all ar at
664
665 #if XCHAL_DCACHE_SIZE
666 - __loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
667 - XCHAL_DCACHE_LINEWIDTH
668 + __loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
669 + XCHAL_DCACHE_LINEWIDTH 1020
670 #endif
671
672 .endm
673 @@ -118,8 +133,8 @@
674 .macro ___invalidate_icache_all ar at
675
676 #if XCHAL_ICACHE_SIZE
677 - __loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
678 - XCHAL_ICACHE_LINEWIDTH
679 + __loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
680 + XCHAL_ICACHE_LINEWIDTH 1020
681 #endif
682
683 .endm
684 @@ -166,7 +181,7 @@
685 .macro ___flush_invalidate_dcache_page ar as
686
687 #if XCHAL_DCACHE_SIZE
688 - __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
689 + __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
690 #endif
691
692 .endm
693 @@ -175,7 +190,7 @@
694 .macro ___flush_dcache_page ar as
695
696 #if XCHAL_DCACHE_SIZE
697 - __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
698 + __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
699 #endif
700
701 .endm
702 @@ -184,7 +199,7 @@
703 .macro ___invalidate_dcache_page ar as
704
705 #if XCHAL_DCACHE_SIZE
706 - __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
707 + __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
708 #endif
709
710 .endm
711 @@ -193,7 +208,7 @@
712 .macro ___invalidate_icache_page ar as
713
714 #if XCHAL_ICACHE_SIZE
715 - __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
716 + __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
717 #endif
718
719 .endm
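
The new max_immed argument bounds the immediate offset each cache instruction may take, which is _index << line_width after unrolling. For example, with 64-byte lines (line_width = 6) and max_immed = 240 the macro keeps the 4-way unroll, since the largest offset emitted is 3 << 6 = 192; with 128-byte lines (line_width = 7), 2 << 7 = 256 already exceeds 240, so it drops to two instructions per loop iteration.
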
720 diff --git a/certs/system_keyring.c b/certs/system_keyring.c
721 index 50979d6dcecd..247665054691 100644
722 --- a/certs/system_keyring.c
723 +++ b/certs/system_keyring.c
724 @@ -14,6 +14,7 @@
725 #include <linux/sched.h>
726 #include <linux/cred.h>
727 #include <linux/err.h>
728 +#include <linux/verification.h>
729 #include <keys/asymmetric-type.h>
730 #include <keys/system_keyring.h>
731 #include <crypto/pkcs7.h>
732 @@ -207,7 +208,7 @@ int verify_pkcs7_signature(const void *data, size_t len,
733
734 if (!trusted_keys) {
735 trusted_keys = builtin_trusted_keys;
736 - } else if (trusted_keys == (void *)1UL) {
737 + } else if (trusted_keys == VERIFY_USE_SECONDARY_KEYRING) {
738 #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
739 trusted_keys = secondary_trusted_keys;
740 #else
741 diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c
742 index 1063b644efcd..b2aa925a84bc 100644
743 --- a/crypto/asymmetric_keys/pkcs7_key_type.c
744 +++ b/crypto/asymmetric_keys/pkcs7_key_type.c
745 @@ -62,7 +62,7 @@ static int pkcs7_preparse(struct key_preparsed_payload *prep)
746
747 return verify_pkcs7_signature(NULL, 0,
748 prep->data, prep->datalen,
749 - (void *)1UL, usage,
750 + VERIFY_USE_SECONDARY_KEYRING, usage,
751 pkcs7_view_content, prep);
752 }
753
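
Both keyring call sites above replace the magic (void *)1UL cookie with the named VERIFY_USE_SECONDARY_KEYRING constant from <linux/verification.h>. A hedged usage sketch; everything except the keyring argument is a placeholder:

    #include <linux/verification.h>

    ret = verify_pkcs7_signature(data, data_len, sig, sig_len,
                                 VERIFY_USE_SECONDARY_KEYRING,
                                 VERIFYING_UNSPECIFIED_SIGNATURE,
                                 NULL, NULL);
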
754 diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
755 index 9e7f28122bb7..6d475a2e298c 100644
756 --- a/drivers/crypto/caam/jr.c
757 +++ b/drivers/crypto/caam/jr.c
758 @@ -189,7 +189,8 @@ static void caam_jr_dequeue(unsigned long devarg)
759 BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
760
761 /* Unmap just-run descriptor so we can post-process */
762 - dma_unmap_single(dev, jrp->outring[hw_idx].desc,
763 + dma_unmap_single(dev,
764 + caam_dma_to_cpu(jrp->outring[hw_idx].desc),
765 jrp->entinfo[sw_idx].desc_size,
766 DMA_TO_DEVICE);
767
768 diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
769 index 46131701c378..92e116365272 100644
770 --- a/drivers/crypto/vmx/aes_cbc.c
771 +++ b/drivers/crypto/vmx/aes_cbc.c
772 @@ -111,24 +111,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
773 ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
774 nbytes);
775 } else {
776 - preempt_disable();
777 - pagefault_disable();
778 - enable_kernel_vsx();
779 -
780 blkcipher_walk_init(&walk, dst, src, nbytes);
781 ret = blkcipher_walk_virt(desc, &walk);
782 while ((nbytes = walk.nbytes)) {
783 + preempt_disable();
784 + pagefault_disable();
785 + enable_kernel_vsx();
786 aes_p8_cbc_encrypt(walk.src.virt.addr,
787 walk.dst.virt.addr,
788 nbytes & AES_BLOCK_MASK,
789 &ctx->enc_key, walk.iv, 1);
790 + disable_kernel_vsx();
791 + pagefault_enable();
792 + preempt_enable();
793 +
794 nbytes &= AES_BLOCK_SIZE - 1;
795 ret = blkcipher_walk_done(desc, &walk, nbytes);
796 }
797 -
798 - disable_kernel_vsx();
799 - pagefault_enable();
800 - preempt_enable();
801 }
802
803 return ret;
804 @@ -152,24 +151,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
805 ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src,
806 nbytes);
807 } else {
808 - preempt_disable();
809 - pagefault_disable();
810 - enable_kernel_vsx();
811 -
812 blkcipher_walk_init(&walk, dst, src, nbytes);
813 ret = blkcipher_walk_virt(desc, &walk);
814 while ((nbytes = walk.nbytes)) {
815 + preempt_disable();
816 + pagefault_disable();
817 + enable_kernel_vsx();
818 aes_p8_cbc_encrypt(walk.src.virt.addr,
819 walk.dst.virt.addr,
820 nbytes & AES_BLOCK_MASK,
821 &ctx->dec_key, walk.iv, 0);
822 + disable_kernel_vsx();
823 + pagefault_enable();
824 + preempt_enable();
825 +
826 nbytes &= AES_BLOCK_SIZE - 1;
827 ret = blkcipher_walk_done(desc, &walk, nbytes);
828 }
829 -
830 - disable_kernel_vsx();
831 - pagefault_enable();
832 - preempt_enable();
833 }
834
835 return ret;
836 diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
837 index 24353ec336c5..52e7ae05ae1f 100644
838 --- a/drivers/crypto/vmx/aes_xts.c
839 +++ b/drivers/crypto/vmx/aes_xts.c
840 @@ -123,32 +123,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
841 ret = enc ? crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes) :
842 crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
843 } else {
844 + blkcipher_walk_init(&walk, dst, src, nbytes);
845 +
846 + ret = blkcipher_walk_virt(desc, &walk);
847 +
848 preempt_disable();
849 pagefault_disable();
850 enable_kernel_vsx();
851
852 - blkcipher_walk_init(&walk, dst, src, nbytes);
853 -
854 - ret = blkcipher_walk_virt(desc, &walk);
855 iv = walk.iv;
856 memset(tweak, 0, AES_BLOCK_SIZE);
857 aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
858
859 + disable_kernel_vsx();
860 + pagefault_enable();
861 + preempt_enable();
862 +
863 while ((nbytes = walk.nbytes)) {
864 + preempt_disable();
865 + pagefault_disable();
866 + enable_kernel_vsx();
867 if (enc)
868 aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
869 nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
870 else
871 aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
872 nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
873 + disable_kernel_vsx();
874 + pagefault_enable();
875 + preempt_enable();
876
877 nbytes &= AES_BLOCK_SIZE - 1;
878 ret = blkcipher_walk_done(desc, &walk, nbytes);
879 }
880 -
881 - disable_kernel_vsx();
882 - pagefault_enable();
883 - preempt_enable();
884 }
885 return ret;
886 }
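
Both vmx crypto hunks fix sleep-in-atomic bugs: blkcipher_walk_virt() and blkcipher_walk_done() may allocate and sleep, so the preempt/pagefault/VSX window is narrowed to just the aes_p8_* calls inside the loop. Calling blkcipher_walk_done() with preemption disabled can trip, for example, "BUG: sleeping function called from invalid context" when CONFIG_DEBUG_ATOMIC_SLEEP is set.
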
887 diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
888 index c6f780f5abc9..555fd47c1831 100644
889 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
890 +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
891 @@ -778,6 +778,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
892 I915_USERPTR_UNSYNCHRONIZED))
893 return -EINVAL;
894
895 + if (!args->user_size)
896 + return -EINVAL;
897 +
898 if (offset_in_page(args->user_ptr | args->user_size))
899 return -EINVAL;
900
901 diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
902 index 99eba524f6dd..1642b55f70da 100644
903 --- a/drivers/iio/frequency/ad9523.c
904 +++ b/drivers/iio/frequency/ad9523.c
905 @@ -508,7 +508,7 @@ static ssize_t ad9523_store(struct device *dev,
906 return ret;
907
908 if (!state)
909 - return 0;
910 + return len;
911
912 mutex_lock(&indio_dev->mlock);
913 switch ((u32)this_attr->address) {
914 @@ -642,7 +642,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
915 code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
916 AD9523_CLK_DIST_DIV_REV(ret);
917 *val = code / 1000000;
918 - *val2 = (code % 1000000) * 10;
919 + *val2 = code % 1000000;
920 return IIO_VAL_INT_PLUS_MICRO;
921 default:
922 return -EINVAL;
923 diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
924 index 6c5e29db88e3..df15b6d7b645 100644
925 --- a/drivers/infiniband/sw/rxe/rxe_comp.c
926 +++ b/drivers/infiniband/sw/rxe/rxe_comp.c
927 @@ -273,6 +273,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
928 case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
929 if (wqe->wr.opcode != IB_WR_RDMA_READ &&
930 wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
931 + wqe->status = IB_WC_FATAL_ERR;
932 return COMPST_ERROR;
933 }
934 reset_retry_counters(qp);
935 diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
936 index 876f438aa048..fe7c6ec67d98 100644
937 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
938 +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
939 @@ -1701,8 +1701,7 @@ static bool srpt_close_ch(struct srpt_rdma_ch *ch)
940 int ret;
941
942 if (!srpt_set_ch_state(ch, CH_DRAINING)) {
943 - pr_debug("%s-%d: already closed\n", ch->sess_name,
944 - ch->qp->qp_num);
945 + pr_debug("%s: already closed\n", ch->sess_name);
946 return false;
947 }
948
949 diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
950 index 8c53748a769d..63110fbbb410 100644
951 --- a/drivers/iommu/dmar.c
952 +++ b/drivers/iommu/dmar.c
953 @@ -1328,8 +1328,8 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
954 qi_submit_sync(&desc, iommu);
955 }
956
957 -void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
958 - u64 addr, unsigned mask)
959 +void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
960 + u16 qdep, u64 addr, unsigned mask)
961 {
962 struct qi_desc desc;
963
964 @@ -1344,7 +1344,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
965 qdep = 0;
966
967 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
968 - QI_DIOTLB_TYPE;
969 + QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
970
971 qi_submit_sync(&desc, iommu);
972 }
973 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
974 index 1612d3a22d42..2558a381e118 100644
975 --- a/drivers/iommu/intel-iommu.c
976 +++ b/drivers/iommu/intel-iommu.c
977 @@ -421,6 +421,7 @@ struct device_domain_info {
978 struct list_head global; /* link to global list */
979 u8 bus; /* PCI bus number */
980 u8 devfn; /* PCI devfn number */
981 + u16 pfsid; /* SRIOV physical function source ID */
982 u8 pasid_supported:3;
983 u8 pasid_enabled:1;
984 u8 pri_supported:1;
985 @@ -1511,6 +1512,20 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
986 return;
987
988 pdev = to_pci_dev(info->dev);
989 + /* For IOMMU that supports device IOTLB throttling (DIT), we assign
990 + * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
991 + * queue depth at PF level. If DIT is not set, PFSID will be treated as
992 + * reserved, which should be set to 0.
993 + */
994 + if (!ecap_dit(info->iommu->ecap))
995 + info->pfsid = 0;
996 + else {
997 + struct pci_dev *pf_pdev;
998 +
999 + /* pdev will be returned if device is not a vf */
1000 + pf_pdev = pci_physfn(pdev);
1001 + info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
1002 + }
1003
1004 #ifdef CONFIG_INTEL_IOMMU_SVM
1005 /* The PCIe spec, in its wisdom, declares that the behaviour of
1006 @@ -1576,7 +1591,8 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1007
1008 sid = info->bus << 8 | info->devfn;
1009 qdep = info->ats_qdep;
1010 - qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1011 + qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1012 + qdep, addr, mask);
1013 }
1014 spin_unlock_irqrestore(&device_domain_lock, flags);
1015 }
1016 diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
1017 index dd2afbca51c9..26d2f89db4d9 100644
1018 --- a/drivers/mailbox/mailbox-xgene-slimpro.c
1019 +++ b/drivers/mailbox/mailbox-xgene-slimpro.c
1020 @@ -195,9 +195,9 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
1021 platform_set_drvdata(pdev, ctx);
1022
1023 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1024 - mb_base = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
1025 - if (!mb_base)
1026 - return -ENOMEM;
1027 + mb_base = devm_ioremap_resource(&pdev->dev, regs);
1028 + if (IS_ERR(mb_base))
1029 + return PTR_ERR(mb_base);
1030
1031 /* Setup mailbox links */
1032 for (i = 0; i < MBOX_CNT; i++) {
1033 diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
1034 index bb7aa31c2a08..cdf388d40274 100644
1035 --- a/drivers/md/bcache/writeback.c
1036 +++ b/drivers/md/bcache/writeback.c
1037 @@ -456,8 +456,10 @@ static int bch_writeback_thread(void *arg)
1038 * data on cache. BCACHE_DEV_DETACHING flag is set in
1039 * bch_cached_dev_detach().
1040 */
1041 - if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
1042 + if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
1043 + up_write(&dc->writeback_lock);
1044 break;
1045 + }
1046 }
1047
1048 up_write(&dc->writeback_lock);
1049 diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
1050 index 6937ca42be8c..a184c9830ca5 100644
1051 --- a/drivers/md/dm-cache-metadata.c
1052 +++ b/drivers/md/dm-cache-metadata.c
1053 @@ -344,7 +344,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
1054 disk_super->version = cpu_to_le32(MAX_CACHE_VERSION);
1055 memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
1056 memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
1057 - disk_super->policy_hint_size = 0;
1058 + disk_super->policy_hint_size = cpu_to_le32(0);
1059
1060 __copy_sm_root(cmd, disk_super);
1061
1062 @@ -659,6 +659,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
1063 disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
1064 disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
1065 disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
1066 + disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
1067
1068 disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
1069 disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
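
The dm-cache-metadata hunks are endianness fixes: on-disk superblock fields must be stored through cpu_to_le32(). On a little-endian host the raw assignment happens to produce identical bytes, but on a big-endian host writing a host-order value (as the old policy_hint_size code did) corrupts the on-disk format; even cpu_to_le32(0) is worth writing out, since it documents the field's width and byte order.
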
1070 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1071 index 0f0374a4ac6e..a952ad890f32 100644
1072 --- a/drivers/md/dm-thin.c
1073 +++ b/drivers/md/dm-thin.c
1074 @@ -2518,6 +2518,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1075 case PM_WRITE:
1076 if (old_mode != new_mode)
1077 notify_of_pool_mode_change(pool, "write");
1078 + if (old_mode == PM_OUT_OF_DATA_SPACE)
1079 + cancel_delayed_work_sync(&pool->no_space_timeout);
1080 pool->out_of_data_space = false;
1081 pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
1082 dm_pool_metadata_read_write(pool->pmd);
1083 diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
1084 index 0fc62995695b..11347a3e6d40 100644
1085 --- a/drivers/mfd/hi655x-pmic.c
1086 +++ b/drivers/mfd/hi655x-pmic.c
1087 @@ -49,7 +49,7 @@ static struct regmap_config hi655x_regmap_config = {
1088 .reg_bits = 32,
1089 .reg_stride = HI655X_STRIDE,
1090 .val_bits = 8,
1091 - .max_register = HI655X_BUS_ADDR(0xFFF),
1092 + .max_register = HI655X_BUS_ADDR(0x400) - HI655X_STRIDE,
1093 };
1094
1095 static struct resource pwrkey_resources[] = {
1096 diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
1097 index cc1706a92ace..a078d49485d8 100644
1098 --- a/drivers/misc/cxl/main.c
1099 +++ b/drivers/misc/cxl/main.c
1100 @@ -293,7 +293,7 @@ int cxl_adapter_context_get(struct cxl *adapter)
1101 int rc;
1102
1103 rc = atomic_inc_unless_negative(&adapter->contexts_num);
1104 - return rc >= 0 ? 0 : -EBUSY;
1105 + return rc ? 0 : -EBUSY;
1106 }
1107
1108 void cxl_adapter_context_put(struct cxl *adapter)
1109 diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
1110 index 5e047bfc0cc4..518e2dec2aa2 100644
1111 --- a/drivers/misc/vmw_balloon.c
1112 +++ b/drivers/misc/vmw_balloon.c
1113 @@ -341,7 +341,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
1114 success = false;
1115 }
1116
1117 - if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
1118 + /*
1119 + * 2MB pages are only supported with batching. If batching is for some
1120 + * reason disabled, do not use 2MB pages, since otherwise the legacy
1121 + * mechanism is used with 2MB pages, causing a failure.
1122 + */
1123 + if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
1124 + (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
1125 b->supported_page_sizes = 2;
1126 else
1127 b->supported_page_sizes = 1;
1128 @@ -450,7 +456,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
1129
1130 pfn32 = (u32)pfn;
1131 if (pfn32 != pfn)
1132 - return -1;
1133 + return -EINVAL;
1134
1135 STATS_INC(b->stats.lock[false]);
1136
1137 @@ -460,7 +466,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
1138
1139 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
1140 STATS_INC(b->stats.lock_fail[false]);
1141 - return 1;
1142 + return -EIO;
1143 }
1144
1145 static int vmballoon_send_batched_lock(struct vmballoon *b,
1146 @@ -597,11 +603,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
1147
1148 locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
1149 target);
1150 - if (locked > 0) {
1151 + if (locked) {
1152 STATS_INC(b->stats.refused_alloc[false]);
1153
1154 - if (hv_status == VMW_BALLOON_ERROR_RESET ||
1155 - hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
1156 + if (locked == -EIO &&
1157 + (hv_status == VMW_BALLOON_ERROR_RESET ||
1158 + hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
1159 vmballoon_free_page(page, false);
1160 return -EIO;
1161 }
1162 @@ -617,7 +624,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
1163 } else {
1164 vmballoon_free_page(page, false);
1165 }
1166 - return -EIO;
1167 + return locked;
1168 }
1169
1170 /* track allocated page */
1171 @@ -1029,29 +1036,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
1172 */
1173 static int vmballoon_vmci_init(struct vmballoon *b)
1174 {
1175 - int error = 0;
1176 + unsigned long error, dummy;
1177
1178 - if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
1179 - error = vmci_doorbell_create(&b->vmci_doorbell,
1180 - VMCI_FLAG_DELAYED_CB,
1181 - VMCI_PRIVILEGE_FLAG_RESTRICTED,
1182 - vmballoon_doorbell, b);
1183 -
1184 - if (error == VMCI_SUCCESS) {
1185 - VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
1186 - b->vmci_doorbell.context,
1187 - b->vmci_doorbell.resource, error);
1188 - STATS_INC(b->stats.doorbell_set);
1189 - }
1190 - }
1191 + if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
1192 + return 0;
1193
1194 - if (error != 0) {
1195 - vmballoon_vmci_cleanup(b);
1196 + error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
1197 + VMCI_PRIVILEGE_FLAG_RESTRICTED,
1198 + vmballoon_doorbell, b);
1199
1200 - return -EIO;
1201 - }
1202 + if (error != VMCI_SUCCESS)
1203 + goto fail;
1204 +
1205 + error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
1206 + b->vmci_doorbell.resource, dummy);
1207 +
1208 + STATS_INC(b->stats.doorbell_set);
1209 +
1210 + if (error != VMW_BALLOON_SUCCESS)
1211 + goto fail;
1212
1213 return 0;
1214 +fail:
1215 + vmballoon_vmci_cleanup(b);
1216 + return -EIO;
1217 }
1218
1219 /*
1220 @@ -1289,7 +1297,14 @@ static int __init vmballoon_init(void)
1221
1222 return 0;
1223 }
1224 -module_init(vmballoon_init);
1225 +
1226 +/*
1227 + * Using late_initcall() instead of module_init() allows the balloon to use the
1228 + * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
1229 + * VMCI is probed only after the balloon is initialized. If the balloon is used
1230 + * as a module, late_initcall() is equivalent to module_init().
1231 + */
1232 +late_initcall(vmballoon_init);
1233
1234 static void __exit vmballoon_exit(void)
1235 {
1236 diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h
1237 index edf710bc5e77..3de1457ad3a7 100644
1238 --- a/drivers/net/wireless/marvell/libertas/dev.h
1239 +++ b/drivers/net/wireless/marvell/libertas/dev.h
1240 @@ -103,6 +103,7 @@ struct lbs_private {
1241 u8 fw_ready;
1242 u8 surpriseremoved;
1243 u8 setup_fw_on_resume;
1244 + u8 power_up_on_resume;
1245 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
1246 void (*reset_card) (struct lbs_private *priv);
1247 int (*power_save) (struct lbs_private *priv);
1248 diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
1249 index 47f4a14c84fe..a0ae8d8763bb 100644
1250 --- a/drivers/net/wireless/marvell/libertas/if_sdio.c
1251 +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
1252 @@ -1341,15 +1341,23 @@ static void if_sdio_remove(struct sdio_func *func)
1253 static int if_sdio_suspend(struct device *dev)
1254 {
1255 struct sdio_func *func = dev_to_sdio_func(dev);
1256 - int ret;
1257 struct if_sdio_card *card = sdio_get_drvdata(func);
1258 + struct lbs_private *priv = card->priv;
1259 + int ret;
1260
1261 mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);
1262 + priv->power_up_on_resume = false;
1263
1264 /* If we're powered off anyway, just let the mmc layer remove the
1265 * card. */
1266 - if (!lbs_iface_active(card->priv))
1267 - return -ENOSYS;
1268 + if (!lbs_iface_active(priv)) {
1269 + if (priv->fw_ready) {
1270 + priv->power_up_on_resume = true;
1271 + if_sdio_power_off(card);
1272 + }
1273 +
1274 + return 0;
1275 + }
1276
1277 dev_info(dev, "%s: suspend: PM flags = 0x%x\n",
1278 sdio_func_id(func), flags);
1279 @@ -1357,9 +1365,14 @@ static int if_sdio_suspend(struct device *dev)
1280 /* If we aren't being asked to wake on anything, we should bail out
1281 * and let the SD stack power down the card.
1282 */
1283 - if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) {
1284 + if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
1285 dev_info(dev, "Suspend without wake params -- powering down card\n");
1286 - return -ENOSYS;
1287 + if (priv->fw_ready) {
1288 + priv->power_up_on_resume = true;
1289 + if_sdio_power_off(card);
1290 + }
1291 +
1292 + return 0;
1293 }
1294
1295 if (!(flags & MMC_PM_KEEP_POWER)) {
1296 @@ -1372,7 +1385,7 @@ static int if_sdio_suspend(struct device *dev)
1297 if (ret)
1298 return ret;
1299
1300 - ret = lbs_suspend(card->priv);
1301 + ret = lbs_suspend(priv);
1302 if (ret)
1303 return ret;
1304
1305 @@ -1387,6 +1400,11 @@ static int if_sdio_resume(struct device *dev)
1306
1307 dev_info(dev, "%s: resume: we're back\n", sdio_func_id(func));
1308
1309 + if (card->priv->power_up_on_resume) {
1310 + if_sdio_power_on(card);
1311 + wait_event(card->pwron_waitq, card->priv->fw_ready);
1312 + }
1313 +
1314 ret = lbs_resume(card->priv);
1315
1316 return ret;
1317 diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
1318 index c1a65ce31243..de6d3b749c60 100644
1319 --- a/drivers/nvdimm/bus.c
1320 +++ b/drivers/nvdimm/bus.c
1321 @@ -748,9 +748,9 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
1322 * overshoots the remainder by 4 bytes, assume it was
1323 * including 'status'.
1324 */
1325 - if (out_field[1] - 8 == remainder)
1326 + if (out_field[1] - 4 == remainder)
1327 return remainder;
1328 - return out_field[1] - 4;
1329 + return out_field[1] - 8;
1330 } else if (cmd == ND_CMD_CALL) {
1331 struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;
1332
1333 diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
1334 index b5c6b0636893..c0e06f0c19d1 100644
1335 --- a/drivers/pwm/pwm-tiehrpwm.c
1336 +++ b/drivers/pwm/pwm-tiehrpwm.c
1337 @@ -382,6 +382,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
1338 aqcsfrc_mask = AQCSFRC_CSFA_MASK;
1339 }
1340
1341 + /* Update shadow register first before modifying active register */
1342 + ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
1343 /*
1344 * Changes to immediate action on Action Qualifier. This puts
1345 * Action Qualifier control on PWM output from next TBCLK
1346 diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
1347 index 51e52446eacb..bd5ca548c265 100644
1348 --- a/drivers/rtc/rtc-omap.c
1349 +++ b/drivers/rtc/rtc-omap.c
1350 @@ -817,13 +817,6 @@ static int omap_rtc_probe(struct platform_device *pdev)
1351 goto err;
1352 }
1353
1354 - if (rtc->is_pmic_controller) {
1355 - if (!pm_power_off) {
1356 - omap_rtc_power_off_rtc = rtc;
1357 - pm_power_off = omap_rtc_power_off;
1358 - }
1359 - }
1360 -
1361 /* Support ext_wakeup pinconf */
1362 rtc_pinctrl_desc.name = dev_name(&pdev->dev);
1363
1364 @@ -833,6 +826,13 @@ static int omap_rtc_probe(struct platform_device *pdev)
1365 return PTR_ERR(rtc->pctldev);
1366 }
1367
1368 + if (rtc->is_pmic_controller) {
1369 + if (!pm_power_off) {
1370 + omap_rtc_power_off_rtc = rtc;
1371 + pm_power_off = omap_rtc_power_off;
1372 + }
1373 + }
1374 +
1375 return 0;
1376
1377 err:
1378 diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
1379 index 0d8f43a17edb..1905d20c229f 100644
1380 --- a/drivers/spi/spi-davinci.c
1381 +++ b/drivers/spi/spi-davinci.c
1382 @@ -215,7 +215,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
1383 pdata = &dspi->pdata;
1384
1385 /* program delay transfers if tx_delay is non zero */
1386 - if (spicfg->wdelay)
1387 + if (spicfg && spicfg->wdelay)
1388 spidat1 |= SPIDAT1_WDEL;
1389
1390 /*
1391 diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
1392 index a67b0ff6a362..db3b6e9151a8 100644
1393 --- a/drivers/spi/spi-fsl-dspi.c
1394 +++ b/drivers/spi/spi-fsl-dspi.c
1395 @@ -715,30 +715,30 @@ static int dspi_probe(struct platform_device *pdev)
1396 return PTR_ERR(dspi->regmap);
1397 }
1398
1399 + dspi->clk = devm_clk_get(&pdev->dev, "dspi");
1400 + if (IS_ERR(dspi->clk)) {
1401 + ret = PTR_ERR(dspi->clk);
1402 + dev_err(&pdev->dev, "unable to get clock\n");
1403 + goto out_master_put;
1404 + }
1405 + ret = clk_prepare_enable(dspi->clk);
1406 + if (ret)
1407 + goto out_master_put;
1408 +
1409 dspi_init(dspi);
1410 dspi->irq = platform_get_irq(pdev, 0);
1411 if (dspi->irq < 0) {
1412 dev_err(&pdev->dev, "can't get platform irq\n");
1413 ret = dspi->irq;
1414 - goto out_master_put;
1415 + goto out_clk_put;
1416 }
1417
1418 ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
1419 pdev->name, dspi);
1420 if (ret < 0) {
1421 dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
1422 - goto out_master_put;
1423 - }
1424 -
1425 - dspi->clk = devm_clk_get(&pdev->dev, "dspi");
1426 - if (IS_ERR(dspi->clk)) {
1427 - ret = PTR_ERR(dspi->clk);
1428 - dev_err(&pdev->dev, "unable to get clock\n");
1429 - goto out_master_put;
1430 + goto out_clk_put;
1431 }
1432 - ret = clk_prepare_enable(dspi->clk);
1433 - if (ret)
1434 - goto out_master_put;
1435
1436 master->max_speed_hz =
1437 clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
1438 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
1439 index 6cbd46c565f8..53e6db8b0330 100644
1440 --- a/drivers/tty/serial/serial_core.c
1441 +++ b/drivers/tty/serial/serial_core.c
1442 @@ -172,6 +172,7 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
1443 {
1444 struct uart_port *uport = uart_port_check(state);
1445 unsigned long page;
1446 + unsigned long flags = 0;
1447 int retval = 0;
1448
1449 if (uport->type == PORT_UNKNOWN)
1450 @@ -186,15 +187,18 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
1451 * Initialise and allocate the transmit and temporary
1452 * buffer.
1453 */
1454 - if (!state->xmit.buf) {
1455 - /* This is protected by the per port mutex */
1456 - page = get_zeroed_page(GFP_KERNEL);
1457 - if (!page)
1458 - return -ENOMEM;
1459 + page = get_zeroed_page(GFP_KERNEL);
1460 + if (!page)
1461 + return -ENOMEM;
1462
1463 + uart_port_lock(state, flags);
1464 + if (!state->xmit.buf) {
1465 state->xmit.buf = (unsigned char *) page;
1466 uart_circ_clear(&state->xmit);
1467 + } else {
1468 + free_page(page);
1469 }
1470 + uart_port_unlock(uport, flags);
1471
1472 retval = uport->ops->startup(uport);
1473 if (retval == 0) {
1474 @@ -253,6 +257,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
1475 {
1476 struct uart_port *uport = uart_port_check(state);
1477 struct tty_port *port = &state->port;
1478 + unsigned long flags = 0;
1479
1480 /*
1481 * Set the TTY IO error marker
1482 @@ -285,10 +290,12 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
1483 /*
1484 * Free the transmit buffer page.
1485 */
1486 + uart_port_lock(state, flags);
1487 if (state->xmit.buf) {
1488 free_page((unsigned long)state->xmit.buf);
1489 state->xmit.buf = NULL;
1490 }
1491 + uart_port_unlock(uport, flags);
1492 }
1493
1494 /**
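
The serial_core hunks close a race on state->xmit.buf by allocating outside the lock and publishing under it. A minimal sketch of the pattern; the lock and helper names are illustrative:

    static int xmit_buf_alloc(struct uart_state *state, struct uart_port *uport)
    {
            unsigned long flags;
            unsigned long page = get_zeroed_page(GFP_KERNEL); /* may sleep: stay unlocked */

            if (!page)
                    return -ENOMEM;

            spin_lock_irqsave(&uport->lock, flags);
            if (!state->xmit.buf) {
                    state->xmit.buf = (unsigned char *)page;  /* we won the race */
                    uart_circ_clear(&state->xmit);
            } else {
                    free_page(page);                          /* lost the race */
            }
            spin_unlock_irqrestore(&uport->lock, flags);
            return 0;
    }
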
1495 diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
1496 index 76c1ad96fb37..74273bc7ca9a 100644
1497 --- a/drivers/video/fbdev/core/fbmem.c
1498 +++ b/drivers/video/fbdev/core/fbmem.c
1499 @@ -1695,12 +1695,12 @@ static int do_register_framebuffer(struct fb_info *fb_info)
1500 return 0;
1501 }
1502
1503 -static int do_unregister_framebuffer(struct fb_info *fb_info)
1504 +static int unbind_console(struct fb_info *fb_info)
1505 {
1506 struct fb_event event;
1507 - int i, ret = 0;
1508 + int ret;
1509 + int i = fb_info->node;
1510
1511 - i = fb_info->node;
1512 if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
1513 return -EINVAL;
1514
1515 @@ -1715,17 +1715,29 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
1516 unlock_fb_info(fb_info);
1517 console_unlock();
1518
1519 + return ret;
1520 +}
1521 +
1522 +static int __unlink_framebuffer(struct fb_info *fb_info);
1523 +
1524 +static int do_unregister_framebuffer(struct fb_info *fb_info)
1525 +{
1526 + struct fb_event event;
1527 + int ret;
1528 +
1529 + ret = unbind_console(fb_info);
1530 +
1531 if (ret)
1532 return -EINVAL;
1533
1534 pm_vt_switch_unregister(fb_info->dev);
1535
1536 - unlink_framebuffer(fb_info);
1537 + __unlink_framebuffer(fb_info);
1538 if (fb_info->pixmap.addr &&
1539 (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
1540 kfree(fb_info->pixmap.addr);
1541 fb_destroy_modelist(&fb_info->modelist);
1542 - registered_fb[i] = NULL;
1543 + registered_fb[fb_info->node] = NULL;
1544 num_registered_fb--;
1545 fb_cleanup_device(fb_info);
1546 event.info = fb_info;
1547 @@ -1738,7 +1750,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
1548 return 0;
1549 }
1550
1551 -int unlink_framebuffer(struct fb_info *fb_info)
1552 +static int __unlink_framebuffer(struct fb_info *fb_info)
1553 {
1554 int i;
1555
1556 @@ -1750,6 +1762,20 @@ int unlink_framebuffer(struct fb_info *fb_info)
1557 device_destroy(fb_class, MKDEV(FB_MAJOR, i));
1558 fb_info->dev = NULL;
1559 }
1560 +
1561 + return 0;
1562 +}
1563 +
1564 +int unlink_framebuffer(struct fb_info *fb_info)
1565 +{
1566 + int ret;
1567 +
1568 + ret = __unlink_framebuffer(fb_info);
1569 + if (ret)
1570 + return ret;
1571 +
1572 + unbind_console(fb_info);
1573 +
1574 return 0;
1575 }
1576 EXPORT_SYMBOL(unlink_framebuffer);
1577 diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
1578 index f329eee6dc93..352abc39e891 100644
1579 --- a/fs/9p/xattr.c
1580 +++ b/fs/9p/xattr.c
1581 @@ -105,7 +105,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
1582 {
1583 struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
1584 struct iov_iter from;
1585 - int retval;
1586 + int retval, err;
1587
1588 iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);
1589
1590 @@ -126,7 +126,9 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
1591 retval);
1592 else
1593 p9_client_write(fid, 0, &from, &retval);
1594 - p9_client_clunk(fid);
1595 + err = p9_client_clunk(fid);
1596 + if (!retval && err)
1597 + retval = err;
1598 return retval;
1599 }
1600
1601 diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
1602 index a69ef4e9c24c..d6e4191276c0 100644
1603 --- a/fs/nfs/blocklayout/dev.c
1604 +++ b/fs/nfs/blocklayout/dev.c
1605 @@ -203,7 +203,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
1606 chunk = div_u64(offset, dev->chunk_size);
1607 div_u64_rem(chunk, dev->nr_children, &chunk_idx);
1608
1609 - if (chunk_idx > dev->nr_children) {
1610 + if (chunk_idx >= dev->nr_children) {
1611 dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
1612 __func__, chunk_idx, offset, dev->chunk_size);
1613 /* error, should not happen */
1614 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
1615 index cf5fdc25289a..e7ca62a86dab 100644
1616 --- a/fs/nfs/nfs4proc.c
1617 +++ b/fs/nfs/nfs4proc.c
1618 @@ -541,8 +541,15 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
1619 ret = -EIO;
1620 return ret;
1621 out_retry:
1622 - if (ret == 0)
1623 + if (ret == 0) {
1624 exception->retry = 1;
1625 + /*
1626 + * For NFS4ERR_MOVED, the client transport will need to
1627 + * be recomputed after migration recovery has completed.
1628 + */
1629 + if (errorcode == -NFS4ERR_MOVED)
1630 + rpc_task_release_transport(task);
1631 + }
1632 return ret;
1633 }
1634
1635 diff --git a/fs/quota/quota.c b/fs/quota/quota.c
1636 index 2d445425aad7..a2329f7ec638 100644
1637 --- a/fs/quota/quota.c
1638 +++ b/fs/quota/quota.c
1639 @@ -17,6 +17,7 @@
1640 #include <linux/quotaops.h>
1641 #include <linux/types.h>
1642 #include <linux/writeback.h>
1643 +#include <linux/nospec.h>
1644
1645 static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
1646 qid_t id)
1647 @@ -706,6 +707,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
1648
1649 if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
1650 return -EINVAL;
1651 + type = array_index_nospec(type, MAXQUOTAS);
1652 /*
1653 * Quota not supported on this fs? Check this before s_quota_types
1654 * since they needn't be set if quota is not supported at all.
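
The quota.c hunk is Spectre-v1 hardening: after the bounds check, the user-controlled index is clamped with array_index_nospec() so it cannot be used out of range even speculatively. The idiom in general form, against a hypothetical table:

    #include <linux/nospec.h>

    if (idx >= ARRAY_SIZE(table))
            return -EINVAL;
    idx = array_index_nospec(idx, ARRAY_SIZE(table));
    val = table[idx];   /* in bounds even under speculation */
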
1655 diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
1656 index 504658fd0d08..f8ce849e90d1 100644
1657 --- a/fs/ubifs/journal.c
1658 +++ b/fs/ubifs/journal.c
1659 @@ -661,6 +661,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
1660 spin_lock(&ui->ui_lock);
1661 ui->synced_i_size = ui->ui_size;
1662 spin_unlock(&ui->ui_lock);
1663 + if (xent) {
1664 + spin_lock(&host_ui->ui_lock);
1665 + host_ui->synced_i_size = host_ui->ui_size;
1666 + spin_unlock(&host_ui->ui_lock);
1667 + }
1668 mark_inode_clean(c, ui);
1669 mark_inode_clean(c, host_ui);
1670 return 0;
1671 @@ -1265,7 +1270,7 @@ static int recomp_data_node(const struct ubifs_info *c,
1672 int err, len, compr_type, out_len;
1673
1674 out_len = le32_to_cpu(dn->size);
1675 - buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
1676 + buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
1677 if (!buf)
1678 return -ENOMEM;
1679
1680 @@ -1344,7 +1349,16 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
1681 else if (err)
1682 goto out_free;
1683 else {
1684 - if (le32_to_cpu(dn->size) <= dlen)
1685 + int dn_len = le32_to_cpu(dn->size);
1686 +
1687 + if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
1688 + ubifs_err(c, "bad data node (block %u, inode %lu)",
1689 + blk, inode->i_ino);
1690 + ubifs_dump_node(c, dn);
1691 + goto out_free;
1692 + }
1693 +
1694 + if (dn_len <= dlen)
1695 dlen = 0; /* Nothing to do */
1696 else {
1697 int compr_type = le16_to_cpu(dn->compr_type);
1698 diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
1699 index 6c3a1abd0e22..780a436d8c45 100644
1700 --- a/fs/ubifs/lprops.c
1701 +++ b/fs/ubifs/lprops.c
1702 @@ -1091,10 +1091,6 @@ static int scan_check_cb(struct ubifs_info *c,
1703 }
1704 }
1705
1706 - buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
1707 - if (!buf)
1708 - return -ENOMEM;
1709 -
1710 /*
1711 * After an unclean unmount, empty and freeable LEBs
1712 * may contain garbage - do not scan them.
1713 @@ -1113,6 +1109,10 @@ static int scan_check_cb(struct ubifs_info *c,
1714 return LPT_SCAN_CONTINUE;
1715 }
1716
1717 + buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
1718 + if (!buf)
1719 + return -ENOMEM;
1720 +
1721 sleb = ubifs_scan(c, lnum, 0, buf, 0);
1722 if (IS_ERR(sleb)) {
1723 ret = PTR_ERR(sleb);
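
The scan_check_cb change moves the __vmalloc() below the early LPT_SCAN_CONTINUE returns, so those paths no longer leak the buffer. The shape of the fix, as a sketch with hypothetical helpers (should_skip(), do_scan()):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>

struct demo_ctx { int leb_size; };
static bool should_skip(const struct demo_ctx *c, int lnum);	/* hypothetical */
static void do_scan(struct demo_ctx *c, int lnum, void *buf);	/* hypothetical */

static int demo_scan_cb(struct demo_ctx *c, int lnum)
{
	void *buf;

	if (should_skip(c, lnum))
		return 0;		/* early return: nothing to free */

	buf = vmalloc(c->leb_size);	/* allocate only once it is needed */
	if (!buf)
		return -ENOMEM;
	do_scan(c, lnum, buf);
	vfree(buf);
	return 0;
}
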
1724 diff --git a/fs/xattr.c b/fs/xattr.c
1725 index 932b9061a3a2..093998872329 100644
1726 --- a/fs/xattr.c
1727 +++ b/fs/xattr.c
1728 @@ -540,7 +540,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
1729 if (error > 0) {
1730 if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
1731 (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
1732 - posix_acl_fix_xattr_to_user(kvalue, size);
1733 + posix_acl_fix_xattr_to_user(kvalue, error);
1734 if (size && copy_to_user(value, kvalue, error))
1735 error = -EFAULT;
1736 } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
1737 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
1738 index 23e129ef6726..e353f6600b0b 100644
1739 --- a/include/linux/intel-iommu.h
1740 +++ b/include/linux/intel-iommu.h
1741 @@ -125,6 +125,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
1742 * Extended Capability Register
1743 */
1744
1745 +#define ecap_dit(e) ((e >> 41) & 0x1)
1746 #define ecap_pasid(e) ((e >> 40) & 0x1)
1747 #define ecap_pss(e) ((e >> 35) & 0x1f)
1748 #define ecap_eafs(e) ((e >> 34) & 0x1)
1749 @@ -294,6 +295,7 @@ enum {
1750 #define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
1751 #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
1752 #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
1753 +#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
1754 #define QI_DEV_IOTLB_SIZE 1
1755 #define QI_DEV_IOTLB_MAX_INVS 32
1756
1757 @@ -318,6 +320,7 @@ enum {
1758 #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
1759 #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
1760 #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
1761 +#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
1762 #define QI_DEV_EIOTLB_MAX_INVS 32
1763
1764 #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
1765 @@ -463,9 +466,8 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
1766 u8 fm, u64 type);
1767 extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1768 unsigned int size_order, u64 type);
1769 -extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
1770 - u64 addr, unsigned mask);
1771 -
1772 +extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1773 + u16 qdep, u64 addr, unsigned mask);
1774 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
1775
1776 extern int dmar_ir_support(void);
1777 diff --git a/include/linux/pci.h b/include/linux/pci.h
1778 index 78c9f4a91d94..534cb43e8635 100644
1779 --- a/include/linux/pci.h
1780 +++ b/include/linux/pci.h
1781 @@ -2151,4 +2151,16 @@ static inline bool pci_ari_enabled(struct pci_bus *bus)
1782 /* provide the legacy pci_dma_* API */
1783 #include <linux/pci-dma-compat.h>
1784
1785 +#define pci_printk(level, pdev, fmt, arg...) \
1786 + dev_printk(level, &(pdev)->dev, fmt, ##arg)
1787 +
1788 +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
1789 +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
1790 +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
1791 +#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
1792 +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
1793 +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
1794 +#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
1795 +#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
1796 +
1797 #endif /* LINUX_PCI_H */
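
The new pci_* printk wrappers simply forward to the corresponding dev_* helpers with &pdev->dev, so messages automatically carry the PCI device name. A hypothetical usage sketch:

#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err) {
		pci_err(pdev, "enable failed: %d\n", err);
		return err;
	}
	pci_info(pdev, "device enabled\n");
	return 0;
}
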
1798 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
1799 index 333ad11b3dd9..44161a4c124c 100644
1800 --- a/include/linux/sunrpc/clnt.h
1801 +++ b/include/linux/sunrpc/clnt.h
1802 @@ -155,6 +155,7 @@ int rpc_switch_client_transport(struct rpc_clnt *,
1803
1804 void rpc_shutdown_client(struct rpc_clnt *);
1805 void rpc_release_client(struct rpc_clnt *);
1806 +void rpc_task_release_transport(struct rpc_task *);
1807 void rpc_task_release_client(struct rpc_task *);
1808
1809 int rpcb_create_local(struct net *);
1810 diff --git a/include/linux/verification.h b/include/linux/verification.h
1811 index a10549a6c7cd..cfa4730d607a 100644
1812 --- a/include/linux/verification.h
1813 +++ b/include/linux/verification.h
1814 @@ -12,6 +12,12 @@
1815 #ifndef _LINUX_VERIFICATION_H
1816 #define _LINUX_VERIFICATION_H
1817
1818 +/*
1819 + * Indicate that both builtin trusted keys and secondary trusted keys
1820 + * should be used.
1821 + */
1822 +#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL)
1823 +
1824 /*
1825 * The use to which an asymmetric key is being put.
1826 */
1827 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
1828 index f9466fa54ba4..2ad9a6d37ff4 100644
1829 --- a/include/video/udlfb.h
1830 +++ b/include/video/udlfb.h
1831 @@ -87,7 +87,7 @@ struct dlfb_data {
1832 #define MIN_RAW_PIX_BYTES 2
1833 #define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
1834
1835 -#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
1836 +#define DL_DEFIO_WRITE_DELAY msecs_to_jiffies(HZ <= 300 ? 4 : 10) /* optimal value for 720p video */
1837 #define DL_DEFIO_WRITE_DISABLE (HZ*60) /* "disable" with long delay */
1838
1839 /* remove these once align.h patch is taken into kernel */
1840 diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
1841 index e8517b63eb37..dd2b5a4d89a5 100644
1842 --- a/kernel/power/Kconfig
1843 +++ b/kernel/power/Kconfig
1844 @@ -105,6 +105,7 @@ config PM_SLEEP
1845 def_bool y
1846 depends on SUSPEND || HIBERNATE_CALLBACKS
1847 select PM
1848 + select SRCU
1849
1850 config PM_SLEEP_SMP
1851 def_bool y
1852 diff --git a/kernel/printk/nmi.c b/kernel/printk/nmi.c
1853 index 5fa65aa904d3..2c3e7f024c15 100644
1854 --- a/kernel/printk/nmi.c
1855 +++ b/kernel/printk/nmi.c
1856 @@ -260,12 +260,12 @@ void __init printk_nmi_init(void)
1857 printk_nmi_flush();
1858 }
1859
1860 -void printk_nmi_enter(void)
1861 +void notrace printk_nmi_enter(void)
1862 {
1863 this_cpu_write(printk_func, vprintk_nmi);
1864 }
1865
1866 -void printk_nmi_exit(void)
1867 +void notrace printk_nmi_exit(void)
1868 {
1869 this_cpu_write(printk_func, vprintk_default);
1870 }
1871 diff --git a/kernel/sys.c b/kernel/sys.c
1872 index b13b530b5e0f..6c4e9b533258 100644
1873 --- a/kernel/sys.c
1874 +++ b/kernel/sys.c
1875 @@ -1142,18 +1142,19 @@ static int override_release(char __user *release, size_t len)
1876
1877 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1878 {
1879 - int errno = 0;
1880 + struct new_utsname tmp;
1881
1882 down_read(&uts_sem);
1883 - if (copy_to_user(name, utsname(), sizeof *name))
1884 - errno = -EFAULT;
1885 + memcpy(&tmp, utsname(), sizeof(tmp));
1886 up_read(&uts_sem);
1887 + if (copy_to_user(name, &tmp, sizeof(tmp)))
1888 + return -EFAULT;
1889
1890 - if (!errno && override_release(name->release, sizeof(name->release)))
1891 - errno = -EFAULT;
1892 - if (!errno && override_architecture(name))
1893 - errno = -EFAULT;
1894 - return errno;
1895 + if (override_release(name->release, sizeof(name->release)))
1896 + return -EFAULT;
1897 + if (override_architecture(name))
1898 + return -EFAULT;
1899 + return 0;
1900 }
1901
1902 #ifdef __ARCH_WANT_SYS_OLD_UNAME
1903 @@ -1162,55 +1163,46 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1904 */
1905 SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1906 {
1907 - int error = 0;
1908 + struct old_utsname tmp;
1909
1910 if (!name)
1911 return -EFAULT;
1912
1913 down_read(&uts_sem);
1914 - if (copy_to_user(name, utsname(), sizeof(*name)))
1915 - error = -EFAULT;
1916 + memcpy(&tmp, utsname(), sizeof(tmp));
1917 up_read(&uts_sem);
1918 + if (copy_to_user(name, &tmp, sizeof(tmp)))
1919 + return -EFAULT;
1920
1921 - if (!error && override_release(name->release, sizeof(name->release)))
1922 - error = -EFAULT;
1923 - if (!error && override_architecture(name))
1924 - error = -EFAULT;
1925 - return error;
1926 + if (override_release(name->release, sizeof(name->release)))
1927 + return -EFAULT;
1928 + if (override_architecture(name))
1929 + return -EFAULT;
1930 + return 0;
1931 }
1932
1933 SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1934 {
1935 - int error;
1936 + struct oldold_utsname tmp = {};
1937
1938 if (!name)
1939 return -EFAULT;
1940 - if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
1941 - return -EFAULT;
1942
1943 down_read(&uts_sem);
1944 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
1945 - __OLD_UTS_LEN);
1946 - error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
1947 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
1948 - __OLD_UTS_LEN);
1949 - error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
1950 - error |= __copy_to_user(&name->release, &utsname()->release,
1951 - __OLD_UTS_LEN);
1952 - error |= __put_user(0, name->release + __OLD_UTS_LEN);
1953 - error |= __copy_to_user(&name->version, &utsname()->version,
1954 - __OLD_UTS_LEN);
1955 - error |= __put_user(0, name->version + __OLD_UTS_LEN);
1956 - error |= __copy_to_user(&name->machine, &utsname()->machine,
1957 - __OLD_UTS_LEN);
1958 - error |= __put_user(0, name->machine + __OLD_UTS_LEN);
1959 + memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
1960 + memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
1961 + memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
1962 + memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
1963 + memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
1964 up_read(&uts_sem);
1965 + if (copy_to_user(name, &tmp, sizeof(tmp)))
1966 + return -EFAULT;
1967
1968 - if (!error && override_architecture(name))
1969 - error = -EFAULT;
1970 - if (!error && override_release(name->release, sizeof(name->release)))
1971 - error = -EFAULT;
1972 - return error ? -EFAULT : 0;
1973 + if (override_architecture(name))
1974 + return -EFAULT;
1975 + if (override_release(name->release, sizeof(name->release)))
1976 + return -EFAULT;
1977 + return 0;
1978 }
1979 #endif
1980
1981 @@ -1224,17 +1216,18 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1982
1983 if (len < 0 || len > __NEW_UTS_LEN)
1984 return -EINVAL;
1985 - down_write(&uts_sem);
1986 errno = -EFAULT;
1987 if (!copy_from_user(tmp, name, len)) {
1988 - struct new_utsname *u = utsname();
1989 + struct new_utsname *u;
1990
1991 + down_write(&uts_sem);
1992 + u = utsname();
1993 memcpy(u->nodename, tmp, len);
1994 memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1995 errno = 0;
1996 uts_proc_notify(UTS_PROC_HOSTNAME);
1997 + up_write(&uts_sem);
1998 }
1999 - up_write(&uts_sem);
2000 return errno;
2001 }
2002
2003 @@ -1242,8 +1235,9 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
2004
2005 SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
2006 {
2007 - int i, errno;
2008 + int i;
2009 struct new_utsname *u;
2010 + char tmp[__NEW_UTS_LEN + 1];
2011
2012 if (len < 0)
2013 return -EINVAL;
2014 @@ -1252,11 +1246,11 @@ SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
2015 i = 1 + strlen(u->nodename);
2016 if (i > len)
2017 i = len;
2018 - errno = 0;
2019 - if (copy_to_user(name, u->nodename, i))
2020 - errno = -EFAULT;
2021 + memcpy(tmp, u->nodename, i);
2022 up_read(&uts_sem);
2023 - return errno;
2024 + if (copy_to_user(name, tmp, i))
2025 + return -EFAULT;
2026 + return 0;
2027 }
2028
2029 #endif
2030 @@ -1275,17 +1269,18 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
2031 if (len < 0 || len > __NEW_UTS_LEN)
2032 return -EINVAL;
2033
2034 - down_write(&uts_sem);
2035 errno = -EFAULT;
2036 if (!copy_from_user(tmp, name, len)) {
2037 - struct new_utsname *u = utsname();
2038 + struct new_utsname *u;
2039
2040 + down_write(&uts_sem);
2041 + u = utsname();
2042 memcpy(u->domainname, tmp, len);
2043 memset(u->domainname + len, 0, sizeof(u->domainname) - len);
2044 errno = 0;
2045 uts_proc_notify(UTS_PROC_DOMAINNAME);
2046 + up_write(&uts_sem);
2047 }
2048 - up_write(&uts_sem);
2049 return errno;
2050 }
2051
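
Every uname/hostname path in kernel/sys.c now snapshots the UTS data into a stack buffer while holding uts_sem and talks to user space only after dropping the lock: copy_to_user() can fault and sleep, which must not happen under the semaphore, and writers in turn take the lock only around the kernel-to-kernel copy. A minimal sketch of the snapshot-then-copy shape, with hypothetical names (demo_sem, demo_name):

#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static DECLARE_RWSEM(demo_sem);
static char demo_name[64];

static int demo_read_name(char __user *ubuf)
{
	char tmp[sizeof(demo_name)];

	down_read(&demo_sem);			/* lock held only for the */
	memcpy(tmp, demo_name, sizeof(tmp));	/* kernel-to-kernel copy  */
	up_read(&demo_sem);

	if (copy_to_user(ubuf, tmp, sizeof(tmp)))	/* may fault and sleep */
		return -EFAULT;
	return 0;
}
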
2052 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
2053 index 4e17d55ba127..bfa8bb3a6e19 100644
2054 --- a/kernel/trace/blktrace.c
2055 +++ b/kernel/trace/blktrace.c
2056 @@ -1720,6 +1720,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
2057 mutex_lock(&bdev->bd_mutex);
2058
2059 if (attr == &dev_attr_enable) {
2060 + if (!!value == !!q->blk_trace) {
2061 + ret = 0;
2062 + goto out_unlock_bdev;
2063 + }
2064 if (value)
2065 ret = blk_trace_setup_queue(q, bdev);
2066 else
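
The enable-store path above now bails out early when the requested state matches the current one; the double negation normalizes a user-supplied integer and a pointer to 0/1 before comparing them. A sketch with hypothetical enable/disable helpers:

#include <linux/types.h>

static int demo_enable(void);		/* hypothetical */
static int demo_disable(void);		/* hypothetical */

static int demo_toggle(u64 value, const void *state)
{
	/* !!value is 1 for any non-zero write; !!state is 1 when a
	 * state object already exists. Equal means nothing to do. */
	if (!!value == !!state)
		return 0;
	return value ? demo_enable() : demo_disable();
}
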
2067 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2068 index 148c2210a2b8..a47339b156ce 100644
2069 --- a/kernel/trace/trace.c
2070 +++ b/kernel/trace/trace.c
2071 @@ -6920,7 +6920,9 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
2072
2073 if (buffer) {
2074 mutex_lock(&trace_types_lock);
2075 - if (val) {
2076 + if (!!val == tracer_tracing_is_on(tr)) {
2077 + val = 0; /* do nothing */
2078 + } else if (val) {
2079 tracer_tracing_on(tr);
2080 if (tr->current_trace->start)
2081 tr->current_trace->start(tr);
2082 diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
2083 index 788262984818..f0ab801a6437 100644
2084 --- a/kernel/trace/trace_uprobe.c
2085 +++ b/kernel/trace/trace_uprobe.c
2086 @@ -969,7 +969,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
2087
2088 list_del_rcu(&link->list);
2089 /* synchronize with u{,ret}probe_trace_func */
2090 - synchronize_sched();
2091 + synchronize_rcu();
2092 kfree(link);
2093
2094 if (!list_empty(&tu->tp.files))
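
The uprobe handlers that traverse this list run under rcu_read_lock(), so the free must wait for a plain RCU grace period; synchronize_sched() only covers preemption-disabled sections and was the wrong flavour here. The canonical removal sequence, sketched with a hypothetical entry type:

#include <linux/rculist.h>
#include <linux/slab.h>

struct demo_entry {
	struct list_head list;
};

static void demo_remove(struct demo_entry *e)
{
	list_del_rcu(&e->list);
	/* Wait until no rcu_read_lock() reader can still see e ... */
	synchronize_rcu();
	/* ... and only then free it. */
	kfree(e);
}
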
2095 diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
2096 index 86b7854fec8e..f789bbba9b8e 100644
2097 --- a/kernel/user_namespace.c
2098 +++ b/kernel/user_namespace.c
2099 @@ -649,7 +649,16 @@ static ssize_t map_write(struct file *file, const char __user *buf,
2100 unsigned idx;
2101 struct uid_gid_extent *extent = NULL;
2102 char *kbuf = NULL, *pos, *next_line;
2103 - ssize_t ret = -EINVAL;
2104 + ssize_t ret;
2105 +
2106 + /* Only allow < page size writes at the beginning of the file */
2107 + if ((*ppos != 0) || (count >= PAGE_SIZE))
2108 + return -EINVAL;
2109 +
2110 + /* Slurp in the user data */
2111 + kbuf = memdup_user_nul(buf, count);
2112 + if (IS_ERR(kbuf))
2113 + return PTR_ERR(kbuf);
2114
2115 /*
2116 * The userns_state_mutex serializes all writes to any given map.
2117 @@ -683,19 +692,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
2118 if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
2119 goto out;
2120
2121 - /* Only allow < page size writes at the beginning of the file */
2122 - ret = -EINVAL;
2123 - if ((*ppos != 0) || (count >= PAGE_SIZE))
2124 - goto out;
2125 -
2126 - /* Slurp in the user data */
2127 - kbuf = memdup_user_nul(buf, count);
2128 - if (IS_ERR(kbuf)) {
2129 - ret = PTR_ERR(kbuf);
2130 - kbuf = NULL;
2131 - goto out;
2132 - }
2133 -
2134 /* Parse the user data */
2135 ret = -EINVAL;
2136 pos = kbuf;
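
map_write() now validates the write and copies in the user buffer with memdup_user_nul() before userns_state_mutex is taken: the copy can fault, and doing it unlocked also keeps the error paths from having to unwind the lock. A sketch of the copy-before-lock ordering, with a hypothetical parser:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(demo_mutex);
static ssize_t demo_parse(char *buf);	/* hypothetical */

static ssize_t demo_write(const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t ret;

	/* Validate and copy in the user data before taking any lock. */
	if (*ppos != 0 || count >= PAGE_SIZE)
		return -EINVAL;
	kbuf = memdup_user_nul(buf, count);	/* may fault: stay unlocked */
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	mutex_lock(&demo_mutex);	/* the locked region never */
	ret = demo_parse(kbuf);		/* touches user memory     */
	mutex_unlock(&demo_mutex);

	kfree(kbuf);
	return ret;
}
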
2137 diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
2138 index c8eac43267e9..d2b3b2973456 100644
2139 --- a/kernel/utsname_sysctl.c
2140 +++ b/kernel/utsname_sysctl.c
2141 @@ -17,7 +17,7 @@
2142
2143 #ifdef CONFIG_PROC_SYSCTL
2144
2145 -static void *get_uts(struct ctl_table *table, int write)
2146 +static void *get_uts(struct ctl_table *table)
2147 {
2148 char *which = table->data;
2149 struct uts_namespace *uts_ns;
2150 @@ -25,21 +25,9 @@ static void *get_uts(struct ctl_table *table, int write)
2151 uts_ns = current->nsproxy->uts_ns;
2152 which = (which - (char *)&init_uts_ns) + (char *)uts_ns;
2153
2154 - if (!write)
2155 - down_read(&uts_sem);
2156 - else
2157 - down_write(&uts_sem);
2158 return which;
2159 }
2160
2161 -static void put_uts(struct ctl_table *table, int write, void *which)
2162 -{
2163 - if (!write)
2164 - up_read(&uts_sem);
2165 - else
2166 - up_write(&uts_sem);
2167 -}
2168 -
2169 /*
2170 * Special case of dostring for the UTS structure. This has locks
2171 * to observe. Should this be in kernel/sys.c ????
2172 @@ -49,13 +37,34 @@ static int proc_do_uts_string(struct ctl_table *table, int write,
2173 {
2174 struct ctl_table uts_table;
2175 int r;
2176 + char tmp_data[__NEW_UTS_LEN + 1];
2177 +
2178 memcpy(&uts_table, table, sizeof(uts_table));
2179 - uts_table.data = get_uts(table, write);
2180 + uts_table.data = tmp_data;
2181 +
2182 + /*
2183 + * Buffer the value in tmp_data so that proc_dostring() can be called
2184 + * without holding any locks.
2185 + * We also need to read the original value in the write==1 case to
2186 + * support partial writes.
2187 + */
2188 + down_read(&uts_sem);
2189 + memcpy(tmp_data, get_uts(table), sizeof(tmp_data));
2190 + up_read(&uts_sem);
2191 r = proc_dostring(&uts_table, write, buffer, lenp, ppos);
2192 - put_uts(table, write, uts_table.data);
2193
2194 - if (write)
2195 + if (write) {
2196 + /*
2197 + * Write back the new value.
2198 + * Note that, since we dropped uts_sem, the result can
2199 + * theoretically be incorrect if there are two parallel writes
2200 + * at non-zero offsets to the same sysctl.
2201 + */
2202 + down_write(&uts_sem);
2203 + memcpy(get_uts(table), tmp_data, sizeof(tmp_data));
2204 + up_write(&uts_sem);
2205 proc_sys_poll_notify(table->poll);
2206 + }
2207
2208 return r;
2209 }
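
proc_do_uts_string() follows the same discipline: proc_dostring() operates on a private copy so no lock is held across the user-space access, at the cost of the race between parallel partial writes that the comment acknowledges. A condensed sketch with hypothetical backing storage:

#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/sysctl.h>

static DECLARE_RWSEM(demo_sem);
static char demo_value[65];

static int demo_do_string(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t = *table;
	char tmp[sizeof(demo_value)];
	int r;

	down_read(&demo_sem);			/* read the current value ... */
	memcpy(tmp, demo_value, sizeof(tmp));
	up_read(&demo_sem);

	t.data = tmp;
	t.maxlen = sizeof(tmp);
	r = proc_dostring(&t, write, buffer, lenp, ppos);	/* unlocked */

	if (write) {
		down_write(&demo_sem);		/* ... and write it back */
		memcpy(demo_value, tmp, sizeof(tmp));
		up_write(&demo_sem);
	}
	return r;
}
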
2210 diff --git a/mm/memory.c b/mm/memory.c
2211 index 0ff735601654..f3fef1df7402 100644
2212 --- a/mm/memory.c
2213 +++ b/mm/memory.c
2214 @@ -373,15 +373,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
2215 {
2216 struct mmu_table_batch **batch = &tlb->batch;
2217
2218 - /*
2219 - * When there's less then two users of this mm there cannot be a
2220 - * concurrent page-table walk.
2221 - */
2222 - if (atomic_read(&tlb->mm->mm_users) < 2) {
2223 - __tlb_remove_table(table);
2224 - return;
2225 - }
2226 -
2227 if (*batch == NULL) {
2228 *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
2229 if (*batch == NULL) {
2230 diff --git a/net/9p/client.c b/net/9p/client.c
2231 index 1fd60190177e..98d299ea52ee 100644
2232 --- a/net/9p/client.c
2233 +++ b/net/9p/client.c
2234 @@ -931,7 +931,7 @@ static int p9_client_version(struct p9_client *c)
2235 {
2236 int err = 0;
2237 struct p9_req_t *req;
2238 - char *version;
2239 + char *version = NULL;
2240 int msize;
2241
2242 p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
2243 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
2244 index 7bc2208b6cc4..2b543532e2f1 100644
2245 --- a/net/9p/trans_fd.c
2246 +++ b/net/9p/trans_fd.c
2247 @@ -181,6 +181,8 @@ static void p9_mux_poll_stop(struct p9_conn *m)
2248 spin_lock_irqsave(&p9_poll_lock, flags);
2249 list_del_init(&m->poll_pending_link);
2250 spin_unlock_irqrestore(&p9_poll_lock, flags);
2251 +
2252 + flush_work(&p9_poll_work);
2253 }
2254
2255 /**
2256 @@ -937,7 +939,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
2257 if (err < 0)
2258 return err;
2259
2260 - if (valid_ipaddr4(addr) < 0)
2261 + if (addr == NULL || valid_ipaddr4(addr) < 0)
2262 return -EINVAL;
2263
2264 csocket = NULL;
2265 @@ -985,6 +987,9 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
2266
2267 csocket = NULL;
2268
2269 + if (addr == NULL)
2270 + return -EINVAL;
2271 +
2272 if (strlen(addr) >= UNIX_PATH_MAX) {
2273 pr_err("%s (%d): address too long: %s\n",
2274 __func__, task_pid_nr(current), addr);
2275 diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
2276 index 553ed4ecb6a0..5a2ad4707463 100644
2277 --- a/net/9p/trans_rdma.c
2278 +++ b/net/9p/trans_rdma.c
2279 @@ -622,6 +622,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
2280 struct rdma_conn_param conn_param;
2281 struct ib_qp_init_attr qp_attr;
2282
2283 + if (addr == NULL)
2284 + return -EINVAL;
2285 +
2286 /* Parse the transport specific mount options */
2287 err = parse_opts(args, &opts);
2288 if (err < 0)
2289 diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
2290 index 3aa5a93ad107..da0d3b257459 100644
2291 --- a/net/9p/trans_virtio.c
2292 +++ b/net/9p/trans_virtio.c
2293 @@ -189,7 +189,7 @@ static int pack_sg_list(struct scatterlist *sg, int start,
2294 s = rest_of_page(data);
2295 if (s > count)
2296 s = count;
2297 - BUG_ON(index > limit);
2298 + BUG_ON(index >= limit);
2299 /* Make sure we don't terminate early. */
2300 sg_unmark_end(&sg[index]);
2301 sg_set_buf(&sg[index++], data, s);
2302 @@ -234,6 +234,7 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
2303 s = PAGE_SIZE - data_off;
2304 if (s > count)
2305 s = count;
2306 + BUG_ON(index >= limit);
2307 /* Make sure we don't terminate early. */
2308 sg_unmark_end(&sg[index]);
2309 sg_set_page(&sg[index++], pdata[i++], s, data_off);
2310 @@ -406,6 +407,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
2311 p9_debug(P9_DEBUG_TRANS, "virtio request\n");
2312
2313 if (uodata) {
2314 + __le32 sz;
2315 int n = p9_get_mapped_pages(chan, &out_pages, uodata,
2316 outlen, &offs, &need_drop);
2317 if (n < 0)
2318 @@ -416,6 +418,12 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
2319 memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
2320 outlen = n;
2321 }
2322 + /* The size field of the message must include the length of the
2323 + * header and the length of the data. We didn't actually know
2324 + * the length of the data until this point so add it in now.
2325 + */
2326 + sz = cpu_to_le32(req->tc->size + outlen);
2327 + memcpy(&req->tc->sdata[0], &sz, sizeof(sz));
2328 } else if (uidata) {
2329 int n = p9_get_mapped_pages(chan, &in_pages, uidata,
2330 inlen, &offs, &need_drop);
2331 @@ -643,6 +651,9 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
2332 int ret = -ENOENT;
2333 int found = 0;
2334
2335 + if (devname == NULL)
2336 + return -EINVAL;
2337 +
2338 mutex_lock(&virtio_9p_lock);
2339 list_for_each_entry(chan, &virtio_chan_list, chan_list) {
2340 if (!strncmp(devname, chan->tag, chan->tag_len) &&
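
The zero-copy write fix above relies on the 9P wire format: every message begins with a 32-bit little-endian size[4] field covering the header plus the payload, and for zero-copy writes the payload length is only known after the user pages have been pinned, so the size is patched in afterwards. Sketched with hypothetical length parameters:

#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static void demo_patch_size(u8 *msg, u32 hdr_len, u32 payload_len)
{
	/* size[4] is the first field of every 9P message and must count
	 * both the header and the data that follows it. */
	__le32 sz = cpu_to_le32(hdr_len + payload_len);

	memcpy(msg, &sz, sizeof(sz));
}
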
2341 diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
2342 index dbb476d7d38f..50ed47559bb7 100644
2343 --- a/net/ieee802154/6lowpan/tx.c
2344 +++ b/net/ieee802154/6lowpan/tx.c
2345 @@ -266,9 +266,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
2346 /* We must take a copy of the skb before we modify/replace the ipv6
2347 * header as the header could be used elsewhere
2348 */
2349 - skb = skb_unshare(skb, GFP_ATOMIC);
2350 - if (!skb)
2351 - return NET_XMIT_DROP;
2352 + if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
2353 + skb_tailroom(skb) < ldev->needed_tailroom)) {
2354 + struct sk_buff *nskb;
2355 +
2356 + nskb = skb_copy_expand(skb, ldev->needed_headroom,
2357 + ldev->needed_tailroom, GFP_ATOMIC);
2358 + if (likely(nskb)) {
2359 + consume_skb(skb);
2360 + skb = nskb;
2361 + } else {
2362 + kfree_skb(skb);
2363 + return NET_XMIT_DROP;
2364 + }
2365 + } else {
2366 + skb = skb_unshare(skb, GFP_ATOMIC);
2367 + if (!skb)
2368 + return NET_XMIT_DROP;
2369 + }
2370
2371 ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
2372 if (ret < 0) {
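
Both this hunk and the mac802154 one below apply the same skb discipline: if the buffer lacks the head- or tailroom the device needs, build an enlarged private copy with skb_copy_expand() and consume the original; otherwise skb_unshare() is enough. A condensed sketch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Returns an skb with enough room for dev, or NULL (skb freed). */
static struct sk_buff *demo_fixup_room(struct sk_buff *skb,
				       struct net_device *dev)
{
	if (skb_headroom(skb) < dev->needed_headroom ||
	    skb_tailroom(skb) < dev->needed_tailroom) {
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, dev->needed_headroom,
				       dev->needed_tailroom, GFP_ATOMIC);
		if (!nskb) {
			kfree_skb(skb);	/* drop on allocation failure */
			return NULL;
		}
		consume_skb(skb);	/* copy succeeded: not a drop */
		return nskb;
	}
	/* Enough room: just make sure we own the data before editing. */
	return skb_unshare(skb, GFP_ATOMIC);
}
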
2373 diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
2374 index 7e253455f9dd..bcd1a5e6ebf4 100644
2375 --- a/net/mac802154/tx.c
2376 +++ b/net/mac802154/tx.c
2377 @@ -63,8 +63,21 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
2378 int ret;
2379
2380 if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
2381 - u16 crc = crc_ccitt(0, skb->data, skb->len);
2382 + struct sk_buff *nskb;
2383 + u16 crc;
2384 +
2385 + if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
2386 + nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
2387 + GFP_ATOMIC);
2388 + if (likely(nskb)) {
2389 + consume_skb(skb);
2390 + skb = nskb;
2391 + } else {
2392 + goto err_tx;
2393 + }
2394 + }
2395
2396 + crc = crc_ccitt(0, skb->data, skb->len);
2397 put_unaligned_le16(crc, skb_put(skb, 2));
2398 }
2399
2400 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
2401 index b2ae4f150ec6..244eac1bd648 100644
2402 --- a/net/sunrpc/clnt.c
2403 +++ b/net/sunrpc/clnt.c
2404 @@ -965,10 +965,20 @@ out:
2405 }
2406 EXPORT_SYMBOL_GPL(rpc_bind_new_program);
2407
2408 +void rpc_task_release_transport(struct rpc_task *task)
2409 +{
2410 + struct rpc_xprt *xprt = task->tk_xprt;
2411 +
2412 + if (xprt) {
2413 + task->tk_xprt = NULL;
2414 + xprt_put(xprt);
2415 + }
2416 +}
2417 +EXPORT_SYMBOL_GPL(rpc_task_release_transport);
2418 +
2419 void rpc_task_release_client(struct rpc_task *task)
2420 {
2421 struct rpc_clnt *clnt = task->tk_client;
2422 - struct rpc_xprt *xprt = task->tk_xprt;
2423
2424 if (clnt != NULL) {
2425 /* Remove from client task list */
2426 @@ -979,12 +989,14 @@ void rpc_task_release_client(struct rpc_task *task)
2427
2428 rpc_release_client(clnt);
2429 }
2430 + rpc_task_release_transport(task);
2431 +}
2432
2433 - if (xprt != NULL) {
2434 - task->tk_xprt = NULL;
2435 -
2436 - xprt_put(xprt);
2437 - }
2438 +static
2439 +void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
2440 +{
2441 + if (!task->tk_xprt)
2442 + task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
2443 }
2444
2445 static
2446 @@ -992,8 +1004,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
2447 {
2448
2449 if (clnt != NULL) {
2450 - if (task->tk_xprt == NULL)
2451 - task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
2452 + rpc_task_set_transport(task, clnt);
2453 task->tk_client = clnt;
2454 atomic_inc(&clnt->cl_count);
2455 if (clnt->cl_softrtry)
2456 @@ -1550,6 +1561,7 @@ call_start(struct rpc_task *task)
2457 task->tk_msg.rpc_proc->p_count++;
2458 clnt->cl_stats->rpccnt++;
2459 task->tk_action = call_reserve;
2460 + rpc_task_set_transport(task, clnt);
2461 }
2462
2463 /*
2464 diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
2465 index 78bd632f144d..29d015e2d900 100644
2466 --- a/tools/perf/util/auxtrace.c
2467 +++ b/tools/perf/util/auxtrace.c
2468 @@ -195,6 +195,9 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues,
2469 for (i = 0; i < queues->nr_queues; i++) {
2470 list_splice_tail(&queues->queue_array[i].head,
2471 &queue_array[i].head);
2472 + queue_array[i].tid = queues->queue_array[i].tid;
2473 + queue_array[i].cpu = queues->queue_array[i].cpu;
2474 + queue_array[i].set = queues->queue_array[i].set;
2475 queue_array[i].priv = queues->queue_array[i].priv;
2476 }
2477