Magellan Linux

Contents of /trunk/kernel-alx/patches-4.14/0168-4.14.69-all-fixes.patch



Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 105746 bytes
-added up to patches-4.14.79
1 diff --git a/Makefile b/Makefile
2 index 3da579058926..3ecda1d2e23a 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 14
9 -SUBLEVEL = 68
10 +SUBLEVEL = 69
11 EXTRAVERSION =
12 NAME = Petit Gorille
13
14 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
15 index a48976dc9bcd..918c3938ef66 100644
16 --- a/arch/alpha/kernel/osf_sys.c
17 +++ b/arch/alpha/kernel/osf_sys.c
18 @@ -530,24 +530,19 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path,
19 SYSCALL_DEFINE1(osf_utsname, char __user *, name)
20 {
21 int error;
22 + char tmp[5 * 32];
23
24 down_read(&uts_sem);
25 - error = -EFAULT;
26 - if (copy_to_user(name + 0, utsname()->sysname, 32))
27 - goto out;
28 - if (copy_to_user(name + 32, utsname()->nodename, 32))
29 - goto out;
30 - if (copy_to_user(name + 64, utsname()->release, 32))
31 - goto out;
32 - if (copy_to_user(name + 96, utsname()->version, 32))
33 - goto out;
34 - if (copy_to_user(name + 128, utsname()->machine, 32))
35 - goto out;
36 + memcpy(tmp + 0 * 32, utsname()->sysname, 32);
37 + memcpy(tmp + 1 * 32, utsname()->nodename, 32);
38 + memcpy(tmp + 2 * 32, utsname()->release, 32);
39 + memcpy(tmp + 3 * 32, utsname()->version, 32);
40 + memcpy(tmp + 4 * 32, utsname()->machine, 32);
41 + up_read(&uts_sem);
42
43 - error = 0;
44 - out:
45 - up_read(&uts_sem);
46 - return error;
47 + if (copy_to_user(name, tmp, sizeof(tmp)))
48 + return -EFAULT;
49 + return 0;
50 }
51
52 SYSCALL_DEFINE0(getpagesize)
53 @@ -567,18 +562,21 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
54 {
55 int len, err = 0;
56 char *kname;
57 + char tmp[32];
58
59 - if (namelen > 32)
60 + if (namelen < 0 || namelen > 32)
61 namelen = 32;
62
63 down_read(&uts_sem);
64 kname = utsname()->domainname;
65 len = strnlen(kname, namelen);
66 - if (copy_to_user(name, kname, min(len + 1, namelen)))
67 - err = -EFAULT;
68 + len = min(len + 1, namelen);
69 + memcpy(tmp, kname, len);
70 up_read(&uts_sem);
71
72 - return err;
73 + if (copy_to_user(name, tmp, len))
74 + return -EFAULT;
75 + return 0;
76 }
77
78 /*
79 @@ -739,13 +737,14 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
80 };
81 unsigned long offset;
82 const char *res;
83 - long len, err = -EINVAL;
84 + long len;
85 + char tmp[__NEW_UTS_LEN + 1];
86
87 offset = command-1;
88 if (offset >= ARRAY_SIZE(sysinfo_table)) {
89 /* Digital UNIX has a few unpublished interfaces here */
90 printk("sysinfo(%d)", command);
91 - goto out;
92 + return -EINVAL;
93 }
94
95 down_read(&uts_sem);
96 @@ -753,13 +752,11 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
97 len = strlen(res)+1;
98 if ((unsigned long)len > (unsigned long)count)
99 len = count;
100 - if (copy_to_user(buf, res, len))
101 - err = -EFAULT;
102 - else
103 - err = 0;
104 + memcpy(tmp, res, len);
105 up_read(&uts_sem);
106 - out:
107 - return err;
108 + if (copy_to_user(buf, tmp, len))
109 + return -EFAULT;
110 + return 0;
111 }
112
113 SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
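All three osf_sys.c hunks above apply the same rule: uts_sem must not be held across copy_to_user(), which can fault and sleep. Each syscall now snapshots the data into a stack buffer under the semaphore, drops it, and only then copies out. A minimal userspace sketch of the pattern, with a pthread rwlock standing in for uts_sem and a hypothetical copy_out() for copy_to_user():

#include <errno.h>
#include <pthread.h>
#include <string.h>

#define FIELD_LEN 32

static pthread_rwlock_t uts_lock = PTHREAD_RWLOCK_INITIALIZER;
static char sysname[FIELD_LEN] = "Linux";

/* Stand-in for copy_to_user(): nonzero would mean the copy faulted. */
static int copy_out(char *dst, const char *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static int get_sysname(char *user_buf)
{
	char tmp[FIELD_LEN];

	pthread_rwlock_rdlock(&uts_lock);
	memcpy(tmp, sysname, sizeof(tmp));	/* cannot fault or sleep */
	pthread_rwlock_unlock(&uts_lock);	/* drop the lock first... */

	if (copy_out(user_buf, tmp, sizeof(tmp)))	/* ...then copy out */
		return -EFAULT;
	return 0;
}

int main(void)
{
	char buf[FIELD_LEN];
	return get_sysname(buf);
}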
114 diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
115 index 92a9740c533f..3b1db7b9ec50 100644
116 --- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
117 +++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
118 @@ -206,6 +206,7 @@
119 #address-cells = <1>;
120 #size-cells = <0>;
121 reg = <0x70>;
122 + reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>;
123 };
124 };
125
126 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
127 index 1bbb89d37f57..c30cd78b6918 100644
128 --- a/arch/arm64/Kconfig
129 +++ b/arch/arm64/Kconfig
130 @@ -693,7 +693,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
131
132 config HOLES_IN_ZONE
133 def_bool y
134 - depends on NUMA
135
136 source kernel/Kconfig.preempt
137 source kernel/Kconfig.hz
138 diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
139 index 5a23010af600..1e7a33592e29 100644
140 --- a/arch/powerpc/include/asm/fadump.h
141 +++ b/arch/powerpc/include/asm/fadump.h
142 @@ -195,9 +195,6 @@ struct fadump_crash_info_header {
143 struct cpumask online_mask;
144 };
145
146 -/* Crash memory ranges */
147 -#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
148 -
149 struct fad_crash_memory_ranges {
150 unsigned long long base;
151 unsigned long long size;
152 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
153 index d0020bc1f209..5a6470383ca3 100644
154 --- a/arch/powerpc/kernel/fadump.c
155 +++ b/arch/powerpc/kernel/fadump.c
156 @@ -47,8 +47,10 @@ static struct fadump_mem_struct fdm;
157 static const struct fadump_mem_struct *fdm_active;
158
159 static DEFINE_MUTEX(fadump_mutex);
160 -struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
161 +struct fad_crash_memory_ranges *crash_memory_ranges;
162 +int crash_memory_ranges_size;
163 int crash_mem_ranges;
164 +int max_crash_mem_ranges;
165
166 /* Scan the Firmware Assisted dump configuration details. */
167 int __init early_init_dt_scan_fw_dump(unsigned long node,
168 @@ -843,38 +845,88 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
169 return 0;
170 }
171
172 -static inline void fadump_add_crash_memory(unsigned long long base,
173 - unsigned long long end)
174 +static void free_crash_memory_ranges(void)
175 +{
176 + kfree(crash_memory_ranges);
177 + crash_memory_ranges = NULL;
178 + crash_memory_ranges_size = 0;
179 + max_crash_mem_ranges = 0;
180 +}
181 +
182 +/*
183 + * Allocate or reallocate crash memory ranges array in incremental units
184 + * of PAGE_SIZE.
185 + */
186 +static int allocate_crash_memory_ranges(void)
187 +{
188 + struct fad_crash_memory_ranges *new_array;
189 + u64 new_size;
190 +
191 + new_size = crash_memory_ranges_size + PAGE_SIZE;
192 + pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
193 + new_size);
194 +
195 + new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
196 + if (new_array == NULL) {
197 + pr_err("Insufficient memory for setting up crash memory ranges\n");
198 + free_crash_memory_ranges();
199 + return -ENOMEM;
200 + }
201 +
202 + crash_memory_ranges = new_array;
203 + crash_memory_ranges_size = new_size;
204 + max_crash_mem_ranges = (new_size /
205 + sizeof(struct fad_crash_memory_ranges));
206 + return 0;
207 +}
208 +
209 +static inline int fadump_add_crash_memory(unsigned long long base,
210 + unsigned long long end)
211 {
212 if (base == end)
213 - return;
214 + return 0;
215 +
216 + if (crash_mem_ranges == max_crash_mem_ranges) {
217 + int ret;
218 +
219 + ret = allocate_crash_memory_ranges();
220 + if (ret)
221 + return ret;
222 + }
223
224 pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
225 crash_mem_ranges, base, end - 1, (end - base));
226 crash_memory_ranges[crash_mem_ranges].base = base;
227 crash_memory_ranges[crash_mem_ranges].size = end - base;
228 crash_mem_ranges++;
229 + return 0;
230 }
231
232 -static void fadump_exclude_reserved_area(unsigned long long start,
233 +static int fadump_exclude_reserved_area(unsigned long long start,
234 unsigned long long end)
235 {
236 unsigned long long ra_start, ra_end;
237 + int ret = 0;
238
239 ra_start = fw_dump.reserve_dump_area_start;
240 ra_end = ra_start + fw_dump.reserve_dump_area_size;
241
242 if ((ra_start < end) && (ra_end > start)) {
243 if ((start < ra_start) && (end > ra_end)) {
244 - fadump_add_crash_memory(start, ra_start);
245 - fadump_add_crash_memory(ra_end, end);
246 + ret = fadump_add_crash_memory(start, ra_start);
247 + if (ret)
248 + return ret;
249 +
250 + ret = fadump_add_crash_memory(ra_end, end);
251 } else if (start < ra_start) {
252 - fadump_add_crash_memory(start, ra_start);
253 + ret = fadump_add_crash_memory(start, ra_start);
254 } else if (ra_end < end) {
255 - fadump_add_crash_memory(ra_end, end);
256 + ret = fadump_add_crash_memory(ra_end, end);
257 }
258 } else
259 - fadump_add_crash_memory(start, end);
260 + ret = fadump_add_crash_memory(start, end);
261 +
262 + return ret;
263 }
264
265 static int fadump_init_elfcore_header(char *bufp)
266 @@ -914,10 +966,11 @@ static int fadump_init_elfcore_header(char *bufp)
267 * Traverse through memblock structure and setup crash memory ranges. These
268 * ranges will be used create PT_LOAD program headers in elfcore header.
269 */
270 -static void fadump_setup_crash_memory_ranges(void)
271 +static int fadump_setup_crash_memory_ranges(void)
272 {
273 struct memblock_region *reg;
274 unsigned long long start, end;
275 + int ret;
276
277 pr_debug("Setup crash memory ranges.\n");
278 crash_mem_ranges = 0;
279 @@ -928,7 +981,9 @@ static void fadump_setup_crash_memory_ranges(void)
280 * specified during fadump registration. We need to create a separate
281 * program header for this chunk with the correct offset.
282 */
283 - fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
284 + ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
285 + if (ret)
286 + return ret;
287
288 for_each_memblock(memory, reg) {
289 start = (unsigned long long)reg->base;
290 @@ -948,8 +1003,12 @@ static void fadump_setup_crash_memory_ranges(void)
291 }
292
293 /* add this range excluding the reserved dump area. */
294 - fadump_exclude_reserved_area(start, end);
295 + ret = fadump_exclude_reserved_area(start, end);
296 + if (ret)
297 + return ret;
298 }
299 +
300 + return 0;
301 }
302
303 /*
304 @@ -1072,6 +1131,7 @@ static int register_fadump(void)
305 {
306 unsigned long addr;
307 void *vaddr;
308 + int ret;
309
310 /*
311 * If no memory is reserved then we can not register for firmware-
312 @@ -1080,7 +1140,9 @@ static int register_fadump(void)
313 if (!fw_dump.reserve_dump_area_size)
314 return -ENODEV;
315
316 - fadump_setup_crash_memory_ranges();
317 + ret = fadump_setup_crash_memory_ranges();
318 + if (ret)
319 + return ret;
320
321 addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
322 /* Initialize fadump crash info header. */
323 @@ -1158,6 +1220,7 @@ void fadump_cleanup(void)
324 } else if (fw_dump.dump_registered) {
325 /* Un-register Firmware-assisted dump if it was registered. */
326 fadump_unregister_dump(&fdm);
327 + free_crash_memory_ranges();
328 }
329 }
330
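The fadump change above replaces the fixed INIT_CRASHMEM_RANGES array with an array grown on demand in PAGE_SIZE increments via krealloc(), with every add/exclude path now propagating -ENOMEM. A compressed userspace sketch of that grow-on-demand pattern; range, add_range() and grow_ranges() are illustrative names, and realloc() stands in for krealloc(GFP_KERNEL):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

struct range { unsigned long long base, size; };

static struct range *ranges;
static size_t ranges_bytes;	/* current allocation size */
static size_t nr_ranges;	/* entries in use */
static size_t max_ranges;	/* entries that fit */

static int grow_ranges(void)
{
	size_t new_bytes = ranges_bytes + PAGE_SIZE;
	struct range *tmp = realloc(ranges, new_bytes);

	if (!tmp)
		return -ENOMEM;	/* the real code also frees the old array */
	ranges = tmp;
	ranges_bytes = new_bytes;
	max_ranges = new_bytes / sizeof(*ranges);
	return 0;
}

static int add_range(unsigned long long base, unsigned long long end)
{
	if (base == end)
		return 0;
	if (nr_ranges == max_ranges && grow_ranges())
		return -ENOMEM;
	ranges[nr_ranges].base = base;
	ranges[nr_ranges].size = end - base;
	nr_ranges++;
	return 0;
}

int main(void)
{
	for (unsigned long long i = 0; i < 1000; i++)
		if (add_range(i * 2, i * 2 + 1))
			return 1;
	printf("%zu ranges, %zu bytes\n", nr_ranges, ranges_bytes);
	free(ranges);
	return 0;
}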
331 diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
332 index 816055927ee4..d735937d975c 100644
333 --- a/arch/powerpc/mm/mmu_context_iommu.c
334 +++ b/arch/powerpc/mm/mmu_context_iommu.c
335 @@ -130,6 +130,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
336 long i, j, ret = 0, locked_entries = 0;
337 unsigned int pageshift;
338 unsigned long flags;
339 + unsigned long cur_ua;
340 struct page *page = NULL;
341
342 mutex_lock(&mem_list_mutex);
343 @@ -178,7 +179,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
344 }
345
346 for (i = 0; i < entries; ++i) {
347 - if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
348 + cur_ua = ua + (i << PAGE_SHIFT);
349 + if (1 != get_user_pages_fast(cur_ua,
350 1/* pages */, 1/* iswrite */, &page)) {
351 ret = -EFAULT;
352 for (j = 0; j < i; ++j)
353 @@ -197,7 +199,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
354 if (is_migrate_cma_page(page)) {
355 if (mm_iommu_move_page_from_cma(page))
356 goto populate;
357 - if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
358 + if (1 != get_user_pages_fast(cur_ua,
359 1/* pages */, 1/* iswrite */,
360 &page)) {
361 ret = -EFAULT;
362 @@ -211,20 +213,21 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
363 }
364 populate:
365 pageshift = PAGE_SHIFT;
366 - if (PageCompound(page)) {
367 + if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) {
368 pte_t *pte;
369 struct page *head = compound_head(page);
370 unsigned int compshift = compound_order(head);
371 + unsigned int pteshift;
372
373 local_irq_save(flags); /* disables as well */
374 - pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
375 - local_irq_restore(flags);
376 + pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift);
377
378 /* Double check it is still the same pinned page */
379 if (pte && pte_page(*pte) == head &&
380 - pageshift == compshift)
381 - pageshift = max_t(unsigned int, pageshift,
382 + pteshift == compshift + PAGE_SHIFT)
383 + pageshift = max_t(unsigned int, pteshift,
384 PAGE_SHIFT);
385 + local_irq_restore(flags);
386 }
387 mem->pageshift = min(mem->pageshift, pageshift);
388 mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
389 diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
390 index 677b29ef4532..e919696c7137 100644
391 --- a/arch/powerpc/platforms/powernv/pci-ioda.c
392 +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
393 @@ -3286,12 +3286,49 @@ static void pnv_pci_ioda_create_dbgfs(void)
394 #endif /* CONFIG_DEBUG_FS */
395 }
396
397 +static void pnv_pci_enable_bridge(struct pci_bus *bus)
398 +{
399 + struct pci_dev *dev = bus->self;
400 + struct pci_bus *child;
401 +
402 + /* Empty bus ? bail */
403 + if (list_empty(&bus->devices))
404 + return;
405 +
406 + /*
407 + * If there's a bridge associated with that bus enable it. This works
408 + * around races in the generic code if the enabling is done during
409 + * parallel probing. This can be removed once those races have been
410 + * fixed.
411 + */
412 + if (dev) {
413 + int rc = pci_enable_device(dev);
414 + if (rc)
415 + pci_err(dev, "Error enabling bridge (%d)\n", rc);
416 + pci_set_master(dev);
417 + }
418 +
419 + /* Perform the same to child busses */
420 + list_for_each_entry(child, &bus->children, node)
421 + pnv_pci_enable_bridge(child);
422 +}
423 +
424 +static void pnv_pci_enable_bridges(void)
425 +{
426 + struct pci_controller *hose;
427 +
428 + list_for_each_entry(hose, &hose_list, list_node)
429 + pnv_pci_enable_bridge(hose->bus);
430 +}
431 +
432 static void pnv_pci_ioda_fixup(void)
433 {
434 pnv_pci_ioda_setup_PEs();
435 pnv_pci_ioda_setup_iommu_api();
436 pnv_pci_ioda_create_dbgfs();
437
438 + pnv_pci_enable_bridges();
439 +
440 #ifdef CONFIG_EEH
441 eeh_init();
442 eeh_addr_cache_build();
443 diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
444 index 5e1ef9150182..2edc673be137 100644
445 --- a/arch/powerpc/platforms/pseries/ras.c
446 +++ b/arch/powerpc/platforms/pseries/ras.c
447 @@ -360,7 +360,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
448 }
449
450 savep = __va(regs->gpr[3]);
451 - regs->gpr[3] = savep[0]; /* restore original r3 */
452 + regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
453
454 /* If it isn't an extended log we can use the per cpu 64bit buffer */
455 h = (struct rtas_error_log *)&savep[1];
456 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
457 index 990703b7cf4d..4b7719b2a73c 100644
458 --- a/arch/sparc/kernel/sys_sparc_32.c
459 +++ b/arch/sparc/kernel/sys_sparc_32.c
460 @@ -204,23 +204,27 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
461
462 asmlinkage long sys_getdomainname(char __user *name, int len)
463 {
464 - int nlen, err;
465 -
466 + int nlen, err;
467 + char tmp[__NEW_UTS_LEN + 1];
468 +
469 if (len < 0)
470 return -EINVAL;
471
472 - down_read(&uts_sem);
473 -
474 + down_read(&uts_sem);
475 +
476 nlen = strlen(utsname()->domainname) + 1;
477 err = -EINVAL;
478 if (nlen > len)
479 - goto out;
480 + goto out_unlock;
481 + memcpy(tmp, utsname()->domainname, nlen);
482
483 - err = -EFAULT;
484 - if (!copy_to_user(name, utsname()->domainname, nlen))
485 - err = 0;
486 + up_read(&uts_sem);
487
488 -out:
489 + if (copy_to_user(name, tmp, nlen))
490 + return -EFAULT;
491 + return 0;
492 +
493 +out_unlock:
494 up_read(&uts_sem);
495 return err;
496 }
497 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
498 index 55416db482ad..d79c1c74873c 100644
499 --- a/arch/sparc/kernel/sys_sparc_64.c
500 +++ b/arch/sparc/kernel/sys_sparc_64.c
501 @@ -527,23 +527,27 @@ extern void check_pending(int signum);
502
503 SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
504 {
505 - int nlen, err;
506 + int nlen, err;
507 + char tmp[__NEW_UTS_LEN + 1];
508
509 if (len < 0)
510 return -EINVAL;
511
512 - down_read(&uts_sem);
513 -
514 + down_read(&uts_sem);
515 +
516 nlen = strlen(utsname()->domainname) + 1;
517 err = -EINVAL;
518 if (nlen > len)
519 - goto out;
520 + goto out_unlock;
521 + memcpy(tmp, utsname()->domainname, nlen);
522 +
523 + up_read(&uts_sem);
524
525 - err = -EFAULT;
526 - if (!copy_to_user(name, utsname()->domainname, nlen))
527 - err = 0;
528 + if (copy_to_user(name, tmp, nlen))
529 + return -EFAULT;
530 + return 0;
531
532 -out:
533 +out_unlock:
534 up_read(&uts_sem);
535 return err;
536 }
537 diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
538 index f24cd9f1799a..928b0c6083c9 100644
539 --- a/arch/x86/kernel/kexec-bzimage64.c
540 +++ b/arch/x86/kernel/kexec-bzimage64.c
541 @@ -532,7 +532,7 @@ static int bzImage64_cleanup(void *loader_data)
542 static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
543 {
544 return verify_pefile_signature(kernel, kernel_len,
545 - NULL,
546 + VERIFY_USE_SECONDARY_KEYRING,
547 VERIFYING_KEXEC_PE_SIGNATURE);
548 }
549 #endif
550 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
551 index 8958b35f6008..a466ee14ad41 100644
552 --- a/arch/x86/kvm/vmx.c
553 +++ b/arch/x86/kvm/vmx.c
554 @@ -200,12 +200,14 @@ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_
555
556 static const struct {
557 const char *option;
558 - enum vmx_l1d_flush_state cmd;
559 + bool for_parse;
560 } vmentry_l1d_param[] = {
561 - {"auto", VMENTER_L1D_FLUSH_AUTO},
562 - {"never", VMENTER_L1D_FLUSH_NEVER},
563 - {"cond", VMENTER_L1D_FLUSH_COND},
564 - {"always", VMENTER_L1D_FLUSH_ALWAYS},
565 + [VMENTER_L1D_FLUSH_AUTO] = {"auto", true},
566 + [VMENTER_L1D_FLUSH_NEVER] = {"never", true},
567 + [VMENTER_L1D_FLUSH_COND] = {"cond", true},
568 + [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true},
569 + [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
570 + [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
571 };
572
573 #define L1D_CACHE_ORDER 4
574 @@ -289,8 +291,9 @@ static int vmentry_l1d_flush_parse(const char *s)
575
576 if (s) {
577 for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
578 - if (sysfs_streq(s, vmentry_l1d_param[i].option))
579 - return vmentry_l1d_param[i].cmd;
580 + if (vmentry_l1d_param[i].for_parse &&
581 + sysfs_streq(s, vmentry_l1d_param[i].option))
582 + return i;
583 }
584 }
585 return -EINVAL;
586 @@ -300,13 +303,13 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
587 {
588 int l1tf, ret;
589
590 - if (!boot_cpu_has(X86_BUG_L1TF))
591 - return 0;
592 -
593 l1tf = vmentry_l1d_flush_parse(s);
594 if (l1tf < 0)
595 return l1tf;
596
597 + if (!boot_cpu_has(X86_BUG_L1TF))
598 + return 0;
599 +
600 /*
601 * Has vmx_init() run already? If not then this is the pre init
602 * parameter parsing. In that case just store the value and let
603 @@ -326,6 +329,9 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
604
605 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
606 {
607 + if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
608 + return sprintf(s, "???\n");
609 +
610 return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
611 }
612
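The vmx.c change rekeys the option table by the enum value itself, using designated initializers plus a for_parse flag, so report-only states ("EPT disabled", "not required") can be printed by vmentry_l1d_flush_get() without becoming settable. A standalone sketch of that table shape; parse_opt() is an illustrative stand-in for vmentry_l1d_flush_parse():

#include <stdio.h>
#include <string.h>

enum flush_state {
	FLUSH_AUTO, FLUSH_NEVER, FLUSH_COND, FLUSH_ALWAYS,
	FLUSH_EPT_DISABLED, FLUSH_NOT_REQUIRED, NR_FLUSH_STATES
};

static const struct {
	const char *option;
	int for_parse;
} flush_param[NR_FLUSH_STATES] = {
	[FLUSH_AUTO]         = { "auto",         1 },
	[FLUSH_NEVER]        = { "never",        1 },
	[FLUSH_COND]         = { "cond",         1 },
	[FLUSH_ALWAYS]       = { "always",       1 },
	[FLUSH_EPT_DISABLED] = { "EPT disabled", 0 },
	[FLUSH_NOT_REQUIRED] = { "not required", 0 },
};

/* Returns the enum value on success, -1 if the string is not settable. */
static int parse_opt(const char *s)
{
	for (int i = 0; i < NR_FLUSH_STATES; i++)
		if (flush_param[i].for_parse &&
		    !strcmp(s, flush_param[i].option))
			return i;
	return -1;
}

int main(void)
{
	printf("%d %d\n", parse_opt("cond"), parse_opt("EPT disabled"));
	return 0;
}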
613 diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
614 index 2041abb10a23..34545ecfdd6b 100644
615 --- a/arch/xtensa/include/asm/cacheasm.h
616 +++ b/arch/xtensa/include/asm/cacheasm.h
617 @@ -31,16 +31,32 @@
618 *
619 */
620
621 - .macro __loop_cache_all ar at insn size line_width
622
623 - movi \ar, 0
624 + .macro __loop_cache_unroll ar at insn size line_width max_immed
625 +
626 + .if (1 << (\line_width)) > (\max_immed)
627 + .set _reps, 1
628 + .elseif (2 << (\line_width)) > (\max_immed)
629 + .set _reps, 2
630 + .else
631 + .set _reps, 4
632 + .endif
633 +
634 + __loopi \ar, \at, \size, (_reps << (\line_width))
635 + .set _index, 0
636 + .rep _reps
637 + \insn \ar, _index << (\line_width)
638 + .set _index, _index + 1
639 + .endr
640 + __endla \ar, \at, _reps << (\line_width)
641 +
642 + .endm
643 +
644
645 - __loopi \ar, \at, \size, (4 << (\line_width))
646 - \insn \ar, 0 << (\line_width)
647 - \insn \ar, 1 << (\line_width)
648 - \insn \ar, 2 << (\line_width)
649 - \insn \ar, 3 << (\line_width)
650 - __endla \ar, \at, 4 << (\line_width)
651 + .macro __loop_cache_all ar at insn size line_width max_immed
652 +
653 + movi \ar, 0
654 + __loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
655
656 .endm
657
658 @@ -57,14 +73,9 @@
659 .endm
660
661
662 - .macro __loop_cache_page ar at insn line_width
663 + .macro __loop_cache_page ar at insn line_width max_immed
664
665 - __loopi \ar, \at, PAGE_SIZE, 4 << (\line_width)
666 - \insn \ar, 0 << (\line_width)
667 - \insn \ar, 1 << (\line_width)
668 - \insn \ar, 2 << (\line_width)
669 - \insn \ar, 3 << (\line_width)
670 - __endla \ar, \at, 4 << (\line_width)
671 + __loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
672
673 .endm
674
675 @@ -72,7 +83,8 @@
676 .macro ___unlock_dcache_all ar at
677
678 #if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
679 - __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
680 + __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
681 + XCHAL_DCACHE_LINEWIDTH 240
682 #endif
683
684 .endm
685 @@ -81,7 +93,8 @@
686 .macro ___unlock_icache_all ar at
687
688 #if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
689 - __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
690 + __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
691 + XCHAL_ICACHE_LINEWIDTH 240
692 #endif
693
694 .endm
695 @@ -90,7 +103,8 @@
696 .macro ___flush_invalidate_dcache_all ar at
697
698 #if XCHAL_DCACHE_SIZE
699 - __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
700 + __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
701 + XCHAL_DCACHE_LINEWIDTH 240
702 #endif
703
704 .endm
705 @@ -99,7 +113,8 @@
706 .macro ___flush_dcache_all ar at
707
708 #if XCHAL_DCACHE_SIZE
709 - __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
710 + __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
711 + XCHAL_DCACHE_LINEWIDTH 240
712 #endif
713
714 .endm
715 @@ -108,8 +123,8 @@
716 .macro ___invalidate_dcache_all ar at
717
718 #if XCHAL_DCACHE_SIZE
719 - __loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
720 - XCHAL_DCACHE_LINEWIDTH
721 + __loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
722 + XCHAL_DCACHE_LINEWIDTH 1020
723 #endif
724
725 .endm
726 @@ -118,8 +133,8 @@
727 .macro ___invalidate_icache_all ar at
728
729 #if XCHAL_ICACHE_SIZE
730 - __loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
731 - XCHAL_ICACHE_LINEWIDTH
732 + __loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
733 + XCHAL_ICACHE_LINEWIDTH 1020
734 #endif
735
736 .endm
737 @@ -166,7 +181,7 @@
738 .macro ___flush_invalidate_dcache_page ar as
739
740 #if XCHAL_DCACHE_SIZE
741 - __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
742 + __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
743 #endif
744
745 .endm
746 @@ -175,7 +190,7 @@
747 .macro ___flush_dcache_page ar as
748
749 #if XCHAL_DCACHE_SIZE
750 - __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
751 + __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
752 #endif
753
754 .endm
755 @@ -184,7 +199,7 @@
756 .macro ___invalidate_dcache_page ar as
757
758 #if XCHAL_DCACHE_SIZE
759 - __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
760 + __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
761 #endif
762
763 .endm
764 @@ -193,7 +208,7 @@
765 .macro ___invalidate_icache_page ar as
766
767 #if XCHAL_ICACHE_SIZE
768 - __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
769 + __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
770 #endif
771
772 .endm
773 diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
774 index 5d53e504acae..4b571f3ea009 100644
775 --- a/block/bfq-cgroup.c
776 +++ b/block/bfq-cgroup.c
777 @@ -887,7 +887,8 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
778 if (ret)
779 return ret;
780
781 - return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
782 + ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
783 + return ret ?: nbytes;
784 }
785
786 static int bfqg_print_stat(struct seq_file *sf, void *v)
787 diff --git a/block/blk-core.c b/block/blk-core.c
788 index 68bae6338ad4..1d27e2a152e0 100644
789 --- a/block/blk-core.c
790 +++ b/block/blk-core.c
791 @@ -1025,6 +1025,7 @@ out_exit_flush_rq:
792 q->exit_rq_fn(q, q->fq->flush_rq);
793 out_free_flush_queue:
794 blk_free_flush_queue(q->fq);
795 + q->fq = NULL;
796 return -ENOMEM;
797 }
798 EXPORT_SYMBOL(blk_init_allocated_queue);
799 @@ -3458,9 +3459,11 @@ EXPORT_SYMBOL(blk_finish_plug);
800 */
801 void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
802 {
803 - /* not support for RQF_PM and ->rpm_status in blk-mq yet */
804 - if (q->mq_ops)
805 + /* Don't enable runtime PM for blk-mq until it is ready */
806 + if (q->mq_ops) {
807 + pm_runtime_disable(dev);
808 return;
809 + }
810
811 q->dev = dev;
812 q->rpm_status = RPM_ACTIVE;
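The q->fq = NULL hunk in blk-core.c follows a standard error-path rule: after freeing an object reachable from a longer-lived structure, clear the pointer so later teardown cannot free it twice. A minimal userspace illustration with hypothetical names:

#include <stdlib.h>

struct queue { void *fq; };

static void queue_teardown(struct queue *q)
{
	free(q->fq);		/* safe: free(NULL) is a no-op */
	q->fq = NULL;
}

static int queue_init(struct queue *q)
{
	q->fq = malloc(64);
	if (!q->fq)
		return -1;
	/* ... a later init step fails ... */
	free(q->fq);
	q->fq = NULL;		/* without this, teardown double-frees */
	return -1;
}

int main(void)
{
	struct queue q = { 0 };
	queue_init(&q);
	queue_teardown(&q);
	return 0;
}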
813 diff --git a/certs/system_keyring.c b/certs/system_keyring.c
814 index 6251d1b27f0c..81728717523d 100644
815 --- a/certs/system_keyring.c
816 +++ b/certs/system_keyring.c
817 @@ -15,6 +15,7 @@
818 #include <linux/cred.h>
819 #include <linux/err.h>
820 #include <linux/slab.h>
821 +#include <linux/verification.h>
822 #include <keys/asymmetric-type.h>
823 #include <keys/system_keyring.h>
824 #include <crypto/pkcs7.h>
825 @@ -230,7 +231,7 @@ int verify_pkcs7_signature(const void *data, size_t len,
826
827 if (!trusted_keys) {
828 trusted_keys = builtin_trusted_keys;
829 - } else if (trusted_keys == (void *)1UL) {
830 + } else if (trusted_keys == VERIFY_USE_SECONDARY_KEYRING) {
831 #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
832 trusted_keys = secondary_trusted_keys;
833 #else
834 diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c
835 index 1063b644efcd..b2aa925a84bc 100644
836 --- a/crypto/asymmetric_keys/pkcs7_key_type.c
837 +++ b/crypto/asymmetric_keys/pkcs7_key_type.c
838 @@ -62,7 +62,7 @@ static int pkcs7_preparse(struct key_preparsed_payload *prep)
839
840 return verify_pkcs7_signature(NULL, 0,
841 prep->data, prep->datalen,
842 - (void *)1UL, usage,
843 + VERIFY_USE_SECONDARY_KEYRING, usage,
844 pkcs7_view_content, prep);
845 }
846
847 diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
848 index f149d3e61234..1e2648e4c286 100644
849 --- a/drivers/block/zram/zram_drv.c
850 +++ b/drivers/block/zram/zram_drv.c
851 @@ -321,6 +321,7 @@ static ssize_t backing_dev_store(struct device *dev,
852 struct device_attribute *attr, const char *buf, size_t len)
853 {
854 char *file_name;
855 + size_t sz;
856 struct file *backing_dev = NULL;
857 struct inode *inode;
858 struct address_space *mapping;
859 @@ -341,7 +342,11 @@ static ssize_t backing_dev_store(struct device *dev,
860 goto out;
861 }
862
863 - strlcpy(file_name, buf, len);
864 + strlcpy(file_name, buf, PATH_MAX);
865 + /* ignore trailing newline */
866 + sz = strlen(file_name);
867 + if (sz > 0 && file_name[sz - 1] == '\n')
868 + file_name[sz - 1] = 0x00;
869
870 backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
871 if (IS_ERR(backing_dev)) {
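The zram hunk fixes two sysfs-store staples: bound the copy by the destination buffer (PATH_MAX), not by the input length, and strip the trailing newline that echo appends. A userspace sketch, with snprintf() standing in for strlcpy() and an illustrative buffer size:

#include <stdio.h>
#include <string.h>

#define PATH_BUF_LEN 4096

static void parse_path(char *dst, const char *buf)
{
	size_t sz;

	snprintf(dst, PATH_BUF_LEN, "%s", buf);	/* bounded by dst size */
	sz = strlen(dst);
	if (sz > 0 && dst[sz - 1] == '\n')	/* ignore trailing newline */
		dst[sz - 1] = '\0';
}

int main(void)
{
	char name[PATH_BUF_LEN];

	parse_path(name, "/dev/loop0\n");
	puts(name);
	return 0;
}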
872 diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
873 index 43e14bb512c8..6a16d22bc604 100644
874 --- a/drivers/cpufreq/cpufreq_governor.c
875 +++ b/drivers/cpufreq/cpufreq_governor.c
876 @@ -555,12 +555,20 @@ EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
877
878 void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
879 {
880 - struct policy_dbs_info *policy_dbs = policy->governor_data;
881 + struct policy_dbs_info *policy_dbs;
882 +
883 + /* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
884 + mutex_lock(&gov_dbs_data_mutex);
885 + policy_dbs = policy->governor_data;
886 + if (!policy_dbs)
887 + goto out;
888
889 mutex_lock(&policy_dbs->update_mutex);
890 cpufreq_policy_apply_limits(policy);
891 gov_update_sample_delay(policy_dbs, 0);
892 -
893 mutex_unlock(&policy_dbs->update_mutex);
894 +
895 +out:
896 + mutex_unlock(&gov_dbs_data_mutex);
897 }
898 EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
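The cpufreq_governor.c change takes gov_dbs_data_mutex around the whole limits update and re-checks policy->governor_data for NULL, so a concurrent cpufreq_dbs_governor_exit() cannot free the data mid-update. A pthread sketch of that lock-then-revalidate pattern (all names illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t gov_lock = PTHREAD_MUTEX_INITIALIZER;
static int *governor_data;	/* set by init, cleared by exit */

static void governor_limits(void)
{
	pthread_mutex_lock(&gov_lock);
	if (governor_data)	/* may have been torn down concurrently */
		*governor_data += 1;	/* stands in for the real work */
	pthread_mutex_unlock(&gov_lock);
}

static void governor_exit(void)
{
	pthread_mutex_lock(&gov_lock);
	free(governor_data);
	governor_data = NULL;
	pthread_mutex_unlock(&gov_lock);
}

int main(void)
{
	governor_data = calloc(1, sizeof(*governor_data));
	governor_limits();
	governor_exit();
	governor_limits();	/* safe: pointer already NULL */
	return 0;
}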
899 diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
900 index e7966e37a5aa..ecc6d755d3c1 100644
901 --- a/drivers/crypto/caam/caamalg_qi.c
902 +++ b/drivers/crypto/caam/caamalg_qi.c
903 @@ -350,10 +350,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
904 int ret = 0;
905
906 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
907 - crypto_ablkcipher_set_flags(ablkcipher,
908 - CRYPTO_TFM_RES_BAD_KEY_LEN);
909 dev_err(jrdev, "key size mismatch\n");
910 - return -EINVAL;
911 + goto badkey;
912 }
913
914 memcpy(ctx->key, key, keylen);
915 @@ -388,7 +386,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
916 return ret;
917 badkey:
918 crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
919 - return 0;
920 + return -EINVAL;
921 }
922
923 /*
924 diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
925 index 7ff4a25440ac..6f3f81bb880b 100644
926 --- a/drivers/crypto/caam/caampkc.c
927 +++ b/drivers/crypto/caam/caampkc.c
928 @@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
929 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
930 dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
931 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
932 - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
933 - dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
934 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
935 + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
936 }
937
938 static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
939 @@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
940 dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
941 dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
942 dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
943 - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
944 - dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
945 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
946 + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
947 }
948
949 /* RSA Job Completion handler */
950 @@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
951 goto unmap_p;
952 }
953
954 - pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
955 + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
956 if (dma_mapping_error(dev, pdb->tmp1_dma)) {
957 dev_err(dev, "Unable to map RSA tmp1 memory\n");
958 goto unmap_q;
959 }
960
961 - pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
962 + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
963 if (dma_mapping_error(dev, pdb->tmp2_dma)) {
964 dev_err(dev, "Unable to map RSA tmp2 memory\n");
965 goto unmap_tmp1;
966 @@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
967 return 0;
968
969 unmap_tmp1:
970 - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
971 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
972 unmap_q:
973 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
974 unmap_p:
975 @@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
976 goto unmap_dq;
977 }
978
979 - pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
980 + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
981 if (dma_mapping_error(dev, pdb->tmp1_dma)) {
982 dev_err(dev, "Unable to map RSA tmp1 memory\n");
983 goto unmap_qinv;
984 }
985
986 - pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
987 + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
988 if (dma_mapping_error(dev, pdb->tmp2_dma)) {
989 dev_err(dev, "Unable to map RSA tmp2 memory\n");
990 goto unmap_tmp1;
991 @@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
992 return 0;
993
994 unmap_tmp1:
995 - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
996 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
997 unmap_qinv:
998 dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
999 unmap_dq:
1000 diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
1001 index d258953ff488..7fa1be184553 100644
1002 --- a/drivers/crypto/caam/jr.c
1003 +++ b/drivers/crypto/caam/jr.c
1004 @@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
1005 BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
1006
1007 /* Unmap just-run descriptor so we can post-process */
1008 - dma_unmap_single(dev, jrp->outring[hw_idx].desc,
1009 + dma_unmap_single(dev,
1010 + caam_dma_to_cpu(jrp->outring[hw_idx].desc),
1011 jrp->entinfo[sw_idx].desc_size,
1012 DMA_TO_DEVICE);
1013
1014 diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
1015 index 5285ece4f33a..b71895871be3 100644
1016 --- a/drivers/crypto/vmx/aes_cbc.c
1017 +++ b/drivers/crypto/vmx/aes_cbc.c
1018 @@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
1019 ret = crypto_skcipher_encrypt(req);
1020 skcipher_request_zero(req);
1021 } else {
1022 - preempt_disable();
1023 - pagefault_disable();
1024 - enable_kernel_vsx();
1025 -
1026 blkcipher_walk_init(&walk, dst, src, nbytes);
1027 ret = blkcipher_walk_virt(desc, &walk);
1028 while ((nbytes = walk.nbytes)) {
1029 + preempt_disable();
1030 + pagefault_disable();
1031 + enable_kernel_vsx();
1032 aes_p8_cbc_encrypt(walk.src.virt.addr,
1033 walk.dst.virt.addr,
1034 nbytes & AES_BLOCK_MASK,
1035 &ctx->enc_key, walk.iv, 1);
1036 + disable_kernel_vsx();
1037 + pagefault_enable();
1038 + preempt_enable();
1039 +
1040 nbytes &= AES_BLOCK_SIZE - 1;
1041 ret = blkcipher_walk_done(desc, &walk, nbytes);
1042 }
1043 -
1044 - disable_kernel_vsx();
1045 - pagefault_enable();
1046 - preempt_enable();
1047 }
1048
1049 return ret;
1050 @@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
1051 ret = crypto_skcipher_decrypt(req);
1052 skcipher_request_zero(req);
1053 } else {
1054 - preempt_disable();
1055 - pagefault_disable();
1056 - enable_kernel_vsx();
1057 -
1058 blkcipher_walk_init(&walk, dst, src, nbytes);
1059 ret = blkcipher_walk_virt(desc, &walk);
1060 while ((nbytes = walk.nbytes)) {
1061 + preempt_disable();
1062 + pagefault_disable();
1063 + enable_kernel_vsx();
1064 aes_p8_cbc_encrypt(walk.src.virt.addr,
1065 walk.dst.virt.addr,
1066 nbytes & AES_BLOCK_MASK,
1067 &ctx->dec_key, walk.iv, 0);
1068 + disable_kernel_vsx();
1069 + pagefault_enable();
1070 + preempt_enable();
1071 +
1072 nbytes &= AES_BLOCK_SIZE - 1;
1073 ret = blkcipher_walk_done(desc, &walk, nbytes);
1074 }
1075 -
1076 - disable_kernel_vsx();
1077 - pagefault_enable();
1078 - preempt_enable();
1079 }
1080
1081 return ret;
1082 diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
1083 index 8bd9aff0f55f..e9954a7d4694 100644
1084 --- a/drivers/crypto/vmx/aes_xts.c
1085 +++ b/drivers/crypto/vmx/aes_xts.c
1086 @@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
1087 ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
1088 skcipher_request_zero(req);
1089 } else {
1090 + blkcipher_walk_init(&walk, dst, src, nbytes);
1091 +
1092 + ret = blkcipher_walk_virt(desc, &walk);
1093 +
1094 preempt_disable();
1095 pagefault_disable();
1096 enable_kernel_vsx();
1097
1098 - blkcipher_walk_init(&walk, dst, src, nbytes);
1099 -
1100 - ret = blkcipher_walk_virt(desc, &walk);
1101 iv = walk.iv;
1102 memset(tweak, 0, AES_BLOCK_SIZE);
1103 aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
1104
1105 + disable_kernel_vsx();
1106 + pagefault_enable();
1107 + preempt_enable();
1108 +
1109 while ((nbytes = walk.nbytes)) {
1110 + preempt_disable();
1111 + pagefault_disable();
1112 + enable_kernel_vsx();
1113 if (enc)
1114 aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
1115 nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
1116 else
1117 aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
1118 nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
1119 + disable_kernel_vsx();
1120 + pagefault_enable();
1121 + preempt_enable();
1122
1123 nbytes &= AES_BLOCK_SIZE - 1;
1124 ret = blkcipher_walk_done(desc, &walk, nbytes);
1125 }
1126 -
1127 - disable_kernel_vsx();
1128 - pagefault_enable();
1129 - preempt_enable();
1130 }
1131 return ret;
1132 }
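Both vmx crypto files are reworked the same way: the enable_kernel_vsx() window, which also disables preemption and pagefaults, is narrowed to one cipher call per loop iteration, because blkcipher_walk_done() may sleep and must run outside it. A structural sketch with no-op stand-ins for the enter/exit and walk calls:

#include <stddef.h>

static void fpu_begin(void) { }		/* enable_kernel_vsx() et al. */
static void fpu_end(void) { }		/* disable_kernel_vsx() et al. */
static void cipher_chunk(size_t n) { (void)n; }

static size_t walk_step(size_t *left)	/* may sleep in the real code */
{
	size_t n = *left > 64 ? 64 : *left;

	*left -= n;
	return n;
}

static void crypt_all(size_t total)
{
	size_t n;

	while ((n = walk_step(&total))) {
		fpu_begin();		/* begin the atomic window */
		cipher_chunk(n);
		fpu_end();		/* end it before the next, possibly
					 * sleeping, walk step */
	}
}

int main(void)
{
	crypt_all(256);
	return 0;
}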
1133 diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
1134 index 35e9fb885486..95e96f04bf6f 100644
1135 --- a/drivers/extcon/extcon.c
1136 +++ b/drivers/extcon/extcon.c
1137 @@ -433,8 +433,8 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
1138 return index;
1139
1140 spin_lock_irqsave(&edev->lock, flags);
1141 -
1142 state = !!(edev->state & BIT(index));
1143 + spin_unlock_irqrestore(&edev->lock, flags);
1144
1145 /*
1146 * Call functions in a raw notifier chain for the specific one
1147 @@ -448,6 +448,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
1148 */
1149 raw_notifier_call_chain(&edev->nh_all, state, edev);
1150
1151 + spin_lock_irqsave(&edev->lock, flags);
1152 /* This could be in interrupt handler */
1153 prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
1154 if (!prop_buf) {
1155 diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
1156 index 709efe2357ea..05ae8c4a8a1b 100644
1157 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
1158 +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
1159 @@ -782,6 +782,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
1160 I915_USERPTR_UNSYNCHRONIZED))
1161 return -EINVAL;
1162
1163 + if (!args->user_size)
1164 + return -EINVAL;
1165 +
1166 if (offset_in_page(args->user_ptr | args->user_size))
1167 return -EINVAL;
1168
1169 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
1170 index 05964347008d..d96b09fea835 100644
1171 --- a/drivers/hv/channel.c
1172 +++ b/drivers/hv/channel.c
1173 @@ -541,11 +541,8 @@ static void reset_channel_cb(void *arg)
1174 channel->onchannel_callback = NULL;
1175 }
1176
1177 -static int vmbus_close_internal(struct vmbus_channel *channel)
1178 +void vmbus_reset_channel_cb(struct vmbus_channel *channel)
1179 {
1180 - struct vmbus_channel_close_channel *msg;
1181 - int ret;
1182 -
1183 /*
1184 * vmbus_on_event(), running in the per-channel tasklet, can race
1185 * with vmbus_close_internal() in the case of SMP guest, e.g., when
1186 @@ -555,6 +552,29 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
1187 */
1188 tasklet_disable(&channel->callback_event);
1189
1190 + channel->sc_creation_callback = NULL;
1191 +
1192 + /* Stop the callback asap */
1193 + if (channel->target_cpu != get_cpu()) {
1194 + put_cpu();
1195 + smp_call_function_single(channel->target_cpu, reset_channel_cb,
1196 + channel, true);
1197 + } else {
1198 + reset_channel_cb(channel);
1199 + put_cpu();
1200 + }
1201 +
1202 + /* Re-enable tasklet for use on re-open */
1203 + tasklet_enable(&channel->callback_event);
1204 +}
1205 +
1206 +static int vmbus_close_internal(struct vmbus_channel *channel)
1207 +{
1208 + struct vmbus_channel_close_channel *msg;
1209 + int ret;
1210 +
1211 + vmbus_reset_channel_cb(channel);
1212 +
1213 /*
1214 * In case a device driver's probe() fails (e.g.,
1215 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
1216 @@ -568,16 +588,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
1217 }
1218
1219 channel->state = CHANNEL_OPEN_STATE;
1220 - channel->sc_creation_callback = NULL;
1221 - /* Stop callback and cancel the timer asap */
1222 - if (channel->target_cpu != get_cpu()) {
1223 - put_cpu();
1224 - smp_call_function_single(channel->target_cpu, reset_channel_cb,
1225 - channel, true);
1226 - } else {
1227 - reset_channel_cb(channel);
1228 - put_cpu();
1229 - }
1230
1231 /* Send a closing message */
1232
1233 @@ -620,8 +630,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
1234 get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
1235
1236 out:
1237 - /* re-enable tasklet for use on re-open */
1238 - tasklet_enable(&channel->callback_event);
1239 return ret;
1240 }
1241
1242 diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
1243 index 1939c0ca3741..1700b4e7758d 100644
1244 --- a/drivers/hv/channel_mgmt.c
1245 +++ b/drivers/hv/channel_mgmt.c
1246 @@ -881,6 +881,12 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
1247 return;
1248 }
1249
1250 + /*
1251 + * Before setting channel->rescind in vmbus_rescind_cleanup(), we
1252 + * should make sure the channel callback is not running any more.
1253 + */
1254 + vmbus_reset_channel_cb(channel);
1255 +
1256 /*
1257 * Now wait for offer handling to complete.
1258 */
1259 diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
1260 index 565f7d8d3304..f2761b385541 100644
1261 --- a/drivers/iio/accel/sca3000.c
1262 +++ b/drivers/iio/accel/sca3000.c
1263 @@ -797,6 +797,7 @@ static int sca3000_write_raw(struct iio_dev *indio_dev,
1264 mutex_lock(&st->lock);
1265 ret = sca3000_write_3db_freq(st, val);
1266 mutex_unlock(&st->lock);
1267 + return ret;
1268 default:
1269 return -EINVAL;
1270 }
1271 diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
1272 index 99eba524f6dd..1642b55f70da 100644
1273 --- a/drivers/iio/frequency/ad9523.c
1274 +++ b/drivers/iio/frequency/ad9523.c
1275 @@ -508,7 +508,7 @@ static ssize_t ad9523_store(struct device *dev,
1276 return ret;
1277
1278 if (!state)
1279 - return 0;
1280 + return len;
1281
1282 mutex_lock(&indio_dev->mlock);
1283 switch ((u32)this_attr->address) {
1284 @@ -642,7 +642,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
1285 code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
1286 AD9523_CLK_DIST_DIV_REV(ret);
1287 *val = code / 1000000;
1288 - *val2 = (code % 1000000) * 10;
1289 + *val2 = code % 1000000;
1290 return IIO_VAL_INT_PLUS_MICRO;
1291 default:
1292 return -EINVAL;
1293 diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
1294 index 9eb12c2e3c74..83cfe44f070e 100644
1295 --- a/drivers/infiniband/sw/rxe/rxe_comp.c
1296 +++ b/drivers/infiniband/sw/rxe/rxe_comp.c
1297 @@ -276,6 +276,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
1298 case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
1299 if (wqe->wr.opcode != IB_WR_RDMA_READ &&
1300 wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
1301 + wqe->status = IB_WC_FATAL_ERR;
1302 return COMPST_ERROR;
1303 }
1304 reset_retry_counters(qp);
1305 diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
1306 index 97c2225829ea..60105ba77889 100644
1307 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
1308 +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
1309 @@ -1713,8 +1713,7 @@ static bool srpt_close_ch(struct srpt_rdma_ch *ch)
1310 int ret;
1311
1312 if (!srpt_set_ch_state(ch, CH_DRAINING)) {
1313 - pr_debug("%s-%d: already closed\n", ch->sess_name,
1314 - ch->qp->qp_num);
1315 + pr_debug("%s: already closed\n", ch->sess_name);
1316 return false;
1317 }
1318
1319 diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
1320 index e3dbb6101b4a..c0d1c4db5794 100644
1321 --- a/drivers/iommu/dmar.c
1322 +++ b/drivers/iommu/dmar.c
1323 @@ -1336,8 +1336,8 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1324 qi_submit_sync(&desc, iommu);
1325 }
1326
1327 -void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
1328 - u64 addr, unsigned mask)
1329 +void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1330 + u16 qdep, u64 addr, unsigned mask)
1331 {
1332 struct qi_desc desc;
1333
1334 @@ -1352,7 +1352,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
1335 qdep = 0;
1336
1337 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1338 - QI_DIOTLB_TYPE;
1339 + QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
1340
1341 qi_submit_sync(&desc, iommu);
1342 }
1343 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1344 index e8414bcf8390..aaf3fed97477 100644
1345 --- a/drivers/iommu/intel-iommu.c
1346 +++ b/drivers/iommu/intel-iommu.c
1347 @@ -422,6 +422,7 @@ struct device_domain_info {
1348 struct list_head global; /* link to global list */
1349 u8 bus; /* PCI bus number */
1350 u8 devfn; /* PCI devfn number */
1351 + u16 pfsid; /* SRIOV physical function source ID */
1352 u8 pasid_supported:3;
1353 u8 pasid_enabled:1;
1354 u8 pri_supported:1;
1355 @@ -1502,6 +1503,20 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1356 return;
1357
1358 pdev = to_pci_dev(info->dev);
1359 + /* For IOMMU that supports device IOTLB throttling (DIT), we assign
1360 + * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
1361 + * queue depth at PF level. If DIT is not set, PFSID will be treated as
1362 + * reserved, which should be set to 0.
1363 + */
1364 + if (!ecap_dit(info->iommu->ecap))
1365 + info->pfsid = 0;
1366 + else {
1367 + struct pci_dev *pf_pdev;
1368 +
1369 + /* pdev will be returned if device is not a vf */
1370 + pf_pdev = pci_physfn(pdev);
1371 + info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
1372 + }
1373
1374 #ifdef CONFIG_INTEL_IOMMU_SVM
1375 /* The PCIe spec, in its wisdom, declares that the behaviour of
1376 @@ -1567,7 +1582,8 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1377
1378 sid = info->bus << 8 | info->devfn;
1379 qdep = info->ats_qdep;
1380 - qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1381 + qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1382 + qdep, addr, mask);
1383 }
1384 spin_unlock_irqrestore(&device_domain_lock, flags);
1385 }
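The dmar/intel-iommu pair threads a cached PFSID into device-IOTLB invalidation: when the IOMMU advertises device-IOTLB throttling (the DIT capability), a VF's invalidation descriptor carries its physical function's source ID so hardware can account queue depth at the PF. A sketch of the PCI_DEVID-style packing the code relies on, bus in the high byte and devfn in the low byte (the addresses are made up):

#include <stdint.h>
#include <stdio.h>

static uint16_t pci_devid(uint8_t bus, uint8_t devfn)
{
	return ((uint16_t)bus << 8) | devfn;
}

int main(void)
{
	/* a VF at 03:00.4 whose physical function sits at 03:00.0 */
	uint16_t sid   = pci_devid(0x03, 0x04);	/* VF source ID */
	uint16_t pfsid = pci_devid(0x03, 0x00);	/* PF source ID, cached */

	printf("sid=%#x pfsid=%#x\n", sid, pfsid);
	return 0;
}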
1386 diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
1387 index a7040163dd43..b8b2b3533f46 100644
1388 --- a/drivers/mailbox/mailbox-xgene-slimpro.c
1389 +++ b/drivers/mailbox/mailbox-xgene-slimpro.c
1390 @@ -195,9 +195,9 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
1391 platform_set_drvdata(pdev, ctx);
1392
1393 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1394 - mb_base = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
1395 - if (!mb_base)
1396 - return -ENOMEM;
1397 + mb_base = devm_ioremap_resource(&pdev->dev, regs);
1398 + if (IS_ERR(mb_base))
1399 + return PTR_ERR(mb_base);
1400
1401 /* Setup mailbox links */
1402 for (i = 0; i < MBOX_CNT; i++) {
1403 diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
1404 index 930b00f6a3a2..5adb0c850b6c 100644
1405 --- a/drivers/md/bcache/writeback.c
1406 +++ b/drivers/md/bcache/writeback.c
1407 @@ -456,8 +456,10 @@ static int bch_writeback_thread(void *arg)
1408 * data on cache. BCACHE_DEV_DETACHING flag is set in
1409 * bch_cached_dev_detach().
1410 */
1411 - if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
1412 + if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
1413 + up_write(&dc->writeback_lock);
1414 break;
1415 + }
1416 }
1417
1418 up_write(&dc->writeback_lock);
1419 diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
1420 index 4a4e9c75fc4c..0a5a45f3ec5f 100644
1421 --- a/drivers/md/dm-cache-metadata.c
1422 +++ b/drivers/md/dm-cache-metadata.c
1423 @@ -362,7 +362,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
1424 disk_super->version = cpu_to_le32(cmd->version);
1425 memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
1426 memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
1427 - disk_super->policy_hint_size = 0;
1428 + disk_super->policy_hint_size = cpu_to_le32(0);
1429
1430 __copy_sm_root(cmd, disk_super);
1431
1432 @@ -700,6 +700,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
1433 disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
1434 disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
1435 disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
1436 + disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
1437
1438 disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
1439 disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
1440 @@ -1321,6 +1322,7 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
1441
1442 dm_oblock_t oblock;
1443 unsigned flags;
1444 + bool dirty = true;
1445
1446 dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
1447 memcpy(&mapping, mapping_value_le, sizeof(mapping));
1448 @@ -1331,8 +1333,10 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
1449 dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
1450 memcpy(&hint, hint_value_le, sizeof(hint));
1451 }
1452 + if (cmd->clean_when_opened)
1453 + dirty = flags & M_DIRTY;
1454
1455 - r = fn(context, oblock, to_cblock(cb), flags & M_DIRTY,
1456 + r = fn(context, oblock, to_cblock(cb), dirty,
1457 le32_to_cpu(hint), hints_valid);
1458 if (r) {
1459 DMERR("policy couldn't load cache block %llu",
1460 @@ -1360,7 +1364,7 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
1461
1462 dm_oblock_t oblock;
1463 unsigned flags;
1464 - bool dirty;
1465 + bool dirty = true;
1466
1467 dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
1468 memcpy(&mapping, mapping_value_le, sizeof(mapping));
1469 @@ -1371,8 +1375,9 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
1470 dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
1471 memcpy(&hint, hint_value_le, sizeof(hint));
1472 }
1473 + if (cmd->clean_when_opened)
1474 + dirty = dm_bitset_cursor_get_value(dirty_cursor);
1475
1476 - dirty = dm_bitset_cursor_get_value(dirty_cursor);
1477 r = fn(context, oblock, to_cblock(cb), dirty,
1478 le32_to_cpu(hint), hints_valid);
1479 if (r) {
1480 diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
1481 index f575110454b6..c60d29d09687 100644
1482 --- a/drivers/md/dm-crypt.c
1483 +++ b/drivers/md/dm-crypt.c
1484 @@ -3072,11 +3072,11 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
1485 */
1486 limits->max_segment_size = PAGE_SIZE;
1487
1488 - if (cc->sector_size != (1 << SECTOR_SHIFT)) {
1489 - limits->logical_block_size = cc->sector_size;
1490 - limits->physical_block_size = cc->sector_size;
1491 - blk_limits_io_min(limits, cc->sector_size);
1492 - }
1493 + limits->logical_block_size =
1494 + max_t(unsigned short, limits->logical_block_size, cc->sector_size);
1495 + limits->physical_block_size =
1496 + max_t(unsigned, limits->physical_block_size, cc->sector_size);
1497 + limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
1498 }
1499
1500 static struct target_type crypt_target = {
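The dm-crypt hunk stops overwriting stacked queue limits and instead raises each one to at least cc->sector_size, so limits already set by other layers survive. A small sketch of that max-combine rule with an abbreviated, illustrative limits struct:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

struct limits { unsigned lbs, pbs, io_min; };

static void stack_limits(struct limits *l, unsigned sector_size)
{
	/* never lower a limit another layer already raised */
	l->lbs    = MAX(l->lbs, sector_size);
	l->pbs    = MAX(l->pbs, sector_size);
	l->io_min = MAX(l->io_min, sector_size);
}

int main(void)
{
	struct limits l = { .lbs = 512, .pbs = 4096, .io_min = 4096 };

	stack_limits(&l, 4096);
	printf("%u %u %u\n", l.lbs, l.pbs, l.io_min);
	return 0;
}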
1501 diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
1502 index cbc56372ff97..898286ed47a1 100644
1503 --- a/drivers/md/dm-integrity.c
1504 +++ b/drivers/md/dm-integrity.c
1505 @@ -177,7 +177,7 @@ struct dm_integrity_c {
1506 __u8 sectors_per_block;
1507
1508 unsigned char mode;
1509 - bool suspending;
1510 + int suspending;
1511
1512 int failed;
1513
1514 @@ -2209,7 +2209,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
1515
1516 del_timer_sync(&ic->autocommit_timer);
1517
1518 - ic->suspending = true;
1519 + WRITE_ONCE(ic->suspending, 1);
1520
1521 queue_work(ic->commit_wq, &ic->commit_work);
1522 drain_workqueue(ic->commit_wq);
1523 @@ -2219,7 +2219,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
1524 dm_integrity_flush_buffers(ic);
1525 }
1526
1527 - ic->suspending = false;
1528 + WRITE_ONCE(ic->suspending, 0);
1529
1530 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
1531
1532 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1533 index 72ae5dc50532..6cf9ad4e4e16 100644
1534 --- a/drivers/md/dm-thin.c
1535 +++ b/drivers/md/dm-thin.c
1536 @@ -2514,6 +2514,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1537 case PM_WRITE:
1538 if (old_mode != new_mode)
1539 notify_of_pool_mode_change(pool, "write");
1540 + if (old_mode == PM_OUT_OF_DATA_SPACE)
1541 + cancel_delayed_work_sync(&pool->no_space_timeout);
1542 pool->out_of_data_space = false;
1543 pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
1544 dm_pool_metadata_read_write(pool->pmd);
1545 diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
1546 index 698fa764999c..59b0c1fce9be 100644
1547 --- a/drivers/media/i2c/tvp5150.c
1548 +++ b/drivers/media/i2c/tvp5150.c
1549 @@ -871,7 +871,7 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
1550 f = &format->format;
1551
1552 f->width = decoder->rect.width;
1553 - f->height = decoder->rect.height;
1554 + f->height = decoder->rect.height / 2;
1555
1556 f->code = MEDIA_BUS_FMT_UYVY8_2X8;
1557 f->field = V4L2_FIELD_ALTERNATE;
1558 diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
1559 index c37ccbfd52f2..96c07fa1802a 100644
1560 --- a/drivers/mfd/hi655x-pmic.c
1561 +++ b/drivers/mfd/hi655x-pmic.c
1562 @@ -49,7 +49,7 @@ static struct regmap_config hi655x_regmap_config = {
1563 .reg_bits = 32,
1564 .reg_stride = HI655X_STRIDE,
1565 .val_bits = 8,
1566 - .max_register = HI655X_BUS_ADDR(0xFFF),
1567 + .max_register = HI655X_BUS_ADDR(0x400) - HI655X_STRIDE,
1568 };
1569
1570 static struct resource pwrkey_resources[] = {
1571 diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
1572 index c1ba0d42cbc8..e0f29b8a872d 100644
1573 --- a/drivers/misc/cxl/main.c
1574 +++ b/drivers/misc/cxl/main.c
1575 @@ -287,7 +287,7 @@ int cxl_adapter_context_get(struct cxl *adapter)
1576 int rc;
1577
1578 rc = atomic_inc_unless_negative(&adapter->contexts_num);
1579 - return rc >= 0 ? 0 : -EBUSY;
1580 + return rc ? 0 : -EBUSY;
1581 }
1582
1583 void cxl_adapter_context_put(struct cxl *adapter)
1584 diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
1585 index 56c6f79a5c5a..5f8b583c6e41 100644
1586 --- a/drivers/misc/vmw_balloon.c
1587 +++ b/drivers/misc/vmw_balloon.c
1588 @@ -341,7 +341,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
1589 success = false;
1590 }
1591
1592 - if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
1593 + /*
1594 + * 2MB pages are only supported with batching. If batching is for some
1595 + * reason disabled, do not use 2MB pages, since otherwise the legacy
1596 + * mechanism is used with 2MB pages, causing a failure.
1597 + */
1598 + if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
1599 + (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
1600 b->supported_page_sizes = 2;
1601 else
1602 b->supported_page_sizes = 1;
1603 @@ -450,7 +456,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
1604
1605 pfn32 = (u32)pfn;
1606 if (pfn32 != pfn)
1607 - return -1;
1608 + return -EINVAL;
1609
1610 STATS_INC(b->stats.lock[false]);
1611
1612 @@ -460,7 +466,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
1613
1614 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
1615 STATS_INC(b->stats.lock_fail[false]);
1616 - return 1;
1617 + return -EIO;
1618 }
1619
1620 static int vmballoon_send_batched_lock(struct vmballoon *b,
1621 @@ -597,11 +603,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
1622
1623 locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
1624 target);
1625 - if (locked > 0) {
1626 + if (locked) {
1627 STATS_INC(b->stats.refused_alloc[false]);
1628
1629 - if (hv_status == VMW_BALLOON_ERROR_RESET ||
1630 - hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
1631 + if (locked == -EIO &&
1632 + (hv_status == VMW_BALLOON_ERROR_RESET ||
1633 + hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
1634 vmballoon_free_page(page, false);
1635 return -EIO;
1636 }
1637 @@ -617,7 +624,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
1638 } else {
1639 vmballoon_free_page(page, false);
1640 }
1641 - return -EIO;
1642 + return locked;
1643 }
1644
1645 /* track allocated page */
1646 @@ -1029,29 +1036,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
1647 */
1648 static int vmballoon_vmci_init(struct vmballoon *b)
1649 {
1650 - int error = 0;
1651 + unsigned long error, dummy;
1652
1653 - if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
1654 - error = vmci_doorbell_create(&b->vmci_doorbell,
1655 - VMCI_FLAG_DELAYED_CB,
1656 - VMCI_PRIVILEGE_FLAG_RESTRICTED,
1657 - vmballoon_doorbell, b);
1658 -
1659 - if (error == VMCI_SUCCESS) {
1660 - VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
1661 - b->vmci_doorbell.context,
1662 - b->vmci_doorbell.resource, error);
1663 - STATS_INC(b->stats.doorbell_set);
1664 - }
1665 - }
1666 + if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
1667 + return 0;
1668
1669 - if (error != 0) {
1670 - vmballoon_vmci_cleanup(b);
1671 + error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
1672 + VMCI_PRIVILEGE_FLAG_RESTRICTED,
1673 + vmballoon_doorbell, b);
1674
1675 - return -EIO;
1676 - }
1677 + if (error != VMCI_SUCCESS)
1678 + goto fail;
1679 +
1680 + error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
1681 + b->vmci_doorbell.resource, dummy);
1682 +
1683 + STATS_INC(b->stats.doorbell_set);
1684 +
1685 + if (error != VMW_BALLOON_SUCCESS)
1686 + goto fail;
1687
1688 return 0;
1689 +fail:
1690 + vmballoon_vmci_cleanup(b);
1691 + return -EIO;
1692 }
1693
1694 /*
1695 @@ -1289,7 +1297,14 @@ static int __init vmballoon_init(void)
1696
1697 return 0;
1698 }
1699 -module_init(vmballoon_init);
1700 +
1701 +/*
1702 + * Using late_initcall() instead of module_init() allows the balloon to use the
1703 + * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
1704 + * VMCI is probed only after the balloon is initialized. If the balloon is used
1705 + * as a module, late_initcall() is equivalent to module_init().
1706 + */
1707 +late_initcall(vmballoon_init);
1708
1709 static void __exit vmballoon_exit(void)
1710 {
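The rewritten vmballoon_vmci_init() adopts the standard kernel unwind idiom: every setup step funnels its failure to a single label that performs the full teardown. A minimal sketch of the shape (placeholder step names, not the driver's API):

static int step_one(void), step_two(void);
static void teardown(void);

static int init_with_unwind(void)
{
        int err;

        err = step_one();       /* e.g. create the doorbell */
        if (err)
                goto fail;

        err = step_two();       /* e.g. register it with the hypervisor */
        if (err)
                goto fail;

        return 0;
fail:
        teardown();             /* one cleanup path for every failure */
        return -EIO;
}
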
1711 diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
1712 index 8bae88a150fd..713658be6661 100644
1713 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
1714 +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
1715 @@ -44,7 +44,7 @@
1716 /* DM_CM_RST */
1717 #define RST_DTRANRST1 BIT(9)
1718 #define RST_DTRANRST0 BIT(8)
1719 -#define RST_RESERVED_BITS GENMASK_ULL(32, 0)
1720 +#define RST_RESERVED_BITS GENMASK_ULL(31, 0)
1721
1722 /* DM_CM_INFO1 and DM_CM_INFO1_MASK */
1723 #define INFO1_CLEAR 0
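GENMASK_ULL(h, l) sets bits h..l inclusive, so GENMASK_ULL(32, 0) is a 33-bit mask that spills into bit 32, while the DM_CM_RST reserved field is only bits 31:0. A simplified but equivalent expansion makes the off-by-one visible:

/* simplified form of the kernel macro (valid for 0 <= l <= h <= 63) */
#define GENMASK_ULL(h, l)  ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* GENMASK_ULL(31, 0) == 0x00000000ffffffffULL  (intended mask)
 * GENMASK_ULL(32, 0) == 0x00000001ffffffffULL  (also sets bit 32) */
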
1724 diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h
1725 index dd1ee1f0af48..469134930026 100644
1726 --- a/drivers/net/wireless/marvell/libertas/dev.h
1727 +++ b/drivers/net/wireless/marvell/libertas/dev.h
1728 @@ -104,6 +104,7 @@ struct lbs_private {
1729 u8 fw_ready;
1730 u8 surpriseremoved;
1731 u8 setup_fw_on_resume;
1732 + u8 power_up_on_resume;
1733 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
1734 void (*reset_card) (struct lbs_private *priv);
1735 int (*power_save) (struct lbs_private *priv);
1736 diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
1737 index 2300e796c6ab..43743c26c071 100644
1738 --- a/drivers/net/wireless/marvell/libertas/if_sdio.c
1739 +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
1740 @@ -1290,15 +1290,23 @@ static void if_sdio_remove(struct sdio_func *func)
1741 static int if_sdio_suspend(struct device *dev)
1742 {
1743 struct sdio_func *func = dev_to_sdio_func(dev);
1744 - int ret;
1745 struct if_sdio_card *card = sdio_get_drvdata(func);
1746 + struct lbs_private *priv = card->priv;
1747 + int ret;
1748
1749 mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);
1750 + priv->power_up_on_resume = false;
1751
1752 /* If we're powered off anyway, just let the mmc layer remove the
1753 * card. */
1754 - if (!lbs_iface_active(card->priv))
1755 - return -ENOSYS;
1756 + if (!lbs_iface_active(priv)) {
1757 + if (priv->fw_ready) {
1758 + priv->power_up_on_resume = true;
1759 + if_sdio_power_off(card);
1760 + }
1761 +
1762 + return 0;
1763 + }
1764
1765 dev_info(dev, "%s: suspend: PM flags = 0x%x\n",
1766 sdio_func_id(func), flags);
1767 @@ -1306,9 +1314,14 @@ static int if_sdio_suspend(struct device *dev)
1768 /* If we aren't being asked to wake on anything, we should bail out
1769 * and let the SD stack power down the card.
1770 */
1771 - if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) {
1772 + if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
1773 dev_info(dev, "Suspend without wake params -- powering down card\n");
1774 - return -ENOSYS;
1775 + if (priv->fw_ready) {
1776 + priv->power_up_on_resume = true;
1777 + if_sdio_power_off(card);
1778 + }
1779 +
1780 + return 0;
1781 }
1782
1783 if (!(flags & MMC_PM_KEEP_POWER)) {
1784 @@ -1321,7 +1334,7 @@ static int if_sdio_suspend(struct device *dev)
1785 if (ret)
1786 return ret;
1787
1788 - ret = lbs_suspend(card->priv);
1789 + ret = lbs_suspend(priv);
1790 if (ret)
1791 return ret;
1792
1793 @@ -1336,6 +1349,11 @@ static int if_sdio_resume(struct device *dev)
1794
1795 dev_info(dev, "%s: resume: we're back\n", sdio_func_id(func));
1796
1797 + if (card->priv->power_up_on_resume) {
1798 + if_sdio_power_on(card);
1799 + wait_event(card->pwron_waitq, card->priv->fw_ready);
1800 + }
1801 +
1802 ret = lbs_resume(card->priv);
1803
1804 return ret;
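The suspend handler used to return -ENOSYS on these paths, apparently relying on the MMC core to power down and reinitialize the card on the driver's behalf; this backport stops depending on that and powers the card off itself, flagging resume to undo it. A condensed sketch of the two halves (driver names as in the patch, control flow flattened for illustration):

/* suspend side: power off ourselves instead of returning -ENOSYS */
if (!lbs_iface_active(priv) || priv->wol_criteria == EHS_REMOVE_WAKEUP) {
        if (priv->fw_ready) {
                priv->power_up_on_resume = true;        /* note for resume */
                if_sdio_power_off(card);
        }
        return 0;
}

/* resume side: restore power and wait for the firmware to come back */
if (priv->power_up_on_resume) {
        if_sdio_power_on(card);
        wait_event(card->pwron_waitq, priv->fw_ready);
}
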
1805 diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
1806 index 2fffd42767c7..fb5ab5812a22 100644
1807 --- a/drivers/nvdimm/bus.c
1808 +++ b/drivers/nvdimm/bus.c
1809 @@ -808,9 +808,9 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
1810 * overshoots the remainder by 4 bytes, assume it was
1811 * including 'status'.
1812 */
1813 - if (out_field[1] - 8 == remainder)
1814 + if (out_field[1] - 4 == remainder)
1815 return remainder;
1816 - return out_field[1] - 4;
1817 + return out_field[1] - 8;
1818 } else if (cmd == ND_CMD_CALL) {
1819 struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;
1820
1821 diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
1822 index 4c22cb395040..f7b8a86fa5c5 100644
1823 --- a/drivers/pwm/pwm-tiehrpwm.c
1824 +++ b/drivers/pwm/pwm-tiehrpwm.c
1825 @@ -33,10 +33,6 @@
1826 #define TBCTL 0x00
1827 #define TBPRD 0x0A
1828
1829 -#define TBCTL_RUN_MASK (BIT(15) | BIT(14))
1830 -#define TBCTL_STOP_NEXT 0
1831 -#define TBCTL_STOP_ON_CYCLE BIT(14)
1832 -#define TBCTL_FREE_RUN (BIT(15) | BIT(14))
1833 #define TBCTL_PRDLD_MASK BIT(3)
1834 #define TBCTL_PRDLD_SHDW 0
1835 #define TBCTL_PRDLD_IMDT BIT(3)
1836 @@ -360,7 +356,7 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
1837 /* Channels polarity can be configured from action qualifier module */
1838 configure_polarity(pc, pwm->hwpwm);
1839
1840 - /* Enable TBCLK before enabling PWM device */
1841 + /* Enable TBCLK */
1842 ret = clk_enable(pc->tbclk);
1843 if (ret) {
1844 dev_err(chip->dev, "Failed to enable TBCLK for %s: %d\n",
1845 @@ -368,9 +364,6 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
1846 return ret;
1847 }
1848
1849 - /* Enable time counter for free_run */
1850 - ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN);
1851 -
1852 return 0;
1853 }
1854
1855 @@ -388,6 +381,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
1856 aqcsfrc_mask = AQCSFRC_CSFA_MASK;
1857 }
1858
1859 + /* Update shadow register first before modifying active register */
1860 + ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
1861 /*
1862 * Changes to immediate action on Action Qualifier. This puts
1863 * Action Qualifier control on PWM output from next TBCLK
1864 @@ -400,9 +395,6 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
1865 /* Disabling TBCLK on PWM disable */
1866 clk_disable(pc->tbclk);
1867
1868 - /* Stop Time base counter */
1869 - ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_STOP_NEXT);
1870 -
1871 /* Disable clock on PWM disable */
1872 pm_runtime_put_sync(chip->dev);
1873 }
1874 diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
1875 index 13f7cd11c07e..ac6e6a6a194c 100644
1876 --- a/drivers/rtc/rtc-omap.c
1877 +++ b/drivers/rtc/rtc-omap.c
1878 @@ -817,13 +817,6 @@ static int omap_rtc_probe(struct platform_device *pdev)
1879 goto err;
1880 }
1881
1882 - if (rtc->is_pmic_controller) {
1883 - if (!pm_power_off) {
1884 - omap_rtc_power_off_rtc = rtc;
1885 - pm_power_off = omap_rtc_power_off;
1886 - }
1887 - }
1888 -
1889 /* Support ext_wakeup pinconf */
1890 rtc_pinctrl_desc.name = dev_name(&pdev->dev);
1891
1892 @@ -833,6 +826,13 @@ static int omap_rtc_probe(struct platform_device *pdev)
1893 return PTR_ERR(rtc->pctldev);
1894 }
1895
1896 + if (rtc->is_pmic_controller) {
1897 + if (!pm_power_off) {
1898 + omap_rtc_power_off_rtc = rtc;
1899 + pm_power_off = omap_rtc_power_off;
1900 + }
1901 + }
1902 +
1903 return 0;
1904
1905 err:
1906 diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
1907 index 4a001634023e..02bd1eba045b 100644
1908 --- a/drivers/spi/spi-cadence.c
1909 +++ b/drivers/spi/spi-cadence.c
1910 @@ -319,7 +319,7 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
1911 */
1912 if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
1913 CDNS_SPI_IXR_TXFULL)
1914 - usleep_range(10, 20);
1915 + udelay(10);
1916
1917 if (xspi->txbuf)
1918 cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
1919 diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
1920 index 6ddb6ef1fda4..c5bbe08771a4 100644
1921 --- a/drivers/spi/spi-davinci.c
1922 +++ b/drivers/spi/spi-davinci.c
1923 @@ -217,7 +217,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
1924 pdata = &dspi->pdata;
1925
1926 /* program delay transfers if tx_delay is non zero */
1927 - if (spicfg->wdelay)
1928 + if (spicfg && spicfg->wdelay)
1929 spidat1 |= SPIDAT1_WDEL;
1930
1931 /*
1932 diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
1933 index d89127f4a46d..ca013dd4ff6b 100644
1934 --- a/drivers/spi/spi-fsl-dspi.c
1935 +++ b/drivers/spi/spi-fsl-dspi.c
1936 @@ -1006,30 +1006,30 @@ static int dspi_probe(struct platform_device *pdev)
1937 goto out_master_put;
1938 }
1939
1940 + dspi->clk = devm_clk_get(&pdev->dev, "dspi");
1941 + if (IS_ERR(dspi->clk)) {
1942 + ret = PTR_ERR(dspi->clk);
1943 + dev_err(&pdev->dev, "unable to get clock\n");
1944 + goto out_master_put;
1945 + }
1946 + ret = clk_prepare_enable(dspi->clk);
1947 + if (ret)
1948 + goto out_master_put;
1949 +
1950 dspi_init(dspi);
1951 dspi->irq = platform_get_irq(pdev, 0);
1952 if (dspi->irq < 0) {
1953 dev_err(&pdev->dev, "can't get platform irq\n");
1954 ret = dspi->irq;
1955 - goto out_master_put;
1956 + goto out_clk_put;
1957 }
1958
1959 ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
1960 pdev->name, dspi);
1961 if (ret < 0) {
1962 dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
1963 - goto out_master_put;
1964 - }
1965 -
1966 - dspi->clk = devm_clk_get(&pdev->dev, "dspi");
1967 - if (IS_ERR(dspi->clk)) {
1968 - ret = PTR_ERR(dspi->clk);
1969 - dev_err(&pdev->dev, "unable to get clock\n");
1970 - goto out_master_put;
1971 + goto out_clk_put;
1972 }
1973 - ret = clk_prepare_enable(dspi->clk);
1974 - if (ret)
1975 - goto out_master_put;
1976
1977 if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
1978 ret = dspi_request_dma(dspi, res->start);
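The hunk moves clock acquisition ahead of dspi_init() and the IRQ setup, and retargets the later error paths from out_master_put to out_clk_put so the freshly enabled clock is released on failure. A minimal sketch of the restored unwind ordering (hypothetical names):

#include <linux/clk.h>
#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int example_probe(struct device *dev, struct clk *clk, int irq)
{
        int ret;

        ret = clk_prepare_enable(clk);          /* acquired first... */
        if (ret)
                return ret;                     /* nothing to unwind yet */

        ret = devm_request_irq(dev, irq, example_isr, 0, "example", NULL);
        if (ret)
                goto out_clk_put;               /* ...released on later failure */

        return 0;

out_clk_put:
        clk_disable_unprepare(clk);
        return ret;
}
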
1979 diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
1980 index 4cb515a3104c..3a2e46e49405 100644
1981 --- a/drivers/spi/spi-pxa2xx.c
1982 +++ b/drivers/spi/spi-pxa2xx.c
1983 @@ -1480,6 +1480,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
1984 { PCI_VDEVICE(INTEL, 0x31c2), LPSS_BXT_SSP },
1985 { PCI_VDEVICE(INTEL, 0x31c4), LPSS_BXT_SSP },
1986 { PCI_VDEVICE(INTEL, 0x31c6), LPSS_BXT_SSP },
1987 + /* ICL-LP */
1988 + { PCI_VDEVICE(INTEL, 0x34aa), LPSS_CNL_SSP },
1989 + { PCI_VDEVICE(INTEL, 0x34ab), LPSS_CNL_SSP },
1990 + { PCI_VDEVICE(INTEL, 0x34fb), LPSS_CNL_SSP },
1991 /* APL */
1992 { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
1993 { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
1994 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
1995 index c8cb0b398cb1..6db8844ef3ec 100644
1996 --- a/drivers/tty/serial/serial_core.c
1997 +++ b/drivers/tty/serial/serial_core.c
1998 @@ -195,6 +195,7 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
1999 {
2000 struct uart_port *uport = uart_port_check(state);
2001 unsigned long page;
2002 + unsigned long flags = 0;
2003 int retval = 0;
2004
2005 if (uport->type == PORT_UNKNOWN)
2006 @@ -209,15 +210,18 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
2007 * Initialise and allocate the transmit and temporary
2008 * buffer.
2009 */
2010 - if (!state->xmit.buf) {
2011 - /* This is protected by the per port mutex */
2012 - page = get_zeroed_page(GFP_KERNEL);
2013 - if (!page)
2014 - return -ENOMEM;
2015 + page = get_zeroed_page(GFP_KERNEL);
2016 + if (!page)
2017 + return -ENOMEM;
2018
2019 + uart_port_lock(state, flags);
2020 + if (!state->xmit.buf) {
2021 state->xmit.buf = (unsigned char *) page;
2022 uart_circ_clear(&state->xmit);
2023 + } else {
2024 + free_page(page);
2025 }
2026 + uart_port_unlock(uport, flags);
2027
2028 retval = uport->ops->startup(uport);
2029 if (retval == 0) {
2030 @@ -276,6 +280,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
2031 {
2032 struct uart_port *uport = uart_port_check(state);
2033 struct tty_port *port = &state->port;
2034 + unsigned long flags = 0;
2035
2036 /*
2037 * Set the TTY IO error marker
2038 @@ -308,10 +313,12 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
2039 /*
2040 * Free the transmit buffer page.
2041 */
2042 + uart_port_lock(state, flags);
2043 if (state->xmit.buf) {
2044 free_page((unsigned long)state->xmit.buf);
2045 state->xmit.buf = NULL;
2046 }
2047 + uart_port_unlock(uport, flags);
2048 }
2049
2050 /**
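Both hunks close a race on state->xmit.buf by doing the sleeping allocation before taking the port lock, then discarding the page if another thread installed a buffer in the meantime. The pattern in isolation (a generic spinlock spelled out here; the patch itself goes through the uart_port_lock() wrappers):

unsigned long page = get_zeroed_page(GFP_KERNEL);       /* may sleep: no lock held */
if (!page)
        return -ENOMEM;

spin_lock_irqsave(&port->lock, flags);
if (!state->xmit.buf) {
        state->xmit.buf = (unsigned char *)page;        /* we won the race */
        uart_circ_clear(&state->xmit);
} else {
        free_page(page);                                /* we lost: drop our copy */
}
spin_unlock_irqrestore(&port->lock, flags);
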
2051 diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
2052 index f741ba8df01b..11d73b5fc885 100644
2053 --- a/drivers/video/fbdev/core/fbmem.c
2054 +++ b/drivers/video/fbdev/core/fbmem.c
2055 @@ -1716,12 +1716,12 @@ static int do_register_framebuffer(struct fb_info *fb_info)
2056 return 0;
2057 }
2058
2059 -static int do_unregister_framebuffer(struct fb_info *fb_info)
2060 +static int unbind_console(struct fb_info *fb_info)
2061 {
2062 struct fb_event event;
2063 - int i, ret = 0;
2064 + int ret;
2065 + int i = fb_info->node;
2066
2067 - i = fb_info->node;
2068 if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
2069 return -EINVAL;
2070
2071 @@ -1736,17 +1736,29 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
2072 unlock_fb_info(fb_info);
2073 console_unlock();
2074
2075 + return ret;
2076 +}
2077 +
2078 +static int __unlink_framebuffer(struct fb_info *fb_info);
2079 +
2080 +static int do_unregister_framebuffer(struct fb_info *fb_info)
2081 +{
2082 + struct fb_event event;
2083 + int ret;
2084 +
2085 + ret = unbind_console(fb_info);
2086 +
2087 if (ret)
2088 return -EINVAL;
2089
2090 pm_vt_switch_unregister(fb_info->dev);
2091
2092 - unlink_framebuffer(fb_info);
2093 + __unlink_framebuffer(fb_info);
2094 if (fb_info->pixmap.addr &&
2095 (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
2096 kfree(fb_info->pixmap.addr);
2097 fb_destroy_modelist(&fb_info->modelist);
2098 - registered_fb[i] = NULL;
2099 + registered_fb[fb_info->node] = NULL;
2100 num_registered_fb--;
2101 fb_cleanup_device(fb_info);
2102 event.info = fb_info;
2103 @@ -1759,7 +1771,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
2104 return 0;
2105 }
2106
2107 -int unlink_framebuffer(struct fb_info *fb_info)
2108 +static int __unlink_framebuffer(struct fb_info *fb_info)
2109 {
2110 int i;
2111
2112 @@ -1771,6 +1783,20 @@ int unlink_framebuffer(struct fb_info *fb_info)
2113 device_destroy(fb_class, MKDEV(FB_MAJOR, i));
2114 fb_info->dev = NULL;
2115 }
2116 +
2117 + return 0;
2118 +}
2119 +
2120 +int unlink_framebuffer(struct fb_info *fb_info)
2121 +{
2122 + int ret;
2123 +
2124 + ret = __unlink_framebuffer(fb_info);
2125 + if (ret)
2126 + return ret;
2127 +
2128 + unbind_console(fb_info);
2129 +
2130 return 0;
2131 }
2132 EXPORT_SYMBOL(unlink_framebuffer);
2133 diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
2134 index f329eee6dc93..352abc39e891 100644
2135 --- a/fs/9p/xattr.c
2136 +++ b/fs/9p/xattr.c
2137 @@ -105,7 +105,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
2138 {
2139 struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
2140 struct iov_iter from;
2141 - int retval;
2142 + int retval, err;
2143
2144 iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);
2145
2146 @@ -126,7 +126,9 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
2147 retval);
2148 else
2149 p9_client_write(fid, 0, &from, &retval);
2150 - p9_client_clunk(fid);
2151 + err = p9_client_clunk(fid);
2152 + if (!retval && err)
2153 + retval = err;
2154 return retval;
2155 }
2156
2157 diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
2158 index 95f74bd2c067..70c4165d2d74 100644
2159 --- a/fs/nfs/blocklayout/dev.c
2160 +++ b/fs/nfs/blocklayout/dev.c
2161 @@ -204,7 +204,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
2162 chunk = div_u64(offset, dev->chunk_size);
2163 div_u64_rem(chunk, dev->nr_children, &chunk_idx);
2164
2165 - if (chunk_idx > dev->nr_children) {
2166 + if (chunk_idx >= dev->nr_children) {
2167 dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
2168 __func__, chunk_idx, offset, dev->chunk_size);
2169 /* error, should not happen */
2170 diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
2171 index 516b2248cafe..2c3f398995f6 100644
2172 --- a/fs/nfs/callback_proc.c
2173 +++ b/fs/nfs/callback_proc.c
2174 @@ -433,11 +433,14 @@ validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
2175 * a match. If the slot is in use and the sequence numbers match, the
2176 * client is still waiting for a response to the original request.
2177 */
2178 -static bool referring_call_exists(struct nfs_client *clp,
2179 +static int referring_call_exists(struct nfs_client *clp,
2180 uint32_t nrclists,
2181 - struct referring_call_list *rclists)
2182 + struct referring_call_list *rclists,
2183 + spinlock_t *lock)
2184 + __releases(lock)
2185 + __acquires(lock)
2186 {
2187 - bool status = 0;
2188 + int status = 0;
2189 int i, j;
2190 struct nfs4_session *session;
2191 struct nfs4_slot_table *tbl;
2192 @@ -460,8 +463,10 @@ static bool referring_call_exists(struct nfs_client *clp,
2193
2194 for (j = 0; j < rclist->rcl_nrefcalls; j++) {
2195 ref = &rclist->rcl_refcalls[j];
2196 + spin_unlock(lock);
2197 status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
2198 ref->rc_sequenceid, HZ >> 1) < 0;
2199 + spin_lock(lock);
2200 if (status)
2201 goto out;
2202 }
2203 @@ -538,7 +543,8 @@ __be32 nfs4_callback_sequence(void *argp, void *resp,
2204 * related callback was received before the response to the original
2205 * call.
2206 */
2207 - if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
2208 + if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
2209 + &tbl->slot_tbl_lock) < 0) {
2210 status = htonl(NFS4ERR_DELAY);
2211 goto out_unlock;
2212 }
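nfs4_slot_wait_on_seqid() can sleep, so the caller's slot_tbl_lock has to be dropped around it; the __releases/__acquires annotations let sparse verify that the lock state on exit matches entry. The idiom in isolation (hypothetical names):

/* caller holds `lock`; drop it across a sleeping wait, then retake it */
static int wait_dropping_lock(spinlock_t *lock, wait_queue_head_t *wq,
                              int *done)
        __releases(lock)
        __acquires(lock)
{
        long left;

        spin_unlock(lock);              /* must not sleep under a spinlock */
        left = wait_event_timeout(*wq, *done, HZ >> 1);
        spin_lock(lock);

        return left ? 0 : -ETIMEDOUT;
}
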
2213 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2214 index 51deff8e1f86..dda4a3a3ef6e 100644
2215 --- a/fs/nfs/nfs4proc.c
2216 +++ b/fs/nfs/nfs4proc.c
2217 @@ -547,8 +547,15 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
2218 ret = -EIO;
2219 return ret;
2220 out_retry:
2221 - if (ret == 0)
2222 + if (ret == 0) {
2223 exception->retry = 1;
2224 + /*
2225 + * For NFS4ERR_MOVED, the client transport will need to
2226 + * be recomputed after migration recovery has completed.
2227 + */
2228 + if (errorcode == -NFS4ERR_MOVED)
2229 + rpc_task_release_transport(task);
2230 + }
2231 return ret;
2232 }
2233
2234 diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
2235 index 60da59be83b6..4a3dd66175fe 100644
2236 --- a/fs/nfs/pnfs_nfs.c
2237 +++ b/fs/nfs/pnfs_nfs.c
2238 @@ -61,7 +61,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);
2239
2240 /* The generic layer is about to remove the req from the commit list.
2241 * If this will make the bucket empty, it will need to put the lseg reference.
2242 - * Note this must be called holding i_lock
2243 + * Note this must be called holding nfsi->commit_mutex
2244 */
2245 void
2246 pnfs_generic_clear_request_commit(struct nfs_page *req,
2247 @@ -149,9 +149,7 @@ restart:
2248 if (list_empty(&b->written)) {
2249 freeme = b->wlseg;
2250 b->wlseg = NULL;
2251 - spin_unlock(&cinfo->inode->i_lock);
2252 pnfs_put_lseg(freeme);
2253 - spin_lock(&cinfo->inode->i_lock);
2254 goto restart;
2255 }
2256 }
2257 @@ -167,7 +165,7 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
2258 LIST_HEAD(pages);
2259 int i;
2260
2261 - spin_lock(&cinfo->inode->i_lock);
2262 + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
2263 for (i = idx; i < fl_cinfo->nbuckets; i++) {
2264 bucket = &fl_cinfo->buckets[i];
2265 if (list_empty(&bucket->committing))
2266 @@ -177,12 +175,12 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
2267 list_for_each(pos, &bucket->committing)
2268 cinfo->ds->ncommitting--;
2269 list_splice_init(&bucket->committing, &pages);
2270 - spin_unlock(&cinfo->inode->i_lock);
2271 + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
2272 nfs_retry_commit(&pages, freeme, cinfo, i);
2273 pnfs_put_lseg(freeme);
2274 - spin_lock(&cinfo->inode->i_lock);
2275 + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
2276 }
2277 - spin_unlock(&cinfo->inode->i_lock);
2278 + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
2279 }
2280
2281 static unsigned int
2282 @@ -222,13 +220,13 @@ void pnfs_fetch_commit_bucket_list(struct list_head *pages,
2283 struct list_head *pos;
2284
2285 bucket = &cinfo->ds->buckets[data->ds_commit_index];
2286 - spin_lock(&cinfo->inode->i_lock);
2287 + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
2288 list_for_each(pos, &bucket->committing)
2289 cinfo->ds->ncommitting--;
2290 list_splice_init(&bucket->committing, pages);
2291 data->lseg = bucket->clseg;
2292 bucket->clseg = NULL;
2293 - spin_unlock(&cinfo->inode->i_lock);
2294 + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
2295
2296 }
2297
2298 diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
2299 index 7fa7d68baa6d..1d4f9997236f 100644
2300 --- a/fs/overlayfs/readdir.c
2301 +++ b/fs/overlayfs/readdir.c
2302 @@ -623,6 +623,21 @@ static int ovl_fill_real(struct dir_context *ctx, const char *name,
2303 return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
2304 }
2305
2306 +static bool ovl_is_impure_dir(struct file *file)
2307 +{
2308 + struct ovl_dir_file *od = file->private_data;
2309 + struct inode *dir = d_inode(file->f_path.dentry);
2310 +
2311 + /*
2312 + * Only upper dir can be impure, but if we are in the middle of
2313 + * iterating a lower real dir, dir could be copied up and marked
2314 + * impure. We only want the impure cache if we started iterating
2315 + * a real upper dir to begin with.
2316 + */
2317 + return od->is_upper && ovl_test_flag(OVL_IMPURE, dir);
2318 +
2319 +}
2320 +
2321 static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
2322 {
2323 int err;
2324 @@ -646,7 +661,7 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
2325 rdt.parent_ino = stat.ino;
2326 }
2327
2328 - if (ovl_test_flag(OVL_IMPURE, d_inode(dir))) {
2329 + if (ovl_is_impure_dir(file)) {
2330 rdt.cache = ovl_cache_get_impure(&file->f_path);
2331 if (IS_ERR(rdt.cache))
2332 return PTR_ERR(rdt.cache);
2333 @@ -676,7 +691,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
2334 * entries.
2335 */
2336 if (ovl_same_sb(dentry->d_sb) &&
2337 - (ovl_test_flag(OVL_IMPURE, d_inode(dentry)) ||
2338 + (ovl_is_impure_dir(file) ||
2339 OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent)))) {
2340 return ovl_iterate_real(file, ctx);
2341 }
2342 diff --git a/fs/quota/quota.c b/fs/quota/quota.c
2343 index 43612e2a73af..3f02bab0db4e 100644
2344 --- a/fs/quota/quota.c
2345 +++ b/fs/quota/quota.c
2346 @@ -18,6 +18,7 @@
2347 #include <linux/quotaops.h>
2348 #include <linux/types.h>
2349 #include <linux/writeback.h>
2350 +#include <linux/nospec.h>
2351
2352 static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
2353 qid_t id)
2354 @@ -703,6 +704,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
2355
2356 if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
2357 return -EINVAL;
2358 + type = array_index_nospec(type, MAXQUOTAS);
2359 /*
2360 * Quota not supported on this fs? Check this before s_quota_types
2361 * since they needn't be set if quota is not supported at all.
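array_index_nospec() is the Spectre-v1 mitigation: the bounds check alone does not stop the CPU from speculatively indexing with an out-of-range `type`, so the value is clamped branch-free after the check. The idiom in isolation:

#include <linux/nospec.h>

if (type >= MAXQUOTAS)
        return -EINVAL;
/* even under (mis)speculation past the check, this forces type into
 * [0, MAXQUOTAS), so no attacker-steered out-of-bounds load occurs */
type = array_index_nospec(type, MAXQUOTAS);
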
2362 diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
2363 index 8ae1cd8611cc..69051f7a9606 100644
2364 --- a/fs/ubifs/journal.c
2365 +++ b/fs/ubifs/journal.c
2366 @@ -665,6 +665,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
2367 spin_lock(&ui->ui_lock);
2368 ui->synced_i_size = ui->ui_size;
2369 spin_unlock(&ui->ui_lock);
2370 + if (xent) {
2371 + spin_lock(&host_ui->ui_lock);
2372 + host_ui->synced_i_size = host_ui->ui_size;
2373 + spin_unlock(&host_ui->ui_lock);
2374 + }
2375 mark_inode_clean(c, ui);
2376 mark_inode_clean(c, host_ui);
2377 return 0;
2378 @@ -1283,11 +1288,10 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
2379 int *new_len)
2380 {
2381 void *buf;
2382 - int err, compr_type;
2383 - u32 dlen, out_len, old_dlen;
2384 + int err, dlen, compr_type, out_len, old_dlen;
2385
2386 out_len = le32_to_cpu(dn->size);
2387 - buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
2388 + buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
2389 if (!buf)
2390 return -ENOMEM;
2391
2392 @@ -1389,7 +1393,16 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
2393 else if (err)
2394 goto out_free;
2395 else {
2396 - if (le32_to_cpu(dn->size) <= dlen)
2397 + int dn_len = le32_to_cpu(dn->size);
2398 +
2399 + if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
2400 + ubifs_err(c, "bad data node (block %u, inode %lu)",
2401 + blk, inode->i_ino);
2402 + ubifs_dump_node(c, dn);
2403 + goto out_free;
2404 + }
2405 +
2406 + if (dn_len <= dlen)
2407 dlen = 0; /* Nothing to do */
2408 else {
2409 err = truncate_data_node(c, inode, blk, dn, &dlen);
2410 diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
2411 index 6c3a1abd0e22..780a436d8c45 100644
2412 --- a/fs/ubifs/lprops.c
2413 +++ b/fs/ubifs/lprops.c
2414 @@ -1091,10 +1091,6 @@ static int scan_check_cb(struct ubifs_info *c,
2415 }
2416 }
2417
2418 - buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
2419 - if (!buf)
2420 - return -ENOMEM;
2421 -
2422 /*
2423 * After an unclean unmount, empty and freeable LEBs
2424 * may contain garbage - do not scan them.
2425 @@ -1113,6 +1109,10 @@ static int scan_check_cb(struct ubifs_info *c,
2426 return LPT_SCAN_CONTINUE;
2427 }
2428
2429 + buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
2430 + if (!buf)
2431 + return -ENOMEM;
2432 +
2433 sleb = ubifs_scan(c, lnum, 0, buf, 0);
2434 if (IS_ERR(sleb)) {
2435 ret = PTR_ERR(sleb);
2436 diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
2437 index c13eae819cbc..d47f16c0d582 100644
2438 --- a/fs/ubifs/xattr.c
2439 +++ b/fs/ubifs/xattr.c
2440 @@ -152,6 +152,12 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
2441 ui->data_len = size;
2442
2443 mutex_lock(&host_ui->ui_mutex);
2444 +
2445 + if (!host->i_nlink) {
2446 + err = -ENOENT;
2447 + goto out_noent;
2448 + }
2449 +
2450 host->i_ctime = current_time(host);
2451 host_ui->xattr_cnt += 1;
2452 host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
2453 @@ -183,6 +189,7 @@ out_cancel:
2454 host_ui->xattr_size -= CALC_XATTR_BYTES(size);
2455 host_ui->xattr_names -= fname_len(nm);
2456 host_ui->flags &= ~UBIFS_CRYPT_FL;
2457 +out_noent:
2458 mutex_unlock(&host_ui->ui_mutex);
2459 out_free:
2460 make_bad_inode(inode);
2461 @@ -234,6 +241,12 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
2462 mutex_unlock(&ui->ui_mutex);
2463
2464 mutex_lock(&host_ui->ui_mutex);
2465 +
2466 + if (!host->i_nlink) {
2467 + err = -ENOENT;
2468 + goto out_noent;
2469 + }
2470 +
2471 host->i_ctime = current_time(host);
2472 host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
2473 host_ui->xattr_size += CALC_XATTR_BYTES(size);
2474 @@ -255,6 +268,7 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
2475 out_cancel:
2476 host_ui->xattr_size -= CALC_XATTR_BYTES(size);
2477 host_ui->xattr_size += CALC_XATTR_BYTES(old_size);
2478 +out_noent:
2479 mutex_unlock(&host_ui->ui_mutex);
2480 make_bad_inode(inode);
2481 out_free:
2482 @@ -483,6 +497,12 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
2483 return err;
2484
2485 mutex_lock(&host_ui->ui_mutex);
2486 +
2487 + if (!host->i_nlink) {
2488 + err = -ENOENT;
2489 + goto out_noent;
2490 + }
2491 +
2492 host->i_ctime = current_time(host);
2493 host_ui->xattr_cnt -= 1;
2494 host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
2495 @@ -502,6 +522,7 @@ out_cancel:
2496 host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
2497 host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
2498 host_ui->xattr_names += fname_len(nm);
2499 +out_noent:
2500 mutex_unlock(&host_ui->ui_mutex);
2501 ubifs_release_budget(c, &req);
2502 make_bad_inode(inode);
2503 @@ -541,6 +562,9 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
2504
2505 ubifs_assert(inode_is_locked(host));
2506
2507 + if (!host->i_nlink)
2508 + return -ENOENT;
2509 +
2510 if (fname_len(&nm) > UBIFS_MAX_NLEN)
2511 return -ENAMETOOLONG;
2512
2513 diff --git a/fs/xattr.c b/fs/xattr.c
2514 index 61cd28ba25f3..be2ce57cd6ad 100644
2515 --- a/fs/xattr.c
2516 +++ b/fs/xattr.c
2517 @@ -541,7 +541,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
2518 if (error > 0) {
2519 if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
2520 (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
2521 - posix_acl_fix_xattr_to_user(kvalue, size);
2522 + posix_acl_fix_xattr_to_user(kvalue, error);
2523 if (size && copy_to_user(value, kvalue, error))
2524 error = -EFAULT;
2525 } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
2526 diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
2527 index ba74eaa8eadf..0c51f753652d 100644
2528 --- a/include/linux/hyperv.h
2529 +++ b/include/linux/hyperv.h
2530 @@ -1026,6 +1026,8 @@ extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
2531 extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
2532 u32 gpadl_handle);
2533
2534 +void vmbus_reset_channel_cb(struct vmbus_channel *channel);
2535 +
2536 extern int vmbus_recvpacket(struct vmbus_channel *channel,
2537 void *buffer,
2538 u32 bufferlen,
2539 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
2540 index 485a5b48f038..a6ab2f51f703 100644
2541 --- a/include/linux/intel-iommu.h
2542 +++ b/include/linux/intel-iommu.h
2543 @@ -112,6 +112,7 @@
2544 * Extended Capability Register
2545 */
2546
2547 +#define ecap_dit(e) ((e >> 41) & 0x1)
2548 #define ecap_pasid(e) ((e >> 40) & 0x1)
2549 #define ecap_pss(e) ((e >> 35) & 0x1f)
2550 #define ecap_eafs(e) ((e >> 34) & 0x1)
2551 @@ -281,6 +282,7 @@ enum {
2552 #define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
2553 #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
2554 #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
2555 +#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
2556 #define QI_DEV_IOTLB_SIZE 1
2557 #define QI_DEV_IOTLB_MAX_INVS 32
2558
2559 @@ -305,6 +307,7 @@ enum {
2560 #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
2561 #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
2562 #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
2563 +#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
2564 #define QI_DEV_EIOTLB_MAX_INVS 32
2565
2566 #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
2567 @@ -450,9 +453,8 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
2568 u8 fm, u64 type);
2569 extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
2570 unsigned int size_order, u64 type);
2571 -extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
2572 - u64 addr, unsigned mask);
2573 -
2574 +extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
2575 + u16 qdep, u64 addr, unsigned mask);
2576 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
2577
2578 extern int dmar_ir_support(void);
2579 diff --git a/include/linux/pci.h b/include/linux/pci.h
2580 index 9d6fae809c09..b1abbcc614cf 100644
2581 --- a/include/linux/pci.h
2582 +++ b/include/linux/pci.h
2583 @@ -2292,4 +2292,16 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2584 /* provide the legacy pci_dma_* API */
2585 #include <linux/pci-dma-compat.h>
2586
2587 +#define pci_printk(level, pdev, fmt, arg...) \
2588 + dev_printk(level, &(pdev)->dev, fmt, ##arg)
2589 +
2590 +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
2591 +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
2592 +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
2593 +#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
2594 +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
2595 +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
2596 +#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
2597 +#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
2598 +
2599 #endif /* LINUX_PCI_H */
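These wrappers exist purely to shorten the ubiquitous dev_*(&pdev->dev, ...) pattern; messages keep the standard device-name prefix. A usage sketch:

/* before */
dev_err(&pdev->dev, "BAR %d: can't reserve %pR\n", bar, res);

/* after: identical output, less boilerplate */
pci_err(pdev, "BAR %d: can't reserve %pR\n", bar, res);
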
2600 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
2601 index 71c237e8240e..166fc4e76df6 100644
2602 --- a/include/linux/sunrpc/clnt.h
2603 +++ b/include/linux/sunrpc/clnt.h
2604 @@ -156,6 +156,7 @@ int rpc_switch_client_transport(struct rpc_clnt *,
2605
2606 void rpc_shutdown_client(struct rpc_clnt *);
2607 void rpc_release_client(struct rpc_clnt *);
2608 +void rpc_task_release_transport(struct rpc_task *);
2609 void rpc_task_release_client(struct rpc_task *);
2610
2611 int rpcb_create_local(struct net *);
2612 diff --git a/include/linux/verification.h b/include/linux/verification.h
2613 index a10549a6c7cd..cfa4730d607a 100644
2614 --- a/include/linux/verification.h
2615 +++ b/include/linux/verification.h
2616 @@ -12,6 +12,12 @@
2617 #ifndef _LINUX_VERIFICATION_H
2618 #define _LINUX_VERIFICATION_H
2619
2620 +/*
2621 + * Indicate that both builtin trusted keys and secondary trusted keys
2622 + * should be used.
2623 + */
2624 +#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL)
2625 +
2626 /*
2627 * The use to which an asymmetric key is being put.
2628 */
2629 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
2630 index 1252a7a89bc0..85e32ee739fc 100644
2631 --- a/include/video/udlfb.h
2632 +++ b/include/video/udlfb.h
2633 @@ -88,7 +88,7 @@ struct dlfb_data {
2634 #define MIN_RAW_PIX_BYTES 2
2635 #define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
2636
2637 -#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
2638 +#define DL_DEFIO_WRITE_DELAY msecs_to_jiffies(HZ <= 300 ? 4 : 10) /* optimal value for 720p video */
2639 #define DL_DEFIO_WRITE_DISABLE (HZ*60) /* "disable" with long delay */
2640
2641 /* remove these once align.h patch is taken into kernel */
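The old literal 5 was a jiffy count, so the real delay swung with the kernel's HZ; the replacement picks a delay in milliseconds and converts. Worked numbers (msecs_to_jiffies() rounds up):

/* HZ = 100:  msecs_to_jiffies(4)  = 1 jiffy   (~10 ms effective)
 * HZ = 250:  msecs_to_jiffies(4)  = 1 jiffy   (4 ms)
 * HZ = 1000: msecs_to_jiffies(10) = 10 jiffies (10 ms)
 * old value `5` meant 5 jiffies: 50 ms at HZ=100 but 5 ms at HZ=1000 */
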
2642 diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
2643 index bf8c8fd72589..7c51f065b212 100644
2644 --- a/kernel/livepatch/core.c
2645 +++ b/kernel/livepatch/core.c
2646 @@ -605,6 +605,9 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
2647 if (!func->old_name || !func->new_func)
2648 return -EINVAL;
2649
2650 + if (strlen(func->old_name) >= KSYM_NAME_LEN)
2651 + return -EINVAL;
2652 +
2653 INIT_LIST_HEAD(&func->stack_node);
2654 func->patched = false;
2655 func->transition = false;
2656 @@ -678,6 +681,9 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
2657 if (!obj->funcs)
2658 return -EINVAL;
2659
2660 + if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
2661 + return -EINVAL;
2662 +
2663 obj->patched = false;
2664 obj->mod = NULL;
2665
2666 diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
2667 index e8517b63eb37..dd2b5a4d89a5 100644
2668 --- a/kernel/power/Kconfig
2669 +++ b/kernel/power/Kconfig
2670 @@ -105,6 +105,7 @@ config PM_SLEEP
2671 def_bool y
2672 depends on SUSPEND || HIBERNATE_CALLBACKS
2673 select PM
2674 + select SRCU
2675
2676 config PM_SLEEP_SMP
2677 def_bool y
2678 diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
2679 index d482fd61ac67..64f8046586b6 100644
2680 --- a/kernel/printk/printk_safe.c
2681 +++ b/kernel/printk/printk_safe.c
2682 @@ -309,12 +309,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
2683 return printk_safe_log_store(s, fmt, args);
2684 }
2685
2686 -void printk_nmi_enter(void)
2687 +void notrace printk_nmi_enter(void)
2688 {
2689 this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
2690 }
2691
2692 -void printk_nmi_exit(void)
2693 +void notrace printk_nmi_exit(void)
2694 {
2695 this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
2696 }
2697 diff --git a/kernel/sys.c b/kernel/sys.c
2698 index de4ed027dfd7..e25ec93aea22 100644
2699 --- a/kernel/sys.c
2700 +++ b/kernel/sys.c
2701 @@ -1176,18 +1176,19 @@ static int override_release(char __user *release, size_t len)
2702
2703 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
2704 {
2705 - int errno = 0;
2706 + struct new_utsname tmp;
2707
2708 down_read(&uts_sem);
2709 - if (copy_to_user(name, utsname(), sizeof *name))
2710 - errno = -EFAULT;
2711 + memcpy(&tmp, utsname(), sizeof(tmp));
2712 up_read(&uts_sem);
2713 + if (copy_to_user(name, &tmp, sizeof(tmp)))
2714 + return -EFAULT;
2715
2716 - if (!errno && override_release(name->release, sizeof(name->release)))
2717 - errno = -EFAULT;
2718 - if (!errno && override_architecture(name))
2719 - errno = -EFAULT;
2720 - return errno;
2721 + if (override_release(name->release, sizeof(name->release)))
2722 + return -EFAULT;
2723 + if (override_architecture(name))
2724 + return -EFAULT;
2725 + return 0;
2726 }
2727
2728 #ifdef __ARCH_WANT_SYS_OLD_UNAME
2729 @@ -1196,55 +1197,46 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
2730 */
2731 SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
2732 {
2733 - int error = 0;
2734 + struct old_utsname tmp;
2735
2736 if (!name)
2737 return -EFAULT;
2738
2739 down_read(&uts_sem);
2740 - if (copy_to_user(name, utsname(), sizeof(*name)))
2741 - error = -EFAULT;
2742 + memcpy(&tmp, utsname(), sizeof(tmp));
2743 up_read(&uts_sem);
2744 + if (copy_to_user(name, &tmp, sizeof(tmp)))
2745 + return -EFAULT;
2746
2747 - if (!error && override_release(name->release, sizeof(name->release)))
2748 - error = -EFAULT;
2749 - if (!error && override_architecture(name))
2750 - error = -EFAULT;
2751 - return error;
2752 + if (override_release(name->release, sizeof(name->release)))
2753 + return -EFAULT;
2754 + if (override_architecture(name))
2755 + return -EFAULT;
2756 + return 0;
2757 }
2758
2759 SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
2760 {
2761 - int error;
2762 + struct oldold_utsname tmp = {};
2763
2764 if (!name)
2765 return -EFAULT;
2766 - if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
2767 - return -EFAULT;
2768
2769 down_read(&uts_sem);
2770 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
2771 - __OLD_UTS_LEN);
2772 - error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
2773 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
2774 - __OLD_UTS_LEN);
2775 - error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
2776 - error |= __copy_to_user(&name->release, &utsname()->release,
2777 - __OLD_UTS_LEN);
2778 - error |= __put_user(0, name->release + __OLD_UTS_LEN);
2779 - error |= __copy_to_user(&name->version, &utsname()->version,
2780 - __OLD_UTS_LEN);
2781 - error |= __put_user(0, name->version + __OLD_UTS_LEN);
2782 - error |= __copy_to_user(&name->machine, &utsname()->machine,
2783 - __OLD_UTS_LEN);
2784 - error |= __put_user(0, name->machine + __OLD_UTS_LEN);
2785 + memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
2786 + memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
2787 + memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
2788 + memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
2789 + memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
2790 up_read(&uts_sem);
2791 + if (copy_to_user(name, &tmp, sizeof(tmp)))
2792 + return -EFAULT;
2793
2794 - if (!error && override_architecture(name))
2795 - error = -EFAULT;
2796 - if (!error && override_release(name->release, sizeof(name->release)))
2797 - error = -EFAULT;
2798 - return error ? -EFAULT : 0;
2799 + if (override_architecture(name))
2800 + return -EFAULT;
2801 + if (override_release(name->release, sizeof(name->release)))
2802 + return -EFAULT;
2803 + return 0;
2804 }
2805 #endif
2806
2807 @@ -1258,17 +1250,18 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
2808
2809 if (len < 0 || len > __NEW_UTS_LEN)
2810 return -EINVAL;
2811 - down_write(&uts_sem);
2812 errno = -EFAULT;
2813 if (!copy_from_user(tmp, name, len)) {
2814 - struct new_utsname *u = utsname();
2815 + struct new_utsname *u;
2816
2817 + down_write(&uts_sem);
2818 + u = utsname();
2819 memcpy(u->nodename, tmp, len);
2820 memset(u->nodename + len, 0, sizeof(u->nodename) - len);
2821 errno = 0;
2822 uts_proc_notify(UTS_PROC_HOSTNAME);
2823 + up_write(&uts_sem);
2824 }
2825 - up_write(&uts_sem);
2826 return errno;
2827 }
2828
2829 @@ -1276,8 +1269,9 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
2830
2831 SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
2832 {
2833 - int i, errno;
2834 + int i;
2835 struct new_utsname *u;
2836 + char tmp[__NEW_UTS_LEN + 1];
2837
2838 if (len < 0)
2839 return -EINVAL;
2840 @@ -1286,11 +1280,11 @@ SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
2841 i = 1 + strlen(u->nodename);
2842 if (i > len)
2843 i = len;
2844 - errno = 0;
2845 - if (copy_to_user(name, u->nodename, i))
2846 - errno = -EFAULT;
2847 + memcpy(tmp, u->nodename, i);
2848 up_read(&uts_sem);
2849 - return errno;
2850 + if (copy_to_user(name, tmp, i))
2851 + return -EFAULT;
2852 + return 0;
2853 }
2854
2855 #endif
2856 @@ -1309,17 +1303,18 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
2857 if (len < 0 || len > __NEW_UTS_LEN)
2858 return -EINVAL;
2859
2860 - down_write(&uts_sem);
2861 errno = -EFAULT;
2862 if (!copy_from_user(tmp, name, len)) {
2863 - struct new_utsname *u = utsname();
2864 + struct new_utsname *u;
2865
2866 + down_write(&uts_sem);
2867 + u = utsname();
2868 memcpy(u->domainname, tmp, len);
2869 memset(u->domainname + len, 0, sizeof(u->domainname) - len);
2870 errno = 0;
2871 uts_proc_notify(UTS_PROC_DOMAINNAME);
2872 + up_write(&uts_sem);
2873 }
2874 - up_write(&uts_sem);
2875 return errno;
2876 }
2877
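Every conversion in this hunk follows one rule: snapshot the UTS data into a stack buffer while holding uts_sem, and touch user memory only after the semaphore is dropped, since copy_to_user()/copy_from_user() can fault and block for arbitrarily long (e.g. under userfaultfd), wedging writers queued behind the rwsem. The pattern in isolation:

char tmp[__NEW_UTS_LEN + 1];

down_read(&uts_sem);
memcpy(tmp, utsname()->nodename, sizeof(tmp));  /* lock held: kernel memory only */
up_read(&uts_sem);

if (copy_to_user(ubuf, tmp, sizeof(tmp)))       /* may fault: no lock held */
        return -EFAULT;
return 0;
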
2878 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
2879 index e73dcab8e9f0..71a8ee6e60dc 100644
2880 --- a/kernel/trace/blktrace.c
2881 +++ b/kernel/trace/blktrace.c
2882 @@ -1809,6 +1809,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
2883 mutex_lock(&q->blk_trace_mutex);
2884
2885 if (attr == &dev_attr_enable) {
2886 + if (!!value == !!q->blk_trace) {
2887 + ret = 0;
2888 + goto out_unlock_bdev;
2889 + }
2890 if (value)
2891 ret = blk_trace_setup_queue(q, bdev);
2892 else
2893 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2894 index b7302c37c064..e9cbb96cd99e 100644
2895 --- a/kernel/trace/trace.c
2896 +++ b/kernel/trace/trace.c
2897 @@ -7545,7 +7545,9 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
2898
2899 if (buffer) {
2900 mutex_lock(&trace_types_lock);
2901 - if (val) {
2902 + if (!!val == tracer_tracing_is_on(tr)) {
2903 + val = 0; /* do nothing */
2904 + } else if (val) {
2905 tracer_tracing_on(tr);
2906 if (tr->current_trace->start)
2907 tr->current_trace->start(tr);
2908 diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
2909 index 7197ff9f0bbd..ea0d90a31fc9 100644
2910 --- a/kernel/trace/trace_uprobe.c
2911 +++ b/kernel/trace/trace_uprobe.c
2912 @@ -967,7 +967,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
2913
2914 list_del_rcu(&link->list);
2915 /* synchronize with u{,ret}probe_trace_func */
2916 - synchronize_sched();
2917 + synchronize_rcu();
2918 kfree(link);
2919
2920 if (!list_empty(&tu->tp.files))
2921 diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
2922 index c490f1e4313b..ed80a88980f0 100644
2923 --- a/kernel/user_namespace.c
2924 +++ b/kernel/user_namespace.c
2925 @@ -650,7 +650,16 @@ static ssize_t map_write(struct file *file, const char __user *buf,
2926 unsigned idx;
2927 struct uid_gid_extent *extent = NULL;
2928 char *kbuf = NULL, *pos, *next_line;
2929 - ssize_t ret = -EINVAL;
2930 + ssize_t ret;
2931 +
2932 + /* Only allow < page size writes at the beginning of the file */
2933 + if ((*ppos != 0) || (count >= PAGE_SIZE))
2934 + return -EINVAL;
2935 +
2936 + /* Slurp in the user data */
2937 + kbuf = memdup_user_nul(buf, count);
2938 + if (IS_ERR(kbuf))
2939 + return PTR_ERR(kbuf);
2940
2941 /*
2942 * The userns_state_mutex serializes all writes to any given map.
2943 @@ -684,19 +693,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
2944 if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
2945 goto out;
2946
2947 - /* Only allow < page size writes at the beginning of the file */
2948 - ret = -EINVAL;
2949 - if ((*ppos != 0) || (count >= PAGE_SIZE))
2950 - goto out;
2951 -
2952 - /* Slurp in the user data */
2953 - kbuf = memdup_user_nul(buf, count);
2954 - if (IS_ERR(kbuf)) {
2955 - ret = PTR_ERR(kbuf);
2956 - kbuf = NULL;
2957 - goto out;
2958 - }
2959 -
2960 /* Parse the user data */
2961 ret = -EINVAL;
2962 pos = kbuf;
2963 diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
2964 index 233cd8fc6910..258033d62cb3 100644
2965 --- a/kernel/utsname_sysctl.c
2966 +++ b/kernel/utsname_sysctl.c
2967 @@ -18,7 +18,7 @@
2968
2969 #ifdef CONFIG_PROC_SYSCTL
2970
2971 -static void *get_uts(struct ctl_table *table, int write)
2972 +static void *get_uts(struct ctl_table *table)
2973 {
2974 char *which = table->data;
2975 struct uts_namespace *uts_ns;
2976 @@ -26,21 +26,9 @@ static void *get_uts(struct ctl_table *table, int write)
2977 uts_ns = current->nsproxy->uts_ns;
2978 which = (which - (char *)&init_uts_ns) + (char *)uts_ns;
2979
2980 - if (!write)
2981 - down_read(&uts_sem);
2982 - else
2983 - down_write(&uts_sem);
2984 return which;
2985 }
2986
2987 -static void put_uts(struct ctl_table *table, int write, void *which)
2988 -{
2989 - if (!write)
2990 - up_read(&uts_sem);
2991 - else
2992 - up_write(&uts_sem);
2993 -}
2994 -
2995 /*
2996 * Special case of dostring for the UTS structure. This has locks
2997 * to observe. Should this be in kernel/sys.c ????
2998 @@ -50,13 +38,34 @@ static int proc_do_uts_string(struct ctl_table *table, int write,
2999 {
3000 struct ctl_table uts_table;
3001 int r;
3002 + char tmp_data[__NEW_UTS_LEN + 1];
3003 +
3004 memcpy(&uts_table, table, sizeof(uts_table));
3005 - uts_table.data = get_uts(table, write);
3006 + uts_table.data = tmp_data;
3007 +
3008 + /*
3009 + * Buffer the value in tmp_data so that proc_dostring() can be called
3010 + * without holding any locks.
3011 + * We also need to read the original value in the write==1 case to
3012 + * support partial writes.
3013 + */
3014 + down_read(&uts_sem);
3015 + memcpy(tmp_data, get_uts(table), sizeof(tmp_data));
3016 + up_read(&uts_sem);
3017 r = proc_dostring(&uts_table, write, buffer, lenp, ppos);
3018 - put_uts(table, write, uts_table.data);
3019
3020 - if (write)
3021 + if (write) {
3022 + /*
3023 + * Write back the new value.
3024 + * Note that, since we dropped uts_sem, the result can
3025 + * theoretically be incorrect if there are two parallel writes
3026 + * at non-zero offsets to the same sysctl.
3027 + */
3028 + down_write(&uts_sem);
3029 + memcpy(get_uts(table), tmp_data, sizeof(tmp_data));
3030 + up_write(&uts_sem);
3031 proc_sys_poll_notify(table->poll);
3032 + }
3033
3034 return r;
3035 }
3036 diff --git a/mm/memory.c b/mm/memory.c
3037 index c9657f013a4d..93d5d324904b 100644
3038 --- a/mm/memory.c
3039 +++ b/mm/memory.c
3040 @@ -392,15 +392,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
3041 {
3042 struct mmu_table_batch **batch = &tlb->batch;
3043
3044 - /*
3045 - * When there's less then two users of this mm there cannot be a
3046 - * concurrent page-table walk.
3047 - */
3048 - if (atomic_read(&tlb->mm->mm_users) < 2) {
3049 - __tlb_remove_table(table);
3050 - return;
3051 - }
3052 -
3053 if (*batch == NULL) {
3054 *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
3055 if (*batch == NULL) {
3056 diff --git a/mm/readahead.c b/mm/readahead.c
3057 index c4ca70239233..59aa0d06f254 100644
3058 --- a/mm/readahead.c
3059 +++ b/mm/readahead.c
3060 @@ -380,6 +380,7 @@ ondemand_readahead(struct address_space *mapping,
3061 {
3062 struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
3063 unsigned long max_pages = ra->ra_pages;
3064 + unsigned long add_pages;
3065 pgoff_t prev_offset;
3066
3067 /*
3068 @@ -469,10 +470,17 @@ readit:
3069 * Will this read hit the readahead marker made by itself?
3070 * If so, trigger the readahead marker hit now, and merge
3071 * the resulted next readahead window into the current one.
3072 + * Take care of maximum IO pages as above.
3073 */
3074 if (offset == ra->start && ra->size == ra->async_size) {
3075 - ra->async_size = get_next_ra_size(ra, max_pages);
3076 - ra->size += ra->async_size;
3077 + add_pages = get_next_ra_size(ra, max_pages);
3078 + if (ra->size + add_pages <= max_pages) {
3079 + ra->async_size = add_pages;
3080 + ra->size += add_pages;
3081 + } else {
3082 + ra->size = max_pages;
3083 + ra->async_size = max_pages >> 1;
3084 + }
3085 }
3086
3087 return ra_submit(ra, mapping, filp);
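Without the clamp, a readahead-marker hit grew the window by stacking the next window on top of the current one, so a single IO could reach twice the configured limit. Worked numbers (illustrative):

/* assume max_pages = 32 and a marker hit with ra->size == 32:
 *   add_pages = get_next_ra_size(...)             (up to 32)
 *   old: ra->size = 32 + 32 = 64  -> one 64-page IO, 2x the limit
 *   new: 32 + 32 > 32, so ra->size = 32, ra->async_size = 32 >> 1 = 16 */
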
3088 diff --git a/net/9p/client.c b/net/9p/client.c
3089 index b433aff5ff13..3ec5a82929b2 100644
3090 --- a/net/9p/client.c
3091 +++ b/net/9p/client.c
3092 @@ -955,7 +955,7 @@ static int p9_client_version(struct p9_client *c)
3093 {
3094 int err = 0;
3095 struct p9_req_t *req;
3096 - char *version;
3097 + char *version = NULL;
3098 int msize;
3099
3100 p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
3101 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
3102 index 985046ae4231..38e21a1e97bc 100644
3103 --- a/net/9p/trans_fd.c
3104 +++ b/net/9p/trans_fd.c
3105 @@ -185,6 +185,8 @@ static void p9_mux_poll_stop(struct p9_conn *m)
3106 spin_lock_irqsave(&p9_poll_lock, flags);
3107 list_del_init(&m->poll_pending_link);
3108 spin_unlock_irqrestore(&p9_poll_lock, flags);
3109 +
3110 + flush_work(&p9_poll_work);
3111 }
3112
3113 /**
3114 @@ -951,7 +953,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
3115 if (err < 0)
3116 return err;
3117
3118 - if (valid_ipaddr4(addr) < 0)
3119 + if (addr == NULL || valid_ipaddr4(addr) < 0)
3120 return -EINVAL;
3121
3122 csocket = NULL;
3123 @@ -1001,6 +1003,9 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
3124
3125 csocket = NULL;
3126
3127 + if (addr == NULL)
3128 + return -EINVAL;
3129 +
3130 if (strlen(addr) >= UNIX_PATH_MAX) {
3131 pr_err("%s (%d): address too long: %s\n",
3132 __func__, task_pid_nr(current), addr);
3133 diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
3134 index 6d8e3031978f..f58467a49090 100644
3135 --- a/net/9p/trans_rdma.c
3136 +++ b/net/9p/trans_rdma.c
3137 @@ -646,6 +646,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
3138 struct rdma_conn_param conn_param;
3139 struct ib_qp_init_attr qp_attr;
3140
3141 + if (addr == NULL)
3142 + return -EINVAL;
3143 +
3144 /* Parse the transport specific mount options */
3145 err = parse_opts(args, &opts);
3146 if (err < 0)
3147 diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
3148 index 3aa5a93ad107..da0d3b257459 100644
3149 --- a/net/9p/trans_virtio.c
3150 +++ b/net/9p/trans_virtio.c
3151 @@ -189,7 +189,7 @@ static int pack_sg_list(struct scatterlist *sg, int start,
3152 s = rest_of_page(data);
3153 if (s > count)
3154 s = count;
3155 - BUG_ON(index > limit);
3156 + BUG_ON(index >= limit);
3157 /* Make sure we don't terminate early. */
3158 sg_unmark_end(&sg[index]);
3159 sg_set_buf(&sg[index++], data, s);
3160 @@ -234,6 +234,7 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
3161 s = PAGE_SIZE - data_off;
3162 if (s > count)
3163 s = count;
3164 + BUG_ON(index >= limit);
3165 /* Make sure we don't terminate early. */
3166 sg_unmark_end(&sg[index]);
3167 sg_set_page(&sg[index++], pdata[i++], s, data_off);
3168 @@ -406,6 +407,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
3169 p9_debug(P9_DEBUG_TRANS, "virtio request\n");
3170
3171 if (uodata) {
3172 + __le32 sz;
3173 int n = p9_get_mapped_pages(chan, &out_pages, uodata,
3174 outlen, &offs, &need_drop);
3175 if (n < 0)
3176 @@ -416,6 +418,12 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
3177 memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
3178 outlen = n;
3179 }
3180 + /* The size field of the message must include the length of the
3181 + * header and the length of the data. We didn't actually know
3182 + * the length of the data until this point so add it in now.
3183 + */
3184 + sz = cpu_to_le32(req->tc->size + outlen);
3185 + memcpy(&req->tc->sdata[0], &sz, sizeof(sz));
3186 } else if (uidata) {
3187 int n = p9_get_mapped_pages(chan, &in_pages, uidata,
3188 inlen, &offs, &need_drop);
3189 @@ -643,6 +651,9 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
3190 int ret = -ENOENT;
3191 int found = 0;
3192
3193 + if (devname == NULL)
3194 + return -EINVAL;
3195 +
3196 mutex_lock(&virtio_9p_lock);
3197 list_for_each_entry(chan, &virtio_chan_list, chan_list) {
3198 if (!strncmp(devname, chan->tag, chan->tag_len) &&
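The zero-copy fix in p9_virtio_zc_request() relies on the 9P wire format: every message opens with a little-endian u32 holding the total message length, header included, which for a zero-copy write is only known once the mapped payload length (outlen) is settled. The layout assumed (standard 9P2000, shown for reference):

/* 9P message header: size[4] type[1] tag[2], all little-endian */
struct p9_wire_hdr {
        __le32 size;    /* total length: header + payload */
        u8     type;    /* e.g. TWRITE */
        __le16 tag;
} __attribute__((packed));

/* hence the patch-in once the payload is mapped:
 *   sz = cpu_to_le32(req->tc->size + outlen);
 *   memcpy(&req->tc->sdata[0], &sz, sizeof(sz)); */
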
3199 diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
3200 index 325c56043007..c10bdf63eae7 100644
3201 --- a/net/9p/trans_xen.c
3202 +++ b/net/9p/trans_xen.c
3203 @@ -95,6 +95,9 @@ static int p9_xen_create(struct p9_client *client, const char *addr, char *args)
3204 {
3205 struct xen_9pfs_front_priv *priv;
3206
3207 + if (addr == NULL)
3208 + return -EINVAL;
3209 +
3210 read_lock(&xen_9pfs_lock);
3211 list_for_each_entry(priv, &xen_9pfs_devs, list) {
3212 if (!strcmp(priv->tag, addr)) {
3213 diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
3214 index e6ff5128e61a..ca53efa17be1 100644
3215 --- a/net/ieee802154/6lowpan/tx.c
3216 +++ b/net/ieee802154/6lowpan/tx.c
3217 @@ -265,9 +265,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
3218 /* We must take a copy of the skb before we modify/replace the ipv6
3219 * header as the header could be used elsewhere
3220 */
3221 - skb = skb_unshare(skb, GFP_ATOMIC);
3222 - if (!skb)
3223 - return NET_XMIT_DROP;
3224 + if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
3225 + skb_tailroom(skb) < ldev->needed_tailroom)) {
3226 + struct sk_buff *nskb;
3227 +
3228 + nskb = skb_copy_expand(skb, ldev->needed_headroom,
3229 + ldev->needed_tailroom, GFP_ATOMIC);
3230 + if (likely(nskb)) {
3231 + consume_skb(skb);
3232 + skb = nskb;
3233 + } else {
3234 + kfree_skb(skb);
3235 + return NET_XMIT_DROP;
3236 + }
3237 + } else {
3238 + skb = skb_unshare(skb, GFP_ATOMIC);
3239 + if (!skb)
3240 + return NET_XMIT_DROP;
3241 + }
3242
3243 ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
3244 if (ret < 0) {
3245 diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
3246 index 7e253455f9dd..bcd1a5e6ebf4 100644
3247 --- a/net/mac802154/tx.c
3248 +++ b/net/mac802154/tx.c
3249 @@ -63,8 +63,21 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
3250 int ret;
3251
3252 if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
3253 - u16 crc = crc_ccitt(0, skb->data, skb->len);
3254 + struct sk_buff *nskb;
3255 + u16 crc;
3256 +
3257 + if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
3258 + nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
3259 + GFP_ATOMIC);
3260 + if (likely(nskb)) {
3261 + consume_skb(skb);
3262 + skb = nskb;
3263 + } else {
3264 + goto err_tx;
3265 + }
3266 + }
3267
3268 + crc = crc_ccitt(0, skb->data, skb->len);
3269 put_unaligned_le16(crc, skb_put(skb, 2));
3270 }
3271
3272 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
3273 index 2ad827db2704..6d118357d9dc 100644
3274 --- a/net/sunrpc/clnt.c
3275 +++ b/net/sunrpc/clnt.c
3276 @@ -965,10 +965,20 @@ out:
3277 }
3278 EXPORT_SYMBOL_GPL(rpc_bind_new_program);
3279
3280 +void rpc_task_release_transport(struct rpc_task *task)
3281 +{
3282 + struct rpc_xprt *xprt = task->tk_xprt;
3283 +
3284 + if (xprt) {
3285 + task->tk_xprt = NULL;
3286 + xprt_put(xprt);
3287 + }
3288 +}
3289 +EXPORT_SYMBOL_GPL(rpc_task_release_transport);
3290 +
3291 void rpc_task_release_client(struct rpc_task *task)
3292 {
3293 struct rpc_clnt *clnt = task->tk_client;
3294 - struct rpc_xprt *xprt = task->tk_xprt;
3295
3296 if (clnt != NULL) {
3297 /* Remove from client task list */
3298 @@ -979,12 +989,14 @@ void rpc_task_release_client(struct rpc_task *task)
3299
3300 rpc_release_client(clnt);
3301 }
3302 + rpc_task_release_transport(task);
3303 +}
3304
3305 - if (xprt != NULL) {
3306 - task->tk_xprt = NULL;
3307 -
3308 - xprt_put(xprt);
3309 - }
3310 +static
3311 +void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
3312 +{
3313 + if (!task->tk_xprt)
3314 + task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
3315 }
3316
3317 static
3318 @@ -992,8 +1004,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
3319 {
3320
3321 if (clnt != NULL) {
3322 - if (task->tk_xprt == NULL)
3323 - task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
3324 + rpc_task_set_transport(task, clnt);
3325 task->tk_client = clnt;
3326 atomic_inc(&clnt->cl_count);
3327 if (clnt->cl_softrtry)
3328 @@ -1529,6 +1540,7 @@ call_start(struct rpc_task *task)
3329 clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
3330 clnt->cl_stats->rpccnt++;
3331 task->tk_action = call_reserve;
3332 + rpc_task_set_transport(task, clnt);
3333 }
3334
3335 /*
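The SUNRPC change is a refactor plus a behavior fix: dropping the transport
reference moves into rpc_task_release_transport(), which clears tk_xprt
before the put so no stale pointer survives, and call_start() re-picks a
transport via rpc_task_set_transport() so a retried task can move on to the
next xprt in the client's iterator. A userspace sketch of the release-helper
shape (the refcount is a plain int here; the kernel uses xprt_put()):

#include <stdlib.h>

struct xprt { int refcount; };

static void xprt_put(struct xprt *x)
{
	if (x && --x->refcount == 0)
		free(x);
}

struct task { struct xprt *tk_xprt; };

/* Mirror of rpc_task_release_transport(): detach the pointer first,
 * then drop the reference, which makes the helper safe to call twice. */
static void task_release_transport(struct task *t)
{
	struct xprt *x = t->tk_xprt;

	if (x) {
		t->tk_xprt = NULL;
		xprt_put(x);
	}
}

int main(void)
{
	struct xprt *x = calloc(1, sizeof(*x));
	struct task t = { x };

	if (!x)
		return 1;
	x->refcount = 1;
	task_release_transport(&t);
	task_release_transport(&t);	/* second call is a safe no-op */
	return t.tk_xprt == NULL ? 0 : 1;
}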
3336 diff --git a/security/commoncap.c b/security/commoncap.c
3337 index 1c1f64582bb5..ae26ef006988 100644
3338 --- a/security/commoncap.c
3339 +++ b/security/commoncap.c
3340 @@ -388,7 +388,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
3341 if (strcmp(name, "capability") != 0)
3342 return -EOPNOTSUPP;
3343
3344 - dentry = d_find_alias(inode);
3345 + dentry = d_find_any_alias(inode);
3346 if (!dentry)
3347 return -EINVAL;
3348
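The one-line commoncap change matters for inodes reached without a connected
path, for example through a file handle: d_find_alias() can come back empty
for such an inode, making the "capability" xattr read fail with -EINVAL,
while d_find_any_alias() returns whatever alias exists, disconnected or not.
A rough userspace model of that difference (struct dentry and struct inode
here are toy types, not the kernel's):

#include <stddef.h>

struct dentry { int disconnected; struct dentry *next; };
struct inode { struct dentry *aliases; };

/* Toy model: with any == 0 (d_find_alias-like) a purely disconnected
 * alias list yields NULL; with any == 1 (d_find_any_alias-like) the
 * first alias of either kind is returned. */
static struct dentry *find_alias(struct inode *ino, int any)
{
	struct dentry *d;

	for (d = ino->aliases; d; d = d->next)
		if (any || !d->disconnected)
			return d;
	return NULL;
}

int main(void)
{
	struct dentry d = { 1, NULL };	/* only a disconnected alias */
	struct inode ino = { &d };

	return !find_alias(&ino, 0) && find_alias(&ino, 1) == &d ? 0 : 1;
}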
3349 diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
3350 index 5547457566a7..bbb9823e93b9 100644
3351 --- a/tools/perf/util/auxtrace.c
3352 +++ b/tools/perf/util/auxtrace.c
3353 @@ -197,6 +197,9 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues,
3354 for (i = 0; i < queues->nr_queues; i++) {
3355 list_splice_tail(&queues->queue_array[i].head,
3356 &queue_array[i].head);
3357 + queue_array[i].tid = queues->queue_array[i].tid;
3358 + queue_array[i].cpu = queues->queue_array[i].cpu;
3359 + queue_array[i].set = queues->queue_array[i].set;
3360 queue_array[i].priv = queues->queue_array[i].priv;
3361 }
3362
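The perf fix closes out a classic grow-and-copy bug: when
auxtrace_queues__grow() moved entries into the larger array it copied the
list heads and .priv but forgot .tid, .cpu and .set, so grown queues lost
their identity. The linked lists genuinely need list_splice_tail(), since
list nodes point back at themselves, but plain fields are safest copied
wholesale; a userspace sketch of that approach (struct queue here is an
illustrative reduction, not the perf type):

#include <stdlib.h>
#include <string.h>

struct queue { int tid, cpu, set; void *priv; };

/* Grow by copying each element in full, so a field added to struct
 * queue later cannot be forgotten the way tid/cpu/set were. */
static struct queue *grow_queues(struct queue *old, size_t nr, size_t new_nr)
{
	struct queue *n = calloc(new_nr, sizeof(*n));

	if (!n)
		return NULL;
	memcpy(n, old, nr * sizeof(*old));	/* every field at once */
	free(old);
	return n;
}

int main(void)
{
	struct queue *q = calloc(2, sizeof(*q));

	if (!q)
		return 1;
	q[1].tid = 42;
	q = grow_queues(q, 2, 8);
	return q && q[1].tid == 42 ? 0 : 1;
}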