Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0123-4.9.24-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3608 - (show annotations) (download)
Fri Aug 14 07:34:29 2020 UTC (3 years, 8 months ago) by niro
File size: 102408 byte(s)
-added kernel-alx-legacy pkg
1 diff --git a/Makefile b/Makefile
2 index 0de75976cad5..50436f502d81 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 23
9 +SUBLEVEL = 24
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
14 index 0ddf3698b85d..8ac0e5994ed2 100644
15 --- a/arch/mips/lantiq/irq.c
16 +++ b/arch/mips/lantiq/irq.c
17 @@ -269,11 +269,6 @@ static void ltq_hw5_irqdispatch(void)
18 DEFINE_HWx_IRQDISPATCH(5)
19 #endif
20
21 -static void ltq_hw_irq_handler(struct irq_desc *desc)
22 -{
23 - ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
24 -}
25 -
26 #ifdef CONFIG_MIPS_MT_SMP
27 void __init arch_init_ipiirq(int irq, struct irqaction *action)
28 {
29 @@ -318,19 +313,23 @@ static struct irqaction irq_call = {
30 asmlinkage void plat_irq_dispatch(void)
31 {
32 unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
33 - int irq;
34 -
35 - if (!pending) {
36 - spurious_interrupt();
37 - return;
38 + unsigned int i;
39 +
40 + if ((MIPS_CPU_TIMER_IRQ == 7) && (pending & CAUSEF_IP7)) {
41 + do_IRQ(MIPS_CPU_TIMER_IRQ);
42 + goto out;
43 + } else {
44 + for (i = 0; i < MAX_IM; i++) {
45 + if (pending & (CAUSEF_IP2 << i)) {
46 + ltq_hw_irqdispatch(i);
47 + goto out;
48 + }
49 + }
50 }
51 + pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_status());
52
53 - pending >>= CAUSEB_IP;
54 - while (pending) {
55 - irq = fls(pending) - 1;
56 - do_IRQ(MIPS_CPU_IRQ_BASE + irq);
57 - pending &= ~BIT(irq);
58 - }
59 +out:
60 + return;
61 }
62
63 static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
64 @@ -355,6 +354,11 @@ static const struct irq_domain_ops irq_domain_ops = {
65 .map = icu_map,
66 };
67
68 +static struct irqaction cascade = {
69 + .handler = no_action,
70 + .name = "cascade",
71 +};
72 +
73 int __init icu_of_init(struct device_node *node, struct device_node *parent)
74 {
75 struct device_node *eiu_node;
76 @@ -386,7 +390,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
77 mips_cpu_irq_init();
78
79 for (i = 0; i < MAX_IM; i++)
80 - irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
81 + setup_irq(i + 2, &cascade);
82
83 if (cpu_has_vint) {
84 pr_info("Setting up vectored interrupts\n");
85 diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
86 index 7fcf5128996a..0497ceceeb85 100644
87 --- a/arch/parisc/include/asm/uaccess.h
88 +++ b/arch/parisc/include/asm/uaccess.h
89 @@ -42,10 +42,10 @@ static inline long access_ok(int type, const void __user * addr,
90 #define get_user __get_user
91
92 #if !defined(CONFIG_64BIT)
93 -#define LDD_USER(ptr) __get_user_asm64(ptr)
94 +#define LDD_USER(val, ptr) __get_user_asm64(val, ptr)
95 #define STD_USER(x, ptr) __put_user_asm64(x, ptr)
96 #else
97 -#define LDD_USER(ptr) __get_user_asm("ldd", ptr)
98 +#define LDD_USER(val, ptr) __get_user_asm(val, "ldd", ptr)
99 #define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
100 #endif
101
102 @@ -100,63 +100,87 @@ struct exception_data {
103 " mtsp %0,%%sr2\n\t" \
104 : : "r"(get_fs()) : )
105
106 -#define __get_user(x, ptr) \
107 -({ \
108 - register long __gu_err __asm__ ("r8") = 0; \
109 - register long __gu_val; \
110 - \
111 - load_sr2(); \
112 - switch (sizeof(*(ptr))) { \
113 - case 1: __get_user_asm("ldb", ptr); break; \
114 - case 2: __get_user_asm("ldh", ptr); break; \
115 - case 4: __get_user_asm("ldw", ptr); break; \
116 - case 8: LDD_USER(ptr); break; \
117 - default: BUILD_BUG(); break; \
118 - } \
119 - \
120 - (x) = (__force __typeof__(*(ptr))) __gu_val; \
121 - __gu_err; \
122 +#define __get_user_internal(val, ptr) \
123 +({ \
124 + register long __gu_err __asm__ ("r8") = 0; \
125 + \
126 + switch (sizeof(*(ptr))) { \
127 + case 1: __get_user_asm(val, "ldb", ptr); break; \
128 + case 2: __get_user_asm(val, "ldh", ptr); break; \
129 + case 4: __get_user_asm(val, "ldw", ptr); break; \
130 + case 8: LDD_USER(val, ptr); break; \
131 + default: BUILD_BUG(); \
132 + } \
133 + \
134 + __gu_err; \
135 })
136
137 -#define __get_user_asm(ldx, ptr) \
138 +#define __get_user(val, ptr) \
139 +({ \
140 + load_sr2(); \
141 + __get_user_internal(val, ptr); \
142 +})
143 +
144 +#define __get_user_asm(val, ldx, ptr) \
145 +{ \
146 + register long __gu_val; \
147 + \
148 __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
149 "9:\n" \
150 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
151 : "=r"(__gu_val), "=r"(__gu_err) \
152 - : "r"(ptr), "1"(__gu_err));
153 + : "r"(ptr), "1"(__gu_err)); \
154 + \
155 + (val) = (__force __typeof__(*(ptr))) __gu_val; \
156 +}
157
158 #if !defined(CONFIG_64BIT)
159
160 -#define __get_user_asm64(ptr) \
161 +#define __get_user_asm64(val, ptr) \
162 +{ \
163 + union { \
164 + unsigned long long l; \
165 + __typeof__(*(ptr)) t; \
166 + } __gu_tmp; \
167 + \
168 __asm__(" copy %%r0,%R0\n" \
169 "1: ldw 0(%%sr2,%2),%0\n" \
170 "2: ldw 4(%%sr2,%2),%R0\n" \
171 "9:\n" \
172 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
173 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
174 - : "=r"(__gu_val), "=r"(__gu_err) \
175 - : "r"(ptr), "1"(__gu_err));
176 + : "=&r"(__gu_tmp.l), "=r"(__gu_err) \
177 + : "r"(ptr), "1"(__gu_err)); \
178 + \
179 + (val) = __gu_tmp.t; \
180 +}
181
182 #endif /* !defined(CONFIG_64BIT) */
183
184
185 -#define __put_user(x, ptr) \
186 +#define __put_user_internal(x, ptr) \
187 ({ \
188 register long __pu_err __asm__ ("r8") = 0; \
189 __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
190 \
191 - load_sr2(); \
192 switch (sizeof(*(ptr))) { \
193 - case 1: __put_user_asm("stb", __x, ptr); break; \
194 - case 2: __put_user_asm("sth", __x, ptr); break; \
195 - case 4: __put_user_asm("stw", __x, ptr); break; \
196 - case 8: STD_USER(__x, ptr); break; \
197 - default: BUILD_BUG(); break; \
198 - } \
199 + case 1: __put_user_asm("stb", __x, ptr); break; \
200 + case 2: __put_user_asm("sth", __x, ptr); break; \
201 + case 4: __put_user_asm("stw", __x, ptr); break; \
202 + case 8: STD_USER(__x, ptr); break; \
203 + default: BUILD_BUG(); \
204 + } \
205 \
206 __pu_err; \
207 })
208
209 +#define __put_user(x, ptr) \
210 +({ \
211 + load_sr2(); \
212 + __put_user_internal(x, ptr); \
213 +})
214 +
215 +
216 /*
217 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
218 * instead of writing. This is because they do not write to any memory
219 diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
220 index f01188c044ee..85c28bb80fb7 100644
221 --- a/arch/parisc/lib/lusercopy.S
222 +++ b/arch/parisc/lib/lusercopy.S
223 @@ -201,7 +201,7 @@ ENTRY_CFI(pa_memcpy)
224 add dst,len,end
225
226 /* short copy with less than 16 bytes? */
227 - cmpib,>>=,n 15,len,.Lbyte_loop
228 + cmpib,COND(>>=),n 15,len,.Lbyte_loop
229
230 /* same alignment? */
231 xor src,dst,t0
232 @@ -216,7 +216,7 @@ ENTRY_CFI(pa_memcpy)
233 /* loop until we are 64-bit aligned */
234 .Lalign_loop64:
235 extru dst,31,3,t1
236 - cmpib,=,n 0,t1,.Lcopy_loop_16
237 + cmpib,=,n 0,t1,.Lcopy_loop_16_start
238 20: ldb,ma 1(srcspc,src),t1
239 21: stb,ma t1,1(dstspc,dst)
240 b .Lalign_loop64
241 @@ -225,6 +225,7 @@ ENTRY_CFI(pa_memcpy)
242 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
243 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
244
245 +.Lcopy_loop_16_start:
246 ldi 31,t0
247 .Lcopy_loop_16:
248 cmpb,COND(>>=),n t0,len,.Lword_loop
249 @@ -267,7 +268,7 @@ ENTRY_CFI(pa_memcpy)
250 /* loop until we are 32-bit aligned */
251 .Lalign_loop32:
252 extru dst,31,2,t1
253 - cmpib,=,n 0,t1,.Lcopy_loop_4
254 + cmpib,=,n 0,t1,.Lcopy_loop_8
255 20: ldb,ma 1(srcspc,src),t1
256 21: stb,ma t1,1(dstspc,dst)
257 b .Lalign_loop32
258 @@ -277,7 +278,7 @@ ENTRY_CFI(pa_memcpy)
259 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
260
261
262 -.Lcopy_loop_4:
263 +.Lcopy_loop_8:
264 cmpib,COND(>>=),n 15,len,.Lbyte_loop
265
266 10: ldw 0(srcspc,src),t1
267 @@ -299,7 +300,7 @@ ENTRY_CFI(pa_memcpy)
268 ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
269 ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
270
271 - b .Lcopy_loop_4
272 + b .Lcopy_loop_8
273 ldo -16(len),len
274
275 .Lbyte_loop:
276 @@ -324,7 +325,7 @@ ENTRY_CFI(pa_memcpy)
277 .Lunaligned_copy:
278 /* align until dst is 32bit-word-aligned */
279 extru dst,31,2,t1
280 - cmpib,COND(=),n 0,t1,.Lcopy_dstaligned
281 + cmpib,=,n 0,t1,.Lcopy_dstaligned
282 20: ldb 0(srcspc,src),t1
283 ldo 1(src),src
284 21: stb,ma t1,1(dstspc,dst)
285 @@ -362,7 +363,7 @@ ENTRY_CFI(pa_memcpy)
286 cmpiclr,<> 1,t0,%r0
287 b,n .Lcase1
288 .Lcase0:
289 - cmpb,= %r0,len,.Lcda_finish
290 + cmpb,COND(=) %r0,len,.Lcda_finish
291 nop
292
293 1: ldw,ma 4(srcspc,src), a3
294 @@ -376,7 +377,7 @@ ENTRY_CFI(pa_memcpy)
295 1: ldw,ma 4(srcspc,src), a3
296 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
297 ldo -1(len),len
298 - cmpb,=,n %r0,len,.Ldo0
299 + cmpb,COND(=),n %r0,len,.Ldo0
300 .Ldo4:
301 1: ldw,ma 4(srcspc,src), a0
302 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
303 @@ -402,7 +403,7 @@ ENTRY_CFI(pa_memcpy)
304 1: stw,ma t0, 4(dstspc,dst)
305 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
306 ldo -4(len),len
307 - cmpb,<> %r0,len,.Ldo4
308 + cmpb,COND(<>) %r0,len,.Ldo4
309 nop
310 .Ldo0:
311 shrpw a2, a3, %sar, t0
312 @@ -436,14 +437,14 @@ ENTRY_CFI(pa_memcpy)
313 /* fault exception fixup handlers: */
314 #ifdef CONFIG_64BIT
315 .Lcopy16_fault:
316 -10: b .Lcopy_done
317 - std,ma t1,8(dstspc,dst)
318 + b .Lcopy_done
319 +10: std,ma t1,8(dstspc,dst)
320 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
321 #endif
322
323 .Lcopy8_fault:
324 -10: b .Lcopy_done
325 - stw,ma t1,4(dstspc,dst)
326 + b .Lcopy_done
327 +10: stw,ma t1,4(dstspc,dst)
328 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
329
330 .exit
331 diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
332 index 7853b53959cd..3f9d1a83891a 100644
333 --- a/arch/x86/entry/vdso/vdso32-setup.c
334 +++ b/arch/x86/entry/vdso/vdso32-setup.c
335 @@ -30,8 +30,10 @@ static int __init vdso32_setup(char *s)
336 {
337 vdso32_enabled = simple_strtoul(s, NULL, 0);
338
339 - if (vdso32_enabled > 1)
340 + if (vdso32_enabled > 1) {
341 pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
342 + vdso32_enabled = 0;
343 + }
344
345 return 1;
346 }
347 @@ -62,13 +64,18 @@ subsys_initcall(sysenter_setup);
348 /* Register vsyscall32 into the ABI table */
349 #include <linux/sysctl.h>
350
351 +static const int zero;
352 +static const int one = 1;
353 +
354 static struct ctl_table abi_table2[] = {
355 {
356 .procname = "vsyscall32",
357 .data = &vdso32_enabled,
358 .maxlen = sizeof(int),
359 .mode = 0644,
360 - .proc_handler = proc_dointvec
361 + .proc_handler = proc_dointvec_minmax,
362 + .extra1 = (int *)&zero,
363 + .extra2 = (int *)&one,
364 },
365 {}
366 };
367 diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
368 index 81b321ace8e0..f924629836a8 100644
369 --- a/arch/x86/events/intel/lbr.c
370 +++ b/arch/x86/events/intel/lbr.c
371 @@ -507,6 +507,9 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
372 cpuc->lbr_entries[i].to = msr_lastbranch.to;
373 cpuc->lbr_entries[i].mispred = 0;
374 cpuc->lbr_entries[i].predicted = 0;
375 + cpuc->lbr_entries[i].in_tx = 0;
376 + cpuc->lbr_entries[i].abort = 0;
377 + cpuc->lbr_entries[i].cycles = 0;
378 cpuc->lbr_entries[i].reserved = 0;
379 }
380 cpuc->lbr_stack.nr = i;
381 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
382 index e7f155c3045e..94aad6364b47 100644
383 --- a/arch/x86/include/asm/elf.h
384 +++ b/arch/x86/include/asm/elf.h
385 @@ -278,7 +278,7 @@ struct task_struct;
386
387 #define ARCH_DLINFO_IA32 \
388 do { \
389 - if (vdso32_enabled) { \
390 + if (VDSO_CURRENT_BASE) { \
391 NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
392 NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
393 } \
394 diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
395 index 2c1ebeb4d737..529bb4a6487a 100644
396 --- a/arch/x86/include/asm/pmem.h
397 +++ b/arch/x86/include/asm/pmem.h
398 @@ -55,7 +55,8 @@ static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
399 * @size: number of bytes to write back
400 *
401 * Write back a cache range using the CLWB (cache line write back)
402 - * instruction.
403 + * instruction. Note that @size is internally rounded up to be cache
404 + * line size aligned.
405 */
406 static inline void arch_wb_cache_pmem(void *addr, size_t size)
407 {
408 @@ -69,15 +70,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
409 clwb(p);
410 }
411
412 -/*
413 - * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
414 - * iterators, so for other types (bvec & kvec) we must do a cache write-back.
415 - */
416 -static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
417 -{
418 - return iter_is_iovec(i) == false;
419 -}
420 -
421 /**
422 * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
423 * @addr: PMEM destination address
424 @@ -94,7 +86,35 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
425 /* TODO: skip the write-back by always using non-temporal stores */
426 len = copy_from_iter_nocache(addr, bytes, i);
427
428 - if (__iter_needs_pmem_wb(i))
429 + /*
430 + * In the iovec case on x86_64 copy_from_iter_nocache() uses
431 + * non-temporal stores for the bulk of the transfer, but we need
432 + * to manually flush if the transfer is unaligned. A cached
433 + * memory copy is used when destination or size is not naturally
434 + * aligned. That is:
435 + * - Require 8-byte alignment when size is 8 bytes or larger.
436 + * - Require 4-byte alignment when size is 4 bytes.
437 + *
438 + * In the non-iovec case the entire destination needs to be
439 + * flushed.
440 + */
441 + if (iter_is_iovec(i)) {
442 + unsigned long flushed, dest = (unsigned long) addr;
443 +
444 + if (bytes < 8) {
445 + if (!IS_ALIGNED(dest, 4) || (bytes != 4))
446 + arch_wb_cache_pmem(addr, 1);
447 + } else {
448 + if (!IS_ALIGNED(dest, 8)) {
449 + dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
450 + arch_wb_cache_pmem(addr, 1);
451 + }
452 +
453 + flushed = dest - (unsigned long) addr;
454 + if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
455 + arch_wb_cache_pmem(addr + bytes - 1, 1);
456 + }
457 + } else
458 arch_wb_cache_pmem(addr, bytes);
459
460 return len;
461 diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
462 index ec1f756f9dc9..71beb28600d4 100644
463 --- a/arch/x86/kernel/signal_compat.c
464 +++ b/arch/x86/kernel/signal_compat.c
465 @@ -151,8 +151,8 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from,
466
467 if (from->si_signo == SIGSEGV) {
468 if (from->si_code == SEGV_BNDERR) {
469 - compat_uptr_t lower = (unsigned long)&to->si_lower;
470 - compat_uptr_t upper = (unsigned long)&to->si_upper;
471 + compat_uptr_t lower = (unsigned long)from->si_lower;
472 + compat_uptr_t upper = (unsigned long)from->si_upper;
473 put_user_ex(lower, &to->si_lower);
474 put_user_ex(upper, &to->si_upper);
475 }
476 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
477 index 69b8f8a5ecb0..43b55ef82bac 100644
478 --- a/arch/x86/kvm/vmx.c
479 +++ b/arch/x86/kvm/vmx.c
480 @@ -6925,14 +6925,20 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
481 }
482
483 page = nested_get_page(vcpu, vmptr);
484 - if (page == NULL ||
485 - *(u32 *)kmap(page) != VMCS12_REVISION) {
486 + if (page == NULL) {
487 nested_vmx_failInvalid(vcpu);
488 + skip_emulated_instruction(vcpu);
489 + return 1;
490 + }
491 + if (*(u32 *)kmap(page) != VMCS12_REVISION) {
492 kunmap(page);
493 + nested_release_page_clean(page);
494 + nested_vmx_failInvalid(vcpu);
495 skip_emulated_instruction(vcpu);
496 return 1;
497 }
498 kunmap(page);
499 + nested_release_page_clean(page);
500 vmx->nested.vmxon_ptr = vmptr;
501 break;
502 case EXIT_REASON_VMCLEAR:
503 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
504 index 22af912d66d2..889e7619a091 100644
505 --- a/arch/x86/mm/init.c
506 +++ b/arch/x86/mm/init.c
507 @@ -643,21 +643,40 @@ void __init init_mem_mapping(void)
508 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
509 * is valid. The argument is a physical page number.
510 *
511 - *
512 - * On x86, access has to be given to the first megabyte of ram because that area
513 - * contains BIOS code and data regions used by X and dosemu and similar apps.
514 - * Access has to be given to non-kernel-ram areas as well, these contain the PCI
515 - * mmio resources as well as potential bios/acpi data regions.
516 + * On x86, access has to be given to the first megabyte of RAM because that
517 + * area traditionally contains BIOS code and data regions used by X, dosemu,
518 + * and similar apps. Since they map the entire memory range, the whole range
519 + * must be allowed (for mapping), but any areas that would otherwise be
520 + * disallowed are flagged as being "zero filled" instead of rejected.
521 + * Access has to be given to non-kernel-ram areas as well, these contain the
522 + * PCI mmio resources as well as potential bios/acpi data regions.
523 */
524 int devmem_is_allowed(unsigned long pagenr)
525 {
526 - if (pagenr < 256)
527 - return 1;
528 - if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
529 + if (page_is_ram(pagenr)) {
530 + /*
531 + * For disallowed memory regions in the low 1MB range,
532 + * request that the page be shown as all zeros.
533 + */
534 + if (pagenr < 256)
535 + return 2;
536 +
537 + return 0;
538 + }
539 +
540 + /*
541 + * This must follow RAM test, since System RAM is considered a
542 + * restricted resource under CONFIG_STRICT_IOMEM.
543 + */
544 + if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
545 + /* Low 1MB bypasses iomem restrictions. */
546 + if (pagenr < 256)
547 + return 1;
548 +
549 return 0;
550 - if (!page_is_ram(pagenr))
551 - return 1;
552 - return 0;
553 + }
554 +
555 + return 1;
556 }
557
558 void free_init_pages(char *what, unsigned long begin, unsigned long end)
559 diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
560 index 30031d5293c4..cdfe8c628959 100644
561 --- a/arch/x86/platform/efi/quirks.c
562 +++ b/arch/x86/platform/efi/quirks.c
563 @@ -201,6 +201,10 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
564 return;
565 }
566
567 + /* No need to reserve regions that will never be freed. */
568 + if (md.attribute & EFI_MEMORY_RUNTIME)
569 + return;
570 +
571 size += addr % EFI_PAGE_SIZE;
572 size = round_up(size, EFI_PAGE_SIZE);
573 addr = round_down(addr, EFI_PAGE_SIZE);
574 diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
575 index 44c88ad1841a..bcea81f36fc5 100644
576 --- a/arch/x86/xen/apic.c
577 +++ b/arch/x86/xen/apic.c
578 @@ -145,7 +145,7 @@ static void xen_silent_inquire(int apicid)
579 static int xen_cpu_present_to_apicid(int cpu)
580 {
581 if (cpu_present(cpu))
582 - return xen_get_apic_id(xen_apic_read(APIC_ID));
583 + return cpu_data(cpu).apicid;
584 else
585 return BAD_APICID;
586 }
587 diff --git a/crypto/ahash.c b/crypto/ahash.c
588 index 2ce8bcb9049c..cce0268a13fe 100644
589 --- a/crypto/ahash.c
590 +++ b/crypto/ahash.c
591 @@ -31,6 +31,7 @@ struct ahash_request_priv {
592 crypto_completion_t complete;
593 void *data;
594 u8 *result;
595 + u32 flags;
596 void *ubuf[] CRYPTO_MINALIGN_ATTR;
597 };
598
599 @@ -252,6 +253,8 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
600 priv->result = req->result;
601 priv->complete = req->base.complete;
602 priv->data = req->base.data;
603 + priv->flags = req->base.flags;
604 +
605 /*
606 * WARNING: We do not backup req->priv here! The req->priv
607 * is for internal use of the Crypto API and the
608 @@ -266,38 +269,44 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
609 return 0;
610 }
611
612 -static void ahash_restore_req(struct ahash_request *req)
613 +static void ahash_restore_req(struct ahash_request *req, int err)
614 {
615 struct ahash_request_priv *priv = req->priv;
616
617 + if (!err)
618 + memcpy(priv->result, req->result,
619 + crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
620 +
621 /* Restore the original crypto request. */
622 req->result = priv->result;
623 - req->base.complete = priv->complete;
624 - req->base.data = priv->data;
625 +
626 + ahash_request_set_callback(req, priv->flags,
627 + priv->complete, priv->data);
628 req->priv = NULL;
629
630 /* Free the req->priv.priv from the ADJUSTED request. */
631 kzfree(priv);
632 }
633
634 -static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
635 +static void ahash_notify_einprogress(struct ahash_request *req)
636 {
637 struct ahash_request_priv *priv = req->priv;
638 + struct crypto_async_request oreq;
639
640 - if (err == -EINPROGRESS)
641 - return;
642 -
643 - if (!err)
644 - memcpy(priv->result, req->result,
645 - crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
646 + oreq.data = priv->data;
647
648 - ahash_restore_req(req);
649 + priv->complete(&oreq, -EINPROGRESS);
650 }
651
652 static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
653 {
654 struct ahash_request *areq = req->data;
655
656 + if (err == -EINPROGRESS) {
657 + ahash_notify_einprogress(areq);
658 + return;
659 + }
660 +
661 /*
662 * Restore the original request, see ahash_op_unaligned() for what
663 * goes where.
664 @@ -308,7 +317,7 @@ static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
665 */
666
667 /* First copy req->result into req->priv.result */
668 - ahash_op_unaligned_finish(areq, err);
669 + ahash_restore_req(areq, err);
670
671 /* Complete the ORIGINAL request. */
672 areq->base.complete(&areq->base, err);
673 @@ -324,7 +333,12 @@ static int ahash_op_unaligned(struct ahash_request *req,
674 return err;
675
676 err = op(req);
677 - ahash_op_unaligned_finish(req, err);
678 + if (err == -EINPROGRESS ||
679 + (err == -EBUSY && (ahash_request_flags(req) &
680 + CRYPTO_TFM_REQ_MAY_BACKLOG)))
681 + return err;
682 +
683 + ahash_restore_req(req, err);
684
685 return err;
686 }
687 @@ -359,25 +373,14 @@ int crypto_ahash_digest(struct ahash_request *req)
688 }
689 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
690
691 -static void ahash_def_finup_finish2(struct ahash_request *req, int err)
692 +static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
693 {
694 - struct ahash_request_priv *priv = req->priv;
695 + struct ahash_request *areq = req->data;
696
697 if (err == -EINPROGRESS)
698 return;
699
700 - if (!err)
701 - memcpy(priv->result, req->result,
702 - crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
703 -
704 - ahash_restore_req(req);
705 -}
706 -
707 -static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
708 -{
709 - struct ahash_request *areq = req->data;
710 -
711 - ahash_def_finup_finish2(areq, err);
712 + ahash_restore_req(areq, err);
713
714 areq->base.complete(&areq->base, err);
715 }
716 @@ -388,11 +391,15 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
717 goto out;
718
719 req->base.complete = ahash_def_finup_done2;
720 - req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
721 +
722 err = crypto_ahash_reqtfm(req)->final(req);
723 + if (err == -EINPROGRESS ||
724 + (err == -EBUSY && (ahash_request_flags(req) &
725 + CRYPTO_TFM_REQ_MAY_BACKLOG)))
726 + return err;
727
728 out:
729 - ahash_def_finup_finish2(req, err);
730 + ahash_restore_req(req, err);
731 return err;
732 }
733
734 @@ -400,7 +407,16 @@ static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
735 {
736 struct ahash_request *areq = req->data;
737
738 + if (err == -EINPROGRESS) {
739 + ahash_notify_einprogress(areq);
740 + return;
741 + }
742 +
743 + areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
744 +
745 err = ahash_def_finup_finish1(areq, err);
746 + if (areq->priv)
747 + return;
748
749 areq->base.complete(&areq->base, err);
750 }
751 @@ -415,6 +431,11 @@ static int ahash_def_finup(struct ahash_request *req)
752 return err;
753
754 err = tfm->update(req);
755 + if (err == -EINPROGRESS ||
756 + (err == -EBUSY && (ahash_request_flags(req) &
757 + CRYPTO_TFM_REQ_MAY_BACKLOG)))
758 + return err;
759 +
760 return ahash_def_finup_finish1(req, err);
761 }
762
763 diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
764 index e8817e2f0597..fde8d885f7b6 100644
765 --- a/crypto/algif_aead.c
766 +++ b/crypto/algif_aead.c
767 @@ -39,6 +39,7 @@ struct aead_async_req {
768 struct aead_async_rsgl first_rsgl;
769 struct list_head list;
770 struct kiocb *iocb;
771 + struct sock *sk;
772 unsigned int tsgls;
773 char iv[];
774 };
775 @@ -379,12 +380,10 @@ static ssize_t aead_sendpage(struct socket *sock, struct page *page,
776
777 static void aead_async_cb(struct crypto_async_request *_req, int err)
778 {
779 - struct sock *sk = _req->data;
780 - struct alg_sock *ask = alg_sk(sk);
781 - struct aead_ctx *ctx = ask->private;
782 - struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
783 - struct aead_request *req = aead_request_cast(_req);
784 + struct aead_request *req = _req->data;
785 + struct crypto_aead *tfm = crypto_aead_reqtfm(req);
786 struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
787 + struct sock *sk = areq->sk;
788 struct scatterlist *sg = areq->tsgl;
789 struct aead_async_rsgl *rsgl;
790 struct kiocb *iocb = areq->iocb;
791 @@ -447,11 +446,12 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
792 memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
793 INIT_LIST_HEAD(&areq->list);
794 areq->iocb = msg->msg_iocb;
795 + areq->sk = sk;
796 memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
797 aead_request_set_tfm(req, tfm);
798 aead_request_set_ad(req, ctx->aead_assoclen);
799 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
800 - aead_async_cb, sk);
801 + aead_async_cb, req);
802 used -= ctx->aead_assoclen;
803
804 /* take over all tx sgls from ctx */
805 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
806 index 48e19d013170..22ca89242518 100644
807 --- a/drivers/acpi/ec.c
808 +++ b/drivers/acpi/ec.c
809 @@ -729,12 +729,12 @@ static void start_transaction(struct acpi_ec *ec)
810
811 static int ec_guard(struct acpi_ec *ec)
812 {
813 - unsigned long guard = usecs_to_jiffies(ec_polling_guard);
814 + unsigned long guard = usecs_to_jiffies(ec->polling_guard);
815 unsigned long timeout = ec->timestamp + guard;
816
817 /* Ensure guarding period before polling EC status */
818 do {
819 - if (ec_busy_polling) {
820 + if (ec->busy_polling) {
821 /* Perform busy polling */
822 if (ec_transaction_completed(ec))
823 return 0;
824 @@ -998,6 +998,28 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
825 spin_unlock_irqrestore(&ec->lock, flags);
826 }
827
828 +static void acpi_ec_enter_noirq(struct acpi_ec *ec)
829 +{
830 + unsigned long flags;
831 +
832 + spin_lock_irqsave(&ec->lock, flags);
833 + ec->busy_polling = true;
834 + ec->polling_guard = 0;
835 + ec_log_drv("interrupt blocked");
836 + spin_unlock_irqrestore(&ec->lock, flags);
837 +}
838 +
839 +static void acpi_ec_leave_noirq(struct acpi_ec *ec)
840 +{
841 + unsigned long flags;
842 +
843 + spin_lock_irqsave(&ec->lock, flags);
844 + ec->busy_polling = ec_busy_polling;
845 + ec->polling_guard = ec_polling_guard;
846 + ec_log_drv("interrupt unblocked");
847 + spin_unlock_irqrestore(&ec->lock, flags);
848 +}
849 +
850 void acpi_ec_block_transactions(void)
851 {
852 struct acpi_ec *ec = first_ec;
853 @@ -1278,7 +1300,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
854 if (function != ACPI_READ && function != ACPI_WRITE)
855 return AE_BAD_PARAMETER;
856
857 - if (ec_busy_polling || bits > 8)
858 + if (ec->busy_polling || bits > 8)
859 acpi_ec_burst_enable(ec);
860
861 for (i = 0; i < bytes; ++i, ++address, ++value)
862 @@ -1286,7 +1308,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
863 acpi_ec_read(ec, address, value) :
864 acpi_ec_write(ec, address, *value);
865
866 - if (ec_busy_polling || bits > 8)
867 + if (ec->busy_polling || bits > 8)
868 acpi_ec_burst_disable(ec);
869
870 switch (result) {
871 @@ -1329,6 +1351,8 @@ static struct acpi_ec *acpi_ec_alloc(void)
872 spin_lock_init(&ec->lock);
873 INIT_WORK(&ec->work, acpi_ec_event_handler);
874 ec->timestamp = jiffies;
875 + ec->busy_polling = true;
876 + ec->polling_guard = 0;
877 return ec;
878 }
879
880 @@ -1390,6 +1414,7 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
881 acpi_ec_start(ec, false);
882
883 if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
884 + acpi_ec_enter_noirq(ec);
885 status = acpi_install_address_space_handler(ec->handle,
886 ACPI_ADR_SPACE_EC,
887 &acpi_ec_space_handler,
888 @@ -1429,6 +1454,7 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
889 /* This is not fatal as we can poll EC events */
890 if (ACPI_SUCCESS(status)) {
891 set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
892 + acpi_ec_leave_noirq(ec);
893 if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
894 ec->reference_count >= 1)
895 acpi_ec_enable_gpe(ec, true);
896 @@ -1839,34 +1865,6 @@ int __init acpi_ec_ecdt_probe(void)
897 }
898
899 #ifdef CONFIG_PM_SLEEP
900 -static void acpi_ec_enter_noirq(struct acpi_ec *ec)
901 -{
902 - unsigned long flags;
903 -
904 - if (ec == first_ec) {
905 - spin_lock_irqsave(&ec->lock, flags);
906 - ec->saved_busy_polling = ec_busy_polling;
907 - ec->saved_polling_guard = ec_polling_guard;
908 - ec_busy_polling = true;
909 - ec_polling_guard = 0;
910 - ec_log_drv("interrupt blocked");
911 - spin_unlock_irqrestore(&ec->lock, flags);
912 - }
913 -}
914 -
915 -static void acpi_ec_leave_noirq(struct acpi_ec *ec)
916 -{
917 - unsigned long flags;
918 -
919 - if (ec == first_ec) {
920 - spin_lock_irqsave(&ec->lock, flags);
921 - ec_busy_polling = ec->saved_busy_polling;
922 - ec_polling_guard = ec->saved_polling_guard;
923 - ec_log_drv("interrupt unblocked");
924 - spin_unlock_irqrestore(&ec->lock, flags);
925 - }
926 -}
927 -
928 static int acpi_ec_suspend_noirq(struct device *dev)
929 {
930 struct acpi_ec *ec =
931 diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
932 index 0c452265c111..219b90bc0922 100644
933 --- a/drivers/acpi/internal.h
934 +++ b/drivers/acpi/internal.h
935 @@ -172,8 +172,8 @@ struct acpi_ec {
936 struct work_struct work;
937 unsigned long timestamp;
938 unsigned long nr_pending_queries;
939 - bool saved_busy_polling;
940 - unsigned int saved_polling_guard;
941 + bool busy_polling;
942 + unsigned int polling_guard;
943 };
944
945 extern struct acpi_ec *first_ec;
946 diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
947 index d1664df001f8..9ef3941eeff0 100644
948 --- a/drivers/acpi/nfit/core.c
949 +++ b/drivers/acpi/nfit/core.c
950 @@ -1617,7 +1617,11 @@ static int cmp_map(const void *m0, const void *m1)
951 const struct nfit_set_info_map *map0 = m0;
952 const struct nfit_set_info_map *map1 = m1;
953
954 - return map0->region_offset - map1->region_offset;
955 + if (map0->region_offset < map1->region_offset)
956 + return -1;
957 + else if (map0->region_offset > map1->region_offset)
958 + return 1;
959 + return 0;
960 }
961
962 /* Retrieve the nth entry referencing this spa */
963 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
964 index 5a2fdf156ec9..dd3786acba89 100644
965 --- a/drivers/acpi/scan.c
966 +++ b/drivers/acpi/scan.c
967 @@ -1827,15 +1827,20 @@ static void acpi_bus_attach(struct acpi_device *device)
968 return;
969
970 device->flags.match_driver = true;
971 - if (!ret) {
972 - ret = device_attach(&device->dev);
973 - if (ret < 0)
974 - return;
975 -
976 - if (!ret && device->pnp.type.platform_id)
977 - acpi_default_enumeration(device);
978 + if (ret > 0) {
979 + acpi_device_set_enumerated(device);
980 + goto ok;
981 }
982
983 + ret = device_attach(&device->dev);
984 + if (ret < 0)
985 + return;
986 +
987 + if (ret > 0 || !device->pnp.type.platform_id)
988 + acpi_device_set_enumerated(device);
989 + else
990 + acpi_default_enumeration(device);
991 +
992 ok:
993 list_for_each_entry(child, &device->children, node)
994 acpi_bus_attach(child);
995 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
996 index 7a1048755914..c9441f9d4585 100644
997 --- a/drivers/block/nbd.c
998 +++ b/drivers/block/nbd.c
999 @@ -54,7 +54,7 @@ struct nbd_device {
1000
1001 struct mutex tx_lock;
1002 struct gendisk *disk;
1003 - int blksize;
1004 + loff_t blksize;
1005 loff_t bytesize;
1006
1007 /* protects initialization and shutdown of the socket */
1008 @@ -126,7 +126,7 @@ static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
1009 }
1010
1011 static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
1012 - int blocksize, int nr_blocks)
1013 + loff_t blocksize, loff_t nr_blocks)
1014 {
1015 int ret;
1016
1017 @@ -135,7 +135,7 @@ static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
1018 return ret;
1019
1020 nbd->blksize = blocksize;
1021 - nbd->bytesize = (loff_t)blocksize * (loff_t)nr_blocks;
1022 + nbd->bytesize = blocksize * nr_blocks;
1023
1024 nbd_size_update(nbd, bdev);
1025
1026 @@ -648,7 +648,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1027
1028 case NBD_SET_SIZE:
1029 return nbd_size_set(nbd, bdev, nbd->blksize,
1030 - arg / nbd->blksize);
1031 + div_s64(arg, nbd->blksize));
1032
1033 case NBD_SET_SIZE_BLOCKS:
1034 return nbd_size_set(nbd, bdev, nbd->blksize, arg);
1035 @@ -817,7 +817,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
1036 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
1037 debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
1038 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
1039 - debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
1040 + debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
1041 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
1042
1043 return 0;
1044 diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
1045 index d2ef51ca9cf4..c9914d653968 100644
1046 --- a/drivers/block/zram/zram_drv.c
1047 +++ b/drivers/block/zram/zram_drv.c
1048 @@ -582,13 +582,13 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
1049
1050 if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
1051 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
1052 - clear_page(mem);
1053 + memset(mem, 0, PAGE_SIZE);
1054 return 0;
1055 }
1056
1057 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
1058 if (size == PAGE_SIZE) {
1059 - copy_page(mem, cmem);
1060 + memcpy(mem, cmem, PAGE_SIZE);
1061 } else {
1062 struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
1063
1064 @@ -780,7 +780,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
1065
1066 if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
1067 src = kmap_atomic(page);
1068 - copy_page(cmem, src);
1069 + memcpy(cmem, src, PAGE_SIZE);
1070 kunmap_atomic(src);
1071 } else {
1072 memcpy(cmem, src, clen);
1073 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
1074 index dcc09739a54e..8453a49471d7 100644
1075 --- a/drivers/char/Kconfig
1076 +++ b/drivers/char/Kconfig
1077 @@ -571,9 +571,12 @@ config TELCLOCK
1078 controlling the behavior of this hardware.
1079
1080 config DEVPORT
1081 - bool
1082 + bool "/dev/port character device"
1083 depends on ISA || PCI
1084 default y
1085 + help
1086 + Say Y here if you want to support the /dev/port device. The /dev/port
1087 + device is similar to /dev/mem, but for I/O ports.
1088
1089 source "drivers/s390/char/Kconfig"
1090
1091 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
1092 index 6d9cc2d39d22..7e4a9d1296bb 100644
1093 --- a/drivers/char/mem.c
1094 +++ b/drivers/char/mem.c
1095 @@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
1096 #endif
1097
1098 #ifdef CONFIG_STRICT_DEVMEM
1099 +static inline int page_is_allowed(unsigned long pfn)
1100 +{
1101 + return devmem_is_allowed(pfn);
1102 +}
1103 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
1104 {
1105 u64 from = ((u64)pfn) << PAGE_SHIFT;
1106 @@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
1107 return 1;
1108 }
1109 #else
1110 +static inline int page_is_allowed(unsigned long pfn)
1111 +{
1112 + return 1;
1113 +}
1114 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
1115 {
1116 return 1;
1117 @@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
1118
1119 while (count > 0) {
1120 unsigned long remaining;
1121 + int allowed;
1122
1123 sz = size_inside_page(p, count);
1124
1125 - if (!range_is_allowed(p >> PAGE_SHIFT, count))
1126 + allowed = page_is_allowed(p >> PAGE_SHIFT);
1127 + if (!allowed)
1128 return -EPERM;
1129 + if (allowed == 2) {
1130 + /* Show zeros for restricted memory. */
1131 + remaining = clear_user(buf, sz);
1132 + } else {
1133 + /*
1134 + * On ia64 if a page has been mapped somewhere as
1135 + * uncached, then it must also be accessed uncached
1136 + * by the kernel or data corruption may occur.
1137 + */
1138 + ptr = xlate_dev_mem_ptr(p);
1139 + if (!ptr)
1140 + return -EFAULT;
1141
1142 - /*
1143 - * On ia64 if a page has been mapped somewhere as uncached, then
1144 - * it must also be accessed uncached by the kernel or data
1145 - * corruption may occur.
1146 - */
1147 - ptr = xlate_dev_mem_ptr(p);
1148 - if (!ptr)
1149 - return -EFAULT;
1150 + remaining = copy_to_user(buf, ptr, sz);
1151 +
1152 + unxlate_dev_mem_ptr(p, ptr);
1153 + }
1154
1155 - remaining = copy_to_user(buf, ptr, sz);
1156 - unxlate_dev_mem_ptr(p, ptr);
1157 if (remaining)
1158 return -EFAULT;
1159
1160 @@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
1161 #endif
1162
1163 while (count > 0) {
1164 + int allowed;
1165 +
1166 sz = size_inside_page(p, count);
1167
1168 - if (!range_is_allowed(p >> PAGE_SHIFT, sz))
1169 + allowed = page_is_allowed(p >> PAGE_SHIFT);
1170 + if (!allowed)
1171 return -EPERM;
1172
1173 - /*
1174 - * On ia64 if a page has been mapped somewhere as uncached, then
1175 - * it must also be accessed uncached by the kernel or data
1176 - * corruption may occur.
1177 - */
1178 - ptr = xlate_dev_mem_ptr(p);
1179 - if (!ptr) {
1180 - if (written)
1181 - break;
1182 - return -EFAULT;
1183 - }
1184 + /* Skip actual writing when a page is marked as restricted. */
1185 + if (allowed == 1) {
1186 + /*
1187 + * On ia64 if a page has been mapped somewhere as
1188 + * uncached, then it must also be accessed uncached
1189 + * by the kernel or data corruption may occur.
1190 + */
1191 + ptr = xlate_dev_mem_ptr(p);
1192 + if (!ptr) {
1193 + if (written)
1194 + break;
1195 + return -EFAULT;
1196 + }
1197
1198 - copied = copy_from_user(ptr, buf, sz);
1199 - unxlate_dev_mem_ptr(p, ptr);
1200 - if (copied) {
1201 - written += sz - copied;
1202 - if (written)
1203 - break;
1204 - return -EFAULT;
1205 + copied = copy_from_user(ptr, buf, sz);
1206 + unxlate_dev_mem_ptr(p, ptr);
1207 + if (copied) {
1208 + written += sz - copied;
1209 + if (written)
1210 + break;
1211 + return -EFAULT;
1212 + }
1213 }
1214
1215 buf += sz;
1216 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
1217 index 5649234b7316..471a301d63e3 100644
1218 --- a/drivers/char/virtio_console.c
1219 +++ b/drivers/char/virtio_console.c
1220 @@ -1136,6 +1136,8 @@ static int put_chars(u32 vtermno, const char *buf, int count)
1221 {
1222 struct port *port;
1223 struct scatterlist sg[1];
1224 + void *data;
1225 + int ret;
1226
1227 if (unlikely(early_put_chars))
1228 return early_put_chars(vtermno, buf, count);
1229 @@ -1144,8 +1146,14 @@ static int put_chars(u32 vtermno, const char *buf, int count)
1230 if (!port)
1231 return -EPIPE;
1232
1233 - sg_init_one(sg, buf, count);
1234 - return __send_to_port(port, sg, 1, count, (void *)buf, false);
1235 + data = kmemdup(buf, count, GFP_ATOMIC);
1236 + if (!data)
1237 + return -ENOMEM;
1238 +
1239 + sg_init_one(sg, data, count);
1240 + ret = __send_to_port(port, sg, 1, count, data, false);
1241 + kfree(data);
1242 + return ret;
1243 }
1244
1245 /*
1246 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
1247 index cac4a92259da..6153b66139d5 100644
1248 --- a/drivers/cpufreq/cpufreq.c
1249 +++ b/drivers/cpufreq/cpufreq.c
1250 @@ -2404,6 +2404,20 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
1251 *********************************************************************/
1252 static enum cpuhp_state hp_online;
1253
1254 +static int cpuhp_cpufreq_online(unsigned int cpu)
1255 +{
1256 + cpufreq_online(cpu);
1257 +
1258 + return 0;
1259 +}
1260 +
1261 +static int cpuhp_cpufreq_offline(unsigned int cpu)
1262 +{
1263 + cpufreq_offline(cpu);
1264 +
1265 + return 0;
1266 +}
1267 +
1268 /**
1269 * cpufreq_register_driver - register a CPU Frequency driver
1270 * @driver_data: A struct cpufreq_driver containing the values#
1271 @@ -2466,8 +2480,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1272 }
1273
1274 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
1275 - cpufreq_online,
1276 - cpufreq_offline);
1277 + cpuhp_cpufreq_online,
1278 + cpuhp_cpufreq_offline);
1279 if (ret < 0)
1280 goto err_if_unreg;
1281 hp_online = ret;
1282 diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
1283 index 932742e4cf23..24c461dea7af 100644
1284 --- a/drivers/firmware/efi/libstub/gop.c
1285 +++ b/drivers/firmware/efi/libstub/gop.c
1286 @@ -149,7 +149,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
1287
1288 status = __gop_query32(sys_table_arg, gop32, &info, &size,
1289 &current_fb_base);
1290 - if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
1291 + if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
1292 + info->pixel_format != PIXEL_BLT_ONLY) {
1293 /*
1294 * Systems that use the UEFI Console Splitter may
1295 * provide multiple GOP devices, not all of which are
1296 @@ -266,7 +267,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
1297
1298 status = __gop_query64(sys_table_arg, gop64, &info, &size,
1299 &current_fb_base);
1300 - if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
1301 + if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
1302 + info->pixel_format != PIXEL_BLT_ONLY) {
1303 /*
1304 * Systems that use the UEFI Console Splitter may
1305 * provide multiple GOP devices, not all of which are
1306 diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1307 index b87d27859141..a336754698f8 100644
1308 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1309 +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1310 @@ -1305,7 +1305,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1311 if (!fence) {
1312 event_free(gpu, event);
1313 ret = -ENOMEM;
1314 - goto out_pm_put;
1315 + goto out_unlock;
1316 }
1317
1318 gpu->event[event].fence = fence;
1319 @@ -1345,6 +1345,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1320 hangcheck_timer_reset(gpu);
1321 ret = 0;
1322
1323 +out_unlock:
1324 mutex_unlock(&gpu->lock);
1325
1326 out_pm_put:
1327 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
1328 index e0d7f8472ac6..d741ff88e405 100644
1329 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
1330 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
1331 @@ -714,7 +714,7 @@ nv4a_chipset = {
1332 .i2c = nv04_i2c_new,
1333 .imem = nv40_instmem_new,
1334 .mc = nv44_mc_new,
1335 - .mmu = nv44_mmu_new,
1336 + .mmu = nv04_mmu_new,
1337 .pci = nv40_pci_new,
1338 .therm = nv40_therm_new,
1339 .timer = nv41_timer_new,
1340 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
1341 index fbb8c7dc18fd..0d65e7f15451 100644
1342 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
1343 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
1344 @@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device)
1345 case 0x94:
1346 case 0x96:
1347 case 0x98:
1348 - case 0xaa:
1349 - case 0xac:
1350 return true;
1351 default:
1352 break;
1353 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
1354 index 003ac915eaad..8a8895246d26 100644
1355 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
1356 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
1357 @@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine)
1358 }
1359
1360 if (type == 0x00000010) {
1361 - if (!nv31_mpeg_mthd(mpeg, mthd, data))
1362 + if (nv31_mpeg_mthd(mpeg, mthd, data))
1363 show &= ~0x01000000;
1364 }
1365 }
1366 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
1367 index e536f37e24b0..c3cf02ed468e 100644
1368 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
1369 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
1370 @@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine)
1371 }
1372
1373 if (type == 0x00000010) {
1374 - if (!nv44_mpeg_mthd(subdev->device, mthd, data))
1375 + if (nv44_mpeg_mthd(subdev->device, mthd, data))
1376 show &= ~0x01000000;
1377 }
1378 }
1379 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
1380 index bbe15243b8e7..f397a5b6910f 100644
1381 --- a/drivers/input/joystick/xpad.c
1382 +++ b/drivers/input/joystick/xpad.c
1383 @@ -201,6 +201,7 @@ static const struct xpad_device {
1384 { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
1385 { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
1386 { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
1387 + { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
1388 { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
1389 { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
1390 { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
1391 @@ -329,6 +330,7 @@ static struct usb_device_id xpad_table[] = {
1392 XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
1393 XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
1394 XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
1395 + XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */
1396 XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
1397 XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
1398 { }
1399 diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
1400 index 15af9a9753e5..2d203b422129 100644
1401 --- a/drivers/irqchip/irq-imx-gpcv2.c
1402 +++ b/drivers/irqchip/irq-imx-gpcv2.c
1403 @@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
1404 return -ENOMEM;
1405 }
1406
1407 + raw_spin_lock_init(&cd->rlock);
1408 +
1409 cd->gpc_base = of_iomap(node, 0);
1410 if (!cd->gpc_base) {
1411 pr_err("fsl-gpcv2: unable to map gpc registers\n");
1412 diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
1413 index a8e6624fbe83..a9bb2dde98ea 100644
1414 --- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
1415 +++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
1416 @@ -1013,8 +1013,8 @@ EXPORT_SYMBOL(dvb_usbv2_probe);
1417 void dvb_usbv2_disconnect(struct usb_interface *intf)
1418 {
1419 struct dvb_usb_device *d = usb_get_intfdata(intf);
1420 - const char *name = d->name;
1421 - struct device dev = d->udev->dev;
1422 + const char *devname = kstrdup(dev_name(&d->udev->dev), GFP_KERNEL);
1423 + const char *drvname = d->name;
1424
1425 dev_dbg(&d->udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
1426 intf->cur_altsetting->desc.bInterfaceNumber);
1427 @@ -1024,8 +1024,9 @@ void dvb_usbv2_disconnect(struct usb_interface *intf)
1428
1429 dvb_usbv2_exit(d);
1430
1431 - dev_info(&dev, "%s: '%s' successfully deinitialized and disconnected\n",
1432 - KBUILD_MODNAME, name);
1433 + pr_info("%s: '%s:%s' successfully deinitialized and disconnected\n",
1434 + KBUILD_MODNAME, drvname, devname);
1435 + kfree(devname);
1436 }
1437 EXPORT_SYMBOL(dvb_usbv2_disconnect);
1438
1439 diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
1440 index 243403081fa5..9fd43a37154c 100644
1441 --- a/drivers/media/usb/dvb-usb/cxusb.c
1442 +++ b/drivers/media/usb/dvb-usb/cxusb.c
1443 @@ -59,23 +59,24 @@ static int cxusb_ctrl_msg(struct dvb_usb_device *d,
1444 u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
1445 {
1446 struct cxusb_state *st = d->priv;
1447 - int ret, wo;
1448 + int ret;
1449
1450 if (1 + wlen > MAX_XFER_SIZE) {
1451 warn("i2c wr: len=%d is too big!\n", wlen);
1452 return -EOPNOTSUPP;
1453 }
1454
1455 - wo = (rbuf == NULL || rlen == 0); /* write-only */
1456 + if (rlen > MAX_XFER_SIZE) {
1457 + warn("i2c rd: len=%d is too big!\n", rlen);
1458 + return -EOPNOTSUPP;
1459 + }
1460
1461 mutex_lock(&d->data_mutex);
1462 st->data[0] = cmd;
1463 memcpy(&st->data[1], wbuf, wlen);
1464 - if (wo)
1465 - ret = dvb_usb_generic_write(d, st->data, 1 + wlen);
1466 - else
1467 - ret = dvb_usb_generic_rw(d, st->data, 1 + wlen,
1468 - rbuf, rlen, 0);
1469 + ret = dvb_usb_generic_rw(d, st->data, 1 + wlen, st->data, rlen, 0);
1470 + if (!ret && rbuf && rlen)
1471 + memcpy(rbuf, st->data, rlen);
1472
1473 mutex_unlock(&d->data_mutex);
1474 return ret;
1475 diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
1476 index dd048a7c461c..b8d2ac5833e9 100644
1477 --- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
1478 +++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
1479 @@ -35,42 +35,51 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
1480
1481 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
1482 {
1483 - struct hexline hx;
1484 - u8 reset;
1485 - int ret,pos=0;
1486 + struct hexline *hx;
1487 + u8 *buf;
1488 + int ret, pos = 0;
1489 + u16 cpu_cs_register = cypress[type].cpu_cs_register;
1490 +
1491 + buf = kmalloc(sizeof(*hx), GFP_KERNEL);
1492 + if (!buf)
1493 + return -ENOMEM;
1494 + hx = (struct hexline *)buf;
1495
1496 /* stop the CPU */
1497 - reset = 1;
1498 - if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
1499 + buf[0] = 1;
1500 + if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
1501 err("could not stop the USB controller CPU.");
1502
1503 - while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
1504 - deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
1505 - ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
1506 + while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
1507 + deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk);
1508 + ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len);
1509
1510 - if (ret != hx.len) {
1511 + if (ret != hx->len) {
1512 err("error while transferring firmware "
1513 "(transferred size: %d, block size: %d)",
1514 - ret,hx.len);
1515 + ret, hx->len);
1516 ret = -EINVAL;
1517 break;
1518 }
1519 }
1520 if (ret < 0) {
1521 err("firmware download failed at %d with %d",pos,ret);
1522 + kfree(buf);
1523 return ret;
1524 }
1525
1526 if (ret == 0) {
1527 /* restart the CPU */
1528 - reset = 0;
1529 - if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
1530 + buf[0] = 0;
1531 + if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
1532 err("could not restart the USB controller CPU.");
1533 ret = -EINVAL;
1534 }
1535 } else
1536 ret = -EIO;
1537
1538 + kfree(buf);
1539 +
1540 return ret;
1541 }
1542 EXPORT_SYMBOL(usb_cypress_load_firmware);
1543 diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
1544 index 368bb0710d8f..481895b2f9f4 100644
1545 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c
1546 +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
1547 @@ -557,7 +557,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
1548 int work_done = 0;
1549
1550 u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
1551 - u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD);
1552 + u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
1553 u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
1554
1555 /* Handle bus state changes */
1556 diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
1557 index e2512d5bc0e1..eedf86b67cf5 100644
1558 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c
1559 +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
1560 @@ -528,6 +528,9 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
1561 if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
1562 return 0;
1563
1564 + if (!spec_priv->rfs_chan_spec_scan)
1565 + return 1;
1566 +
1567 /* Output buffers are full, no need to process anything
1568 * since there is no space to put the result anyway
1569 */
1570 @@ -1072,7 +1075,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = {
1571
1572 void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
1573 {
1574 - if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS)) {
1575 + if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) {
1576 relay_close(spec_priv->rfs_chan_spec_scan);
1577 spec_priv->rfs_chan_spec_scan = NULL;
1578 }
1579 @@ -1086,6 +1089,9 @@ void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv,
1580 debugfs_phy,
1581 1024, 256, &rfs_spec_scan_cb,
1582 NULL);
1583 + if (!spec_priv->rfs_chan_spec_scan)
1584 + return;
1585 +
1586 debugfs_create_file("spectral_scan_ctl",
1587 S_IRUSR | S_IWUSR,
1588 debugfs_phy, spec_priv,
1589 diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
1590 index 23d4a1728cdf..351bac8f6503 100644
1591 --- a/drivers/nvdimm/bus.c
1592 +++ b/drivers/nvdimm/bus.c
1593 @@ -934,8 +934,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
1594 rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
1595 if (rc < 0)
1596 goto out_unlock;
1597 + nvdimm_bus_unlock(&nvdimm_bus->dev);
1598 +
1599 if (copy_to_user(p, buf, buf_len))
1600 rc = -EFAULT;
1601 +
1602 + vfree(buf);
1603 + return rc;
1604 +
1605 out_unlock:
1606 nvdimm_bus_unlock(&nvdimm_bus->dev);
1607 out:
1608 diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
1609 index d614493ad5ac..dcb32f34a302 100644
1610 --- a/drivers/nvdimm/dimm_devs.c
1611 +++ b/drivers/nvdimm/dimm_devs.c
1612 @@ -388,7 +388,7 @@ EXPORT_SYMBOL_GPL(nvdimm_create);
1613
1614 int alias_dpa_busy(struct device *dev, void *data)
1615 {
1616 - resource_size_t map_end, blk_start, new, busy;
1617 + resource_size_t map_end, blk_start, new;
1618 struct blk_alloc_info *info = data;
1619 struct nd_mapping *nd_mapping;
1620 struct nd_region *nd_region;
1621 @@ -429,29 +429,19 @@ int alias_dpa_busy(struct device *dev, void *data)
1622 retry:
1623 /*
1624 * Find the free dpa from the end of the last pmem allocation to
1625 - * the end of the interleave-set mapping that is not already
1626 - * covered by a blk allocation.
1627 + * the end of the interleave-set mapping.
1628 */
1629 - busy = 0;
1630 for_each_dpa_resource(ndd, res) {
1631 + if (strncmp(res->name, "pmem", 4) != 0)
1632 + continue;
1633 if ((res->start >= blk_start && res->start < map_end)
1634 || (res->end >= blk_start
1635 && res->end <= map_end)) {
1636 - if (strncmp(res->name, "pmem", 4) == 0) {
1637 - new = max(blk_start, min(map_end + 1,
1638 - res->end + 1));
1639 - if (new != blk_start) {
1640 - blk_start = new;
1641 - goto retry;
1642 - }
1643 - } else
1644 - busy += min(map_end, res->end)
1645 - - max(nd_mapping->start, res->start) + 1;
1646 - } else if (nd_mapping->start > res->start
1647 - && map_end < res->end) {
1648 - /* total eclipse of the PMEM region mapping */
1649 - busy += nd_mapping->size;
1650 - break;
1651 + new = max(blk_start, min(map_end + 1, res->end + 1));
1652 + if (new != blk_start) {
1653 + blk_start = new;
1654 + goto retry;
1655 + }
1656 }
1657 }
1658
1659 @@ -463,52 +453,11 @@ int alias_dpa_busy(struct device *dev, void *data)
1660 return 1;
1661 }
1662
1663 - info->available -= blk_start - nd_mapping->start + busy;
1664 + info->available -= blk_start - nd_mapping->start;
1665
1666 return 0;
1667 }
1668
1669 -static int blk_dpa_busy(struct device *dev, void *data)
1670 -{
1671 - struct blk_alloc_info *info = data;
1672 - struct nd_mapping *nd_mapping;
1673 - struct nd_region *nd_region;
1674 - resource_size_t map_end;
1675 - int i;
1676 -
1677 - if (!is_nd_pmem(dev))
1678 - return 0;
1679 -
1680 - nd_region = to_nd_region(dev);
1681 - for (i = 0; i < nd_region->ndr_mappings; i++) {
1682 - nd_mapping = &nd_region->mapping[i];
1683 - if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
1684 - break;
1685 - }
1686 -
1687 - if (i >= nd_region->ndr_mappings)
1688 - return 0;
1689 -
1690 - map_end = nd_mapping->start + nd_mapping->size - 1;
1691 - if (info->res->start >= nd_mapping->start
1692 - && info->res->start < map_end) {
1693 - if (info->res->end <= map_end) {
1694 - info->busy = 0;
1695 - return 1;
1696 - } else {
1697 - info->busy -= info->res->end - map_end;
1698 - return 0;
1699 - }
1700 - } else if (info->res->end >= nd_mapping->start
1701 - && info->res->end <= map_end) {
1702 - info->busy -= nd_mapping->start - info->res->start;
1703 - return 0;
1704 - } else {
1705 - info->busy -= nd_mapping->size;
1706 - return 0;
1707 - }
1708 -}
1709 -
1710 /**
1711 * nd_blk_available_dpa - account the unused dpa of BLK region
1712 * @nd_mapping: container of dpa-resource-root + labels
1713 @@ -538,11 +487,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
1714 for_each_dpa_resource(ndd, res) {
1715 if (strncmp(res->name, "blk", 3) != 0)
1716 continue;
1717 -
1718 - info.res = res;
1719 - info.busy = resource_size(res);
1720 - device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
1721 - info.available -= info.busy;
1722 + info.available -= resource_size(res);
1723 }
1724
1725 return info.available;
1726 diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
1727 index a66192f692e3..c29b9b611ab2 100644
1728 --- a/drivers/platform/x86/acer-wmi.c
1729 +++ b/drivers/platform/x86/acer-wmi.c
1730 @@ -1846,11 +1846,24 @@ static int __init acer_wmi_enable_lm(void)
1731 return status;
1732 }
1733
1734 +#define ACER_WMID_ACCEL_HID "BST0001"
1735 +
1736 static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level,
1737 void *ctx, void **retval)
1738 {
1739 + struct acpi_device *dev;
1740 +
1741 + if (!strcmp(ctx, "SENR")) {
1742 + if (acpi_bus_get_device(ah, &dev))
1743 + return AE_OK;
1744 + if (!strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev)))
1745 + return AE_OK;
1746 + } else
1747 + return AE_OK;
1748 +
1749 *(acpi_handle *)retval = ah;
1750 - return AE_OK;
1751 +
1752 + return AE_CTRL_TERMINATE;
1753 }
1754
1755 static int __init acer_wmi_get_handle(const char *name, const char *prop,
1756 @@ -1877,7 +1890,7 @@ static int __init acer_wmi_accel_setup(void)
1757 {
1758 int err;
1759
1760 - err = acer_wmi_get_handle("SENR", "BST0001", &gsensor_handle);
1761 + err = acer_wmi_get_handle("SENR", ACER_WMID_ACCEL_HID, &gsensor_handle);
1762 if (err)
1763 return err;
1764
1765 @@ -2233,10 +2246,11 @@ static int __init acer_wmi_init(void)
1766 err = acer_wmi_input_setup();
1767 if (err)
1768 return err;
1769 + err = acer_wmi_accel_setup();
1770 + if (err)
1771 + return err;
1772 }
1773
1774 - acer_wmi_accel_setup();
1775 -
1776 err = platform_driver_register(&acer_platform_driver);
1777 if (err) {
1778 pr_err("Unable to register platform driver\n");
1779 diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
1780 index ef89df1f7336..744d56197286 100644
1781 --- a/drivers/pwm/pwm-rockchip.c
1782 +++ b/drivers/pwm/pwm-rockchip.c
1783 @@ -191,6 +191,28 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
1784 return 0;
1785 }
1786
1787 +static int rockchip_pwm_enable(struct pwm_chip *chip,
1788 + struct pwm_device *pwm,
1789 + bool enable,
1790 + enum pwm_polarity polarity)
1791 +{
1792 + struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
1793 + int ret;
1794 +
1795 + if (enable) {
1796 + ret = clk_enable(pc->clk);
1797 + if (ret)
1798 + return ret;
1799 + }
1800 +
1801 + pc->data->set_enable(chip, pwm, enable, polarity);
1802 +
1803 + if (!enable)
1804 + clk_disable(pc->clk);
1805 +
1806 + return 0;
1807 +}
1808 +
1809 static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
1810 struct pwm_state *state)
1811 {
1812 @@ -207,22 +229,26 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
1813 return ret;
1814
1815 if (state->polarity != curstate.polarity && enabled) {
1816 - pc->data->set_enable(chip, pwm, false, state->polarity);
1817 + ret = rockchip_pwm_enable(chip, pwm, false, state->polarity);
1818 + if (ret)
1819 + goto out;
1820 enabled = false;
1821 }
1822
1823 ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
1824 if (ret) {
1825 if (enabled != curstate.enabled)
1826 - pc->data->set_enable(chip, pwm, !enabled,
1827 - state->polarity);
1828 -
1829 + rockchip_pwm_enable(chip, pwm, !enabled,
1830 + state->polarity);
1831 goto out;
1832 }
1833
1834 - if (state->enabled != enabled)
1835 - pc->data->set_enable(chip, pwm, state->enabled,
1836 - state->polarity);
1837 + if (state->enabled != enabled) {
1838 + ret = rockchip_pwm_enable(chip, pwm, state->enabled,
1839 + state->polarity);
1840 + if (ret)
1841 + goto out;
1842 + }
1843
1844 /*
1845 * Update the state with the real hardware, which can differ a bit
1846 diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
1847 index 3853ba963bb5..19e03d0b956b 100644
1848 --- a/drivers/rtc/rtc-tegra.c
1849 +++ b/drivers/rtc/rtc-tegra.c
1850 @@ -18,6 +18,7 @@
1851 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
1852 */
1853 #include <linux/kernel.h>
1854 +#include <linux/clk.h>
1855 #include <linux/init.h>
1856 #include <linux/module.h>
1857 #include <linux/slab.h>
1858 @@ -59,6 +60,7 @@ struct tegra_rtc_info {
1859 struct platform_device *pdev;
1860 struct rtc_device *rtc_dev;
1861 void __iomem *rtc_base; /* NULL if not initialized. */
1862 + struct clk *clk;
1863 int tegra_rtc_irq; /* alarm and periodic irq */
1864 spinlock_t tegra_rtc_lock;
1865 };
1866 @@ -326,6 +328,14 @@ static int __init tegra_rtc_probe(struct platform_device *pdev)
1867 if (info->tegra_rtc_irq <= 0)
1868 return -EBUSY;
1869
1870 + info->clk = devm_clk_get(&pdev->dev, NULL);
1871 + if (IS_ERR(info->clk))
1872 + return PTR_ERR(info->clk);
1873 +
1874 + ret = clk_prepare_enable(info->clk);
1875 + if (ret < 0)
1876 + return ret;
1877 +
1878 /* set context info. */
1879 info->pdev = pdev;
1880 spin_lock_init(&info->tegra_rtc_lock);
1881 @@ -346,7 +356,7 @@ static int __init tegra_rtc_probe(struct platform_device *pdev)
1882 ret = PTR_ERR(info->rtc_dev);
1883 dev_err(&pdev->dev, "Unable to register device (err=%d).\n",
1884 ret);
1885 - return ret;
1886 + goto disable_clk;
1887 }
1888
1889 ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq,
1890 @@ -356,12 +366,25 @@ static int __init tegra_rtc_probe(struct platform_device *pdev)
1891 dev_err(&pdev->dev,
1892 "Unable to request interrupt for device (err=%d).\n",
1893 ret);
1894 - return ret;
1895 + goto disable_clk;
1896 }
1897
1898 dev_notice(&pdev->dev, "Tegra internal Real Time Clock\n");
1899
1900 return 0;
1901 +
1902 +disable_clk:
1903 + clk_disable_unprepare(info->clk);
1904 + return ret;
1905 +}
1906 +
1907 +static int tegra_rtc_remove(struct platform_device *pdev)
1908 +{
1909 + struct tegra_rtc_info *info = platform_get_drvdata(pdev);
1910 +
1911 + clk_disable_unprepare(info->clk);
1912 +
1913 + return 0;
1914 }
1915
1916 #ifdef CONFIG_PM_SLEEP
1917 @@ -413,6 +436,7 @@ static void tegra_rtc_shutdown(struct platform_device *pdev)
1918
1919 MODULE_ALIAS("platform:tegra_rtc");
1920 static struct platform_driver tegra_rtc_driver = {
1921 + .remove = tegra_rtc_remove,
1922 .shutdown = tegra_rtc_shutdown,
1923 .driver = {
1924 .name = "tegra_rtc",
1925 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
1926 index 4f361d8d84be..734e592a247e 100644
1927 --- a/drivers/scsi/qla2xxx/qla_os.c
1928 +++ b/drivers/scsi/qla2xxx/qla_os.c
1929 @@ -968,8 +968,13 @@ static inline
1930 uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
1931 {
1932 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1933 + struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1934
1935 - return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT);
1936 + if (IS_P3P_TYPE(ha))
1937 + return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
1938 + else
1939 + return ((RD_REG_DWORD(&reg->host_status)) ==
1940 + ISP_REG_DISCONNECT);
1941 }
1942
1943 /**************************************************************************
1944 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1945 index 51e56296f465..931af0793951 100644
1946 --- a/drivers/scsi/sd.c
1947 +++ b/drivers/scsi/sd.c
1948 @@ -2057,6 +2057,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
1949
1950 #define READ_CAPACITY_RETRIES_ON_RESET 10
1951
1952 +/*
1953 + * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
1954 + * and the reported logical block size is bigger than 512 bytes. Note
1955 + * that last_sector is a u64 and therefore logical_to_sectors() is not
1956 + * applicable.
1957 + */
1958 +static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
1959 +{
1960 + u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
1961 +
1962 + if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
1963 + return false;
1964 +
1965 + return true;
1966 +}
1967 +
1968 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1969 unsigned char *buffer)
1970 {
1971 @@ -2122,7 +2138,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1972 return -ENODEV;
1973 }
1974
1975 - if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
1976 + if (!sd_addressable_capacity(lba, sector_size)) {
1977 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
1978 "kernel compiled with support for large block "
1979 "devices.\n");
1980 @@ -2208,7 +2224,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
1981 return sector_size;
1982 }
1983
1984 - if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
1985 + if (!sd_addressable_capacity(lba, sector_size)) {
1986 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
1987 "kernel compiled with support for large block "
1988 "devices.\n");
1989 @@ -2877,7 +2893,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
1990 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
1991 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
1992 } else
1993 - rw_max = BLK_DEF_MAX_SECTORS;
1994 + rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
1995 + (sector_t)BLK_DEF_MAX_SECTORS);
1996
1997 /* Combine with controller limits */
1998 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
1999 diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
2000 index bed2bbd6b923..e63597342c96 100644
2001 --- a/drivers/scsi/sr.c
2002 +++ b/drivers/scsi/sr.c
2003 @@ -833,6 +833,7 @@ static void get_capabilities(struct scsi_cd *cd)
2004 unsigned char *buffer;
2005 struct scsi_mode_data data;
2006 struct scsi_sense_hdr sshdr;
2007 + unsigned int ms_len = 128;
2008 int rc, n;
2009
2010 static const char *loadmech[] =
2011 @@ -859,10 +860,11 @@ static void get_capabilities(struct scsi_cd *cd)
2012 scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
2013
2014 /* ask for mode page 0x2a */
2015 - rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
2016 + rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
2017 SR_TIMEOUT, 3, &data, NULL);
2018
2019 - if (!scsi_status_is_good(rc)) {
2020 + if (!scsi_status_is_good(rc) || data.length > ms_len ||
2021 + data.header_length + data.block_descriptor_length > data.length) {
2022 /* failed, drive doesn't have capabilities mode page */
2023 cd->cdi.speed = 1;
2024 cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
2025 diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
2026 index 0efa80bb8962..4a073339ae2e 100644
2027 --- a/drivers/target/iscsi/iscsi_target_parameters.c
2028 +++ b/drivers/target/iscsi/iscsi_target_parameters.c
2029 @@ -782,22 +782,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
2030 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
2031 SET_PSTATE_REPLY_OPTIONAL(param);
2032 /*
2033 - * The GlobalSAN iSCSI Initiator for MacOSX does
2034 - * not respond to MaxBurstLength, FirstBurstLength,
2035 - * DefaultTime2Wait or DefaultTime2Retain parameter keys.
2036 - * So, we set them to 'reply optional' here, and assume the
2037 - * the defaults from iscsi_parameters.h if the initiator
2038 - * is not RFC compliant and the keys are not negotiated.
2039 - */
2040 - if (!strcmp(param->name, MAXBURSTLENGTH))
2041 - SET_PSTATE_REPLY_OPTIONAL(param);
2042 - if (!strcmp(param->name, FIRSTBURSTLENGTH))
2043 - SET_PSTATE_REPLY_OPTIONAL(param);
2044 - if (!strcmp(param->name, DEFAULTTIME2WAIT))
2045 - SET_PSTATE_REPLY_OPTIONAL(param);
2046 - if (!strcmp(param->name, DEFAULTTIME2RETAIN))
2047 - SET_PSTATE_REPLY_OPTIONAL(param);
2048 - /*
2049 * Required for gPXE iSCSI boot client
2050 */
2051 if (!strcmp(param->name, MAXCONNECTIONS))
2052 diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
2053 index 1f38177207e0..da5a5fcb8c29 100644
2054 --- a/drivers/target/iscsi/iscsi_target_util.c
2055 +++ b/drivers/target/iscsi/iscsi_target_util.c
2056 @@ -735,21 +735,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
2057 {
2058 struct se_cmd *se_cmd = NULL;
2059 int rc;
2060 + bool op_scsi = false;
2061 /*
2062 * Determine if a struct se_cmd is associated with
2063 * this struct iscsi_cmd.
2064 */
2065 switch (cmd->iscsi_opcode) {
2066 case ISCSI_OP_SCSI_CMD:
2067 - se_cmd = &cmd->se_cmd;
2068 - __iscsit_free_cmd(cmd, true, shutdown);
2069 + op_scsi = true;
2070 /*
2071 * Fallthrough
2072 */
2073 case ISCSI_OP_SCSI_TMFUNC:
2074 - rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
2075 - if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
2076 - __iscsit_free_cmd(cmd, true, shutdown);
2077 + se_cmd = &cmd->se_cmd;
2078 + __iscsit_free_cmd(cmd, op_scsi, shutdown);
2079 + rc = transport_generic_free_cmd(se_cmd, shutdown);
2080 + if (!rc && shutdown && se_cmd->se_sess) {
2081 + __iscsit_free_cmd(cmd, op_scsi, shutdown);
2082 target_put_sess_cmd(se_cmd);
2083 }
2084 break;
2085 diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
2086 index 31a096aa16ab..6e456de5e564 100644
2087 --- a/drivers/target/target_core_fabric_configfs.c
2088 +++ b/drivers/target/target_core_fabric_configfs.c
2089 @@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link(
2090 pr_err("Source se_lun->lun_se_dev does not exist\n");
2091 return -EINVAL;
2092 }
2093 + if (lun->lun_shutdown) {
2094 + pr_err("Unable to create mappedlun symlink because"
2095 + " lun->lun_shutdown=true\n");
2096 + return -EINVAL;
2097 + }
2098 se_tpg = lun->lun_tpg;
2099
2100 nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
2101 diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
2102 index 2744251178ad..1949f50725a5 100644
2103 --- a/drivers/target/target_core_tpg.c
2104 +++ b/drivers/target/target_core_tpg.c
2105 @@ -640,6 +640,8 @@ void core_tpg_remove_lun(
2106 */
2107 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
2108
2109 + lun->lun_shutdown = true;
2110 +
2111 core_clear_lun_from_tpg(lun, tpg);
2112 /*
2113 * Wait for any active I/O references to percpu se_lun->lun_ref to
2114 @@ -661,6 +663,8 @@ void core_tpg_remove_lun(
2115 }
2116 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
2117 hlist_del_rcu(&lun->link);
2118 +
2119 + lun->lun_shutdown = false;
2120 mutex_unlock(&tpg->tpg_lun_mutex);
2121
2122 percpu_ref_exit(&lun->lun_ref);
2123 diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
2124 index 70c143a5c38c..1a83456a65a0 100644
2125 --- a/drivers/target/target_core_user.c
2126 +++ b/drivers/target/target_core_user.c
2127 @@ -306,24 +306,50 @@ static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
2128 DATA_BLOCK_BITS);
2129 }
2130
2131 -static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
2132 - struct scatterlist *data_sg, unsigned int data_nents)
2133 +static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
2134 + bool bidi)
2135 {
2136 + struct se_cmd *se_cmd = cmd->se_cmd;
2137 int i, block;
2138 int block_remaining = 0;
2139 void *from, *to;
2140 size_t copy_bytes, from_offset;
2141 - struct scatterlist *sg;
2142 + struct scatterlist *sg, *data_sg;
2143 + unsigned int data_nents;
2144 + DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
2145 +
2146 + bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
2147 +
2148 + if (!bidi) {
2149 + data_sg = se_cmd->t_data_sg;
2150 + data_nents = se_cmd->t_data_nents;
2151 + } else {
2152 + uint32_t count;
2153 +
2154 + /*
2155 + * For bidi case, the first count blocks are for Data-Out
2156 + * buffer blocks, and before gathering the Data-In buffer
2157 + * the Data-Out buffer blocks should be discarded.
2158 + */
2159 + count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
2160 + while (count--) {
2161 + block = find_first_bit(bitmap, DATA_BLOCK_BITS);
2162 + clear_bit(block, bitmap);
2163 + }
2164 +
2165 + data_sg = se_cmd->t_bidi_data_sg;
2166 + data_nents = se_cmd->t_bidi_data_nents;
2167 + }
2168
2169 for_each_sg(data_sg, sg, data_nents, i) {
2170 int sg_remaining = sg->length;
2171 to = kmap_atomic(sg_page(sg)) + sg->offset;
2172 while (sg_remaining > 0) {
2173 if (block_remaining == 0) {
2174 - block = find_first_bit(cmd_bitmap,
2175 + block = find_first_bit(bitmap,
2176 DATA_BLOCK_BITS);
2177 block_remaining = DATA_BLOCK_SIZE;
2178 - clear_bit(block, cmd_bitmap);
2179 + clear_bit(block, bitmap);
2180 }
2181 copy_bytes = min_t(size_t, sg_remaining,
2182 block_remaining);
2183 @@ -389,6 +415,27 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
2184 return true;
2185 }
2186
2187 +static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
2188 +{
2189 + struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
2190 + size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
2191 +
2192 + if (se_cmd->se_cmd_flags & SCF_BIDI) {
2193 + BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
2194 + data_length += round_up(se_cmd->t_bidi_data_sg->length,
2195 + DATA_BLOCK_SIZE);
2196 + }
2197 +
2198 + return data_length;
2199 +}
2200 +
2201 +static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
2202 +{
2203 + size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
2204 +
2205 + return data_length / DATA_BLOCK_SIZE;
2206 +}
2207 +
2208 static sense_reason_t
2209 tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
2210 {
2211 @@ -402,7 +449,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
2212 uint32_t cmd_head;
2213 uint64_t cdb_off;
2214 bool copy_to_data_area;
2215 - size_t data_length;
2216 + size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
2217 DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
2218
2219 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
2220 @@ -416,8 +463,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
2221 * expensive to tell how many regions are freed in the bitmap
2222 */
2223 base_command_size = max(offsetof(struct tcmu_cmd_entry,
2224 - req.iov[se_cmd->t_bidi_data_nents +
2225 - se_cmd->t_data_nents]),
2226 + req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
2227 sizeof(struct tcmu_cmd_entry));
2228 command_size = base_command_size
2229 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
2230 @@ -428,11 +474,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
2231
2232 mb = udev->mb_addr;
2233 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
2234 - data_length = se_cmd->data_length;
2235 - if (se_cmd->se_cmd_flags & SCF_BIDI) {
2236 - BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
2237 - data_length += se_cmd->t_bidi_data_sg->length;
2238 - }
2239 if ((command_size > (udev->cmdr_size / 2)) ||
2240 data_length > udev->data_size) {
2241 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
2242 @@ -502,11 +543,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
2243 entry->req.iov_dif_cnt = 0;
2244
2245 /* Handle BIDI commands */
2246 - iov_cnt = 0;
2247 - alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
2248 - se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
2249 - entry->req.iov_bidi_cnt = iov_cnt;
2250 -
2251 + if (se_cmd->se_cmd_flags & SCF_BIDI) {
2252 + iov_cnt = 0;
2253 + iov++;
2254 + alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
2255 + se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
2256 + false);
2257 + entry->req.iov_bidi_cnt = iov_cnt;
2258 + }
2259 /* cmd's data_bitmap is what changed in process */
2260 bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
2261 DATA_BLOCK_BITS);
2262 @@ -582,19 +626,11 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
2263 se_cmd->scsi_sense_length);
2264 free_data_area(udev, cmd);
2265 } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
2266 - DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
2267 -
2268 /* Get Data-In buffer before clean up */
2269 - bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
2270 - gather_data_area(udev, bitmap,
2271 - se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
2272 + gather_data_area(udev, cmd, true);
2273 free_data_area(udev, cmd);
2274 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
2275 - DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
2276 -
2277 - bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
2278 - gather_data_area(udev, bitmap,
2279 - se_cmd->t_data_sg, se_cmd->t_data_nents);
2280 + gather_data_area(udev, cmd, false);
2281 free_data_area(udev, cmd);
2282 } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
2283 free_data_area(udev, cmd);
2284 diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
2285 index 37a37c4d04cb..6f2e729a308f 100644
2286 --- a/drivers/video/fbdev/efifb.c
2287 +++ b/drivers/video/fbdev/efifb.c
2288 @@ -10,6 +10,7 @@
2289 #include <linux/efi.h>
2290 #include <linux/errno.h>
2291 #include <linux/fb.h>
2292 +#include <linux/pci.h>
2293 #include <linux/platform_device.h>
2294 #include <linux/screen_info.h>
2295 #include <video/vga.h>
2296 @@ -118,6 +119,8 @@ static inline bool fb_base_is_valid(void)
2297 return false;
2298 }
2299
2300 +static bool pci_dev_disabled; /* FB base matches BAR of a disabled device */
2301 +
2302 static int efifb_probe(struct platform_device *dev)
2303 {
2304 struct fb_info *info;
2305 @@ -127,7 +130,7 @@ static int efifb_probe(struct platform_device *dev)
2306 unsigned int size_total;
2307 char *option = NULL;
2308
2309 - if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
2310 + if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
2311 return -ENODEV;
2312
2313 if (fb_get_options("efifb", &option))
2314 @@ -327,3 +330,64 @@ static struct platform_driver efifb_driver = {
2315 };
2316
2317 builtin_platform_driver(efifb_driver);
2318 +
2319 +#if defined(CONFIG_PCI) && !defined(CONFIG_X86)
2320 +
2321 +static bool pci_bar_found; /* did we find a BAR matching the efifb base? */
2322 +
2323 +static void claim_efifb_bar(struct pci_dev *dev, int idx)
2324 +{
2325 + u16 word;
2326 +
2327 + pci_bar_found = true;
2328 +
2329 + pci_read_config_word(dev, PCI_COMMAND, &word);
2330 + if (!(word & PCI_COMMAND_MEMORY)) {
2331 + pci_dev_disabled = true;
2332 + dev_err(&dev->dev,
2333 + "BAR %d: assigned to efifb but device is disabled!\n",
2334 + idx);
2335 + return;
2336 + }
2337 +
2338 + if (pci_claim_resource(dev, idx)) {
2339 + pci_dev_disabled = true;
2340 + dev_err(&dev->dev,
2341 + "BAR %d: failed to claim resource for efifb!\n", idx);
2342 + return;
2343 + }
2344 +
2345 + dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx);
2346 +}
2347 +
2348 +static void efifb_fixup_resources(struct pci_dev *dev)
2349 +{
2350 + u64 base = screen_info.lfb_base;
2351 + u64 size = screen_info.lfb_size;
2352 + int i;
2353 +
2354 + if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
2355 + return;
2356 +
2357 + if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
2358 + base |= (u64)screen_info.ext_lfb_base << 32;
2359 +
2360 + if (!base)
2361 + return;
2362 +
2363 + for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
2364 + struct resource *res = &dev->resource[i];
2365 +
2366 + if (!(res->flags & IORESOURCE_MEM))
2367 + continue;
2368 +
2369 + if (res->start <= base && res->end >= base + size - 1) {
2370 + claim_efifb_bar(dev, i);
2371 + break;
2372 + }
2373 + }
2374 +}
2375 +DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY,
2376 + 16, efifb_fixup_resources);
2377 +
2378 +#endif
2379 diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
2380 index 0567d517eed3..ea2f19f5fbde 100644
2381 --- a/drivers/video/fbdev/xen-fbfront.c
2382 +++ b/drivers/video/fbdev/xen-fbfront.c
2383 @@ -644,7 +644,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
2384 break;
2385
2386 case XenbusStateInitWait:
2387 -InitWait:
2388 xenbus_switch_state(dev, XenbusStateConnected);
2389 break;
2390
2391 @@ -655,7 +654,8 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
2392 * get Connected twice here.
2393 */
2394 if (dev->state != XenbusStateConnected)
2395 - goto InitWait; /* no InitWait seen yet, fudge it */
2396 + /* no InitWait seen yet, fudge it */
2397 + xenbus_switch_state(dev, XenbusStateConnected);
2398
2399 if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
2400 "request-update", "%d", &val) < 0)
2401 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2402 index 1cd0e2eefc66..3925758f6dde 100644
2403 --- a/fs/cifs/file.c
2404 +++ b/fs/cifs/file.c
2405 @@ -2597,7 +2597,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2406 wdata->credits = credits;
2407
2408 if (!wdata->cfile->invalidHandle ||
2409 - !cifs_reopen_file(wdata->cfile, false))
2410 + !(rc = cifs_reopen_file(wdata->cfile, false)))
2411 rc = server->ops->async_writev(wdata,
2412 cifs_uncached_writedata_release);
2413 if (rc) {
2414 @@ -3002,7 +3002,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
2415 rdata->credits = credits;
2416
2417 if (!rdata->cfile->invalidHandle ||
2418 - !cifs_reopen_file(rdata->cfile, true))
2419 + !(rc = cifs_reopen_file(rdata->cfile, true)))
2420 rc = server->ops->async_readv(rdata);
2421 error:
2422 if (rc) {
2423 @@ -3577,7 +3577,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
2424 }
2425
2426 if (!rdata->cfile->invalidHandle ||
2427 - !cifs_reopen_file(rdata->cfile, true))
2428 + !(rc = cifs_reopen_file(rdata->cfile, true)))
2429 rc = server->ops->async_readv(rdata);
2430 if (rc) {
2431 add_credits_and_wake_if(server, rdata->credits, 0);
2432 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2433 index bdd32925a15e..7080dac3592c 100644
2434 --- a/fs/cifs/smb2pdu.c
2435 +++ b/fs/cifs/smb2pdu.c
2436 @@ -1987,6 +1987,9 @@ void smb2_reconnect_server(struct work_struct *work)
2437 struct cifs_tcon *tcon, *tcon2;
2438 struct list_head tmp_list;
2439 int tcon_exist = false;
2440 + int rc;
2441 + int resched = false;
2442 +
2443
2444 /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
2445 mutex_lock(&server->reconnect_mutex);
2446 @@ -2014,13 +2017,18 @@ void smb2_reconnect_server(struct work_struct *work)
2447 spin_unlock(&cifs_tcp_ses_lock);
2448
2449 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
2450 - if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon))
2451 + rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
2452 + if (!rc)
2453 cifs_reopen_persistent_handles(tcon);
2454 + else
2455 + resched = true;
2456 list_del_init(&tcon->rlist);
2457 cifs_put_tcon(tcon);
2458 }
2459
2460 cifs_dbg(FYI, "Reconnecting tcons finished\n");
2461 + if (resched)
2462 + queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
2463 mutex_unlock(&server->reconnect_mutex);
2464
2465 /* now we can safely release srv struct */
2466 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
2467 index dc9d64ac5969..c78fce404654 100644
2468 --- a/fs/ext4/inode.c
2469 +++ b/fs/ext4/inode.c
2470 @@ -71,10 +71,9 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
2471 csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
2472 csum_size);
2473 offset += csum_size;
2474 - csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
2475 - EXT4_INODE_SIZE(inode->i_sb) -
2476 - offset);
2477 }
2478 + csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
2479 + EXT4_INODE_SIZE(inode->i_sb) - offset);
2480 }
2481
2482 return csum;
2483 diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
2484 index f419dd999581..fe2cbeb90772 100644
2485 --- a/fs/orangefs/devorangefs-req.c
2486 +++ b/fs/orangefs/devorangefs-req.c
2487 @@ -208,14 +208,19 @@ static ssize_t orangefs_devreq_read(struct file *file,
2488 continue;
2489 /*
2490 * Skip ops whose filesystem we don't know about unless
2491 - * it is being mounted.
2492 + * it is being mounted or unmounted. It is possible for
2493 + * a filesystem we don't know about to be unmounted if
2494 + * it fails to mount in the kernel after userspace has
2495 + * been sent the mount request.
2496 */
2497 /* XXX: is there a better way to detect this? */
2498 } else if (ret == -1 &&
2499 !(op->upcall.type ==
2500 ORANGEFS_VFS_OP_FS_MOUNT ||
2501 op->upcall.type ==
2502 - ORANGEFS_VFS_OP_GETATTR)) {
2503 + ORANGEFS_VFS_OP_GETATTR ||
2504 + op->upcall.type ==
2505 + ORANGEFS_VFS_OP_FS_UMOUNT)) {
2506 gossip_debug(GOSSIP_DEV_DEBUG,
2507 "orangefs: skipping op tag %llu %s\n",
2508 llu(op->tag), get_opname_string(op));
2509 diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
2510 index 3bf803d732c5..45dd8f27b2ac 100644
2511 --- a/fs/orangefs/orangefs-kernel.h
2512 +++ b/fs/orangefs/orangefs-kernel.h
2513 @@ -249,6 +249,7 @@ struct orangefs_sb_info_s {
2514 char devname[ORANGEFS_MAX_SERVER_ADDR_LEN];
2515 struct super_block *sb;
2516 int mount_pending;
2517 + int no_list;
2518 struct list_head list;
2519 };
2520
2521 diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
2522 index cd261c8de53a..629d8c917fa6 100644
2523 --- a/fs/orangefs/super.c
2524 +++ b/fs/orangefs/super.c
2525 @@ -493,7 +493,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
2526
2527 if (ret) {
2528 d = ERR_PTR(ret);
2529 - goto free_op;
2530 + goto free_sb_and_op;
2531 }
2532
2533 /*
2534 @@ -519,6 +519,9 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
2535 spin_unlock(&orangefs_superblocks_lock);
2536 op_release(new_op);
2537
2538 + /* Must be removed from the list now. */
2539 + ORANGEFS_SB(sb)->no_list = 0;
2540 +
2541 if (orangefs_userspace_version >= 20906) {
2542 new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES);
2543 if (!new_op)
2544 @@ -533,6 +536,10 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
2545
2546 return dget(sb->s_root);
2547
2548 +free_sb_and_op:
2549 + /* Will call orangefs_kill_sb with sb not in list. */
2550 + ORANGEFS_SB(sb)->no_list = 1;
2551 + deactivate_locked_super(sb);
2552 free_op:
2553 gossip_err("orangefs_mount: mount request failed with %d\n", ret);
2554 if (ret == -EINVAL) {
2555 @@ -558,12 +565,14 @@ void orangefs_kill_sb(struct super_block *sb)
2556 */
2557 orangefs_unmount_sb(sb);
2558
2559 - /* remove the sb from our list of orangefs specific sb's */
2560 -
2561 - spin_lock(&orangefs_superblocks_lock);
2562 - __list_del_entry(&ORANGEFS_SB(sb)->list); /* not list_del_init */
2563 - ORANGEFS_SB(sb)->list.prev = NULL;
2564 - spin_unlock(&orangefs_superblocks_lock);
2565 + if (!ORANGEFS_SB(sb)->no_list) {
2566 + /* remove the sb from our list of orangefs specific sb's */
2567 + spin_lock(&orangefs_superblocks_lock);
2568 + /* not list_del_init */
2569 + __list_del_entry(&ORANGEFS_SB(sb)->list);
2570 + ORANGEFS_SB(sb)->list.prev = NULL;
2571 + spin_unlock(&orangefs_superblocks_lock);
2572 + }
2573
2574 /*
2575 * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
2576 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
2577 index 35b92d81692f..b1517b6dcbdd 100644
2578 --- a/fs/proc/task_mmu.c
2579 +++ b/fs/proc/task_mmu.c
2580 @@ -899,7 +899,14 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
2581 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
2582 unsigned long addr, pmd_t *pmdp)
2583 {
2584 - pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
2585 + pmd_t pmd = *pmdp;
2586 +
2587 + /* See comment in change_huge_pmd() */
2588 + pmdp_invalidate(vma, addr, pmdp);
2589 + if (pmd_dirty(*pmdp))
2590 + pmd = pmd_mkdirty(pmd);
2591 + if (pmd_young(*pmdp))
2592 + pmd = pmd_mkyoung(pmd);
2593
2594 pmd = pmd_wrprotect(pmd);
2595 pmd = pmd_clear_soft_dirty(pmd);
2596 diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
2597 index 1d4f365d8f03..f6d9af3efa45 100644
2598 --- a/include/crypto/internal/hash.h
2599 +++ b/include/crypto/internal/hash.h
2600 @@ -166,6 +166,16 @@ static inline struct ahash_instance *ahash_alloc_instance(
2601 return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
2602 }
2603
2604 +static inline void ahash_request_complete(struct ahash_request *req, int err)
2605 +{
2606 + req->base.complete(&req->base, err);
2607 +}
2608 +
2609 +static inline u32 ahash_request_flags(struct ahash_request *req)
2610 +{
2611 + return req->base.flags;
2612 +}
2613 +
2614 static inline struct crypto_ahash *crypto_spawn_ahash(
2615 struct crypto_ahash_spawn *spawn)
2616 {
2617 diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
2618 index c83c23f0577b..307ae63ef262 100644
2619 --- a/include/linux/cgroup.h
2620 +++ b/include/linux/cgroup.h
2621 @@ -570,6 +570,25 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
2622 pr_cont_kernfs_path(cgrp->kn);
2623 }
2624
2625 +static inline void cgroup_init_kthreadd(void)
2626 +{
2627 + /*
2628 + * kthreadd is inherited by all kthreads, keep it in the root so
2629 + * that the new kthreads are guaranteed to stay in the root until
2630 + * initialization is finished.
2631 + */
2632 + current->no_cgroup_migration = 1;
2633 +}
2634 +
2635 +static inline void cgroup_kthread_ready(void)
2636 +{
2637 + /*
2638 + * This kthread finished initialization. The creator should have
2639 + * set PF_NO_SETAFFINITY if this kthread should stay in the root.
2640 + */
2641 + current->no_cgroup_migration = 0;
2642 +}
2643 +
2644 #else /* !CONFIG_CGROUPS */
2645
2646 struct cgroup_subsys_state;
2647 @@ -590,6 +609,8 @@ static inline void cgroup_free(struct task_struct *p) {}
2648
2649 static inline int cgroup_init_early(void) { return 0; }
2650 static inline int cgroup_init(void) { return 0; }
2651 +static inline void cgroup_init_kthreadd(void) {}
2652 +static inline void cgroup_kthread_ready(void) {}
2653
2654 static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
2655 struct cgroup *ancestor)
2656 diff --git a/include/linux/sched.h b/include/linux/sched.h
2657 index 75d9a57e212e..f425eb3318ab 100644
2658 --- a/include/linux/sched.h
2659 +++ b/include/linux/sched.h
2660 @@ -1584,6 +1584,10 @@ struct task_struct {
2661 #ifdef CONFIG_COMPAT_BRK
2662 unsigned brk_randomized:1;
2663 #endif
2664 +#ifdef CONFIG_CGROUPS
2665 + /* disallow userland-initiated cgroup migration */
2666 + unsigned no_cgroup_migration:1;
2667 +#endif
2668
2669 unsigned long atomic_flags; /* Flags needing atomic access. */
2670
2671 diff --git a/include/linux/uio.h b/include/linux/uio.h
2672 index 6e22b544d039..c146ebc69c53 100644
2673 --- a/include/linux/uio.h
2674 +++ b/include/linux/uio.h
2675 @@ -39,7 +39,10 @@ struct iov_iter {
2676 };
2677 union {
2678 unsigned long nr_segs;
2679 - int idx;
2680 + struct {
2681 + int idx;
2682 + int start_idx;
2683 + };
2684 };
2685 };
2686
2687 @@ -81,6 +84,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
2688 size_t iov_iter_copy_from_user_atomic(struct page *page,
2689 struct iov_iter *i, unsigned long offset, size_t bytes);
2690 void iov_iter_advance(struct iov_iter *i, size_t bytes);
2691 +void iov_iter_revert(struct iov_iter *i, size_t bytes);
2692 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
2693 size_t iov_iter_single_seg_count(const struct iov_iter *i);
2694 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
2695 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
2696 index 6233e8fd95b5..0383c601e17c 100644
2697 --- a/include/target/target_core_base.h
2698 +++ b/include/target/target_core_base.h
2699 @@ -705,6 +705,7 @@ struct se_lun {
2700 u64 unpacked_lun;
2701 #define SE_LUN_LINK_MAGIC 0xffff7771
2702 u32 lun_link_magic;
2703 + bool lun_shutdown;
2704 bool lun_access_ro;
2705 u32 lun_index;
2706
2707 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
2708 index 4e2f3de0e40b..a3d2aad2443f 100644
2709 --- a/kernel/cgroup.c
2710 +++ b/kernel/cgroup.c
2711 @@ -2920,11 +2920,12 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
2712 tsk = tsk->group_leader;
2713
2714 /*
2715 - * Workqueue threads may acquire PF_NO_SETAFFINITY and become
2716 - * trapped in a cpuset, or RT worker may be born in a cgroup
2717 - * with no rt_runtime allocated. Just say no.
2718 + * kthreads may acquire PF_NO_SETAFFINITY during initialization.
2719 + * If userland migrates such a kthread to a non-root cgroup, it can
2720 + * become trapped in a cpuset, or RT kthread may be born in a
2721 + * cgroup with no rt_runtime allocated. Just say no.
2722 */
2723 - if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
2724 + if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
2725 ret = -EINVAL;
2726 goto out_unlock_rcu;
2727 }
2728 diff --git a/kernel/kthread.c b/kernel/kthread.c
2729 index be2cc1f9dd57..c2c911a106cf 100644
2730 --- a/kernel/kthread.c
2731 +++ b/kernel/kthread.c
2732 @@ -18,6 +18,7 @@
2733 #include <linux/freezer.h>
2734 #include <linux/ptrace.h>
2735 #include <linux/uaccess.h>
2736 +#include <linux/cgroup.h>
2737 #include <trace/events/sched.h>
2738
2739 static DEFINE_SPINLOCK(kthread_create_lock);
2740 @@ -205,6 +206,7 @@ static int kthread(void *_create)
2741 ret = -EINTR;
2742
2743 if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
2744 + cgroup_kthread_ready();
2745 __kthread_parkme(&self);
2746 ret = threadfn(data);
2747 }
2748 @@ -530,6 +532,7 @@ int kthreadd(void *unused)
2749 set_mems_allowed(node_states[N_MEMORY]);
2750
2751 current->flags |= PF_NOFREEZE;
2752 + cgroup_init_kthreadd();
2753
2754 for (;;) {
2755 set_current_state(TASK_INTERRUPTIBLE);
2756 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
2757 index da87b3cba5b3..221eb59272e1 100644
2758 --- a/kernel/trace/ftrace.c
2759 +++ b/kernel/trace/ftrace.c
2760 @@ -3736,23 +3736,24 @@ static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
2761 ftrace_probe_registered = 1;
2762 }
2763
2764 -static void __disable_ftrace_function_probe(void)
2765 +static bool __disable_ftrace_function_probe(void)
2766 {
2767 int i;
2768
2769 if (!ftrace_probe_registered)
2770 - return;
2771 + return false;
2772
2773 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2774 struct hlist_head *hhd = &ftrace_func_hash[i];
2775 if (hhd->first)
2776 - return;
2777 + return false;
2778 }
2779
2780 /* no more funcs left */
2781 ftrace_shutdown(&trace_probe_ops, 0);
2782
2783 ftrace_probe_registered = 0;
2784 + return true;
2785 }
2786
2787
2788 @@ -3882,6 +3883,7 @@ static void
2789 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2790 void *data, int flags)
2791 {
2792 + struct ftrace_ops_hash old_hash_ops;
2793 struct ftrace_func_entry *rec_entry;
2794 struct ftrace_func_probe *entry;
2795 struct ftrace_func_probe *p;
2796 @@ -3893,6 +3895,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2797 struct hlist_node *tmp;
2798 char str[KSYM_SYMBOL_LEN];
2799 int i, ret;
2800 + bool disabled;
2801
2802 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2803 func_g.search = NULL;
2804 @@ -3911,6 +3914,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2805
2806 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
2807
2808 + old_hash_ops.filter_hash = old_hash;
2809 + /* Probes only have filters */
2810 + old_hash_ops.notrace_hash = NULL;
2811 +
2812 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2813 if (!hash)
2814 /* Hmm, should report this somehow */
2815 @@ -3948,12 +3955,17 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2816 }
2817 }
2818 mutex_lock(&ftrace_lock);
2819 - __disable_ftrace_function_probe();
2820 + disabled = __disable_ftrace_function_probe();
2821 /*
2822 * Remove after the disable is called. Otherwise, if the last
2823 * probe is removed, a null hash means *all enabled*.
2824 */
2825 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
2826 +
2827 + /* still need to update the function call sites */
2828 + if (ftrace_enabled && !disabled)
2829 + ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
2830 + &old_hash_ops);
2831 synchronize_sched();
2832 if (!ret)
2833 free_ftrace_hash_rcu(old_hash);
2834 @@ -5389,6 +5401,15 @@ static void clear_ftrace_pids(struct trace_array *tr)
2835 trace_free_pid_list(pid_list);
2836 }
2837
2838 +void ftrace_clear_pids(struct trace_array *tr)
2839 +{
2840 + mutex_lock(&ftrace_lock);
2841 +
2842 + clear_ftrace_pids(tr);
2843 +
2844 + mutex_unlock(&ftrace_lock);
2845 +}
2846 +
2847 static void ftrace_pid_reset(struct trace_array *tr)
2848 {
2849 mutex_lock(&ftrace_lock);
2850 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2851 index 90b66ed6f0e2..862bc8805d97 100644
2852 --- a/kernel/trace/trace.c
2853 +++ b/kernel/trace/trace.c
2854 @@ -7150,6 +7150,7 @@ static int instance_rmdir(const char *name)
2855
2856 tracing_set_nop(tr);
2857 event_trace_del_tracer(tr);
2858 + ftrace_clear_pids(tr);
2859 ftrace_destroy_function_files(tr);
2860 tracefs_remove_recursive(tr->dir);
2861 free_trace_buffers(tr);
2862 diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
2863 index fd24b1f9ac43..b0d8576c27ae 100644
2864 --- a/kernel/trace/trace.h
2865 +++ b/kernel/trace/trace.h
2866 @@ -870,6 +870,7 @@ int using_ftrace_ops_list_func(void);
2867 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
2868 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
2869 struct dentry *d_tracer);
2870 +void ftrace_clear_pids(struct trace_array *tr);
2871 #else
2872 static inline int ftrace_trace_task(struct trace_array *tr)
2873 {
2874 @@ -888,6 +889,7 @@ ftrace_init_global_array_ops(struct trace_array *tr) { }
2875 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
2876 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
2877 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
2878 +static inline void ftrace_clear_pids(struct trace_array *tr) { }
2879 /* ftace_func_t type is not defined, use macro instead of static inline */
2880 #define ftrace_init_array_ops(tr, func) do { } while (0)
2881 #endif /* CONFIG_FUNCTION_TRACER */
2882 diff --git a/lib/iov_iter.c b/lib/iov_iter.c
2883 index efb0b4d267a1..a75ea633b5c4 100644
2884 --- a/lib/iov_iter.c
2885 +++ b/lib/iov_iter.c
2886 @@ -734,6 +734,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
2887 }
2888 EXPORT_SYMBOL(iov_iter_advance);
2889
2890 +void iov_iter_revert(struct iov_iter *i, size_t unroll)
2891 +{
2892 + if (!unroll)
2893 + return;
2894 + i->count += unroll;
2895 + if (unlikely(i->type & ITER_PIPE)) {
2896 + struct pipe_inode_info *pipe = i->pipe;
2897 + int idx = i->idx;
2898 + size_t off = i->iov_offset;
2899 + while (1) {
2900 + size_t n = off - pipe->bufs[idx].offset;
2901 + if (unroll < n) {
2902 + off -= (n - unroll);
2903 + break;
2904 + }
2905 + unroll -= n;
2906 + if (!unroll && idx == i->start_idx) {
2907 + off = 0;
2908 + break;
2909 + }
2910 + if (!idx--)
2911 + idx = pipe->buffers - 1;
2912 + off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
2913 + }
2914 + i->iov_offset = off;
2915 + i->idx = idx;
2916 + pipe_truncate(i);
2917 + return;
2918 + }
2919 + if (unroll <= i->iov_offset) {
2920 + i->iov_offset -= unroll;
2921 + return;
2922 + }
2923 + unroll -= i->iov_offset;
2924 + if (i->type & ITER_BVEC) {
2925 + const struct bio_vec *bvec = i->bvec;
2926 + while (1) {
2927 + size_t n = (--bvec)->bv_len;
2928 + i->nr_segs++;
2929 + if (unroll <= n) {
2930 + i->bvec = bvec;
2931 + i->iov_offset = n - unroll;
2932 + return;
2933 + }
2934 + unroll -= n;
2935 + }
2936 + } else { /* same logics for iovec and kvec */
2937 + const struct iovec *iov = i->iov;
2938 + while (1) {
2939 + size_t n = (--iov)->iov_len;
2940 + i->nr_segs++;
2941 + if (unroll <= n) {
2942 + i->iov = iov;
2943 + i->iov_offset = n - unroll;
2944 + return;
2945 + }
2946 + unroll -= n;
2947 + }
2948 + }
2949 +}
2950 +EXPORT_SYMBOL(iov_iter_revert);
2951 +
2952 /*
2953 * Return the count of just the current iov_iter segment.
2954 */
2955 @@ -787,6 +849,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
2956 i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
2957 i->iov_offset = 0;
2958 i->count = count;
2959 + i->start_idx = i->idx;
2960 }
2961 EXPORT_SYMBOL(iov_iter_pipe);
2962
2963 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
2964 index 917555cf6be0..d5b2b759f76f 100644
2965 --- a/mm/huge_memory.c
2966 +++ b/mm/huge_memory.c
2967 @@ -1380,8 +1380,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2968 deactivate_page(page);
2969
2970 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
2971 - orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
2972 - tlb->fullmm);
2973 + pmdp_invalidate(vma, addr, pmd);
2974 orig_pmd = pmd_mkold(orig_pmd);
2975 orig_pmd = pmd_mkclean(orig_pmd);
2976
2977 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2978 index 0de26691f0f5..47559cc0cdcc 100644
2979 --- a/mm/memcontrol.c
2980 +++ b/mm/memcontrol.c
2981 @@ -2152,6 +2152,8 @@ struct memcg_kmem_cache_create_work {
2982 struct work_struct work;
2983 };
2984
2985 +static struct workqueue_struct *memcg_kmem_cache_create_wq;
2986 +
2987 static void memcg_kmem_cache_create_func(struct work_struct *w)
2988 {
2989 struct memcg_kmem_cache_create_work *cw =
2990 @@ -2183,7 +2185,7 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2991 cw->cachep = cachep;
2992 INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2993
2994 - schedule_work(&cw->work);
2995 + queue_work(memcg_kmem_cache_create_wq, &cw->work);
2996 }
2997
2998 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2999 @@ -5786,6 +5788,17 @@ static int __init mem_cgroup_init(void)
3000 {
3001 int cpu, node;
3002
3003 +#ifndef CONFIG_SLOB
3004 + /*
3005 + * Kmem cache creation is mostly done with the slab_mutex held,
3006 + * so use a special workqueue to avoid stalling all worker
3007 + * threads in case lots of cgroups are created simultaneously.
3008 + */
3009 + memcg_kmem_cache_create_wq =
3010 + alloc_ordered_workqueue("memcg_kmem_cache_create", 0);
3011 + BUG_ON(!memcg_kmem_cache_create_wq);
3012 +#endif
3013 +
3014 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
3015
3016 for_each_possible_cpu(cpu)
3017 diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
3018 index b0bc023d25c5..1689bb58e0d1 100644
3019 --- a/mm/zsmalloc.c
3020 +++ b/mm/zsmalloc.c
3021 @@ -280,7 +280,7 @@ struct zs_pool {
3022 struct zspage {
3023 struct {
3024 unsigned int fullness:FULLNESS_BITS;
3025 - unsigned int class:CLASS_BITS;
3026 + unsigned int class:CLASS_BITS + 1;
3027 unsigned int isolated:ISOLATED_BITS;
3028 unsigned int magic:MAGIC_VAL_BITS;
3029 };
3030 diff --git a/net/core/datagram.c b/net/core/datagram.c
3031 index b7de71f8d5d3..963732e775df 100644
3032 --- a/net/core/datagram.c
3033 +++ b/net/core/datagram.c
3034 @@ -378,7 +378,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
3035 struct iov_iter *to, int len)
3036 {
3037 int start = skb_headlen(skb);
3038 - int i, copy = start - offset;
3039 + int i, copy = start - offset, start_off = offset, n;
3040 struct sk_buff *frag_iter;
3041
3042 trace_skb_copy_datagram_iovec(skb, len);
3043 @@ -387,11 +387,12 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
3044 if (copy > 0) {
3045 if (copy > len)
3046 copy = len;
3047 - if (copy_to_iter(skb->data + offset, copy, to) != copy)
3048 + n = copy_to_iter(skb->data + offset, copy, to);
3049 + offset += n;
3050 + if (n != copy)
3051 goto short_copy;
3052 if ((len -= copy) == 0)
3053 return 0;
3054 - offset += copy;
3055 }
3056
3057 /* Copy paged appendix. Hmm... why does this look so complicated? */
3058 @@ -405,13 +406,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
3059 if ((copy = end - offset) > 0) {
3060 if (copy > len)
3061 copy = len;
3062 - if (copy_page_to_iter(skb_frag_page(frag),
3063 + n = copy_page_to_iter(skb_frag_page(frag),
3064 frag->page_offset + offset -
3065 - start, copy, to) != copy)
3066 + start, copy, to);
3067 + offset += n;
3068 + if (n != copy)
3069 goto short_copy;
3070 if (!(len -= copy))
3071 return 0;
3072 - offset += copy;
3073 }
3074 start = end;
3075 }
3076 @@ -443,6 +445,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
3077 */
3078
3079 fault:
3080 + iov_iter_revert(to, offset - start_off);
3081 return -EFAULT;
3082
3083 short_copy:
3084 @@ -593,7 +596,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
3085 __wsum *csump)
3086 {
3087 int start = skb_headlen(skb);
3088 - int i, copy = start - offset;
3089 + int i, copy = start - offset, start_off = offset;
3090 struct sk_buff *frag_iter;
3091 int pos = 0;
3092 int n;
3093 @@ -603,11 +606,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
3094 if (copy > len)
3095 copy = len;
3096 n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
3097 + offset += n;
3098 if (n != copy)
3099 goto fault;
3100 if ((len -= copy) == 0)
3101 return 0;
3102 - offset += copy;
3103 pos = copy;
3104 }
3105
3106 @@ -629,12 +632,12 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
3107 offset - start, copy,
3108 &csum2, to);
3109 kunmap(page);
3110 + offset += n;
3111 if (n != copy)
3112 goto fault;
3113 *csump = csum_block_add(*csump, csum2, pos);
3114 if (!(len -= copy))
3115 return 0;
3116 - offset += copy;
3117 pos += copy;
3118 }
3119 start = end;
3120 @@ -667,6 +670,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
3121 return 0;
3122
3123 fault:
3124 + iov_iter_revert(to, offset - start_off);
3125 return -EFAULT;
3126 }
3127
3128 @@ -751,6 +755,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
3129 }
3130 return 0;
3131 csum_error:
3132 + iov_iter_revert(&msg->msg_iter, chunk);
3133 return -EINVAL;
3134 fault:
3135 return -EFAULT;
3136 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3137 index bff4460f17be..8d6c09f082c2 100644
3138 --- a/net/ipv6/route.c
3139 +++ b/net/ipv6/route.c
3140 @@ -2166,6 +2166,8 @@ static int ip6_route_del(struct fib6_config *cfg)
3141 continue;
3142 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
3143 continue;
3144 + if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
3145 + continue;
3146 dst_hold(&rt->dst);
3147 read_unlock_bh(&table->tb6_lock);
3148
3149 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3150 index 6cbe5bdf2b15..673442025bfd 100644
3151 --- a/net/sctp/socket.c
3152 +++ b/net/sctp/socket.c
3153 @@ -4735,6 +4735,12 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
3154 if (!asoc)
3155 return -EINVAL;
3156
3157 + /* If there is a thread waiting on more sndbuf space for
3158 + * sending on this asoc, it cannot be peeled.
3159 + */
3160 + if (waitqueue_active(&asoc->wait))
3161 + return -EBUSY;
3162 +
3163 /* An association cannot be branched off from an already peeled-off
3164 * socket, nor is this supported for tcp style sockets.
3165 */
3166 @@ -7427,8 +7433,6 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
3167 */
3168 release_sock(sk);
3169 current_timeo = schedule_timeout(current_timeo);
3170 - if (sk != asoc->base.sk)
3171 - goto do_error;
3172 lock_sock(sk);
3173
3174 *timeo_p = current_timeo;
3175 diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
3176 index fd5d1e091038..e18fe9d6f08f 100644
3177 --- a/sound/soc/intel/Kconfig
3178 +++ b/sound/soc/intel/Kconfig
3179 @@ -33,11 +33,9 @@ config SND_SOC_INTEL_SST
3180 select SND_SOC_INTEL_SST_MATCH if ACPI
3181 depends on (X86 || COMPILE_TEST)
3182
3183 -# firmware stuff depends DW_DMAC_CORE; since there is no depends-on from
3184 -# the reverse selection, each machine driver needs to select
3185 -# SND_SOC_INTEL_SST_FIRMWARE carefully depending on DW_DMAC_CORE
3186 config SND_SOC_INTEL_SST_FIRMWARE
3187 tristate
3188 + select DW_DMAC_CORE
3189
3190 config SND_SOC_INTEL_SST_ACPI
3191 tristate
3192 @@ -47,16 +45,18 @@ config SND_SOC_INTEL_SST_MATCH
3193
3194 config SND_SOC_INTEL_HASWELL
3195 tristate
3196 + select SND_SOC_INTEL_SST
3197 select SND_SOC_INTEL_SST_FIRMWARE
3198
3199 config SND_SOC_INTEL_BAYTRAIL
3200 tristate
3201 + select SND_SOC_INTEL_SST
3202 + select SND_SOC_INTEL_SST_FIRMWARE
3203
3204 config SND_SOC_INTEL_HASWELL_MACH
3205 tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
3206 depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
3207 - depends on DW_DMAC_CORE
3208 - select SND_SOC_INTEL_SST
3209 + depends on DMADEVICES
3210 select SND_SOC_INTEL_HASWELL
3211 select SND_SOC_RT5640
3212 help
3213 @@ -99,9 +99,8 @@ config SND_SOC_INTEL_BXT_RT298_MACH
3214 config SND_SOC_INTEL_BYT_RT5640_MACH
3215 tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
3216 depends on X86_INTEL_LPSS && I2C
3217 - depends on DW_DMAC_CORE && (SND_SST_IPC_ACPI = n)
3218 - select SND_SOC_INTEL_SST
3219 - select SND_SOC_INTEL_SST_FIRMWARE
3220 + depends on DMADEVICES
3221 + depends on SND_SST_IPC_ACPI = n
3222 select SND_SOC_INTEL_BAYTRAIL
3223 select SND_SOC_RT5640
3224 help
3225 @@ -112,9 +111,8 @@ config SND_SOC_INTEL_BYT_RT5640_MACH
3226 config SND_SOC_INTEL_BYT_MAX98090_MACH
3227 tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
3228 depends on X86_INTEL_LPSS && I2C
3229 - depends on DW_DMAC_CORE && (SND_SST_IPC_ACPI = n)
3230 - select SND_SOC_INTEL_SST
3231 - select SND_SOC_INTEL_SST_FIRMWARE
3232 + depends on DMADEVICES
3233 + depends on SND_SST_IPC_ACPI = n
3234 select SND_SOC_INTEL_BAYTRAIL
3235 select SND_SOC_MAX98090
3236 help
3237 @@ -123,9 +121,8 @@ config SND_SOC_INTEL_BYT_MAX98090_MACH
3238
3239 config SND_SOC_INTEL_BDW_RT5677_MACH
3240 tristate "ASoC Audio driver for Intel Broadwell with RT5677 codec"
3241 - depends on X86_INTEL_LPSS && GPIOLIB && I2C && DW_DMAC
3242 - depends on DW_DMAC_CORE=y
3243 - select SND_SOC_INTEL_SST
3244 + depends on X86_INTEL_LPSS && GPIOLIB && I2C
3245 + depends on DMADEVICES
3246 select SND_SOC_INTEL_HASWELL
3247 select SND_SOC_RT5677
3248 help
3249 @@ -134,10 +131,8 @@ config SND_SOC_INTEL_BDW_RT5677_MACH
3250
3251 config SND_SOC_INTEL_BROADWELL_MACH
3252 tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
3253 - depends on X86_INTEL_LPSS && I2C && DW_DMAC && \
3254 - I2C_DESIGNWARE_PLATFORM
3255 - depends on DW_DMAC_CORE
3256 - select SND_SOC_INTEL_SST
3257 + depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
3258 + depends on DMADEVICES
3259 select SND_SOC_INTEL_HASWELL
3260 select SND_SOC_RT286
3261 help