Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0335-4.9.236-all-fixes.patch



Revision 3632 - Mon Sep 14 09:54:03 2020 UTC by niro
File size: 120985 bytes
-linux-4.9.236
1 diff --git a/Documentation/filesystems/affs.txt b/Documentation/filesystems/affs.txt
2 index 71b63c2b98410..a8f1a58e36922 100644
3 --- a/Documentation/filesystems/affs.txt
4 +++ b/Documentation/filesystems/affs.txt
5 @@ -93,13 +93,15 @@ The Amiga protection flags RWEDRWEDHSPARWED are handled as follows:
6
7 - R maps to r for user, group and others. On directories, R implies x.
8
9 - - If both W and D are allowed, w will be set.
10 + - W maps to w.
11
12 - E maps to x.
13
14 - - H and P are always retained and ignored under Linux.
15 + - D is ignored.
16
17 - - A is always reset when a file is written to.
18 + - H, S and P are always retained and ignored under Linux.
19 +
20 + - A is cleared when a file is written to.
21
22 User id and group id will be used unless set[gu]id are given as mount
23 options. Since most of the Amiga file systems are single user systems
24 @@ -111,11 +113,13 @@ Linux -> Amiga:
25
26 The Linux rwxrwxrwx file mode is handled as follows:
27
28 - - r permission will set R for user, group and others.
29 + - r permission will allow R for user, group and others.
30 +
31 + - w permission will allow W for user, group and others.
32
33 - - w permission will set W and D for user, group and others.
34 + - x permission of the user will allow E for plain files.
35
36 - - x permission of the user will set E for plain files.
37 + - D will be allowed for user, group and others.
38
39 - All other flags (suid, sgid, ...) are ignored and will
40 not be retained.
41 diff --git a/Makefile b/Makefile
42 index d21084a36bd4d..a454c9cd126e0 100644
43 --- a/Makefile
44 +++ b/Makefile
45 @@ -1,6 +1,6 @@
46 VERSION = 4
47 PATCHLEVEL = 9
48 -SUBLEVEL = 235
49 +SUBLEVEL = 236
50 EXTRAVERSION =
51 NAME = Roaring Lionus
52
53 diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
54 index a11c8c2915c93..e8cb69b0cf4fb 100644
55 --- a/arch/arm64/include/asm/kvm_arm.h
56 +++ b/arch/arm64/include/asm/kvm_arm.h
57 @@ -78,10 +78,11 @@
58 * IMO: Override CPSR.I and enable signaling with VI
59 * FMO: Override CPSR.F and enable signaling with VF
60 * SWIO: Turn set/way invalidates into set/way clean+invalidate
61 + * PTW: Take a stage2 fault if a stage1 walk steps in device memory
62 */
63 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
64 HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
65 - HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
66 + HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_PTW)
67 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
68 #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
69 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
70 diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
71 index 8f5cf83b23396..3d2fddac25b91 100644
72 --- a/arch/arm64/include/asm/kvm_asm.h
73 +++ b/arch/arm64/include/asm/kvm_asm.h
74 @@ -82,6 +82,34 @@ extern u32 __init_stage2_translation(void);
75 *__hyp_this_cpu_ptr(sym); \
76 })
77
78 +#define __KVM_EXTABLE(from, to) \
79 + " .pushsection __kvm_ex_table, \"a\"\n" \
80 + " .align 3\n" \
81 + " .long (" #from " - .), (" #to " - .)\n" \
82 + " .popsection\n"
83 +
84 +
85 +#define __kvm_at(at_op, addr) \
86 +( { \
87 + int __kvm_at_err = 0; \
88 + u64 spsr, elr; \
89 + asm volatile( \
90 + " mrs %1, spsr_el2\n" \
91 + " mrs %2, elr_el2\n" \
92 + "1: at "at_op", %3\n" \
93 + " isb\n" \
94 + " b 9f\n" \
95 + "2: msr spsr_el2, %1\n" \
96 + " msr elr_el2, %2\n" \
97 + " mov %w0, %4\n" \
98 + "9:\n" \
99 + __KVM_EXTABLE(1b, 2b) \
100 + : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \
101 + : "r" (addr), "i" (-EFAULT)); \
102 + __kvm_at_err; \
103 +} )
104 +
105 +
106 #else /* __ASSEMBLY__ */
107
108 .macro hyp_adr_this_cpu reg, sym, tmp
109 @@ -106,6 +134,21 @@ extern u32 __init_stage2_translation(void);
110 kern_hyp_va \vcpu
111 .endm
112
113 +/*
114 + * KVM extable for unexpected exceptions.
115 + * In the same format _asm_extable, but output to a different section so that
116 + * it can be mapped to EL2. The KVM version is not sorted. The caller must
117 + * ensure:
118 + * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
119 + * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
120 + */
121 +.macro _kvm_extable, from, to
122 + .pushsection __kvm_ex_table, "a"
123 + .align 3
124 + .long (\from - .), (\to - .)
125 + .popsection
126 +.endm
127 +
128 #endif
129
130 #endif /* __ARM_KVM_ASM_H__ */
131 diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
132 index 6a584558b29d2..fa3ffad50a61c 100644
133 --- a/arch/arm64/kernel/vmlinux.lds.S
134 +++ b/arch/arm64/kernel/vmlinux.lds.S
135 @@ -23,6 +23,13 @@ ENTRY(_text)
136
137 jiffies = jiffies_64;
138
139 +
140 +#define HYPERVISOR_EXTABLE \
141 + . = ALIGN(SZ_8); \
142 + VMLINUX_SYMBOL(__start___kvm_ex_table) = .; \
143 + *(__kvm_ex_table) \
144 + VMLINUX_SYMBOL(__stop___kvm_ex_table) = .;
145 +
146 #define HYPERVISOR_TEXT \
147 /* \
148 * Align to 4 KB so that \
149 @@ -38,6 +45,7 @@ jiffies = jiffies_64;
150 VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \
151 VMLINUX_SYMBOL(__hyp_text_start) = .; \
152 *(.hyp.text) \
153 + HYPERVISOR_EXTABLE \
154 VMLINUX_SYMBOL(__hyp_text_end) = .;
155
156 #define IDMAP_TEXT \
157 diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
158 index a360ac6e89e9d..4e0eac361f87c 100644
159 --- a/arch/arm64/kvm/hyp/entry.S
160 +++ b/arch/arm64/kvm/hyp/entry.S
161 @@ -17,6 +17,7 @@
162
163 #include <linux/linkage.h>
164
165 +#include <asm/alternative.h>
166 #include <asm/asm-offsets.h>
167 #include <asm/assembler.h>
168 #include <asm/fpsimdmacros.h>
169 @@ -62,6 +63,15 @@ ENTRY(__guest_enter)
170 // Store the host regs
171 save_callee_saved_regs x1
172
173 + // Now the host state is stored if we have a pending RAS SError it must
174 + // affect the host. If any asynchronous exception is pending we defer
175 + // the guest entry.
176 + mrs x1, isr_el1
177 + cbz x1, 1f
178 + mov x0, #ARM_EXCEPTION_IRQ
179 + ret
180 +
181 +1:
182 add x18, x0, #VCPU_CONTEXT
183
184 // Restore guest regs x0-x17
185 @@ -135,18 +145,22 @@ ENTRY(__guest_exit)
186 // This is our single instruction exception window. A pending
187 // SError is guaranteed to occur at the earliest when we unmask
188 // it, and at the latest just after the ISB.
189 - .global abort_guest_exit_start
190 abort_guest_exit_start:
191
192 isb
193
194 - .global abort_guest_exit_end
195 abort_guest_exit_end:
196 + msr daifset, #4 // Mask aborts
197 + ret
198 +
199 + _kvm_extable abort_guest_exit_start, 9997f
200 + _kvm_extable abort_guest_exit_end, 9997f
201 +9997:
202 + msr daifset, #4 // Mask aborts
203 + mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
204
205 - // If the exception took place, restore the EL1 exception
206 - // context so that we can report some information.
207 - // Merge the exception code with the SError pending bit.
208 - tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
209 + // restore the EL1 exception context so that we can report some
210 + // information. Merge the exception code with the SError pending bit.
211 msr elr_el2, x2
212 msr esr_el2, x3
213 msr spsr_el2, x4
214 diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
215 index bf4988f9dae8f..7ced1fb93d077 100644
216 --- a/arch/arm64/kvm/hyp/hyp-entry.S
217 +++ b/arch/arm64/kvm/hyp/hyp-entry.S
218 @@ -25,6 +25,30 @@
219 #include <asm/kvm_asm.h>
220 #include <asm/kvm_mmu.h>
221
222 +.macro save_caller_saved_regs_vect
223 + stp x0, x1, [sp, #-16]!
224 + stp x2, x3, [sp, #-16]!
225 + stp x4, x5, [sp, #-16]!
226 + stp x6, x7, [sp, #-16]!
227 + stp x8, x9, [sp, #-16]!
228 + stp x10, x11, [sp, #-16]!
229 + stp x12, x13, [sp, #-16]!
230 + stp x14, x15, [sp, #-16]!
231 + stp x16, x17, [sp, #-16]!
232 +.endm
233 +
234 +.macro restore_caller_saved_regs_vect
235 + ldp x16, x17, [sp], #16
236 + ldp x14, x15, [sp], #16
237 + ldp x12, x13, [sp], #16
238 + ldp x10, x11, [sp], #16
239 + ldp x8, x9, [sp], #16
240 + ldp x6, x7, [sp], #16
241 + ldp x4, x5, [sp], #16
242 + ldp x2, x3, [sp], #16
243 + ldp x0, x1, [sp], #16
244 +.endm
245 +
246 .text
247 .pushsection .hyp.text, "ax"
248
249 @@ -177,26 +201,24 @@ el1_error:
250 mov x0, #ARM_EXCEPTION_EL1_SERROR
251 b __guest_exit
252
253 +el2_sync:
254 + save_caller_saved_regs_vect
255 + stp x29, x30, [sp, #-16]!
256 + bl kvm_unexpected_el2_exception
257 + ldp x29, x30, [sp], #16
258 + restore_caller_saved_regs_vect
259 +
260 + eret
261 +
262 el2_error:
263 - /*
264 - * Only two possibilities:
265 - * 1) Either we come from the exit path, having just unmasked
266 - * PSTATE.A: change the return code to an EL2 fault, and
267 - * carry on, as we're already in a sane state to handle it.
268 - * 2) Or we come from anywhere else, and that's a bug: we panic.
269 - *
270 - * For (1), x0 contains the original return code and x1 doesn't
271 - * contain anything meaningful at that stage. We can reuse them
272 - * as temp registers.
273 - * For (2), who cares?
274 - */
275 - mrs x0, elr_el2
276 - adr x1, abort_guest_exit_start
277 - cmp x0, x1
278 - adr x1, abort_guest_exit_end
279 - ccmp x0, x1, #4, ne
280 - b.ne __hyp_panic
281 - mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
282 + save_caller_saved_regs_vect
283 + stp x29, x30, [sp, #-16]!
284 +
285 + bl kvm_unexpected_el2_exception
286 +
287 + ldp x29, x30, [sp], #16
288 + restore_caller_saved_regs_vect
289 +
290 eret
291
292 ENTRY(__hyp_do_panic)
293 @@ -225,7 +247,6 @@ ENDPROC(\label)
294 invalid_vector el2t_irq_invalid
295 invalid_vector el2t_fiq_invalid
296 invalid_vector el2t_error_invalid
297 - invalid_vector el2h_sync_invalid
298 invalid_vector el2h_irq_invalid
299 invalid_vector el2h_fiq_invalid
300 invalid_vector el1_sync_invalid
301 @@ -242,7 +263,7 @@ ENTRY(__kvm_hyp_vector)
302 ventry el2t_fiq_invalid // FIQ EL2t
303 ventry el2t_error_invalid // Error EL2t
304
305 - ventry el2h_sync_invalid // Synchronous EL2h
306 + ventry el2_sync // Synchronous EL2h
307 ventry el2h_irq_invalid // IRQ EL2h
308 ventry el2h_fiq_invalid // FIQ EL2h
309 ventry el2_error // Error EL2h
310 diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
311 index ed7e3a288b4e5..0a2f37bceab0a 100644
312 --- a/arch/arm64/kvm/hyp/switch.c
313 +++ b/arch/arm64/kvm/hyp/switch.c
314 @@ -25,6 +25,10 @@
315 #include <asm/kvm_asm.h>
316 #include <asm/kvm_emulate.h>
317 #include <asm/kvm_hyp.h>
318 +#include <asm/uaccess.h>
319 +
320 +extern struct exception_table_entry __start___kvm_ex_table;
321 +extern struct exception_table_entry __stop___kvm_ex_table;
322
323 static bool __hyp_text __fpsimd_enabled_nvhe(void)
324 {
325 @@ -202,10 +206,10 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
326 * saved the guest context yet, and we may return early...
327 */
328 par = read_sysreg(par_el1);
329 - asm volatile("at s1e1r, %0" : : "r" (far));
330 - isb();
331 -
332 - tmp = read_sysreg(par_el1);
333 + if (!__kvm_at("s1e1r", far))
334 + tmp = read_sysreg(par_el1);
335 + else
336 + tmp = 1; /* back to the guest */
337 write_sysreg(par, par_el1);
338
339 if (unlikely(tmp & 1))
340 @@ -454,3 +458,30 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
341
342 unreachable();
343 }
344 +
345 +asmlinkage void __hyp_text kvm_unexpected_el2_exception(void)
346 +{
347 + unsigned long addr, fixup;
348 + struct kvm_cpu_context *host_ctxt;
349 + struct exception_table_entry *entry, *end;
350 + unsigned long elr_el2 = read_sysreg(elr_el2);
351 +
352 + entry = hyp_symbol_addr(__start___kvm_ex_table);
353 + end = hyp_symbol_addr(__stop___kvm_ex_table);
354 + host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state);
355 +
356 + while (entry < end) {
357 + addr = (unsigned long)&entry->insn + entry->insn;
358 + fixup = (unsigned long)&entry->fixup + entry->fixup;
359 +
360 + if (addr != elr_el2) {
361 + entry++;
362 + continue;
363 + }
364 +
365 + write_sysreg(fixup, elr_el2);
366 + return;
367 + }
368 +
369 + hyp_panic(host_ctxt);
370 +}
371 diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
372 index 416d53f587e7c..6e36717527754 100644
373 --- a/arch/mips/kernel/smp-bmips.c
374 +++ b/arch/mips/kernel/smp-bmips.c
375 @@ -236,6 +236,8 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
376 */
377 static void bmips_init_secondary(void)
378 {
379 + bmips_cpu_setup();
380 +
381 switch (current_cpu_type()) {
382 case CPU_BMIPS4350:
383 case CPU_BMIPS4380:
384 diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
385 index 0ff379f0cc4a7..cb877f86f5fc9 100644
386 --- a/arch/mips/mm/c-r4k.c
387 +++ b/arch/mips/mm/c-r4k.c
388 @@ -1746,7 +1746,11 @@ static void setup_scache(void)
389 printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
390 scache_size >> 10,
391 way_string[c->scache.ways], c->scache.linesz);
392 +
393 + if (current_cpu_type() == CPU_BMIPS5000)
394 + c->options |= MIPS_CPU_INCLUSIVE_CACHES;
395 }
396 +
397 #else
398 if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
399 panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
400 diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
401 index 90240dfef76a1..5889c1ed84c46 100644
402 --- a/arch/s390/include/asm/percpu.h
403 +++ b/arch/s390/include/asm/percpu.h
404 @@ -28,7 +28,7 @@
405 typedef typeof(pcp) pcp_op_T__; \
406 pcp_op_T__ old__, new__, prev__; \
407 pcp_op_T__ *ptr__; \
408 - preempt_disable(); \
409 + preempt_disable_notrace(); \
410 ptr__ = raw_cpu_ptr(&(pcp)); \
411 prev__ = *ptr__; \
412 do { \
413 @@ -36,7 +36,7 @@
414 new__ = old__ op (val); \
415 prev__ = cmpxchg(ptr__, old__, new__); \
416 } while (prev__ != old__); \
417 - preempt_enable(); \
418 + preempt_enable_notrace(); \
419 new__; \
420 })
421
422 @@ -67,7 +67,7 @@
423 typedef typeof(pcp) pcp_op_T__; \
424 pcp_op_T__ val__ = (val); \
425 pcp_op_T__ old__, *ptr__; \
426 - preempt_disable(); \
427 + preempt_disable_notrace(); \
428 ptr__ = raw_cpu_ptr(&(pcp)); \
429 if (__builtin_constant_p(val__) && \
430 ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
431 @@ -83,7 +83,7 @@
432 : [val__] "d" (val__) \
433 : "cc"); \
434 } \
435 - preempt_enable(); \
436 + preempt_enable_notrace(); \
437 }
438
439 #define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
440 @@ -94,14 +94,14 @@
441 typedef typeof(pcp) pcp_op_T__; \
442 pcp_op_T__ val__ = (val); \
443 pcp_op_T__ old__, *ptr__; \
444 - preempt_disable(); \
445 + preempt_disable_notrace(); \
446 ptr__ = raw_cpu_ptr(&(pcp)); \
447 asm volatile( \
448 op " %[old__],%[val__],%[ptr__]\n" \
449 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
450 : [val__] "d" (val__) \
451 : "cc"); \
452 - preempt_enable(); \
453 + preempt_enable_notrace(); \
454 old__ + val__; \
455 })
456
457 @@ -113,14 +113,14 @@
458 typedef typeof(pcp) pcp_op_T__; \
459 pcp_op_T__ val__ = (val); \
460 pcp_op_T__ old__, *ptr__; \
461 - preempt_disable(); \
462 + preempt_disable_notrace(); \
463 ptr__ = raw_cpu_ptr(&(pcp)); \
464 asm volatile( \
465 op " %[old__],%[val__],%[ptr__]\n" \
466 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
467 : [val__] "d" (val__) \
468 : "cc"); \
469 - preempt_enable(); \
470 + preempt_enable_notrace(); \
471 }
472
473 #define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
474 @@ -135,10 +135,10 @@
475 typedef typeof(pcp) pcp_op_T__; \
476 pcp_op_T__ ret__; \
477 pcp_op_T__ *ptr__; \
478 - preempt_disable(); \
479 + preempt_disable_notrace(); \
480 ptr__ = raw_cpu_ptr(&(pcp)); \
481 ret__ = cmpxchg(ptr__, oval, nval); \
482 - preempt_enable(); \
483 + preempt_enable_notrace(); \
484 ret__; \
485 })
486
487 @@ -151,10 +151,10 @@
488 ({ \
489 typeof(pcp) *ptr__; \
490 typeof(pcp) ret__; \
491 - preempt_disable(); \
492 + preempt_disable_notrace(); \
493 ptr__ = raw_cpu_ptr(&(pcp)); \
494 ret__ = xchg(ptr__, nval); \
495 - preempt_enable(); \
496 + preempt_enable_notrace(); \
497 ret__; \
498 })
499
500 @@ -170,11 +170,11 @@
501 typeof(pcp1) *p1__; \
502 typeof(pcp2) *p2__; \
503 int ret__; \
504 - preempt_disable(); \
505 + preempt_disable_notrace(); \
506 p1__ = raw_cpu_ptr(&(pcp1)); \
507 p2__ = raw_cpu_ptr(&(pcp2)); \
508 ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
509 - preempt_enable(); \
510 + preempt_enable_notrace(); \
511 ret__; \
512 })
513
514 diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
515 index ede04cca30dde..82fb5102d8244 100644
516 --- a/arch/xtensa/platforms/iss/simdisk.c
517 +++ b/arch/xtensa/platforms/iss/simdisk.c
518 @@ -21,7 +21,6 @@
519 #include <platform/simcall.h>
520
521 #define SIMDISK_MAJOR 240
522 -#define SECTOR_SHIFT 9
523 #define SIMDISK_MINORS 1
524 #define MAX_SIMDISK_COUNT 10
525
526 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
527 index 46bf7e9d00aba..2aa10cd4c5b75 100644
528 --- a/drivers/ata/libata-core.c
529 +++ b/drivers/ata/libata-core.c
530 @@ -4371,9 +4371,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
531 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
532 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
533
534 - /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
535 - SD7SN6S256G and SD8SN8U256G */
536 - { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
537 + /* Sandisk SD7/8/9s lock up hard on large trims */
538 + { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, },
539
540 /* devices which puke on READ_NATIVE_MAX */
541 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
542 diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
543 index f4b38adb9d8a7..76ba83e245c23 100644
544 --- a/drivers/ata/libata-scsi.c
545 +++ b/drivers/ata/libata-scsi.c
546 @@ -2314,6 +2314,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
547
548 static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
549 {
550 + struct ata_device *dev = args->dev;
551 u16 min_io_sectors;
552
553 rbuf[1] = 0xb0;
554 @@ -2339,7 +2340,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
555 * with the unmap bit set.
556 */
557 if (ata_id_has_trim(args->id)) {
558 - put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]);
559 + u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;
560 +
561 + if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M)
562 + max_blocks = 128 << (20 - SECTOR_SHIFT);
563 +
564 + put_unaligned_be64(max_blocks, &rbuf[36]);
565 put_unaligned_be32(1, &rbuf[28]);
566 }
567
568 diff --git a/drivers/block/brd.c b/drivers/block/brd.c
569 index 7e35574a17dfc..9d81ac8b4512a 100644
570 --- a/drivers/block/brd.c
571 +++ b/drivers/block/brd.c
572 @@ -25,7 +25,6 @@
573
574 #include <asm/uaccess.h>
575
576 -#define SECTOR_SHIFT 9
577 #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
578 #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
579
580 diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
581 index 8a93ca4d6840c..19f336752ad75 100644
582 --- a/drivers/block/rbd.c
583 +++ b/drivers/block/rbd.c
584 @@ -50,15 +50,6 @@
585
586 #define RBD_DEBUG /* Activate rbd_assert() calls */
587
588 -/*
589 - * The basic unit of block I/O is a sector. It is interpreted in a
590 - * number of contexts in Linux (blk, bio, genhd), but the default is
591 - * universally 512 bytes. These symbols are just slightly more
592 - * meaningful than the bare numbers they represent.
593 - */
594 -#define SECTOR_SHIFT 9
595 -#define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
596 -
597 /*
598 * Increment the given counter and return its updated value.
599 * If the counter is already 0 it will not be incremented.
600 diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
601 index 74fcf10da3749..6d2475a39e84b 100644
602 --- a/drivers/block/zram/zram_drv.h
603 +++ b/drivers/block/zram/zram_drv.h
604 @@ -37,7 +37,6 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
605
606 /*-- End of configurable params */
607
608 -#define SECTOR_SHIFT 9
609 #define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
610 #define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)
611 #define ZRAM_LOGICAL_BLOCK_SHIFT 12
612 diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
613 index a32cd71f94bbe..cb72b8c915c73 100644
614 --- a/drivers/dma/at_hdmac.c
615 +++ b/drivers/dma/at_hdmac.c
616 @@ -1810,6 +1810,8 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
617 return NULL;
618
619 dmac_pdev = of_find_device_by_node(dma_spec->np);
620 + if (!dmac_pdev)
621 + return NULL;
622
623 dma_cap_zero(mask);
624 dma_cap_set(DMA_SLAVE, mask);
625 diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
626 index faae0bfe1109e..757cf48c1c5ed 100644
627 --- a/drivers/dma/of-dma.c
628 +++ b/drivers/dma/of-dma.c
629 @@ -72,12 +72,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
630 return NULL;
631
632 chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
633 - if (chan) {
634 - chan->router = ofdma->dma_router;
635 - chan->route_data = route_data;
636 - } else {
637 + if (IS_ERR_OR_NULL(chan)) {
638 ofdma->dma_router->route_free(ofdma->dma_router->dev,
639 route_data);
640 + } else {
641 + chan->router = ofdma->dma_router;
642 + chan->route_data = route_data;
643 }
644
645 /*
646 diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
647 index 57b375d0de292..16c08846ea0e1 100644
648 --- a/drivers/dma/pl330.c
649 +++ b/drivers/dma/pl330.c
650 @@ -2677,6 +2677,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
651 while (burst != (1 << desc->rqcfg.brst_size))
652 desc->rqcfg.brst_size++;
653
654 + desc->rqcfg.brst_len = get_burst_len(desc, len);
655 /*
656 * If burst size is smaller than bus width then make sure we only
657 * transfer one at a time to avoid a burst stradling an MFIFO entry.
658 @@ -2684,7 +2685,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
659 if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
660 desc->rqcfg.brst_len = 1;
661
662 - desc->rqcfg.brst_len = get_burst_len(desc, len);
663 desc->bytes_requested = len;
664
665 desc->txd.flags = flags;
666 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
667 index b4b9d81525369..d99c9ed5dfe39 100644
668 --- a/drivers/hid/hid-core.c
669 +++ b/drivers/hid/hid-core.c
670 @@ -1406,6 +1406,17 @@ static void hid_output_field(const struct hid_device *hid,
671 }
672 }
673
674 +/*
675 + * Compute the size of a report.
676 + */
677 +static size_t hid_compute_report_size(struct hid_report *report)
678 +{
679 + if (report->size)
680 + return ((report->size - 1) >> 3) + 1;
681 +
682 + return 0;
683 +}
684 +
685 /*
686 * Create a report. 'data' has to be allocated using
687 * hid_alloc_report_buf() so that it has proper size.
688 @@ -1418,7 +1429,7 @@ void hid_output_report(struct hid_report *report, __u8 *data)
689 if (report->id > 0)
690 *data++ = report->id;
691
692 - memset(data, 0, ((report->size - 1) >> 3) + 1);
693 + memset(data, 0, hid_compute_report_size(report));
694 for (n = 0; n < report->maxfield; n++)
695 hid_output_field(report->device, report->field[n], data);
696 }
697 @@ -1545,7 +1556,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
698 csize--;
699 }
700
701 - rsize = ((report->size - 1) >> 3) + 1;
702 + rsize = hid_compute_report_size(report);
703
704 if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
705 rsize = HID_MAX_BUFFER_SIZE - 1;
706 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
707 index 26e9677309972..5e1a51ba6500f 100644
708 --- a/drivers/hid/hid-input.c
709 +++ b/drivers/hid/hid-input.c
710 @@ -1026,6 +1026,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
711 }
712
713 mapped:
714 + /* Mapping failed, bail out */
715 + if (!bit)
716 + return;
717 +
718 if (device->driver->input_mapped &&
719 device->driver->input_mapped(device, hidinput, field, usage,
720 &bit, &max) < 0) {
721 diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
722 index 1207102823de3..258a50ec15727 100644
723 --- a/drivers/hid/hid-multitouch.c
724 +++ b/drivers/hid/hid-multitouch.c
725 @@ -567,6 +567,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
726 case HID_UP_BUTTON:
727 code = BTN_MOUSE + ((usage->hid - 1) & HID_USAGE);
728 hid_map_usage(hi, usage, bit, max, EV_KEY, code);
729 + if (!*bit)
730 + return -1;
731 input_set_capability(hi->input, EV_KEY, code);
732 return 1;
733
734 diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
735 index 0af7fd311979d..587fc5c686b3c 100644
736 --- a/drivers/hwmon/applesmc.c
737 +++ b/drivers/hwmon/applesmc.c
738 @@ -758,15 +758,18 @@ static ssize_t applesmc_light_show(struct device *dev,
739 }
740
741 ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length);
742 + if (ret)
743 + goto out;
744 /* newer macbooks report a single 10-bit bigendian value */
745 if (data_length == 10) {
746 left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2;
747 goto out;
748 }
749 left = buffer[2];
750 +
751 + ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
752 if (ret)
753 goto out;
754 - ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
755 right = buffer[2];
756
757 out:
758 @@ -814,12 +817,11 @@ static ssize_t applesmc_show_fan_speed(struct device *dev,
759 sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
760
761 ret = applesmc_read_key(newkey, buffer, 2);
762 - speed = ((buffer[0] << 8 | buffer[1]) >> 2);
763 -
764 if (ret)
765 return ret;
766 - else
767 - return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
768 +
769 + speed = ((buffer[0] << 8 | buffer[1]) >> 2);
770 + return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
771 }
772
773 static ssize_t applesmc_store_fan_speed(struct device *dev,
774 @@ -854,12 +856,11 @@ static ssize_t applesmc_show_fan_manual(struct device *dev,
775 u8 buffer[2];
776
777 ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
778 - manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
779 -
780 if (ret)
781 return ret;
782 - else
783 - return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
784 +
785 + manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
786 + return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
787 }
788
789 static ssize_t applesmc_store_fan_manual(struct device *dev,
790 @@ -875,10 +876,11 @@ static ssize_t applesmc_store_fan_manual(struct device *dev,
791 return -EINVAL;
792
793 ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
794 - val = (buffer[0] << 8 | buffer[1]);
795 if (ret)
796 goto out;
797
798 + val = (buffer[0] << 8 | buffer[1]);
799 +
800 if (input)
801 val = val | (0x01 << to_index(attr));
802 else
803 @@ -954,13 +956,12 @@ static ssize_t applesmc_key_count_show(struct device *dev,
804 u32 count;
805
806 ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4);
807 - count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
808 - ((u32)buffer[2]<<8) + buffer[3];
809 -
810 if (ret)
811 return ret;
812 - else
813 - return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
814 +
815 + count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
816 + ((u32)buffer[2]<<8) + buffer[3];
817 + return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
818 }
819
820 static ssize_t applesmc_key_at_index_read_show(struct device *dev,
821 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
822 index 883fe2cdd42cc..6e3b3a5a3c36f 100644
823 --- a/drivers/ide/ide-cd.c
824 +++ b/drivers/ide/ide-cd.c
825 @@ -704,7 +704,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
826 struct request_queue *q = drive->queue;
827 int write = rq_data_dir(rq) == WRITE;
828 unsigned short sectors_per_frame =
829 - queue_logical_block_size(q) >> SECTOR_BITS;
830 + queue_logical_block_size(q) >> SECTOR_SHIFT;
831
832 ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, "
833 "secs_per_frame: %u",
834 @@ -900,7 +900,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
835 * end up being bogus.
836 */
837 blocklen = be32_to_cpu(capbuf.blocklen);
838 - blocklen = (blocklen >> SECTOR_BITS) << SECTOR_BITS;
839 + blocklen = (blocklen >> SECTOR_SHIFT) << SECTOR_SHIFT;
840 switch (blocklen) {
841 case 512:
842 case 1024:
843 @@ -916,7 +916,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
844 }
845
846 *capacity = 1 + be32_to_cpu(capbuf.lba);
847 - *sectors_per_frame = blocklen >> SECTOR_BITS;
848 + *sectors_per_frame = blocklen >> SECTOR_SHIFT;
849
850 ide_debug_log(IDE_DBG_PROBE, "cap: %lu, sectors_per_frame: %lu",
851 *capacity, *sectors_per_frame);
852 @@ -993,7 +993,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
853 drive->probed_capacity = toc->capacity * sectors_per_frame;
854
855 blk_queue_logical_block_size(drive->queue,
856 - sectors_per_frame << SECTOR_BITS);
857 + sectors_per_frame << SECTOR_SHIFT);
858
859 /* first read just the header, so we know how long the TOC is */
860 stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
861 diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
862 index 1efc936f5b667..7c6d017e84e9e 100644
863 --- a/drivers/ide/ide-cd.h
864 +++ b/drivers/ide/ide-cd.h
865 @@ -20,11 +20,7 @@
866
867 /************************************************************************/
868
869 -#define SECTOR_BITS 9
870 -#ifndef SECTOR_SIZE
871 -#define SECTOR_SIZE (1 << SECTOR_BITS)
872 -#endif
873 -#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_BITS)
874 +#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_SHIFT)
875 #define SECTOR_BUFFER_SIZE (CD_FRAMESIZE * 32)
876
877 /* Capabilities Page size including 8 bytes of Mode Page Header */
878 diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
879 index ac596928f6b40..ce125ec23d2a5 100644
880 --- a/drivers/iommu/intel_irq_remapping.c
881 +++ b/drivers/iommu/intel_irq_remapping.c
882 @@ -486,12 +486,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
883
884 /* Enable interrupt-remapping */
885 iommu->gcmd |= DMA_GCMD_IRE;
886 - iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */
887 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
888 -
889 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
890 readl, (sts & DMA_GSTS_IRES), sts);
891
892 + /* Block compatibility-format MSIs */
893 + if (sts & DMA_GSTS_CFIS) {
894 + iommu->gcmd &= ~DMA_GCMD_CFI;
895 + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
896 + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
897 + readl, !(sts & DMA_GSTS_CFIS), sts);
898 + }
899 +
900 /*
901 * With CFI clear in the Global Command register, we should be
902 * protected from dangerous (i.e. compatibility) interrupts
903 diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
904 index 62eb4b7caff33..a9208ab127080 100644
905 --- a/drivers/md/dm-cache-metadata.c
906 +++ b/drivers/md/dm-cache-metadata.c
907 @@ -508,12 +508,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
908 CACHE_MAX_CONCURRENT_LOCKS);
909 if (IS_ERR(cmd->bm)) {
910 DMERR("could not create block manager");
911 - return PTR_ERR(cmd->bm);
912 + r = PTR_ERR(cmd->bm);
913 + cmd->bm = NULL;
914 + return r;
915 }
916
917 r = __open_or_format_metadata(cmd, may_format_device);
918 - if (r)
919 + if (r) {
920 dm_block_manager_destroy(cmd->bm);
921 + cmd->bm = NULL;
922 + }
923
924 return r;
925 }
926 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
927 index d20f4023f6c12..b5bf2ecfaf913 100644
928 --- a/drivers/md/dm-thin-metadata.c
929 +++ b/drivers/md/dm-thin-metadata.c
930 @@ -700,12 +700,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
931 THIN_MAX_CONCURRENT_LOCKS);
932 if (IS_ERR(pmd->bm)) {
933 DMERR("could not create block manager");
934 - return PTR_ERR(pmd->bm);
935 + r = PTR_ERR(pmd->bm);
936 + pmd->bm = NULL;
937 + return r;
938 }
939
940 r = __open_or_format_metadata(pmd, format_device);
941 - if (r)
942 + if (r) {
943 dm_block_manager_destroy(pmd->bm);
944 + pmd->bm = NULL;
945 + }
946
947 return r;
948 }
949 diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
950 index a22403c688c95..337cfce78aef2 100644
951 --- a/drivers/net/ethernet/arc/emac_mdio.c
952 +++ b/drivers/net/ethernet/arc/emac_mdio.c
953 @@ -152,6 +152,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
954 if (IS_ERR(data->reset_gpio)) {
955 error = PTR_ERR(data->reset_gpio);
956 dev_err(priv->dev, "Failed to request gpio: %d\n", error);
957 + mdiobus_free(bus);
958 return error;
959 }
960
961 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
962 index 421cbba9a3bc8..dc34cfa2a58fc 100644
963 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
964 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
965 @@ -5589,14 +5589,14 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
966 }
967 }
968
969 - bnxt_enable_napi(bp);
970 -
971 rc = bnxt_init_nic(bp, irq_re_init);
972 if (rc) {
973 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
974 - goto open_err;
975 + goto open_err_irq;
976 }
977
978 + bnxt_enable_napi(bp);
979 +
980 if (link_re_init) {
981 mutex_lock(&bp->link_lock);
982 rc = bnxt_update_phy_setting(bp);
983 @@ -5618,9 +5618,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
984
985 return 0;
986
987 -open_err:
988 - bnxt_disable_napi(bp);
989 -
990 open_err_irq:
991 bnxt_del_napi(bp);
992
993 @@ -7085,6 +7082,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
994
995 bnxt_parse_log_pcie_link(bp);
996
997 + pci_save_state(pdev);
998 return 0;
999
1000 init_err:
1001 @@ -7158,6 +7156,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
1002 "Cannot re-enable PCI device after reset.\n");
1003 } else {
1004 pci_set_master(pdev);
1005 + pci_restore_state(pdev);
1006 + pci_save_state(pdev);
1007
1008 if (netif_running(netdev))
1009 err = bnxt_open(netdev);
1010 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1011 index 427d4dbc97354..ac03bba10e4fd 100644
1012 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1013 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1014 @@ -1457,6 +1457,9 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
1015 if (rc != 0)
1016 return rc;
1017
1018 + if (!dir_entries || !entry_length)
1019 + return -EIO;
1020 +
1021 /* Insert 2 bytes of directory info (count and size of entries) */
1022 if (len < 2)
1023 return -EINVAL;
1024 diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1025 index 5790b35064a8d..2db6102ed5848 100644
1026 --- a/drivers/net/ethernet/broadcom/tg3.c
1027 +++ b/drivers/net/ethernet/broadcom/tg3.c
1028 @@ -7201,8 +7201,8 @@ static inline void tg3_reset_task_schedule(struct tg3 *tp)
1029
1030 static inline void tg3_reset_task_cancel(struct tg3 *tp)
1031 {
1032 - cancel_work_sync(&tp->reset_task);
1033 - tg3_flag_clear(tp, RESET_TASK_PENDING);
1034 + if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
1035 + cancel_work_sync(&tp->reset_task);
1036 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
1037 }
1038
1039 @@ -11174,18 +11174,27 @@ static void tg3_reset_task(struct work_struct *work)
1040
1041 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
1042 err = tg3_init_hw(tp, true);
1043 - if (err)
1044 + if (err) {
1045 + tg3_full_unlock(tp);
1046 + tp->irq_sync = 0;
1047 + tg3_napi_enable(tp);
1048 + /* Clear this flag so that tg3_reset_task_cancel() will not
1049 + * call cancel_work_sync() and wait forever.
1050 + */
1051 + tg3_flag_clear(tp, RESET_TASK_PENDING);
1052 + dev_close(tp->dev);
1053 goto out;
1054 + }
1055
1056 tg3_netif_start(tp);
1057
1058 -out:
1059 tg3_full_unlock(tp);
1060
1061 if (!err)
1062 tg3_phy_start(tp);
1063
1064 tg3_flag_clear(tp, RESET_TASK_PENDING);
1065 +out:
1066 rtnl_unlock();
1067 }
1068
1069 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1070 index 24a815997ec57..796f81106b432 100644
1071 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1072 +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1073 @@ -1990,8 +1990,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
1074 priv->enet_ver = AE_VERSION_1;
1075 else if (acpi_dev_found(hns_enet_acpi_match[1].id))
1076 priv->enet_ver = AE_VERSION_2;
1077 - else
1078 - return -ENXIO;
1079 + else {
1080 + ret = -ENXIO;
1081 + goto out_read_prop_fail;
1082 + }
1083
1084 /* try to find port-idx-in-ae first */
1085 ret = acpi_node_get_property_reference(dev->fwnode,
1086 @@ -2003,7 +2005,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
1087 priv->fwnode = acpi_fwnode_handle(args.adev);
1088 } else {
1089 dev_err(dev, "cannot read cfg data from OF or acpi\n");
1090 - return -ENXIO;
1091 + ret = -ENXIO;
1092 + goto out_read_prop_fail;
1093 }
1094
1095 ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
1096 diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
1097 index 3637474cab8a0..50683693d9fc3 100644
1098 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c
1099 +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
1100 @@ -114,7 +114,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
1101 goto err_out;
1102
1103 for (i = 0; i <= buddy->max_order; ++i) {
1104 - s = BITS_TO_LONGS(1 << (buddy->max_order - i));
1105 + s = BITS_TO_LONGS(1UL << (buddy->max_order - i));
1106 buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
1107 if (!buddy->bits[i]) {
1108 buddy->bits[i] = vzalloc(s * sizeof(long));
1109 diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
1110 index 93d3152752ff4..a5de56bcbac08 100644
1111 --- a/drivers/net/ethernet/renesas/ravb_main.c
1112 +++ b/drivers/net/ethernet/renesas/ravb_main.c
1113 @@ -1336,6 +1336,51 @@ static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
1114 return error;
1115 }
1116
1117 +/* MDIO bus init function */
1118 +static int ravb_mdio_init(struct ravb_private *priv)
1119 +{
1120 + struct platform_device *pdev = priv->pdev;
1121 + struct device *dev = &pdev->dev;
1122 + int error;
1123 +
1124 + /* Bitbang init */
1125 + priv->mdiobb.ops = &bb_ops;
1126 +
1127 + /* MII controller setting */
1128 + priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
1129 + if (!priv->mii_bus)
1130 + return -ENOMEM;
1131 +
1132 + /* Hook up MII support for ethtool */
1133 + priv->mii_bus->name = "ravb_mii";
1134 + priv->mii_bus->parent = dev;
1135 + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1136 + pdev->name, pdev->id);
1137 +
1138 + /* Register MDIO bus */
1139 + error = of_mdiobus_register(priv->mii_bus, dev->of_node);
1140 + if (error)
1141 + goto out_free_bus;
1142 +
1143 + return 0;
1144 +
1145 +out_free_bus:
1146 + free_mdio_bitbang(priv->mii_bus);
1147 + return error;
1148 +}
1149 +
1150 +/* MDIO bus release function */
1151 +static int ravb_mdio_release(struct ravb_private *priv)
1152 +{
1153 + /* Unregister mdio bus */
1154 + mdiobus_unregister(priv->mii_bus);
1155 +
1156 + /* Free bitbang info */
1157 + free_mdio_bitbang(priv->mii_bus);
1158 +
1159 + return 0;
1160 +}
1161 +
1162 /* Network device open function for Ethernet AVB */
1163 static int ravb_open(struct net_device *ndev)
1164 {
1165 @@ -1344,6 +1389,13 @@ static int ravb_open(struct net_device *ndev)
1166 struct device *dev = &pdev->dev;
1167 int error;
1168
1169 + /* MDIO bus init */
1170 + error = ravb_mdio_init(priv);
1171 + if (error) {
1172 + netdev_err(ndev, "failed to initialize MDIO\n");
1173 + return error;
1174 + }
1175 +
1176 napi_enable(&priv->napi[RAVB_BE]);
1177 napi_enable(&priv->napi[RAVB_NC]);
1178
1179 @@ -1421,6 +1473,7 @@ out_free_irq:
1180 out_napi_off:
1181 napi_disable(&priv->napi[RAVB_NC]);
1182 napi_disable(&priv->napi[RAVB_BE]);
1183 + ravb_mdio_release(priv);
1184 return error;
1185 }
1186
1187 @@ -1718,6 +1771,8 @@ static int ravb_close(struct net_device *ndev)
1188 ravb_ring_free(ndev, RAVB_BE);
1189 ravb_ring_free(ndev, RAVB_NC);
1190
1191 + ravb_mdio_release(priv);
1192 +
1193 return 0;
1194 }
1195
1196 @@ -1820,51 +1875,6 @@ static const struct net_device_ops ravb_netdev_ops = {
1197 .ndo_change_mtu = eth_change_mtu,
1198 };
1199
1200 -/* MDIO bus init function */
1201 -static int ravb_mdio_init(struct ravb_private *priv)
1202 -{
1203 - struct platform_device *pdev = priv->pdev;
1204 - struct device *dev = &pdev->dev;
1205 - int error;
1206 -
1207 - /* Bitbang init */
1208 - priv->mdiobb.ops = &bb_ops;
1209 -
1210 - /* MII controller setting */
1211 - priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
1212 - if (!priv->mii_bus)
1213 - return -ENOMEM;
1214 -
1215 - /* Hook up MII support for ethtool */
1216 - priv->mii_bus->name = "ravb_mii";
1217 - priv->mii_bus->parent = dev;
1218 - snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1219 - pdev->name, pdev->id);
1220 -
1221 - /* Register MDIO bus */
1222 - error = of_mdiobus_register(priv->mii_bus, dev->of_node);
1223 - if (error)
1224 - goto out_free_bus;
1225 -
1226 - return 0;
1227 -
1228 -out_free_bus:
1229 - free_mdio_bitbang(priv->mii_bus);
1230 - return error;
1231 -}
1232 -
1233 -/* MDIO bus release function */
1234 -static int ravb_mdio_release(struct ravb_private *priv)
1235 -{
1236 - /* Unregister mdio bus */
1237 - mdiobus_unregister(priv->mii_bus);
1238 -
1239 - /* Free bitbang info */
1240 - free_mdio_bitbang(priv->mii_bus);
1241 -
1242 - return 0;
1243 -}
1244 -
1245 static const struct of_device_id ravb_match_table[] = {
1246 { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
1247 { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
1248 @@ -2069,13 +2079,6 @@ static int ravb_probe(struct platform_device *pdev)
1249 eth_hw_addr_random(ndev);
1250 }
1251
1252 - /* MDIO bus init */
1253 - error = ravb_mdio_init(priv);
1254 - if (error) {
1255 - dev_err(&pdev->dev, "failed to initialize MDIO\n");
1256 - goto out_dma_free;
1257 - }
1258 -
1259 netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
1260 netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
1261
1262 @@ -2095,8 +2098,6 @@ static int ravb_probe(struct platform_device *pdev)
1263 out_napi_del:
1264 netif_napi_del(&priv->napi[RAVB_NC]);
1265 netif_napi_del(&priv->napi[RAVB_BE]);
1266 - ravb_mdio_release(priv);
1267 -out_dma_free:
1268 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
1269 priv->desc_bat_dma);
1270
1271 @@ -2129,7 +2130,6 @@ static int ravb_remove(struct platform_device *pdev)
1272 unregister_netdev(ndev);
1273 netif_napi_del(&priv->napi[RAVB_NC]);
1274 netif_napi_del(&priv->napi[RAVB_BE]);
1275 - ravb_mdio_release(priv);
1276 pm_runtime_disable(&pdev->dev);
1277 free_netdev(ndev);
1278 platform_set_drvdata(pdev, NULL);
1279 diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
1280 index 3dbb0646b0245..541c06c884e55 100644
1281 --- a/drivers/net/usb/asix_common.c
1282 +++ b/drivers/net/usb/asix_common.c
1283 @@ -277,7 +277,7 @@ int asix_read_phy_addr(struct usbnet *dev, int internal)
1284
1285 netdev_dbg(dev->net, "asix_get_phy_addr()\n");
1286
1287 - if (ret < 0) {
1288 + if (ret < 2) {
1289 netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
1290 goto out;
1291 }
1292 diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
1293 index 0b4bdd39106b0..fb18801d0fe7b 100644
1294 --- a/drivers/net/usb/dm9601.c
1295 +++ b/drivers/net/usb/dm9601.c
1296 @@ -624,6 +624,10 @@ static const struct usb_device_id products[] = {
1297 USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */
1298 .driver_info = (unsigned long)&dm9601_info,
1299 },
1300 + {
1301 + USB_DEVICE(0x0586, 0x3427), /* ZyXEL Keenetic Plus DSL xDSL modem */
1302 + .driver_info = (unsigned long)&dm9601_info,
1303 + },
1304 {}, // END
1305 };
1306
1307 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1308 index 254a27295f41d..74c925cd19a93 100644
1309 --- a/drivers/net/usb/qmi_wwan.c
1310 +++ b/drivers/net/usb/qmi_wwan.c
1311 @@ -890,6 +890,7 @@ static const struct usb_device_id products[] = {
1312 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
1313 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
1314 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
1315 + {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */
1316 {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
1317 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
1318 {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
1319 @@ -910,6 +911,8 @@ static const struct usb_device_id products[] = {
1320 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
1321 {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
1322 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
1323 + {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */
1324 + {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */
1325 {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
1326 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
1327 {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
1328 @@ -923,10 +926,13 @@ static const struct usb_device_id products[] = {
1329 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
1330 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
1331 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
1332 + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
1333 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
1334 {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
1335 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
1336 - {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
1337 + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
1338 + {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
1339 + {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
1340 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
1341 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
1342 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
1343 diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
1344 index bd29e598bac18..2a820c1fdfcde 100644
1345 --- a/drivers/nvdimm/nd.h
1346 +++ b/drivers/nvdimm/nd.h
1347 @@ -29,7 +29,6 @@ enum {
1348 * BTT instance
1349 */
1350 ND_MAX_LANES = 256,
1351 - SECTOR_SHIFT = 9,
1352 INT_LBASIZE_ALIGNMENT = 64,
1353 };
1354
1355 diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
1356 index 96ea6c76be6e5..63b87a8472762 100644
1357 --- a/drivers/nvme/target/core.c
1358 +++ b/drivers/nvme/target/core.c
1359 @@ -205,6 +205,9 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
1360
1361 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
1362 {
1363 + if (unlikely(ctrl->kato == 0))
1364 + return;
1365 +
1366 pr_debug("ctrl %d start keep-alive timer for %d secs\n",
1367 ctrl->cntlid, ctrl->kato);
1368
1369 @@ -214,6 +217,9 @@ static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
1370
1371 static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
1372 {
1373 + if (unlikely(ctrl->kato == 0))
1374 + return;
1375 +
1376 pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
1377
1378 cancel_delayed_work_sync(&ctrl->ka_work);
1379 diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
1380 index 3fd8b83ffbf9f..8039c809cef27 100644
1381 --- a/drivers/scsi/gdth.h
1382 +++ b/drivers/scsi/gdth.h
1383 @@ -177,9 +177,6 @@
1384 #define MSG_SIZE 34 /* size of message structure */
1385 #define MSG_REQUEST 0 /* async. event: message */
1386
1387 -/* cacheservice defines */
1388 -#define SECTOR_SIZE 0x200 /* always 512 bytes per sec. */
1389 -
1390 /* DPMEM constants */
1391 #define DPMEM_MAGIC 0xC0FFEE11
1392 #define IC_HEADER_BYTES 48
1393 diff --git a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
1394 index d255d33da9eb3..02e71d461d5c5 100644
1395 --- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
1396 +++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
1397 @@ -49,20 +49,21 @@ static struct temp_sensor_data omap4430_mpu_temp_sensor_data = {
1398
1399 /*
1400 * Temperature values in milli degree celsius
1401 - * ADC code values from 530 to 923
1402 + * ADC code values from 13 to 107, see TRM
1403 + * "18.4.10.2.3 ADC Codes Versus Temperature".
1404 */
1405 static const int
1406 omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = {
1407 - -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000,
1408 - -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000,
1409 - -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000,
1410 - 13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 30000,
1411 - 32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000,
1412 - 48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000,
1413 - 66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000,
1414 - 83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000,
1415 - 100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000,
1416 - 117000, 118000, 120000, 122000, 123000,
1417 + -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000,
1418 + -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000,
1419 + -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000,
1420 + 12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500,
1421 + 30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000,
1422 + 47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000,
1423 + 64000, 66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000,
1424 + 82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000,
1425 + 98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000,
1426 + 115000, 117000, 118500, 120000, 122000, 123500, 125000,
1427 };
1428
1429 /* OMAP4430 data */
1430 diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
1431 index 6f2de3a3356d4..86850082b24b9 100644
1432 --- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
1433 +++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
1434 @@ -67,9 +67,13 @@
1435 * and thresholds for OMAP4430.
1436 */
1437
1438 -/* ADC conversion table limits */
1439 -#define OMAP4430_ADC_START_VALUE 0
1440 -#define OMAP4430_ADC_END_VALUE 127
1441 +/*
1442 + * ADC conversion table limits. Ignore values outside the TRM listed
1443 + * range to avoid bogus thermal shutdowns. See omap4430 TRM chapter
1444 + * "18.4.10.2.3 ADC Codes Versus Temperature".
1445 + */
1446 +#define OMAP4430_ADC_START_VALUE 13
1447 +#define OMAP4430_ADC_END_VALUE 107
1448 /* bandgap clock limits (no control on 4430) */
1449 #define OMAP4430_MAX_FREQ 32768
1450 #define OMAP4430_MIN_FREQ 32768
1451 diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
1452 index c94167d871789..2254c281cc766 100644
1453 --- a/drivers/vfio/pci/vfio_pci.c
1454 +++ b/drivers/vfio/pci/vfio_pci.c
1455 @@ -29,6 +29,7 @@
1456 #include <linux/vfio.h>
1457 #include <linux/vgaarb.h>
1458 #include <linux/nospec.h>
1459 +#include <linux/mm.h>
1460
1461 #include "vfio_pci_private.h"
1462
1463 @@ -181,6 +182,7 @@ no_mmap:
1464
1465 static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
1466 static void vfio_pci_disable(struct vfio_pci_device *vdev);
1467 +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data);
1468
1469 /*
1470 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
1471 @@ -656,6 +658,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
1472 return 0;
1473 }
1474
1475 +struct vfio_devices {
1476 + struct vfio_device **devices;
1477 + int cur_index;
1478 + int max_index;
1479 +};
1480 +
1481 static long vfio_pci_ioctl(void *device_data,
1482 unsigned int cmd, unsigned long arg)
1483 {
1484 @@ -729,7 +737,7 @@ static long vfio_pci_ioctl(void *device_data,
1485 {
1486 void __iomem *io;
1487 size_t size;
1488 - u16 orig_cmd;
1489 + u16 cmd;
1490
1491 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1492 info.flags = 0;
1493 @@ -749,10 +757,7 @@ static long vfio_pci_ioctl(void *device_data,
1494 * Is it really there? Enable memory decode for
1495 * implicit access in pci_map_rom().
1496 */
1497 - pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
1498 - pci_write_config_word(pdev, PCI_COMMAND,
1499 - orig_cmd | PCI_COMMAND_MEMORY);
1500 -
1501 + cmd = vfio_pci_memory_lock_and_enable(vdev);
1502 io = pci_map_rom(pdev, &size);
1503 if (io) {
1504 info.flags = VFIO_REGION_INFO_FLAG_READ;
1505 @@ -760,8 +765,8 @@ static long vfio_pci_ioctl(void *device_data,
1506 } else {
1507 info.size = 0;
1508 }
1509 + vfio_pci_memory_unlock_and_restore(vdev, cmd);
1510
1511 - pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
1512 break;
1513 }
1514 case VFIO_PCI_VGA_REGION_INDEX:
1515 @@ -909,8 +914,16 @@ static long vfio_pci_ioctl(void *device_data,
1516 return ret;
1517
1518 } else if (cmd == VFIO_DEVICE_RESET) {
1519 - return vdev->reset_works ?
1520 - pci_try_reset_function(vdev->pdev) : -EINVAL;
1521 + int ret;
1522 +
1523 + if (!vdev->reset_works)
1524 + return -EINVAL;
1525 +
1526 + vfio_pci_zap_and_down_write_memory_lock(vdev);
1527 + ret = pci_try_reset_function(vdev->pdev);
1528 + up_write(&vdev->memory_lock);
1529 +
1530 + return ret;
1531
1532 } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
1533 struct vfio_pci_hot_reset_info hdr;
1534 @@ -990,8 +1003,9 @@ reset_info_exit:
1535 int32_t *group_fds;
1536 struct vfio_pci_group_entry *groups;
1537 struct vfio_pci_group_info info;
1538 + struct vfio_devices devs = { .cur_index = 0 };
1539 bool slot = false;
1540 - int i, count = 0, ret = 0;
1541 + int i, group_idx, mem_idx = 0, count = 0, ret = 0;
1542
1543 minsz = offsetofend(struct vfio_pci_hot_reset, count);
1544
1545 @@ -1043,9 +1057,9 @@ reset_info_exit:
1546 * user interface and store the group and iommu ID. This
1547 * ensures the group is held across the reset.
1548 */
1549 - for (i = 0; i < hdr.count; i++) {
1550 + for (group_idx = 0; group_idx < hdr.count; group_idx++) {
1551 struct vfio_group *group;
1552 - struct fd f = fdget(group_fds[i]);
1553 + struct fd f = fdget(group_fds[group_idx]);
1554 if (!f.file) {
1555 ret = -EBADF;
1556 break;
1557 @@ -1058,8 +1072,9 @@ reset_info_exit:
1558 break;
1559 }
1560
1561 - groups[i].group = group;
1562 - groups[i].id = vfio_external_user_iommu_id(group);
1563 + groups[group_idx].group = group;
1564 + groups[group_idx].id =
1565 + vfio_external_user_iommu_id(group);
1566 }
1567
1568 kfree(group_fds);
1569 @@ -1078,14 +1093,65 @@ reset_info_exit:
1570 ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
1571 vfio_pci_validate_devs,
1572 &info, slot);
1573 - if (!ret)
1574 - /* User has access, do the reset */
1575 - ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
1576 - pci_try_reset_bus(vdev->pdev->bus);
1577 +
1578 + if (ret)
1579 + goto hot_reset_release;
1580 +
1581 + devs.max_index = count;
1582 + devs.devices = kcalloc(count, sizeof(struct vfio_device *),
1583 + GFP_KERNEL);
1584 + if (!devs.devices) {
1585 + ret = -ENOMEM;
1586 + goto hot_reset_release;
1587 + }
1588 +
1589 + /*
1590 + * We need to get memory_lock for each device, but devices
1591 + * can share mmap_sem, therefore we need to zap and hold
1592 + * the vma_lock for each device, and only then get each
1593 + * memory_lock.
1594 + */
1595 + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
1596 + vfio_pci_try_zap_and_vma_lock_cb,
1597 + &devs, slot);
1598 + if (ret)
1599 + goto hot_reset_release;
1600 +
1601 + for (; mem_idx < devs.cur_index; mem_idx++) {
1602 + struct vfio_pci_device *tmp;
1603 +
1604 + tmp = vfio_device_data(devs.devices[mem_idx]);
1605 +
1606 + ret = down_write_trylock(&tmp->memory_lock);
1607 + if (!ret) {
1608 + ret = -EBUSY;
1609 + goto hot_reset_release;
1610 + }
1611 + mutex_unlock(&tmp->vma_lock);
1612 + }
1613 +
1614 + /* User has access, do the reset */
1615 + ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
1616 + pci_try_reset_bus(vdev->pdev->bus);
1617
1618 hot_reset_release:
1619 - for (i--; i >= 0; i--)
1620 - vfio_group_put_external_user(groups[i].group);
1621 + for (i = 0; i < devs.cur_index; i++) {
1622 + struct vfio_device *device;
1623 + struct vfio_pci_device *tmp;
1624 +
1625 + device = devs.devices[i];
1626 + tmp = vfio_device_data(device);
1627 +
1628 + if (i < mem_idx)
1629 + up_write(&tmp->memory_lock);
1630 + else
1631 + mutex_unlock(&tmp->vma_lock);
1632 + vfio_device_put(device);
1633 + }
1634 + kfree(devs.devices);
1635 +
1636 + for (group_idx--; group_idx >= 0; group_idx--)
1637 + vfio_group_put_external_user(groups[group_idx].group);
1638
1639 kfree(groups);
1640 return ret;
1641 @@ -1144,6 +1210,201 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
1642 return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
1643 }
1644
1645 +/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
1646 +static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
1647 +{
1648 + struct vfio_pci_mmap_vma *mmap_vma, *tmp;
1649 +
1650 + /*
1651 + * Lock ordering:
1652 + * vma_lock is nested under mmap_sem for vm_ops callback paths.
1653 + * The memory_lock semaphore is used by both code paths calling
1654 + * into this function to zap vmas and the vm_ops.fault callback
1655 + * to protect the memory enable state of the device.
1656 + *
1657 + * When zapping vmas we need to maintain the mmap_sem => vma_lock
1658 + * ordering, which requires using vma_lock to walk vma_list to
1659 + * acquire an mm, then dropping vma_lock to get the mmap_sem and
1660 + * reacquiring vma_lock. This logic is derived from similar
1661 + * requirements in uverbs_user_mmap_disassociate().
1662 + *
1663 + * mmap_sem must always be the top-level lock when it is taken.
1664 + * Therefore we can only hold the memory_lock write lock when
1665 + * vma_list is empty, as we'd need to take mmap_sem to clear
1666 + * entries. vma_list can only be guaranteed empty when holding
1667 + * vma_lock, thus memory_lock is nested under vma_lock.
1668 + *
1669 + * This enables the vm_ops.fault callback to acquire vma_lock,
1670 + * followed by memory_lock read lock, while already holding
1671 + * mmap_sem without risk of deadlock.
1672 + */
1673 + while (1) {
1674 + struct mm_struct *mm = NULL;
1675 +
1676 + if (try) {
1677 + if (!mutex_trylock(&vdev->vma_lock))
1678 + return 0;
1679 + } else {
1680 + mutex_lock(&vdev->vma_lock);
1681 + }
1682 + while (!list_empty(&vdev->vma_list)) {
1683 + mmap_vma = list_first_entry(&vdev->vma_list,
1684 + struct vfio_pci_mmap_vma,
1685 + vma_next);
1686 + mm = mmap_vma->vma->vm_mm;
1687 + if (mmget_not_zero(mm))
1688 + break;
1689 +
1690 + list_del(&mmap_vma->vma_next);
1691 + kfree(mmap_vma);
1692 + mm = NULL;
1693 + }
1694 + if (!mm)
1695 + return 1;
1696 + mutex_unlock(&vdev->vma_lock);
1697 +
1698 + if (try) {
1699 + if (!down_read_trylock(&mm->mmap_sem)) {
1700 + mmput(mm);
1701 + return 0;
1702 + }
1703 + } else {
1704 + down_read(&mm->mmap_sem);
1705 + }
1706 + if (mmget_still_valid(mm)) {
1707 + if (try) {
1708 + if (!mutex_trylock(&vdev->vma_lock)) {
1709 + up_read(&mm->mmap_sem);
1710 + mmput(mm);
1711 + return 0;
1712 + }
1713 + } else {
1714 + mutex_lock(&vdev->vma_lock);
1715 + }
1716 + list_for_each_entry_safe(mmap_vma, tmp,
1717 + &vdev->vma_list, vma_next) {
1718 + struct vm_area_struct *vma = mmap_vma->vma;
1719 +
1720 + if (vma->vm_mm != mm)
1721 + continue;
1722 +
1723 + list_del(&mmap_vma->vma_next);
1724 + kfree(mmap_vma);
1725 +
1726 + zap_vma_ptes(vma, vma->vm_start,
1727 + vma->vm_end - vma->vm_start);
1728 + }
1729 + mutex_unlock(&vdev->vma_lock);
1730 + }
1731 + up_read(&mm->mmap_sem);
1732 + mmput(mm);
1733 + }
1734 +}
1735 +
1736 +void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
1737 +{
1738 + vfio_pci_zap_and_vma_lock(vdev, false);
1739 + down_write(&vdev->memory_lock);
1740 + mutex_unlock(&vdev->vma_lock);
1741 +}
1742 +
1743 +u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev)
1744 +{
1745 + u16 cmd;
1746 +
1747 + down_write(&vdev->memory_lock);
1748 + pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
1749 + if (!(cmd & PCI_COMMAND_MEMORY))
1750 + pci_write_config_word(vdev->pdev, PCI_COMMAND,
1751 + cmd | PCI_COMMAND_MEMORY);
1752 +
1753 + return cmd;
1754 +}
1755 +
1756 +void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd)
1757 +{
1758 + pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
1759 + up_write(&vdev->memory_lock);
1760 +}
1761 +
1762 +/* Caller holds vma_lock */
1763 +static int __vfio_pci_add_vma(struct vfio_pci_device *vdev,
1764 + struct vm_area_struct *vma)
1765 +{
1766 + struct vfio_pci_mmap_vma *mmap_vma;
1767 +
1768 + mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
1769 + if (!mmap_vma)
1770 + return -ENOMEM;
1771 +
1772 + mmap_vma->vma = vma;
1773 + list_add(&mmap_vma->vma_next, &vdev->vma_list);
1774 +
1775 + return 0;
1776 +}
1777 +
1778 +/*
1779 + * Zap mmaps on open so that we can fault them in on access and therefore
1780 + * our vma_list only tracks mappings accessed since last zap.
1781 + */
1782 +static void vfio_pci_mmap_open(struct vm_area_struct *vma)
1783 +{
1784 + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
1785 +}
1786 +
1787 +static void vfio_pci_mmap_close(struct vm_area_struct *vma)
1788 +{
1789 + struct vfio_pci_device *vdev = vma->vm_private_data;
1790 + struct vfio_pci_mmap_vma *mmap_vma;
1791 +
1792 + mutex_lock(&vdev->vma_lock);
1793 + list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
1794 + if (mmap_vma->vma == vma) {
1795 + list_del(&mmap_vma->vma_next);
1796 + kfree(mmap_vma);
1797 + break;
1798 + }
1799 + }
1800 + mutex_unlock(&vdev->vma_lock);
1801 +}
1802 +
1803 +static int vfio_pci_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1804 +{
1805 + struct vfio_pci_device *vdev = vma->vm_private_data;
1806 + int ret = VM_FAULT_NOPAGE;
1807 +
1808 + mutex_lock(&vdev->vma_lock);
1809 + down_read(&vdev->memory_lock);
1810 +
1811 + if (!__vfio_pci_memory_enabled(vdev)) {
1812 + ret = VM_FAULT_SIGBUS;
1813 + mutex_unlock(&vdev->vma_lock);
1814 + goto up_out;
1815 + }
1816 +
1817 + if (__vfio_pci_add_vma(vdev, vma)) {
1818 + ret = VM_FAULT_OOM;
1819 + mutex_unlock(&vdev->vma_lock);
1820 + goto up_out;
1821 + }
1822 +
1823 + mutex_unlock(&vdev->vma_lock);
1824 +
1825 + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
1826 + vma->vm_end - vma->vm_start, vma->vm_page_prot))
1827 + ret = VM_FAULT_SIGBUS;
1828 +
1829 +up_out:
1830 + up_read(&vdev->memory_lock);
1831 + return ret;
1832 +}
1833 +
1834 +static const struct vm_operations_struct vfio_pci_mmap_ops = {
1835 + .open = vfio_pci_mmap_open,
1836 + .close = vfio_pci_mmap_close,
1837 + .fault = vfio_pci_mmap_fault,
1838 +};
1839 +
1840 static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
1841 {
1842 struct vfio_pci_device *vdev = device_data;
1843 @@ -1209,8 +1470,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
1844 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1845 vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
1846
1847 - return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
1848 - req_len, vma->vm_page_prot);
1849 + /*
1850 +	 * See remap_pfn_range(), called from vfio_pci_mmap_fault() but we can't
1851 + * change vm_flags within the fault handler. Set them now.
1852 + */
1853 + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1854 + vma->vm_ops = &vfio_pci_mmap_ops;
1855 +
1856 + return 0;
1857 }
1858
1859 static void vfio_pci_request(void *device_data, unsigned int count)
1860 @@ -1268,6 +1535,9 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1861 mutex_init(&vdev->igate);
1862 spin_lock_init(&vdev->irqlock);
1863
1864 + mutex_init(&vdev->vma_lock);
1865 + INIT_LIST_HEAD(&vdev->vma_list);
1866 + init_rwsem(&vdev->memory_lock);
1867 ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
1868 if (ret) {
1869 vfio_iommu_group_put(group, &pdev->dev);
1870 @@ -1361,12 +1631,6 @@ static struct pci_driver vfio_pci_driver = {
1871 .err_handler = &vfio_err_handlers,
1872 };
1873
1874 -struct vfio_devices {
1875 - struct vfio_device **devices;
1876 - int cur_index;
1877 - int max_index;
1878 -};
1879 -
1880 static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
1881 {
1882 struct vfio_devices *devs = data;
1883 @@ -1388,6 +1652,39 @@ static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
1884 return 0;
1885 }
1886
1887 +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data)
1888 +{
1889 + struct vfio_devices *devs = data;
1890 + struct vfio_device *device;
1891 + struct vfio_pci_device *vdev;
1892 +
1893 + if (devs->cur_index == devs->max_index)
1894 + return -ENOSPC;
1895 +
1896 + device = vfio_device_get_from_dev(&pdev->dev);
1897 + if (!device)
1898 + return -EINVAL;
1899 +
1900 + if (pci_dev_driver(pdev) != &vfio_pci_driver) {
1901 + vfio_device_put(device);
1902 + return -EBUSY;
1903 + }
1904 +
1905 + vdev = vfio_device_data(device);
1906 +
1907 + /*
1908 +	/*
	 * Locking multiple devices is prone to deadlock, so run away and
1909 +	 * unwind if we hit contention.
1910 + */
1911 + if (!vfio_pci_zap_and_vma_lock(vdev, true)) {
1912 + vfio_device_put(device);
1913 + return -EBUSY;
1914 + }
1915 +
1916 + devs->devices[devs->cur_index++] = device;
1917 + return 0;
1918 +}
1919 +
1920 /*
1921 * Attempt to do a bus/slot reset if there are devices affected by a reset for
1922 * this device that are needs_reset and all of the affected devices are unused
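
The hot-reset path above locks an arbitrary set of devices, so it only ever
uses trylocks and unwinds everything already held when it hits contention,
rather than blocking and risking an ABBA deadlock. A minimal userspace
analogue of that all-or-nothing pattern, using pthreads in place of the
kernel's vma_lock/memory_lock primitives (names illustrative only):

/* Take every lock with a trylock; on contention release what we hold
 * in reverse order and report -EBUSY instead of blocking. */
#include <errno.h>
#include <pthread.h>

static int lock_all_or_unwind(pthread_mutex_t *locks, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                if (pthread_mutex_trylock(&locks[i])) {
                        while (--i >= 0)
                                pthread_mutex_unlock(&locks[i]);
                        return -EBUSY;
                }
        }
        return 0;       /* caller performs the reset, then unlocks all */
}

int main(void)
{
        pthread_mutex_t locks[2];
        int i;

        for (i = 0; i < 2; i++)
                pthread_mutex_init(&locks[i], NULL);

        if (!lock_all_or_unwind(locks, 2)) {
                /* ... the "reset" would happen here ... */
                for (i = 1; i >= 0; i--)
                        pthread_mutex_unlock(&locks[i]);
        }
        return 0;
}
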
1923 diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
1924 index ef45b8f5bf510..f3c2de04b20d3 100644
1925 --- a/drivers/vfio/pci/vfio_pci_config.c
1926 +++ b/drivers/vfio/pci/vfio_pci_config.c
1927 @@ -400,6 +400,20 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
1928 *(__le32 *)(&p->write[off]) = cpu_to_le32(write);
1929 }
1930
1931 +/* Caller should hold memory_lock semaphore */
1932 +bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
1933 +{
1934 + struct pci_dev *pdev = vdev->pdev;
1935 + u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
1936 +
1937 + /*
1938 + * SR-IOV VF memory enable is handled by the MSE bit in the
1939 + * PF SR-IOV capability, there's therefore no need to trigger
1940 + * faults based on the virtual value.
1941 + */
1942 + return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
1943 +}
1944 +
1945 /*
1946 * Restore the *real* BARs after we detect a FLR or backdoor reset.
1947 * (backdoor = some device specific technique that we didn't catch)
1948 @@ -560,13 +574,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
1949
1950 new_cmd = le32_to_cpu(val);
1951
1952 + phys_io = !!(phys_cmd & PCI_COMMAND_IO);
1953 + virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
1954 + new_io = !!(new_cmd & PCI_COMMAND_IO);
1955 +
1956 phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
1957 virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
1958 new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);
1959
1960 - phys_io = !!(phys_cmd & PCI_COMMAND_IO);
1961 - virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
1962 - new_io = !!(new_cmd & PCI_COMMAND_IO);
1963 + if (!new_mem)
1964 + vfio_pci_zap_and_down_write_memory_lock(vdev);
1965 + else
1966 + down_write(&vdev->memory_lock);
1967
1968 /*
1969 * If the user is writing mem/io enable (new_mem/io) and we
1970 @@ -583,8 +602,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
1971 }
1972
1973 count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
1974 - if (count < 0)
1975 + if (count < 0) {
1976 + if (offset == PCI_COMMAND)
1977 + up_write(&vdev->memory_lock);
1978 return count;
1979 + }
1980
1981 /*
1982 * Save current memory/io enable bits in vconfig to allow for
1983 @@ -595,6 +617,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
1984
1985 *virt_cmd &= cpu_to_le16(~mask);
1986 *virt_cmd |= cpu_to_le16(new_cmd & mask);
1987 +
1988 + up_write(&vdev->memory_lock);
1989 }
1990
1991 /* Emulate INTx disable */
1992 @@ -832,8 +856,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
1993 pos - offset + PCI_EXP_DEVCAP,
1994 &cap);
1995
1996 - if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
1997 + if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
1998 + vfio_pci_zap_and_down_write_memory_lock(vdev);
1999 pci_try_reset_function(vdev->pdev);
2000 + up_write(&vdev->memory_lock);
2001 + }
2002 }
2003
2004 /*
2005 @@ -911,8 +938,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
2006 pos - offset + PCI_AF_CAP,
2007 &cap);
2008
2009 - if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
2010 + if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
2011 + vfio_pci_zap_and_down_write_memory_lock(vdev);
2012 pci_try_reset_function(vdev->pdev);
2013 + up_write(&vdev->memory_lock);
2014 + }
2015 }
2016
2017 return count;
2018 @@ -1705,6 +1735,15 @@ int vfio_config_init(struct vfio_pci_device *vdev)
2019 vconfig[PCI_INTERRUPT_PIN]);
2020
2021 vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
2022 +
2023 + /*
2024 +		 * VFs do not implement the memory enable bit of the COMMAND
2025 +		 * register, therefore we'll not have it set in our initial
2026 + * copy of config space after pci_enable_device(). For
2027 + * consistency with PFs, set the virtual enable bit here.
2028 + */
2029 + *(__le16 *)&vconfig[PCI_COMMAND] |=
2030 + cpu_to_le16(PCI_COMMAND_MEMORY);
2031 }
2032
2033 if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
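
The decision the rewritten COMMAND-write path makes is a one-bit test on the
value being written: any write that leaves memory decode disabled must zap
userspace mappings under the write side of memory_lock before it is
committed. A standalone sketch of that test; PCI_COMMAND_MEMORY mirrors the
UAPI value, and the locking is only described in comments:

#include <stdio.h>
#include <stdint.h>

#define PCI_COMMAND_MEMORY 0x2  /* respond to memory-space accesses */

int main(void)
{
        uint32_t new_cmd = 0x0005;      /* IO + bus-master, memory off */
        int new_mem = !!(new_cmd & PCI_COMMAND_MEMORY); /* normalize to 0/1 */

        if (!new_mem)
                printf("zap mmaps, then down_write(memory_lock)\n");
        else
                printf("plain down_write(memory_lock)\n");
        return 0;
}
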
2034 diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
2035 index 94594dc63c417..bdfdd506bc588 100644
2036 --- a/drivers/vfio/pci/vfio_pci_intrs.c
2037 +++ b/drivers/vfio/pci/vfio_pci_intrs.c
2038 @@ -252,6 +252,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
2039 struct pci_dev *pdev = vdev->pdev;
2040 unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
2041 int ret;
2042 + u16 cmd;
2043
2044 if (!is_irq_none(vdev))
2045 return -EINVAL;
2046 @@ -261,13 +262,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
2047 return -ENOMEM;
2048
2049 /* return the number of supported vectors if we can't get all: */
2050 + cmd = vfio_pci_memory_lock_and_enable(vdev);
2051 ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
2052 if (ret < nvec) {
2053 if (ret > 0)
2054 pci_free_irq_vectors(pdev);
2055 + vfio_pci_memory_unlock_and_restore(vdev, cmd);
2056 kfree(vdev->ctx);
2057 return ret;
2058 }
2059 + vfio_pci_memory_unlock_and_restore(vdev, cmd);
2060
2061 vdev->num_ctx = nvec;
2062 vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
2063 @@ -290,6 +294,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
2064 struct pci_dev *pdev = vdev->pdev;
2065 struct eventfd_ctx *trigger;
2066 int irq, ret;
2067 + u16 cmd;
2068
2069 if (vector < 0 || vector >= vdev->num_ctx)
2070 return -EINVAL;
2071 @@ -298,7 +303,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
2072
2073 if (vdev->ctx[vector].trigger) {
2074 irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
2075 +
2076 + cmd = vfio_pci_memory_lock_and_enable(vdev);
2077 free_irq(irq, vdev->ctx[vector].trigger);
2078 + vfio_pci_memory_unlock_and_restore(vdev, cmd);
2079 +
2080 kfree(vdev->ctx[vector].name);
2081 eventfd_ctx_put(vdev->ctx[vector].trigger);
2082 vdev->ctx[vector].trigger = NULL;
2083 @@ -326,6 +335,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
2084 * such a reset it would be unsuccessful. To avoid this, restore the
2085 * cached value of the message prior to enabling.
2086 */
2087 + cmd = vfio_pci_memory_lock_and_enable(vdev);
2088 if (msix) {
2089 struct msi_msg msg;
2090
2091 @@ -335,6 +345,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
2092
2093 ret = request_irq(irq, vfio_msihandler, 0,
2094 vdev->ctx[vector].name, trigger);
2095 + vfio_pci_memory_unlock_and_restore(vdev, cmd);
2096 if (ret) {
2097 kfree(vdev->ctx[vector].name);
2098 eventfd_ctx_put(trigger);
2099 @@ -379,6 +390,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
2100 {
2101 struct pci_dev *pdev = vdev->pdev;
2102 int i;
2103 + u16 cmd;
2104
2105 for (i = 0; i < vdev->num_ctx; i++) {
2106 vfio_virqfd_disable(&vdev->ctx[i].unmask);
2107 @@ -387,7 +399,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
2108
2109 vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
2110
2111 + cmd = vfio_pci_memory_lock_and_enable(vdev);
2112 pci_free_irq_vectors(pdev);
2113 + vfio_pci_memory_unlock_and_restore(vdev, cmd);
2114
2115 /*
2116 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
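
Every interrupt path above now brackets its hardware access with the same
idiom: save the user's COMMAND value, force memory decode on while the
MSI/MSI-X registers are touched, then restore whatever the user had. A
standalone model of the bracket; struct fake_dev and the two helpers are
stand-ins, and the real functions also hold memory_lock for write:

#include <stdint.h>

#define PCI_COMMAND_MEMORY 0x2

struct fake_dev { uint16_t command; };

static uint16_t lock_and_enable(struct fake_dev *d)
{
        uint16_t cmd = d->command;      /* read under memory_lock */

        if (!(cmd & PCI_COMMAND_MEMORY))
                d->command = cmd | PCI_COMMAND_MEMORY;
        return cmd;                     /* caller hands this back later */
}

static void unlock_and_restore(struct fake_dev *d, uint16_t cmd)
{
        d->command = cmd;               /* undo any forced enable */
}

int main(void)
{
        struct fake_dev dev = { .command = 0x0005 };
        uint16_t saved = lock_and_enable(&dev);

        /* ... poke MSI-X registers while decode is guaranteed on ... */
        unlock_and_restore(&dev, saved);
        return 0;
}
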
2117 diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
2118 index f561ac1c78a0d..f896cebb5c2c2 100644
2119 --- a/drivers/vfio/pci/vfio_pci_private.h
2120 +++ b/drivers/vfio/pci/vfio_pci_private.h
2121 @@ -63,6 +63,11 @@ struct vfio_pci_dummy_resource {
2122 struct list_head res_next;
2123 };
2124
2125 +struct vfio_pci_mmap_vma {
2126 + struct vm_area_struct *vma;
2127 + struct list_head vma_next;
2128 +};
2129 +
2130 struct vfio_pci_device {
2131 struct pci_dev *pdev;
2132 void __iomem *barmap[PCI_STD_RESOURCE_END + 1];
2133 @@ -95,6 +100,9 @@ struct vfio_pci_device {
2134 struct eventfd_ctx *err_trigger;
2135 struct eventfd_ctx *req_trigger;
2136 struct list_head dummy_resources_list;
2137 + struct mutex vma_lock;
2138 + struct list_head vma_list;
2139 + struct rw_semaphore memory_lock;
2140 };
2141
2142 #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
2143 @@ -130,6 +138,14 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
2144 unsigned int type, unsigned int subtype,
2145 const struct vfio_pci_regops *ops,
2146 size_t size, u32 flags, void *data);
2147 +
2148 +extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev);
2149 +extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device
2150 + *vdev);
2151 +extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev);
2152 +extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev,
2153 + u16 cmd);
2154 +
2155 #ifdef CONFIG_VFIO_PCI_IGD
2156 extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
2157 #else
2158 diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
2159 index 357243d76f108..6445461a56013 100644
2160 --- a/drivers/vfio/pci/vfio_pci_rdwr.c
2161 +++ b/drivers/vfio/pci/vfio_pci_rdwr.c
2162 @@ -122,6 +122,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
2163 size_t x_start = 0, x_end = 0;
2164 resource_size_t end;
2165 void __iomem *io;
2166 + struct resource *res = &vdev->pdev->resource[bar];
2167 ssize_t done;
2168
2169 if (pci_resource_start(pdev, bar))
2170 @@ -137,6 +138,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
2171
2172 count = min(count, (size_t)(end - pos));
2173
2174 + if (res->flags & IORESOURCE_MEM) {
2175 + down_read(&vdev->memory_lock);
2176 + if (!__vfio_pci_memory_enabled(vdev)) {
2177 + up_read(&vdev->memory_lock);
2178 + return -EIO;
2179 + }
2180 + }
2181 +
2182 if (bar == PCI_ROM_RESOURCE) {
2183 /*
2184 * The ROM can fill less space than the BAR, so we start the
2185 @@ -144,20 +153,21 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
2186 * filling large ROM BARs much faster.
2187 */
2188 io = pci_map_rom(pdev, &x_start);
2189 - if (!io)
2190 - return -ENOMEM;
2191 + if (!io) {
2192 + done = -ENOMEM;
2193 + goto out;
2194 + }
2195 x_end = end;
2196 } else if (!vdev->barmap[bar]) {
2197 - int ret;
2198 -
2199 - ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
2200 - if (ret)
2201 - return ret;
2202 + done = pci_request_selected_regions(pdev, 1 << bar, "vfio");
2203 + if (done)
2204 + goto out;
2205
2206 io = pci_iomap(pdev, bar, 0);
2207 if (!io) {
2208 pci_release_selected_regions(pdev, 1 << bar);
2209 - return -ENOMEM;
2210 + done = -ENOMEM;
2211 + goto out;
2212 }
2213
2214 vdev->barmap[bar] = io;
2215 @@ -176,6 +186,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
2216
2217 if (bar == PCI_ROM_RESOURCE)
2218 pci_unmap_rom(pdev, io);
2219 +out:
2220 + if (res->flags & IORESOURCE_MEM)
2221 + up_read(&vdev->memory_lock);
2222
2223 return done;
2224 }
2225 diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
2226 index a9f58f3867f02..ccef02ceaad93 100644
2227 --- a/drivers/vfio/vfio_iommu_type1.c
2228 +++ b/drivers/vfio/vfio_iommu_type1.c
2229 @@ -213,6 +213,32 @@ static int put_pfn(unsigned long pfn, int prot)
2230 return 0;
2231 }
2232
2233 +static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
2234 + unsigned long vaddr, unsigned long *pfn,
2235 + bool write_fault)
2236 +{
2237 + int ret;
2238 +
2239 + ret = follow_pfn(vma, vaddr, pfn);
2240 + if (ret) {
2241 + bool unlocked = false;
2242 +
2243 + ret = fixup_user_fault(NULL, mm, vaddr,
2244 + FAULT_FLAG_REMOTE |
2245 + (write_fault ? FAULT_FLAG_WRITE : 0),
2246 + &unlocked);
2247 + if (unlocked)
2248 + return -EAGAIN;
2249 +
2250 + if (ret)
2251 + return ret;
2252 +
2253 + ret = follow_pfn(vma, vaddr, pfn);
2254 + }
2255 +
2256 + return ret;
2257 +}
2258 +
2259 static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
2260 {
2261 struct page *page[1];
2262 @@ -226,12 +252,16 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
2263
2264 down_read(&current->mm->mmap_sem);
2265
2266 +retry:
2267 vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);
2268
2269 if (vma && vma->vm_flags & VM_PFNMAP) {
2270 - if (!follow_pfn(vma, vaddr, pfn) &&
2271 - is_invalid_reserved_pfn(*pfn))
2272 - ret = 0;
2273 + ret = follow_fault_pfn(vma, current->mm, vaddr, pfn, prot & IOMMU_WRITE);
2274 + if (ret == -EAGAIN)
2275 + goto retry;
2276 +
2277 + if (!ret && !is_invalid_reserved_pfn(*pfn))
2278 + ret = -EFAULT;
2279 }
2280
2281 up_read(&current->mm->mmap_sem);
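
The -EAGAIN case above exists because fixup_user_fault() may drop and retake
mmap_sem; when it reports that it did (the unlocked flag), the VMA found
earlier may be stale, so vaddr_get_pfn() restarts from the lookup. A
standalone model of the retry contract, where resolve_once() stands in for
find_vma_intersection() plus follow_fault_pfn():

#include <errno.h>
#include <stdio.h>

/* Pretend the first resolution raced (the lock was dropped) and must be
 * redone from the top exactly once. */
static int resolve_once(int *attempts)
{
        return (*attempts)++ == 0 ? -EAGAIN : 0;
}

int main(void)
{
        int attempts = 0;
        int ret;

        do {
                ret = resolve_once(&attempts);
        } while (ret == -EAGAIN);       /* re-run the stale lookup */

        printf("resolved after %d attempt(s), ret=%d\n", attempts, ret);
        return 0;
}
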
2282 diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
2283 index df27cefb2fa35..266f446ba331c 100644
2284 --- a/drivers/xen/xenbus/xenbus_client.c
2285 +++ b/drivers/xen/xenbus/xenbus_client.c
2286 @@ -384,8 +384,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
2287 int i, j;
2288
2289 for (i = 0; i < nr_pages; i++) {
2290 - err = gnttab_grant_foreign_access(dev->otherend_id,
2291 - virt_to_gfn(vaddr), 0);
2292 + unsigned long gfn;
2293 +
2294 + if (is_vmalloc_addr(vaddr))
2295 + gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
2296 + else
2297 + gfn = virt_to_gfn(vaddr);
2298 +
2299 + err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
2300 if (err < 0) {
2301 xenbus_dev_fatal(dev, err,
2302 "granting access to ring page");
2303 diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
2304 index 0ec65c133b934..e57f12317ab62 100644
2305 --- a/fs/affs/amigaffs.c
2306 +++ b/fs/affs/amigaffs.c
2307 @@ -391,23 +391,23 @@ prot_to_mode(u32 prot)
2308 umode_t mode = 0;
2309
2310 if (!(prot & FIBF_NOWRITE))
2311 - mode |= S_IWUSR;
2312 + mode |= 0200;
2313 if (!(prot & FIBF_NOREAD))
2314 - mode |= S_IRUSR;
2315 + mode |= 0400;
2316 if (!(prot & FIBF_NOEXECUTE))
2317 - mode |= S_IXUSR;
2318 + mode |= 0100;
2319 if (prot & FIBF_GRP_WRITE)
2320 - mode |= S_IWGRP;
2321 + mode |= 0020;
2322 if (prot & FIBF_GRP_READ)
2323 - mode |= S_IRGRP;
2324 + mode |= 0040;
2325 if (prot & FIBF_GRP_EXECUTE)
2326 - mode |= S_IXGRP;
2327 + mode |= 0010;
2328 if (prot & FIBF_OTR_WRITE)
2329 - mode |= S_IWOTH;
2330 + mode |= 0002;
2331 if (prot & FIBF_OTR_READ)
2332 - mode |= S_IROTH;
2333 + mode |= 0004;
2334 if (prot & FIBF_OTR_EXECUTE)
2335 - mode |= S_IXOTH;
2336 + mode |= 0001;
2337
2338 return mode;
2339 }
2340 @@ -418,24 +418,51 @@ mode_to_prot(struct inode *inode)
2341 u32 prot = AFFS_I(inode)->i_protect;
2342 umode_t mode = inode->i_mode;
2343
2344 - if (!(mode & S_IXUSR))
2345 + /*
2346 + * First, clear all RWED bits for owner, group, other.
2347 + * Then, recalculate them afresh.
2348 + *
2349 + * We'll always clear the delete-inhibit bit for the owner, as that is
2350 + * the classic single-user mode AmigaOS protection bit and we need to
2351 + * stay compatible with all scenarios.
2352 + *
2353 + * Since multi-user AmigaOS is an extension, we'll only set the
2354 + * delete-allow bit if any of the other bits in the same user class
2355 + * (group/other) are used.
2356 + */
2357 + prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD
2358 + | FIBF_NOWRITE | FIBF_NODELETE
2359 + | FIBF_GRP_EXECUTE | FIBF_GRP_READ
2360 + | FIBF_GRP_WRITE | FIBF_GRP_DELETE
2361 + | FIBF_OTR_EXECUTE | FIBF_OTR_READ
2362 + | FIBF_OTR_WRITE | FIBF_OTR_DELETE);
2363 +
2364 + /* Classic single-user AmigaOS flags. These are inverted. */
2365 + if (!(mode & 0100))
2366 prot |= FIBF_NOEXECUTE;
2367 - if (!(mode & S_IRUSR))
2368 + if (!(mode & 0400))
2369 prot |= FIBF_NOREAD;
2370 - if (!(mode & S_IWUSR))
2371 + if (!(mode & 0200))
2372 prot |= FIBF_NOWRITE;
2373 - if (mode & S_IXGRP)
2374 +
2375 + /* Multi-user extended flags. Not inverted. */
2376 + if (mode & 0010)
2377 prot |= FIBF_GRP_EXECUTE;
2378 - if (mode & S_IRGRP)
2379 + if (mode & 0040)
2380 prot |= FIBF_GRP_READ;
2381 - if (mode & S_IWGRP)
2382 + if (mode & 0020)
2383 prot |= FIBF_GRP_WRITE;
2384 - if (mode & S_IXOTH)
2385 + if (mode & 0070)
2386 + prot |= FIBF_GRP_DELETE;
2387 +
2388 + if (mode & 0001)
2389 prot |= FIBF_OTR_EXECUTE;
2390 - if (mode & S_IROTH)
2391 + if (mode & 0004)
2392 prot |= FIBF_OTR_READ;
2393 - if (mode & S_IWOTH)
2394 + if (mode & 0002)
2395 prot |= FIBF_OTR_WRITE;
2396 + if (mode & 0007)
2397 + prot |= FIBF_OTR_DELETE;
2398
2399 AFFS_I(inode)->i_protect = prot;
2400 }
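
A worked example of the rewritten mapping: Linux mode 0644 yields only
NOEXECUTE among the inverted owner bits (read and write are granted), the
non-inverted read bits for group and other, and the derived delete-allow
bits for any class that has at least one permission. The sketch below is
trimmed to the flags 0644 exercises; the FIBF_* values are assumed to
mirror fs/affs/amigaffs.h:

#include <stdio.h>
#include <stdint.h>

#define FIBF_NOEXECUTE  0x0002  /* owner bits are inverted */
#define FIBF_NOREAD     0x0008
#define FIBF_NOWRITE    0x0004
#define FIBF_GRP_READ   0x0800  /* group/other bits are not */
#define FIBF_GRP_DELETE 0x0100
#define FIBF_OTR_READ   0x8000
#define FIBF_OTR_DELETE 0x1000

static uint32_t mode_to_prot_sketch(unsigned int mode)
{
        uint32_t prot = 0;

        if (!(mode & 0100)) prot |= FIBF_NOEXECUTE;
        if (!(mode & 0400)) prot |= FIBF_NOREAD;
        if (!(mode & 0200)) prot |= FIBF_NOWRITE;
        if (mode & 0040) prot |= FIBF_GRP_READ;
        if (mode & 0070) prot |= FIBF_GRP_DELETE;  /* any group bit set */
        if (mode & 0004) prot |= FIBF_OTR_READ;
        if (mode & 0007) prot |= FIBF_OTR_DELETE;  /* any other bit set */
        return prot;
}

int main(void)
{
        printf("0644 -> 0x%04x\n", mode_to_prot_sketch(0644)); /* 0x9902 */
        return 0;
}
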
2401 diff --git a/fs/affs/file.c b/fs/affs/file.c
2402 index 0deec9cc2362c..0daca9d00cd8b 100644
2403 --- a/fs/affs/file.c
2404 +++ b/fs/affs/file.c
2405 @@ -427,6 +427,24 @@ static int affs_write_begin(struct file *file, struct address_space *mapping,
2406 return ret;
2407 }
2408
2409 +static int affs_write_end(struct file *file, struct address_space *mapping,
2410 + loff_t pos, unsigned int len, unsigned int copied,
2411 + struct page *page, void *fsdata)
2412 +{
2413 + struct inode *inode = mapping->host;
2414 + int ret;
2415 +
2416 + ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
2417 +
2418 + /* Clear Archived bit on file writes, as AmigaOS would do */
2419 + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
2420 + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
2421 + mark_inode_dirty(inode);
2422 + }
2423 +
2424 + return ret;
2425 +}
2426 +
2427 static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
2428 {
2429 return generic_block_bmap(mapping,block,affs_get_block);
2430 @@ -436,7 +454,7 @@ const struct address_space_operations affs_aops = {
2431 .readpage = affs_readpage,
2432 .writepage = affs_writepage,
2433 .write_begin = affs_write_begin,
2434 - .write_end = generic_write_end,
2435 + .write_end = affs_write_end,
2436 .direct_IO = affs_direct_IO,
2437 .bmap = _affs_bmap
2438 };
2439 @@ -793,6 +811,12 @@ done:
2440 if (tmp > inode->i_size)
2441 inode->i_size = AFFS_I(inode)->mmu_private = tmp;
2442
2443 + /* Clear Archived bit on file writes, as AmigaOS would do */
2444 + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
2445 + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
2446 + mark_inode_dirty(inode);
2447 + }
2448 +
2449 err_first_bh:
2450 unlock_page(page);
2451 put_page(page);
2452 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
2453 index b5ebb43b1824f..65689cbc362db 100644
2454 --- a/fs/btrfs/ctree.c
2455 +++ b/fs/btrfs/ctree.c
2456 @@ -1360,7 +1360,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
2457 btrfs_tree_read_unlock_blocking(eb);
2458 free_extent_buffer(eb);
2459
2460 - extent_buffer_get(eb_rewin);
2461 + btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
2462 + eb_rewin, btrfs_header_level(eb_rewin));
2463 btrfs_tree_read_lock(eb_rewin);
2464 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
2465 WARN_ON(btrfs_header_nritems(eb_rewin) >
2466 @@ -1430,8 +1431,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
2467
2468 if (!eb)
2469 return NULL;
2470 - extent_buffer_get(eb);
2471 - btrfs_tree_read_lock(eb);
2472 if (old_root) {
2473 btrfs_set_header_bytenr(eb, eb->start);
2474 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
2475 @@ -1439,6 +1438,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
2476 btrfs_set_header_level(eb, old_root->level);
2477 btrfs_set_header_generation(eb, old_generation);
2478 }
2479 + btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
2480 + btrfs_header_level(eb));
2481 + btrfs_tree_read_lock(eb);
2482 if (tm)
2483 __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
2484 else
2485 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
2486 index fa22bb29eee6f..d6c827a9ebc56 100644
2487 --- a/fs/btrfs/extent_io.c
2488 +++ b/fs/btrfs/extent_io.c
2489 @@ -5488,9 +5488,9 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
2490 }
2491 }
2492
2493 -int read_extent_buffer_to_user(const struct extent_buffer *eb,
2494 - void __user *dstv,
2495 - unsigned long start, unsigned long len)
2496 +int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
2497 + void __user *dstv,
2498 + unsigned long start, unsigned long len)
2499 {
2500 size_t cur;
2501 size_t offset;
2502 @@ -5511,7 +5511,7 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb,
2503
2504 cur = min(len, (PAGE_SIZE - offset));
2505 kaddr = page_address(page);
2506 - if (copy_to_user(dst, kaddr + offset, cur)) {
2507 + if (probe_user_write(dst, kaddr + offset, cur)) {
2508 ret = -EFAULT;
2509 break;
2510 }
2511 diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
2512 index 9ecdc9584df77..75c03aa1800fe 100644
2513 --- a/fs/btrfs/extent_io.h
2514 +++ b/fs/btrfs/extent_io.h
2515 @@ -401,9 +401,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
2516 void read_extent_buffer(const struct extent_buffer *eb, void *dst,
2517 unsigned long start,
2518 unsigned long len);
2519 -int read_extent_buffer_to_user(const struct extent_buffer *eb,
2520 - void __user *dst, unsigned long start,
2521 - unsigned long len);
2522 +int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
2523 + void __user *dst, unsigned long start,
2524 + unsigned long len);
2525 void write_extent_buffer(struct extent_buffer *eb, const void *src,
2526 unsigned long start, unsigned long len);
2527 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
2528 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2529 index eefe103c65daa..6db46daeed16b 100644
2530 --- a/fs/btrfs/ioctl.c
2531 +++ b/fs/btrfs/ioctl.c
2532 @@ -2041,9 +2041,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
2533 sh.len = item_len;
2534 sh.transid = found_transid;
2535
2536 - /* copy search result header */
2537 - if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
2538 - ret = -EFAULT;
2539 + /*
2540 + * Copy search result header. If we fault then loop again so we
2541 + * can fault in the pages and -EFAULT there if there's a
2542 + * problem. Otherwise we'll fault and then copy the buffer in
2543 + * properly this next time through
2544 + */
2545 + if (probe_user_write(ubuf + *sk_offset, &sh, sizeof(sh))) {
2546 + ret = 0;
2547 goto out;
2548 }
2549
2550 @@ -2051,10 +2056,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
2551
2552 if (item_len) {
2553 char __user *up = ubuf + *sk_offset;
2554 - /* copy the item */
2555 - if (read_extent_buffer_to_user(leaf, up,
2556 - item_off, item_len)) {
2557 - ret = -EFAULT;
2558 + /*
2559 + * Copy the item, same behavior as above, but reset the
2560 + * * sk_offset so we copy the full thing again.
2561 +			 * sk_offset so we copy the full thing again.
2562 + if (read_extent_buffer_to_user_nofault(leaf, up,
2563 + item_off, item_len)) {
2564 + ret = 0;
2565 + *sk_offset -= sizeof(sh);
2566 goto out;
2567 }
2568
2569 @@ -2142,6 +2151,10 @@ static noinline int search_ioctl(struct inode *inode,
2570 key.offset = sk->min_offset;
2571
2572 while (1) {
2573 + ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset);
2574 + if (ret)
2575 + break;
2576 +
2577 ret = btrfs_search_forward(root, &key, path, sk->min_transid);
2578 if (ret != 0) {
2579 if (ret > 0)
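
The two btrfs changes above cooperate: the copy helpers run with page faults
disabled and report a fault as "no progress" (ret = 0, with sk_offset rolled
back) instead of -EFAULT, while search_ioctl() faults the destination buffer
in at the top of its loop, where sleeping is allowed. A control-flow sketch
with hypothetical helper names standing in for fault_in_pages_writeable()
and the nofault copy in copy_to_sk() (kernel context assumed, not
compilable as-is):

static long search_with_faultin(void __user *ubuf, size_t len)
{
        bool done = false;
        long ret;

        for (;;) {
                ret = fault_in_user_buffer(ubuf, len);  /* may sleep */
                if (ret)
                        break;          /* buffer is genuinely bad */

                ret = copy_results_nofault(ubuf, &done);/* faults disabled */
                if (ret || done)
                        break;          /* hard error, or all copied */
                /* a page was out: loop, fault it in, copy again */
        }
        return ret;
}
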
2580 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
2581 index bace03a546b2d..c31b02692f706 100644
2582 --- a/fs/btrfs/volumes.c
2583 +++ b/fs/btrfs/volumes.c
2584 @@ -4181,6 +4181,7 @@ static int btrfs_uuid_scan_kthread(void *data)
2585 goto skip;
2586 }
2587 update_tree:
2588 + btrfs_release_path(path);
2589 if (!btrfs_is_empty_uuid(root_item.uuid)) {
2590 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
2591 root_item.uuid,
2592 @@ -4206,6 +4207,7 @@ update_tree:
2593 }
2594
2595 skip:
2596 + btrfs_release_path(path);
2597 if (trans) {
2598 ret = btrfs_end_transaction(trans, fs_info->uuid_root);
2599 trans = NULL;
2600 @@ -4213,7 +4215,6 @@ skip:
2601 break;
2602 }
2603
2604 - btrfs_release_path(path);
2605 if (key.offset < (u64)-1) {
2606 key.offset++;
2607 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
2608 diff --git a/fs/ceph/file.c b/fs/ceph/file.c
2609 index e7ddb23d9bb73..e818344a052cb 100644
2610 --- a/fs/ceph/file.c
2611 +++ b/fs/ceph/file.c
2612 @@ -1773,6 +1773,7 @@ const struct file_operations ceph_file_fops = {
2613 .mmap = ceph_mmap,
2614 .fsync = ceph_fsync,
2615 .lock = ceph_lock,
2616 + .setlease = simple_nosetlease,
2617 .flock = ceph_flock,
2618 .splice_write = iter_file_splice_write,
2619 .unlocked_ioctl = ceph_ioctl,
2620 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
2621 index aad52e1858363..8c40d6652a9a9 100644
2622 --- a/fs/eventpoll.c
2623 +++ b/fs/eventpoll.c
2624 @@ -1748,9 +1748,9 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
2625 * during ep_insert().
2626 */
2627 if (list_empty(&epi->ffd.file->f_tfile_llink)) {
2628 - get_file(epi->ffd.file);
2629 - list_add(&epi->ffd.file->f_tfile_llink,
2630 - &tfile_check_list);
2631 + if (get_file_rcu(epi->ffd.file))
2632 + list_add(&epi->ffd.file->f_tfile_llink,
2633 + &tfile_check_list);
2634 }
2635 }
2636 }
2637 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
2638 index 060881478e59e..848aab6c69823 100644
2639 --- a/include/linux/blkdev.h
2640 +++ b/include/linux/blkdev.h
2641 @@ -850,6 +850,19 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
2642 return bdev->bd_disk->queue; /* this is never NULL */
2643 }
2644
2645 +/*
2646 + * The basic unit of block I/O is a sector. It is used in a number of contexts
2647 + * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
2648 + * bytes. Variables of type sector_t represent an offset or size that is a
2649 + * multiple of 512 bytes. Hence these two constants.
2650 + */
2651 +#ifndef SECTOR_SHIFT
2652 +#define SECTOR_SHIFT 9
2653 +#endif
2654 +#ifndef SECTOR_SIZE
2655 +#define SECTOR_SIZE (1 << SECTOR_SHIFT)
2656 +#endif
2657 +
2658 /*
2659 * blk_rq_pos() : the current sector
2660 * blk_rq_bytes() : bytes left in the entire request
2661 @@ -877,19 +890,20 @@ extern unsigned int blk_rq_err_bytes(const struct request *rq);
2662
2663 static inline unsigned int blk_rq_sectors(const struct request *rq)
2664 {
2665 - return blk_rq_bytes(rq) >> 9;
2666 + return blk_rq_bytes(rq) >> SECTOR_SHIFT;
2667 }
2668
2669 static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
2670 {
2671 - return blk_rq_cur_bytes(rq) >> 9;
2672 + return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
2673 }
2674
2675 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
2676 int op)
2677 {
2678 if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
2679 - return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
2680 + return min(q->limits.max_discard_sectors,
2681 + UINT_MAX >> SECTOR_SHIFT);
2682
2683 if (unlikely(op == REQ_OP_WRITE_SAME))
2684 return q->limits.max_write_same_sectors;
2685 @@ -1162,16 +1176,21 @@ extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
2686 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
2687 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
2688 {
2689 - return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
2690 - nr_blocks << (sb->s_blocksize_bits - 9),
2691 + return blkdev_issue_discard(sb->s_bdev,
2692 + block << (sb->s_blocksize_bits -
2693 + SECTOR_SHIFT),
2694 + nr_blocks << (sb->s_blocksize_bits -
2695 + SECTOR_SHIFT),
2696 gfp_mask, flags);
2697 }
2698 static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
2699 sector_t nr_blocks, gfp_t gfp_mask)
2700 {
2701 return blkdev_issue_zeroout(sb->s_bdev,
2702 - block << (sb->s_blocksize_bits - 9),
2703 - nr_blocks << (sb->s_blocksize_bits - 9),
2704 + block << (sb->s_blocksize_bits -
2705 + SECTOR_SHIFT),
2706 + nr_blocks << (sb->s_blocksize_bits -
2707 + SECTOR_SHIFT),
2708 gfp_mask, true);
2709 }
2710
2711 @@ -1278,7 +1297,8 @@ static inline int queue_alignment_offset(struct request_queue *q)
2712 static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
2713 {
2714 unsigned int granularity = max(lim->physical_block_size, lim->io_min);
2715 - unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
2716 + unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
2717 + << SECTOR_SHIFT;
2718
2719 return (granularity + lim->alignment_offset - alignment) % granularity;
2720 }
2721 @@ -1312,8 +1332,8 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
2722 return 0;
2723
2724 /* Why are these in bytes, not sectors? */
2725 - alignment = lim->discard_alignment >> 9;
2726 - granularity = lim->discard_granularity >> 9;
2727 + alignment = lim->discard_alignment >> SECTOR_SHIFT;
2728 + granularity = lim->discard_granularity >> SECTOR_SHIFT;
2729 if (!granularity)
2730 return 0;
2731
2732 @@ -1324,7 +1344,7 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
2733 offset = (granularity + alignment - offset) % granularity;
2734
2735 /* Turn it back into bytes, gaah */
2736 - return offset << 9;
2737 + return offset << SECTOR_SHIFT;
2738 }
2739
2740 static inline int bdev_discard_alignment(struct block_device *bdev)
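
All of the conversions rewritten above are plain shifts by SECTOR_SHIFT. A
standalone illustration of bytes-to-sectors and back with the new constants:

#include <stdio.h>

#define SECTOR_SHIFT 9
#define SECTOR_SIZE  (1 << SECTOR_SHIFT)        /* 512 */

int main(void)
{
        unsigned long long bytes = 1048576;     /* 1 MiB */

        printf("%llu bytes = %llu sectors\n",
               bytes, bytes >> SECTOR_SHIFT);   /* 2048 */
        printf("8 sectors = %d bytes\n", 8 << SECTOR_SHIFT); /* 4096 */
        return 0;
}
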
2741 diff --git a/include/linux/bvec.h b/include/linux/bvec.h
2742 index 89b65b82d98f5..8047c3ad77a64 100644
2743 --- a/include/linux/bvec.h
2744 +++ b/include/linux/bvec.h
2745 @@ -88,10 +88,17 @@ static inline void bvec_iter_advance(const struct bio_vec *bv,
2746 }
2747 }
2748
2749 +static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter)
2750 +{
2751 + iter->bi_bvec_done = 0;
2752 + iter->bi_idx++;
2753 +}
2754 +
2755 #define for_each_bvec(bvl, bio_vec, iter, start) \
2756 for (iter = (start); \
2757 (iter).bi_size && \
2758 ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
2759 - bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
2760 + (bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter), \
2761 + (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter)))
2762
2763 #endif /* __LINUX_BVEC_ITER_H */
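
The guard added to for_each_bvec matters because advancing an iterator by
bv_len == 0 makes no progress, so the old macro could spin forever on a
zero-length segment; the fix steps the index past the empty element by hand.
A standalone model of the failure mode and the skip:

#include <stdio.h>

struct seg { int len; };

int main(void)
{
        struct seg segs[] = { { 512 }, { 0 }, { 512 } };
        size_t idx = 0, done = 0, total = 1024;

        while (done < total) {
                if (segs[idx].len == 0) {       /* the patched case */
                        idx++;                  /* skip, don't spin */
                        continue;
                }
                done += segs[idx].len;          /* normal advance */
                idx++;
        }
        printf("consumed %zu bytes over %zu segments\n", done, idx);
        return 0;
}
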
2764 diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
2765 index 9661bb2fbe221..165ddd482f0d7 100644
2766 --- a/include/linux/device-mapper.h
2767 +++ b/include/linux/device-mapper.h
2768 @@ -576,8 +576,6 @@ extern struct ratelimit_state dm_ratelimit_state;
2769 #define DMEMIT(x...) sz += ((sz >= maxlen) ? \
2770 0 : scnprintf(result + sz, maxlen - sz, x))
2771
2772 -#define SECTOR_SHIFT 9
2773 -
2774 /*
2775 * Definitions of return values from target end_io function.
2776 */
2777 diff --git a/include/linux/hid.h b/include/linux/hid.h
2778 index eda06f7ee84af..981657075f051 100644
2779 --- a/include/linux/hid.h
2780 +++ b/include/linux/hid.h
2781 @@ -874,34 +874,49 @@ static inline void hid_device_io_stop(struct hid_device *hid) {
2782 * @max: maximal valid usage->code to consider later (out parameter)
2783 * @type: input event type (EV_KEY, EV_REL, ...)
2784 * @c: code which corresponds to this usage and type
2785 + *
2786 + * The value pointed to by @bit will be set to NULL if either @type is
2787 + * an unhandled event type, or if @c is out of range for @type. This
2788 + * can be used as an error condition.
2789 */
2790 static inline void hid_map_usage(struct hid_input *hidinput,
2791 struct hid_usage *usage, unsigned long **bit, int *max,
2792 - __u8 type, __u16 c)
2793 + __u8 type, unsigned int c)
2794 {
2795 struct input_dev *input = hidinput->input;
2796 -
2797 - usage->type = type;
2798 - usage->code = c;
2799 + unsigned long *bmap = NULL;
2800 + unsigned int limit = 0;
2801
2802 switch (type) {
2803 case EV_ABS:
2804 - *bit = input->absbit;
2805 - *max = ABS_MAX;
2806 + bmap = input->absbit;
2807 + limit = ABS_MAX;
2808 break;
2809 case EV_REL:
2810 - *bit = input->relbit;
2811 - *max = REL_MAX;
2812 + bmap = input->relbit;
2813 + limit = REL_MAX;
2814 break;
2815 case EV_KEY:
2816 - *bit = input->keybit;
2817 - *max = KEY_MAX;
2818 + bmap = input->keybit;
2819 + limit = KEY_MAX;
2820 break;
2821 case EV_LED:
2822 - *bit = input->ledbit;
2823 - *max = LED_MAX;
2824 + bmap = input->ledbit;
2825 + limit = LED_MAX;
2826 break;
2827 }
2828 +
2829 + if (unlikely(c > limit || !bmap)) {
2830 + pr_warn_ratelimited("%s: Invalid code %d type %d\n",
2831 + input->name, c, type);
2832 + *bit = NULL;
2833 + return;
2834 + }
2835 +
2836 + usage->type = type;
2837 + usage->code = c;
2838 + *max = limit;
2839 + *bit = bmap;
2840 }
2841
2842 /**
2843 @@ -915,7 +930,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput,
2844 __u8 type, __u16 c)
2845 {
2846 hid_map_usage(hidinput, usage, bit, max, type, c);
2847 - clear_bit(c, *bit);
2848 + if (*bit)
2849 + clear_bit(usage->code, *bit);
2850 }
2851
2852 /**
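
With the stricter hid_map_usage() above, callers are expected to treat
*bit == NULL as "mapping failed". A kernel-context sketch of a driver's
input_mapping callback using that convention; example_input_mapping() is
hypothetical, but the signature and return values follow the usual
hid_driver conventions (1 = mapped, -1 = ignore this usage):

static int example_input_mapping(struct hid_device *hdev,
                                 struct hid_input *hi,
                                 struct hid_field *field,
                                 struct hid_usage *usage,
                                 unsigned long **bit, int *max)
{
        hid_map_usage(hi, usage, bit, max, EV_KEY, KEY_PLAYPAUSE);
        if (!*bit)              /* out-of-range code or unhandled type */
                return -1;      /* reject the mapping */
        return 1;
}
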
2853 diff --git a/include/linux/ide.h b/include/linux/ide.h
2854 index a633898f36ac8..eb2ac48c99db3 100644
2855 --- a/include/linux/ide.h
2856 +++ b/include/linux/ide.h
2857 @@ -128,7 +128,6 @@ struct ide_io_ports {
2858 */
2859 #define PARTN_BITS 6 /* number of minor dev bits for partitions */
2860 #define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
2861 -#define SECTOR_SIZE 512
2862
2863 /*
2864 * Timeouts for various operations:
2865 diff --git a/include/linux/libata.h b/include/linux/libata.h
2866 index 780ccde2c3127..e2dac33eae964 100644
2867 --- a/include/linux/libata.h
2868 +++ b/include/linux/libata.h
2869 @@ -435,6 +435,7 @@ enum {
2870 ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */
2871 ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
2872 ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
2873 + ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
2874
2875 /* DMA mask for user DMA control: User visible values; DO NOT
2876 renumber */
2877 diff --git a/include/linux/log2.h b/include/linux/log2.h
2878 index c373295f359fa..cca606609e1bc 100644
2879 --- a/include/linux/log2.h
2880 +++ b/include/linux/log2.h
2881 @@ -159,7 +159,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
2882 #define roundup_pow_of_two(n) \
2883 ( \
2884 __builtin_constant_p(n) ? ( \
2885 - (n == 1) ? 1 : \
2886 + ((n) == 1) ? 1 : \
2887 (1UL << (ilog2((n) - 1) + 1)) \
2888 ) : \
2889 __roundup_pow_of_two(n) \
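
The one-character log2.h fix guards against an operator-precedence trap:
== binds tighter than the bitwise operators, so an unparenthesized n in
"n == 1" mis-groups arguments such as x | 1. A standalone illustration:

#include <stdio.h>

#define BAD_IS_ONE(n)   (n == 1)        /* x | 1 becomes x | (1 == 1) */
#define GOOD_IS_ONE(n)  ((n) == 1)

int main(void)
{
        int x = 2;

        printf("bad:  %d\n", BAD_IS_ONE(x | 1));  /* 2 | 1 -> 3 */
        printf("good: %d\n", GOOD_IS_ONE(x | 1)); /* (2|1) == 1 -> 0 */
        return 0;
}
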
2890 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
2891 index 9442423979c1c..cc5ba47062e87 100644
2892 --- a/include/linux/uaccess.h
2893 +++ b/include/linux/uaccess.h
2894 @@ -90,6 +90,17 @@ static inline unsigned long __copy_from_user_nocache(void *to,
2895 extern long probe_kernel_read(void *dst, const void *src, size_t size);
2896 extern long __probe_kernel_read(void *dst, const void *src, size_t size);
2897
2898 +/*
2899 + * probe_user_read(): safely attempt to read from a location in user space
2900 + * @dst: pointer to the buffer that shall take the data
2901 + * @src: address to read from
2902 + * @size: size of the data chunk
2903 + *
2904 + * Safely read from user address @src to the buffer at @dst. If a kernel fault
2905 + * happens, handle that and return -EFAULT.
2906 + */
2907 +extern long probe_user_read(void *dst, const void __user *src, size_t size);
2908 +
2909 /*
2910 * probe_kernel_write(): safely attempt to write to a location
2911 * @dst: address to write to
2912 @@ -102,7 +113,22 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size);
2913 extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
2914 extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
2915
2916 +/*
2917 + * probe_user_write(): safely attempt to write to a location in user space
2918 + * @dst: address to write to
2919 + * @src: pointer to the data that shall be written
2920 + * @size: size of the data chunk
2921 + *
2922 + * Safely write to user address @dst from the buffer at @src. If a kernel fault
2923 + * happens, handle that and return -EFAULT.
2924 + */
2925 +extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
2926 +extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
2927 +
2928 extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
2929 +extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
2930 + long count);
2931 +extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
2932
2933 /**
2934 * probe_kernel_address(): safely attempt to read from a location
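
A kernel-context sketch of where the new probe_user_*() helpers fit, e.g. a
tracing hook that must not sleep: the access is bracketed by
pagefault_disable(), so a missing page comes back as -EFAULT instead of
being serviced. peek_user_u32() is a hypothetical wrapper:

#include <linux/types.h>
#include <linux/uaccess.h>

static long peek_user_u32(const void __user *uaddr, u32 *out)
{
        /* safe from atomic context; faults are caught, not serviced */
        return probe_user_read(out, uaddr, sizeof(*out));
}
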
2935 diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
2936 index 146054ceea8e0..5bb56ebf3c9f9 100644
2937 --- a/include/net/inet_connection_sock.h
2938 +++ b/include/net/inet_connection_sock.h
2939 @@ -319,5 +319,9 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
2940 int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
2941 char __user *optval, unsigned int optlen);
2942
2943 +/* update the fast reuse flag when adding a socket */
2944 +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
2945 + struct sock *sk);
2946 +
2947 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
2948 #endif /* _INET_CONNECTION_SOCK_H */
2949 diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
2950 index 7ba9a624090fb..91e395fd0a65c 100644
2951 --- a/include/net/netfilter/nf_tables.h
2952 +++ b/include/net/netfilter/nf_tables.h
2953 @@ -119,6 +119,8 @@ static inline u8 nft_reg_load8(u32 *sreg)
2954 static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
2955 unsigned int len)
2956 {
2957 + if (len % NFT_REG32_SIZE)
2958 + dst[len / NFT_REG32_SIZE] = 0;
2959 memcpy(dst, src, len);
2960 }
2961
2962 diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h
2963 index e956704f5fb1b..95b8a9395ec10 100644
2964 --- a/include/uapi/linux/msdos_fs.h
2965 +++ b/include/uapi/linux/msdos_fs.h
2966 @@ -9,7 +9,9 @@
2967 * The MS-DOS filesystem constants/structures
2968 */
2969
2970 +#ifndef SECTOR_SIZE
2971 #define SECTOR_SIZE 512 /* sector size (bytes) */
2972 +#endif
2973 #define SECTOR_BITS 9 /* log2(SECTOR_SIZE) */
2974 #define MSDOS_DPB (MSDOS_DPS) /* dir entries per block */
2975 #define MSDOS_DPB_BITS 4 /* log2(MSDOS_DPB) */
2976 diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
2977 index c6c4477c136b9..d121c22bf9284 100644
2978 --- a/include/uapi/linux/netfilter/nf_tables.h
2979 +++ b/include/uapi/linux/netfilter/nf_tables.h
2980 @@ -114,7 +114,7 @@ enum nf_tables_msg_types {
2981 * @NFTA_LIST_ELEM: list element (NLA_NESTED)
2982 */
2983 enum nft_list_attributes {
2984 - NFTA_LIST_UNPEC,
2985 + NFTA_LIST_UNSPEC,
2986 NFTA_LIST_ELEM,
2987 __NFTA_LIST_MAX
2988 };
2989 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2990 index 2c22ea7a20131..b469d099dc5f6 100644
2991 --- a/mm/hugetlb.c
2992 +++ b/mm/hugetlb.c
2993 @@ -2921,6 +2921,22 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
2994 }
2995
2996 #ifdef CONFIG_SYSCTL
2997 +static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
2998 + void *buffer, size_t *length,
2999 + loff_t *ppos, unsigned long *out)
3000 +{
3001 + struct ctl_table dup_table;
3002 +
3003 + /*
3004 + * In order to avoid races with __do_proc_doulongvec_minmax(), we
3005 +	 * duplicate @table and modify only the copy.
3006 + */
3007 + dup_table = *table;
3008 + dup_table.data = out;
3009 +
3010 + return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
3011 +}
3012 +
3013 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
3014 struct ctl_table *table, int write,
3015 void __user *buffer, size_t *length, loff_t *ppos)
3016 @@ -2932,9 +2948,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
3017 if (!hugepages_supported())
3018 return -EOPNOTSUPP;
3019
3020 - table->data = &tmp;
3021 - table->maxlen = sizeof(unsigned long);
3022 - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
3023 + ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3024 + &tmp);
3025 if (ret)
3026 goto out;
3027
3028 @@ -2978,9 +2993,8 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
3029 if (write && hstate_is_gigantic(h))
3030 return -EINVAL;
3031
3032 - table->data = &tmp;
3033 - table->maxlen = sizeof(unsigned long);
3034 - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
3035 + ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3036 + &tmp);
3037 if (ret)
3038 goto out;
3039
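
The race the duplicate-table trick avoids: ctl_table is shared, so two
writers stuffing their private scratch pointer into the shared ->data field
can trample each other. Copying the descriptor by value keeps the scratch
pointer local to each caller. A standalone model:

#include <stdio.h>

struct ctl_like { void *data; };

static void parse_into(struct ctl_like *t)
{
        /* stands in for proc_doulongvec_minmax() writing via t->data */
        *(unsigned long *)t->data = 42;
}

int main(void)
{
        struct ctl_like shared = { .data = NULL };
        struct ctl_like dup = shared;   /* struct copy, not a pointer */
        unsigned long tmp = 0;

        dup.data = &tmp;                /* only the copy is modified */
        parse_into(&dup);
        printf("shared.data = %p, tmp = %lu\n", shared.data, tmp);
        return 0;
}
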
3040 diff --git a/mm/maccess.c b/mm/maccess.c
3041 index 78f9274dd49d0..03ea550f5a743 100644
3042 --- a/mm/maccess.c
3043 +++ b/mm/maccess.c
3044 @@ -5,8 +5,32 @@
3045 #include <linux/mm.h>
3046 #include <linux/uaccess.h>
3047
3048 +static __always_inline long
3049 +probe_read_common(void *dst, const void __user *src, size_t size)
3050 +{
3051 + long ret;
3052 +
3053 + pagefault_disable();
3054 + ret = __copy_from_user_inatomic(dst, src, size);
3055 + pagefault_enable();
3056 +
3057 + return ret ? -EFAULT : 0;
3058 +}
3059 +
3060 +static __always_inline long
3061 +probe_write_common(void __user *dst, const void *src, size_t size)
3062 +{
3063 + long ret;
3064 +
3065 + pagefault_disable();
3066 + ret = __copy_to_user_inatomic(dst, src, size);
3067 + pagefault_enable();
3068 +
3069 + return ret ? -EFAULT : 0;
3070 +}
3071 +
3072 /**
3073 - * probe_kernel_read(): safely attempt to read from a location
3074 + * probe_kernel_read(): safely attempt to read from a kernel-space location
3075 * @dst: pointer to the buffer that shall take the data
3076 * @src: address to read from
3077 * @size: size of the data chunk
3078 @@ -29,16 +53,40 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
3079 mm_segment_t old_fs = get_fs();
3080
3081 set_fs(KERNEL_DS);
3082 - pagefault_disable();
3083 - ret = __copy_from_user_inatomic(dst,
3084 - (__force const void __user *)src, size);
3085 - pagefault_enable();
3086 + ret = probe_read_common(dst, (__force const void __user *)src, size);
3087 set_fs(old_fs);
3088
3089 - return ret ? -EFAULT : 0;
3090 + return ret;
3091 }
3092 EXPORT_SYMBOL_GPL(probe_kernel_read);
3093
3094 +/**
3095 + * probe_user_read(): safely attempt to read from a user-space location
3096 + * @dst: pointer to the buffer that shall take the data
3097 + * @src: address to read from. This must be a user address.
3098 + * @size: size of the data chunk
3099 + *
3100 + * Safely read from user address @src to the buffer at @dst. If a kernel fault
3101 + * happens, handle that and return -EFAULT.
3102 + */
3103 +
3104 +long __weak probe_user_read(void *dst, const void __user *src, size_t size)
3105 + __attribute__((alias("__probe_user_read")));
3106 +
3107 +long __probe_user_read(void *dst, const void __user *src, size_t size)
3108 +{
3109 + long ret = -EFAULT;
3110 + mm_segment_t old_fs = get_fs();
3111 +
3112 + set_fs(USER_DS);
3113 + if (access_ok(VERIFY_READ, src, size))
3114 + ret = probe_read_common(dst, src, size);
3115 + set_fs(old_fs);
3116 +
3117 + return ret;
3118 +}
3119 +EXPORT_SYMBOL_GPL(probe_user_read);
3120 +
3121 /**
3122 * probe_kernel_write(): safely attempt to write to a location
3123 * @dst: address to write to
3124 @@ -48,6 +96,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
3125 * Safely write to address @dst from the buffer at @src. If a kernel fault
3126 * happens, handle that and return -EFAULT.
3127 */
3128 +
3129 long __weak probe_kernel_write(void *dst, const void *src, size_t size)
3130 __attribute__((alias("__probe_kernel_write")));
3131
3132 @@ -57,15 +106,40 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
3133 mm_segment_t old_fs = get_fs();
3134
3135 set_fs(KERNEL_DS);
3136 - pagefault_disable();
3137 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
3138 - pagefault_enable();
3139 + ret = probe_write_common((__force void __user *)dst, src, size);
3140 set_fs(old_fs);
3141
3142 - return ret ? -EFAULT : 0;
3143 + return ret;
3144 }
3145 EXPORT_SYMBOL_GPL(probe_kernel_write);
3146
3147 +/**
3148 + * probe_user_write(): safely attempt to write to a user-space location
3149 + * @dst: address to write to
3150 + * @src: pointer to the data that shall be written
3151 + * @size: size of the data chunk
3152 + * Safely write to user address @dst from the buffer at @src. If a kernel fault
3153 + * Safely write to address @dst from the buffer at @src. If a kernel fault
3154 + * happens, handle that and return -EFAULT.
3155 + */
3156 +
3157 +long __weak probe_user_write(void __user *dst, const void *src, size_t size)
3158 + __attribute__((alias("__probe_user_write")));
3159 +
3160 +long __probe_user_write(void __user *dst, const void *src, size_t size)
3161 +{
3162 + long ret = -EFAULT;
3163 + mm_segment_t old_fs = get_fs();
3164 +
3165 + set_fs(USER_DS);
3166 + if (access_ok(VERIFY_WRITE, dst, size))
3167 + ret = probe_write_common(dst, src, size);
3168 + set_fs(old_fs);
3169 +
3170 + return ret;
3171 +}
3172 +EXPORT_SYMBOL_GPL(probe_user_write);
3173 +
3174 /**
3175 * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
3176 * @dst: Destination address, in kernel space. This buffer must be at
3177 @@ -105,3 +179,76 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
3178
3179 return ret ? -EFAULT : src - unsafe_addr;
3180 }
3181 +
3182 +/**
3183 + * strncpy_from_unsafe_user: - Copy a NUL terminated string from unsafe user
3184 + * address.
3185 + * @dst: Destination address, in kernel space. This buffer must be at
3186 + * least @count bytes long.
3187 + * @unsafe_addr: Unsafe user address.
3188 + * @count: Maximum number of bytes to copy, including the trailing NUL.
3189 + *
3190 + * Copies a NUL-terminated string from unsafe user address to kernel buffer.
3191 + *
3192 + * On success, returns the length of the string INCLUDING the trailing NUL.
3193 + *
3194 + * If access fails, returns -EFAULT (some data may have been copied
3195 + * and the trailing NUL added).
3196 + *
3197 + * If @count is smaller than the length of the string, copies @count-1 bytes,
3198 + * sets the last byte of @dst buffer to NUL and returns @count.
3199 + */
3200 +long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
3201 + long count)
3202 +{
3203 + mm_segment_t old_fs = get_fs();
3204 + long ret;
3205 +
3206 + if (unlikely(count <= 0))
3207 + return 0;
3208 +
3209 + set_fs(USER_DS);
3210 + pagefault_disable();
3211 + ret = strncpy_from_user(dst, unsafe_addr, count);
3212 + pagefault_enable();
3213 + set_fs(old_fs);
3214 +
3215 + if (ret >= count) {
3216 + ret = count;
3217 + dst[ret - 1] = '\0';
3218 + } else if (ret > 0) {
3219 + ret++;
3220 + }
3221 +
3222 + return ret;
3223 +}
3224 +
3225 +/**
3226 + * strnlen_unsafe_user: - Get the size of a user string INCLUDING final NUL.
3227 + * @unsafe_addr: The string to measure.
3228 + * @count: Maximum count (including NUL)
3229 + *
3230 + * Get the size of a NUL-terminated string in user space without pagefault.
3231 + *
3232 + * Returns the size of the string INCLUDING the terminating NUL.
3233 + *
3234 + * If the string is too long, returns a number larger than @count. User
3235 + * has to check the return value against "> count".
3236 + * On exception (or invalid count), returns 0.
3237 + *
3238 + * Unlike strnlen_user, this can be used from IRQ handler etc. because
3239 + * it disables pagefaults.
3240 + */
3241 +long strnlen_unsafe_user(const void __user *unsafe_addr, long count)
3242 +{
3243 + mm_segment_t old_fs = get_fs();
3244 + int ret;
3245 +
3246 + set_fs(USER_DS);
3247 + pagefault_disable();
3248 + ret = strnlen_user(unsafe_addr, count);
3249 + pagefault_enable();
3250 + set_fs(old_fs);
3251 +
3252 + return ret;
3253 +}
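A short usage sketch (illustrative only; the helper name is invented) showing
the truncation contract documented above: a return equal to @count means the
string was cut down to count-1 bytes plus the added NUL:

    #include <linux/printk.h>
    #include <linux/uaccess.h>

    static long trace_copy_string(char *dst, long count,
                                  const char __user *src)
    {
            long len = strncpy_from_unsafe_user(dst, src, count);

            if (len < 0)
                    return len;             /* -EFAULT, nothing usable */
            if (len == count)
                    pr_debug("copy truncated to %ld bytes\n", count - 1);
            return len;                     /* includes the trailing NUL */
    }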
3254 diff --git a/mm/slub.c b/mm/slub.c
3255 index 454c1d550ad22..51a73d2d1082e 100644
3256 --- a/mm/slub.c
3257 +++ b/mm/slub.c
3258 @@ -625,12 +625,12 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
3259 }
3260
3261 static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
3262 - void *freelist, void *nextfree)
3263 + void **freelist, void *nextfree)
3264 {
3265 if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
3266 - !check_valid_pointer(s, page, nextfree)) {
3267 - object_err(s, page, freelist, "Freechain corrupt");
3268 - freelist = NULL;
3269 + !check_valid_pointer(s, page, nextfree) && freelist) {
3270 + object_err(s, page, *freelist, "Freechain corrupt");
3271 + *freelist = NULL;
3272 slab_fix(s, "Isolate corrupted freechain");
3273 return true;
3274 }
3275 @@ -1320,7 +1320,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
3276 int objects) {}
3277
3278 static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
3279 - void *freelist, void *nextfree)
3280 + void **freelist, void *nextfree)
3281 {
3282 return false;
3283 }
3284 @@ -2040,7 +2040,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
3285 * 'freelist' is already corrupted. So isolate all objects
3286 * starting at 'freelist'.
3287 */
3288 - if (freelist_corrupted(s, page, freelist, nextfree))
3289 + if (freelist_corrupted(s, page, &freelist, nextfree))
3290 break;
3291
3292 do {
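The core of the slub fix is a C calling-convention detail: the old code
assigned NULL to a by-value parameter, so deactivate_slab() kept walking the
corrupted chain. A standalone model (plain userspace C, names invented) shows
why the extra level of indirection matters:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* With void *freelist the callee only clears its own copy; with
     * void **freelist the caller's pointer is really isolated. */
    static bool isolate_corrupted(void **freelist)
    {
            *freelist = NULL;       /* visible to the caller */
            return true;
    }

    int main(void)
    {
            int obj;
            void *freelist = &obj;

            isolate_corrupted(&freelist);
            printf("freelist %s\n", freelist ? "still set" : "cleared");
            return 0;
    }

The added "&& freelist" test also keeps the debug path from dereferencing a
freelist that has already been cleared.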
3293 diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
3294 index 00123064eb26d..e545b42ab0b98 100644
3295 --- a/net/batman-adv/bridge_loop_avoidance.c
3296 +++ b/net/batman-adv/bridge_loop_avoidance.c
3297 @@ -451,7 +451,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
3298 skb->len + ETH_HLEN);
3299 soft_iface->last_rx = jiffies;
3300
3301 - netif_rx(skb);
3302 + if (in_interrupt())
3303 + netif_rx(skb);
3304 + else
3305 + netif_rx_ni(skb);
3306 out:
3307 if (primary_if)
3308 batadv_hardif_put(primary_if);
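The rule the hunk encodes: netif_rx() is only safe from interrupt context,
while process context must use netif_rx_ni() so pending softirqs get a chance
to run. A generic sketch of the dispatch pattern (helper name invented):

    #include <linux/netdevice.h>
    #include <linux/preempt.h>

    static void deliver_skb_any_context(struct sk_buff *skb)
    {
            if (in_interrupt())
                    netif_rx(skb);
            else
                    netif_rx_ni(skb);       /* process context */
    }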
3309 diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
3310 index 3bd7ed6b6b3e1..9727afc030d8c 100644
3311 --- a/net/batman-adv/gateway_client.c
3312 +++ b/net/batman-adv/gateway_client.c
3313 @@ -673,8 +673,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
3314
3315 chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET;
3316 /* store the client address if the message is going to a client */
3317 - if (ret == BATADV_DHCP_TO_CLIENT &&
3318 - pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) {
3319 + if (ret == BATADV_DHCP_TO_CLIENT) {
3320 + if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN))
3321 + return BATADV_DHCP_NO;
3322 +
3323 /* check if the DHCP packet carries an Ethernet DHCP */
3324 p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET;
3325 if (*p != BATADV_DHCP_HTYPE_ETHERNET)
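The DHCP fix replaces a silent fall-through on short packets with an explicit
BATADV_DHCP_NO. The underlying rule: bytes at skb->data + offset may only be
read after pskb_may_pull() has confirmed they are in the linear area. A
minimal sketch of that guard (function name invented):

    #include <linux/skbuff.h>

    static int read_header_octet(struct sk_buff *skb, unsigned int off)
    {
            if (!pskb_may_pull(skb, off + 1))
                    return -1;      /* truncated packet, refuse */
            return skb->data[off];
    }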
3326 diff --git a/net/core/dev.c b/net/core/dev.c
3327 index dd8d36feb69f4..9ac591dd16d50 100644
3328 --- a/net/core/dev.c
3329 +++ b/net/core/dev.c
3330 @@ -5188,13 +5188,14 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3331 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
3332 weight, dev->name);
3333 napi->weight = weight;
3334 - list_add(&napi->dev_list, &dev->napi_list);
3335 napi->dev = dev;
3336 #ifdef CONFIG_NETPOLL
3337 spin_lock_init(&napi->poll_lock);
3338 napi->poll_owner = -1;
3339 #endif
3340 set_bit(NAPI_STATE_SCHED, &napi->state);
3341 + set_bit(NAPI_STATE_NPSVC, &napi->state);
3342 + list_add_rcu(&napi->dev_list, &dev->napi_list);
3343 napi_hash_add(napi);
3344 }
3345 EXPORT_SYMBOL(netif_napi_add);
3346 diff --git a/net/core/netpoll.c b/net/core/netpoll.c
3347 index 5de180a9b7f5a..9c1bad3909bd7 100644
3348 --- a/net/core/netpoll.c
3349 +++ b/net/core/netpoll.c
3350 @@ -178,7 +178,7 @@ static void poll_napi(struct net_device *dev)
3351 {
3352 struct napi_struct *napi;
3353
3354 - list_for_each_entry(napi, &dev->napi_list, dev_list) {
3355 + list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
3356 if (napi->poll_owner != smp_processor_id() &&
3357 spin_trylock(&napi->poll_lock)) {
3358 poll_one_napi(napi);
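These two hunks pair an RCU publish with an RCU traversal: netif_napi_add()
now finishes all initialization (including setting NAPI_STATE_NPSVC so
netpoll skips the napi until the driver enables it) before linking the entry
with list_add_rcu(), and poll_napi() iterates with the _rcu variant. A
generic sketch of the publish/read pattern, with invented types:

    #include <linux/list.h>
    #include <linux/rculist.h>
    #include <linux/rcupdate.h>

    struct item { int val; struct list_head node; };

    static LIST_HEAD(items);

    /* Writer: fully initialize, then publish. */
    static void publish_item(struct item *it, int val)
    {
            it->val = val;                  /* init before publish */
            list_add_rcu(&it->node, &items);
    }

    /* Lockless reader: must use the _rcu iterator. */
    static int sum_items(void)
    {
            struct item *it;
            int sum = 0;

            rcu_read_lock();
            list_for_each_entry_rcu(it, &items, node)
                    sum += it->val;
            rcu_read_unlock();
            return sum;
    }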
3359 diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
3360 index 1bcbb7399fe69..5a0352ccadd3d 100644
3361 --- a/net/ipv4/inet_connection_sock.c
3362 +++ b/net/ipv4/inet_connection_sock.c
3363 @@ -89,6 +89,28 @@ int inet_csk_bind_conflict(const struct sock *sk,
3364 }
3365 EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
3366
3367 +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
3368 + struct sock *sk)
3369 +{
3370 + kuid_t uid = sock_i_uid(sk);
3371 + bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
3372 +
3373 + if (!hlist_empty(&tb->owners)) {
3374 + if (!reuse)
3375 + tb->fastreuse = 0;
3376 + if (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid))
3377 + tb->fastreuseport = 0;
3378 + } else {
3379 + tb->fastreuse = reuse;
3380 + if (sk->sk_reuseport) {
3381 + tb->fastreuseport = 1;
3382 + tb->fastuid = uid;
3383 + } else {
3384 + tb->fastreuseport = 0;
3385 + }
3386 + }
3387 +}
3388 +
3389 /* Obtain a reference to a local port for the given sock,
3390 * if snum is zero it means select any available local port.
3391 * We try to allocate an odd port (and leave even ports for connect())
3392 @@ -218,19 +240,10 @@ tb_found:
3393 }
3394 goto fail_unlock;
3395 }
3396 - if (!reuse)
3397 - tb->fastreuse = 0;
3398 - if (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid))
3399 - tb->fastreuseport = 0;
3400 - } else {
3401 - tb->fastreuse = reuse;
3402 - if (sk->sk_reuseport) {
3403 - tb->fastreuseport = 1;
3404 - tb->fastuid = uid;
3405 - } else {
3406 - tb->fastreuseport = 0;
3407 - }
3408 }
3409 +
3410 + inet_csk_update_fastreuse(tb, sk);
3411 +
3412 success:
3413 if (!inet_csk(sk)->icsk_bind_hash)
3414 inet_bind_hash(sk, tb, port);
3415 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
3416 index 4bf542f4d9809..8876338707636 100644
3417 --- a/net/ipv4/inet_hashtables.c
3418 +++ b/net/ipv4/inet_hashtables.c
3419 @@ -163,6 +163,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
3420 return -ENOMEM;
3421 }
3422 }
3423 + inet_csk_update_fastreuse(tb, child);
3424 }
3425 inet_bind_hash(child, tb, port);
3426 spin_unlock(&head->lock);
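The refactor moves the fastreuse/fastreuseport bookkeeping into one helper so
inet_csk_get_port() and __inet_inherit_port() apply identical rules. A
userspace model of the transitions (kernel types replaced by plain booleans)
makes them easy to test in isolation:

    #include <stdbool.h>
    #include <stdio.h>

    struct bucket {
            bool has_owners, fastreuse, fastreuseport;
            unsigned int fastuid;
    };

    static void update_fastreuse(struct bucket *tb, bool reuse,
                                 bool reuseport, unsigned int uid)
    {
            if (tb->has_owners) {           /* only ever tighten */
                    if (!reuse)
                            tb->fastreuse = false;
                    if (!reuseport || tb->fastuid != uid)
                            tb->fastreuseport = false;
            } else {                        /* first owner sets policy */
                    tb->fastreuse = reuse;
                    tb->fastreuseport = reuseport;
                    if (reuseport)
                            tb->fastuid = uid;
            }
    }

    int main(void)
    {
            struct bucket tb = { .has_owners = false };

            update_fastreuse(&tb, true, true, 1000);
            tb.has_owners = true;
            update_fastreuse(&tb, true, true, 1001);        /* uid differs */
            printf("fastreuse=%d fastreuseport=%d\n",
                   tb.fastreuse, tb.fastreuseport);         /* 1 0 */
            return 0;
    }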
3427 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
3428 index 2fa1c4f2e94e0..ec460aedfc617 100644
3429 --- a/net/netfilter/nf_tables_api.c
3430 +++ b/net/netfilter/nf_tables_api.c
3431 @@ -2592,7 +2592,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
3432 goto nla_put_failure;
3433 }
3434
3435 - if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
3436 + if (set->udata &&
3437 + nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
3438 goto nla_put_failure;
3439
3440 desc = nla_nest_start(skb, NFTA_SET_DESC);
3441 diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
3442 index b2f88617611aa..f73d47b3ffb72 100644
3443 --- a/net/netfilter/nft_payload.c
3444 +++ b/net/netfilter/nft_payload.c
3445 @@ -74,7 +74,9 @@ static void nft_payload_eval(const struct nft_expr *expr,
3446 u32 *dest = &regs->data[priv->dreg];
3447 int offset;
3448
3449 - dest[priv->len / NFT_REG32_SIZE] = 0;
3450 + if (priv->len % NFT_REG32_SIZE)
3451 + dest[priv->len / NFT_REG32_SIZE] = 0;
3452 +
3453 switch (priv->base) {
3454 case NFT_PAYLOAD_LL_HEADER:
3455 if (!skb_mac_header_was_set(skb))
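The nft_payload change fixes the pre-zeroing of the destination registers:
dest[len / NFT_REG32_SIZE] is the final partial word only when len is not a
multiple of NFT_REG32_SIZE; for aligned lengths the old unconditional store
zeroed the word after the data. A standalone model of the clobber:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NFT_REG32_SIZE 4

    static void load_payload(uint32_t *dest, const void *src,
                             unsigned int len)
    {
            if (len % NFT_REG32_SIZE)       /* patched behaviour */
                    dest[len / NFT_REG32_SIZE] = 0;
            memcpy(dest, src, len);
    }

    int main(void)
    {
            uint32_t regs[2] = { 0, 0xdeadbeef };
            uint32_t payload = 0x01020304;

            load_payload(regs, &payload, sizeof(payload));
            printf("next register: %#x\n", regs[1]);  /* preserved */
            return 0;
    }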
3456 diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
3457 index 41d0e95d171e1..b1a1718495f34 100644
3458 --- a/net/netlabel/netlabel_domainhash.c
3459 +++ b/net/netlabel/netlabel_domainhash.c
3460 @@ -99,6 +99,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
3461 kfree(netlbl_domhsh_addr6_entry(iter6));
3462 }
3463 #endif /* IPv6 */
3464 + kfree(ptr->def.addrsel);
3465 }
3466 kfree(ptr->domain);
3467 kfree(ptr);
3468 @@ -550,6 +551,8 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
3469 goto add_return;
3470 }
3471 #endif /* IPv6 */
3472 + /* clean up the new entry since we've moved everything over */
3473 + netlbl_domhsh_free_entry(&entry->rcu);
3474 } else
3475 ret_val = -EINVAL;
3476
3477 @@ -593,6 +596,12 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
3478 {
3479 int ret_val = 0;
3480 struct audit_buffer *audit_buf;
3481 + struct netlbl_af4list *iter4;
3482 + struct netlbl_domaddr4_map *map4;
3483 +#if IS_ENABLED(CONFIG_IPV6)
3484 + struct netlbl_af6list *iter6;
3485 + struct netlbl_domaddr6_map *map6;
3486 +#endif /* IPv6 */
3487
3488 if (entry == NULL)
3489 return -ENOENT;
3490 @@ -610,6 +619,9 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
3491 ret_val = -ENOENT;
3492 spin_unlock(&netlbl_domhsh_lock);
3493
3494 + if (ret_val)
3495 + return ret_val;
3496 +
3497 audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info);
3498 if (audit_buf != NULL) {
3499 audit_log_format(audit_buf,
3500 @@ -619,40 +631,29 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
3501 audit_log_end(audit_buf);
3502 }
3503
3504 - if (ret_val == 0) {
3505 - struct netlbl_af4list *iter4;
3506 - struct netlbl_domaddr4_map *map4;
3507 -#if IS_ENABLED(CONFIG_IPV6)
3508 - struct netlbl_af6list *iter6;
3509 - struct netlbl_domaddr6_map *map6;
3510 -#endif /* IPv6 */
3511 -
3512 - switch (entry->def.type) {
3513 - case NETLBL_NLTYPE_ADDRSELECT:
3514 - netlbl_af4list_foreach_rcu(iter4,
3515 - &entry->def.addrsel->list4) {
3516 - map4 = netlbl_domhsh_addr4_entry(iter4);
3517 - cipso_v4_doi_putdef(map4->def.cipso);
3518 - }
3519 + switch (entry->def.type) {
3520 + case NETLBL_NLTYPE_ADDRSELECT:
3521 + netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) {
3522 + map4 = netlbl_domhsh_addr4_entry(iter4);
3523 + cipso_v4_doi_putdef(map4->def.cipso);
3524 + }
3525 #if IS_ENABLED(CONFIG_IPV6)
3526 - netlbl_af6list_foreach_rcu(iter6,
3527 - &entry->def.addrsel->list6) {
3528 - map6 = netlbl_domhsh_addr6_entry(iter6);
3529 - calipso_doi_putdef(map6->def.calipso);
3530 - }
3531 + netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) {
3532 + map6 = netlbl_domhsh_addr6_entry(iter6);
3533 + calipso_doi_putdef(map6->def.calipso);
3534 + }
3535 #endif /* IPv6 */
3536 - break;
3537 - case NETLBL_NLTYPE_CIPSOV4:
3538 - cipso_v4_doi_putdef(entry->def.cipso);
3539 - break;
3540 + break;
3541 + case NETLBL_NLTYPE_CIPSOV4:
3542 + cipso_v4_doi_putdef(entry->def.cipso);
3543 + break;
3544 #if IS_ENABLED(CONFIG_IPV6)
3545 - case NETLBL_NLTYPE_CALIPSO:
3546 - calipso_doi_putdef(entry->def.calipso);
3547 - break;
3548 + case NETLBL_NLTYPE_CALIPSO:
3549 + calipso_doi_putdef(entry->def.calipso);
3550 + break;
3551 #endif /* IPv6 */
3552 - }
3553 - call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
3554 }
3555 + call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
3556
3557 return ret_val;
3558 }
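The netlabel changes make teardown symmetric: the RCU callback now frees the
addrsel sub-allocation too, the merge path in netlbl_domhsh_add() releases
the duplicate entry it consumed, and removal always defers the free through
call_rcu() once the entry has left the hash. A minimal sketch of that
deferred-free shape (struct and names invented):

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct map_entry {
            char *domain;
            struct rcu_head rcu;
    };

    /* Free every sub-allocation in the RCU callback. */
    static void map_entry_free(struct rcu_head *rcu)
    {
            struct map_entry *e = container_of(rcu, struct map_entry, rcu);

            kfree(e->domain);
            kfree(e);
    }

    static void map_entry_remove(struct map_entry *e)
    {
            /* entry already unhashed; readers may still hold it */
            call_rcu(&e->rcu, map_entry_free);
    }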
3559 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3560 index 95f39dde1e08e..c0fe647dd4acb 100644
3561 --- a/net/sctp/socket.c
3562 +++ b/net/sctp/socket.c
3563 @@ -6687,8 +6687,6 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
3564
3565 pr_debug("%s: begins, snum:%d\n", __func__, snum);
3566
3567 - local_bh_disable();
3568 -
3569 if (snum == 0) {
3570 /* Search for an available port. */
3571 int low, high, remaining, index;
3572 @@ -6707,20 +6705,21 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
3573 continue;
3574 index = sctp_phashfn(sock_net(sk), rover);
3575 head = &sctp_port_hashtable[index];
3576 - spin_lock(&head->lock);
3577 + spin_lock_bh(&head->lock);
3578 sctp_for_each_hentry(pp, &head->chain)
3579 if ((pp->port == rover) &&
3580 net_eq(sock_net(sk), pp->net))
3581 goto next;
3582 break;
3583 next:
3584 - spin_unlock(&head->lock);
3585 + spin_unlock_bh(&head->lock);
3586 + cond_resched();
3587 } while (--remaining > 0);
3588
3589 /* Exhausted local port range during search? */
3590 ret = 1;
3591 if (remaining <= 0)
3592 - goto fail;
3593 + return ret;
3594
3595 /* OK, here is the one we will use. HEAD (the port
3596 * hash table list entry) is non-NULL and we hold its
3597 @@ -6735,7 +6734,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
3598 * port iterator, pp being NULL.
3599 */
3600 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
3601 - spin_lock(&head->lock);
3602 + spin_lock_bh(&head->lock);
3603 sctp_for_each_hentry(pp, &head->chain) {
3604 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
3605 goto pp_found;
3606 @@ -6819,10 +6818,7 @@ success:
3607 ret = 0;
3608
3609 fail_unlock:
3610 - spin_unlock(&head->lock);
3611 -
3612 -fail:
3613 - local_bh_enable();
3614 + spin_unlock_bh(&head->lock);
3615 return ret;
3616 }
3617
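The sctp rework trades one long local_bh_disable() section for per-bucket
spin_lock_bh()/spin_unlock_bh() pairs, and inserts cond_resched() between
buckets so an exhaustive port search cannot monopolize the CPU with BHs off.
A generic sketch of that locking shape (names invented):

    #include <linux/sched.h>
    #include <linux/spinlock.h>

    static int scan_buckets(spinlock_t *locks, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    spin_lock_bh(&locks[i]);
                    /* ... inspect bucket i ... */
                    spin_unlock_bh(&locks[i]);
                    cond_resched();         /* legal here: BHs re-enabled */
            }
            return -1;      /* nothing found */
    }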
3618 diff --git a/net/wireless/reg.c b/net/wireless/reg.c
3619 index 6d5f3f737207d..a649763b854d5 100644
3620 --- a/net/wireless/reg.c
3621 +++ b/net/wireless/reg.c
3622 @@ -2321,6 +2321,9 @@ int regulatory_hint_user(const char *alpha2,
3623 if (WARN_ON(!alpha2))
3624 return -EINVAL;
3625
3626 + if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2))
3627 + return -EINVAL;
3628 +
3629 request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
3630 if (!request)
3631 return -ENOMEM;
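The regulatory fix validates the country code before any allocation or
queueing happens. A userspace model of the accepted inputs (mirroring what
is_world_regdom() and is_an_alpha2() check in reg.c):

    #include <ctype.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool valid_alpha2(const char *a)
    {
            if (a[0] == '0' && a[1] == '0')         /* world regdomain */
                    return true;
            return isalpha((unsigned char)a[0]) &&
                   isalpha((unsigned char)a[1]);
    }

    int main(void)
    {
            printf("%d %d %d\n", valid_alpha2("DE"),
                   valid_alpha2("00"), valid_alpha2("1!"));     /* 1 1 0 */
            return 0;
    }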
3632 diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
3633 index 55171647f5167..9432387dc1789 100755
3634 --- a/scripts/checkpatch.pl
3635 +++ b/scripts/checkpatch.pl
3636 @@ -2375,8 +2375,8 @@ sub process {
3637
3638 # Check if the commit log has what seems like a diff which can confuse patch
3639 if ($in_commit_log && !$commit_log_has_diff &&
3640 - (($line =~ m@^\s+diff\b.*a/[\w/]+@ &&
3641 - $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) ||
3642 + (($line =~ m@^\s+diff\b.*a/([\w/]+)@ &&
3643 + $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) ||
3644 $line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ ||
3645 $line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) {
3646 ERROR("DIFF_IN_COMMIT_MSG",
3647 diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c
3648 index 3788906421a73..fe27034f28460 100644
3649 --- a/sound/core/oss/mulaw.c
3650 +++ b/sound/core/oss/mulaw.c
3651 @@ -329,8 +329,8 @@ int snd_pcm_plugin_build_mulaw(struct snd_pcm_substream *plug,
3652 snd_BUG();
3653 return -EINVAL;
3654 }
3655 - if (snd_BUG_ON(!snd_pcm_format_linear(format->format)))
3656 - return -ENXIO;
3657 + if (!snd_pcm_format_linear(format->format))
3658 + return -EINVAL;
3659
3660 err = snd_pcm_plugin_build(plug, "Mu-Law<->linear conversion",
3661 src_format, dst_format,
3662 diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
3663 index ef689997d6a5b..bf53e342788e2 100644
3664 --- a/sound/firewire/digi00x/digi00x.c
3665 +++ b/sound/firewire/digi00x/digi00x.c
3666 @@ -15,6 +15,7 @@ MODULE_LICENSE("GPL v2");
3667 #define VENDOR_DIGIDESIGN 0x00a07e
3668 #define MODEL_CONSOLE 0x000001
3669 #define MODEL_RACK 0x000002
3670 +#define SPEC_VERSION 0x000001
3671
3672 static int name_card(struct snd_dg00x *dg00x)
3673 {
3674 @@ -185,14 +186,18 @@ static const struct ieee1394_device_id snd_dg00x_id_table[] = {
3675 /* Both the 002 and 003 use the same ID. */
3676 {
3677 .match_flags = IEEE1394_MATCH_VENDOR_ID |
3678 + IEEE1394_MATCH_VERSION |
3679 IEEE1394_MATCH_MODEL_ID,
3680 .vendor_id = VENDOR_DIGIDESIGN,
3681 + .version = SPEC_VERSION,
3682 .model_id = MODEL_CONSOLE,
3683 },
3684 {
3685 .match_flags = IEEE1394_MATCH_VENDOR_ID |
3686 + IEEE1394_MATCH_VERSION |
3687 IEEE1394_MATCH_MODEL_ID,
3688 .vendor_id = VENDOR_DIGIDESIGN,
3689 + .version = SPEC_VERSION,
3690 .model_id = MODEL_RACK,
3691 },
3692 {}
3693 diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
3694 index 4c967ac1c0e83..40ed4c92e48bd 100644
3695 --- a/sound/firewire/tascam/tascam.c
3696 +++ b/sound/firewire/tascam/tascam.c
3697 @@ -225,11 +225,39 @@ static void snd_tscm_remove(struct fw_unit *unit)
3698 }
3699
3700 static const struct ieee1394_device_id snd_tscm_id_table[] = {
3701 + // Tascam, FW-1884.
3702 {
3703 .match_flags = IEEE1394_MATCH_VENDOR_ID |
3704 - IEEE1394_MATCH_SPECIFIER_ID,
3705 + IEEE1394_MATCH_SPECIFIER_ID |
3706 + IEEE1394_MATCH_VERSION,
3707 .vendor_id = 0x00022e,
3708 .specifier_id = 0x00022e,
3709 + .version = 0x800000,
3710 + },
3711 + // Tascam, FE-8 (.version = 0x800001)
3712 + // This kernel module doesn't support the FE-8 because most of its
3713 + // features can be implemented in userspace without any specific
3714 + // support from this module.
3715 + //
3716 + // .version = 0x800002 is unknown.
3717 + //
3718 + // Tascam, FW-1082.
3719 + {
3720 + .match_flags = IEEE1394_MATCH_VENDOR_ID |
3721 + IEEE1394_MATCH_SPECIFIER_ID |
3722 + IEEE1394_MATCH_VERSION,
3723 + .vendor_id = 0x00022e,
3724 + .specifier_id = 0x00022e,
3725 + .version = 0x800003,
3726 + },
3727 + // Tascam, FW-1804.
3728 + {
3729 + .match_flags = IEEE1394_MATCH_VENDOR_ID |
3730 + IEEE1394_MATCH_SPECIFIER_ID |
3731 + IEEE1394_MATCH_VERSION,
3732 + .vendor_id = 0x00022e,
3733 + .specifier_id = 0x00022e,
3734 + .version = 0x800004,
3735 },
3736 /* FE-8 requires reverse-engineering because it just has faders. */
3737 {}
3738 diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
3739 index 6165a57a94aea..2c30a0672c17f 100644
3740 --- a/sound/pci/ca0106/ca0106_main.c
3741 +++ b/sound/pci/ca0106/ca0106_main.c
3742 @@ -551,7 +551,8 @@ static int snd_ca0106_pcm_power_dac(struct snd_ca0106 *chip, int channel_id,
3743 else
3744 /* Power down */
3745 chip->spi_dac_reg[reg] |= bit;
3746 - return snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]);
3747 + if (snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]) != 0)
3748 + return -ENXIO;
3749 }
3750 return 0;
3751 }
3752 diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
3753 index 92335193dc338..d443ca3abf27d 100644
3754 --- a/tools/perf/Documentation/perf-record.txt
3755 +++ b/tools/perf/Documentation/perf-record.txt
3756 @@ -33,6 +33,10 @@ OPTIONS
3757 - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
3758 hexadecimal event descriptor.
3759
3760 + - a symbolic or raw PMU event followed by an optional colon
3761 + and a list of event modifiers, e.g., cpu-cycles:p. See the
3762 + linkperf:perf-list[1] man page for details on event modifiers.
3763 +
3764 - a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where
3765 'param1', 'param2', etc are defined as formats for the PMU in
3766 /sys/bus/event_source/devices/<pmu>/format/*.
3767 diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
3768 index d96ccd4844df9..b099ac1de8546 100644
3769 --- a/tools/perf/Documentation/perf-stat.txt
3770 +++ b/tools/perf/Documentation/perf-stat.txt
3771 @@ -39,6 +39,10 @@ report::
3772 - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
3773 hexadecimal event descriptor.
3774
3775 + - a symbolic or raw PMU event followed by an optional colon
3776 + and a list of event modifiers, e.g., cpu-cycles:p. See the
3777 + linkperf:perf-list[1] man page for details on event modifiers.
3778 +
3779 - a symbolically formed event like 'pmu/param1=0x3,param2/' where
3780 param1 and param2 are defined as formats for the PMU in
3781 /sys/bus/event_sources/devices/<pmu>/format/*