Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0290-4.9.191-all-fixes.patch



Revision 3565
Thu Aug 13 10:21:10 2020 UTC by niro
File size: 89400 bytes
linux-191
1 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
2 index f4f0a1b9ba29..61b73e42f488 100644
3 --- a/Documentation/kernel-parameters.txt
4 +++ b/Documentation/kernel-parameters.txt
5 @@ -3829,6 +3829,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
6 Run specified binary instead of /init from the ramdisk,
7 used for early userspace startup. See initrd.
8
9 + rdrand= [X86]
10 + force - Override the decision by the kernel to hide the
11 + advertisement of RDRAND support (this affects
12 + certain AMD processors because of buggy BIOS
13 + support, specifically around the suspend/resume
14 + path).
15 +
16 reboot= [KNL]
17 Format (x86 or x86_64):
18 [w[arm] | c[old] | h[ard] | s[oft] | g[pio]] \
19 diff --git a/Makefile b/Makefile
20 index 4b6cf4641eba..311e861afb15 100644
21 --- a/Makefile
22 +++ b/Makefile
23 @@ -1,6 +1,6 @@
24 VERSION = 4
25 PATCHLEVEL = 9
26 -SUBLEVEL = 190
27 +SUBLEVEL = 191
28 EXTRAVERSION =
29 NAME = Roaring Lionus
30
31 diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
32 index c5bc344fc745..73039746ae36 100644
33 --- a/arch/mips/kernel/i8253.c
34 +++ b/arch/mips/kernel/i8253.c
35 @@ -31,7 +31,8 @@ void __init setup_pit_timer(void)
36
37 static int __init init_pit_clocksource(void)
38 {
39 - if (num_possible_cpus() > 1) /* PIT does not scale! */
40 + if (num_possible_cpus() > 1 || /* PIT does not scale! */
41 + !clockevent_state_periodic(&i8253_clockevent))
42 return 0;
43
44 return clocksource_i8253_init();
45 diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
46 index 4a8cb8d7cbd5..0232b5a2a2d9 100644
47 --- a/arch/x86/include/asm/bootparam_utils.h
48 +++ b/arch/x86/include/asm/bootparam_utils.h
49 @@ -17,6 +17,20 @@
50 * Note: efi_info is commonly left uninitialized, but that field has a
51 * private magic, so it is better to leave it unchanged.
52 */
53 +
54 +#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
55 +
56 +#define BOOT_PARAM_PRESERVE(struct_member) \
57 + { \
58 + .start = offsetof(struct boot_params, struct_member), \
59 + .len = sizeof_mbr(struct boot_params, struct_member), \
60 + }
61 +
62 +struct boot_params_to_save {
63 + unsigned int start;
64 + unsigned int len;
65 +};
66 +
67 static void sanitize_boot_params(struct boot_params *boot_params)
68 {
69 /*
70 @@ -35,19 +49,39 @@ static void sanitize_boot_params(struct boot_params *boot_params)
71 */
72 if (boot_params->sentinel) {
73 /* fields in boot_params are left uninitialized, clear them */
74 - memset(&boot_params->ext_ramdisk_image, 0,
75 - (char *)&boot_params->efi_info -
76 - (char *)&boot_params->ext_ramdisk_image);
77 - memset(&boot_params->kbd_status, 0,
78 - (char *)&boot_params->hdr -
79 - (char *)&boot_params->kbd_status);
80 - memset(&boot_params->_pad7[0], 0,
81 - (char *)&boot_params->edd_mbr_sig_buffer[0] -
82 - (char *)&boot_params->_pad7[0]);
83 - memset(&boot_params->_pad8[0], 0,
84 - (char *)&boot_params->eddbuf[0] -
85 - (char *)&boot_params->_pad8[0]);
86 - memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
87 + static struct boot_params scratch;
88 + char *bp_base = (char *)boot_params;
89 + char *save_base = (char *)&scratch;
90 + int i;
91 +
92 + const struct boot_params_to_save to_save[] = {
93 + BOOT_PARAM_PRESERVE(screen_info),
94 + BOOT_PARAM_PRESERVE(apm_bios_info),
95 + BOOT_PARAM_PRESERVE(tboot_addr),
96 + BOOT_PARAM_PRESERVE(ist_info),
97 + BOOT_PARAM_PRESERVE(hd0_info),
98 + BOOT_PARAM_PRESERVE(hd1_info),
99 + BOOT_PARAM_PRESERVE(sys_desc_table),
100 + BOOT_PARAM_PRESERVE(olpc_ofw_header),
101 + BOOT_PARAM_PRESERVE(efi_info),
102 + BOOT_PARAM_PRESERVE(alt_mem_k),
103 + BOOT_PARAM_PRESERVE(scratch),
104 + BOOT_PARAM_PRESERVE(e820_entries),
105 + BOOT_PARAM_PRESERVE(eddbuf_entries),
106 + BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
107 + BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
108 + BOOT_PARAM_PRESERVE(hdr),
109 + BOOT_PARAM_PRESERVE(eddbuf),
110 + };
111 +
112 + memset(&scratch, 0, sizeof(scratch));
113 +
114 + for (i = 0; i < ARRAY_SIZE(to_save); i++) {
115 + memcpy(save_base + to_save[i].start,
116 + bp_base + to_save[i].start, to_save[i].len);
117 + }
118 +
119 + memcpy(boot_params, save_base, sizeof(*boot_params));
120 }
121 }
122
123 diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
124 index 38f94d07920d..86166868db8c 100644
125 --- a/arch/x86/include/asm/msr-index.h
126 +++ b/arch/x86/include/asm/msr-index.h
127 @@ -313,6 +313,7 @@
128 #define MSR_AMD64_PATCH_LEVEL 0x0000008b
129 #define MSR_AMD64_TSC_RATIO 0xc0000104
130 #define MSR_AMD64_NB_CFG 0xc001001f
131 +#define MSR_AMD64_CPUID_FN_1 0xc0011004
132 #define MSR_AMD64_PATCH_LOADER 0xc0010020
133 #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
134 #define MSR_AMD64_OSVW_STATUS 0xc0010141
135 diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
136 index 031a58e84e5b..10a48505abb5 100644
137 --- a/arch/x86/include/asm/nospec-branch.h
138 +++ b/arch/x86/include/asm/nospec-branch.h
139 @@ -196,7 +196,7 @@
140 " lfence;\n" \
141 " jmp 902b;\n" \
142 " .align 16\n" \
143 - "903: addl $4, %%esp;\n" \
144 + "903: lea 4(%%esp), %%esp;\n" \
145 " pushl %[thunk_target];\n" \
146 " ret;\n" \
147 " .align 16\n" \
148 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
149 index 2b5d686ea9f3..ea78a8438a8a 100644
150 --- a/arch/x86/include/asm/ptrace.h
151 +++ b/arch/x86/include/asm/ptrace.h
152 @@ -115,9 +115,9 @@ static inline int v8086_mode(struct pt_regs *regs)
153 #endif
154 }
155
156 -#ifdef CONFIG_X86_64
157 static inline bool user_64bit_mode(struct pt_regs *regs)
158 {
159 +#ifdef CONFIG_X86_64
160 #ifndef CONFIG_PARAVIRT
161 /*
162 * On non-paravirt systems, this is the only long mode CPL 3
163 @@ -128,8 +128,12 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
164 /* Headers are too twisted for this to go in paravirt.h. */
165 return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
166 #endif
167 +#else /* !CONFIG_X86_64 */
168 + return false;
169 +#endif
170 }
171
172 +#ifdef CONFIG_X86_64
173 #define current_user_stack_pointer() current_pt_regs()->sp
174 #define compat_user_stack_pointer() current_pt_regs()->sp
175 #endif
176 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
177 index cc9a6f680225..37666c536741 100644
178 --- a/arch/x86/kernel/apic/apic.c
179 +++ b/arch/x86/kernel/apic/apic.c
180 @@ -629,7 +629,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
181 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
182
183 /*
184 - * Temporary interrupt handler.
185 + * Temporary interrupt handler and polled calibration function.
186 */
187 static void __init lapic_cal_handler(struct clock_event_device *dev)
188 {
189 @@ -713,7 +713,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
190 static int __init calibrate_APIC_clock(void)
191 {
192 struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
193 - void (*real_handler)(struct clock_event_device *dev);
194 + u64 tsc_perj = 0, tsc_start = 0;
195 + unsigned long jif_start;
196 unsigned long deltaj;
197 long delta, deltatsc;
198 int pm_referenced = 0;
199 @@ -742,28 +743,64 @@ static int __init calibrate_APIC_clock(void)
200 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
201 "calibrating APIC timer ...\n");
202
203 + /*
204 + * There are platforms w/o global clockevent devices. Instead of
205 + * making the calibration conditional on that, use a polling based
206 + * approach everywhere.
207 + */
208 local_irq_disable();
209
210 - /* Replace the global interrupt handler */
211 - real_handler = global_clock_event->event_handler;
212 - global_clock_event->event_handler = lapic_cal_handler;
213 -
214 /*
215 * Setup the APIC counter to maximum. There is no way the lapic
216 * can underflow in the 100ms detection time frame
217 */
218 __setup_APIC_LVTT(0xffffffff, 0, 0);
219
220 - /* Let the interrupts run */
221 + /*
222 + * Methods to terminate the calibration loop:
223 + * 1) Global clockevent if available (jiffies)
224 + * 2) TSC if available and frequency is known
225 + */
226 + jif_start = READ_ONCE(jiffies);
227 +
228 + if (tsc_khz) {
229 + tsc_start = rdtsc();
230 + tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
231 + }
232 +
233 + /*
234 + * Enable interrupts so the tick can fire, if a global
235 + * clockevent device is available
236 + */
237 local_irq_enable();
238
239 - while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
240 - cpu_relax();
241 + while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
242 + /* Wait for a tick to elapse */
243 + while (1) {
244 + if (tsc_khz) {
245 + u64 tsc_now = rdtsc();
246 + if ((tsc_now - tsc_start) >= tsc_perj) {
247 + tsc_start += tsc_perj;
248 + break;
249 + }
250 + } else {
251 + unsigned long jif_now = READ_ONCE(jiffies);
252
253 - local_irq_disable();
254 + if (time_after(jif_now, jif_start)) {
255 + jif_start = jif_now;
256 + break;
257 + }
258 + }
259 + cpu_relax();
260 + }
261
262 - /* Restore the real event handler */
263 - global_clock_event->event_handler = real_handler;
264 + /* Invoke the calibration routine */
265 + local_irq_disable();
266 + lapic_cal_handler(NULL);
267 + local_irq_enable();
268 + }
269 +
270 + local_irq_disable();
271
272 /* Build delta t1-t2 as apic timer counts down */
273 delta = lapic_cal_t1 - lapic_cal_t2;
274 @@ -814,10 +851,11 @@ static int __init calibrate_APIC_clock(void)
275 levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
276
277 /*
278 - * PM timer calibration failed or not turned on
279 - * so lets try APIC timer based calibration
280 + * PM timer calibration failed or not turned on so lets try APIC
281 + * timer based calibration, if a global clockevent device is
282 + * available.
283 */
284 - if (!pm_referenced) {
285 + if (!pm_referenced && global_clock_event) {
286 apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
287
288 /*
289 @@ -1029,6 +1067,10 @@ void clear_local_APIC(void)
290 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
291 v = apic_read(APIC_LVT1);
292 apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
293 + if (!x2apic_enabled()) {
294 + v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
295 + apic_write(APIC_LDR, v);
296 + }
297 if (maxlvt >= 4) {
298 v = apic_read(APIC_LVTPC);
299 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
300 diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
301 index 56012010332c..76fe153ccc6d 100644
302 --- a/arch/x86/kernel/apic/bigsmp_32.c
303 +++ b/arch/x86/kernel/apic/bigsmp_32.c
304 @@ -37,32 +37,12 @@ static int bigsmp_early_logical_apicid(int cpu)
305 return early_per_cpu(x86_cpu_to_apicid, cpu);
306 }
307
308 -static inline unsigned long calculate_ldr(int cpu)
309 -{
310 - unsigned long val, id;
311 -
312 - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
313 - id = per_cpu(x86_bios_cpu_apicid, cpu);
314 - val |= SET_APIC_LOGICAL_ID(id);
315 -
316 - return val;
317 -}
318 -
319 /*
320 - * Set up the logical destination ID.
321 - *
322 - * Intel recommends to set DFR, LDR and TPR before enabling
323 - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
324 - * document number 292116). So here it goes...
325 + * bigsmp enables physical destination mode
326 + * and doesn't use LDR and DFR
327 */
328 static void bigsmp_init_apic_ldr(void)
329 {
330 - unsigned long val;
331 - int cpu = smp_processor_id();
332 -
333 - apic_write(APIC_DFR, APIC_DFR_FLAT);
334 - val = calculate_ldr(cpu);
335 - apic_write(APIC_LDR, val);
336 }
337
338 static void bigsmp_setup_apic_routing(void)
339 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
340 index 52a65f14db06..9428b54fff66 100644
341 --- a/arch/x86/kernel/cpu/amd.c
342 +++ b/arch/x86/kernel/cpu/amd.c
343 @@ -746,6 +746,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
344 msr_set_bit(MSR_AMD64_DE_CFG, 31);
345 }
346
347 +static bool rdrand_force;
348 +
349 +static int __init rdrand_cmdline(char *str)
350 +{
351 + if (!str)
352 + return -EINVAL;
353 +
354 + if (!strcmp(str, "force"))
355 + rdrand_force = true;
356 + else
357 + return -EINVAL;
358 +
359 + return 0;
360 +}
361 +early_param("rdrand", rdrand_cmdline);
362 +
363 +static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
364 +{
365 + /*
366 + * Saving of the MSR used to hide the RDRAND support during
367 + * suspend/resume is done by arch/x86/power/cpu.c, which is
368 + * dependent on CONFIG_PM_SLEEP.
369 + */
370 + if (!IS_ENABLED(CONFIG_PM_SLEEP))
371 + return;
372 +
373 + /*
374 + * The nordrand option can clear X86_FEATURE_RDRAND, so check for
375 + * RDRAND support using the CPUID function directly.
376 + */
377 + if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
378 + return;
379 +
380 + msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
381 +
382 + /*
383 + * Verify that the CPUID change has occurred in case the kernel is
384 + * running virtualized and the hypervisor doesn't support the MSR.
385 + */
386 + if (cpuid_ecx(1) & BIT(30)) {
387 + pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
388 + return;
389 + }
390 +
391 + clear_cpu_cap(c, X86_FEATURE_RDRAND);
392 + pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
393 +}
394 +
395 +static void init_amd_jg(struct cpuinfo_x86 *c)
396 +{
397 + /*
398 + * Some BIOS implementations do not restore proper RDRAND support
399 + * across suspend and resume. Check on whether to hide the RDRAND
400 + * instruction support via CPUID.
401 + */
402 + clear_rdrand_cpuid_bit(c);
403 +}
404 +
405 static void init_amd_bd(struct cpuinfo_x86 *c)
406 {
407 u64 value;
408 @@ -760,6 +818,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
409 wrmsrl_safe(MSR_F15H_IC_CFG, value);
410 }
411 }
412 +
413 + /*
414 + * Some BIOS implementations do not restore proper RDRAND support
415 + * across suspend and resume. Check on whether to hide the RDRAND
416 + * instruction support via CPUID.
417 + */
418 + clear_rdrand_cpuid_bit(c);
419 }
420
421 static void init_amd_zn(struct cpuinfo_x86 *c)
422 @@ -804,6 +869,7 @@ static void init_amd(struct cpuinfo_x86 *c)
423 case 0x10: init_amd_gh(c); break;
424 case 0x12: init_amd_ln(c); break;
425 case 0x15: init_amd_bd(c); break;
426 + case 0x16: init_amd_jg(c); break;
427 case 0x17: init_amd_zn(c); break;
428 }
429
430 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
431 index 8d20fb09722c..7f377f8792aa 100644
432 --- a/arch/x86/kernel/ptrace.c
433 +++ b/arch/x86/kernel/ptrace.c
434 @@ -651,11 +651,10 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
435 {
436 struct thread_struct *thread = &tsk->thread;
437 unsigned long val = 0;
438 - int index = n;
439
440 if (n < HBP_NUM) {
441 + int index = array_index_nospec(n, HBP_NUM);
442 struct perf_event *bp = thread->ptrace_bps[index];
443 - index = array_index_nospec(index, HBP_NUM);
444
445 if (bp)
446 val = bp->hw.info.address;
447 diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
448 index e78a6b1db74b..e35466afe989 100644
449 --- a/arch/x86/kernel/uprobes.c
450 +++ b/arch/x86/kernel/uprobes.c
451 @@ -514,9 +514,12 @@ struct uprobe_xol_ops {
452 void (*abort)(struct arch_uprobe *, struct pt_regs *);
453 };
454
455 -static inline int sizeof_long(void)
456 +static inline int sizeof_long(struct pt_regs *regs)
457 {
458 - return in_ia32_syscall() ? 4 : 8;
459 + /*
460 + * Check registers for mode as in_xxx_syscall() does not apply here.
461 + */
462 + return user_64bit_mode(regs) ? 8 : 4;
463 }
464
465 static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
466 @@ -527,9 +530,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
467
468 static int push_ret_address(struct pt_regs *regs, unsigned long ip)
469 {
470 - unsigned long new_sp = regs->sp - sizeof_long();
471 + unsigned long new_sp = regs->sp - sizeof_long(regs);
472
473 - if (copy_to_user((void __user *)new_sp, &ip, sizeof_long()))
474 + if (copy_to_user((void __user *)new_sp, &ip, sizeof_long(regs)))
475 return -EFAULT;
476
477 regs->sp = new_sp;
478 @@ -562,7 +565,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
479 long correction = utask->vaddr - utask->xol_vaddr;
480 regs->ip += correction;
481 } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
482 - regs->sp += sizeof_long(); /* Pop incorrect return address */
483 + regs->sp += sizeof_long(regs); /* Pop incorrect return address */
484 if (push_ret_address(regs, utask->vaddr + auprobe->defparam.ilen))
485 return -ERESTART;
486 }
487 @@ -671,7 +674,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
488 * "call" insn was executed out-of-line. Just restore ->sp and restart.
489 * We could also restore ->ip and try to call branch_emulate_op() again.
490 */
491 - regs->sp += sizeof_long();
492 + regs->sp += sizeof_long(regs);
493 return -ERESTART;
494 }
495
496 @@ -962,7 +965,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
497 unsigned long
498 arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
499 {
500 - int rasize = sizeof_long(), nleft;
501 + int rasize = sizeof_long(regs), nleft;
502 unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
503
504 if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
505 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
506 index 8b06700d1676..bbecbf2b1f5e 100644
507 --- a/arch/x86/kvm/x86.c
508 +++ b/arch/x86/kvm/x86.c
509 @@ -5823,12 +5823,13 @@ restart:
510 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
511 toggle_interruptibility(vcpu, ctxt->interruptibility);
512 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
513 - kvm_rip_write(vcpu, ctxt->eip);
514 - if (r == EMULATE_DONE && ctxt->tf)
515 - kvm_vcpu_do_singlestep(vcpu, &r);
516 if (!ctxt->have_exception ||
517 - exception_type(ctxt->exception.vector) == EXCPT_TRAP)
518 + exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
519 + kvm_rip_write(vcpu, ctxt->eip);
520 + if (r == EMULATE_DONE && ctxt->tf)
521 + kvm_vcpu_do_singlestep(vcpu, &r);
522 __kvm_set_rflags(vcpu, ctxt->eflags);
523 + }
524
525 /*
526 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
527 diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
528 index 2dd1fe13a37b..19f707992db2 100644
529 --- a/arch/x86/lib/cpu.c
530 +++ b/arch/x86/lib/cpu.c
531 @@ -1,5 +1,6 @@
532 #include <linux/types.h>
533 #include <linux/export.h>
534 +#include <asm/cpu.h>
535
536 unsigned int x86_family(unsigned int sig)
537 {
538 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
539 index 29dc59baf0c2..c8f947a4aaf2 100644
540 --- a/arch/x86/power/cpu.c
541 +++ b/arch/x86/power/cpu.c
542 @@ -13,6 +13,7 @@
543 #include <linux/smp.h>
544 #include <linux/perf_event.h>
545 #include <linux/tboot.h>
546 +#include <linux/dmi.h>
547
548 #include <asm/pgtable.h>
549 #include <asm/proto.h>
550 @@ -24,7 +25,7 @@
551 #include <asm/debugreg.h>
552 #include <asm/cpu.h>
553 #include <asm/mmu_context.h>
554 -#include <linux/dmi.h>
555 +#include <asm/cpu_device_id.h>
556
557 #ifdef CONFIG_X86_32
558 __visible unsigned long saved_context_ebx;
559 @@ -391,15 +392,14 @@ static int __init bsp_pm_check_init(void)
560
561 core_initcall(bsp_pm_check_init);
562
563 -static int msr_init_context(const u32 *msr_id, const int total_num)
564 +static int msr_build_context(const u32 *msr_id, const int num)
565 {
566 - int i = 0;
567 + struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
568 struct saved_msr *msr_array;
569 + int total_num;
570 + int i, j;
571
572 - if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
573 - pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
574 - return -EINVAL;
575 - }
576 + total_num = saved_msrs->num + num;
577
578 msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
579 if (!msr_array) {
580 @@ -407,19 +407,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
581 return -ENOMEM;
582 }
583
584 - for (i = 0; i < total_num; i++) {
585 - msr_array[i].info.msr_no = msr_id[i];
586 + if (saved_msrs->array) {
587 + /*
588 + * Multiple callbacks can invoke this function, so copy any
589 + * MSR save requests from previous invocations.
590 + */
591 + memcpy(msr_array, saved_msrs->array,
592 + sizeof(struct saved_msr) * saved_msrs->num);
593 +
594 + kfree(saved_msrs->array);
595 + }
596 +
597 + for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
598 + msr_array[i].info.msr_no = msr_id[j];
599 msr_array[i].valid = false;
600 msr_array[i].info.reg.q = 0;
601 }
602 - saved_context.saved_msrs.num = total_num;
603 - saved_context.saved_msrs.array = msr_array;
604 + saved_msrs->num = total_num;
605 + saved_msrs->array = msr_array;
606
607 return 0;
608 }
609
610 /*
611 - * The following section is a quirk framework for problematic BIOSen:
612 + * The following sections are a quirk framework for problematic BIOSen:
613 * Sometimes MSRs are modified by the BIOSen after suspended to
614 * RAM, this might cause unexpected behavior after wakeup.
615 * Thus we save/restore these specified MSRs across suspend/resume
616 @@ -434,7 +445,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
617 u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
618
619 pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
620 - return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
621 + return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
622 }
623
624 static struct dmi_system_id msr_save_dmi_table[] = {
625 @@ -449,9 +460,58 @@ static struct dmi_system_id msr_save_dmi_table[] = {
626 {}
627 };
628
629 +static int msr_save_cpuid_features(const struct x86_cpu_id *c)
630 +{
631 + u32 cpuid_msr_id[] = {
632 + MSR_AMD64_CPUID_FN_1,
633 + };
634 +
635 + pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
636 + c->family);
637 +
638 + return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
639 +}
640 +
641 +static const struct x86_cpu_id msr_save_cpu_table[] = {
642 + {
643 + .vendor = X86_VENDOR_AMD,
644 + .family = 0x15,
645 + .model = X86_MODEL_ANY,
646 + .feature = X86_FEATURE_ANY,
647 + .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
648 + },
649 + {
650 + .vendor = X86_VENDOR_AMD,
651 + .family = 0x16,
652 + .model = X86_MODEL_ANY,
653 + .feature = X86_FEATURE_ANY,
654 + .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
655 + },
656 + {}
657 +};
658 +
659 +typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
660 +static int pm_cpu_check(const struct x86_cpu_id *c)
661 +{
662 + const struct x86_cpu_id *m;
663 + int ret = 0;
664 +
665 + m = x86_match_cpu(msr_save_cpu_table);
666 + if (m) {
667 + pm_cpu_match_t fn;
668 +
669 + fn = (pm_cpu_match_t)m->driver_data;
670 + ret = fn(m);
671 + }
672 +
673 + return ret;
674 +}
675 +
676 static int pm_check_save_msr(void)
677 {
678 dmi_check_system(msr_save_dmi_table);
679 + pm_cpu_check(msr_save_cpu_table);
680 +
681 return 0;
682 }
683
684 diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
685 index 8d22acdf90f0..0e2bc5b9a78c 100644
686 --- a/drivers/ata/libata-sff.c
687 +++ b/drivers/ata/libata-sff.c
688 @@ -703,6 +703,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
689 unsigned int offset;
690 unsigned char *buf;
691
692 + if (!qc->cursg) {
693 + qc->curbytes = qc->nbytes;
694 + return;
695 + }
696 if (qc->curbytes == qc->nbytes - qc->sect_size)
697 ap->hsm_task_state = HSM_ST_LAST;
698
699 @@ -742,6 +746,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
700
701 if (qc->cursg_ofs == qc->cursg->length) {
702 qc->cursg = sg_next(qc->cursg);
703 + if (!qc->cursg)
704 + ap->hsm_task_state = HSM_ST_LAST;
705 qc->cursg_ofs = 0;
706 }
707 }
708 diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
709 index 5dfe6e8af140..ad736d7de838 100644
710 --- a/drivers/block/xen-blkback/xenbus.c
711 +++ b/drivers/block/xen-blkback/xenbus.c
712 @@ -967,6 +967,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
713 }
714 blkif->nr_ring_pages = nr_grefs;
715
716 + err = -ENOMEM;
717 for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
718 req = kzalloc(sizeof(*req), GFP_KERNEL);
719 if (!req)
720 @@ -989,7 +990,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
721 err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
722 if (err) {
723 xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
724 - return err;
725 + goto fail;
726 }
727
728 return 0;
729 @@ -1009,8 +1010,7 @@ fail:
730 }
731 kfree(req);
732 }
733 - return -ENOMEM;
734 -
735 + return err;
736 }
737
738 static int connect_ring(struct backend_info *be)
739 diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
740 index 8684d11b29bb..68b41daab3a8 100644
741 --- a/drivers/dma/ste_dma40.c
742 +++ b/drivers/dma/ste_dma40.c
743 @@ -142,7 +142,7 @@ enum d40_events {
744 * when the DMA hw is powered off.
745 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
746 */
747 -static u32 d40_backup_regs[] = {
748 +static __maybe_unused u32 d40_backup_regs[] = {
749 D40_DREG_LCPA,
750 D40_DREG_LCLA,
751 D40_DREG_PRMSE,
752 @@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
753
754 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
755
756 -static u32 d40_backup_regs_chan[] = {
757 +static __maybe_unused u32 d40_backup_regs_chan[] = {
758 D40_CHAN_REG_SSCFG,
759 D40_CHAN_REG_SSELT,
760 D40_CHAN_REG_SSPTR,
761 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
762 index d3675819f561..3b0d77b2fdc5 100644
763 --- a/drivers/gpio/gpiolib.c
764 +++ b/drivers/gpio/gpiolib.c
765 @@ -953,9 +953,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
766 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
767 lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
768 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
769 - lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
770 + lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
771 + GPIOLINE_FLAG_IS_OUT);
772 if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
773 - lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
774 + lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
775 + GPIOLINE_FLAG_IS_OUT);
776
777 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
778 return -EFAULT;
779 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
780 index e57a0bad7a62..77df50dd6d30 100644
781 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
782 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
783 @@ -300,8 +300,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
784 break;
785 }
786
787 - if (retries == RETRIES)
788 + if (retries == RETRIES) {
789 + kfree(reply);
790 return -EINVAL;
791 + }
792
793 *msg_len = reply_len;
794 *msg = reply;
795 diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
796 index 9428ea7cdf8a..c52bd163abb3 100644
797 --- a/drivers/hid/hid-a4tech.c
798 +++ b/drivers/hid/hid-a4tech.c
799 @@ -26,12 +26,36 @@
800 #define A4_2WHEEL_MOUSE_HACK_7 0x01
801 #define A4_2WHEEL_MOUSE_HACK_B8 0x02
802
803 +#define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8)
804 +
805 struct a4tech_sc {
806 unsigned long quirks;
807 unsigned int hw_wheel;
808 __s32 delayed_value;
809 };
810
811 +static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
812 + struct hid_field *field, struct hid_usage *usage,
813 + unsigned long **bit, int *max)
814 +{
815 + struct a4tech_sc *a4 = hid_get_drvdata(hdev);
816 +
817 + if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
818 + usage->hid == A4_WHEEL_ORIENTATION) {
819 + /*
820 + * We do not want to have this usage mapped to anything as it's
821 + * nonstandard and doesn't really behave like an HID report.
822 + * It's only selecting the orientation (vertical/horizontal) of
823 + * the previous mouse wheel report. The input_events will be
824 + * generated once both reports are recorded in a4_event().
825 + */
826 + return -1;
827 + }
828 +
829 + return 0;
830 +
831 +}
832 +
833 static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
834 struct hid_field *field, struct hid_usage *usage,
835 unsigned long **bit, int *max)
836 @@ -53,8 +77,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
837 struct a4tech_sc *a4 = hid_get_drvdata(hdev);
838 struct input_dev *input;
839
840 - if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
841 - !usage->type)
842 + if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
843 return 0;
844
845 input = field->hidinput->input;
846 @@ -65,7 +88,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
847 return 1;
848 }
849
850 - if (usage->hid == 0x000100b8) {
851 + if (usage->hid == A4_WHEEL_ORIENTATION) {
852 input_event(input, EV_REL, value ? REL_HWHEEL :
853 REL_WHEEL, a4->delayed_value);
854 return 1;
855 @@ -129,6 +152,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
856 static struct hid_driver a4_driver = {
857 .name = "a4tech",
858 .id_table = a4_devices,
859 + .input_mapping = a4_input_mapping,
860 .input_mapped = a4_input_mapped,
861 .event = a4_event,
862 .probe = a4_probe,
863 diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
864 index b83376077d72..cfa0cb22c9b3 100644
865 --- a/drivers/hid/hid-tmff.c
866 +++ b/drivers/hid/hid-tmff.c
867 @@ -34,6 +34,8 @@
868
869 #include "hid-ids.h"
870
871 +#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320
872 +
873 static const signed short ff_rumble[] = {
874 FF_RUMBLE,
875 -1
876 @@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data,
877 struct hid_field *ff_field = tmff->ff_field;
878 int x, y;
879 int left, right; /* Rumbling */
880 + int motor_swap;
881
882 switch (effect->type) {
883 case FF_CONSTANT:
884 @@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data,
885 ff_field->logical_minimum,
886 ff_field->logical_maximum);
887
888 + /* 2-in-1 strong motor is left */
889 + if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
890 + motor_swap = left;
891 + left = right;
892 + right = motor_swap;
893 + }
894 +
895 dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
896 ff_field->value[0] = left;
897 ff_field->value[1] = right;
898 @@ -238,6 +248,8 @@ static const struct hid_device_id tm_devices[] = {
899 .driver_data = (unsigned long)ff_rumble },
900 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
901 .driver_data = (unsigned long)ff_rumble },
902 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */
903 + .driver_data = (unsigned long)ff_rumble },
904 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
905 .driver_data = (unsigned long)ff_rumble },
906 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
907 diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
908 index 6c3bf8846b52..fbf14a14bdd4 100644
909 --- a/drivers/hid/wacom_wac.c
910 +++ b/drivers/hid/wacom_wac.c
911 @@ -819,7 +819,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
912 input_report_key(input, BTN_BASE2, (data[11] & 0x02));
913
914 if (data[12] & 0x80)
915 - input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
916 + input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
917 else
918 input_report_abs(input, ABS_WHEEL, 0);
919
920 @@ -949,6 +949,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
921 y >>= 1;
922 distance >>= 1;
923 }
924 + if (features->type == INTUOSHT2)
925 + distance = features->distance_max - distance;
926 input_report_abs(input, ABS_X, x);
927 input_report_abs(input, ABS_Y, y);
928 input_report_abs(input, ABS_DISTANCE, distance);
929 diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
930 index fd0ebec03ae7..beefec9701ed 100644
931 --- a/drivers/hwtracing/stm/core.c
932 +++ b/drivers/hwtracing/stm/core.c
933 @@ -1107,7 +1107,6 @@ int stm_source_register_device(struct device *parent,
934
935 err:
936 put_device(&src->dev);
937 - kfree(src);
938
939 return err;
940 }
941 diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
942 index 96bb4e749012..0218ba6eb26a 100644
943 --- a/drivers/i2c/busses/i2c-emev2.c
944 +++ b/drivers/i2c/busses/i2c-emev2.c
945 @@ -72,6 +72,7 @@ struct em_i2c_device {
946 struct completion msg_done;
947 struct clk *sclk;
948 struct i2c_client *slave;
949 + int irq;
950 };
951
952 static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
953 @@ -342,6 +343,12 @@ static int em_i2c_unreg_slave(struct i2c_client *slave)
954
955 writeb(0, priv->base + I2C_OFS_SVA0);
956
957 + /*
958 + * Wait for interrupt to finish. New slave irqs cannot happen because we
959 + * cleared the slave address and, thus, only extension codes will be
960 + * detected which do not use the slave ptr.
961 + */
962 + synchronize_irq(priv->irq);
963 priv->slave = NULL;
964
965 return 0;
966 @@ -358,7 +365,7 @@ static int em_i2c_probe(struct platform_device *pdev)
967 {
968 struct em_i2c_device *priv;
969 struct resource *r;
970 - int irq, ret;
971 + int ret;
972
973 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
974 if (!priv)
975 @@ -391,8 +398,8 @@ static int em_i2c_probe(struct platform_device *pdev)
976
977 em_i2c_reset(&priv->adap);
978
979 - irq = platform_get_irq(pdev, 0);
980 - ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0,
981 + priv->irq = platform_get_irq(pdev, 0);
982 + ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
983 "em_i2c", priv);
984 if (ret)
985 goto err_clk;
986 @@ -402,7 +409,8 @@ static int em_i2c_probe(struct platform_device *pdev)
987 if (ret)
988 goto err_clk;
989
990 - dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq);
991 + dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
992 + priv->irq);
993
994 return 0;
995
996 diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
997 index 8f1c5f24c1df..62785aa76b3f 100644
998 --- a/drivers/i2c/busses/i2c-piix4.c
999 +++ b/drivers/i2c/busses/i2c-piix4.c
1000 @@ -96,7 +96,7 @@
1001 #define SB800_PIIX4_PORT_IDX_MASK 0x06
1002 #define SB800_PIIX4_PORT_IDX_SHIFT 1
1003
1004 -/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
1005 +/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
1006 #define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
1007 #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
1008 #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
1009 @@ -355,18 +355,16 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
1010
1011 /* Find which register is used for port selection */
1012 if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
1013 - switch (PIIX4_dev->device) {
1014 - case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
1015 + if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS ||
1016 + (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
1017 + PIIX4_dev->revision >= 0x1F)) {
1018 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
1019 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
1020 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
1021 - break;
1022 - case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
1023 - default:
1024 + } else {
1025 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
1026 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
1027 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
1028 - break;
1029 }
1030 } else {
1031 mutex_lock(&piix4_mutex_sb800);
1032 diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
1033 index 1520e7f02c2f..89d191b6a0e0 100644
1034 --- a/drivers/iommu/dma-iommu.c
1035 +++ b/drivers/iommu/dma-iommu.c
1036 @@ -493,7 +493,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
1037 * - and wouldn't make the resulting output segment too long
1038 */
1039 if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
1040 - (cur_len + s_length <= max_len)) {
1041 + (max_len - cur_len >= s_length)) {
1042 /* ...then concatenate it with the previous one */
1043 cur_len += s_length;
1044 } else {
1045 diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
1046 index c60c7998af17..726fba452f5f 100644
1047 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c
1048 +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
1049 @@ -1402,6 +1402,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
1050 printk(KERN_DEBUG
1051 "%s: %s: alloc urb for fifo %i failed",
1052 hw->name, __func__, fifo->fifonum);
1053 + continue;
1054 }
1055 fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
1056 fifo->iso[i].indx = i;
1057 @@ -1700,13 +1701,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
1058 static int
1059 setup_hfcsusb(struct hfcsusb *hw)
1060 {
1061 + void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
1062 u_char b;
1063 + int ret;
1064
1065 if (debug & DBG_HFC_CALL_TRACE)
1066 printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
1067
1068 + if (!dmabuf)
1069 + return -ENOMEM;
1070 +
1071 + ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
1072 +
1073 + memcpy(&b, dmabuf, sizeof(u_char));
1074 + kfree(dmabuf);
1075 +
1076 /* check the chip id */
1077 - if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
1078 + if (ret != 1) {
1079 printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
1080 hw->name, __func__);
1081 return 1;
1082 diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
1083 index 673ce38735ff..c837defb5e4d 100644
1084 --- a/drivers/md/dm-bufio.c
1085 +++ b/drivers/md/dm-bufio.c
1086 @@ -1585,7 +1585,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1087 unsigned long freed;
1088
1089 c = container_of(shrink, struct dm_bufio_client, shrinker);
1090 - if (!dm_bufio_trylock(c))
1091 + if (sc->gfp_mask & __GFP_FS)
1092 + dm_bufio_lock(c);
1093 + else if (!dm_bufio_trylock(c))
1094 return SHRINK_STOP;
1095
1096 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1097 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
1098 index 5ac239d0f787..29deda7aed04 100644
1099 --- a/drivers/md/dm-table.c
1100 +++ b/drivers/md/dm-table.c
1101 @@ -1263,7 +1263,7 @@ void dm_table_event(struct dm_table *t)
1102 }
1103 EXPORT_SYMBOL(dm_table_event);
1104
1105 -sector_t dm_table_get_size(struct dm_table *t)
1106 +inline sector_t dm_table_get_size(struct dm_table *t)
1107 {
1108 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1109 }
1110 @@ -1288,6 +1288,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1111 unsigned int l, n = 0, k = 0;
1112 sector_t *node;
1113
1114 + if (unlikely(sector >= dm_table_get_size(t)))
1115 + return &t->targets[t->num_targets];
1116 +
1117 for (l = 0; l < t->depth; l++) {
1118 n = get_child(n, k);
1119 node = get_node(t, l, n);
1120 diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
1121 index e4ececd3df00..386215245dfe 100644
1122 --- a/drivers/md/persistent-data/dm-btree.c
1123 +++ b/drivers/md/persistent-data/dm-btree.c
1124 @@ -623,39 +623,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
1125
1126 new_parent = shadow_current(s);
1127
1128 + pn = dm_block_data(new_parent);
1129 + size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
1130 + sizeof(__le64) : s->info->value_type.size;
1131 +
1132 + /* create & init the left block */
1133 r = new_block(s->info, &left);
1134 if (r < 0)
1135 return r;
1136
1137 + ln = dm_block_data(left);
1138 + nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
1139 +
1140 + ln->header.flags = pn->header.flags;
1141 + ln->header.nr_entries = cpu_to_le32(nr_left);
1142 + ln->header.max_entries = pn->header.max_entries;
1143 + ln->header.value_size = pn->header.value_size;
1144 + memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
1145 + memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
1146 +
1147 + /* create & init the right block */
1148 r = new_block(s->info, &right);
1149 if (r < 0) {
1150 unlock_block(s->info, left);
1151 return r;
1152 }
1153
1154 - pn = dm_block_data(new_parent);
1155 - ln = dm_block_data(left);
1156 rn = dm_block_data(right);
1157 -
1158 - nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
1159 nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
1160
1161 - ln->header.flags = pn->header.flags;
1162 - ln->header.nr_entries = cpu_to_le32(nr_left);
1163 - ln->header.max_entries = pn->header.max_entries;
1164 - ln->header.value_size = pn->header.value_size;
1165 -
1166 rn->header.flags = pn->header.flags;
1167 rn->header.nr_entries = cpu_to_le32(nr_right);
1168 rn->header.max_entries = pn->header.max_entries;
1169 rn->header.value_size = pn->header.value_size;
1170 -
1171 - memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
1172 memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
1173 -
1174 - size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
1175 - sizeof(__le64) : s->info->value_type.size;
1176 - memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
1177 memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
1178 nr_right * size);
1179
1180 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
1181 index 20557e2c60c6..1d29771af380 100644
1182 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
1183 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
1184 @@ -248,7 +248,7 @@ static int out(struct sm_metadata *smm)
1185 }
1186
1187 if (smm->recursion_count == 1)
1188 - apply_bops(smm);
1189 + r = apply_bops(smm);
1190
1191 smm->recursion_count--;
1192
1193 diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
1194 index b3fa738ae005..f005206d9033 100644
1195 --- a/drivers/misc/vmw_vmci/vmci_doorbell.c
1196 +++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
1197 @@ -318,7 +318,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
1198
1199 entry = container_of(resource, struct dbell_entry, resource);
1200 if (entry->run_delayed) {
1201 - schedule_work(&entry->work);
1202 + if (!schedule_work(&entry->work))
1203 + vmci_resource_put(resource);
1204 } else {
1205 entry->notify_cb(entry->client_data);
1206 vmci_resource_put(resource);
1207 @@ -366,7 +367,8 @@ static void dbell_fire_entries(u32 notify_idx)
1208 atomic_read(&dbell->active) == 1) {
1209 if (dbell->run_delayed) {
1210 vmci_resource_get(&dbell->resource);
1211 - schedule_work(&dbell->work);
1212 + if (!schedule_work(&dbell->work))
1213 + vmci_resource_put(&dbell->resource);
1214 } else {
1215 dbell->notify_cb(dbell->client_data);
1216 }
1217 diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
1218 index 00ba8807dafe..7f654c714fff 100644
1219 --- a/drivers/mmc/core/sd.c
1220 +++ b/drivers/mmc/core/sd.c
1221 @@ -1259,6 +1259,12 @@ int mmc_attach_sd(struct mmc_host *host)
1222 goto err;
1223 }
1224
1225 + /*
1226 + * Some SD cards claims an out of spec VDD voltage range. Let's treat
1227 + * these bits as being in-valid and especially also bit7.
1228 + */
1229 + ocr &= ~0x7FFF;
1230 +
1231 rocr = mmc_select_voltage(host, ocr);
1232
1233 /*
1234 diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
1235 index 83b84ffec27d..2ff6140ea0b7 100644
1236 --- a/drivers/mmc/host/sdhci-of-at91.c
1237 +++ b/drivers/mmc/host/sdhci-of-at91.c
1238 @@ -317,6 +317,9 @@ static int sdhci_at91_probe(struct platform_device *pdev)
1239 pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
1240 pm_runtime_use_autosuspend(&pdev->dev);
1241
1242 + /* HS200 is broken at this moment */
1243 + host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
1244 +
1245 ret = sdhci_add_host(host);
1246 if (ret)
1247 goto pm_runtime_disable;
1248 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1249 index d338c319b30e..8820fb1aec5b 100644
1250 --- a/drivers/net/bonding/bond_main.c
1251 +++ b/drivers/net/bonding/bond_main.c
1252 @@ -2131,6 +2131,15 @@ static void bond_miimon_commit(struct bonding *bond)
1253 bond_for_each_slave(bond, slave, iter) {
1254 switch (slave->new_link) {
1255 case BOND_LINK_NOCHANGE:
1256 + /* For 802.3ad mode, check current slave speed and
1257 + * duplex again in case its port was disabled after
1258 + * invalid speed/duplex reporting but recovered before
1259 + * link monitoring could make a decision on the actual
1260 + * link status
1261 + */
1262 + if (BOND_MODE(bond) == BOND_MODE_8023AD &&
1263 + slave->link == BOND_LINK_UP)
1264 + bond_3ad_adapter_speed_duplex_changed(slave);
1265 continue;
1266
1267 case BOND_LINK_UP:
1268 diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1269 index 214a48703a4e..ffc5467a1ec2 100644
1270 --- a/drivers/net/can/dev.c
1271 +++ b/drivers/net/can/dev.c
1272 @@ -1095,6 +1095,8 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
1273 int register_candev(struct net_device *dev)
1274 {
1275 dev->rtnl_link_ops = &can_link_ops;
1276 + netif_carrier_off(dev);
1277 +
1278 return register_netdev(dev);
1279 }
1280 EXPORT_SYMBOL_GPL(register_candev);
1281 diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
1282 index dd56133cc461..fc9f8b01ecae 100644
1283 --- a/drivers/net/can/sja1000/peak_pcmcia.c
1284 +++ b/drivers/net/can/sja1000/peak_pcmcia.c
1285 @@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
1286 if (!netdev)
1287 continue;
1288
1289 - strncpy(name, netdev->name, IFNAMSIZ);
1290 + strlcpy(name, netdev->name, IFNAMSIZ);
1291
1292 unregister_sja1000dev(netdev);
1293
1294 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1295 index 54c2354053ac..ce0a352a5eaa 100644
1296 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1297 +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1298 @@ -879,7 +879,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
1299
1300 dev_prev_siblings = dev->prev_siblings;
1301 dev->state &= ~PCAN_USB_STATE_CONNECTED;
1302 - strncpy(name, netdev->name, IFNAMSIZ);
1303 + strlcpy(name, netdev->name, IFNAMSIZ);
1304
1305 unregister_netdev(netdev);
1306
1307 diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1308 index ddd1ec8f7bd0..d1a2159e40d6 100644
1309 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1310 +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1311 @@ -3263,7 +3263,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1312 if (!adapter->regs) {
1313 dev_err(&pdev->dev, "cannot map device registers\n");
1314 err = -ENOMEM;
1315 - goto out_free_adapter;
1316 + goto out_free_adapter_nofail;
1317 }
1318
1319 adapter->pdev = pdev;
1320 @@ -3381,6 +3381,9 @@ out_free_dev:
1321 if (adapter->port[i])
1322 free_netdev(adapter->port[i]);
1323
1324 +out_free_adapter_nofail:
1325 + kfree_skb(adapter->nofail_skb);
1326 +
1327 out_free_adapter:
1328 kfree(adapter);
1329
1330 diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
1331 index b5d18d95d7b9..f7882c1fde16 100644
1332 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c
1333 +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
1334 @@ -157,6 +157,7 @@ struct hip04_priv {
1335 unsigned int reg_inten;
1336
1337 struct napi_struct napi;
1338 + struct device *dev;
1339 struct net_device *ndev;
1340
1341 struct tx_desc *tx_desc;
1342 @@ -185,7 +186,7 @@ struct hip04_priv {
1343
1344 static inline unsigned int tx_count(unsigned int head, unsigned int tail)
1345 {
1346 - return (head - tail) % (TX_DESC_NUM - 1);
1347 + return (head - tail) % TX_DESC_NUM;
1348 }
1349
1350 static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
1351 @@ -387,7 +388,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
1352 }
1353
1354 if (priv->tx_phys[tx_tail]) {
1355 - dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
1356 + dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
1357 priv->tx_skb[tx_tail]->len,
1358 DMA_TO_DEVICE);
1359 priv->tx_phys[tx_tail] = 0;
1360 @@ -437,8 +438,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1361 return NETDEV_TX_BUSY;
1362 }
1363
1364 - phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
1365 - if (dma_mapping_error(&ndev->dev, phys)) {
1366 + phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
1367 + if (dma_mapping_error(priv->dev, phys)) {
1368 dev_kfree_skb(skb);
1369 return NETDEV_TX_OK;
1370 }
1371 @@ -497,6 +498,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
1372 u16 len;
1373 u32 err;
1374
1375 + /* clean up tx descriptors */
1376 + tx_remaining = hip04_tx_reclaim(ndev, false);
1377 +
1378 while (cnt && !last) {
1379 buf = priv->rx_buf[priv->rx_head];
1380 skb = build_skb(buf, priv->rx_buf_size);
1381 @@ -505,7 +509,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
1382 goto refill;
1383 }
1384
1385 - dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
1386 + dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
1387 RX_BUF_SIZE, DMA_FROM_DEVICE);
1388 priv->rx_phys[priv->rx_head] = 0;
1389
1390 @@ -534,9 +538,9 @@ refill:
1391 buf = netdev_alloc_frag(priv->rx_buf_size);
1392 if (!buf)
1393 goto done;
1394 - phys = dma_map_single(&ndev->dev, buf,
1395 + phys = dma_map_single(priv->dev, buf,
1396 RX_BUF_SIZE, DMA_FROM_DEVICE);
1397 - if (dma_mapping_error(&ndev->dev, phys))
1398 + if (dma_mapping_error(priv->dev, phys))
1399 goto done;
1400 priv->rx_buf[priv->rx_head] = buf;
1401 priv->rx_phys[priv->rx_head] = phys;
1402 @@ -557,8 +561,7 @@ refill:
1403 }
1404 napi_complete(napi);
1405 done:
1406 - /* clean up tx descriptors and start a new timer if necessary */
1407 - tx_remaining = hip04_tx_reclaim(ndev, false);
1408 + /* start a new timer if necessary */
1409 if (rx < budget && tx_remaining)
1410 hip04_start_tx_timer(priv);
1411
1412 @@ -640,9 +643,9 @@ static int hip04_mac_open(struct net_device *ndev)
1413 for (i = 0; i < RX_DESC_NUM; i++) {
1414 dma_addr_t phys;
1415
1416 - phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
1417 + phys = dma_map_single(priv->dev, priv->rx_buf[i],
1418 RX_BUF_SIZE, DMA_FROM_DEVICE);
1419 - if (dma_mapping_error(&ndev->dev, phys))
1420 + if (dma_mapping_error(priv->dev, phys))
1421 return -EIO;
1422
1423 priv->rx_phys[i] = phys;
1424 @@ -676,7 +679,7 @@ static int hip04_mac_stop(struct net_device *ndev)
1425
1426 for (i = 0; i < RX_DESC_NUM; i++) {
1427 if (priv->rx_phys[i]) {
1428 - dma_unmap_single(&ndev->dev, priv->rx_phys[i],
1429 + dma_unmap_single(priv->dev, priv->rx_phys[i],
1430 RX_BUF_SIZE, DMA_FROM_DEVICE);
1431 priv->rx_phys[i] = 0;
1432 }
1433 @@ -827,6 +830,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
1434 return -ENOMEM;
1435
1436 priv = netdev_priv(ndev);
1437 + priv->dev = d;
1438 priv->ndev = ndev;
1439 platform_set_drvdata(pdev, ndev);
1440
1441 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1442 index d51ad140f46d..05953e14a064 100644
1443 --- a/drivers/net/usb/qmi_wwan.c
1444 +++ b/drivers/net/usb/qmi_wwan.c
1445 @@ -892,6 +892,7 @@ static const struct usb_device_id products[] = {
1446 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
1447 {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
1448 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
1449 + {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
1450 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
1451 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
1452 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
1453 diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
1454 index 56f2112e0cd8..85df2e009310 100644
1455 --- a/drivers/nfc/st-nci/se.c
1456 +++ b/drivers/nfc/st-nci/se.c
1457 @@ -344,6 +344,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
1458
1459 transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
1460 skb->len - 2, GFP_KERNEL);
1461 + if (!transaction)
1462 + return -ENOMEM;
1463
1464 transaction->aid_len = skb->data[1];
1465 memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
1466 diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
1467 index 3a98563d4a12..eac608a457f0 100644
1468 --- a/drivers/nfc/st21nfca/se.c
1469 +++ b/drivers/nfc/st21nfca/se.c
1470 @@ -326,6 +326,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
1471
1472 transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
1473 skb->len - 2, GFP_KERNEL);
1474 + if (!transaction)
1475 + return -ENOMEM;
1476
1477 transaction->aid_len = skb->data[1];
1478 memcpy(transaction->aid, &skb->data[2],
1479 diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
1480 index 23129d7b2678..c77e36526447 100644
1481 --- a/drivers/scsi/ufs/unipro.h
1482 +++ b/drivers/scsi/ufs/unipro.h
1483 @@ -52,7 +52,7 @@
1484 #define RX_HS_UNTERMINATED_ENABLE 0x00A6
1485 #define RX_ENTER_HIBERN8 0x00A7
1486 #define RX_BYPASS_8B10B_ENABLE 0x00A8
1487 -#define RX_TERMINATION_FORCE_ENABLE 0x0089
1488 +#define RX_TERMINATION_FORCE_ENABLE 0x00A9
1489 #define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
1490 #define RX_HIBERN8TIME_CAPABILITY 0x0092
1491 #define RX_REFCLKFREQ 0x00EB
1492 diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
1493 index 0f9859478649..2fbc67ca47d4 100644
1494 --- a/drivers/usb/chipidea/udc.c
1495 +++ b/drivers/usb/chipidea/udc.c
1496 @@ -709,12 +709,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
1497 struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1498 unsigned long flags;
1499
1500 - spin_lock_irqsave(&ci->lock, flags);
1501 - ci->gadget.speed = USB_SPEED_UNKNOWN;
1502 - ci->remote_wakeup = 0;
1503 - ci->suspended = 0;
1504 - spin_unlock_irqrestore(&ci->lock, flags);
1505 -
1506 /* flush all endpoints */
1507 gadget_for_each_ep(ep, gadget) {
1508 usb_ep_fifo_flush(ep);
1509 @@ -732,6 +726,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
1510 ci->status = NULL;
1511 }
1512
1513 + spin_lock_irqsave(&ci->lock, flags);
1514 + ci->gadget.speed = USB_SPEED_UNKNOWN;
1515 + ci->remote_wakeup = 0;
1516 + ci->suspended = 0;
1517 + spin_unlock_irqrestore(&ci->lock, flags);
1518 +
1519 return 0;
1520 }
1521
1522 @@ -1306,6 +1306,10 @@ static int ep_disable(struct usb_ep *ep)
1523 return -EBUSY;
1524
1525 spin_lock_irqsave(hwep->lock, flags);
1526 + if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1527 + spin_unlock_irqrestore(hwep->lock, flags);
1528 + return 0;
1529 + }
1530
1531 /* only internal SW should disable ctrl endpts */
1532
1533 @@ -1395,6 +1399,10 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
1534 return -EINVAL;
1535
1536 spin_lock_irqsave(hwep->lock, flags);
1537 + if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1538 + spin_unlock_irqrestore(hwep->lock, flags);
1539 + return 0;
1540 + }
1541 retval = _ep_queue(ep, req, gfp_flags);
1542 spin_unlock_irqrestore(hwep->lock, flags);
1543 return retval;
1544 @@ -1418,8 +1426,8 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1545 return -EINVAL;
1546
1547 spin_lock_irqsave(hwep->lock, flags);
1548 -
1549 - hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1550 + if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
1551 + hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1552
1553 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1554 dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1555 @@ -1490,6 +1498,10 @@ static void ep_fifo_flush(struct usb_ep *ep)
1556 }
1557
1558 spin_lock_irqsave(hwep->lock, flags);
1559 + if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1560 + spin_unlock_irqrestore(hwep->lock, flags);
1561 + return;
1562 + }
1563
1564 hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1565
1566 @@ -1558,6 +1570,10 @@ static int ci_udc_wakeup(struct usb_gadget *_gadget)
1567 int ret = 0;
1568
1569 spin_lock_irqsave(&ci->lock, flags);
1570 + if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
1571 + spin_unlock_irqrestore(&ci->lock, flags);
1572 + return 0;
1573 + }
1574 if (!ci->remote_wakeup) {
1575 ret = -EOPNOTSUPP;
1576 goto out;
1577 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
1578 index 9f001659807a..217a479165e0 100644
1579 --- a/drivers/usb/class/cdc-wdm.c
1580 +++ b/drivers/usb/class/cdc-wdm.c
1581 @@ -597,10 +597,20 @@ static int wdm_flush(struct file *file, fl_owner_t id)
1582 {
1583 struct wdm_device *desc = file->private_data;
1584
1585 - wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags));
1586 + wait_event(desc->wait,
1587 + /*
1588 + * needs both flags. We cannot do with one
1589 + * because resetting it would cause a race
1590 + * with write() yet we need to signal
1591 + * a disconnect
1592 + */
1593 + !test_bit(WDM_IN_USE, &desc->flags) ||
1594 + test_bit(WDM_DISCONNECTING, &desc->flags));
1595
1596 /* cannot dereference desc->intf if WDM_DISCONNECTING */
1597 - if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags))
1598 + if (test_bit(WDM_DISCONNECTING, &desc->flags))
1599 + return -ENODEV;
1600 + if (desc->werr < 0)
1601 dev_err(&desc->intf->dev, "Error in flush path: %d\n",
1602 desc->werr);
1603
1604 @@ -968,8 +978,6 @@ static void wdm_disconnect(struct usb_interface *intf)
1605 spin_lock_irqsave(&desc->iuspin, flags);
1606 set_bit(WDM_DISCONNECTING, &desc->flags);
1607 set_bit(WDM_READ, &desc->flags);
1608 - /* to terminate pending flushes */
1609 - clear_bit(WDM_IN_USE, &desc->flags);
1610 spin_unlock_irqrestore(&desc->iuspin, flags);
1611 wake_up_all(&desc->wait);
1612 mutex_lock(&desc->rlock);
1613 diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
1614 index 2c022a08f163..9fa168af847b 100644
1615 --- a/drivers/usb/gadget/composite.c
1616 +++ b/drivers/usb/gadget/composite.c
1617 @@ -2000,6 +2000,7 @@ void composite_disconnect(struct usb_gadget *gadget)
1618 * disconnect callbacks?
1619 */
1620 spin_lock_irqsave(&cdev->lock, flags);
1621 + cdev->suspended = 0;
1622 if (cdev->config)
1623 reset_config(cdev);
1624 if (cdev->driver->disconnect)
1625 diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
1626 index 66efa9a67687..72853020a542 100644
1627 --- a/drivers/usb/host/fotg210-hcd.c
1628 +++ b/drivers/usb/host/fotg210-hcd.c
1629 @@ -1653,6 +1653,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1630 /* see what we found out */
1631 temp = check_reset_complete(fotg210, wIndex, status_reg,
1632 fotg210_readl(fotg210, status_reg));
1633 +
1634 + /* restart schedule */
1635 + fotg210->command |= CMD_RUN;
1636 + fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
1637 }
1638
1639 if (!(temp & (PORT_RESUME|PORT_RESET))) {
1640 diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
1641 index 1afb76e8b1a5..17f1cf02ce34 100644
1642 --- a/drivers/usb/host/ohci-hcd.c
1643 +++ b/drivers/usb/host/ohci-hcd.c
1644 @@ -417,8 +417,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
1645 * other cases where the next software may expect clean state from the
1646 * "firmware". this is bus-neutral, unlike shutdown() methods.
1647 */
1648 -static void
1649 -ohci_shutdown (struct usb_hcd *hcd)
1650 +static void _ohci_shutdown(struct usb_hcd *hcd)
1651 {
1652 struct ohci_hcd *ohci;
1653
1654 @@ -434,6 +433,16 @@ ohci_shutdown (struct usb_hcd *hcd)
1655 ohci->rh_state = OHCI_RH_HALTED;
1656 }
1657
1658 +static void ohci_shutdown(struct usb_hcd *hcd)
1659 +{
1660 + struct ohci_hcd *ohci = hcd_to_ohci(hcd);
1661 + unsigned long flags;
1662 +
1663 + spin_lock_irqsave(&ohci->lock, flags);
1664 + _ohci_shutdown(hcd);
1665 + spin_unlock_irqrestore(&ohci->lock, flags);
1666 +}
1667 +
1668 /*-------------------------------------------------------------------------*
1669 * HC functions
1670 *-------------------------------------------------------------------------*/
1671 @@ -752,7 +761,7 @@ static void io_watchdog_func(unsigned long _ohci)
1672 died:
1673 usb_hc_died(ohci_to_hcd(ohci));
1674 ohci_dump(ohci);
1675 - ohci_shutdown(ohci_to_hcd(ohci));
1676 + _ohci_shutdown(ohci_to_hcd(ohci));
1677 goto done;
1678 } else {
1679 /* No write back because the done queue was empty */
1680 diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
1681 index 64ee8154f2bb..89ec9f0905ca 100644
1682 --- a/drivers/usb/host/xhci-rcar.c
1683 +++ b/drivers/usb/host/xhci-rcar.c
1684 @@ -84,7 +84,7 @@ static int xhci_rcar_is_gen2(struct device *dev)
1685 return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
1686 of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
1687 of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
1688 - of_device_is_compatible(node, "renensas,rcar-gen2-xhci");
1689 + of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
1690 }
1691
1692 static int xhci_rcar_is_gen3(struct device *dev)
1693 diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
1694 index fac3447021b2..d955761fce6f 100644
1695 --- a/drivers/usb/storage/realtek_cr.c
1696 +++ b/drivers/usb/storage/realtek_cr.c
1697 @@ -51,7 +51,7 @@ MODULE_VERSION("1.03");
1698
1699 static int auto_delink_en = 1;
1700 module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
1701 -MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
1702 +MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
1703
1704 #ifdef CONFIG_REALTEK_AUTOPM
1705 static int ss_en = 1;
1706 @@ -1010,12 +1010,15 @@ static int init_realtek_cr(struct us_data *us)
1707 goto INIT_FAIL;
1708 }
1709
1710 - if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
1711 - CHECK_FW_VER(chip, 0x5901))
1712 - SET_AUTO_DELINK(chip);
1713 - if (STATUS_LEN(chip) == 16) {
1714 - if (SUPPORT_AUTO_DELINK(chip))
1715 + if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
1716 + CHECK_PID(chip, 0x0159)) {
1717 + if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
1718 + CHECK_FW_VER(chip, 0x5901))
1719 SET_AUTO_DELINK(chip);
1720 + if (STATUS_LEN(chip) == 16) {
1721 + if (SUPPORT_AUTO_DELINK(chip))
1722 + SET_AUTO_DELINK(chip);
1723 + }
1724 }
1725 #ifdef CONFIG_REALTEK_AUTOPM
1726 if (ss_en)
1727 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1728 index c802aabcc58c..3ebf6307217c 100644
1729 --- a/drivers/usb/storage/unusual_devs.h
1730 +++ b/drivers/usb/storage/unusual_devs.h
1731 @@ -2119,7 +2119,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
1732 US_FL_IGNORE_RESIDUE ),
1733
1734 /* Reported by Michael Büsch <m@bues.ch> */
1735 -UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
1736 +UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0117,
1737 "JMicron",
1738 "USB to ATA/ATAPI Bridge",
1739 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1740 diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
1741 index 4dddd8298a22..3e2e2e6a8328 100644
1742 --- a/drivers/watchdog/bcm2835_wdt.c
1743 +++ b/drivers/watchdog/bcm2835_wdt.c
1744 @@ -240,6 +240,7 @@ module_param(nowayout, bool, 0);
1745 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
1746 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
1747
1748 +MODULE_ALIAS("platform:bcm2835-wdt");
1749 MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
1750 MODULE_DESCRIPTION("Driver for Broadcom BCM2835 watchdog timer");
1751 MODULE_LICENSE("GPL");
1752 diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
1753 index 1452177c822d..c719389381dc 100644
1754 --- a/fs/nfs/nfs4_fs.h
1755 +++ b/fs/nfs/nfs4_fs.h
1756 @@ -434,7 +434,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
1757
1758 extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
1759 extern void nfs4_put_state_owner(struct nfs4_state_owner *);
1760 -extern void nfs4_purge_state_owners(struct nfs_server *);
1761 +extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
1762 +extern void nfs4_free_state_owners(struct list_head *head);
1763 extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
1764 extern void nfs4_put_open_state(struct nfs4_state *);
1765 extern void nfs4_close_state(struct nfs4_state *, fmode_t);
1766 diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
1767 index 43f42cc30a60..1ec6dd4f3e2e 100644
1768 --- a/fs/nfs/nfs4client.c
1769 +++ b/fs/nfs/nfs4client.c
1770 @@ -781,9 +781,12 @@ found:
1771
1772 static void nfs4_destroy_server(struct nfs_server *server)
1773 {
1774 + LIST_HEAD(freeme);
1775 +
1776 nfs_server_return_all_delegations(server);
1777 unset_pnfs_layoutdriver(server);
1778 - nfs4_purge_state_owners(server);
1779 + nfs4_purge_state_owners(server, &freeme);
1780 + nfs4_free_state_owners(&freeme);
1781 }
1782
1783 /*
1784 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
1785 index 6f474b067032..4e63daeef633 100644
1786 --- a/fs/nfs/nfs4state.c
1787 +++ b/fs/nfs/nfs4state.c
1788 @@ -611,24 +611,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
1789 /**
1790 * nfs4_purge_state_owners - Release all cached state owners
1791 * @server: nfs_server with cached state owners to release
1792 + * @head: resulting list of state owners
1793 *
1794 * Called at umount time. Remaining state owners will be on
1795 * the LRU with ref count of zero.
1796 + * Note that the state owners are not freed, but are added
1797 + * to the list @head, which can later be used as an argument
1798 + * to nfs4_free_state_owners.
1799 */
1800 -void nfs4_purge_state_owners(struct nfs_server *server)
1801 +void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
1802 {
1803 struct nfs_client *clp = server->nfs_client;
1804 struct nfs4_state_owner *sp, *tmp;
1805 - LIST_HEAD(doomed);
1806
1807 spin_lock(&clp->cl_lock);
1808 list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
1809 - list_move(&sp->so_lru, &doomed);
1810 + list_move(&sp->so_lru, head);
1811 nfs4_remove_state_owner_locked(sp);
1812 }
1813 spin_unlock(&clp->cl_lock);
1814 +}
1815
1816 - list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
1817 +/**
1818 + * nfs4_purge_state_owners - Release all cached state owners
1819 + * @head: resulting list of state owners
1820 + *
1821 + * Frees a list of state owners that was generated by
1822 + * nfs4_purge_state_owners
1823 + */
1824 +void nfs4_free_state_owners(struct list_head *head)
1825 +{
1826 + struct nfs4_state_owner *sp, *tmp;
1827 +
1828 + list_for_each_entry_safe(sp, tmp, head, so_lru) {
1829 list_del(&sp->so_lru);
1830 nfs4_free_state_owner(sp);
1831 }
1832 @@ -1764,12 +1779,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
1833 struct nfs4_state_owner *sp;
1834 struct nfs_server *server;
1835 struct rb_node *pos;
1836 + LIST_HEAD(freeme);
1837 int status = 0;
1838
1839 restart:
1840 rcu_read_lock();
1841 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
1842 - nfs4_purge_state_owners(server);
1843 + nfs4_purge_state_owners(server, &freeme);
1844 spin_lock(&clp->cl_lock);
1845 for (pos = rb_first(&server->state_owners);
1846 pos != NULL;
1847 @@ -1798,6 +1814,7 @@ restart:
1848 spin_unlock(&clp->cl_lock);
1849 }
1850 rcu_read_unlock();
1851 + nfs4_free_state_owners(&freeme);
1852 return 0;
1853 }
1854
1855 diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
1856 index 8bf425a103f0..de63d4e2dfba 100644
1857 --- a/fs/userfaultfd.c
1858 +++ b/fs/userfaultfd.c
1859 @@ -464,6 +464,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
1860 /* len == 0 means wake all */
1861 struct userfaultfd_wake_range range = { .len = 0, };
1862 unsigned long new_flags;
1863 + bool still_valid;
1864
1865 ACCESS_ONCE(ctx->released) = true;
1866
1867 @@ -479,8 +480,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
1868 * taking the mmap_sem for writing.
1869 */
1870 down_write(&mm->mmap_sem);
1871 - if (!mmget_still_valid(mm))
1872 - goto skip_mm;
1873 + still_valid = mmget_still_valid(mm);
1874 prev = NULL;
1875 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1876 cond_resched();
1877 @@ -491,19 +491,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
1878 continue;
1879 }
1880 new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
1881 - prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
1882 - new_flags, vma->anon_vma,
1883 - vma->vm_file, vma->vm_pgoff,
1884 - vma_policy(vma),
1885 - NULL_VM_UFFD_CTX);
1886 - if (prev)
1887 - vma = prev;
1888 - else
1889 - prev = vma;
1890 + if (still_valid) {
1891 + prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
1892 + new_flags, vma->anon_vma,
1893 + vma->vm_file, vma->vm_pgoff,
1894 + vma_policy(vma),
1895 + NULL_VM_UFFD_CTX);
1896 + if (prev)
1897 + vma = prev;
1898 + else
1899 + prev = vma;
1900 + }
1901 vma->vm_flags = new_flags;
1902 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
1903 }
1904 -skip_mm:
1905 up_write(&mm->mmap_sem);
1906 mmput(mm);
1907 wakeup:
1908 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
1909 index 33c389934238..7bfddcd32d73 100644
1910 --- a/fs/xfs/xfs_iops.c
1911 +++ b/fs/xfs/xfs_iops.c
1912 @@ -774,6 +774,7 @@ xfs_setattr_nonsize(
1913
1914 out_cancel:
1915 xfs_trans_cancel(tp);
1916 + xfs_iunlock(ip, XFS_ILOCK_EXCL);
1917 out_dqrele:
1918 xfs_qm_dqrele(udqp);
1919 xfs_qm_dqrele(gdqp);
1920 diff --git a/include/net/tcp.h b/include/net/tcp.h
1921 index a474213ca015..23814d997e86 100644
1922 --- a/include/net/tcp.h
1923 +++ b/include/net/tcp.h
1924 @@ -1609,6 +1609,10 @@ static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
1925 {
1926 struct sk_buff *skb = tcp_send_head(sk);
1927
1928 + /* empty retransmit queue, for example due to zero window */
1929 + if (skb == tcp_write_queue_head(sk))
1930 + return NULL;
1931 +
1932 return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
1933 }
1934
1935 diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
1936 index 5e0ea17d01a6..8847f277a14f 100644
1937 --- a/kernel/irq/irqdesc.c
1938 +++ b/kernel/irq/irqdesc.c
1939 @@ -267,6 +267,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
1940 }
1941 }
1942
1943 +static void irq_sysfs_del(struct irq_desc *desc)
1944 +{
1945 + /*
1946 + * If irq_sysfs_init() has not yet been invoked (early boot), then
1947 + * irq_kobj_base is NULL and the descriptor was never added.
1948 + * kobject_del() complains about a object with no parent, so make
1949 + * it conditional.
1950 + */
1951 + if (irq_kobj_base)
1952 + kobject_del(&desc->kobj);
1953 +}
1954 +
1955 static int __init irq_sysfs_init(void)
1956 {
1957 struct irq_desc *desc;
1958 @@ -297,6 +309,7 @@ static struct kobj_type irq_kobj_type = {
1959 };
1960
1961 static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
1962 +static void irq_sysfs_del(struct irq_desc *desc) {}
1963
1964 #endif /* CONFIG_SYSFS */
1965
1966 @@ -406,7 +419,7 @@ static void free_desc(unsigned int irq)
1967 * The sysfs entry must be serialized against a concurrent
1968 * irq_sysfs_init() as well.
1969 */
1970 - kobject_del(&desc->kobj);
1971 + irq_sysfs_del(desc);
1972 delete_irq_desc(irq);
1973
1974 /*
1975 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
1976 index 7ea8da990b9d..f32f73fa5d3a 100644
1977 --- a/mm/huge_memory.c
1978 +++ b/mm/huge_memory.c
1979 @@ -30,6 +30,7 @@
1980 #include <linux/userfaultfd_k.h>
1981 #include <linux/page_idle.h>
1982 #include <linux/shmem_fs.h>
1983 +#include <linux/page_owner.h>
1984
1985 #include <asm/tlb.h>
1986 #include <asm/pgalloc.h>
1987 @@ -1950,6 +1951,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
1988 }
1989
1990 ClearPageCompound(head);
1991 +
1992 + split_page_owner(head, HPAGE_PMD_ORDER);
1993 +
1994 /* See comment in __split_huge_page_tail() */
1995 if (PageAnon(head)) {
1996 page_ref_inc(head);
1997 diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
1998 index cf15851a7d2f..5a50ad517f0f 100644
1999 --- a/mm/zsmalloc.c
2000 +++ b/mm/zsmalloc.c
2001 @@ -52,6 +52,7 @@
2002 #include <linux/zpool.h>
2003 #include <linux/mount.h>
2004 #include <linux/migrate.h>
2005 +#include <linux/wait.h>
2006 #include <linux/pagemap.h>
2007
2008 #define ZSPAGE_MAGIC 0x58
2009 @@ -265,6 +266,10 @@ struct zs_pool {
2010 #ifdef CONFIG_COMPACTION
2011 struct inode *inode;
2012 struct work_struct free_work;
2013 + /* A wait queue for when migration races with async_free_zspage() */
2014 + wait_queue_head_t migration_wait;
2015 + atomic_long_t isolated_pages;
2016 + bool destroying;
2017 #endif
2018 };
2019
2020 @@ -1939,6 +1944,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
2021 zspage->isolated--;
2022 }
2023
2024 +static void putback_zspage_deferred(struct zs_pool *pool,
2025 + struct size_class *class,
2026 + struct zspage *zspage)
2027 +{
2028 + enum fullness_group fg;
2029 +
2030 + fg = putback_zspage(class, zspage);
2031 + if (fg == ZS_EMPTY)
2032 + schedule_work(&pool->free_work);
2033 +
2034 +}
2035 +
2036 +static inline void zs_pool_dec_isolated(struct zs_pool *pool)
2037 +{
2038 + VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
2039 + atomic_long_dec(&pool->isolated_pages);
2040 + /*
2041 + * There's no possibility of racing, since wait_for_isolated_drain()
2042 + * checks the isolated count under &class->lock after enqueuing
2043 + * on migration_wait.
2044 + */
2045 + if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
2046 + wake_up_all(&pool->migration_wait);
2047 +}
2048 +
2049 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
2050 struct page *newpage, struct page *oldpage)
2051 {
2052 @@ -2008,6 +2038,7 @@ bool zs_page_isolate(struct page *page, isolate_mode_t mode)
2053 */
2054 if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
2055 get_zspage_mapping(zspage, &class_idx, &fullness);
2056 + atomic_long_inc(&pool->isolated_pages);
2057 remove_zspage(class, zspage, fullness);
2058 }
2059
2060 @@ -2096,8 +2127,16 @@ int zs_page_migrate(struct address_space *mapping, struct page *newpage,
2061 * Page migration is done so let's putback isolated zspage to
2062 * the list if @page is final isolated subpage in the zspage.
2063 */
2064 - if (!is_zspage_isolated(zspage))
2065 - putback_zspage(class, zspage);
2066 + if (!is_zspage_isolated(zspage)) {
2067 + /*
2068 + * We cannot race with zs_destroy_pool() here because we wait
2069 + * for isolation to hit zero before we start destroying.
2070 + * Also, we ensure that everyone can see pool->destroying before
2071 + * we start waiting.
2072 + */
2073 + putback_zspage_deferred(pool, class, zspage);
2074 + zs_pool_dec_isolated(pool);
2075 + }
2076
2077 reset_page(page);
2078 put_page(page);
2079 @@ -2144,13 +2183,12 @@ void zs_page_putback(struct page *page)
2080 spin_lock(&class->lock);
2081 dec_zspage_isolation(zspage);
2082 if (!is_zspage_isolated(zspage)) {
2083 - fg = putback_zspage(class, zspage);
2084 /*
2085 * Due to page_lock, we cannot free zspage immediately
2086 * so let's defer.
2087 */
2088 - if (fg == ZS_EMPTY)
2089 - schedule_work(&pool->free_work);
2090 + putback_zspage_deferred(pool, class, zspage);
2091 + zs_pool_dec_isolated(pool);
2092 }
2093 spin_unlock(&class->lock);
2094 }
2095 @@ -2174,8 +2212,36 @@ static int zs_register_migration(struct zs_pool *pool)
2096 return 0;
2097 }
2098
2099 +static bool pool_isolated_are_drained(struct zs_pool *pool)
2100 +{
2101 + return atomic_long_read(&pool->isolated_pages) == 0;
2102 +}
2103 +
2104 +/* Function for resolving migration */
2105 +static void wait_for_isolated_drain(struct zs_pool *pool)
2106 +{
2107 +
2108 + /*
2109 + * We're in the process of destroying the pool, so there are no
2110 + * active allocations. zs_page_isolate() fails for completely free
2111 + * zspages, so we need only wait for the zs_pool's isolated
2112 + * count to hit zero.
2113 + */
2114 + wait_event(pool->migration_wait,
2115 + pool_isolated_are_drained(pool));
2116 +}
2117 +
2118 static void zs_unregister_migration(struct zs_pool *pool)
2119 {
2120 + pool->destroying = true;
2121 + /*
2122 + * We need a memory barrier here to ensure global visibility of
2123 + * pool->destroying. Thus pool->isolated pages will either be 0 in which
2124 + * case we don't care, or it will be > 0 and pool->destroying will
2125 + * ensure that we wake up once isolation hits 0.
2126 + */
2127 + smp_mb();
2128 + wait_for_isolated_drain(pool); /* This can block */
2129 flush_work(&pool->free_work);
2130 iput(pool->inode);
2131 }
2132 @@ -2422,6 +2488,10 @@ struct zs_pool *zs_create_pool(const char *name)
2133 if (!pool->name)
2134 goto err;
2135
2136 +#ifdef CONFIG_COMPACTION
2137 + init_waitqueue_head(&pool->migration_wait);
2138 +#endif
2139 +
2140 if (create_cache(pool))
2141 goto err;
2142
2143 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2144 index 142ccaae9c7b..4a47918b504f 100644
2145 --- a/net/bridge/netfilter/ebtables.c
2146 +++ b/net/bridge/netfilter/ebtables.c
2147 @@ -2288,8 +2288,10 @@ static int compat_do_replace(struct net *net, void __user *user,
2148 state.buf_kern_len = size64;
2149
2150 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2151 - if (WARN_ON(ret < 0))
2152 + if (WARN_ON(ret < 0)) {
2153 + vfree(entries_tmp);
2154 goto out_unlock;
2155 + }
2156
2157 vfree(entries_tmp);
2158 tmp.entries_size = size64;
2159 diff --git a/net/core/stream.c b/net/core/stream.c
2160 index 1086c8b280a8..6e41b20bf9f8 100644
2161 --- a/net/core/stream.c
2162 +++ b/net/core/stream.c
2163 @@ -118,7 +118,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
2164 int err = 0;
2165 long vm_wait = 0;
2166 long current_timeo = *timeo_p;
2167 - bool noblock = (*timeo_p ? false : true);
2168 DEFINE_WAIT(wait);
2169
2170 if (sk_stream_memory_free(sk))
2171 @@ -131,11 +130,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
2172
2173 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
2174 goto do_error;
2175 - if (!*timeo_p) {
2176 - if (noblock)
2177 - set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2178 - goto do_nonblock;
2179 - }
2180 + if (!*timeo_p)
2181 + goto do_eagain;
2182 if (signal_pending(current))
2183 goto do_interrupted;
2184 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2185 @@ -167,7 +163,13 @@ out:
2186 do_error:
2187 err = -EPIPE;
2188 goto out;
2189 -do_nonblock:
2190 +do_eagain:
2191 + /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
2192 + * be generated later.
2193 + * When TCP receives ACK packets that make room, tcp_check_space()
2194 + * only calls tcp_new_space() if SOCK_NOSPACE is set.
2195 + */
2196 + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2197 err = -EAGAIN;
2198 goto out;
2199 do_interrupted:
2200 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
2201 index 954315e1661d..3b2c4692d966 100644
2202 --- a/net/mac80211/cfg.c
2203 +++ b/net/mac80211/cfg.c
2204 @@ -1418,6 +1418,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
2205 if (is_multicast_ether_addr(mac))
2206 return -EINVAL;
2207
2208 + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
2209 + sdata->vif.type == NL80211_IFTYPE_STATION &&
2210 + !sdata->u.mgd.associated)
2211 + return -EINVAL;
2212 +
2213 sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
2214 if (!sta)
2215 return -ENOMEM;
2216 @@ -1425,10 +1430,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
2217 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
2218 sta->sta.tdls = true;
2219
2220 - if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
2221 - !sdata->u.mgd.associated)
2222 - return -EINVAL;
2223 -
2224 err = sta_apply_parameters(local, sta, params);
2225 if (err) {
2226 sta_info_free(local, sta);
2227 diff --git a/net/wireless/reg.c b/net/wireless/reg.c
2228 index 7c19d0d2549b..d1378340d590 100644
2229 --- a/net/wireless/reg.c
2230 +++ b/net/wireless/reg.c
2231 @@ -2165,7 +2165,7 @@ static void reg_process_pending_hints(void)
2232
2233 /* When last_request->processed becomes true this will be rescheduled */
2234 if (lr && !lr->processed) {
2235 - reg_process_hint(lr);
2236 + pr_debug("Pending regulatory request, waiting for it to be processed...\n");
2237 return;
2238 }
2239
2240 diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
2241 index 130e22742137..eee4ea17a8f5 100644
2242 --- a/sound/core/seq/seq_clientmgr.c
2243 +++ b/sound/core/seq/seq_clientmgr.c
2244 @@ -1822,8 +1822,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
2245 if (cptr->type == USER_CLIENT) {
2246 info->input_pool = cptr->data.user.fifo_pool_size;
2247 info->input_free = info->input_pool;
2248 - if (cptr->data.user.fifo)
2249 - info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
2250 + info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
2251 } else {
2252 info->input_pool = 0;
2253 info->input_free = 0;
2254 diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
2255 index 9acbed1ac982..d9f5428ee995 100644
2256 --- a/sound/core/seq/seq_fifo.c
2257 +++ b/sound/core/seq/seq_fifo.c
2258 @@ -278,3 +278,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
2259
2260 return 0;
2261 }
2262 +
2263 +/* get the number of unused cells safely */
2264 +int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
2265 +{
2266 + unsigned long flags;
2267 + int cells;
2268 +
2269 + if (!f)
2270 + return 0;
2271 +
2272 + snd_use_lock_use(&f->use_lock);
2273 + spin_lock_irqsave(&f->lock, flags);
2274 + cells = snd_seq_unused_cells(f->pool);
2275 + spin_unlock_irqrestore(&f->lock, flags);
2276 + snd_use_lock_free(&f->use_lock);
2277 + return cells;
2278 +}
2279 diff --git a/sound/core/seq/seq_fifo.h b/sound/core/seq/seq_fifo.h
2280 index 062c446e7867..5d38a0d7f0cd 100644
2281 --- a/sound/core/seq/seq_fifo.h
2282 +++ b/sound/core/seq/seq_fifo.h
2283 @@ -68,5 +68,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table
2284 /* resize pool in fifo */
2285 int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
2286
2287 +/* get the number of unused cells safely */
2288 +int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
2289
2290 #endif
2291 diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
2292 index 5a0b17ebfc02..624c209c9498 100644
2293 --- a/sound/soc/davinci/davinci-mcasp.c
2294 +++ b/sound/soc/davinci/davinci-mcasp.c
2295 @@ -1158,6 +1158,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
2296 return ret;
2297 }
2298
2299 +static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
2300 + struct snd_pcm_hw_rule *rule)
2301 +{
2302 + struct davinci_mcasp_ruledata *rd = rule->private;
2303 + struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2304 + struct snd_mask nfmt;
2305 + int i, slot_width;
2306 +
2307 + snd_mask_none(&nfmt);
2308 + slot_width = rd->mcasp->slot_width;
2309 +
2310 + for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
2311 + if (snd_mask_test(fmt, i)) {
2312 + if (snd_pcm_format_width(i) <= slot_width) {
2313 + snd_mask_set(&nfmt, i);
2314 + }
2315 + }
2316 + }
2317 +
2318 + return snd_mask_refine(fmt, &nfmt);
2319 +}
2320 +
2321 static const unsigned int davinci_mcasp_dai_rates[] = {
2322 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
2323 88200, 96000, 176400, 192000,
2324 @@ -1251,7 +1273,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
2325 struct davinci_mcasp_ruledata *ruledata =
2326 &mcasp->ruledata[substream->stream];
2327 u32 max_channels = 0;
2328 - int i, dir;
2329 + int i, dir, ret;
2330 int tdm_slots = mcasp->tdm_slots;
2331
2332 /* Do not allow more then one stream per direction */
2333 @@ -1280,6 +1302,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
2334 max_channels++;
2335 }
2336 ruledata->serializers = max_channels;
2337 + ruledata->mcasp = mcasp;
2338 max_channels *= tdm_slots;
2339 /*
2340 * If the already active stream has less channels than the calculated
2341 @@ -1305,20 +1328,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
2342 0, SNDRV_PCM_HW_PARAM_CHANNELS,
2343 &mcasp->chconstr[substream->stream]);
2344
2345 - if (mcasp->slot_width)
2346 - snd_pcm_hw_constraint_minmax(substream->runtime,
2347 - SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2348 - 8, mcasp->slot_width);
2349 + if (mcasp->slot_width) {
2350 + /* Only allow formats require <= slot_width bits on the bus */
2351 + ret = snd_pcm_hw_rule_add(substream->runtime, 0,
2352 + SNDRV_PCM_HW_PARAM_FORMAT,
2353 + davinci_mcasp_hw_rule_slot_width,
2354 + ruledata,
2355 + SNDRV_PCM_HW_PARAM_FORMAT, -1);
2356 + if (ret)
2357 + return ret;
2358 + }
2359
2360 /*
2361 * If we rely on implicit BCLK divider setting we should
2362 * set constraints based on what we can provide.
2363 */
2364 if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
2365 - int ret;
2366 -
2367 - ruledata->mcasp = mcasp;
2368 -
2369 ret = snd_pcm_hw_rule_add(substream->runtime, 0,
2370 SNDRV_PCM_HW_PARAM_RATE,
2371 davinci_mcasp_hw_rule_rate,
2372 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
2373 index ab647f1fe11b..08bfc91c686f 100644
2374 --- a/sound/soc/soc-dapm.c
2375 +++ b/sound/soc/soc-dapm.c
2376 @@ -1104,8 +1104,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
2377 list_add_tail(&widget->work_list, list);
2378
2379 if (custom_stop_condition && custom_stop_condition(widget, dir)) {
2380 - widget->endpoints[dir] = 1;
2381 - return widget->endpoints[dir];
2382 + list = NULL;
2383 + custom_stop_condition = NULL;
2384 }
2385
2386 if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
2387 @@ -1142,8 +1142,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
2388 *
2389 * Optionally, can be supplied with a function acting as a stopping condition.
2390 * This function takes the dapm widget currently being examined and the walk
2391 - * direction as an arguments, it should return true if the walk should be
2392 - * stopped and false otherwise.
2393 + * direction as an arguments, it should return true if widgets from that point
2394 + * in the graph onwards should not be added to the widget list.
2395 */
2396 static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
2397 struct list_head *list,
2398 diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
2399 index a9f99a6c3909..74b399372e0b 100644
2400 --- a/sound/usb/line6/pcm.c
2401 +++ b/sound/usb/line6/pcm.c
2402 @@ -552,6 +552,15 @@ int line6_init_pcm(struct usb_line6 *line6,
2403 line6pcm->volume_monitor = 255;
2404 line6pcm->line6 = line6;
2405
2406 + spin_lock_init(&line6pcm->out.lock);
2407 + spin_lock_init(&line6pcm->in.lock);
2408 + line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
2409 +
2410 + line6->line6pcm = line6pcm;
2411 +
2412 + pcm->private_data = line6pcm;
2413 + pcm->private_free = line6_cleanup_pcm;
2414 +
2415 line6pcm->max_packet_size_in =
2416 usb_maxpacket(line6->usbdev,
2417 usb_rcvisocpipe(line6->usbdev, ep_read), 0);
2418 @@ -564,15 +573,6 @@ int line6_init_pcm(struct usb_line6 *line6,
2419 return -EINVAL;
2420 }
2421
2422 - spin_lock_init(&line6pcm->out.lock);
2423 - spin_lock_init(&line6pcm->in.lock);
2424 - line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
2425 -
2426 - line6->line6pcm = line6pcm;
2427 -
2428 - pcm->private_data = line6pcm;
2429 - pcm->private_free = line6_cleanup_pcm;
2430 -
2431 err = line6_create_audio_out_urbs(line6pcm);
2432 if (err < 0)
2433 return err;
2434 diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
2435 index 248a4bd82397..a02443717625 100644
2436 --- a/sound/usb/mixer.c
2437 +++ b/sound/usb/mixer.c
2438 @@ -82,6 +82,7 @@ struct mixer_build {
2439 unsigned char *buffer;
2440 unsigned int buflen;
2441 DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
2442 + DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
2443 struct usb_audio_term oterm;
2444 const struct usbmix_name_map *map;
2445 const struct usbmix_selector_map *selector_map;
2446 @@ -710,15 +711,24 @@ static int get_term_name(struct mixer_build *state, struct usb_audio_term *iterm
2447 * parse the source unit recursively until it reaches to a terminal
2448 * or a branched unit.
2449 */
2450 -static int check_input_term(struct mixer_build *state, int id,
2451 +static int __check_input_term(struct mixer_build *state, int id,
2452 struct usb_audio_term *term)
2453 {
2454 int err;
2455 void *p1;
2456 + unsigned char *hdr;
2457
2458 memset(term, 0, sizeof(*term));
2459 - while ((p1 = find_audio_control_unit(state, id)) != NULL) {
2460 - unsigned char *hdr = p1;
2461 + for (;;) {
2462 + /* a loop in the terminal chain? */
2463 + if (test_and_set_bit(id, state->termbitmap))
2464 + return -EINVAL;
2465 +
2466 + p1 = find_audio_control_unit(state, id);
2467 + if (!p1)
2468 + break;
2469 +
2470 + hdr = p1;
2471 term->id = id;
2472 switch (hdr[2]) {
2473 case UAC_INPUT_TERMINAL:
2474 @@ -733,7 +743,7 @@ static int check_input_term(struct mixer_build *state, int id,
2475
2476 /* call recursively to verify that the
2477 * referenced clock entity is valid */
2478 - err = check_input_term(state, d->bCSourceID, term);
2479 + err = __check_input_term(state, d->bCSourceID, term);
2480 if (err < 0)
2481 return err;
2482
2483 @@ -765,7 +775,7 @@ static int check_input_term(struct mixer_build *state, int id,
2484 case UAC2_CLOCK_SELECTOR: {
2485 struct uac_selector_unit_descriptor *d = p1;
2486 /* call recursively to retrieve the channel info */
2487 - err = check_input_term(state, d->baSourceID[0], term);
2488 + err = __check_input_term(state, d->baSourceID[0], term);
2489 if (err < 0)
2490 return err;
2491 term->type = d->bDescriptorSubtype << 16; /* virtual type */
2492 @@ -812,6 +822,15 @@ static int check_input_term(struct mixer_build *state, int id,
2493 return -ENODEV;
2494 }
2495
2496 +
2497 +static int check_input_term(struct mixer_build *state, int id,
2498 + struct usb_audio_term *term)
2499 +{
2500 + memset(term, 0, sizeof(*term));
2501 + memset(state->termbitmap, 0, sizeof(state->termbitmap));
2502 + return __check_input_term(state, id, term);
2503 +}
2504 +
2505 /*
2506 * Feature Unit
2507 */
2508 @@ -1694,6 +1713,7 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
2509 int pin, ich, err;
2510
2511 if (desc->bLength < 11 || !(input_pins = desc->bNrInPins) ||
2512 + desc->bLength < sizeof(*desc) + desc->bNrInPins ||
2513 !(num_outs = uac_mixer_unit_bNrChannels(desc))) {
2514 usb_audio_err(state->chip,
2515 "invalid MIXER UNIT descriptor %d\n",
2516 diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
2517 index 177480066816..fffc7c418459 100644
2518 --- a/tools/hv/hv_kvp_daemon.c
2519 +++ b/tools/hv/hv_kvp_daemon.c
2520 @@ -1379,6 +1379,8 @@ int main(int argc, char *argv[])
2521 daemonize = 0;
2522 break;
2523 case 'h':
2524 + print_usage(argv);
2525 + exit(0);
2526 default:
2527 print_usage(argv);
2528 exit(EXIT_FAILURE);
2529 diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
2530 index e0829809c897..bdc1891e0a9a 100644
2531 --- a/tools/hv/hv_vss_daemon.c
2532 +++ b/tools/hv/hv_vss_daemon.c
2533 @@ -164,6 +164,8 @@ int main(int argc, char *argv[])
2534 daemonize = 0;
2535 break;
2536 case 'h':
2537 + print_usage(argv);
2538 + exit(0);
2539 default:
2540 print_usage(argv);
2541 exit(EXIT_FAILURE);
2542 diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
2543 index e58be7eeced8..7b364f2926d4 100644
2544 --- a/tools/perf/bench/numa.c
2545 +++ b/tools/perf/bench/numa.c
2546 @@ -373,8 +373,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
2547
2548 /* Allocate and initialize all memory on CPU#0: */
2549 if (init_cpu0) {
2550 - orig_mask = bind_to_node(0);
2551 - bind_to_memnode(0);
2552 + int node = numa_node_of_cpu(0);
2553 +
2554 + orig_mask = bind_to_node(node);
2555 + bind_to_memnode(node);
2556 }
2557
2558 bytes = bytes0 + HPSIZE;
2559 diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
2560 index 41611d7f9873..016d12af6877 100644
2561 --- a/tools/perf/pmu-events/jevents.c
2562 +++ b/tools/perf/pmu-events/jevents.c
2563 @@ -315,6 +315,7 @@ static struct fixed {
2564 { "inst_retired.any_p", "event=0xc0" },
2565 { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
2566 { "cpu_clk_unhalted.thread", "event=0x3c" },
2567 + { "cpu_clk_unhalted.core", "event=0x3c" },
2568 { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
2569 { NULL, NULL},
2570 };
2571 diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
2572 index 9134a0c3e99d..aa9276bfe3e9 100644
2573 --- a/tools/perf/tests/parse-events.c
2574 +++ b/tools/perf/tests/parse-events.c
2575 @@ -12,32 +12,6 @@
2576 #define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
2577 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
2578
2579 -#if defined(__s390x__)
2580 -/* Return true if kvm module is available and loaded. Test this
2581 - * and retun success when trace point kvm_s390_create_vm
2582 - * exists. Otherwise this test always fails.
2583 - */
2584 -static bool kvm_s390_create_vm_valid(void)
2585 -{
2586 - char *eventfile;
2587 - bool rc = false;
2588 -
2589 - eventfile = get_events_file("kvm-s390");
2590 -
2591 - if (eventfile) {
2592 - DIR *mydir = opendir(eventfile);
2593 -
2594 - if (mydir) {
2595 - rc = true;
2596 - closedir(mydir);
2597 - }
2598 - put_events_file(eventfile);
2599 - }
2600 -
2601 - return rc;
2602 -}
2603 -#endif
2604 -
2605 static int test__checkevent_tracepoint(struct perf_evlist *evlist)
2606 {
2607 struct perf_evsel *evsel = perf_evlist__first(evlist);
2608 @@ -1619,7 +1593,6 @@ static struct evlist_test test__events[] = {
2609 {
2610 .name = "kvm-s390:kvm_s390_create_vm",
2611 .check = test__checkevent_tracepoint,
2612 - .valid = kvm_s390_create_vm_valid,
2613 .id = 100,
2614 },
2615 #endif
2616 diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
2617 new file mode 100644
2618 index 000000000000..63ed533f73d6
2619 --- /dev/null
2620 +++ b/tools/testing/selftests/kvm/config
2621 @@ -0,0 +1,3 @@
2622 +CONFIG_KVM=y
2623 +CONFIG_KVM_INTEL=y
2624 +CONFIG_KVM_AMD=y
2625 diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
2626 index 85814d1bad11..87742c9803a7 100644
2627 --- a/virt/kvm/arm/vgic/vgic-mmio.c
2628 +++ b/virt/kvm/arm/vgic/vgic-mmio.c
2629 @@ -120,6 +120,12 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
2630 return value;
2631 }
2632
2633 +static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
2634 +{
2635 + return (vgic_irq_is_sgi(irq->intid) &&
2636 + vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
2637 +}
2638 +
2639 void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
2640 gpa_t addr, unsigned int len,
2641 unsigned long val)
2642 @@ -130,6 +136,12 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
2643 for_each_set_bit(i, &val, len * 8) {
2644 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
2645
2646 + /* GICD_ISPENDR0 SGI bits are WI */
2647 + if (is_vgic_v2_sgi(vcpu, irq)) {
2648 + vgic_put_irq(vcpu->kvm, irq);
2649 + continue;
2650 + }
2651 +
2652 spin_lock(&irq->irq_lock);
2653 irq->pending = true;
2654 if (irq->config == VGIC_CONFIG_LEVEL)
2655 @@ -150,6 +162,12 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
2656 for_each_set_bit(i, &val, len * 8) {
2657 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
2658
2659 + /* GICD_ICPENDR0 SGI bits are WI */
2660 + if (is_vgic_v2_sgi(vcpu, irq)) {
2661 + vgic_put_irq(vcpu->kvm, irq);
2662 + continue;
2663 + }
2664 +
2665 spin_lock(&irq->irq_lock);
2666
2667 if (irq->config == VGIC_CONFIG_LEVEL) {
2668 diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
2669 index 1ab58f7b5d74..4c2919cc13ca 100644
2670 --- a/virt/kvm/arm/vgic/vgic-v2.c
2671 +++ b/virt/kvm/arm/vgic/vgic-v2.c
2672 @@ -154,7 +154,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
2673 if (vgic_irq_is_sgi(irq->intid)) {
2674 u32 src = ffs(irq->source);
2675
2676 - BUG_ON(!src);
2677 + if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
2678 + irq->intid))
2679 + return;
2680 +
2681 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
2682 irq->source &= ~(1 << (src - 1));
2683 if (irq->source)
2684 diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
2685 index c7924718990e..267b1cf88a7f 100644
2686 --- a/virt/kvm/arm/vgic/vgic-v3.c
2687 +++ b/virt/kvm/arm/vgic/vgic-v3.c
2688 @@ -137,7 +137,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
2689 model == KVM_DEV_TYPE_ARM_VGIC_V2) {
2690 u32 src = ffs(irq->source);
2691
2692 - BUG_ON(!src);
2693 + if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
2694 + irq->intid))
2695 + return;
2696 +
2697 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
2698 irq->source &= ~(1 << (src - 1));
2699 if (irq->source)
2700 diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
2701 index 6440b56ec90e..1934dc8a2ce0 100644
2702 --- a/virt/kvm/arm/vgic/vgic.c
2703 +++ b/virt/kvm/arm/vgic/vgic.c
2704 @@ -196,6 +196,13 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
2705 bool penda, pendb;
2706 int ret;
2707
2708 + /*
2709 + * list_sort may call this function with the same element when
2710 + * the list is fairly long.
2711 + */
2712 + if (unlikely(irqa == irqb))
2713 + return 0;
2714 +
2715 spin_lock(&irqa->irq_lock);
2716 spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
2717