Magellan Linux

Contents of /trunk/kernel-alx/patches-4.4/0143-4.4.44-all-fixes.patch

Revision 2879
Mon Mar 27 13:49:19 2017 UTC by niro
File size: 48977 bytes
linux-4.4.44
diff --git a/Makefile b/Makefile
index 04a2186a4276..d6a1de0e2bd7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 43
+SUBLEVEL = 44
EXTRAVERSION =
NAME = Blurry Fish Butt

diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index ac86c53e2542..e524a775fa5c 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -180,6 +180,7 @@ static int ibmebus_create_device(struct device_node *dn)
static int ibmebus_create_devices(const struct of_device_id *matches)
{
struct device_node *root, *child;
+ struct device *dev;
int ret = 0;

root = of_find_node_by_path("/");
@@ -188,9 +189,12 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
if (!of_match_node(matches, child))
continue;

- if (bus_find_device(&ibmebus_bus_type, NULL, child,
- ibmebus_match_node))
+ dev = bus_find_device(&ibmebus_bus_type, NULL, child,
+ ibmebus_match_node);
+ if (dev) {
+ put_device(dev);
continue;
+ }

ret = ibmebus_create_device(child);
if (ret) {
@@ -262,6 +266,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
const char *buf, size_t count)
{
struct device_node *dn = NULL;
+ struct device *dev;
char *path;
ssize_t rc = 0;

@@ -269,8 +274,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
if (!path)
return -ENOMEM;

- if (bus_find_device(&ibmebus_bus_type, NULL, path,
- ibmebus_match_path)) {
+ dev = bus_find_device(&ibmebus_bus_type, NULL, path,
+ ibmebus_match_path);
+ if (dev) {
+ put_device(dev);
printk(KERN_WARNING "%s: %s has already been probed\n",
__func__, path);
rc = -EEXIST;
@@ -307,6 +314,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus,
if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path))) {
of_device_unregister(to_platform_device(dev));
+ put_device(dev);

kfree(path);
return count;
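
For context: bus_find_device() returns its match with a reference held, so even a lookup used purely as an existence test must drop that reference with put_device(). A minimal sketch of the pattern the hunks above adopt (bus type, key and match function are illustrative, not from this driver):

        /* bus_find_device() takes a reference on the device it returns;
         * balance every successful lookup with put_device(). */
        dev = bus_find_device(&my_bus_type, NULL, key, my_match);
        if (dev) {
                put_device(dev);        /* drop the lookup's reference */
                return -EEXIST;         /* hypothetical "already present" policy */
        }
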
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2b49b113d65d..637ca414d431 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1129,7 +1129,7 @@ static __init int setup_disablecpuid(char *arg)
{
int bit;

- if (get_option(&arg, &bit) && bit < NCAPINTS*32)
+ if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
setup_clear_cpu_cap(bit);
else
return 0;
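
The old test only bounded the parsed bit from above, so a negative value handed to "clearcpuid=" slipped through and indexed the capability bitmap out of bounds. A sketch of the failure mode (the input value is illustrative):

        int bit = -1;                           /* e.g. booted with "clearcpuid=-1" */
        if (bit < NCAPINTS * 32)                /* true for any negative bit */
                setup_clear_cpu_cap(bit);       /* out-of-bounds bitmap access */

The added "bit >= 0" rejects negative input before it reaches the bitmap.
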
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index f49e98062ea5..1dcea225977d 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -172,6 +172,7 @@
#define NearBranch ((u64)1 << 52) /* Near branches */
#define No16 ((u64)1 << 53) /* No 16 bit operand */
#define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
+#define Aligned16 ((u64)1 << 55) /* Aligned to 16 byte boundary (e.g. FXSAVE) */

#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)

@@ -434,6 +435,26 @@ FOP_END;
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

+/*
+ * XXX: inoutclob user must know where the argument is being expanded.
+ * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
+ */
+#define asm_safe(insn, inoutclob...) \
+({ \
+ int _fault = 0; \
+ \
+ asm volatile("1:" insn "\n" \
+ "2:\n" \
+ ".pushsection .fixup, \"ax\"\n" \
+ "3: movl $1, %[_fault]\n" \
+ " jmp 2b\n" \
+ ".popsection\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : [_fault] "+qm"(_fault) inoutclob ); \
+ \
+ _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
+})
+
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
enum x86_intercept intercept,
enum x86_intercept_stage stage)
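
asm_safe() runs a single instruction under an exception-table fixup: if the instruction faults, control lands in the .fixup stub, _fault is set, and the whole expression evaluates to X86EMUL_UNHANDLEABLE instead of letting the fault propagate. The two call styles used later in this patch:

        rc = asm_safe("fwait");                                 /* no operands */
        rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));   /* with an output operand */
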
@@ -620,21 +641,24 @@ static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
* depending on whether they're AVX encoded or not.
*
* Also included is CMPXCHG16B which is not a vector instruction, yet it is
- * subject to the same check.
+ * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
+ * 512 bytes of data must be aligned to a 16 byte boundary.
*/
-static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
+static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
if (likely(size < 16))
- return false;
+ return 1;

if (ctxt->d & Aligned)
- return true;
+ return size;
else if (ctxt->d & Unaligned)
- return false;
+ return 1;
else if (ctxt->d & Avx)
- return false;
+ return 1;
+ else if (ctxt->d & Aligned16)
+ return 16;
else
- return true;
+ return size;
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
@@ -692,7 +716,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
}
break;
}
- if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
+ if (la & (insn_alignment(ctxt, size) - 1))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
bad:
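
Returning the required alignment rather than a bool lets a single mask test cover every case, since each possible return value is a power of two. For FXSAVE/FXRSTOR the operand spans 512 bytes but the architecture requires only 16-byte alignment, which the old size-based check could not express:

        unsigned align = insn_alignment(ctxt, size);    /* 1, 16, or size */
        if (la & (align - 1))                           /* any low bit set => misaligned */
                return emulate_gp(ctxt, 0);             /* #GP(0), as on hardware */
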
@@ -779,6 +803,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

+static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+ struct segmented_address addr,
+ void *data,
+ unsigned int size)
+{
+ int rc;
+ ulong linear;
+
+ rc = linearize(ctxt, addr, size, true, &linear);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+}
+
/*
* Prefetch the remaining bytes of the instruction without crossing page
* boundary if they are not in fetch_cache yet.
@@ -1532,7 +1570,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
&ctxt->exception);
}

-/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
enum x86_transfer_type transfer,
@@ -1569,20 +1606,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,

rpl = selector & 3;

- /* NULL selector is not valid for TR, CS and SS (except for long mode) */
- if ((seg == VCPU_SREG_CS
- || (seg == VCPU_SREG_SS
- && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
- || seg == VCPU_SREG_TR)
- && null_selector)
- goto exception;
-
/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
goto exception;

- if (null_selector) /* for NULL selector skip all following checks */
+ /* NULL selector is not valid for TR, CS and (except for long mode) SS */
+ if (null_selector) {
+ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
+ goto exception;
+
+ if (seg == VCPU_SREG_SS) {
+ if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
+ goto exception;
+
+ /*
+ * ctxt->ops->set_segment expects the CPL to be in
+ * SS.DPL, so fake an expand-up 32-bit data segment.
+ */
+ seg_desc.type = 3;
+ seg_desc.p = 1;
+ seg_desc.s = 1;
+ seg_desc.dpl = cpl;
+ seg_desc.d = 1;
+ seg_desc.g = 1;
+ }
+
+ /* Skip all following checks */
goto load;
+ }

ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
@@ -1698,6 +1749,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg)
{
u8 cpl = ctxt->ops->cpl(ctxt);
+
+ /*
+ * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
+ * they can load it at CPL<3 (Intel's manual says only LSS can,
+ * but it's wrong).
+ *
+ * However, the Intel manual says that putting IST=1/DPL=3 in
+ * an interrupt gate will result in SS=3 (the AMD manual instead
+ * says it doesn't), so allow SS=3 in __load_segment_descriptor
+ * and only forbid it here.
+ */
+ if (seg == VCPU_SREG_SS && selector == 3 &&
+ ctxt->mode == X86EMUL_MODE_PROT64)
+ return emulate_exception(ctxt, GP_VECTOR, 0, true);
+
return __load_segment_descriptor(ctxt, selector, seg, cpl,
X86_TRANSFER_NONE, NULL);
}
@@ -3646,8 +3712,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
}
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
- return segmented_write(ctxt, ctxt->dst.addr.mem,
- &desc_ptr, 2 + ctxt->op_bytes);
+ return segmented_write_std(ctxt, ctxt->dst.addr.mem,
+ &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
@@ -3830,6 +3896,131 @@ static int em_movsxd(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}

+static int check_fxsr(struct x86_emulate_ctxt *ctxt)
+{
+ u32 eax = 1, ebx, ecx = 0, edx;
+
+ ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+ if (!(edx & FFL(FXSR)))
+ return emulate_ud(ctxt);
+
+ if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
+ return emulate_nm(ctxt);
+
+ /*
+ * Don't emulate a case that should never be hit, instead of working
+ * around a lack of fxsave64/fxrstor64 on old compilers.
+ */
+ if (ctxt->mode >= X86EMUL_MODE_PROT64)
+ return X86EMUL_UNHANDLEABLE;
+
+ return X86EMUL_CONTINUE;
+}
+
+/*
+ * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
+ * 1) 16 bit mode
+ * 2) 32 bit mode
+ * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
+ * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
+ * save and restore
+ * 3) 64-bit mode with REX.W prefix
+ * - like (2), but XMM 8-15 are being saved and restored
+ * 4) 64-bit mode without REX.W prefix
+ * - like (3), but FIP and FDP are 64 bit
+ *
+ * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
+ * desired result. (4) is not emulated.
+ *
+ * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
+ * and FPU DS) should match.
+ */
+static int em_fxsave(struct x86_emulate_ctxt *ctxt)
+{
+ struct fxregs_state fx_state;
+ size_t size;
+ int rc;
+
+ rc = check_fxsr(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ ctxt->ops->get_fpu(ctxt);
+
+ rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
+
+ ctxt->ops->put_fpu(ctxt);
+
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
+ size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
+ else
+ size = offsetof(struct fxregs_state, xmm_space[0]);
+
+ return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+}
+
+static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
+ struct fxregs_state *new)
+{
+ int rc = X86EMUL_CONTINUE;
+ struct fxregs_state old;
+
+ rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ /*
+ * 64 bit host will restore XMM 8-15, which is not correct on non-64
+ * bit guests. Load the current values in order to preserve 64 bit
+ * XMMs after fxrstor.
+ */
+#ifdef CONFIG_X86_64
+ /* XXX: accessing XMM 8-15 very awkwardly */
+ memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
+#endif
+
+ /*
+ * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
+ * does save and restore MXCSR.
+ */
+ if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
+ memcpy(new->xmm_space, old.xmm_space, 8 * 16);
+
+ return rc;
+}
+
+static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
+{
+ struct fxregs_state fx_state;
+ int rc;
+
+ rc = check_fxsr(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ if (fx_state.mxcsr >> 16)
+ return emulate_gp(ctxt, 0);
+
+ ctxt->ops->get_fpu(ctxt);
+
+ if (ctxt->mode < X86EMUL_MODE_PROT64)
+ rc = fxrstor_fixup(ctxt, &fx_state);
+
+ if (rc == X86EMUL_CONTINUE)
+ rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
+
+ ctxt->ops->put_fpu(ctxt);
+
+ return rc;
+}
+
static bool valid_cr(int nr)
{
switch (nr) {
@@ -4182,7 +4373,9 @@ static const struct gprefix pfx_0f_ae_7 = {
};

static const struct group_dual group15 = { {
- N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
+ I(ModRM | Aligned16, em_fxsave),
+ I(ModRM | Aligned16, em_fxrstor),
+ N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
N, N, N, N, N, N, N, N,
} };
@@ -5054,21 +5247,13 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
- bool fault = false;
+ int rc;

ctxt->ops->get_fpu(ctxt);
- asm volatile("1: fwait \n\t"
- "2: \n\t"
- ".pushsection .fixup,\"ax\" \n\t"
- "3: \n\t"
- "movb $1, %[fault] \n\t"
- "jmp 2b \n\t"
- ".popsection \n\t"
- _ASM_EXTABLE(1b, 3b)
- : [fault]"+qm"(fault));
+ rc = asm_safe("fwait");
ctxt->ops->put_fpu(ctxt);

- if (unlikely(fault))
+ if (unlikely(rc != X86EMUL_CONTINUE))
return emulate_exception(ctxt, MF_VECTOR, 0, false);

return X86EMUL_CONTINUE;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 4d30b865be30..1c96f09367ae 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2187,3 +2187,9 @@ void kvm_lapic_init(void)
jump_label_rate_limit(&apic_hw_disabled, HZ);
jump_label_rate_limit(&apic_sw_disabled, HZ);
}
+
+void kvm_lapic_exit(void)
+{
+ static_key_deferred_flush(&apic_hw_disabled);
+ static_key_deferred_flush(&apic_sw_disabled);
+}
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index fde8e35d5850..eb418fd670ff 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -95,6 +95,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)

int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
void kvm_lapic_init(void);
+void kvm_lapic_exit(void);

static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off)
{
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b3c2ae7aa213..25a6efcfdf7f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5842,6 +5842,7 @@ out:

void kvm_arch_exit(void)
{
+ kvm_lapic_exit();
perf_unregister_guest_info_callbacks(&kvm_guest_cbs);

if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9f99a01b00e8..6cfc6b200366 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -842,7 +842,7 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
return WORK_CPU_UNBOUND;

if (--hctx->next_cpu_batch <= 0) {
- int cpu = hctx->next_cpu, next_cpu;
+ int next_cpu;

next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
if (next_cpu >= nr_cpu_ids)
@@ -850,8 +850,6 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)

hctx->next_cpu = next_cpu;
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
-
- return cpu;
}

return hctx->next_cpu;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3ad307ee6029..e04a7b8492cf 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1572,7 +1572,7 @@ static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
{
struct cfq_group_data *cgd;

- cgd = kzalloc(sizeof(*cgd), GFP_KERNEL);
+ cgd = kzalloc(sizeof(*cgd), gfp);
if (!cgd)
return NULL;
return &cgd->cpd;
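
The cfq hunk fixes an allocator-callback contract: the blkcg core passes a gfp_t to cpd_alloc precisely so the callback can be invoked under allocation constraints the core chooses, and hard-coding GFP_KERNEL ignored that. The shape of the contract, with an illustrative policy name:

        static struct blkcg_policy_data *example_cpd_alloc(gfp_t gfp)
        {
                /* assumes struct example_cpd embeds a leading
                 * struct blkcg_policy_data; honor the caller's mask,
                 * it is not necessarily GFP_KERNEL */
                return kzalloc(sizeof(struct example_cpd), gfp);
        }
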
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 8f8da9f92090..eac4f3b02df9 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -847,6 +847,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
if (ghes_read_estatus(ghes, 1)) {
ghes_clear_estatus(ghes);
continue;
+ } else {
+ ret = NMI_HANDLED;
}

sev = ghes_severity(ghes->estatus->error_severity);
@@ -858,12 +860,11 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)

__process_error(ghes);
ghes_clear_estatus(ghes);
-
- ret = NMI_HANDLED;
}

#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- irq_work_queue(&ghes_proc_irq_work);
+ if (ret == NMI_HANDLED)
+ irq_work_queue(&ghes_proc_irq_work);
#endif
atomic_dec(&ghes_in_nmi);
return ret;
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index 6575c0fe6a4e..27ea64fa4f9b 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -171,6 +171,7 @@ static int vexpress_config_populate(struct device_node *node)
{
struct device_node *bridge;
struct device *parent;
+ int ret;

bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
if (!bridge)
@@ -181,7 +182,11 @@ static int vexpress_config_populate(struct device_node *node)
if (WARN_ON(!parent))
return -ENODEV;

- return of_platform_populate(node, NULL, NULL, parent);
+ ret = of_platform_populate(node, NULL, NULL, parent);
+
+ put_device(parent);
+
+ return ret;
}

static int __init vexpress_config_init(void)
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index cb501386eb6e..c4b0ef65988c 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -373,8 +373,14 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
if (unlikely(rebooting) && new_index != get_nominal_index())
return 0;

- if (!throttled)
+ if (!throttled) {
+ /* we don't want to be preempted while
+ * checking if the CPU frequency has been throttled
+ */
+ preempt_disable();
powernv_cpufreq_throttle_check(NULL);
+ preempt_enable();
+ }

freq_data.pstate_id = powernv_freqs[new_index].driver_data;

diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index eb2a2a49974f..b6f16804e73b 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3008,19 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6817) ||
(rdev->pdev->device == 0x6806))
max_mclk = 120000;
- } else if (rdev->family == CHIP_VERDE) {
- if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0x87) ||
- (rdev->pdev->device == 0x6820) ||
- (rdev->pdev->device == 0x6821) ||
- (rdev->pdev->device == 0x6822) ||
- (rdev->pdev->device == 0x6823) ||
- (rdev->pdev->device == 0x682A) ||
- (rdev->pdev->device == 0x682B)) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
} else if (rdev->family == CHIP_OLAND) {
if ((rdev->pdev->revision == 0xC7) ||
(rdev->pdev->revision == 0x80) ||
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index d625167357cc..e4587411b447 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1400,7 +1400,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,

if (i2c_check_addr_validity(addr, info.flags)) {
dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
- info.addr, node->full_name);
+ addr, node->full_name);
return ERR_PTR(-EINVAL);
}

diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 2413ec9f8207..94c837046786 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -329,7 +329,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
unsigned long arg)
{
struct i2c_smbus_ioctl_data data_arg;
- union i2c_smbus_data temp;
+ union i2c_smbus_data temp = {};
int datasize, res;

if (copy_from_user(&data_arg,
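
Zero-initializing temp closes an information leak: for transactions that read back less than the full union (or fail part-way), the later copy-out to userspace would otherwise include uninitialized kernel stack bytes.

        union i2c_smbus_data temp = {}; /* zeroed: the copy_to_user() path that
                                         * returns results can no longer expose
                                         * stack contents on a short fill */
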
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index aff42d5e2296..16f000a76de5 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1238,6 +1238,12 @@ static int xpad_init_input(struct usb_xpad *xpad)
input_dev->name = xpad->name;
input_dev->phys = xpad->phys;
usb_to_input_id(xpad->udev, &input_dev->id);
+
+ if (xpad->xtype == XTYPE_XBOX360W) {
+ /* x360w controllers and the receiver have different ids */
+ input_dev->id.product = 0x02a1;
+ }
+
input_dev->dev.parent = &xpad->intf->dev;

input_set_drvdata(input_dev, xpad);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 073246c7d163..0cdd95801a25 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
},
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ },
+ },
{ }
};

diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index ac09855fa435..486f8fe242da 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -905,9 +905,9 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)

case QUEUE_HEADER_NORMAL:
report_count = ts->buf[FW_HDR_COUNT];
- if (report_count > 3) {
+ if (report_count == 0 || report_count > 3) {
dev_err(&client->dev,
- "too large report count: %*ph\n",
+ "bad report count: %*ph\n",
HEADER_SIZE, ts->buf);
break;
}
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 84943e4cff09..13730ca151ad 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -246,7 +246,7 @@ static int meson_pmx_request_gpio(struct pinctrl_dev *pcdev,
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);

- meson_pmx_disable_other_groups(pc, range->pin_base + offset, -1);
+ meson_pmx_disable_other_groups(pc, offset, -1);

return 0;
}
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 863c3e30ce05..50f2014fed55 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -483,7 +483,8 @@ static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin,

switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- return true;
+ return pin->configs &
+ (SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN);

case PIN_CONFIG_BIAS_PULL_UP:
return pin->configs & SH_PFC_PIN_CFG_PULL_UP;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index e0b89b961e1b..a0f911641b04 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -470,6 +470,14 @@ static void atmel_stop_tx(struct uart_port *port)
/* disable PDC transmit */
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
}
+
+ /*
+ * Disable the transmitter.
+ * This is mandatory when DMA is used, otherwise the DMA buffer
+ * is fully transmitted.
+ */
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
+
/* Disable interrupts */
atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);

@@ -502,6 +510,9 @@ static void atmel_start_tx(struct uart_port *port)

/* Enable interrupts */
atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
+
+ /* re-enable the transmitter */
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
}

/*
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 5381a728d23e..1fa4128eb88e 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -939,8 +939,8 @@ static const struct input_device_id sysrq_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT,
- .evbit = { BIT_MASK(EV_KEY) },
- .keybit = { BIT_MASK(KEY_LEFTALT) },
+ .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) },
},
{ },
};
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index fa5d8c2f6982..de7dce6eb474 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -913,17 +913,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
spin_lock_irqsave(&xhci->lock, flags);

ep->stop_cmds_pending--;
- if (xhci->xhc_state & XHCI_STATE_REMOVING) {
- spin_unlock_irqrestore(&xhci->lock, flags);
- return;
- }
- if (xhci->xhc_state & XHCI_STATE_DYING) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Stop EP timer ran, but another timer marked "
- "xHCI as DYING, exiting.");
- spin_unlock_irqrestore(&xhci->lock, flags);
- return;
- }
if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Stop EP timer ran, but no command pending, "
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index a8b3c0fc11fb..f2e9f59c90d6 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1569,19 +1569,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_urb_free_priv(urb_priv);
return ret;
}
- if ((xhci->xhc_state & XHCI_STATE_DYING) ||
- (xhci->xhc_state & XHCI_STATE_HALTED)) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Ep 0x%x: URB %p to be canceled on "
- "non-responsive xHCI host.",
- urb->ep->desc.bEndpointAddress, urb);
- /* Let the stop endpoint command watchdog timer (which set this
- * state) finish cleaning up the endpoint TD lists. We must
- * have caught it in the middle of dropping a lock and giving
- * back an URB.
- */
- goto done;
- }

ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index c73808f095bb..71133d96f97d 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -99,6 +99,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request,
r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
value, index, NULL, 0, DEFAULT_TIMEOUT);
+ if (r < 0)
+ dev_err(&dev->dev, "failed to send control message: %d\n", r);

return r;
}
@@ -116,7 +118,20 @@ static int ch341_control_in(struct usb_device *dev,
r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
value, index, buf, bufsize, DEFAULT_TIMEOUT);
- return r;
+ if (r < bufsize) {
+ if (r >= 0) {
+ dev_err(&dev->dev,
+ "short control message received (%d < %u)\n",
+ r, bufsize);
+ r = -EIO;
+ }
+
+ dev_err(&dev->dev, "failed to receive control message: %d\n",
+ r);
+ return r;
+ }
+
+ return 0;
}

static int ch341_set_baudrate(struct usb_device *dev,
@@ -158,9 +173,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control)

static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
{
+ const unsigned int size = 2;
char *buffer;
int r;
- const unsigned size = 8;
unsigned long flags;

buffer = kmalloc(size, GFP_KERNEL);
@@ -171,14 +186,9 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
if (r < 0)
goto out;

- /* setup the private status if available */
- if (r == 2) {
- r = 0;
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
- spin_unlock_irqrestore(&priv->lock, flags);
- } else
- r = -EPROTO;
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
+ spin_unlock_irqrestore(&priv->lock, flags);

out: kfree(buffer);
return r;
@@ -188,9 +198,9 @@ out: kfree(buffer);

static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
{
+ const unsigned int size = 2;
char *buffer;
int r;
- const unsigned size = 8;

buffer = kmalloc(size, GFP_KERNEL);
if (!buffer)
@@ -253,7 +263,6 @@ static int ch341_port_probe(struct usb_serial_port *port)

spin_lock_init(&priv->lock);
priv->baud_rate = DEFAULT_BAUD_RATE;
- priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;

r = ch341_configure(port->serial->dev, priv);
if (r < 0)
@@ -315,7 +324,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)

r = ch341_configure(serial->dev, priv);
if (r)
- goto out;
+ return r;

if (tty)
ch341_set_termios(tty, port, NULL);
@@ -325,12 +334,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
if (r) {
dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n",
__func__, r);
- goto out;
+ return r;
}

r = usb_serial_generic_open(tty, port);
+ if (r)
+ goto err_kill_interrupt_urb;

-out: return r;
+ return 0;
+
+err_kill_interrupt_urb:
+ usb_kill_urb(port->interrupt_in_urb);
+
+ return r;
}

/* Old_termios contains the original termios settings and
@@ -345,26 +361,25 @@ static void ch341_set_termios(struct tty_struct *tty,

baud_rate = tty_get_baud_rate(tty);

- priv->baud_rate = baud_rate;
-
if (baud_rate) {
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->baud_rate = baud_rate;
ch341_set_baudrate(port->serial->dev, priv);
- } else {
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
- spin_unlock_irqrestore(&priv->lock, flags);
}

- ch341_set_handshake(port->serial->dev, priv->line_control);
-
/* Unimplemented:
* (cflag & CSIZE) : data bits [5, 8]
* (cflag & PARENB) : parity {NONE, EVEN, ODD}
* (cflag & CSTOPB) : stop bits [1, 2]
*/
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (C_BAUD(tty) == B0)
+ priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
+ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
+ priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ ch341_set_handshake(port->serial->dev, priv->line_control);
}

static void ch341_break_ctl(struct tty_struct *tty, int break_state)
@@ -539,14 +554,23 @@ static int ch341_tiocmget(struct tty_struct *tty)

static int ch341_reset_resume(struct usb_serial *serial)
{
- struct ch341_private *priv;
-
- priv = usb_get_serial_port_data(serial->port[0]);
+ struct usb_serial_port *port = serial->port[0];
+ struct ch341_private *priv = usb_get_serial_port_data(port);
+ int ret;

/* reconfigure ch341 serial port after bus-reset */
ch341_configure(serial->dev, priv);

- return 0;
+ if (test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
+ ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+ if (ret) {
+ dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return usb_serial_generic_resume(serial);
}

static struct usb_serial_driver ch341_device = {
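
The reworked ch341_control_in() treats a short control read as an error, since its callers need the whole buffer and USB control transfers may legally return fewer bytes than requested. The resulting pattern, as applied above:

        r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
                            USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                            value, index, buf, bufsize, DEFAULT_TIMEOUT);
        if (r < bufsize)                        /* transfer error (r < 0) or short read */
                return (r >= 0) ? -EIO : r;     /* map short reads to -EIO */
        return 0;
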
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index cd0322502ab1..83c823d32ff9 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -192,10 +192,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
status_buf, KLSI_STATUSBUF_LEN,
10000
);
- if (rc < 0)
- dev_err(&port->dev, "Reading line status failed (error = %d)\n",
- rc);
- else {
+ if (rc != KLSI_STATUSBUF_LEN) {
+ dev_err(&port->dev, "reading line status failed: %d\n", rc);
+ if (rc >= 0)
+ rc = -EIO;
+ } else {
status = get_unaligned_le16(status_buf);

dev_info(&port->serial->dev->dev, "read status %x %x\n",
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index b79a74a98a23..ad94d8a45728 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -467,7 +467,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);

- *pci_base = (dma_addr_t)vme_base + pci_offset;
+ *pci_base = (dma_addr_t)*vme_base + pci_offset;
*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

*enabled = 0;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2af08c3de775..2a2e370399ba 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2520,11 +2520,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (ref && ref->seq &&
btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
spin_unlock(&locked_ref->lock);
- btrfs_delayed_ref_unlock(locked_ref);
spin_lock(&delayed_refs->lock);
locked_ref->processing = 0;
delayed_refs->num_heads_ready++;
spin_unlock(&delayed_refs->lock);
+ btrfs_delayed_ref_unlock(locked_ref);
locked_ref = NULL;
cond_resched();
count++;
@@ -2570,7 +2570,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
*/
if (must_insert_reserved)
locked_ref->must_insert_reserved = 1;
+ spin_lock(&delayed_refs->lock);
locked_ref->processing = 0;
+ delayed_refs->num_heads_ready++;
+ spin_unlock(&delayed_refs->lock);
btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
btrfs_delayed_ref_unlock(locked_ref);
return ret;
diff --git a/fs/dcache.c b/fs/dcache.c
index 71b6056ad35d..849c1c1e787b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1322,8 +1322,11 @@ int d_set_mounted(struct dentry *dentry)
}
spin_lock(&dentry->d_lock);
if (!d_unlinked(dentry)) {
- dentry->d_flags |= DCACHE_MOUNTED;
- ret = 0;
+ ret = -EBUSY;
+ if (!d_mountpoint(dentry)) {
+ dentry->d_flags |= DCACHE_MOUNTED;
+ ret = 0;
+ }
}
spin_unlock(&dentry->d_lock);
out:
diff --git a/fs/namespace.c b/fs/namespace.c
index 5be02a0635be..da98a1bbd8b5 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -743,26 +743,50 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
return NULL;
}

-static struct mountpoint *new_mountpoint(struct dentry *dentry)
+static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
- struct hlist_head *chain = mp_hash(dentry);
- struct mountpoint *mp;
+ struct mountpoint *mp, *new = NULL;
int ret;

- mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
- if (!mp)
+ if (d_mountpoint(dentry)) {
+mountpoint:
+ read_seqlock_excl(&mount_lock);
+ mp = lookup_mountpoint(dentry);
+ read_sequnlock_excl(&mount_lock);
+ if (mp)
+ goto done;
+ }
+
+ if (!new)
+ new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
+ if (!new)
return ERR_PTR(-ENOMEM);

+
+ /* Exactly one processes may set d_mounted */
ret = d_set_mounted(dentry);
- if (ret) {
- kfree(mp);
- return ERR_PTR(ret);
- }

- mp->m_dentry = dentry;
- mp->m_count = 1;
- hlist_add_head(&mp->m_hash, chain);
- INIT_HLIST_HEAD(&mp->m_list);
+ /* Someone else set d_mounted? */
+ if (ret == -EBUSY)
+ goto mountpoint;
+
+ /* The dentry is not available as a mountpoint? */
+ mp = ERR_PTR(ret);
+ if (ret)
+ goto done;
+
+ /* Add the new mountpoint to the hash table */
+ read_seqlock_excl(&mount_lock);
+ new->m_dentry = dentry;
+ new->m_count = 1;
+ hlist_add_head(&new->m_hash, mp_hash(dentry));
+ INIT_HLIST_HEAD(&new->m_list);
+ read_sequnlock_excl(&mount_lock);
+
+ mp = new;
+ new = NULL;
+done:
+ kfree(new);
return mp;
}

@@ -1557,11 +1581,11 @@ void __detach_mounts(struct dentry *dentry)
struct mount *mnt;

namespace_lock();
+ lock_mount_hash();
mp = lookup_mountpoint(dentry);
if (IS_ERR_OR_NULL(mp))
goto out_unlock;

- lock_mount_hash();
event++;
while (!hlist_empty(&mp->m_list)) {
mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
@@ -1571,9 +1595,9 @@ void __detach_mounts(struct dentry *dentry)
}
else umount_tree(mnt, UMOUNT_CONNECTED);
}
- unlock_mount_hash();
put_mountpoint(mp);
out_unlock:
+ unlock_mount_hash();
namespace_unlock();
}

@@ -1962,9 +1986,7 @@ retry:
namespace_lock();
mnt = lookup_mnt(path);
if (likely(!mnt)) {
- struct mountpoint *mp = lookup_mountpoint(dentry);
- if (!mp)
- mp = new_mountpoint(dentry);
+ struct mountpoint *mp = get_mountpoint(dentry);
if (IS_ERR(mp)) {
namespace_unlock();
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -1983,7 +2005,11 @@ retry:
static void unlock_mount(struct mountpoint *where)
{
struct dentry *dentry = where->m_dentry;
+
+ read_seqlock_excl(&mount_lock);
put_mountpoint(where);
+ read_sequnlock_excl(&mount_lock);
+
namespace_unlock();
mutex_unlock(&dentry->d_inode->i_mutex);
}
@@ -3055,9 +3081,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
touch_mnt_namespace(current->nsproxy->mnt_ns);
/* A moved mount should not expire automatically */
list_del_init(&new_mnt->mnt_expire);
+ put_mountpoint(root_mp);
unlock_mount_hash();
chroot_fs_refs(&root, &new);
- put_mountpoint(root_mp);
error = 0;
out4:
unlock_mount(old_mp);
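
get_mountpoint() folds the old lookup-then-create sequence into one race-free helper: d_set_mounted() now reports -EBUSY when another task set DCACHE_MOUNTED first, and the helper reacts by going back to the lookup instead of failing. An illustrative restructuring of the goto-based flow (hash_new_mountpoint is a hypothetical stand-in for the inline allocate-and-insert steps):

        for (;;) {
                mp = lookup_mountpoint(dentry);         /* under mount_lock */
                if (mp)
                        return mp;                      /* reuse the winner's mountpoint */
                ret = d_set_mounted(dentry);            /* exactly one task succeeds */
                if (ret == -EBUSY)
                        continue;                       /* lost the race: look it up again */
                if (ret)
                        return ERR_PTR(ret);            /* dentry unusable as a mountpoint */
                return hash_new_mountpoint(dentry);     /* allocate + insert under mount_lock */
        }
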
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 46cfed63d229..52ee0b73ab4a 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -462,7 +462,7 @@ void nfs_force_use_readdirplus(struct inode *dir)
{
if (!list_empty(&NFS_I(dir)->open_files)) {
nfs_advise_use_readdirplus(dir);
- nfs_zap_mapping(dir, dir->i_mapping);
+ invalidate_mapping_pages(dir->i_mapping, 0, -1);
}
}

@@ -847,17 +847,6 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc)
goto out;
}

-static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
-{
- struct nfs_inode *nfsi = NFS_I(dir);
-
- if (nfs_attribute_cache_expired(dir))
- return true;
- if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
- return true;
- return false;
-}
-
/* The file offset position represents the dirent entry number. A
last cookie cache takes care of the common case of reading the
whole directory.
@@ -890,7 +879,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
desc->plus = nfs_use_readdirplus(inode, ctx) ? 1 : 0;

nfs_block_sillyrename(dentry);
- if (ctx->pos == 0 || nfs_dir_mapping_need_revalidate(inode))
+ if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
res = nfs_revalidate_mapping(inode, file->f_mapping);
if (res < 0)
goto out;
diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c
index 4946ef40ba87..85ef38f9765f 100644
--- a/fs/nfs/filelayout/filelayoutdev.c
+++ b/fs/nfs/filelayout/filelayoutdev.c
@@ -283,7 +283,8 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
s->nfs_client->cl_rpcclient->cl_auth->au_flavor);

out_test_devid:
- if (filelayout_test_devid_unavailable(devid))
+ if (ret->ds_clp == NULL ||
+ filelayout_test_devid_unavailable(devid))
ret = NULL;
out:
return ret;
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 5cd3568eea06..3cae0726c1b1 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1185,13 +1185,11 @@ bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
* i_lock */
spin_lock(&ino->i_lock);
lo = nfsi->layout;
- if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+ if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
+ rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
sleep = true;
+ }
spin_unlock(&ino->i_lock);
-
- if (sleep)
- rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
-
return sleep;
}

diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index b002acf50203..60a5f1548cd9 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3321,6 +3321,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
lockres->l_level, new_level);

+ /*
+ * On DLM_LKF_VALBLK, fsdlm behaves differently with o2cb. It always
+ * expects DLM_LKF_VALBLK being set if the LKB has LVB, so that
+ * we can recover correctly from node failure. Otherwise, we may get
+ * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
+ */
+ if (!ocfs2_is_o2cb_active() &&
+ lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
+ lvb = 1;
+
if (lvb)
dlm_flags |= DLM_LKF_VALBLK;

diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 5d965e83bd43..783bcdce5666 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
*/
static struct ocfs2_stack_plugin *active_stack;

+inline int ocfs2_is_o2cb_active(void)
+{
+ return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
+}
+EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
+
static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
{
struct ocfs2_stack_plugin *p;
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index 66334a30cea8..e1b30931974d 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -298,4 +298,7 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);

+/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
+int ocfs2_is_o2cb_active(void);
+
#endif /* STACKGLUE_H */
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index fe5b6e6c4671..4dbe1e2daeca 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -703,7 +703,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
ctl_dir = container_of(head, struct ctl_dir, header);

if (!dir_emit_dots(file, ctx))
- return 0;
+ goto out;

pos = 2;

@@ -713,6 +713,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
break;
}
}
+out:
sysctl_head_finish(head);
return 0;
}
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index 089f70f83e97..23da3af459fe 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -14,6 +14,7 @@ struct static_key_deferred {

#ifdef HAVE_JUMP_LABEL
extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+extern void static_key_deferred_flush(struct static_key_deferred *key);
extern void
jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);

@@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
STATIC_KEY_CHECK_USE();
static_key_slow_dec(&key->key);
}
+static inline void static_key_deferred_flush(struct static_key_deferred *key)
+{
+ STATIC_KEY_CHECK_USE();
+}
static inline void
jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 4b353e0be121..453ec4232852 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -138,6 +138,13 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key)
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

+void static_key_deferred_flush(struct static_key_deferred *key)
+{
+ STATIC_KEY_CHECK_USE();
+ flush_delayed_work(&key->work);
+}
+EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+
void jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
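
static_key_deferred_flush() exists because static_key_slow_dec_deferred() pushes the real decrement onto delayed work; a module that drops its last reference and is then unloaded could otherwise have that work run after its code is gone. Teardown paths pair the two, which is exactly what the new kvm_lapic_exit() does for the APIC keys:

        static void my_module_exit(void)        /* illustrative teardown */
        {
                /* wait for any pending deferred decrement before unload */
                static_key_deferred_flush(&my_deferred_key);
        }
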
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 25ced161ebeb..f719c925cb54 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -159,7 +159,9 @@ static void devm_memremap_pages_release(struct device *dev, void *res)
struct page_map *page_map = res;

/* pages are dead and unused, undo the arch mapping */
+ mem_hotplug_begin();
arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
+ mem_hotplug_done();
}

void *devm_memremap_pages(struct device *dev, struct resource *res)
@@ -189,7 +191,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res)
if (nid < 0)
nid = numa_mem_id();

+ mem_hotplug_begin();
error = arch_add_memory(nid, res->start, resource_size(res), true);
+ mem_hotplug_done();
if (error) {
devres_free(page_map);
return ERR_PTR(error);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4434cdd4cd9a..ea11123a9249 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1723,23 +1723,32 @@ free:
}

/*
- * When releasing a hugetlb pool reservation, any surplus pages that were
- * allocated to satisfy the reservation must be explicitly freed if they were
- * never used.
- * Called with hugetlb_lock held.
+ * This routine has two main purposes:
+ * 1) Decrement the reservation count (resv_huge_pages) by the value passed
+ * in unused_resv_pages. This corresponds to the prior adjustments made
+ * to the associated reservation map.
+ * 2) Free any unused surplus pages that may have been allocated to satisfy
+ * the reservation. As many as unused_resv_pages may be freed.
+ *
+ * Called with hugetlb_lock held. However, the lock could be dropped (and
+ * reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
+ * we must make sure nobody else can claim pages we are in the process of
+ * freeing. Do this by ensuring resv_huge_page always is greater than the
+ * number of huge pages we plan to free when dropping the lock.
*/
static void return_unused_surplus_pages(struct hstate *h,
unsigned long unused_resv_pages)
{
unsigned long nr_pages;

- /* Uncommit the reservation */
- h->resv_huge_pages -= unused_resv_pages;
-
/* Cannot return gigantic pages currently */
if (hstate_is_gigantic(h))
- return;
+ goto out;

+ /*
+ * Part (or even all) of the reservation could have been backed
+ * by pre-allocated pages. Only free surplus pages.
+ */
nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

/*
@@ -1749,12 +1758,22 @@ static void return_unused_surplus_pages(struct hstate *h,
* when the nodes with surplus pages have no free pages.
* free_pool_huge_page() will balance the the freed pages across the
* on-line nodes with memory and will handle the hstate accounting.
+ *
+ * Note that we decrement resv_huge_pages as we free the pages. If
+ * we drop the lock, resv_huge_pages will still be sufficiently large
+ * to cover subsequent pages we may free.
*/
while (nr_pages--) {
+ h->resv_huge_pages--;
+ unused_resv_pages--;
if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
- break;
+ goto out;
cond_resched_lock(&hugetlb_lock);
}
+
+out:
+ /* Fully uncommit the reservation */
+ h->resv_huge_pages -= unused_resv_pages;
}

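The hugetlb rework is an instance of the cond_resched_lock() pattern: a long loop under a spinlock may drop and retake the lock on any iteration, so counters that other tasks consult must stay conservative at every potential drop point. Here resv_huge_pages is decremented one page at a time instead of all up front. Schematically (the free helper is an illustrative stand-in):

        spin_lock(&hugetlb_lock);
        while (nr_pages--) {
                h->resv_huge_pages--;           /* never below the pages still to free */
                if (!free_one_surplus_page(h))  /* stand-in for free_pool_huge_page() */
                        break;
                cond_resched_lock(&hugetlb_lock);       /* lock may be dropped here */
        }
        spin_unlock(&hugetlb_lock);
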
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index bf65f31bd55e..1f0de6d74daa 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -13168,13 +13168,17 @@ static int nl80211_netlink_notify(struct notifier_block * nb,

list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
bool schedule_destroy_work = false;
- bool schedule_scan_stop = false;
struct cfg80211_sched_scan_request *sched_scan_req =
rcu_dereference(rdev->sched_scan_req);

if (sched_scan_req && notify->portid &&
- sched_scan_req->owner_nlportid == notify->portid)
- schedule_scan_stop = true;
+ sched_scan_req->owner_nlportid == notify->portid) {
+ sched_scan_req->owner_nlportid = 0;
+
+ if (rdev->ops->sched_scan_stop &&
+ rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
+ schedule_work(&rdev->sched_scan_stop_wk);
+ }

list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) {
cfg80211_mlme_unregister_socket(wdev, notify->portid);
@@ -13205,12 +13209,6 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
spin_unlock(&rdev->destroy_list_lock);
schedule_work(&rdev->destroy_work);
}
- } else if (schedule_scan_stop) {
- sched_scan_req->owner_nlportid = 0;
-
- if (rdev->ops->sched_scan_stop &&
- rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
- schedule_work(&rdev->sched_scan_stop_wk);
}
}

diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index c8edff6803d1..24ebd3e3eb7d 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -83,7 +83,7 @@ ifdef INSTALL_PATH
done;

@# Ask all targets to emit their test scripts
- echo "#!/bin/bash" > $(ALL_SCRIPT)
+ echo "#!/bin/sh" > $(ALL_SCRIPT)
echo "cd \$$(dirname \$$0)" >> $(ALL_SCRIPT)
echo "ROOT=\$$PWD" >> $(ALL_SCRIPT)

diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests
index c09a682df56a..16058bbea7a8 100755
--- a/tools/testing/selftests/net/run_netsocktests
+++ b/tools/testing/selftests/net/run_netsocktests
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh

echo "--------------------"
echo "running socket test"
diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c
index 09a03b5a21ff..e5d5dde6bf75 100644
--- a/virt/lib/irqbypass.c
+++ b/virt/lib/irqbypass.c
@@ -188,7 +188,7 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
mutex_lock(&lock);

list_for_each_entry(tmp, &consumers, node) {
- if (tmp->token == consumer->token) {
+ if (tmp->token == consumer->token || tmp == consumer) {
mutex_unlock(&lock);
module_put(THIS_MODULE);
return -EBUSY;
@@ -235,7 +235,7 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
mutex_lock(&lock);

list_for_each_entry(tmp, &consumers, node) {
- if (tmp->token != consumer->token)
+ if (tmp != consumer)
continue;

list_for_each_entry(producer, &producers, node) {