Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0108-4.9.9-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3608 - (show annotations) (download)
Fri Aug 14 07:34:29 2020 UTC (3 years, 8 months ago) by niro
File size: 78333 byte(s)
-added kernel-alx-legacy pkg
1 diff --git a/Makefile b/Makefile
2 index 1130803ab93c..c0c41c9fac0c 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 8
9 +SUBLEVEL = 9
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
14 index c53dbeae79f2..838dad5c209f 100644
15 --- a/arch/arm64/crypto/aes-modes.S
16 +++ b/arch/arm64/crypto/aes-modes.S
17 @@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
18 cbz w6, .Lcbcencloop
19
20 ld1 {v0.16b}, [x5] /* get iv */
21 - enc_prepare w3, x2, x5
22 + enc_prepare w3, x2, x6
23
24 .Lcbcencloop:
25 ld1 {v1.16b}, [x1], #16 /* get next pt block */
26 eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
27 - encrypt_block v0, w3, x2, x5, w6
28 + encrypt_block v0, w3, x2, x6, w7
29 st1 {v0.16b}, [x0], #16
30 subs w4, w4, #1
31 bne .Lcbcencloop
32 + st1 {v0.16b}, [x5] /* return iv */
33 ret
34 AES_ENDPROC(aes_cbc_encrypt)
35
36 @@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
37 cbz w6, .LcbcdecloopNx
38
39 ld1 {v7.16b}, [x5] /* get iv */
40 - dec_prepare w3, x2, x5
41 + dec_prepare w3, x2, x6
42
43 .LcbcdecloopNx:
44 #if INTERLEAVE >= 2
45 @@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
46 .Lcbcdecloop:
47 ld1 {v1.16b}, [x1], #16 /* get next ct block */
48 mov v0.16b, v1.16b /* ...and copy to v0 */
49 - decrypt_block v0, w3, x2, x5, w6
50 + decrypt_block v0, w3, x2, x6, w7
51 eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
52 mov v7.16b, v1.16b /* ct is next iv */
53 st1 {v0.16b}, [x0], #16
54 @@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
55 bne .Lcbcdecloop
56 .Lcbcdecout:
57 FRAME_POP
58 + st1 {v7.16b}, [x5] /* return iv */
59 ret
60 AES_ENDPROC(aes_cbc_decrypt)
61
62 @@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
63
64 AES_ENTRY(aes_ctr_encrypt)
65 FRAME_PUSH
66 - cbnz w6, .Lctrfirst /* 1st time around? */
67 - umov x5, v4.d[1] /* keep swabbed ctr in reg */
68 - rev x5, x5
69 -#if INTERLEAVE >= 2
70 - cmn w5, w4 /* 32 bit overflow? */
71 - bcs .Lctrinc
72 - add x5, x5, #1 /* increment BE ctr */
73 - b .LctrincNx
74 -#else
75 - b .Lctrinc
76 -#endif
77 -.Lctrfirst:
78 + cbz w6, .Lctrnotfirst /* 1st time around? */
79 enc_prepare w3, x2, x6
80 ld1 {v4.16b}, [x5]
81 - umov x5, v4.d[1] /* keep swabbed ctr in reg */
82 - rev x5, x5
83 +
84 +.Lctrnotfirst:
85 + umov x8, v4.d[1] /* keep swabbed ctr in reg */
86 + rev x8, x8
87 #if INTERLEAVE >= 2
88 - cmn w5, w4 /* 32 bit overflow? */
89 + cmn w8, w4 /* 32 bit overflow? */
90 bcs .Lctrloop
91 .LctrloopNx:
92 subs w4, w4, #INTERLEAVE
93 @@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
94 #if INTERLEAVE == 2
95 mov v0.8b, v4.8b
96 mov v1.8b, v4.8b
97 - rev x7, x5
98 - add x5, x5, #1
99 + rev x7, x8
100 + add x8, x8, #1
101 ins v0.d[1], x7
102 - rev x7, x5
103 - add x5, x5, #1
104 + rev x7, x8
105 + add x8, x8, #1
106 ins v1.d[1], x7
107 ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
108 do_encrypt_block2x
109 @@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
110 st1 {v0.16b-v1.16b}, [x0], #32
111 #else
112 ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
113 - dup v7.4s, w5
114 + dup v7.4s, w8
115 mov v0.16b, v4.16b
116 add v7.4s, v7.4s, v8.4s
117 mov v1.16b, v4.16b
118 @@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
119 eor v2.16b, v7.16b, v2.16b
120 eor v3.16b, v5.16b, v3.16b
121 st1 {v0.16b-v3.16b}, [x0], #64
122 - add x5, x5, #INTERLEAVE
123 + add x8, x8, #INTERLEAVE
124 #endif
125 - cbz w4, .LctroutNx
126 -.LctrincNx:
127 - rev x7, x5
128 + rev x7, x8
129 ins v4.d[1], x7
130 + cbz w4, .Lctrout
131 b .LctrloopNx
132 -.LctroutNx:
133 - sub x5, x5, #1
134 - rev x7, x5
135 - ins v4.d[1], x7
136 - b .Lctrout
137 .Lctr1x:
138 adds w4, w4, #INTERLEAVE
139 beq .Lctrout
140 @@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
141 .Lctrloop:
142 mov v0.16b, v4.16b
143 encrypt_block v0, w3, x2, x6, w7
144 +
145 + adds x8, x8, #1 /* increment BE ctr */
146 + rev x7, x8
147 + ins v4.d[1], x7
148 + bcs .Lctrcarry /* overflow? */
149 +
150 +.Lctrcarrydone:
151 subs w4, w4, #1
152 bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
153 ld1 {v3.16b}, [x1], #16
154 eor v3.16b, v0.16b, v3.16b
155 st1 {v3.16b}, [x0], #16
156 - beq .Lctrout
157 -.Lctrinc:
158 - adds x5, x5, #1 /* increment BE ctr */
159 - rev x7, x5
160 - ins v4.d[1], x7
161 - bcc .Lctrloop /* no overflow? */
162 - umov x7, v4.d[0] /* load upper word of ctr */
163 - rev x7, x7 /* ... to handle the carry */
164 - add x7, x7, #1
165 - rev x7, x7
166 - ins v4.d[0], x7
167 - b .Lctrloop
168 + bne .Lctrloop
169 +
170 +.Lctrout:
171 + st1 {v4.16b}, [x5] /* return next CTR value */
172 + FRAME_POP
173 + ret
174 +
175 .Lctrhalfblock:
176 ld1 {v3.8b}, [x1]
177 eor v3.8b, v0.8b, v3.8b
178 st1 {v3.8b}, [x0]
179 -.Lctrout:
180 FRAME_POP
181 ret
182 +
183 +.Lctrcarry:
184 + umov x7, v4.d[0] /* load upper word of ctr */
185 + rev x7, x7 /* ... to handle the carry */
186 + add x7, x7, #1
187 + rev x7, x7
188 + ins v4.d[0], x7
189 + b .Lctrcarrydone
190 AES_ENDPROC(aes_ctr_encrypt)
191 .ltorg
192
193 diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
194 index b312b152461b..6e834caa3720 100644
195 --- a/arch/powerpc/include/asm/cpu_has_feature.h
196 +++ b/arch/powerpc/include/asm/cpu_has_feature.h
197 @@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
198 {
199 int i;
200
201 +#ifndef __clang__ /* clang can't cope with this */
202 BUILD_BUG_ON(!__builtin_constant_p(feature));
203 +#endif
204
205 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
206 if (!static_key_initialized) {
207 diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
208 index e311c25751a4..a244e09d2d88 100644
209 --- a/arch/powerpc/include/asm/mmu.h
210 +++ b/arch/powerpc/include/asm/mmu.h
211 @@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
212 {
213 int i;
214
215 +#ifndef __clang__ /* clang can't cope with this */
216 BUILD_BUG_ON(!__builtin_constant_p(feature));
217 +#endif
218
219 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
220 if (!static_key_initialized) {
221 diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
222 index 5c31369435f2..a5dd493670a0 100644
223 --- a/arch/powerpc/kernel/eeh_driver.c
224 +++ b/arch/powerpc/kernel/eeh_driver.c
225 @@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
226 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
227 {
228 struct eeh_pe *pe = (struct eeh_pe *)data;
229 - bool *clear_sw_state = flag;
230 + bool clear_sw_state = *(bool *)flag;
231 int i, rc = 1;
232
233 for (i = 0; rc && i < 3; i++)
234 diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
235 index 88ac964f4858..1e8c57207346 100644
236 --- a/arch/powerpc/kernel/prom_init.c
237 +++ b/arch/powerpc/kernel/prom_init.c
238 @@ -2747,6 +2747,9 @@ static void __init prom_find_boot_cpu(void)
239
240 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
241
242 + if (!PHANDLE_VALID(cpu_pkg))
243 + return;
244 +
245 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
246 prom.cpu = be32_to_cpu(rval);
247
248 diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
249 index ebb7f46f0532..9a25dce87875 100644
250 --- a/arch/powerpc/mm/pgtable-radix.c
251 +++ b/arch/powerpc/mm/pgtable-radix.c
252 @@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
253 if (!pmdp)
254 return -ENOMEM;
255 if (map_page_size == PMD_SIZE) {
256 - ptep = (pte_t *)pudp;
257 + ptep = pmdp_ptep(pmdp);
258 goto set_the_pte;
259 }
260 ptep = pte_alloc_kernel(pmdp, ea);
261 @@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
262 }
263 pmdp = pmd_offset(pudp, ea);
264 if (map_page_size == PMD_SIZE) {
265 - ptep = (pte_t *)pudp;
266 + ptep = pmdp_ptep(pmdp);
267 goto set_the_pte;
268 }
269 if (!pmd_present(*pmdp)) {
270 diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
271 index dbaaf7dc8373..19d646a783fd 100644
272 --- a/arch/x86/events/intel/uncore.c
273 +++ b/arch/x86/events/intel/uncore.c
274 @@ -763,30 +763,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
275 pmu->registered = false;
276 }
277
278 -static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
279 -{
280 - struct intel_uncore_pmu *pmu = type->pmus;
281 - struct intel_uncore_box *box;
282 - int i, pkg;
283 -
284 - if (pmu) {
285 - pkg = topology_physical_package_id(cpu);
286 - for (i = 0; i < type->num_boxes; i++, pmu++) {
287 - box = pmu->boxes[pkg];
288 - if (box)
289 - uncore_box_exit(box);
290 - }
291 - }
292 -}
293 -
294 -static void uncore_exit_boxes(void *dummy)
295 -{
296 - struct intel_uncore_type **types;
297 -
298 - for (types = uncore_msr_uncores; *types; types++)
299 - __uncore_exit_boxes(*types++, smp_processor_id());
300 -}
301 -
302 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
303 {
304 int pkg;
305 @@ -1077,22 +1053,12 @@ static int uncore_cpu_dying(unsigned int cpu)
306 return 0;
307 }
308
309 -static int first_init;
310 -
311 static int uncore_cpu_starting(unsigned int cpu)
312 {
313 struct intel_uncore_type *type, **types = uncore_msr_uncores;
314 struct intel_uncore_pmu *pmu;
315 struct intel_uncore_box *box;
316 - int i, pkg, ncpus = 1;
317 -
318 - if (first_init) {
319 - /*
320 - * On init we get the number of online cpus in the package
321 - * and set refcount for all of them.
322 - */
323 - ncpus = cpumask_weight(topology_core_cpumask(cpu));
324 - }
325 + int i, pkg;
326
327 pkg = topology_logical_package_id(cpu);
328 for (; *types; types++) {
329 @@ -1103,7 +1069,7 @@ static int uncore_cpu_starting(unsigned int cpu)
330 if (!box)
331 continue;
332 /* The first cpu on a package activates the box */
333 - if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
334 + if (atomic_inc_return(&box->refcnt) == 1)
335 uncore_box_init(box);
336 }
337 }
338 @@ -1407,19 +1373,17 @@ static int __init intel_uncore_init(void)
339 "PERF_X86_UNCORE_PREP",
340 uncore_cpu_prepare, NULL);
341 }
342 - first_init = 1;
343 +
344 cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
345 "AP_PERF_X86_UNCORE_STARTING",
346 uncore_cpu_starting, uncore_cpu_dying);
347 - first_init = 0;
348 +
349 cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
350 "AP_PERF_X86_UNCORE_ONLINE",
351 uncore_event_cpu_online, uncore_event_cpu_offline);
352 return 0;
353
354 err:
355 - /* Undo box->init_box() */
356 - on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
357 uncore_types_exit(uncore_msr_uncores);
358 uncore_pci_exit();
359 return ret;
360 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
361 index 3d8ff40ecc6f..7249f1500bcb 100644
362 --- a/arch/x86/kernel/apic/io_apic.c
363 +++ b/arch/x86/kernel/apic/io_apic.c
364 @@ -2118,6 +2118,7 @@ static inline void __init check_timer(void)
365 if (idx != -1 && irq_trigger(idx))
366 unmask_ioapic_irq(irq_get_chip_data(0));
367 }
368 + irq_domain_deactivate_irq(irq_data);
369 irq_domain_activate_irq(irq_data);
370 if (timer_irq_works()) {
371 if (disable_timer_pin_1 > 0)
372 @@ -2139,6 +2140,7 @@ static inline void __init check_timer(void)
373 * legacy devices should be connected to IO APIC #0
374 */
375 replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
376 + irq_domain_deactivate_irq(irq_data);
377 irq_domain_activate_irq(irq_data);
378 legacy_pic->unmask(0);
379 if (timer_irq_works()) {
380 diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
381 index 274fab99169d..932348fbb6ea 100644
382 --- a/arch/x86/kernel/hpet.c
383 +++ b/arch/x86/kernel/hpet.c
384 @@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
385 } else {
386 struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
387
388 + irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
389 irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
390 disable_irq(hdev->irq);
391 irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
392 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
393 index 487b957e7802..731044efb195 100644
394 --- a/arch/x86/kvm/x86.c
395 +++ b/arch/x86/kvm/x86.c
396 @@ -3148,6 +3148,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
397 memcpy(dest, xsave, XSAVE_HDR_OFFSET);
398
399 /* Set XSTATE_BV */
400 + xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
401 *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
402
403 /*
404 diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
405 index 319148bd4b05..2f25a363068c 100644
406 --- a/arch/x86/platform/efi/efi_64.c
407 +++ b/arch/x86/platform/efi/efi_64.c
408 @@ -269,6 +269,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
409 efi_scratch.use_pgd = true;
410
411 /*
412 + * Certain firmware versions are way too sentimential and still believe
413 + * they are exclusive and unquestionable owners of the first physical page,
414 + * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
415 + * (but then write-access it later during SetVirtualAddressMap()).
416 + *
417 + * Create a 1:1 mapping for this page, to avoid triple faults during early
418 + * boot with such firmware. We are free to hand this page to the BIOS,
419 + * as trim_bios_range() will reserve the first page and isolate it away
420 + * from memory allocators anyway.
421 + */
422 + if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
423 + pr_err("Failed to create 1:1 mapping for the first page!\n");
424 + return 1;
425 + }
426 +
427 + /*
428 * When making calls to the firmware everything needs to be 1:1
429 * mapped and addressable with 32-bit pointers. Map the kernel
430 * text and allocate a new stack because we can't rely on the
431 diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
432 index 88a044af7504..32cdc2c52e98 100644
433 --- a/arch/xtensa/kernel/setup.c
434 +++ b/arch/xtensa/kernel/setup.c
435 @@ -540,7 +540,7 @@ subsys_initcall(topology_init);
436
437 void cpu_reset(void)
438 {
439 -#if XCHAL_HAVE_PTP_MMU
440 +#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
441 local_irq_disable();
442 /*
443 * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
444 diff --git a/crypto/algapi.c b/crypto/algapi.c
445 index df939b54b09f..1fad2a6b3bbb 100644
446 --- a/crypto/algapi.c
447 +++ b/crypto/algapi.c
448 @@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
449 struct crypto_larval *larval;
450 int err;
451
452 + alg->cra_flags &= ~CRYPTO_ALG_DEAD;
453 err = crypto_check_alg(alg);
454 if (err)
455 return err;
456 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
457 index 223a770f78f3..33e363dcc63b 100644
458 --- a/drivers/ata/libata-core.c
459 +++ b/drivers/ata/libata-core.c
460 @@ -1695,6 +1695,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
461
462 if (qc->err_mask & ~AC_ERR_OTHER)
463 qc->err_mask &= ~AC_ERR_OTHER;
464 + } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
465 + qc->result_tf.command |= ATA_SENSE;
466 }
467
468 /* finish up */
469 @@ -4316,10 +4318,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
470 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
471
472 /*
473 - * Device times out with higher max sects.
474 + * These devices time out with higher max sects.
475 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
476 */
477 - { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
478 + { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
479
480 /* Devices we expect to fail diagnostics */
481
482 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
483 index 823e938c9a78..2f32782cea6d 100644
484 --- a/drivers/ata/sata_mv.c
485 +++ b/drivers/ata/sata_mv.c
486 @@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
487 host->iomap = NULL;
488 hpriv->base = devm_ioremap(&pdev->dev, res->start,
489 resource_size(res));
490 + if (!hpriv->base)
491 + return -ENOMEM;
492 +
493 hpriv->base -= SATAHC0_REG_BASE;
494
495 hpriv->clk = clk_get(&pdev->dev, NULL);
496 diff --git a/drivers/base/memory.c b/drivers/base/memory.c
497 index e7f86a8887d2..c5cdd190b781 100644
498 --- a/drivers/base/memory.c
499 +++ b/drivers/base/memory.c
500 @@ -391,33 +391,33 @@ static ssize_t show_valid_zones(struct device *dev,
501 {
502 struct memory_block *mem = to_memory_block(dev);
503 unsigned long start_pfn, end_pfn;
504 + unsigned long valid_start, valid_end, valid_pages;
505 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
506 - struct page *first_page;
507 struct zone *zone;
508 int zone_shift = 0;
509
510 start_pfn = section_nr_to_pfn(mem->start_section_nr);
511 end_pfn = start_pfn + nr_pages;
512 - first_page = pfn_to_page(start_pfn);
513
514 /* The block contains more than one zone can not be offlined. */
515 - if (!test_pages_in_a_zone(start_pfn, end_pfn))
516 + if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
517 return sprintf(buf, "none\n");
518
519 - zone = page_zone(first_page);
520 + zone = page_zone(pfn_to_page(valid_start));
521 + valid_pages = valid_end - valid_start;
522
523 /* MMOP_ONLINE_KEEP */
524 sprintf(buf, "%s", zone->name);
525
526 /* MMOP_ONLINE_KERNEL */
527 - zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
528 + zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
529 if (zone_shift) {
530 strcat(buf, " ");
531 strcat(buf, (zone + zone_shift)->name);
532 }
533
534 /* MMOP_ONLINE_MOVABLE */
535 - zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
536 + zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
537 if (zone_shift) {
538 strcat(buf, " ");
539 strcat(buf, (zone + zone_shift)->name);
540 diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
541 index f642c4264c27..168fa175d65a 100644
542 --- a/drivers/bcma/bcma_private.h
543 +++ b/drivers/bcma/bcma_private.h
544 @@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
545 void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
546 void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
547 void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
548 +#ifdef CONFIG_BCMA_DRIVER_MIPS
549 +void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
550 +#endif /* CONFIG_BCMA_DRIVER_MIPS */
551
552 /* driver_chipcommon_b.c */
553 int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
554 diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
555 index b4f6520e74f0..62f5bfa5065d 100644
556 --- a/drivers/bcma/driver_chipcommon.c
557 +++ b/drivers/bcma/driver_chipcommon.c
558 @@ -15,8 +15,6 @@
559 #include <linux/platform_device.h>
560 #include <linux/bcma/bcma.h>
561
562 -static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
563 -
564 static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
565 u32 mask, u32 value)
566 {
567 @@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
568 if (cc->capabilities & BCMA_CC_CAP_PMU)
569 bcma_pmu_early_init(cc);
570
571 - if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
572 - bcma_chipco_serial_init(cc);
573 -
574 if (bus->hosttype == BCMA_HOSTTYPE_SOC)
575 bcma_core_chipcommon_flash_detect(cc);
576
577 @@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
578 return res;
579 }
580
581 -static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
582 +#ifdef CONFIG_BCMA_DRIVER_MIPS
583 +void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
584 {
585 -#if IS_BUILTIN(CONFIG_BCM47XX)
586 unsigned int irq;
587 u32 baud_base;
588 u32 i;
589 @@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
590 ports[i].baud_base = baud_base;
591 ports[i].reg_shift = 0;
592 }
593 -#endif /* CONFIG_BCM47XX */
594 }
595 +#endif /* CONFIG_BCMA_DRIVER_MIPS */
596 diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
597 index 96f171328200..89af807cf29c 100644
598 --- a/drivers/bcma/driver_mips.c
599 +++ b/drivers/bcma/driver_mips.c
600 @@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
601
602 void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
603 {
604 + struct bcma_bus *bus = mcore->core->bus;
605 +
606 if (mcore->early_setup_done)
607 return;
608
609 + bcma_chipco_serial_init(&bus->drv_cc);
610 bcma_core_mips_nvram_init(mcore);
611
612 mcore->early_setup_done = true;
613 diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
614 index d5ba43a87a68..55c1782e3623 100644
615 --- a/drivers/dma/cppi41.c
616 +++ b/drivers/dma/cppi41.c
617 @@ -153,6 +153,8 @@ struct cppi41_dd {
618
619 /* context for suspend/resume */
620 unsigned int dma_tdfdq;
621 +
622 + bool is_suspended;
623 };
624
625 #define FIST_COMPLETION_QUEUE 93
626 @@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
627 BUG_ON(desc_num >= ALLOC_DECS_NUM);
628 c = cdd->chan_busy[desc_num];
629 cdd->chan_busy[desc_num] = NULL;
630 +
631 + /* Usecount for chan_busy[], paired with push_desc_queue() */
632 + pm_runtime_put(cdd->ddev.dev);
633 +
634 return c;
635 }
636
637 @@ -447,6 +453,15 @@ static void push_desc_queue(struct cppi41_channel *c)
638 */
639 __iowmb();
640
641 + /*
642 + * DMA transfers can take at least 200ms to complete with USB mass
643 + * storage connected. To prevent autosuspend timeouts, we must use
644 + * pm_runtime_get/put() when chan_busy[] is modified. This will get
645 + * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
646 + * outcome of the transfer.
647 + */
648 + pm_runtime_get(cdd->ddev.dev);
649 +
650 desc_phys = lower_32_bits(c->desc_phys);
651 desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
652 WARN_ON(cdd->chan_busy[desc_num]);
653 @@ -457,20 +472,26 @@ static void push_desc_queue(struct cppi41_channel *c)
654 cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
655 }
656
657 -static void pending_desc(struct cppi41_channel *c)
658 +/*
659 + * Caller must hold cdd->lock to prevent push_desc_queue()
660 + * getting called out of order. We have both cppi41_dma_issue_pending()
661 + * and cppi41_runtime_resume() call this function.
662 + */
663 +static void cppi41_run_queue(struct cppi41_dd *cdd)
664 {
665 - struct cppi41_dd *cdd = c->cdd;
666 - unsigned long flags;
667 + struct cppi41_channel *c, *_c;
668
669 - spin_lock_irqsave(&cdd->lock, flags);
670 - list_add_tail(&c->node, &cdd->pending);
671 - spin_unlock_irqrestore(&cdd->lock, flags);
672 + list_for_each_entry_safe(c, _c, &cdd->pending, node) {
673 + push_desc_queue(c);
674 + list_del(&c->node);
675 + }
676 }
677
678 static void cppi41_dma_issue_pending(struct dma_chan *chan)
679 {
680 struct cppi41_channel *c = to_cpp41_chan(chan);
681 struct cppi41_dd *cdd = c->cdd;
682 + unsigned long flags;
683 int error;
684
685 error = pm_runtime_get(cdd->ddev.dev);
686 @@ -482,10 +503,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
687 return;
688 }
689
690 - if (likely(pm_runtime_active(cdd->ddev.dev)))
691 - push_desc_queue(c);
692 - else
693 - pending_desc(c);
694 + spin_lock_irqsave(&cdd->lock, flags);
695 + list_add_tail(&c->node, &cdd->pending);
696 + if (!cdd->is_suspended)
697 + cppi41_run_queue(cdd);
698 + spin_unlock_irqrestore(&cdd->lock, flags);
699
700 pm_runtime_mark_last_busy(cdd->ddev.dev);
701 pm_runtime_put_autosuspend(cdd->ddev.dev);
702 @@ -705,6 +727,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
703 WARN_ON(!cdd->chan_busy[desc_num]);
704 cdd->chan_busy[desc_num] = NULL;
705
706 + /* Usecount for chan_busy[], paired with push_desc_queue() */
707 + pm_runtime_put(cdd->ddev.dev);
708 +
709 return 0;
710 }
711
712 @@ -1150,8 +1175,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
713 static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
714 {
715 struct cppi41_dd *cdd = dev_get_drvdata(dev);
716 + unsigned long flags;
717
718 + spin_lock_irqsave(&cdd->lock, flags);
719 + cdd->is_suspended = true;
720 WARN_ON(!list_empty(&cdd->pending));
721 + spin_unlock_irqrestore(&cdd->lock, flags);
722
723 return 0;
724 }
725 @@ -1159,14 +1188,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
726 static int __maybe_unused cppi41_runtime_resume(struct device *dev)
727 {
728 struct cppi41_dd *cdd = dev_get_drvdata(dev);
729 - struct cppi41_channel *c, *_c;
730 unsigned long flags;
731
732 spin_lock_irqsave(&cdd->lock, flags);
733 - list_for_each_entry_safe(c, _c, &cdd->pending, node) {
734 - push_desc_queue(c);
735 - list_del(&c->node);
736 - }
737 + cdd->is_suspended = false;
738 + cppi41_run_queue(cdd);
739 spin_unlock_irqrestore(&cdd->lock, flags);
740
741 return 0;
742 diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
743 index 921dfa047202..260c4b4b492e 100644
744 --- a/drivers/firmware/efi/libstub/fdt.c
745 +++ b/drivers/firmware/efi/libstub/fdt.c
746 @@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
747 struct exit_boot_struct {
748 efi_memory_desc_t *runtime_map;
749 int *runtime_entry_count;
750 + void *new_fdt_addr;
751 };
752
753 static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
754 @@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
755 efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
756 p->runtime_map, p->runtime_entry_count);
757
758 - return EFI_SUCCESS;
759 + return update_fdt_memmap(p->new_fdt_addr, map);
760 }
761
762 /*
763 @@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
764
765 priv.runtime_map = runtime_map;
766 priv.runtime_entry_count = &runtime_entry_count;
767 + priv.new_fdt_addr = (void *)*new_fdt_addr;
768 status = efi_exit_boot_services(sys_table, handle, &map, &priv,
769 exit_boot_func);
770
771 if (status == EFI_SUCCESS) {
772 efi_set_virtual_address_map_t *svam;
773
774 - status = update_fdt_memmap((void *)*new_fdt_addr, &map);
775 - if (status != EFI_SUCCESS) {
776 - /*
777 - * The kernel won't get far without the memory map, but
778 - * may still be able to print something meaningful so
779 - * return success here.
780 - */
781 - return EFI_SUCCESS;
782 - }
783 -
784 /* Install the new virtual address map */
785 svam = sys_table->runtime->set_virtual_address_map;
786 status = svam(runtime_entry_count * desc_size, desc_size,
787 diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
788 index b13c8aaec078..6df924f72f29 100644
789 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
790 +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
791 @@ -227,6 +227,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
792 }
793 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
794
795 + if (adev->mode_info.num_crtc)
796 + amdgpu_display_set_vga_render_state(adev, false);
797 +
798 gmc_v6_0_mc_stop(adev, &save);
799
800 if (gmc_v6_0_wait_for_idle((void *)adev)) {
801 @@ -256,7 +259,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
802 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
803 }
804 gmc_v6_0_mc_resume(adev, &save);
805 - amdgpu_display_set_vga_render_state(adev, false);
806 }
807
808 static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
809 diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
810 index 67db1577ee49..4147e51cf893 100644
811 --- a/drivers/gpu/drm/i915/intel_lrc.c
812 +++ b/drivers/gpu/drm/i915/intel_lrc.c
813 @@ -2152,30 +2152,42 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
814
815 void intel_lr_context_resume(struct drm_i915_private *dev_priv)
816 {
817 - struct i915_gem_context *ctx = dev_priv->kernel_context;
818 struct intel_engine_cs *engine;
819 + struct i915_gem_context *ctx;
820 +
821 + /* Because we emit WA_TAIL_DWORDS there may be a disparity
822 + * between our bookkeeping in ce->ring->head and ce->ring->tail and
823 + * that stored in context. As we only write new commands from
824 + * ce->ring->tail onwards, everything before that is junk. If the GPU
825 + * starts reading from its RING_HEAD from the context, it may try to
826 + * execute that junk and die.
827 + *
828 + * So to avoid that we reset the context images upon resume. For
829 + * simplicity, we just zero everything out.
830 + */
831 + list_for_each_entry(ctx, &dev_priv->context_list, link) {
832 + for_each_engine(engine, dev_priv) {
833 + struct intel_context *ce = &ctx->engine[engine->id];
834 + u32 *reg;
835
836 - for_each_engine(engine, dev_priv) {
837 - struct intel_context *ce = &ctx->engine[engine->id];
838 - void *vaddr;
839 - uint32_t *reg_state;
840 -
841 - if (!ce->state)
842 - continue;
843 -
844 - vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
845 - if (WARN_ON(IS_ERR(vaddr)))
846 - continue;
847 + if (!ce->state)
848 + continue;
849
850 - reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
851 + reg = i915_gem_object_pin_map(ce->state->obj,
852 + I915_MAP_WB);
853 + if (WARN_ON(IS_ERR(reg)))
854 + continue;
855
856 - reg_state[CTX_RING_HEAD+1] = 0;
857 - reg_state[CTX_RING_TAIL+1] = 0;
858 + reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
859 + reg[CTX_RING_HEAD+1] = 0;
860 + reg[CTX_RING_TAIL+1] = 0;
861
862 - ce->state->obj->dirty = true;
863 - i915_gem_object_unpin_map(ce->state->obj);
864 + ce->state->obj->dirty = true;
865 + i915_gem_object_unpin_map(ce->state->obj);
866
867 - ce->ring->head = 0;
868 - ce->ring->tail = 0;
869 + ce->ring->head = ce->ring->tail = 0;
870 + ce->ring->last_retired_head = -1;
871 + intel_ring_update_space(ce->ring);
872 + }
873 }
874 }
875 diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
876 index 74856a8b8f35..e64f52464ecf 100644
877 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
878 +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
879 @@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
880 uint32_t mpllP;
881
882 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
883 + mpllP = (mpllP >> 8) & 0xf;
884 if (!mpllP)
885 mpllP = 4;
886
887 @@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
888 uint32_t clock;
889
890 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
891 - return clock;
892 + return clock / 1000;
893 }
894
895 ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
896 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
897 index 6f0436df0219..f8f2f16c22a2 100644
898 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
899 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
900 @@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
901 );
902 }
903 for (i = 0; i < size; i++)
904 - nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
905 + nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
906 for (; i < 0x60; i++)
907 nvkm_wr32(device, 0x61c440 + soff, (i << 8));
908 nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
909 diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
910 index 60d30203a5fa..e06c1344c913 100644
911 --- a/drivers/hid/hid-cp2112.c
912 +++ b/drivers/hid/hid-cp2112.c
913 @@ -167,7 +167,7 @@ struct cp2112_device {
914 atomic_t xfer_avail;
915 struct gpio_chip gc;
916 u8 *in_out_buffer;
917 - spinlock_t lock;
918 + struct mutex lock;
919 };
920
921 static int gpio_push_pull = 0xFF;
922 @@ -179,10 +179,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
923 struct cp2112_device *dev = gpiochip_get_data(chip);
924 struct hid_device *hdev = dev->hdev;
925 u8 *buf = dev->in_out_buffer;
926 - unsigned long flags;
927 int ret;
928
929 - spin_lock_irqsave(&dev->lock, flags);
930 + mutex_lock(&dev->lock);
931
932 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
933 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
934 @@ -206,8 +205,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
935 ret = 0;
936
937 exit:
938 - spin_unlock_irqrestore(&dev->lock, flags);
939 - return ret <= 0 ? ret : -EIO;
940 + mutex_unlock(&dev->lock);
941 + return ret < 0 ? ret : -EIO;
942 }
943
944 static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
945 @@ -215,10 +214,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
946 struct cp2112_device *dev = gpiochip_get_data(chip);
947 struct hid_device *hdev = dev->hdev;
948 u8 *buf = dev->in_out_buffer;
949 - unsigned long flags;
950 int ret;
951
952 - spin_lock_irqsave(&dev->lock, flags);
953 + mutex_lock(&dev->lock);
954
955 buf[0] = CP2112_GPIO_SET;
956 buf[1] = value ? 0xff : 0;
957 @@ -230,7 +228,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
958 if (ret < 0)
959 hid_err(hdev, "error setting GPIO values: %d\n", ret);
960
961 - spin_unlock_irqrestore(&dev->lock, flags);
962 + mutex_unlock(&dev->lock);
963 }
964
965 static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
966 @@ -238,10 +236,9 @@ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
967 struct cp2112_device *dev = gpiochip_get_data(chip);
968 struct hid_device *hdev = dev->hdev;
969 u8 *buf = dev->in_out_buffer;
970 - unsigned long flags;
971 int ret;
972
973 - spin_lock_irqsave(&dev->lock, flags);
974 + mutex_lock(&dev->lock);
975
976 ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
977 CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
978 @@ -255,7 +252,7 @@ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
979 ret = (buf[1] >> offset) & 1;
980
981 exit:
982 - spin_unlock_irqrestore(&dev->lock, flags);
983 + mutex_unlock(&dev->lock);
984
985 return ret;
986 }
987 @@ -266,10 +263,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
988 struct cp2112_device *dev = gpiochip_get_data(chip);
989 struct hid_device *hdev = dev->hdev;
990 u8 *buf = dev->in_out_buffer;
991 - unsigned long flags;
992 int ret;
993
994 - spin_lock_irqsave(&dev->lock, flags);
995 + mutex_lock(&dev->lock);
996
997 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
998 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
999 @@ -290,7 +286,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
1000 goto fail;
1001 }
1002
1003 - spin_unlock_irqrestore(&dev->lock, flags);
1004 + mutex_unlock(&dev->lock);
1005
1006 /*
1007 * Set gpio value when output direction is already set,
1008 @@ -301,7 +297,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
1009 return 0;
1010
1011 fail:
1012 - spin_unlock_irqrestore(&dev->lock, flags);
1013 + mutex_unlock(&dev->lock);
1014 return ret < 0 ? ret : -EIO;
1015 }
1016
1017 @@ -1057,7 +1053,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
1018 if (!dev->in_out_buffer)
1019 return -ENOMEM;
1020
1021 - spin_lock_init(&dev->lock);
1022 + mutex_init(&dev->lock);
1023
1024 ret = hid_parse(hdev);
1025 if (ret) {
1026 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1027 index 575aa65436d1..9845189fae92 100644
1028 --- a/drivers/hid/hid-ids.h
1029 +++ b/drivers/hid/hid-ids.h
1030 @@ -76,6 +76,9 @@
1031 #define USB_VENDOR_ID_ALPS_JP 0x044E
1032 #define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
1033
1034 +#define USB_VENDOR_ID_AMI 0x046b
1035 +#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
1036 +
1037 #define USB_VENDOR_ID_ANTON 0x1130
1038 #define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
1039
1040 diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
1041 index c5c5fbe9d605..52026dc94d5c 100644
1042 --- a/drivers/hid/hid-lg.c
1043 +++ b/drivers/hid/hid-lg.c
1044 @@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
1045 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
1046 .driver_data = LG_NOGET | LG_FF4 },
1047 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
1048 - .driver_data = LG_FF2 },
1049 + .driver_data = LG_NOGET | LG_FF2 },
1050 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
1051 .driver_data = LG_FF3 },
1052 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
1053 diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
1054 index e6cfd323babc..cde060fefa91 100644
1055 --- a/drivers/hid/usbhid/hid-quirks.c
1056 +++ b/drivers/hid/usbhid/hid-quirks.c
1057 @@ -57,6 +57,7 @@ static const struct hid_blacklist {
1058 { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
1059 { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
1060 { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
1061 + { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
1062 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
1063 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
1064 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
1065 diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
1066 index 1cb79925730d..623be90704ab 100644
1067 --- a/drivers/hid/wacom_wac.c
1068 +++ b/drivers/hid/wacom_wac.c
1069 @@ -164,19 +164,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
1070 wacom->id[0] = STYLUS_DEVICE_ID;
1071 }
1072
1073 - pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
1074 - if (features->pressure_max > 255)
1075 - pressure = (pressure << 1) | ((data[4] >> 6) & 1);
1076 - pressure += (features->pressure_max + 1) / 2;
1077 -
1078 - input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
1079 - input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
1080 - input_report_abs(input, ABS_PRESSURE, pressure);
1081 -
1082 - input_report_key(input, BTN_TOUCH, data[4] & 0x08);
1083 - input_report_key(input, BTN_STYLUS, data[4] & 0x10);
1084 - /* Only allow the stylus2 button to be reported for the pen tool. */
1085 - input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
1086 + if (prox) {
1087 + pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
1088 + if (features->pressure_max > 255)
1089 + pressure = (pressure << 1) | ((data[4] >> 6) & 1);
1090 + pressure += (features->pressure_max + 1) / 2;
1091 +
1092 + input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
1093 + input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
1094 + input_report_abs(input, ABS_PRESSURE, pressure);
1095 +
1096 + input_report_key(input, BTN_TOUCH, data[4] & 0x08);
1097 + input_report_key(input, BTN_STYLUS, data[4] & 0x10);
1098 + /* Only allow the stylus2 button to be reported for the pen tool. */
1099 + input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
1100 + }
1101
1102 if (!prox)
1103 wacom->id[0] = 0;
1104 diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
1105 index 2bbf0c521beb..7d61b566e148 100644
1106 --- a/drivers/iio/adc/palmas_gpadc.c
1107 +++ b/drivers/iio/adc/palmas_gpadc.c
1108 @@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
1109
1110 static int palmas_gpadc_suspend(struct device *dev)
1111 {
1112 - struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1113 + struct iio_dev *indio_dev = dev_get_drvdata(dev);
1114 struct palmas_gpadc *adc = iio_priv(indio_dev);
1115 int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
1116 int ret;
1117 @@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
1118
1119 static int palmas_gpadc_resume(struct device *dev)
1120 {
1121 - struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1122 + struct iio_dev *indio_dev = dev_get_drvdata(dev);
1123 struct palmas_gpadc *adc = iio_priv(indio_dev);
1124 int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
1125 int ret;
1126 diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
1127 index 9a081465c42f..6bb23a49e81e 100644
1128 --- a/drivers/iio/health/afe4403.c
1129 +++ b/drivers/iio/health/afe4403.c
1130 @@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
1131
1132 static int __maybe_unused afe4403_suspend(struct device *dev)
1133 {
1134 - struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1135 + struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
1136 struct afe4403_data *afe = iio_priv(indio_dev);
1137 int ret;
1138
1139 @@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
1140
1141 static int __maybe_unused afe4403_resume(struct device *dev)
1142 {
1143 - struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1144 + struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
1145 struct afe4403_data *afe = iio_priv(indio_dev);
1146 int ret;
1147
1148 diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
1149 index 45266404f7e3..964f5231a831 100644
1150 --- a/drivers/iio/health/afe4404.c
1151 +++ b/drivers/iio/health/afe4404.c
1152 @@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
1153
1154 static int __maybe_unused afe4404_suspend(struct device *dev)
1155 {
1156 - struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1157 + struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
1158 struct afe4404_data *afe = iio_priv(indio_dev);
1159 int ret;
1160
1161 @@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
1162
1163 static int __maybe_unused afe4404_resume(struct device *dev)
1164 {
1165 - struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1166 + struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
1167 struct afe4404_data *afe = iio_priv(indio_dev);
1168 int ret;
1169
1170 diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
1171 index 90ab8a2d2846..183c14329d6e 100644
1172 --- a/drivers/iio/health/max30100.c
1173 +++ b/drivers/iio/health/max30100.c
1174 @@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
1175
1176 mutex_lock(&data->lock);
1177
1178 - while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
1179 + while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
1180 ret = max30100_read_measurement(data);
1181 if (ret)
1182 break;
1183 diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
1184 index 9c47bc98f3ac..2a22ad920333 100644
1185 --- a/drivers/iio/humidity/dht11.c
1186 +++ b/drivers/iio/humidity/dht11.c
1187 @@ -71,7 +71,8 @@
1188 * a) select an implementation using busy loop polling on those systems
1189 * b) use the checksum to do some probabilistic decoding
1190 */
1191 -#define DHT11_START_TRANSMISSION 18 /* ms */
1192 +#define DHT11_START_TRANSMISSION_MIN 18000 /* us */
1193 +#define DHT11_START_TRANSMISSION_MAX 20000 /* us */
1194 #define DHT11_MIN_TIMERES 34000 /* ns */
1195 #define DHT11_THRESHOLD 49000 /* ns */
1196 #define DHT11_AMBIG_LOW 23000 /* ns */
1197 @@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
1198 ret = gpio_direction_output(dht11->gpio, 0);
1199 if (ret)
1200 goto err;
1201 - msleep(DHT11_START_TRANSMISSION);
1202 + usleep_range(DHT11_START_TRANSMISSION_MIN,
1203 + DHT11_START_TRANSMISSION_MAX);
1204 ret = gpio_direction_input(dht11->gpio);
1205 if (ret)
1206 goto err;
1207 diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
1208 index bb0fde6e2047..cc2243f6cc7f 100644
1209 --- a/drivers/infiniband/hw/cxgb4/qp.c
1210 +++ b/drivers/infiniband/hw/cxgb4/qp.c
1211 @@ -321,7 +321,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
1212 FW_RI_RES_WR_DCAEN_V(0) |
1213 FW_RI_RES_WR_DCACPU_V(0) |
1214 FW_RI_RES_WR_FBMIN_V(2) |
1215 - FW_RI_RES_WR_FBMAX_V(2) |
1216 + (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
1217 + FW_RI_RES_WR_FBMAX_V(3)) |
1218 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
1219 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
1220 FW_RI_RES_WR_EQSIZE_V(eqsize));
1221 @@ -345,7 +346,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
1222 FW_RI_RES_WR_DCAEN_V(0) |
1223 FW_RI_RES_WR_DCACPU_V(0) |
1224 FW_RI_RES_WR_FBMIN_V(2) |
1225 - FW_RI_RES_WR_FBMAX_V(2) |
1226 + FW_RI_RES_WR_FBMAX_V(3) |
1227 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
1228 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
1229 FW_RI_RES_WR_EQSIZE_V(eqsize));
1230 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
1231 index e1e274a0a34f..ba637ff8aa7e 100644
1232 --- a/drivers/mmc/host/sdhci.c
1233 +++ b/drivers/mmc/host/sdhci.c
1234 @@ -2719,7 +2719,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
1235 if (intmask & SDHCI_INT_RETUNE)
1236 mmc_retune_needed(host->mmc);
1237
1238 - if (intmask & SDHCI_INT_CARD_INT) {
1239 + if ((intmask & SDHCI_INT_CARD_INT) &&
1240 + (host->ier & SDHCI_INT_CARD_INT)) {
1241 sdhci_enable_sdio_irq_nolock(host, false);
1242 host->thread_isr |= SDHCI_INT_CARD_INT;
1243 result = IRQ_WAKE_THREAD;
1244 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
1245 index d02ca1491d16..8d3e53fac1da 100644
1246 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
1247 +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
1248 @@ -91,7 +91,7 @@
1249
1250 #define IWL8000_FW_PRE "iwlwifi-8000C-"
1251 #define IWL8000_MODULE_FIRMWARE(api) \
1252 - IWL8000_FW_PRE "-" __stringify(api) ".ucode"
1253 + IWL8000_FW_PRE __stringify(api) ".ucode"
1254
1255 #define IWL8265_FW_PRE "iwlwifi-8265-"
1256 #define IWL8265_MODULE_FIRMWARE(api) \
1257 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
1258 index fc771885e383..52de3c6d760c 100644
1259 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
1260 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
1261 @@ -1144,9 +1144,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1262 .frame_limit = IWL_FRAME_LIMIT,
1263 };
1264
1265 - /* Make sure reserved queue is still marked as such (or allocated) */
1266 - mvm->queue_info[mvm_sta->reserved_queue].status =
1267 - IWL_MVM_QUEUE_RESERVED;
1268 + /* Make sure reserved queue is still marked as such (if allocated) */
1269 + if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1270 + mvm->queue_info[mvm_sta->reserved_queue].status =
1271 + IWL_MVM_QUEUE_RESERVED;
1272
1273 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1274 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1275 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
1276 index 0ec649d961d7..b0916b126923 100644
1277 --- a/drivers/pci/pcie/aspm.c
1278 +++ b/drivers/pci/pcie/aspm.c
1279 @@ -518,25 +518,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
1280 link = kzalloc(sizeof(*link), GFP_KERNEL);
1281 if (!link)
1282 return NULL;
1283 +
1284 INIT_LIST_HEAD(&link->sibling);
1285 INIT_LIST_HEAD(&link->children);
1286 INIT_LIST_HEAD(&link->link);
1287 link->pdev = pdev;
1288 - if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
1289 +
1290 + /*
1291 + * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
1292 + * hierarchies.
1293 + */
1294 + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
1295 + pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
1296 + link->root = link;
1297 + } else {
1298 struct pcie_link_state *parent;
1299 +
1300 parent = pdev->bus->parent->self->link_state;
1301 if (!parent) {
1302 kfree(link);
1303 return NULL;
1304 }
1305 +
1306 link->parent = parent;
1307 + link->root = link->parent->root;
1308 list_add(&link->link, &parent->children);
1309 }
1310 - /* Setup a pointer to the root port link */
1311 - if (!link->parent)
1312 - link->root = link;
1313 - else
1314 - link->root = link->parent->root;
1315
1316 list_add(&link->sibling, &link_list);
1317 pdev->link_state = link;
1318 diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
1319 index 079015385fd8..583ae3f38fc0 100644
1320 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c
1321 +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
1322 @@ -731,16 +731,23 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
1323 int reg)
1324 {
1325 struct byt_community *comm = byt_get_community(vg, offset);
1326 - u32 reg_offset = 0;
1327 + u32 reg_offset;
1328
1329 if (!comm)
1330 return NULL;
1331
1332 offset -= comm->pin_base;
1333 - if (reg == BYT_INT_STAT_REG)
1334 + switch (reg) {
1335 + case BYT_INT_STAT_REG:
1336 reg_offset = (offset / 32) * 4;
1337 - else
1338 + break;
1339 + case BYT_DEBOUNCE_REG:
1340 + reg_offset = 0;
1341 + break;
1342 + default:
1343 reg_offset = comm->pad_map[offset] * 16;
1344 + break;
1345 + }
1346
1347 return comm->reg_base + reg_offset + reg;
1348 }
1349 @@ -1612,7 +1619,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
1350 continue;
1351 }
1352
1353 + raw_spin_lock(&vg->lock);
1354 pending = readl(reg);
1355 + raw_spin_unlock(&vg->lock);
1356 for_each_set_bit(pin, &pending, 32) {
1357 virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
1358 generic_handle_irq(virq);
1359 diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
1360 index 7826c7f0cb7c..9931be6af0ca 100644
1361 --- a/drivers/pinctrl/intel/pinctrl-merrifield.c
1362 +++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
1363 @@ -794,6 +794,9 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
1364 unsigned int i;
1365 int ret;
1366
1367 + if (!mrfld_buf_available(mp, pin))
1368 + return -ENOTSUPP;
1369 +
1370 for (i = 0; i < nconfigs; i++) {
1371 switch (pinconf_to_config_param(configs[i])) {
1372 case PIN_CONFIG_BIAS_DISABLE:
1373 diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
1374 index e6a512ebeae2..a3ade9e4ef47 100644
1375 --- a/drivers/regulator/axp20x-regulator.c
1376 +++ b/drivers/regulator/axp20x-regulator.c
1377 @@ -272,7 +272,7 @@ static const struct regulator_desc axp806_regulators[] = {
1378 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
1379 BIT(3)),
1380 AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
1381 - AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
1382 + AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
1383 AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
1384 AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
1385 AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
1386 diff --git a/drivers/staging/greybus/timesync_platform.c b/drivers/staging/greybus/timesync_platform.c
1387 index 113f3d6c4b3a..27f75b17679b 100644
1388 --- a/drivers/staging/greybus/timesync_platform.c
1389 +++ b/drivers/staging/greybus/timesync_platform.c
1390 @@ -45,12 +45,18 @@ u32 gb_timesync_platform_get_clock_rate(void)
1391
1392 int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
1393 {
1394 + if (!arche_platform_change_state_cb)
1395 + return 0;
1396 +
1397 return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
1398 pdata);
1399 }
1400
1401 void gb_timesync_platform_unlock_bus(void)
1402 {
1403 + if (!arche_platform_change_state_cb)
1404 + return;
1405 +
1406 arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
1407 }
1408
1409 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1410 index d2e50a27140c..24f9f98968a5 100644
1411 --- a/drivers/usb/core/quirks.c
1412 +++ b/drivers/usb/core/quirks.c
1413 @@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
1414 /* CBM - Flash disk */
1415 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
1416
1417 + /* WORLDE easy key (easykey.25) MIDI controller */
1418 + { USB_DEVICE(0x0218, 0x0401), .driver_info =
1419 + USB_QUIRK_CONFIG_INTF_STRINGS },
1420 +
1421 /* HP 5300/5370C scanner */
1422 { USB_DEVICE(0x03f0, 0x0701), .driver_info =
1423 USB_QUIRK_STRING_FETCH_255 },
1424 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
1425 index 17989b72cdae..8d412d8b1f29 100644
1426 --- a/drivers/usb/gadget/function/f_fs.c
1427 +++ b/drivers/usb/gadget/function/f_fs.c
1428 @@ -2269,6 +2269,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
1429 if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
1430 return -EINVAL;
1431 length = le32_to_cpu(d->dwSize);
1432 + if (len < length)
1433 + return -EINVAL;
1434 type = le32_to_cpu(d->dwPropertyDataType);
1435 if (type < USB_EXT_PROP_UNICODE ||
1436 type > USB_EXT_PROP_UNICODE_MULTI) {
1437 @@ -2277,6 +2279,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
1438 return -EINVAL;
1439 }
1440 pnl = le16_to_cpu(d->wPropertyNameLength);
1441 + if (length < 14 + pnl) {
1442 + pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
1443 + length, pnl, type);
1444 + return -EINVAL;
1445 + }
1446 pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
1447 if (length != 14 + pnl + pdl) {
1448 pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
1449 @@ -2363,6 +2370,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
1450 }
1451 }
1452 if (flags & (1 << i)) {
1453 + if (len < 4) {
1454 + goto error;
1455 + }
1456 os_descs_count = get_unaligned_le32(data);
1457 data += 4;
1458 len -= 4;
1459 @@ -2435,7 +2445,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
1460
1461 ENTER();
1462
1463 - if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
1464 + if (unlikely(len < 16 ||
1465 + get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
1466 get_unaligned_le32(data + 4) != len))
1467 goto error;
1468 str_count = get_unaligned_le32(data + 8);
1469 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
1470 index c3e172e15ec3..338575fb2d27 100644
1471 --- a/drivers/usb/musb/musb_core.c
1472 +++ b/drivers/usb/musb/musb_core.c
1473 @@ -578,11 +578,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
1474 | MUSB_PORT_STAT_RESUME;
1475 musb->rh_timer = jiffies
1476 + msecs_to_jiffies(USB_RESUME_TIMEOUT);
1477 - musb->need_finish_resume = 1;
1478 -
1479 musb->xceiv->otg->state = OTG_STATE_A_HOST;
1480 musb->is_active = 1;
1481 musb_host_resume_root_hub(musb);
1482 + schedule_delayed_work(&musb->finish_resume_work,
1483 + msecs_to_jiffies(USB_RESUME_TIMEOUT));
1484 break;
1485 case OTG_STATE_B_WAIT_ACON:
1486 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
1487 @@ -2691,11 +2691,6 @@ static int musb_resume(struct device *dev)
1488 mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
1489 if ((devctl & mask) != (musb->context.devctl & mask))
1490 musb->port1_status = 0;
1491 - if (musb->need_finish_resume) {
1492 - musb->need_finish_resume = 0;
1493 - schedule_delayed_work(&musb->finish_resume_work,
1494 - msecs_to_jiffies(USB_RESUME_TIMEOUT));
1495 - }
1496
1497 /*
1498 * The USB HUB code expects the device to be in RPM_ACTIVE once it came
1499 @@ -2747,12 +2742,6 @@ static int musb_runtime_resume(struct device *dev)
1500
1501 musb_restore_context(musb);
1502
1503 - if (musb->need_finish_resume) {
1504 - musb->need_finish_resume = 0;
1505 - schedule_delayed_work(&musb->finish_resume_work,
1506 - msecs_to_jiffies(USB_RESUME_TIMEOUT));
1507 - }
1508 -
1509 spin_lock_irqsave(&musb->lock, flags);
1510 error = musb_run_resume_work(musb);
1511 if (error)
1512 diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
1513 index 47331dbdde29..854fbf7b6b23 100644
1514 --- a/drivers/usb/musb/musb_core.h
1515 +++ b/drivers/usb/musb/musb_core.h
1516 @@ -410,7 +410,6 @@ struct musb {
1517
1518 /* is_suspended means USB B_PERIPHERAL suspend */
1519 unsigned is_suspended:1;
1520 - unsigned need_finish_resume :1;
1521
1522 /* may_wakeup means remote wakeup is enabled */
1523 unsigned may_wakeup:1;
1524 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1525 index 7ce31a4c7e7f..42cc72e54c05 100644
1526 --- a/drivers/usb/serial/option.c
1527 +++ b/drivers/usb/serial/option.c
1528 @@ -2007,6 +2007,7 @@ static const struct usb_device_id option_ids[] = {
1529 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
1530 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
1531 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
1532 + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
1533 { } /* Terminating entry */
1534 };
1535 MODULE_DEVICE_TABLE(usb, option_ids);
1536 diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
1537 index 46fca6b75846..1db4b61bdf7b 100644
1538 --- a/drivers/usb/serial/pl2303.c
1539 +++ b/drivers/usb/serial/pl2303.c
1540 @@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
1541 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
1542 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
1543 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
1544 + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
1545 { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
1546 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
1547 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
1548 diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
1549 index e3b7af8adfb7..09d9be88209e 100644
1550 --- a/drivers/usb/serial/pl2303.h
1551 +++ b/drivers/usb/serial/pl2303.h
1552 @@ -27,6 +27,7 @@
1553 #define ATEN_VENDOR_ID 0x0557
1554 #define ATEN_VENDOR_ID2 0x0547
1555 #define ATEN_PRODUCT_ID 0x2008
1556 +#define ATEN_PRODUCT_ID2 0x2118
1557
1558 #define IODATA_VENDOR_ID 0x04bb
1559 #define IODATA_PRODUCT_ID 0x0a03
1560 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
1561 index 1bc6089b9008..696458db7e3c 100644
1562 --- a/drivers/usb/serial/qcserial.c
1563 +++ b/drivers/usb/serial/qcserial.c
1564 @@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
1565 {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
1566 {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
1567 {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
1568 + {USB_DEVICE(0x413c, 0x81a6)}, /* Dell DW5570 QDL (MC8805) */
1569 {USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */
1570 {USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
1571 {USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */
1572 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
1573 index c6f2d89c0e97..64613fbf5cf8 100644
1574 --- a/drivers/vhost/vhost.c
1575 +++ b/drivers/vhost/vhost.c
1576 @@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
1577
1578 static void vhost_init_is_le(struct vhost_virtqueue *vq)
1579 {
1580 - if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
1581 - vq->is_le = true;
1582 + vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
1583 + || virtio_legacy_is_little_endian();
1584 }
1585 #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
1586
1587 static void vhost_reset_is_le(struct vhost_virtqueue *vq)
1588 {
1589 - vq->is_le = virtio_legacy_is_little_endian();
1590 + vhost_init_is_le(vq);
1591 }
1592
1593 struct vhost_flush_struct {
1594 @@ -1713,10 +1713,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
1595 int r;
1596 bool is_le = vq->is_le;
1597
1598 - if (!vq->private_data) {
1599 - vhost_reset_is_le(vq);
1600 + if (!vq->private_data)
1601 return 0;
1602 - }
1603
1604 vhost_init_is_le(vq);
1605
1606 diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
1607 index f1360487a594..489bfc61cf30 100644
1608 --- a/drivers/virtio/virtio_ring.c
1609 +++ b/drivers/virtio/virtio_ring.c
1610 @@ -159,13 +159,6 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
1611 if (xen_domain())
1612 return true;
1613
1614 - /*
1615 - * On ARM-based machines, the DMA ops will do the right thing,
1616 - * so always use them with legacy devices.
1617 - */
1618 - if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
1619 - return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
1620 -
1621 return false;
1622 }
1623
1624 diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
1625 index 8f6a2a5863b9..a27fc8791551 100644
1626 --- a/fs/cifs/readdir.c
1627 +++ b/fs/cifs/readdir.c
1628 @@ -285,6 +285,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
1629 rc = -ENOMEM;
1630 goto error_exit;
1631 }
1632 + spin_lock_init(&cifsFile->file_info_lock);
1633 file->private_data = cifsFile;
1634 cifsFile->tlink = cifs_get_tlink(tlink);
1635 tcon = tlink_tcon(tlink);
1636 diff --git a/fs/dax.c b/fs/dax.c
1637 index 014defd2e744..bf6218da7928 100644
1638 --- a/fs/dax.c
1639 +++ b/fs/dax.c
1640 @@ -1270,6 +1270,11 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1641 struct blk_dax_ctl dax = { 0 };
1642 ssize_t map_len;
1643
1644 + if (fatal_signal_pending(current)) {
1645 + ret = -EINTR;
1646 + break;
1647 + }
1648 +
1649 dax.sector = iomap->blkno +
1650 (((pos & PAGE_MASK) - iomap->offset) >> 9);
1651 dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
1652 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1653 index 478630af0d19..bbc316db9495 100644
1654 --- a/fs/ext4/super.c
1655 +++ b/fs/ext4/super.c
1656 @@ -3827,6 +3827,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1657 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
1658 db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
1659 EXT4_DESC_PER_BLOCK(sb);
1660 + if (ext4_has_feature_meta_bg(sb)) {
1661 + if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
1662 + ext4_msg(sb, KERN_WARNING,
1663 + "first meta block group too large: %u "
1664 + "(group descriptor block count %u)",
1665 + le32_to_cpu(es->s_first_meta_bg), db_count);
1666 + goto failed_mount;
1667 + }
1668 + }
1669 sbi->s_group_desc = ext4_kvmalloc(db_count *
1670 sizeof(struct buffer_head *),
1671 GFP_KERNEL);
1672 diff --git a/fs/iomap.c b/fs/iomap.c
1673 index a8ee8c33ca78..814ae8f9587d 100644
1674 --- a/fs/iomap.c
1675 +++ b/fs/iomap.c
1676 @@ -113,6 +113,9 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
1677
1678 BUG_ON(pos + len > iomap->offset + iomap->length);
1679
1680 + if (fatal_signal_pending(current))
1681 + return -EINTR;
1682 +
1683 page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
1684 if (!page)
1685 return -ENOMEM;
1686 diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
1687 index 42aace4fc4c8..64813697f4c4 100644
1688 --- a/fs/nfsd/nfs4layouts.c
1689 +++ b/fs/nfsd/nfs4layouts.c
1690 @@ -223,10 +223,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
1691 struct nfs4_layout_stateid *ls;
1692 struct nfs4_stid *stp;
1693
1694 - stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
1695 + stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
1696 + nfsd4_free_layout_stateid);
1697 if (!stp)
1698 return NULL;
1699 - stp->sc_free = nfsd4_free_layout_stateid;
1700 +
1701 get_nfs4_file(fp);
1702 stp->sc_file = fp;
1703
1704 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1705 index 4b4beaaa4eaa..a0dee8ae9f97 100644
1706 --- a/fs/nfsd/nfs4state.c
1707 +++ b/fs/nfsd/nfs4state.c
1708 @@ -633,8 +633,8 @@ find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
1709 return co;
1710 }
1711
1712 -struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
1713 - struct kmem_cache *slab)
1714 +struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
1715 + void (*sc_free)(struct nfs4_stid *))
1716 {
1717 struct nfs4_stid *stid;
1718 int new_id;
1719 @@ -650,6 +650,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
1720 idr_preload_end();
1721 if (new_id < 0)
1722 goto out_free;
1723 +
1724 + stid->sc_free = sc_free;
1725 stid->sc_client = cl;
1726 stid->sc_stateid.si_opaque.so_id = new_id;
1727 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
1728 @@ -675,15 +677,12 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
1729 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
1730 {
1731 struct nfs4_stid *stid;
1732 - struct nfs4_ol_stateid *stp;
1733
1734 - stid = nfs4_alloc_stid(clp, stateid_slab);
1735 + stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
1736 if (!stid)
1737 return NULL;
1738
1739 - stp = openlockstateid(stid);
1740 - stp->st_stid.sc_free = nfs4_free_ol_stateid;
1741 - return stp;
1742 + return openlockstateid(stid);
1743 }
1744
1745 static void nfs4_free_deleg(struct nfs4_stid *stid)
1746 @@ -781,11 +780,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
1747 goto out_dec;
1748 if (delegation_blocked(&current_fh->fh_handle))
1749 goto out_dec;
1750 - dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
1751 + dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
1752 if (dp == NULL)
1753 goto out_dec;
1754
1755 - dp->dl_stid.sc_free = nfs4_free_deleg;
1756 /*
1757 * delegation seqid's are never incremented. The 4.1 special
1758 * meaning of seqid 0 isn't meaningful, really, but let's avoid
1759 @@ -5580,7 +5578,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
1760 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
1761 get_nfs4_file(fp);
1762 stp->st_stid.sc_file = fp;
1763 - stp->st_stid.sc_free = nfs4_free_lock_stateid;
1764 stp->st_access_bmap = 0;
1765 stp->st_deny_bmap = open_stp->st_deny_bmap;
1766 stp->st_openstp = open_stp;
1767 @@ -5623,7 +5620,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
1768 lst = find_lock_stateid(lo, fi);
1769 if (lst == NULL) {
1770 spin_unlock(&clp->cl_lock);
1771 - ns = nfs4_alloc_stid(clp, stateid_slab);
1772 + ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
1773 if (ns == NULL)
1774 return NULL;
1775
1776 diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
1777 index c9399366f9df..4516e8b7d776 100644
1778 --- a/fs/nfsd/state.h
1779 +++ b/fs/nfsd/state.h
1780 @@ -603,8 +603,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
1781 __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
1782 stateid_t *stateid, unsigned char typemask,
1783 struct nfs4_stid **s, struct nfsd_net *nn);
1784 -struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
1785 - struct kmem_cache *slab);
1786 +struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
1787 + void (*sc_free)(struct nfs4_stid *));
1788 void nfs4_unhash_stid(struct nfs4_stid *s);
1789 void nfs4_put_stid(struct nfs4_stid *s);
1790 void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
1791 diff --git a/include/linux/irq.h b/include/linux/irq.h
1792 index e79875574b39..39e3254e5769 100644
1793 --- a/include/linux/irq.h
1794 +++ b/include/linux/irq.h
1795 @@ -184,6 +184,7 @@ struct irq_data {
1796 *
1797 * IRQD_TRIGGER_MASK - Mask for the trigger type bits
1798 * IRQD_SETAFFINITY_PENDING - Affinity setting is pending
1799 + * IRQD_ACTIVATED - Interrupt has already been activated
1800 * IRQD_NO_BALANCING - Balancing disabled for this IRQ
1801 * IRQD_PER_CPU - Interrupt is per cpu
1802 * IRQD_AFFINITY_SET - Interrupt affinity was set
1803 @@ -202,6 +203,7 @@ struct irq_data {
1804 enum {
1805 IRQD_TRIGGER_MASK = 0xf,
1806 IRQD_SETAFFINITY_PENDING = (1 << 8),
1807 + IRQD_ACTIVATED = (1 << 9),
1808 IRQD_NO_BALANCING = (1 << 10),
1809 IRQD_PER_CPU = (1 << 11),
1810 IRQD_AFFINITY_SET = (1 << 12),
1811 @@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d)
1812 return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
1813 }
1814
1815 +static inline bool irqd_is_activated(struct irq_data *d)
1816 +{
1817 + return __irqd_to_state(d) & IRQD_ACTIVATED;
1818 +}
1819 +
1820 +static inline void irqd_set_activated(struct irq_data *d)
1821 +{
1822 + __irqd_to_state(d) |= IRQD_ACTIVATED;
1823 +}
1824 +
1825 +static inline void irqd_clr_activated(struct irq_data *d)
1826 +{
1827 + __irqd_to_state(d) &= ~IRQD_ACTIVATED;
1828 +}
1829 +
1830 #undef __irqd_to_state
1831
1832 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
1833 diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
1834 index c1784c0b4f35..134a2f69c21a 100644
1835 --- a/include/linux/memory_hotplug.h
1836 +++ b/include/linux/memory_hotplug.h
1837 @@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
1838 extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
1839 /* VM interface that may be used by firmware interface */
1840 extern int online_pages(unsigned long, unsigned long, int);
1841 -extern int test_pages_in_a_zone(unsigned long, unsigned long);
1842 +extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1843 + unsigned long *valid_start, unsigned long *valid_end);
1844 extern void __offline_isolated_pages(unsigned long, unsigned long);
1845
1846 typedef void (*online_page_callback_t)(struct page *page);
1847 diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
1848 index 1c7eec09e5eb..3a481a49546e 100644
1849 --- a/include/linux/percpu-refcount.h
1850 +++ b/include/linux/percpu-refcount.h
1851 @@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
1852 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
1853 {
1854 unsigned long __percpu *percpu_count;
1855 - int ret;
1856 + bool ret;
1857
1858 rcu_read_lock_sched();
1859
1860 @@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
1861 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
1862 {
1863 unsigned long __percpu *percpu_count;
1864 - int ret = false;
1865 + bool ret = false;
1866
1867 rcu_read_lock_sched();
1868
1869 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
1870 index 85bc9beb046d..4e2f3de0e40b 100644
1871 --- a/kernel/cgroup.c
1872 +++ b/kernel/cgroup.c
1873 @@ -5219,6 +5219,11 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
1874 return ERR_PTR(err);
1875 }
1876
1877 +/*
1878 + * The returned cgroup is fully initialized including its control mask, but
1879 + * it isn't associated with its kernfs_node and doesn't have the control
1880 + * mask applied.
1881 + */
1882 static struct cgroup *cgroup_create(struct cgroup *parent)
1883 {
1884 struct cgroup_root *root = parent->root;
1885 @@ -5283,11 +5288,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
1886
1887 cgroup_propagate_control(cgrp);
1888
1889 - /* @cgrp doesn't have dir yet so the following will only create csses */
1890 - ret = cgroup_apply_control_enable(cgrp);
1891 - if (ret)
1892 - goto out_destroy;
1893 -
1894 return cgrp;
1895
1896 out_cancel_ref:
1897 @@ -5295,9 +5295,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
1898 out_free_cgrp:
1899 kfree(cgrp);
1900 return ERR_PTR(ret);
1901 -out_destroy:
1902 - cgroup_destroy_locked(cgrp);
1903 - return ERR_PTR(ret);
1904 }
1905
1906 static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
1907 diff --git a/kernel/events/core.c b/kernel/events/core.c
1908 index e5a8839e7076..b1cfd7416db0 100644
1909 --- a/kernel/events/core.c
1910 +++ b/kernel/events/core.c
1911 @@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1912 static void
1913 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1914 {
1915 -
1916 lockdep_assert_held(&ctx->lock);
1917
1918 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1919 @@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event)
1920 {
1921 struct perf_event *group_leader = event->group_leader, *pos;
1922
1923 + lockdep_assert_held(&event->ctx->lock);
1924 +
1925 /*
1926 * We can have double attach due to group movement in perf_event_open.
1927 */
1928 @@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event)
1929 struct perf_event *sibling, *tmp;
1930 struct list_head *list = NULL;
1931
1932 + lockdep_assert_held(&event->ctx->lock);
1933 +
1934 /*
1935 * We can have double detach due to exit/hot-unplug + close.
1936 */
1937 @@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event,
1938 */
1939 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
1940 {
1941 - lockdep_assert_held(&event->ctx->mutex);
1942 + struct perf_event_context *ctx = event->ctx;
1943 +
1944 + lockdep_assert_held(&ctx->mutex);
1945
1946 event_function_call(event, __perf_remove_from_context, (void *)flags);
1947 +
1948 + /*
1949 + * The above event_function_call() can NO-OP when it hits
1950 + * TASK_TOMBSTONE. In that case we must already have been detached
1951 + * from the context (by perf_event_exit_event()) but the grouping
1952 + * might still be in-tact.
1953 + */
1954 + WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1955 + if ((flags & DETACH_GROUP) &&
1956 + (event->attach_state & PERF_ATTACH_GROUP)) {
1957 + /*
1958 + * Since in that case we cannot possibly be scheduled, simply
1959 + * detach now.
1960 + */
1961 + raw_spin_lock_irq(&ctx->lock);
1962 + perf_group_detach(event);
1963 + raw_spin_unlock_irq(&ctx->lock);
1964 + }
1965 }
1966
1967 /*
1968 @@ -6583,6 +6606,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
1969 char *buf = NULL;
1970 char *name;
1971
1972 + if (vma->vm_flags & VM_READ)
1973 + prot |= PROT_READ;
1974 + if (vma->vm_flags & VM_WRITE)
1975 + prot |= PROT_WRITE;
1976 + if (vma->vm_flags & VM_EXEC)
1977 + prot |= PROT_EXEC;
1978 +
1979 + if (vma->vm_flags & VM_MAYSHARE)
1980 + flags = MAP_SHARED;
1981 + else
1982 + flags = MAP_PRIVATE;
1983 +
1984 + if (vma->vm_flags & VM_DENYWRITE)
1985 + flags |= MAP_DENYWRITE;
1986 + if (vma->vm_flags & VM_MAYEXEC)
1987 + flags |= MAP_EXECUTABLE;
1988 + if (vma->vm_flags & VM_LOCKED)
1989 + flags |= MAP_LOCKED;
1990 + if (vma->vm_flags & VM_HUGETLB)
1991 + flags |= MAP_HUGETLB;
1992 +
1993 if (file) {
1994 struct inode *inode;
1995 dev_t dev;
1996 @@ -6609,27 +6653,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
1997 maj = MAJOR(dev);
1998 min = MINOR(dev);
1999
2000 - if (vma->vm_flags & VM_READ)
2001 - prot |= PROT_READ;
2002 - if (vma->vm_flags & VM_WRITE)
2003 - prot |= PROT_WRITE;
2004 - if (vma->vm_flags & VM_EXEC)
2005 - prot |= PROT_EXEC;
2006 -
2007 - if (vma->vm_flags & VM_MAYSHARE)
2008 - flags = MAP_SHARED;
2009 - else
2010 - flags = MAP_PRIVATE;
2011 -
2012 - if (vma->vm_flags & VM_DENYWRITE)
2013 - flags |= MAP_DENYWRITE;
2014 - if (vma->vm_flags & VM_MAYEXEC)
2015 - flags |= MAP_EXECUTABLE;
2016 - if (vma->vm_flags & VM_LOCKED)
2017 - flags |= MAP_LOCKED;
2018 - if (vma->vm_flags & VM_HUGETLB)
2019 - flags |= MAP_HUGETLB;
2020 -
2021 goto got_name;
2022 } else {
2023 if (vma->vm_ops && vma->vm_ops->name) {
2024 diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
2025 index 8c0a0ae43521..b59e6768c5e9 100644
2026 --- a/kernel/irq/irqdomain.c
2027 +++ b/kernel/irq/irqdomain.c
2028 @@ -1346,6 +1346,30 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
2029 }
2030 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
2031
2032 +static void __irq_domain_activate_irq(struct irq_data *irq_data)
2033 +{
2034 + if (irq_data && irq_data->domain) {
2035 + struct irq_domain *domain = irq_data->domain;
2036 +
2037 + if (irq_data->parent_data)
2038 + __irq_domain_activate_irq(irq_data->parent_data);
2039 + if (domain->ops->activate)
2040 + domain->ops->activate(domain, irq_data);
2041 + }
2042 +}
2043 +
2044 +static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
2045 +{
2046 + if (irq_data && irq_data->domain) {
2047 + struct irq_domain *domain = irq_data->domain;
2048 +
2049 + if (domain->ops->deactivate)
2050 + domain->ops->deactivate(domain, irq_data);
2051 + if (irq_data->parent_data)
2052 + __irq_domain_deactivate_irq(irq_data->parent_data);
2053 + }
2054 +}
2055 +
2056 /**
2057 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
2058 * interrupt
2059 @@ -1356,13 +1380,9 @@ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
2060 */
2061 void irq_domain_activate_irq(struct irq_data *irq_data)
2062 {
2063 - if (irq_data && irq_data->domain) {
2064 - struct irq_domain *domain = irq_data->domain;
2065 -
2066 - if (irq_data->parent_data)
2067 - irq_domain_activate_irq(irq_data->parent_data);
2068 - if (domain->ops->activate)
2069 - domain->ops->activate(domain, irq_data);
2070 + if (!irqd_is_activated(irq_data)) {
2071 + __irq_domain_activate_irq(irq_data);
2072 + irqd_set_activated(irq_data);
2073 }
2074 }
2075
2076 @@ -1376,13 +1396,9 @@ void irq_domain_activate_irq(struct irq_data *irq_data)
2077 */
2078 void irq_domain_deactivate_irq(struct irq_data *irq_data)
2079 {
2080 - if (irq_data && irq_data->domain) {
2081 - struct irq_domain *domain = irq_data->domain;
2082 -
2083 - if (domain->ops->deactivate)
2084 - domain->ops->deactivate(domain, irq_data);
2085 - if (irq_data->parent_data)
2086 - irq_domain_deactivate_irq(irq_data->parent_data);
2087 + if (irqd_is_activated(irq_data)) {
2088 + __irq_domain_deactivate_irq(irq_data);
2089 + irqd_clr_activated(irq_data);
2090 }
2091 }
2092
2093 diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
2094 index b97286c48735..f00b0131c8f9 100644
2095 --- a/kernel/trace/trace_hwlat.c
2096 +++ b/kernel/trace/trace_hwlat.c
2097 @@ -266,7 +266,7 @@ static int get_sample(void)
2098 static struct cpumask save_cpumask;
2099 static bool disable_migrate;
2100
2101 -static void move_to_next_cpu(void)
2102 +static void move_to_next_cpu(bool initmask)
2103 {
2104 static struct cpumask *current_mask;
2105 int next_cpu;
2106 @@ -275,7 +275,7 @@ static void move_to_next_cpu(void)
2107 return;
2108
2109 /* Just pick the first CPU on first iteration */
2110 - if (!current_mask) {
2111 + if (initmask) {
2112 current_mask = &save_cpumask;
2113 get_online_cpus();
2114 cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
2115 @@ -330,10 +330,12 @@ static void move_to_next_cpu(void)
2116 static int kthread_fn(void *data)
2117 {
2118 u64 interval;
2119 + bool initmask = true;
2120
2121 while (!kthread_should_stop()) {
2122
2123 - move_to_next_cpu();
2124 + move_to_next_cpu(initmask);
2125 + initmask = false;
2126
2127 local_irq_disable();
2128 get_sample();
2129 diff --git a/mm/filemap.c b/mm/filemap.c
2130 index 779801092ef1..d8d7df82c69a 100644
2131 --- a/mm/filemap.c
2132 +++ b/mm/filemap.c
2133 @@ -1703,6 +1703,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
2134
2135 cond_resched();
2136 find_page:
2137 + if (fatal_signal_pending(current)) {
2138 + error = -EINTR;
2139 + goto out;
2140 + }
2141 +
2142 page = find_get_page(mapping, index);
2143 if (!page) {
2144 page_cache_sync_readahead(mapping,
2145 diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
2146 index c3a8141ac788..ede137345a99 100644
2147 --- a/mm/memory_hotplug.c
2148 +++ b/mm/memory_hotplug.c
2149 @@ -1483,17 +1483,20 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
2150 }
2151
2152 /*
2153 - * Confirm all pages in a range [start, end) is belongs to the same zone.
2154 + * Confirm all pages in a range [start, end) belong to the same zone.
2155 + * When true, return its valid [start, end).
2156 */
2157 -int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
2158 +int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
2159 + unsigned long *valid_start, unsigned long *valid_end)
2160 {
2161 unsigned long pfn, sec_end_pfn;
2162 + unsigned long start, end;
2163 struct zone *zone = NULL;
2164 struct page *page;
2165 int i;
2166 - for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
2167 + for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
2168 pfn < end_pfn;
2169 - pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
2170 + pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
2171 /* Make sure the memory section is present first */
2172 if (!present_section_nr(pfn_to_section_nr(pfn)))
2173 continue;
2174 @@ -1509,10 +1512,20 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
2175 page = pfn_to_page(pfn + i);
2176 if (zone && page_zone(page) != zone)
2177 return 0;
2178 + if (!zone)
2179 + start = pfn + i;
2180 zone = page_zone(page);
2181 + end = pfn + MAX_ORDER_NR_PAGES;
2182 }
2183 }
2184 - return 1;
2185 +
2186 + if (zone) {
2187 + *valid_start = start;
2188 + *valid_end = end;
2189 + return 1;
2190 + } else {
2191 + return 0;
2192 + }
2193 }
2194
2195 /*
2196 @@ -1859,6 +1872,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
2197 long offlined_pages;
2198 int ret, drain, retry_max, node;
2199 unsigned long flags;
2200 + unsigned long valid_start, valid_end;
2201 struct zone *zone;
2202 struct memory_notify arg;
2203
2204 @@ -1869,10 +1883,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
2205 return -EINVAL;
2206 /* This makes hotplug much easier...and readable.
2207 we assume this for now. .*/
2208 - if (!test_pages_in_a_zone(start_pfn, end_pfn))
2209 + if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
2210 return -EINVAL;
2211
2212 - zone = page_zone(pfn_to_page(start_pfn));
2213 + zone = page_zone(pfn_to_page(valid_start));
2214 node = zone_to_nid(zone);
2215 nr_pages = end_pfn - start_pfn;
2216
2217 diff --git a/mm/zswap.c b/mm/zswap.c
2218 index 275b22cc8df4..dbef27822a98 100644
2219 --- a/mm/zswap.c
2220 +++ b/mm/zswap.c
2221 @@ -78,7 +78,13 @@ static u64 zswap_duplicate_entry;
2222
2223 /* Enable/disable zswap (disabled by default) */
2224 static bool zswap_enabled;
2225 -module_param_named(enabled, zswap_enabled, bool, 0644);
2226 +static int zswap_enabled_param_set(const char *,
2227 + const struct kernel_param *);
2228 +static struct kernel_param_ops zswap_enabled_param_ops = {
2229 + .set = zswap_enabled_param_set,
2230 + .get = param_get_bool,
2231 +};
2232 +module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
2233
2234 /* Crypto compressor to use */
2235 #define ZSWAP_COMPRESSOR_DEFAULT "lzo"
2236 @@ -176,6 +182,9 @@ static atomic_t zswap_pools_count = ATOMIC_INIT(0);
2237 /* used by param callback function */
2238 static bool zswap_init_started;
2239
2240 +/* fatal error during init */
2241 +static bool zswap_init_failed;
2242 +
2243 /*********************************
2244 * helpers and fwd declarations
2245 **********************************/
2246 @@ -706,6 +715,11 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
2247 char *s = strstrip((char *)val);
2248 int ret;
2249
2250 + if (zswap_init_failed) {
2251 + pr_err("can't set param, initialization failed\n");
2252 + return -ENODEV;
2253 + }
2254 +
2255 /* no change required */
2256 if (!strcmp(s, *(char **)kp->arg))
2257 return 0;
2258 @@ -785,6 +799,17 @@ static int zswap_zpool_param_set(const char *val,
2259 return __zswap_param_set(val, kp, NULL, zswap_compressor);
2260 }
2261
2262 +static int zswap_enabled_param_set(const char *val,
2263 + const struct kernel_param *kp)
2264 +{
2265 + if (zswap_init_failed) {
2266 + pr_err("can't enable, initialization failed\n");
2267 + return -ENODEV;
2268 + }
2269 +
2270 + return param_set_bool(val, kp);
2271 +}
2272 +
2273 /*********************************
2274 * writeback code
2275 **********************************/
2276 @@ -1271,6 +1296,9 @@ static int __init init_zswap(void)
2277 dstmem_fail:
2278 zswap_entry_cache_destroy();
2279 cache_fail:
2280 + /* if built-in, we aren't unloaded on failure; don't allow use */
2281 + zswap_init_failed = true;
2282 + zswap_enabled = false;
2283 return -ENOMEM;
2284 }
2285 /* must be late so crypto has time to come up */
2286 diff --git a/net/can/bcm.c b/net/can/bcm.c
2287 index 436a7537e6a9..5e9ed5ec2860 100644
2288 --- a/net/can/bcm.c
2289 +++ b/net/can/bcm.c
2290 @@ -734,14 +734,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,
2291
2292 static void bcm_remove_op(struct bcm_op *op)
2293 {
2294 - hrtimer_cancel(&op->timer);
2295 - hrtimer_cancel(&op->thrtimer);
2296 -
2297 - if (op->tsklet.func)
2298 - tasklet_kill(&op->tsklet);
2299 + if (op->tsklet.func) {
2300 + while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
2301 + test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
2302 + hrtimer_active(&op->timer)) {
2303 + hrtimer_cancel(&op->timer);
2304 + tasklet_kill(&op->tsklet);
2305 + }
2306 + }
2307
2308 - if (op->thrtsklet.func)
2309 - tasklet_kill(&op->thrtsklet);
2310 + if (op->thrtsklet.func) {
2311 + while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
2312 + test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
2313 + hrtimer_active(&op->thrtimer)) {
2314 + hrtimer_cancel(&op->thrtimer);
2315 + tasklet_kill(&op->thrtsklet);
2316 + }
2317 + }
2318
2319 if ((op->frames) && (op->frames != &op->sframe))
2320 kfree(op->frames);
2321 diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
2322 index dc6fb79a361f..25d9a9cf7b66 100644
2323 --- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
2324 +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
2325 @@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
2326 if (!oa->data)
2327 return -ENOMEM;
2328
2329 - creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
2330 + creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
2331 if (!creds) {
2332 kfree(oa->data);
2333 return -ENOMEM;