Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0245-5.4.146-all-fixes.patch

Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 171525 bytes
-add missing
1 diff --git a/Makefile b/Makefile
2 index c32a36c8ffc90..48d0c03acfc55 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 145
10 +SUBLEVEL = 146
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
15 index 5b8bf58e89cb4..996e006e06c25 100644
16 --- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
17 +++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
18 @@ -208,12 +208,12 @@
19 };
20
21 pinctrl_hvi3c3_default: hvi3c3_default {
22 - function = "HVI3C3";
23 + function = "I3C3";
24 groups = "HVI3C3";
25 };
26
27 pinctrl_hvi3c4_default: hvi3c4_default {
28 - function = "HVI3C4";
29 + function = "I3C4";
30 groups = "HVI3C4";
31 };
32
33 diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
34 index 3efe9d41c2bb6..d7c9dbee0f016 100644
35 --- a/arch/arm/boot/dts/meson8.dtsi
36 +++ b/arch/arm/boot/dts/meson8.dtsi
37 @@ -241,8 +241,13 @@
38 "pp2", "ppmmu2", "pp4", "ppmmu4",
39 "pp5", "ppmmu5", "pp6", "ppmmu6";
40 resets = <&reset RESET_MALI>;
41 +
42 clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
43 clock-names = "bus", "core";
44 +
45 + assigned-clocks = <&clkc CLKID_MALI>;
46 + assigned-clock-rates = <318750000>;
47 +
48 operating-points-v2 = <&gpu_opp_table>;
49 };
50 };
51 diff --git a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts
52 index bed1dfef19857..32d1c322dbc65 100644
53 --- a/arch/arm/boot/dts/meson8b-ec100.dts
54 +++ b/arch/arm/boot/dts/meson8b-ec100.dts
55 @@ -148,7 +148,7 @@
56 regulator-min-microvolt = <860000>;
57 regulator-max-microvolt = <1140000>;
58
59 - vin-supply = <&vcc_5v>;
60 + pwm-supply = <&vcc_5v>;
61
62 pwms = <&pwm_cd 0 1148 0>;
63 pwm-dutycycle-range = <100 0>;
64 @@ -232,7 +232,7 @@
65 regulator-min-microvolt = <860000>;
66 regulator-max-microvolt = <1140000>;
67
68 - vin-supply = <&vcc_5v>;
69 + pwm-supply = <&vcc_5v>;
70
71 pwms = <&pwm_cd 1 1148 0>;
72 pwm-dutycycle-range = <100 0>;
73 diff --git a/arch/arm/boot/dts/meson8b-mxq.dts b/arch/arm/boot/dts/meson8b-mxq.dts
74 index 6e39ad52e42d3..ab8fe55963f7c 100644
75 --- a/arch/arm/boot/dts/meson8b-mxq.dts
76 +++ b/arch/arm/boot/dts/meson8b-mxq.dts
77 @@ -39,6 +39,8 @@
78 regulator-min-microvolt = <860000>;
79 regulator-max-microvolt = <1140000>;
80
81 + pwm-supply = <&vcc_5v>;
82 +
83 pwms = <&pwm_cd 0 1148 0>;
84 pwm-dutycycle-range = <100 0>;
85
86 @@ -84,7 +86,7 @@
87 regulator-min-microvolt = <860000>;
88 regulator-max-microvolt = <1140000>;
89
90 - vin-supply = <&vcc_5v>;
91 + pwm-supply = <&vcc_5v>;
92
93 pwms = <&pwm_cd 1 1148 0>;
94 pwm-dutycycle-range = <100 0>;
95 diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
96 index 0f9c71137bed5..c413af9a7af8e 100644
97 --- a/arch/arm/boot/dts/meson8b-odroidc1.dts
98 +++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
99 @@ -130,7 +130,7 @@
100 regulator-min-microvolt = <860000>;
101 regulator-max-microvolt = <1140000>;
102
103 - vin-supply = <&p5v0>;
104 + pwm-supply = <&p5v0>;
105
106 pwms = <&pwm_cd 0 12218 0>;
107 pwm-dutycycle-range = <91 0>;
108 @@ -162,7 +162,7 @@
109 regulator-min-microvolt = <860000>;
110 regulator-max-microvolt = <1140000>;
111
112 - vin-supply = <&p5v0>;
113 + pwm-supply = <&p5v0>;
114
115 pwms = <&pwm_cd 1 12218 0>;
116 pwm-dutycycle-range = <91 0>;
117 diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
118 index 7216653424fd6..b51a8c7b01114 100644
119 --- a/arch/arm/net/bpf_jit_32.c
120 +++ b/arch/arm/net/bpf_jit_32.c
121 @@ -1602,6 +1602,9 @@ exit:
122 rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
123 emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
124 break;
125 + /* speculation barrier */
126 + case BPF_ST | BPF_NOSPEC:
127 + break;
128 /* ST: *(size *)(dst + off) = imm */
129 case BPF_ST | BPF_MEM | BPF_W:
130 case BPF_ST | BPF_MEM | BPF_H:
131 diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
132 index 25549d9552ae2..84f92b44c3235 100644
133 --- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
134 +++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
135 @@ -113,7 +113,7 @@
136 #address-cells = <0>;
137 interrupt-controller;
138 reg = <0x11001000 0x1000>,
139 - <0x11002000 0x1000>,
140 + <0x11002000 0x2000>,
141 <0x11004000 0x2000>,
142 <0x11006000 0x2000>;
143 };
144 diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
145 index 67634cb01d6b6..cbdd46ed3ca63 100644
146 --- a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
147 +++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
148 @@ -277,10 +277,6 @@
149 interrupt-parent = <&gpio1>;
150 interrupts = <28 IRQ_TYPE_LEVEL_LOW>;
151
152 - /* Depends on LVDS */
153 - max-clock = <135000000>;
154 - min-vrefresh = <50>;
155 -
156 adi,input-depth = <8>;
157 adi,input-colorspace = "rgb";
158 adi,input-clock = "1x";
159 diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
160 index 945e5f690edec..afc7d41347f73 100644
161 --- a/arch/arm64/net/bpf_jit_comp.c
162 +++ b/arch/arm64/net/bpf_jit_comp.c
163 @@ -701,6 +701,19 @@ emit_cond_jmp:
164 }
165 break;
166
167 + /* speculation barrier */
168 + case BPF_ST | BPF_NOSPEC:
169 + /*
170 + * Nothing required here.
171 + *
172 + * In case of arm64, we rely on the firmware mitigation of
173 + * Speculative Store Bypass as controlled via the ssbd kernel
174 + * parameter. Whenever the mitigation is enabled, it works
175 + * for all of the kernel code with no need to provide any
176 + * additional instructions.
177 + */
178 + break;
179 +
180 /* ST: *(size *)(dst + off) = imm */
181 case BPF_ST | BPF_MEM | BPF_W:
182 case BPF_ST | BPF_MEM | BPF_H:
183 diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
184 index a4ebd2445edae..e5831cd293d05 100644
185 --- a/arch/m68k/emu/nfeth.c
186 +++ b/arch/m68k/emu/nfeth.c
187 @@ -254,8 +254,8 @@ static void __exit nfeth_cleanup(void)
188
189 for (i = 0; i < MAX_UNIT; i++) {
190 if (nfeth_dev[i]) {
191 - unregister_netdev(nfeth_dev[0]);
192 - free_netdev(nfeth_dev[0]);
193 + unregister_netdev(nfeth_dev[i]);
194 + free_netdev(nfeth_dev[i]);
195 }
196 }
197 free_irq(nfEtherIRQ, nfeth_interrupt);
198 diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
199 index 561154cbcc401..b31b91e57c341 100644
200 --- a/arch/mips/net/ebpf_jit.c
201 +++ b/arch/mips/net/ebpf_jit.c
202 @@ -1355,6 +1355,9 @@ jeq_common:
203 }
204 break;
205
206 + case BPF_ST | BPF_NOSPEC: /* speculation barrier */
207 + break;
208 +
209 case BPF_ST | BPF_B | BPF_MEM:
210 case BPF_ST | BPF_H | BPF_MEM:
211 case BPF_ST | BPF_W | BPF_MEM:
212 diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
213 index be3517ef0574d..20bfd753bcba6 100644
214 --- a/arch/powerpc/net/bpf_jit_comp64.c
215 +++ b/arch/powerpc/net/bpf_jit_comp64.c
216 @@ -644,6 +644,12 @@ emit_clear:
217 }
218 break;
219
220 + /*
221 + * BPF_ST NOSPEC (speculation barrier)
222 + */
223 + case BPF_ST | BPF_NOSPEC:
224 + break;
225 +
226 /*
227 * BPF_ST(X)
228 */
229 diff --git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp.c
230 index e2279fed8f564..0eefe6193253b 100644
231 --- a/arch/riscv/net/bpf_jit_comp.c
232 +++ b/arch/riscv/net/bpf_jit_comp.c
233 @@ -1313,6 +1313,10 @@ out_be:
234 emit(rv_ld(rd, 0, RV_REG_T1), ctx);
235 break;
236
237 + /* speculation barrier */
238 + case BPF_ST | BPF_NOSPEC:
239 + break;
240 +
241 /* ST: *(size *)(dst + off) = imm */
242 case BPF_ST | BPF_MEM | BPF_B:
243 emit_imm(RV_REG_T1, imm, ctx);
244 diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
245 index 0fe5600a037e4..4d59d11e6813d 100644
246 --- a/arch/s390/include/asm/kvm_host.h
247 +++ b/arch/s390/include/asm/kvm_host.h
248 @@ -873,6 +873,7 @@ struct kvm_arch{
249 atomic64_t cmma_dirty_pages;
250 /* subset of available cpu features enabled by user space */
251 DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
252 + /* indexed by vcpu_idx */
253 DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
254 struct kvm_s390_gisa_interrupt gisa_int;
255 };
256 diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
257 index 7184d55d87aae..b1aadc3ad065d 100644
258 --- a/arch/s390/kernel/debug.c
259 +++ b/arch/s390/kernel/debug.c
260 @@ -327,24 +327,6 @@ static debug_info_t *debug_info_create(const char *name, int pages_per_area,
261 goto out;
262
263 rc->mode = mode & ~S_IFMT;
264 -
265 - /* create root directory */
266 - rc->debugfs_root_entry = debugfs_create_dir(rc->name,
267 - debug_debugfs_root_entry);
268 -
269 - /* append new element to linked list */
270 - if (!debug_area_first) {
271 - /* first element in list */
272 - debug_area_first = rc;
273 - rc->prev = NULL;
274 - } else {
275 - /* append element to end of list */
276 - debug_area_last->next = rc;
277 - rc->prev = debug_area_last;
278 - }
279 - debug_area_last = rc;
280 - rc->next = NULL;
281 -
282 refcount_set(&rc->ref_count, 1);
283 out:
284 return rc;
285 @@ -404,27 +386,10 @@ static void debug_info_get(debug_info_t *db_info)
286 */
287 static void debug_info_put(debug_info_t *db_info)
288 {
289 - int i;
290 -
291 if (!db_info)
292 return;
293 - if (refcount_dec_and_test(&db_info->ref_count)) {
294 - for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
295 - if (!db_info->views[i])
296 - continue;
297 - debugfs_remove(db_info->debugfs_entries[i]);
298 - }
299 - debugfs_remove(db_info->debugfs_root_entry);
300 - if (db_info == debug_area_first)
301 - debug_area_first = db_info->next;
302 - if (db_info == debug_area_last)
303 - debug_area_last = db_info->prev;
304 - if (db_info->prev)
305 - db_info->prev->next = db_info->next;
306 - if (db_info->next)
307 - db_info->next->prev = db_info->prev;
308 + if (refcount_dec_and_test(&db_info->ref_count))
309 debug_info_free(db_info);
310 - }
311 }
312
313 /*
314 @@ -648,6 +613,31 @@ static int debug_close(struct inode *inode, struct file *file)
315 return 0; /* success */
316 }
317
318 +/* Create debugfs entries and add to internal list. */
319 +static void _debug_register(debug_info_t *id)
320 +{
321 + /* create root directory */
322 + id->debugfs_root_entry = debugfs_create_dir(id->name,
323 + debug_debugfs_root_entry);
324 +
325 + /* append new element to linked list */
326 + if (!debug_area_first) {
327 + /* first element in list */
328 + debug_area_first = id;
329 + id->prev = NULL;
330 + } else {
331 + /* append element to end of list */
332 + debug_area_last->next = id;
333 + id->prev = debug_area_last;
334 + }
335 + debug_area_last = id;
336 + id->next = NULL;
337 +
338 + debug_register_view(id, &debug_level_view);
339 + debug_register_view(id, &debug_flush_view);
340 + debug_register_view(id, &debug_pages_view);
341 +}
342 +
343 /**
344 * debug_register_mode() - creates and initializes debug area.
345 *
346 @@ -677,19 +667,16 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
347 if ((uid != 0) || (gid != 0))
348 pr_warn("Root becomes the owner of all s390dbf files in sysfs\n");
349 BUG_ON(!initialized);
350 - mutex_lock(&debug_mutex);
351
352 /* create new debug_info */
353 rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
354 - if (!rc)
355 - goto out;
356 - debug_register_view(rc, &debug_level_view);
357 - debug_register_view(rc, &debug_flush_view);
358 - debug_register_view(rc, &debug_pages_view);
359 -out:
360 - if (!rc)
361 + if (rc) {
362 + mutex_lock(&debug_mutex);
363 + _debug_register(rc);
364 + mutex_unlock(&debug_mutex);
365 + } else {
366 pr_err("Registering debug feature %s failed\n", name);
367 - mutex_unlock(&debug_mutex);
368 + }
369 return rc;
370 }
371 EXPORT_SYMBOL(debug_register_mode);
372 @@ -718,6 +705,27 @@ debug_info_t *debug_register(const char *name, int pages_per_area,
373 }
374 EXPORT_SYMBOL(debug_register);
375
376 +/* Remove debugfs entries and remove from internal list. */
377 +static void _debug_unregister(debug_info_t *id)
378 +{
379 + int i;
380 +
381 + for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
382 + if (!id->views[i])
383 + continue;
384 + debugfs_remove(id->debugfs_entries[i]);
385 + }
386 + debugfs_remove(id->debugfs_root_entry);
387 + if (id == debug_area_first)
388 + debug_area_first = id->next;
389 + if (id == debug_area_last)
390 + debug_area_last = id->prev;
391 + if (id->prev)
392 + id->prev->next = id->next;
393 + if (id->next)
394 + id->next->prev = id->prev;
395 +}
396 +
397 /**
398 * debug_unregister() - give back debug area.
399 *
400 @@ -731,8 +739,10 @@ void debug_unregister(debug_info_t *id)
401 if (!id)
402 return;
403 mutex_lock(&debug_mutex);
404 - debug_info_put(id);
405 + _debug_unregister(id);
406 mutex_unlock(&debug_mutex);
407 +
408 + debug_info_put(id);
409 }
410 EXPORT_SYMBOL(debug_unregister);
411
412 diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
413 index 62388a678b91a..fa9483aa4f575 100644
414 --- a/arch/s390/kvm/interrupt.c
415 +++ b/arch/s390/kvm/interrupt.c
416 @@ -408,13 +408,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
417 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
418 {
419 kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
420 - set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
421 + set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
422 }
423
424 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
425 {
426 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
427 - clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
428 + clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
429 }
430
431 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
432 @@ -2984,18 +2984,18 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
433
434 static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
435 {
436 - int vcpu_id, online_vcpus = atomic_read(&kvm->online_vcpus);
437 + int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
438 struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
439 struct kvm_vcpu *vcpu;
440
441 - for_each_set_bit(vcpu_id, kvm->arch.idle_mask, online_vcpus) {
442 - vcpu = kvm_get_vcpu(kvm, vcpu_id);
443 + for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
444 + vcpu = kvm_get_vcpu(kvm, vcpu_idx);
445 if (psw_ioint_disabled(vcpu))
446 continue;
447 deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
448 if (deliverable_mask) {
449 /* lately kicked but not yet running */
450 - if (test_and_set_bit(vcpu_id, gi->kicked_mask))
451 + if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
452 return;
453 kvm_s390_vcpu_wakeup(vcpu);
454 return;
455 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
456 index 20ba8537dbcc2..9ed2fee612297 100644
457 --- a/arch/s390/kvm/kvm-s390.c
458 +++ b/arch/s390/kvm/kvm-s390.c
459 @@ -3726,7 +3726,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
460 kvm_s390_patch_guest_per_regs(vcpu);
461 }
462
463 - clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
464 + clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
465
466 vcpu->arch.sie_block->icptcode = 0;
467 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
468 diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
469 index 6d9448dbd052b..63d94a5253a8f 100644
470 --- a/arch/s390/kvm/kvm-s390.h
471 +++ b/arch/s390/kvm/kvm-s390.h
472 @@ -67,7 +67,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
473
474 static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
475 {
476 - return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
477 + return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
478 }
479
480 static inline int kvm_is_ucontrol(struct kvm *kvm)
481 diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
482 index 460f255729402..5182e0836ca71 100644
483 --- a/arch/s390/mm/kasan_init.c
484 +++ b/arch/s390/mm/kasan_init.c
485 @@ -101,6 +101,9 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
486 pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
487 sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);
488
489 + /*
490 + * The first 1MB of 1:1 mapping is mapped with 4KB pages
491 + */
492 while (address < end) {
493 pg_dir = pgd_offset_k(address);
494 if (pgd_none(*pg_dir)) {
495 @@ -146,30 +149,26 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
496
497 pm_dir = pmd_offset(pu_dir, address);
498 if (pmd_none(*pm_dir)) {
499 - if (mode == POPULATE_ZERO_SHADOW &&
500 - IS_ALIGNED(address, PMD_SIZE) &&
501 + if (IS_ALIGNED(address, PMD_SIZE) &&
502 end - address >= PMD_SIZE) {
503 - pmd_populate(&init_mm, pm_dir,
504 - kasan_early_shadow_pte);
505 - address = (address + PMD_SIZE) & PMD_MASK;
506 - continue;
507 - }
508 - /* the first megabyte of 1:1 is mapped with 4k pages */
509 - if (has_edat && address && end - address >= PMD_SIZE &&
510 - mode != POPULATE_ZERO_SHADOW) {
511 - void *page;
512 -
513 - if (mode == POPULATE_ONE2ONE) {
514 - page = (void *)address;
515 - } else {
516 - page = kasan_early_alloc_segment();
517 - memset(page, 0, _SEGMENT_SIZE);
518 + if (mode == POPULATE_ZERO_SHADOW) {
519 + pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
520 + address = (address + PMD_SIZE) & PMD_MASK;
521 + continue;
522 + } else if (has_edat && address) {
523 + void *page;
524 +
525 + if (mode == POPULATE_ONE2ONE) {
526 + page = (void *)address;
527 + } else {
528 + page = kasan_early_alloc_segment();
529 + memset(page, 0, _SEGMENT_SIZE);
530 + }
531 + pmd_val(*pm_dir) = __pa(page) | sgt_prot;
532 + address = (address + PMD_SIZE) & PMD_MASK;
533 + continue;
534 }
535 - pmd_val(*pm_dir) = __pa(page) | sgt_prot;
536 - address = (address + PMD_SIZE) & PMD_MASK;
537 - continue;
538 }
539 -
540 pt_dir = kasan_early_pte_alloc();
541 pmd_populate(&init_mm, pm_dir, pt_dir);
542 } else if (pmd_large(*pm_dir)) {
543 diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
544 index e160f4650f8e4..3e6612d8b921c 100644
545 --- a/arch/s390/net/bpf_jit_comp.c
546 +++ b/arch/s390/net/bpf_jit_comp.c
547 @@ -913,6 +913,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
548 break;
549 }
550 break;
551 + /*
552 + * BPF_NOSPEC (speculation barrier)
553 + */
554 + case BPF_ST | BPF_NOSPEC:
555 + break;
556 /*
557 * BPF_ST(X)
558 */
559 diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
560 index 3364e2a009899..fef734473c0f3 100644
561 --- a/arch/sparc/net/bpf_jit_comp_64.c
562 +++ b/arch/sparc/net/bpf_jit_comp_64.c
563 @@ -1287,6 +1287,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
564 return 1;
565 break;
566 }
567 + /* speculation barrier */
568 + case BPF_ST | BPF_NOSPEC:
569 + break;
570 /* ST: *(size *)(dst + off) = imm */
571 case BPF_ST | BPF_MEM | BPF_W:
572 case BPF_ST | BPF_MEM | BPF_H:
573 diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
574 index ff07040287c40..b7baaa9733173 100644
575 --- a/arch/x86/events/amd/ibs.c
576 +++ b/arch/x86/events/amd/ibs.c
577 @@ -562,6 +562,7 @@ static struct perf_ibs perf_ibs_op = {
578 .start = perf_ibs_start,
579 .stop = perf_ibs_stop,
580 .read = perf_ibs_read,
581 + .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
582 },
583 .msr = MSR_AMD64_IBSOPCTL,
584 .config_mask = IBS_OP_CONFIG_MASK,
585 diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
586 index 21eb593e0313e..008bcb15fe96a 100644
587 --- a/arch/x86/kernel/cpu/resctrl/monitor.c
588 +++ b/arch/x86/kernel/cpu/resctrl/monitor.c
589 @@ -242,6 +242,12 @@ static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
590 case QOS_L3_MBM_LOCAL_EVENT_ID:
591 m = &rr->d->mbm_local[rmid];
592 break;
593 + default:
594 + /*
595 + * Code would never reach here because an invalid
596 + * event id would fail the __rmid_read.
597 + */
598 + return RMID_VAL_ERROR;
599 }
600
601 if (rr->first) {
602 diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
603 index 023bd3e1aa0d2..3041015b05f71 100644
604 --- a/arch/x86/kvm/vmx/nested.c
605 +++ b/arch/x86/kvm/vmx/nested.c
606 @@ -2057,12 +2057,11 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
607 ~PIN_BASED_VMX_PREEMPTION_TIMER);
608
609 /* Posted interrupts setting is only taken from vmcs12. */
610 - if (nested_cpu_has_posted_intr(vmcs12)) {
611 + vmx->nested.pi_pending = false;
612 + if (nested_cpu_has_posted_intr(vmcs12))
613 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
614 - vmx->nested.pi_pending = false;
615 - } else {
616 + else
617 exec_control &= ~PIN_BASED_POSTED_INTR;
618 - }
619 pin_controls_set(vmx, exec_control);
620
621 /*
622 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
623 index 364e96e508d90..f1a0eebdcf641 100644
624 --- a/arch/x86/kvm/x86.c
625 +++ b/arch/x86/kvm/x86.c
626 @@ -2764,6 +2764,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
627 if (!msr_info->host_initiated) {
628 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
629 adjust_tsc_offset_guest(vcpu, adj);
630 + /* Before back to guest, tsc_timestamp must be adjusted
631 + * as well, otherwise guest's percpu pvclock time could jump.
632 + */
633 + kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
634 }
635 vcpu->arch.ia32_tsc_adjust_msr = data;
636 }
637 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
638 index 6e884f17634fe..55f62dca28aa2 100644
639 --- a/arch/x86/net/bpf_jit_comp.c
640 +++ b/arch/x86/net/bpf_jit_comp.c
641 @@ -728,6 +728,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
642 }
643 break;
644
645 + /* speculation barrier */
646 + case BPF_ST | BPF_NOSPEC:
647 + if (boot_cpu_has(X86_FEATURE_XMM2))
648 + /* Emit 'lfence' */
649 + EMIT3(0x0F, 0xAE, 0xE8);
650 + break;
651 +
652 /* ST: *(u8*)(dst_reg + off) = imm */
653 case BPF_ST | BPF_MEM | BPF_B:
654 if (is_ereg(dst_reg))
655 diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
656 index 0fcba32077c87..2914f900034e0 100644
657 --- a/arch/x86/net/bpf_jit_comp32.c
658 +++ b/arch/x86/net/bpf_jit_comp32.c
659 @@ -1705,6 +1705,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
660 i++;
661 break;
662 }
663 + /* speculation barrier */
664 + case BPF_ST | BPF_NOSPEC:
665 + if (boot_cpu_has(X86_FEATURE_XMM2))
666 + /* Emit 'lfence' */
667 + EMIT3(0x0F, 0xAE, 0xE8);
668 + break;
669 /* ST: *(u8*)(dst_reg + off) = imm */
670 case BPF_ST | BPF_MEM | BPF_H:
671 case BPF_ST | BPF_MEM | BPF_B:
672 diff --git a/certs/Makefile b/certs/Makefile
673 index f4b90bad8690a..2baef6fba029e 100644
674 --- a/certs/Makefile
675 +++ b/certs/Makefile
676 @@ -46,11 +46,19 @@ endif
677 redirect_openssl = 2>&1
678 quiet_redirect_openssl = 2>&1
679 silent_redirect_openssl = 2>/dev/null
680 +openssl_available = $(shell openssl help 2>/dev/null && echo yes)
681
682 # We do it this way rather than having a boolean option for enabling an
683 # external private key, because 'make randconfig' might enable such a
684 # boolean option and we unfortunately can't make it depend on !RANDCONFIG.
685 ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem")
686 +
687 +ifeq ($(openssl_available),yes)
688 +X509TEXT=$(shell openssl x509 -in "certs/signing_key.pem" -text 2>/dev/null)
689 +
690 +$(if $(findstring rsaEncryption,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem"))
691 +endif
692 +
693 $(obj)/signing_key.pem: $(obj)/x509.genkey
694 @$(kecho) "###"
695 @$(kecho) "### Now generating an X.509 key pair to be used for signing modules."
696 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
697 index f67b3fb33d579..7788af0ca1090 100644
698 --- a/drivers/ata/libata-core.c
699 +++ b/drivers/ata/libata-core.c
700 @@ -6394,7 +6394,7 @@ int ata_host_start(struct ata_host *host)
701 have_stop = 1;
702 }
703
704 - if (host->ops->host_stop)
705 + if (host->ops && host->ops->host_stop)
706 have_stop = 1;
707
708 if (have_stop) {
709 diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
710 index e0893f1b14522..43c0452a8ba91 100644
711 --- a/drivers/base/regmap/regmap.c
712 +++ b/drivers/base/regmap/regmap.c
713 @@ -1505,7 +1505,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
714 if (ret) {
715 dev_err(map->dev,
716 "Error in caching of register: %x ret: %d\n",
717 - reg + i, ret);
718 + reg + regmap_get_offset(map, i), ret);
719 return ret;
720 }
721 }
722 diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
723 index 6535614a7dc13..1df2b5801c3bc 100644
724 --- a/drivers/bcma/main.c
725 +++ b/drivers/bcma/main.c
726 @@ -236,6 +236,7 @@ EXPORT_SYMBOL(bcma_core_irq);
727
728 void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
729 {
730 + device_initialize(&core->dev);
731 core->dev.release = bcma_release_core_dev;
732 core->dev.bus = &bcma_bus_type;
733 dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
734 @@ -277,11 +278,10 @@ static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
735 {
736 int err;
737
738 - err = device_register(&core->dev);
739 + err = device_add(&core->dev);
740 if (err) {
741 bcma_err(bus, "Could not register dev for core 0x%03X\n",
742 core->id.id);
743 - put_device(&core->dev);
744 return;
745 }
746 core->dev_registered = true;
747 @@ -372,7 +372,7 @@ void bcma_unregister_cores(struct bcma_bus *bus)
748 /* Now noone uses internally-handled cores, we can free them */
749 list_for_each_entry_safe(core, tmp, &bus->cores, list) {
750 list_del(&core->list);
751 - kfree(core);
752 + put_device(&core->dev);
753 }
754 }
755
756 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
757 index 25e81b1a59a54..bc3ab98855cf0 100644
758 --- a/drivers/block/nbd.c
759 +++ b/drivers/block/nbd.c
760 @@ -1744,7 +1744,17 @@ static int nbd_dev_add(int index)
761 refcount_set(&nbd->refs, 1);
762 INIT_LIST_HEAD(&nbd->list);
763 disk->major = NBD_MAJOR;
764 +
765 + /* Too big first_minor can cause duplicate creation of
766 + * sysfs files/links, since first_minor will be truncated to
767 + * byte in __device_add_disk().
768 + */
769 disk->first_minor = index << part_shift;
770 + if (disk->first_minor > 0xff) {
771 + err = -EINVAL;
772 + goto out_free_idr;
773 + }
774 +
775 disk->fops = &nbd_fops;
776 disk->private_data = nbd;
777 sprintf(disk->disk_name, "nbd%d", index);
778 diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
779 index 47680237d0beb..8bc893df47364 100644
780 --- a/drivers/clk/mvebu/kirkwood.c
781 +++ b/drivers/clk/mvebu/kirkwood.c
782 @@ -265,6 +265,7 @@ static const char *powersave_parents[] = {
783 static const struct clk_muxing_soc_desc kirkwood_mux_desc[] __initconst = {
784 { "powersave", powersave_parents, ARRAY_SIZE(powersave_parents),
785 11, 1, 0 },
786 + { }
787 };
788
789 static struct clk *clk_muxing_get_src(
790 diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
791 index ef773db080e90..a0570213170d8 100644
792 --- a/drivers/clocksource/sh_cmt.c
793 +++ b/drivers/clocksource/sh_cmt.c
794 @@ -568,7 +568,8 @@ static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
795 ch->flags |= flag;
796
797 /* setup timeout if no clockevent */
798 - if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
799 + if (ch->cmt->num_channels == 1 &&
800 + flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
801 __sh_cmt_set_next(ch, ch->max_match_value);
802 out:
803 raw_spin_unlock_irqrestore(&ch->lock, flags);
804 @@ -604,20 +605,25 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
805 static u64 sh_cmt_clocksource_read(struct clocksource *cs)
806 {
807 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
808 - unsigned long flags;
809 u32 has_wrapped;
810 - u64 value;
811 - u32 raw;
812
813 - raw_spin_lock_irqsave(&ch->lock, flags);
814 - value = ch->total_cycles;
815 - raw = sh_cmt_get_counter(ch, &has_wrapped);
816 + if (ch->cmt->num_channels == 1) {
817 + unsigned long flags;
818 + u64 value;
819 + u32 raw;
820
821 - if (unlikely(has_wrapped))
822 - raw += ch->match_value + 1;
823 - raw_spin_unlock_irqrestore(&ch->lock, flags);
824 + raw_spin_lock_irqsave(&ch->lock, flags);
825 + value = ch->total_cycles;
826 + raw = sh_cmt_get_counter(ch, &has_wrapped);
827 +
828 + if (unlikely(has_wrapped))
829 + raw += ch->match_value + 1;
830 + raw_spin_unlock_irqrestore(&ch->lock, flags);
831 +
832 + return value + raw;
833 + }
834
835 - return value + raw;
836 + return sh_cmt_get_counter(ch, &has_wrapped);
837 }
838
839 static int sh_cmt_clocksource_enable(struct clocksource *cs)
840 @@ -680,7 +686,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
841 cs->disable = sh_cmt_clocksource_disable;
842 cs->suspend = sh_cmt_clocksource_suspend;
843 cs->resume = sh_cmt_clocksource_resume;
844 - cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
845 + cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width);
846 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
847
848 dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
849 diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
850 index 5c23a9a56921b..f261a57af1c01 100644
851 --- a/drivers/counter/104-quad-8.c
852 +++ b/drivers/counter/104-quad-8.c
853 @@ -1230,12 +1230,13 @@ static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
854 case 1:
855 case 3:
856 quad8_preset_register_set(priv, count->id, ceiling);
857 - break;
858 + mutex_unlock(&priv->lock);
859 + return len;
860 }
861
862 mutex_unlock(&priv->lock);
863
864 - return len;
865 + return -EINVAL;
866 }
867
868 static ssize_t quad8_count_preset_enable_read(struct counter_device *counter,
869 diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
870 index f8a48a84df2ab..66fa524b6261e 100644
871 --- a/drivers/crypto/mxs-dcp.c
872 +++ b/drivers/crypto/mxs-dcp.c
873 @@ -168,15 +168,19 @@ static struct dcp *global_sdcp;
874
875 static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
876 {
877 + int dma_err;
878 struct dcp *sdcp = global_sdcp;
879 const int chan = actx->chan;
880 uint32_t stat;
881 unsigned long ret;
882 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
883 -
884 dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
885 DMA_TO_DEVICE);
886
887 + dma_err = dma_mapping_error(sdcp->dev, desc_phys);
888 + if (dma_err)
889 + return dma_err;
890 +
891 reinit_completion(&sdcp->completion[chan]);
892
893 /* Clear status register. */
894 @@ -214,18 +218,29 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
895 static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
896 struct ablkcipher_request *req, int init)
897 {
898 + dma_addr_t key_phys, src_phys, dst_phys;
899 struct dcp *sdcp = global_sdcp;
900 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
901 struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
902 int ret;
903
904 - dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
905 - 2 * AES_KEYSIZE_128,
906 - DMA_TO_DEVICE);
907 - dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
908 - DCP_BUF_SZ, DMA_TO_DEVICE);
909 - dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
910 - DCP_BUF_SZ, DMA_FROM_DEVICE);
911 + key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
912 + 2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
913 + ret = dma_mapping_error(sdcp->dev, key_phys);
914 + if (ret)
915 + return ret;
916 +
917 + src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
918 + DCP_BUF_SZ, DMA_TO_DEVICE);
919 + ret = dma_mapping_error(sdcp->dev, src_phys);
920 + if (ret)
921 + goto err_src;
922 +
923 + dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
924 + DCP_BUF_SZ, DMA_FROM_DEVICE);
925 + ret = dma_mapping_error(sdcp->dev, dst_phys);
926 + if (ret)
927 + goto err_dst;
928
929 if (actx->fill % AES_BLOCK_SIZE) {
930 dev_err(sdcp->dev, "Invalid block size!\n");
931 @@ -263,10 +278,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
932 ret = mxs_dcp_start_dma(actx);
933
934 aes_done_run:
935 + dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
936 +err_dst:
937 + dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
938 +err_src:
939 dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
940 DMA_TO_DEVICE);
941 - dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
942 - dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
943
944 return ret;
945 }
946 @@ -565,6 +582,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
947 dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
948 DCP_BUF_SZ, DMA_TO_DEVICE);
949
950 + ret = dma_mapping_error(sdcp->dev, buf_phys);
951 + if (ret)
952 + return ret;
953 +
954 /* Fill in the DMA descriptor. */
955 desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
956 MXS_DCP_CONTROL0_INTERRUPT |
957 @@ -597,6 +618,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
958 if (rctx->fini) {
959 digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
960 DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
961 + ret = dma_mapping_error(sdcp->dev, digest_phys);
962 + if (ret)
963 + goto done_run;
964 +
965 desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
966 desc->payload = digest_phys;
967 }
968 diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
969 index f80db1eb29945..f8a146554b1f3 100644
970 --- a/drivers/crypto/omap-sham.c
971 +++ b/drivers/crypto/omap-sham.c
972 @@ -1734,7 +1734,7 @@ static void omap_sham_done_task(unsigned long data)
973 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
974 goto finish;
975 } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
976 - if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
977 + if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
978 omap_sham_update_dma_stop(dd);
979 if (dd->err) {
980 err = dd->err;
981 diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
982 index d2d0ae445fd89..7c7d49a8a4034 100644
983 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
984 +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
985 @@ -123,10 +123,10 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
986 hw_data->enable_error_correction = adf_vf_void_noop;
987 hw_data->init_admin_comms = adf_vf_int_noop;
988 hw_data->exit_admin_comms = adf_vf_void_noop;
989 - hw_data->send_admin_init = adf_vf2pf_init;
990 + hw_data->send_admin_init = adf_vf2pf_notify_init;
991 hw_data->init_arb = adf_vf_int_noop;
992 hw_data->exit_arb = adf_vf_void_noop;
993 - hw_data->disable_iov = adf_vf2pf_shutdown;
994 + hw_data->disable_iov = adf_vf2pf_notify_shutdown;
995 hw_data->get_accel_mask = get_accel_mask;
996 hw_data->get_ae_mask = get_ae_mask;
997 hw_data->get_num_accels = get_num_accels;
998 diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
999 index 38e4bc04f407b..90e8a7564756b 100644
1000 --- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
1001 +++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
1002 @@ -123,10 +123,10 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
1003 hw_data->enable_error_correction = adf_vf_void_noop;
1004 hw_data->init_admin_comms = adf_vf_int_noop;
1005 hw_data->exit_admin_comms = adf_vf_void_noop;
1006 - hw_data->send_admin_init = adf_vf2pf_init;
1007 + hw_data->send_admin_init = adf_vf2pf_notify_init;
1008 hw_data->init_arb = adf_vf_int_noop;
1009 hw_data->exit_arb = adf_vf_void_noop;
1010 - hw_data->disable_iov = adf_vf2pf_shutdown;
1011 + hw_data->disable_iov = adf_vf2pf_notify_shutdown;
1012 hw_data->get_accel_mask = get_accel_mask;
1013 hw_data->get_ae_mask = get_ae_mask;
1014 hw_data->get_num_accels = get_num_accels;
1015 diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
1016 index d78f8d5c89c3f..289dd7e48d4a4 100644
1017 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h
1018 +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
1019 @@ -239,8 +239,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
1020 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
1021 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
1022
1023 -int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
1024 -void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
1025 +int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
1026 +void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
1027 int adf_init_pf_wq(void);
1028 void adf_exit_pf_wq(void);
1029 int adf_init_vf_wq(void);
1030 @@ -263,12 +263,12 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
1031 {
1032 }
1033
1034 -static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
1035 +static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
1036 {
1037 return 0;
1038 }
1039
1040 -static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
1041 +static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
1042 {
1043 }
1044
1045 diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
1046 index 26556c7130497..7a7d43c475342 100644
1047 --- a/drivers/crypto/qat/qat_common/adf_init.c
1048 +++ b/drivers/crypto/qat/qat_common/adf_init.c
1049 @@ -105,6 +105,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
1050 struct service_hndl *service;
1051 struct list_head *list_itr;
1052 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1053 + int ret;
1054
1055 if (!hw_data) {
1056 dev_err(&GET_DEV(accel_dev),
1057 @@ -171,9 +172,9 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
1058 }
1059
1060 hw_data->enable_error_correction(accel_dev);
1061 - hw_data->enable_vf2pf_comms(accel_dev);
1062 + ret = hw_data->enable_vf2pf_comms(accel_dev);
1063
1064 - return 0;
1065 + return ret;
1066 }
1067 EXPORT_SYMBOL_GPL(adf_dev_init);
1068
1069 diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
1070 index 4898ef41fd9fd..7d319c5c071c4 100644
1071 --- a/drivers/crypto/qat/qat_common/adf_isr.c
1072 +++ b/drivers/crypto/qat/qat_common/adf_isr.c
1073 @@ -59,6 +59,8 @@
1074 #include "adf_transport_access_macros.h"
1075 #include "adf_transport_internal.h"
1076
1077 +#define ADF_MAX_NUM_VFS 32
1078 +
1079 static int adf_enable_msix(struct adf_accel_dev *accel_dev)
1080 {
1081 struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
1082 @@ -111,7 +113,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
1083 struct adf_bar *pmisc =
1084 &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
1085 void __iomem *pmisc_bar_addr = pmisc->virt_addr;
1086 - u32 vf_mask;
1087 + unsigned long vf_mask;
1088
1089 /* Get the interrupt sources triggered by VFs */
1090 vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
1091 @@ -132,8 +134,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
1092 * unless the VF is malicious and is attempting to
1093 * flood the host OS with VF2PF interrupts.
1094 */
1095 - for_each_set_bit(i, (const unsigned long *)&vf_mask,
1096 - (sizeof(vf_mask) * BITS_PER_BYTE)) {
1097 + for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
1098 vf_info = accel_dev->pf.vf_info + i;
1099
1100 if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
1101 diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
1102 index b3875fdf6cd72..c64481160b711 100644
1103 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
1104 +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
1105 @@ -231,7 +231,6 @@ int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
1106
1107 return ret;
1108 }
1109 -EXPORT_SYMBOL_GPL(adf_iov_putmsg);
1110
1111 void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
1112 {
1113 @@ -361,6 +360,8 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
1114 msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
1115 BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
1116
1117 + reinit_completion(&accel_dev->vf.iov_msg_completion);
1118 +
1119 /* Send request from VF to PF */
1120 ret = adf_iov_putmsg(accel_dev, msg, 0);
1121 if (ret) {
1122 diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
1123 index cd5f37dffe8a6..1830194567e84 100644
1124 --- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
1125 +++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
1126 @@ -49,14 +49,14 @@
1127 #include "adf_pf2vf_msg.h"
1128
1129 /**
1130 - * adf_vf2pf_init() - send init msg to PF
1131 + * adf_vf2pf_notify_init() - send init msg to PF
1132 * @accel_dev: Pointer to acceleration VF device.
1133 *
1134 * Function sends an init messge from the VF to a PF
1135 *
1136 * Return: 0 on success, error code otherwise.
1137 */
1138 -int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
1139 +int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
1140 {
1141 u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
1142 (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
1143 @@ -69,17 +69,17 @@ int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
1144 set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
1145 return 0;
1146 }
1147 -EXPORT_SYMBOL_GPL(adf_vf2pf_init);
1148 +EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
1149
1150 /**
1151 - * adf_vf2pf_shutdown() - send shutdown msg to PF
1152 + * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
1153 * @accel_dev: Pointer to acceleration VF device.
1154 *
1155 * Function sends a shutdown messge from the VF to a PF
1156 *
1157 * Return: void
1158 */
1159 -void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
1160 +void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
1161 {
1162 u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
1163 (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
1164 @@ -89,4 +89,4 @@ void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
1165 dev_err(&GET_DEV(accel_dev),
1166 "Failed to send Shutdown event to PF\n");
1167 }
1168 -EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
1169 +EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
1170 diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
1171 index df9a1f35b8320..ef90902c8200d 100644
1172 --- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
1173 +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
1174 @@ -203,6 +203,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
1175 struct adf_bar *pmisc =
1176 &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
1177 void __iomem *pmisc_bar_addr = pmisc->virt_addr;
1178 + bool handled = false;
1179 u32 v_int;
1180
1181 /* Read VF INT source CSR to determine the source of VF interrupt */
1182 @@ -215,7 +216,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
1183
1184 /* Schedule tasklet to handle interrupt BH */
1185 tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
1186 - return IRQ_HANDLED;
1187 + handled = true;
1188 }
1189
1190 /* Check bundle interrupt */
1191 @@ -227,10 +228,10 @@ static irqreturn_t adf_isr(int irq, void *privdata)
1192 WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
1193 0);
1194 tasklet_hi_schedule(&bank->resp_handler);
1195 - return IRQ_HANDLED;
1196 + handled = true;
1197 }
1198
1199 - return IRQ_NONE;
1200 + return handled ? IRQ_HANDLED : IRQ_NONE;
1201 }
1202
1203 static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
1204 diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
1205 index a3b4dd8099a7b..3a8361c83f0b1 100644
1206 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
1207 +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
1208 @@ -123,10 +123,10 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
1209 hw_data->enable_error_correction = adf_vf_void_noop;
1210 hw_data->init_admin_comms = adf_vf_int_noop;
1211 hw_data->exit_admin_comms = adf_vf_void_noop;
1212 - hw_data->send_admin_init = adf_vf2pf_init;
1213 + hw_data->send_admin_init = adf_vf2pf_notify_init;
1214 hw_data->init_arb = adf_vf_int_noop;
1215 hw_data->exit_arb = adf_vf_void_noop;
1216 - hw_data->disable_iov = adf_vf2pf_shutdown;
1217 + hw_data->disable_iov = adf_vf2pf_notify_shutdown;
1218 hw_data->get_accel_mask = get_accel_mask;
1219 hw_data->get_ae_mask = get_ae_mask;
1220 hw_data->get_num_accels = get_num_accels;
1221 diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
1222 index f72be5f94e6fa..29576922df78f 100644
1223 --- a/drivers/edac/i10nm_base.c
1224 +++ b/drivers/edac/i10nm_base.c
1225 @@ -26,8 +26,8 @@
1226 pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg))
1227 #define I10NM_GET_DIMMMTR(m, i, j) \
1228 readl((m)->mbase + 0x2080c + (i) * 0x4000 + (j) * 4)
1229 -#define I10NM_GET_MCDDRTCFG(m, i, j) \
1230 - readl((m)->mbase + 0x20970 + (i) * 0x4000 + (j) * 4)
1231 +#define I10NM_GET_MCDDRTCFG(m, i) \
1232 + readl((m)->mbase + 0x20970 + (i) * 0x4000)
1233 #define I10NM_GET_MCMTR(m, i) \
1234 readl((m)->mbase + 0x20ef8 + (i) * 0x4000)
1235
1236 @@ -156,11 +156,11 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci)
1237 continue;
1238
1239 ndimms = 0;
1240 + mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
1241 for (j = 0; j < I10NM_NUM_DIMMS; j++) {
1242 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
1243 mci->n_layers, i, j, 0);
1244 mtr = I10NM_GET_DIMMMTR(imc, i, j);
1245 - mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i, j);
1246 edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
1247 mtr, mcddrtcfg, imc->mc, i, j);
1248
1249 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
1250 index 82155ac3288a0..64ee44c2fdd1d 100644
1251 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
1252 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
1253 @@ -163,17 +163,28 @@ static int acp_poweron(struct generic_pm_domain *genpd)
1254 return 0;
1255 }
1256
1257 -static struct device *get_mfd_cell_dev(const char *device_name, int r)
1258 +static int acp_genpd_add_device(struct device *dev, void *data)
1259 {
1260 - char auto_dev_name[25];
1261 - struct device *dev;
1262 + struct generic_pm_domain *gpd = data;
1263 + int ret;
1264
1265 - snprintf(auto_dev_name, sizeof(auto_dev_name),
1266 - "%s.%d.auto", device_name, r);
1267 - dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
1268 - dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
1269 + ret = pm_genpd_add_device(gpd, dev);
1270 + if (ret)
1271 + dev_err(dev, "Failed to add dev to genpd %d\n", ret);
1272
1273 - return dev;
1274 + return ret;
1275 +}
1276 +
1277 +static int acp_genpd_remove_device(struct device *dev, void *data)
1278 +{
1279 + int ret;
1280 +
1281 + ret = pm_genpd_remove_device(dev);
1282 + if (ret)
1283 + dev_err(dev, "Failed to remove dev from genpd %d\n", ret);
1284 +
1285 + /* Continue to remove */
1286 + return 0;
1287 }
1288
1289 /**
1290 @@ -184,11 +195,10 @@ static struct device *get_mfd_cell_dev(const char *device_name, int r)
1291 */
1292 static int acp_hw_init(void *handle)
1293 {
1294 - int r, i;
1295 + int r;
1296 uint64_t acp_base;
1297 u32 val = 0;
1298 u32 count = 0;
1299 - struct device *dev;
1300 struct i2s_platform_data *i2s_pdata = NULL;
1301
1302 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1303 @@ -344,15 +354,10 @@ static int acp_hw_init(void *handle)
1304 if (r)
1305 goto failure;
1306
1307 - for (i = 0; i < ACP_DEVS ; i++) {
1308 - dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
1309 - r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
1310 - if (r) {
1311 - dev_err(dev, "Failed to add dev to genpd\n");
1312 - goto failure;
1313 - }
1314 - }
1315 -
1316 + r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
1317 + acp_genpd_add_device);
1318 + if (r)
1319 + goto failure;
1320
1321 /* Assert Soft reset of ACP */
1322 val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
1323 @@ -413,10 +418,8 @@ failure:
1324 */
1325 static int acp_hw_fini(void *handle)
1326 {
1327 - int i, ret;
1328 u32 val = 0;
1329 u32 count = 0;
1330 - struct device *dev;
1331 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1332
1333 /* return early if no ACP */
1334 @@ -461,13 +464,8 @@ static int acp_hw_fini(void *handle)
1335 udelay(100);
1336 }
1337
1338 - for (i = 0; i < ACP_DEVS ; i++) {
1339 - dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
1340 - ret = pm_genpd_remove_device(dev);
1341 - /* If removal fails, dont giveup and try rest */
1342 - if (ret)
1343 - dev_err(dev, "remove dev from genpd failed\n");
1344 - }
1345 + device_for_each_child(adev->acp.parent, NULL,
1346 + acp_genpd_remove_device);
1347
1348 mfd_remove_devices(adev->acp.parent);
1349 kfree(adev->acp.acp_res);
1350 diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
1351 index 179e8d52cadb4..a08ca7a47400f 100644
1352 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
1353 +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
1354 @@ -281,10 +281,12 @@ static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
1355 int i;
1356
1357 for (i = 0; i < ctx->mixer_count; i++) {
1358 - DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
1359 - DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
1360 - DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
1361 - DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
1362 + enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
1363 +
1364 + DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
1365 + DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
1366 + DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
1367 + DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
1368 }
1369 }
1370
1371 diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
1372 index 55ea4bc2ee9cb..0d37ae5b310c4 100644
1373 --- a/drivers/gpu/drm/msm/dsi/dsi.c
1374 +++ b/drivers/gpu/drm/msm/dsi/dsi.c
1375 @@ -26,8 +26,10 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
1376 }
1377
1378 phy_pdev = of_find_device_by_node(phy_node);
1379 - if (phy_pdev)
1380 + if (phy_pdev) {
1381 msm_dsi->phy = platform_get_drvdata(phy_pdev);
1382 + msm_dsi->phy_dev = &phy_pdev->dev;
1383 + }
1384
1385 of_node_put(phy_node);
1386
1387 @@ -36,8 +38,6 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
1388 return -EPROBE_DEFER;
1389 }
1390
1391 - msm_dsi->phy_dev = get_device(&phy_pdev->dev);
1392 -
1393 return 0;
1394 }
1395
1396 diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
1397 index 238fb6d54df47..413bf314a2bc3 100644
1398 --- a/drivers/gpu/drm/panfrost/panfrost_device.c
1399 +++ b/drivers/gpu/drm/panfrost/panfrost_device.c
1400 @@ -59,7 +59,8 @@ static int panfrost_clk_init(struct panfrost_device *pfdev)
1401 if (IS_ERR(pfdev->bus_clock)) {
1402 dev_err(pfdev->dev, "get bus_clock failed %ld\n",
1403 PTR_ERR(pfdev->bus_clock));
1404 - return PTR_ERR(pfdev->bus_clock);
1405 + err = PTR_ERR(pfdev->bus_clock);
1406 + goto disable_clock;
1407 }
1408
1409 if (pfdev->bus_clock) {
1410 diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
1411 index ff340d7ae2e52..6a880c2623808 100644
1412 --- a/drivers/i2c/busses/i2c-highlander.c
1413 +++ b/drivers/i2c/busses/i2c-highlander.c
1414 @@ -379,7 +379,7 @@ static int highlander_i2c_probe(struct platform_device *pdev)
1415 platform_set_drvdata(pdev, dev);
1416
1417 dev->irq = platform_get_irq(pdev, 0);
1418 - if (iic_force_poll)
1419 + if (dev->irq < 0 || iic_force_poll)
1420 dev->irq = 0;
1421
1422 if (dev->irq) {
1423 diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
1424 index 2f8b8050a2233..899624721c1ea 100644
1425 --- a/drivers/i2c/busses/i2c-iop3xx.c
1426 +++ b/drivers/i2c/busses/i2c-iop3xx.c
1427 @@ -467,16 +467,14 @@ iop3xx_i2c_probe(struct platform_device *pdev)
1428
1429 irq = platform_get_irq(pdev, 0);
1430 if (irq < 0) {
1431 - ret = -ENXIO;
1432 + ret = irq;
1433 goto unmap;
1434 }
1435 ret = request_irq(irq, iop3xx_i2c_irq_handler, 0,
1436 pdev->name, adapter_data);
1437
1438 - if (ret) {
1439 - ret = -EIO;
1440 + if (ret)
1441 goto unmap;
1442 - }
1443
1444 memcpy(new_adapter->name, pdev->name, strlen(pdev->name));
1445 new_adapter->owner = THIS_MODULE;
1446 diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
1447 index e1ef0122ef759..5587e7c549c4f 100644
1448 --- a/drivers/i2c/busses/i2c-mt65xx.c
1449 +++ b/drivers/i2c/busses/i2c-mt65xx.c
1450 @@ -932,7 +932,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
1451 return PTR_ERR(i2c->pdmabase);
1452
1453 irq = platform_get_irq(pdev, 0);
1454 - if (irq <= 0)
1455 + if (irq < 0)
1456 return irq;
1457
1458 init_completion(&i2c->msg_complete);
1459 diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
1460 index d6322698b2458..e6f927c6f8af9 100644
1461 --- a/drivers/i2c/busses/i2c-s3c2410.c
1462 +++ b/drivers/i2c/busses/i2c-s3c2410.c
1463 @@ -1141,7 +1141,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1464 */
1465 if (!(i2c->quirks & QUIRK_POLL)) {
1466 i2c->irq = ret = platform_get_irq(pdev, 0);
1467 - if (ret <= 0) {
1468 + if (ret < 0) {
1469 dev_err(&pdev->dev, "cannot find IRQ\n");
1470 clk_unprepare(i2c->clk);
1471 return ret;
1472 diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
1473 index c94995f0daa2a..03ae33093ce63 100644
1474 --- a/drivers/leds/leds-lt3593.c
1475 +++ b/drivers/leds/leds-lt3593.c
1476 @@ -103,10 +103,9 @@ static int lt3593_led_probe(struct platform_device *pdev)
1477 init_data.default_label = ":";
1478
1479 ret = devm_led_classdev_register_ext(dev, &led_data->cdev, &init_data);
1480 - if (ret < 0) {
1481 - fwnode_handle_put(child);
1482 + fwnode_handle_put(child);
1483 + if (ret < 0)
1484 return ret;
1485 - }
1486
1487 led_data->cdev.dev->of_node = dev->of_node;
1488 platform_set_drvdata(pdev, led_data);
1489 diff --git a/drivers/leds/trigger/ledtrig-audio.c b/drivers/leds/trigger/ledtrig-audio.c
1490 index f76621e88482d..c6b437e6369b8 100644
1491 --- a/drivers/leds/trigger/ledtrig-audio.c
1492 +++ b/drivers/leds/trigger/ledtrig-audio.c
1493 @@ -6,10 +6,33 @@
1494 #include <linux/kernel.h>
1495 #include <linux/leds.h>
1496 #include <linux/module.h>
1497 +#include "../leds.h"
1498
1499 -static struct led_trigger *ledtrig_audio[NUM_AUDIO_LEDS];
1500 static enum led_brightness audio_state[NUM_AUDIO_LEDS];
1501
1502 +static int ledtrig_audio_mute_activate(struct led_classdev *led_cdev)
1503 +{
1504 + led_set_brightness_nosleep(led_cdev, audio_state[LED_AUDIO_MUTE]);
1505 + return 0;
1506 +}
1507 +
1508 +static int ledtrig_audio_micmute_activate(struct led_classdev *led_cdev)
1509 +{
1510 + led_set_brightness_nosleep(led_cdev, audio_state[LED_AUDIO_MICMUTE]);
1511 + return 0;
1512 +}
1513 +
1514 +static struct led_trigger ledtrig_audio[NUM_AUDIO_LEDS] = {
1515 + [LED_AUDIO_MUTE] = {
1516 + .name = "audio-mute",
1517 + .activate = ledtrig_audio_mute_activate,
1518 + },
1519 + [LED_AUDIO_MICMUTE] = {
1520 + .name = "audio-micmute",
1521 + .activate = ledtrig_audio_micmute_activate,
1522 + },
1523 +};
1524 +
1525 enum led_brightness ledtrig_audio_get(enum led_audio type)
1526 {
1527 return audio_state[type];
1528 @@ -19,24 +42,22 @@ EXPORT_SYMBOL_GPL(ledtrig_audio_get);
1529 void ledtrig_audio_set(enum led_audio type, enum led_brightness state)
1530 {
1531 audio_state[type] = state;
1532 - led_trigger_event(ledtrig_audio[type], state);
1533 + led_trigger_event(&ledtrig_audio[type], state);
1534 }
1535 EXPORT_SYMBOL_GPL(ledtrig_audio_set);
1536
1537 static int __init ledtrig_audio_init(void)
1538 {
1539 - led_trigger_register_simple("audio-mute",
1540 - &ledtrig_audio[LED_AUDIO_MUTE]);
1541 - led_trigger_register_simple("audio-micmute",
1542 - &ledtrig_audio[LED_AUDIO_MICMUTE]);
1543 + led_trigger_register(&ledtrig_audio[LED_AUDIO_MUTE]);
1544 + led_trigger_register(&ledtrig_audio[LED_AUDIO_MICMUTE]);
1545 return 0;
1546 }
1547 module_init(ledtrig_audio_init);
1548
1549 static void __exit ledtrig_audio_exit(void)
1550 {
1551 - led_trigger_unregister_simple(ledtrig_audio[LED_AUDIO_MUTE]);
1552 - led_trigger_unregister_simple(ledtrig_audio[LED_AUDIO_MICMUTE]);
1553 + led_trigger_unregister(&ledtrig_audio[LED_AUDIO_MUTE]);
1554 + led_trigger_unregister(&ledtrig_audio[LED_AUDIO_MICMUTE]);
1555 }
1556 module_exit(ledtrig_audio_exit);
1557
1558 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
1559 index b0d569032dd4e..efdf6ce0443ea 100644
1560 --- a/drivers/md/bcache/super.c
1561 +++ b/drivers/md/bcache/super.c
1562 @@ -839,20 +839,20 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
1563 n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
1564 d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
1565 if (!d->full_dirty_stripes)
1566 - return -ENOMEM;
1567 + goto out_free_stripe_sectors_dirty;
1568
1569 idx = ida_simple_get(&bcache_device_idx, 0,
1570 BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
1571 if (idx < 0)
1572 - return idx;
1573 + goto out_free_full_dirty_stripes;
1574
1575 if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
1576 BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
1577 - goto err;
1578 + goto out_ida_remove;
1579
1580 d->disk = alloc_disk(BCACHE_MINORS);
1581 if (!d->disk)
1582 - goto err;
1583 + goto out_bioset_exit;
1584
1585 set_capacity(d->disk, sectors);
1586 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
1587 @@ -887,8 +887,14 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
1588
1589 return 0;
1590
1591 -err:
1592 +out_bioset_exit:
1593 + bioset_exit(&d->bio_split);
1594 +out_ida_remove:
1595 ida_simple_remove(&bcache_device_idx, idx);
1596 +out_free_full_dirty_stripes:
1597 + kvfree(d->full_dirty_stripes);
1598 +out_free_stripe_sectors_dirty:
1599 + kvfree(d->stripe_sectors_dirty);
1600 return -ENOMEM;
1601
1602 }
1603 diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
1604 index e43d8327b8103..1088161498df0 100644
1605 --- a/drivers/media/i2c/tda1997x.c
1606 +++ b/drivers/media/i2c/tda1997x.c
1607 @@ -2233,6 +2233,7 @@ static int tda1997x_core_init(struct v4l2_subdev *sd)
1608 /* get initial HDMI status */
1609 state->hdmi_status = io_read(sd, REG_HDMI_FLAGS);
1610
1611 + io_write(sd, REG_EDID_ENABLE, EDID_ENABLE_A_EN | EDID_ENABLE_B_EN);
1612 return 0;
1613 }
1614
1615 diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
1616 index 00c7bed3dd572..e6b68be09f8f0 100644
1617 --- a/drivers/media/platform/coda/coda-bit.c
1618 +++ b/drivers/media/platform/coda/coda-bit.c
1619 @@ -2023,17 +2023,25 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
1620 u32 src_fourcc, dst_fourcc;
1621 int ret;
1622
1623 + q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
1624 + q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
1625 + src_fourcc = q_data_src->fourcc;
1626 + dst_fourcc = q_data_dst->fourcc;
1627 +
1628 if (!ctx->initialized) {
1629 ret = __coda_decoder_seq_init(ctx);
1630 if (ret < 0)
1631 return ret;
1632 + } else {
1633 + ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
1634 + CODA9_FRAME_TILED2LINEAR);
1635 + if (dst_fourcc == V4L2_PIX_FMT_NV12 || dst_fourcc == V4L2_PIX_FMT_YUYV)
1636 + ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
1637 + if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
1638 + ctx->frame_mem_ctrl |= (0x3 << 9) |
1639 + ((ctx->use_vdoa) ? 0 : CODA9_FRAME_TILED2LINEAR);
1640 }
1641
1642 - q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
1643 - q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
1644 - src_fourcc = q_data_src->fourcc;
1645 - dst_fourcc = q_data_dst->fourcc;
1646 -
1647 coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
1648
1649 ret = coda_alloc_framebuffers(ctx, q_data_dst, src_fourcc);
1650 diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
1651 index 30028ceb548b1..766ca497f8565 100644
1652 --- a/drivers/media/platform/qcom/venus/venc.c
1653 +++ b/drivers/media/platform/qcom/venus/venc.c
1654 @@ -308,6 +308,8 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
1655 else
1656 return NULL;
1657 fmt = find_format(inst, pixmp->pixelformat, f->type);
1658 + if (!fmt)
1659 + return NULL;
1660 }
1661
1662 pixmp->width = clamp(pixmp->width, frame_width_min(inst),
1663 diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c
1664 index 4077217777f92..93194f03764d2 100644
1665 --- a/drivers/media/spi/cxd2880-spi.c
1666 +++ b/drivers/media/spi/cxd2880-spi.c
1667 @@ -524,13 +524,13 @@ cxd2880_spi_probe(struct spi_device *spi)
1668 if (IS_ERR(dvb_spi->vcc_supply)) {
1669 if (PTR_ERR(dvb_spi->vcc_supply) == -EPROBE_DEFER) {
1670 ret = -EPROBE_DEFER;
1671 - goto fail_adapter;
1672 + goto fail_regulator;
1673 }
1674 dvb_spi->vcc_supply = NULL;
1675 } else {
1676 ret = regulator_enable(dvb_spi->vcc_supply);
1677 if (ret)
1678 - goto fail_adapter;
1679 + goto fail_regulator;
1680 }
1681
1682 dvb_spi->spi = spi;
1683 @@ -618,6 +618,9 @@ fail_frontend:
1684 fail_attach:
1685 dvb_unregister_adapter(&dvb_spi->adapter);
1686 fail_adapter:
1687 + if (!dvb_spi->vcc_supply)
1688 + regulator_disable(dvb_spi->vcc_supply);
1689 +fail_regulator:
1690 kfree(dvb_spi);
1691 return ret;
1692 }
1693 diff --git a/drivers/media/usb/dvb-usb/dvb-usb-i2c.c b/drivers/media/usb/dvb-usb/dvb-usb-i2c.c
1694 index 2e07106f46803..bc4b2abdde1a4 100644
1695 --- a/drivers/media/usb/dvb-usb/dvb-usb-i2c.c
1696 +++ b/drivers/media/usb/dvb-usb/dvb-usb-i2c.c
1697 @@ -17,7 +17,8 @@ int dvb_usb_i2c_init(struct dvb_usb_device *d)
1698
1699 if (d->props.i2c_algo == NULL) {
1700 err("no i2c algorithm specified");
1701 - return -EINVAL;
1702 + ret = -EINVAL;
1703 + goto err;
1704 }
1705
1706 strscpy(d->i2c_adap.name, d->desc->name, sizeof(d->i2c_adap.name));
1707 @@ -27,11 +28,15 @@ int dvb_usb_i2c_init(struct dvb_usb_device *d)
1708
1709 i2c_set_adapdata(&d->i2c_adap, d);
1710
1711 - if ((ret = i2c_add_adapter(&d->i2c_adap)) < 0)
1712 + ret = i2c_add_adapter(&d->i2c_adap);
1713 + if (ret < 0) {
1714 err("could not add i2c adapter");
1715 + goto err;
1716 + }
1717
1718 d->state |= DVB_USB_STATE_I2C;
1719
1720 +err:
1721 return ret;
1722 }
1723
1724 diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
1725 index f57c4627624f5..e7720ff11d3d9 100644
1726 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
1727 +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
1728 @@ -194,8 +194,8 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
1729
1730 err_adapter_init:
1731 dvb_usb_adapter_exit(d);
1732 -err_i2c_init:
1733 dvb_usb_i2c_exit(d);
1734 +err_i2c_init:
1735 if (d->priv && d->props.priv_destroy)
1736 d->props.priv_destroy(d);
1737 err_priv_init:
1738 diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c
1739 index e368935a50894..c16d4f1624952 100644
1740 --- a/drivers/media/usb/dvb-usb/nova-t-usb2.c
1741 +++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c
1742 @@ -130,7 +130,7 @@ ret:
1743
1744 static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
1745 {
1746 - int i;
1747 + int i, ret;
1748 u8 b;
1749
1750 mac[0] = 0x00;
1751 @@ -139,7 +139,9 @@ static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
1752
1753 /* this is a complete guess, but works for my box */
1754 for (i = 136; i < 139; i++) {
1755 - dibusb_read_eeprom_byte(d,i, &b);
1756 + ret = dibusb_read_eeprom_byte(d, i, &b);
1757 + if (ret)
1758 + return ret;
1759
1760 mac[5 - (i - 136)] = b;
1761 }
1762 diff --git a/drivers/media/usb/dvb-usb/vp702x.c b/drivers/media/usb/dvb-usb/vp702x.c
1763 index 381b5c898a076..b7ee972455e5b 100644
1764 --- a/drivers/media/usb/dvb-usb/vp702x.c
1765 +++ b/drivers/media/usb/dvb-usb/vp702x.c
1766 @@ -291,16 +291,22 @@ static int vp702x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
1767 static int vp702x_read_mac_addr(struct dvb_usb_device *d,u8 mac[6])
1768 {
1769 u8 i, *buf;
1770 + int ret;
1771 struct vp702x_device_state *st = d->priv;
1772
1773 mutex_lock(&st->buf_mutex);
1774 buf = st->buf;
1775 - for (i = 6; i < 12; i++)
1776 - vp702x_usb_in_op(d, READ_EEPROM_REQ, i, 1, &buf[i - 6], 1);
1777 + for (i = 6; i < 12; i++) {
1778 + ret = vp702x_usb_in_op(d, READ_EEPROM_REQ, i, 1,
1779 + &buf[i - 6], 1);
1780 + if (ret < 0)
1781 + goto err;
1782 + }
1783
1784 memcpy(mac, buf, 6);
1785 +err:
1786 mutex_unlock(&st->buf_mutex);
1787 - return 0;
1788 + return ret;
1789 }
1790
1791 static int vp702x_frontend_attach(struct dvb_usb_adapter *adap)
1792 diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
1793 index 59529cbf9cd0b..0b6d77c3bec86 100644
1794 --- a/drivers/media/usb/em28xx/em28xx-input.c
1795 +++ b/drivers/media/usb/em28xx/em28xx-input.c
1796 @@ -842,7 +842,6 @@ error:
1797 kfree(ir);
1798 ref_put:
1799 em28xx_shutdown_buttons(dev);
1800 - kref_put(&dev->ref, em28xx_free_device);
1801 return err;
1802 }
1803
1804 diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
1805 index 153a0c3e3da64..b9302d77d6c83 100644
1806 --- a/drivers/media/usb/go7007/go7007-driver.c
1807 +++ b/drivers/media/usb/go7007/go7007-driver.c
1808 @@ -691,49 +691,23 @@ struct go7007 *go7007_alloc(const struct go7007_board_info *board,
1809 struct device *dev)
1810 {
1811 struct go7007 *go;
1812 - int i;
1813
1814 go = kzalloc(sizeof(struct go7007), GFP_KERNEL);
1815 if (go == NULL)
1816 return NULL;
1817 go->dev = dev;
1818 go->board_info = board;
1819 - go->board_id = 0;
1820 go->tuner_type = -1;
1821 - go->channel_number = 0;
1822 - go->name[0] = 0;
1823 mutex_init(&go->hw_lock);
1824 init_waitqueue_head(&go->frame_waitq);
1825 spin_lock_init(&go->spinlock);
1826 go->status = STATUS_INIT;
1827 - memset(&go->i2c_adapter, 0, sizeof(go->i2c_adapter));
1828 - go->i2c_adapter_online = 0;
1829 - go->interrupt_available = 0;
1830 init_waitqueue_head(&go->interrupt_waitq);
1831 - go->input = 0;
1832 go7007_update_board(go);
1833 - go->encoder_h_halve = 0;
1834 - go->encoder_v_halve = 0;
1835 - go->encoder_subsample = 0;
1836 go->format = V4L2_PIX_FMT_MJPEG;
1837 go->bitrate = 1500000;
1838 go->fps_scale = 1;
1839 - go->pali = 0;
1840 go->aspect_ratio = GO7007_RATIO_1_1;
1841 - go->gop_size = 0;
1842 - go->ipb = 0;
1843 - go->closed_gop = 0;
1844 - go->repeat_seqhead = 0;
1845 - go->seq_header_enable = 0;
1846 - go->gop_header_enable = 0;
1847 - go->dvd_mode = 0;
1848 - go->interlace_coding = 0;
1849 - for (i = 0; i < 4; ++i)
1850 - go->modet[i].enable = 0;
1851 - for (i = 0; i < 1624; ++i)
1852 - go->modet_map[i] = 0;
1853 - go->audio_deliver = NULL;
1854 - go->audio_enabled = 0;
1855
1856 return go;
1857 }
1858 diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
1859 index 6ace82028667b..7b280cb363271 100644
1860 --- a/drivers/mmc/host/dw_mmc.c
1861 +++ b/drivers/mmc/host/dw_mmc.c
1862 @@ -782,6 +782,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
1863 int ret = 0;
1864
1865 /* Set external dma config: burst size, burst width */
1866 + memset(&cfg, 0, sizeof(cfg));
1867 cfg.dst_addr = host->phy_regs + fifo_offset;
1868 cfg.src_addr = cfg.dst_addr;
1869 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1870 diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
1871 index a0670e9cd0127..5553a5643f405 100644
1872 --- a/drivers/mmc/host/moxart-mmc.c
1873 +++ b/drivers/mmc/host/moxart-mmc.c
1874 @@ -631,6 +631,7 @@ static int moxart_probe(struct platform_device *pdev)
1875 host->dma_chan_tx, host->dma_chan_rx);
1876 host->have_dma = true;
1877
1878 + memset(&cfg, 0, sizeof(cfg));
1879 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1880 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1881
1882 diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
1883 index d82a519a0cd9a..f9f246c82c974 100644
1884 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
1885 +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
1886 @@ -2013,10 +2013,11 @@ int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
1887 index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
1888 if (index >= mcam->bmap_entries)
1889 break;
1890 + entry = index + 1;
1891 +
1892 if (mcam->entry2cntr_map[index] != req->cntr)
1893 continue;
1894
1895 - entry = index + 1;
1896 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
1897 index, req->cntr);
1898 }
1899 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
1900 index d48292ccda294..9239d767443f2 100644
1901 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
1902 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
1903 @@ -234,18 +234,12 @@ struct ttc_params {
1904
1905 void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
1906 void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
1907 -void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
1908
1909 int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
1910 struct mlx5e_ttc_table *ttc);
1911 void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
1912 struct mlx5e_ttc_table *ttc);
1913
1914 -int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
1915 - struct mlx5e_ttc_table *ttc);
1916 -void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
1917 - struct mlx5e_ttc_table *ttc);
1918 -
1919 void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
1920
1921 void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
1922 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
1923 index c4ac7a9968d16..c3b9278486a1e 100644
1924 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
1925 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
1926 @@ -1123,7 +1123,7 @@ void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
1927 ttc_params->inner_ttc = &priv->fs.inner_ttc;
1928 }
1929
1930 -void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
1931 +static void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
1932 {
1933 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
1934
1935 @@ -1142,8 +1142,8 @@ void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
1936 ft_attr->prio = MLX5E_NIC_PRIO;
1937 }
1938
1939 -int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
1940 - struct mlx5e_ttc_table *ttc)
1941 +static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
1942 + struct mlx5e_ttc_table *ttc)
1943 {
1944 struct mlx5e_flow_table *ft = &ttc->ft;
1945 int err;
1946 @@ -1173,8 +1173,8 @@ err:
1947 return err;
1948 }
1949
1950 -void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
1951 - struct mlx5e_ttc_table *ttc)
1952 +static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
1953 + struct mlx5e_ttc_table *ttc)
1954 {
1955 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
1956 return;
1957 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
1958 index 0fed2419623d1..1f3d12faa2a5b 100644
1959 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
1960 +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
1961 @@ -319,17 +319,6 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
1962 }
1963
1964 mlx5e_set_ttc_basic_params(priv, &ttc_params);
1965 - mlx5e_set_inner_ttc_ft_params(&ttc_params);
1966 - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
1967 - ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
1968 -
1969 - err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
1970 - if (err) {
1971 - netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
1972 - err);
1973 - goto err_destroy_arfs_tables;
1974 - }
1975 -
1976 mlx5e_set_ttc_ft_params(&ttc_params);
1977 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
1978 ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
1979 @@ -338,13 +327,11 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
1980 if (err) {
1981 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
1982 err);
1983 - goto err_destroy_inner_ttc_table;
1984 + goto err_destroy_arfs_tables;
1985 }
1986
1987 return 0;
1988
1989 -err_destroy_inner_ttc_table:
1990 - mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
1991 err_destroy_arfs_tables:
1992 mlx5e_arfs_destroy_tables(priv);
1993
1994 @@ -354,7 +341,6 @@ err_destroy_arfs_tables:
1995 static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
1996 {
1997 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
1998 - mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
1999 mlx5e_arfs_destroy_tables(priv);
2000 }
2001
2002 @@ -379,7 +365,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
2003 if (err)
2004 goto err_destroy_indirect_rqts;
2005
2006 - err = mlx5e_create_indirect_tirs(priv, true);
2007 + err = mlx5e_create_indirect_tirs(priv, false);
2008 if (err)
2009 goto err_destroy_direct_rqts;
2010
2011 diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
2012 index baac016f3ec0b..15591ad5fe4ea 100644
2013 --- a/drivers/net/ethernet/qualcomm/qca_spi.c
2014 +++ b/drivers/net/ethernet/qualcomm/qca_spi.c
2015 @@ -434,7 +434,7 @@ qcaspi_receive(struct qcaspi *qca)
2016 skb_put(qca->rx_skb, retcode);
2017 qca->rx_skb->protocol = eth_type_trans(
2018 qca->rx_skb, qca->rx_skb->dev);
2019 - qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
2020 + skb_checksum_none_assert(qca->rx_skb);
2021 netif_rx_ni(qca->rx_skb);
2022 qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
2023 net_dev->mtu + VLAN_ETH_HLEN);
2024 diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
2025 index 0981068504fa7..ade70f5df496c 100644
2026 --- a/drivers/net/ethernet/qualcomm/qca_uart.c
2027 +++ b/drivers/net/ethernet/qualcomm/qca_uart.c
2028 @@ -107,7 +107,7 @@ qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
2029 skb_put(qca->rx_skb, retcode);
2030 qca->rx_skb->protocol = eth_type_trans(
2031 qca->rx_skb, qca->rx_skb->dev);
2032 - qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
2033 + skb_checksum_none_assert(qca->rx_skb);
2034 netif_rx_ni(qca->rx_skb);
2035 qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
2036 netdev->mtu +
2037 diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
2038 index c610fe21c85c0..31ffec3a59727 100644
2039 --- a/drivers/net/wireless/ath/ath6kl/wmi.c
2040 +++ b/drivers/net/wireless/ath/ath6kl/wmi.c
2041 @@ -2510,8 +2510,10 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
2042 goto free_data_skb;
2043
2044 for (index = 0; index < num_pri_streams; index++) {
2045 - if (WARN_ON(!data_sync_bufs[index].skb))
2046 + if (WARN_ON(!data_sync_bufs[index].skb)) {
2047 + ret = -ENOMEM;
2048 goto free_data_skb;
2049 + }
2050
2051 ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev,
2052 data_sync_bufs[index].
2053 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
2054 index bda042138e967..e6001f0a81a3a 100644
2055 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
2056 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
2057 @@ -2073,7 +2073,7 @@ cleanup:
2058
2059 err = brcmf_pcie_probe(pdev, NULL);
2060 if (err)
2061 - brcmf_err(bus, "probe after resume failed, err=%d\n", err);
2062 + __brcmf_err(NULL, __func__, "probe after resume failed, err=%d\n", err);
2063
2064 return err;
2065 }
2066 diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
2067 index 03791f3fe480c..5e6c5d13319d5 100644
2068 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c
2069 +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
2070 @@ -1038,8 +1038,10 @@ static int rsi_load_9116_firmware(struct rsi_hw *adapter)
2071 }
2072
2073 ta_firmware = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
2074 - if (!ta_firmware)
2075 + if (!ta_firmware) {
2076 + status = -ENOMEM;
2077 goto fail_release_fw;
2078 + }
2079 fw_p = ta_firmware;
2080 instructions_sz = fw_entry->size;
2081 rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", instructions_sz);
2082 diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
2083 index a296f4e0d324a..e8aa3d4bda885 100644
2084 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c
2085 +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
2086 @@ -806,6 +806,7 @@ static int rsi_probe(struct usb_interface *pfunction,
2087 } else {
2088 rsi_dbg(ERR_ZONE, "%s: Unsupported RSI device id 0x%x\n",
2089 __func__, id->idProduct);
2090 + status = -ENODEV;
2091 goto err1;
2092 }
2093
2094 diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
2095 index b8c0f75bfb7ba..dcc3d2393605e 100644
2096 --- a/drivers/nvme/host/rdma.c
2097 +++ b/drivers/nvme/host/rdma.c
2098 @@ -665,13 +665,13 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
2099 if (ret)
2100 return ret;
2101
2102 - ctrl->ctrl.queue_count = nr_io_queues + 1;
2103 - if (ctrl->ctrl.queue_count < 2) {
2104 + if (nr_io_queues == 0) {
2105 dev_err(ctrl->ctrl.device,
2106 "unable to set any I/O queues\n");
2107 return -ENOMEM;
2108 }
2109
2110 + ctrl->ctrl.queue_count = nr_io_queues + 1;
2111 dev_info(ctrl->ctrl.device,
2112 "creating %d I/O queues.\n", nr_io_queues);
2113
2114 diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
2115 index 718152adc6254..f6427a10a9908 100644
2116 --- a/drivers/nvme/host/tcp.c
2117 +++ b/drivers/nvme/host/tcp.c
2118 @@ -1649,13 +1649,13 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
2119 if (ret)
2120 return ret;
2121
2122 - ctrl->queue_count = nr_io_queues + 1;
2123 - if (ctrl->queue_count < 2) {
2124 + if (nr_io_queues == 0) {
2125 dev_err(ctrl->device,
2126 "unable to set any I/O queues\n");
2127 return -ENOMEM;
2128 }
2129
2130 + ctrl->queue_count = nr_io_queues + 1;
2131 dev_info(ctrl->device,
2132 "creating %d I/O queues.\n", nr_io_queues);
2133
2134 diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
2135 index 4e9004fe5c6f3..5e47395afc1d5 100644
2136 --- a/drivers/nvme/target/fabrics-cmd.c
2137 +++ b/drivers/nvme/target/fabrics-cmd.c
2138 @@ -116,6 +116,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
2139 if (!sqsize) {
2140 pr_warn("queue size zero!\n");
2141 req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
2142 + req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
2143 ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
2144 goto err;
2145 }
2146 @@ -250,11 +251,11 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
2147 }
2148
2149 status = nvmet_install_queue(ctrl, req);
2150 - if (status) {
2151 - /* pass back cntlid that had the issue of installing queue */
2152 - req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
2153 + if (status)
2154 goto out_ctrl_put;
2155 - }
2156 +
2157 + /* pass back cntlid for successful completion */
2158 + req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
2159
2160 pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
2161
2162 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2163 index 3c3bc9f584983..58c33b65d451a 100644
2164 --- a/drivers/pci/pci.c
2165 +++ b/drivers/pci/pci.c
2166 @@ -2253,7 +2253,14 @@ static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable
2167 if (enable) {
2168 int error;
2169
2170 - if (pci_pme_capable(dev, state))
2171 + /*
2172 + * Enable PME signaling if the device can signal PME from
2173 + * D3cold regardless of whether or not it can signal PME from
2174 + * the current target state, because that will allow it to
2175 + * signal PME when the hierarchy above it goes into D3cold and
2176 + * the device itself ends up in D3cold as a result of that.
2177 + */
2178 + if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2179 pci_pme_active(dev, true);
2180 else
2181 ret = 1;
2182 @@ -2357,16 +2364,20 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2183 if (dev->current_state == PCI_D3cold)
2184 target_state = PCI_D3cold;
2185
2186 - if (wakeup) {
2187 + if (wakeup && dev->pme_support) {
2188 + pci_power_t state = target_state;
2189 +
2190 /*
2191 * Find the deepest state from which the device can generate
2192 * PME#.
2193 */
2194 - if (dev->pme_support) {
2195 - while (target_state
2196 - && !(dev->pme_support & (1 << target_state)))
2197 - target_state--;
2198 - }
2199 + while (state && !(dev->pme_support & (1 << state)))
2200 + state--;
2201 +
2202 + if (state)
2203 + return state;
2204 + else if (dev->pme_support & 1)
2205 + return PCI_D0;
2206 }
2207
2208 return target_state;
2209 diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
2210 index f40fa0e63b6e5..993e4a4a34b38 100644
2211 --- a/drivers/power/supply/axp288_fuel_gauge.c
2212 +++ b/drivers/power/supply/axp288_fuel_gauge.c
2213 @@ -149,7 +149,7 @@ static int fuel_gauge_reg_readb(struct axp288_fg_info *info, int reg)
2214 }
2215
2216 if (ret < 0) {
2217 - dev_err(&info->pdev->dev, "axp288 reg read err:%d\n", ret);
2218 + dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n", reg, ret);
2219 return ret;
2220 }
2221
2222 @@ -163,7 +163,7 @@ static int fuel_gauge_reg_writeb(struct axp288_fg_info *info, int reg, u8 val)
2223 ret = regmap_write(info->regmap, reg, (unsigned int)val);
2224
2225 if (ret < 0)
2226 - dev_err(&info->pdev->dev, "axp288 reg write err:%d\n", ret);
2227 + dev_err(&info->pdev->dev, "Error writing reg 0x%02x err: %d\n", reg, ret);
2228
2229 return ret;
2230 }
2231 diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
2232 index fa862f0380c41..ab4740c3bf573 100644
2233 --- a/drivers/power/supply/max17042_battery.c
2234 +++ b/drivers/power/supply/max17042_battery.c
2235 @@ -726,7 +726,7 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
2236 struct max17042_config_data *config = chip->pdata->config_data;
2237
2238 max17042_override_por(map, MAX17042_TGAIN, config->tgain);
2239 - max17042_override_por(map, MAx17042_TOFF, config->toff);
2240 + max17042_override_por(map, MAX17042_TOFF, config->toff);
2241 max17042_override_por(map, MAX17042_CGAIN, config->cgain);
2242 max17042_override_por(map, MAX17042_COFF, config->coff);
2243
2244 diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c
2245 index cbadb1c996790..d2a37978fc3a8 100644
2246 --- a/drivers/regulator/vctrl-regulator.c
2247 +++ b/drivers/regulator/vctrl-regulator.c
2248 @@ -37,7 +37,6 @@ struct vctrl_voltage_table {
2249 struct vctrl_data {
2250 struct regulator_dev *rdev;
2251 struct regulator_desc desc;
2252 - struct regulator *ctrl_reg;
2253 bool enabled;
2254 unsigned int min_slew_down_rate;
2255 unsigned int ovp_threshold;
2256 @@ -82,7 +81,12 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV)
2257 static int vctrl_get_voltage(struct regulator_dev *rdev)
2258 {
2259 struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
2260 - int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
2261 + int ctrl_uV;
2262 +
2263 + if (!rdev->supply)
2264 + return -EPROBE_DEFER;
2265 +
2266 + ctrl_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
2267
2268 return vctrl_calc_output_voltage(vctrl, ctrl_uV);
2269 }
2270 @@ -92,14 +96,19 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
2271 unsigned int *selector)
2272 {
2273 struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
2274 - struct regulator *ctrl_reg = vctrl->ctrl_reg;
2275 - int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev);
2276 - int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
2277 + int orig_ctrl_uV;
2278 + int uV;
2279 int ret;
2280
2281 + if (!rdev->supply)
2282 + return -EPROBE_DEFER;
2283 +
2284 + orig_ctrl_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
2285 + uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
2286 +
2287 if (req_min_uV >= uV || !vctrl->ovp_threshold)
2288 /* voltage rising or no OVP */
2289 - return regulator_set_voltage_rdev(ctrl_reg->rdev,
2290 + return regulator_set_voltage_rdev(rdev->supply->rdev,
2291 vctrl_calc_ctrl_voltage(vctrl, req_min_uV),
2292 vctrl_calc_ctrl_voltage(vctrl, req_max_uV),
2293 PM_SUSPEND_ON);
2294 @@ -117,7 +126,7 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
2295 next_uV = max_t(int, req_min_uV, uV - max_drop_uV);
2296 next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV);
2297
2298 - ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
2299 + ret = regulator_set_voltage_rdev(rdev->supply->rdev,
2300 next_ctrl_uV,
2301 next_ctrl_uV,
2302 PM_SUSPEND_ON);
2303 @@ -134,7 +143,7 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
2304
2305 err:
2306 /* Try to go back to original voltage */
2307 - regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV,
2308 + regulator_set_voltage_rdev(rdev->supply->rdev, orig_ctrl_uV, orig_ctrl_uV,
2309 PM_SUSPEND_ON);
2310
2311 return ret;
2312 @@ -151,16 +160,18 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
2313 unsigned int selector)
2314 {
2315 struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
2316 - struct regulator *ctrl_reg = vctrl->ctrl_reg;
2317 unsigned int orig_sel = vctrl->sel;
2318 int ret;
2319
2320 + if (!rdev->supply)
2321 + return -EPROBE_DEFER;
2322 +
2323 if (selector >= rdev->desc->n_voltages)
2324 return -EINVAL;
2325
2326 if (selector >= vctrl->sel || !vctrl->ovp_threshold) {
2327 /* voltage rising or no OVP */
2328 - ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
2329 + ret = regulator_set_voltage_rdev(rdev->supply->rdev,
2330 vctrl->vtable[selector].ctrl,
2331 vctrl->vtable[selector].ctrl,
2332 PM_SUSPEND_ON);
2333 @@ -179,7 +190,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
2334 else
2335 next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
2336
2337 - ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
2338 + ret = regulator_set_voltage_rdev(rdev->supply->rdev,
2339 vctrl->vtable[next_sel].ctrl,
2340 vctrl->vtable[next_sel].ctrl,
2341 PM_SUSPEND_ON);
2342 @@ -202,7 +213,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
2343 err:
2344 if (vctrl->sel != orig_sel) {
2345 /* Try to go back to original voltage */
2346 - if (!regulator_set_voltage_rdev(ctrl_reg->rdev,
2347 + if (!regulator_set_voltage_rdev(rdev->supply->rdev,
2348 vctrl->vtable[orig_sel].ctrl,
2349 vctrl->vtable[orig_sel].ctrl,
2350 PM_SUSPEND_ON))
2351 @@ -234,10 +245,6 @@ static int vctrl_parse_dt(struct platform_device *pdev,
2352 u32 pval;
2353 u32 vrange_ctrl[2];
2354
2355 - vctrl->ctrl_reg = devm_regulator_get(&pdev->dev, "ctrl");
2356 - if (IS_ERR(vctrl->ctrl_reg))
2357 - return PTR_ERR(vctrl->ctrl_reg);
2358 -
2359 ret = of_property_read_u32(np, "ovp-threshold-percent", &pval);
2360 if (!ret) {
2361 vctrl->ovp_threshold = pval;
2362 @@ -315,11 +322,11 @@ static int vctrl_cmp_ctrl_uV(const void *a, const void *b)
2363 return at->ctrl - bt->ctrl;
2364 }
2365
2366 -static int vctrl_init_vtable(struct platform_device *pdev)
2367 +static int vctrl_init_vtable(struct platform_device *pdev,
2368 + struct regulator *ctrl_reg)
2369 {
2370 struct vctrl_data *vctrl = platform_get_drvdata(pdev);
2371 struct regulator_desc *rdesc = &vctrl->desc;
2372 - struct regulator *ctrl_reg = vctrl->ctrl_reg;
2373 struct vctrl_voltage_range *vrange_ctrl = &vctrl->vrange.ctrl;
2374 int n_voltages;
2375 int ctrl_uV;
2376 @@ -395,23 +402,19 @@ static int vctrl_init_vtable(struct platform_device *pdev)
2377 static int vctrl_enable(struct regulator_dev *rdev)
2378 {
2379 struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
2380 - int ret = regulator_enable(vctrl->ctrl_reg);
2381
2382 - if (!ret)
2383 - vctrl->enabled = true;
2384 + vctrl->enabled = true;
2385
2386 - return ret;
2387 + return 0;
2388 }
2389
2390 static int vctrl_disable(struct regulator_dev *rdev)
2391 {
2392 struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
2393 - int ret = regulator_disable(vctrl->ctrl_reg);
2394
2395 - if (!ret)
2396 - vctrl->enabled = false;
2397 + vctrl->enabled = false;
2398
2399 - return ret;
2400 + return 0;
2401 }
2402
2403 static int vctrl_is_enabled(struct regulator_dev *rdev)
2404 @@ -447,6 +450,7 @@ static int vctrl_probe(struct platform_device *pdev)
2405 struct regulator_desc *rdesc;
2406 struct regulator_config cfg = { };
2407 struct vctrl_voltage_range *vrange_ctrl;
2408 + struct regulator *ctrl_reg;
2409 int ctrl_uV;
2410 int ret;
2411
2412 @@ -461,15 +465,20 @@ static int vctrl_probe(struct platform_device *pdev)
2413 if (ret)
2414 return ret;
2415
2416 + ctrl_reg = devm_regulator_get(&pdev->dev, "ctrl");
2417 + if (IS_ERR(ctrl_reg))
2418 + return PTR_ERR(ctrl_reg);
2419 +
2420 vrange_ctrl = &vctrl->vrange.ctrl;
2421
2422 rdesc = &vctrl->desc;
2423 rdesc->name = "vctrl";
2424 rdesc->type = REGULATOR_VOLTAGE;
2425 rdesc->owner = THIS_MODULE;
2426 + rdesc->supply_name = "ctrl";
2427
2428 - if ((regulator_get_linear_step(vctrl->ctrl_reg) == 1) ||
2429 - (regulator_count_voltages(vctrl->ctrl_reg) == -EINVAL)) {
2430 + if ((regulator_get_linear_step(ctrl_reg) == 1) ||
2431 + (regulator_count_voltages(ctrl_reg) == -EINVAL)) {
2432 rdesc->continuous_voltage_range = true;
2433 rdesc->ops = &vctrl_ops_cont;
2434 } else {
2435 @@ -486,11 +495,12 @@ static int vctrl_probe(struct platform_device *pdev)
2436 cfg.init_data = init_data;
2437
2438 if (!rdesc->continuous_voltage_range) {
2439 - ret = vctrl_init_vtable(pdev);
2440 + ret = vctrl_init_vtable(pdev, ctrl_reg);
2441 if (ret)
2442 return ret;
2443
2444 - ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
2445 + /* Use locked consumer API when not in regulator framework */
2446 + ctrl_uV = regulator_get_voltage(ctrl_reg);
2447 if (ctrl_uV < 0) {
2448 dev_err(&pdev->dev, "failed to get control voltage\n");
2449 return ctrl_uV;
2450 @@ -513,6 +523,9 @@ static int vctrl_probe(struct platform_device *pdev)
2451 }
2452 }
2453
2454 + /* Drop ctrl-supply here in favor of regulator core managed supply */
2455 + devm_regulator_put(ctrl_reg);
2456 +
2457 vctrl->rdev = devm_regulator_register(&pdev->dev, rdesc, &cfg);
2458 if (IS_ERR(vctrl->rdev)) {
2459 ret = PTR_ERR(vctrl->rdev);
2460 diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
2461 index 5734a78dbb8e6..7950ac59b1744 100644
2462 --- a/drivers/s390/cio/css.c
2463 +++ b/drivers/s390/cio/css.c
2464 @@ -426,9 +426,26 @@ static ssize_t pimpampom_show(struct device *dev,
2465 }
2466 static DEVICE_ATTR_RO(pimpampom);
2467
2468 +static ssize_t dev_busid_show(struct device *dev,
2469 + struct device_attribute *attr,
2470 + char *buf)
2471 +{
2472 + struct subchannel *sch = to_subchannel(dev);
2473 + struct pmcw *pmcw = &sch->schib.pmcw;
2474 +
2475 + if ((pmcw->st == SUBCHANNEL_TYPE_IO ||
2476 + pmcw->st == SUBCHANNEL_TYPE_MSG) && pmcw->dnv)
2477 + return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
2478 + pmcw->dev);
2479 + else
2480 + return sysfs_emit(buf, "none\n");
2481 +}
2482 +static DEVICE_ATTR_RO(dev_busid);
2483 +
2484 static struct attribute *io_subchannel_type_attrs[] = {
2485 &dev_attr_chpids.attr,
2486 &dev_attr_pimpampom.attr,
2487 + &dev_attr_dev_busid.attr,
2488 NULL,
2489 };
2490 ATTRIBUTE_GROUPS(io_subchannel_type);
2491 diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
2492 index 51850cc68b701..aa24237a78405 100644
2493 --- a/drivers/soc/qcom/rpmhpd.c
2494 +++ b/drivers/soc/qcom/rpmhpd.c
2495 @@ -235,12 +235,11 @@ static int rpmhpd_power_on(struct generic_pm_domain *domain)
2496 static int rpmhpd_power_off(struct generic_pm_domain *domain)
2497 {
2498 struct rpmhpd *pd = domain_to_rpmhpd(domain);
2499 - int ret = 0;
2500 + int ret;
2501
2502 mutex_lock(&rpmhpd_lock);
2503
2504 - ret = rpmhpd_aggregate_corner(pd, pd->level[0]);
2505 -
2506 + ret = rpmhpd_aggregate_corner(pd, 0);
2507 if (!ret)
2508 pd->enabled = false;
2509
2510 diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
2511 index 70c3c90b997c9..c428d0f78816e 100644
2512 --- a/drivers/soc/qcom/smsm.c
2513 +++ b/drivers/soc/qcom/smsm.c
2514 @@ -109,7 +109,7 @@ struct smsm_entry {
2515 DECLARE_BITMAP(irq_enabled, 32);
2516 DECLARE_BITMAP(irq_rising, 32);
2517 DECLARE_BITMAP(irq_falling, 32);
2518 - u32 last_value;
2519 + unsigned long last_value;
2520
2521 u32 *remote_state;
2522 u32 *subscription;
2523 @@ -204,8 +204,7 @@ static irqreturn_t smsm_intr(int irq, void *data)
2524 u32 val;
2525
2526 val = readl(entry->remote_state);
2527 - changed = val ^ entry->last_value;
2528 - entry->last_value = val;
2529 + changed = val ^ xchg(&entry->last_value, val);
2530
2531 for_each_set_bit(i, entry->irq_enabled, 32) {
2532 if (!(changed & BIT(i)))
2533 @@ -266,6 +265,12 @@ static void smsm_unmask_irq(struct irq_data *irqd)
2534 struct qcom_smsm *smsm = entry->smsm;
2535 u32 val;
2536
2537 + /* Make sure our last cached state is up-to-date */
2538 + if (readl(entry->remote_state) & BIT(irq))
2539 + set_bit(irq, &entry->last_value);
2540 + else
2541 + clear_bit(irq, &entry->last_value);
2542 +
2543 set_bit(irq, entry->irq_enabled);
2544
2545 if (entry->subscription) {
2546 diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig
2547 index b71b73bf5fc5c..785990720479c 100644
2548 --- a/drivers/soc/rockchip/Kconfig
2549 +++ b/drivers/soc/rockchip/Kconfig
2550 @@ -6,8 +6,8 @@ if ARCH_ROCKCHIP || COMPILE_TEST
2551 #
2552
2553 config ROCKCHIP_GRF
2554 - bool
2555 - default y
2556 + bool "Rockchip General Register Files support" if COMPILE_TEST
2557 + default y if ARCH_ROCKCHIP
2558 help
2559 The General Register Files are a central component providing
2560 special additional settings registers for a lot of soc-components.
2561 diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
2562 index 40dccc580e866..3e0200618af30 100644
2563 --- a/drivers/spi/spi-fsl-dspi.c
2564 +++ b/drivers/spi/spi-fsl-dspi.c
2565 @@ -423,6 +423,7 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
2566 goto err_rx_dma_buf;
2567 }
2568
2569 + memset(&cfg, 0, sizeof(cfg));
2570 cfg.src_addr = phy_addr + SPI_POPR;
2571 cfg.dst_addr = phy_addr + SPI_PUSHR;
2572 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2573 diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
2574 index 8272bde5d706f..b5268b0d7b4c8 100644
2575 --- a/drivers/spi/spi-pic32.c
2576 +++ b/drivers/spi/spi-pic32.c
2577 @@ -361,6 +361,7 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
2578 struct dma_slave_config cfg;
2579 int ret;
2580
2581 + memset(&cfg, 0, sizeof(cfg));
2582 cfg.device_fc = true;
2583 cfg.src_addr = pic32s->dma_base + buf_offset;
2584 cfg.dst_addr = pic32s->dma_base + buf_offset;
2585 diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
2586 index 09f983524d51b..e804a3854c351 100644
2587 --- a/drivers/spi/spi-sprd-adi.c
2588 +++ b/drivers/spi/spi-sprd-adi.c
2589 @@ -102,7 +102,7 @@
2590 #define HWRST_STATUS_WATCHDOG 0xf0
2591
2592 /* Use default timeout 50 ms that converts to watchdog values */
2593 -#define WDG_LOAD_VAL ((50 * 1000) / 32768)
2594 +#define WDG_LOAD_VAL ((50 * 32768) / 1000)
2595 #define WDG_LOAD_MASK GENMASK(15, 0)
2596 #define WDG_UNLOCK_KEY 0xe551
2597
2598 diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
2599 index 5cf6993ddce57..1ced6eb8b3303 100644
2600 --- a/drivers/spi/spi-zynq-qspi.c
2601 +++ b/drivers/spi/spi-zynq-qspi.c
2602 @@ -533,7 +533,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
2603 zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
2604 zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
2605 ZYNQ_QSPI_IXR_RXTX_MASK);
2606 - if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
2607 + if (!wait_for_completion_timeout(&xqspi->data_completion,
2608 msecs_to_jiffies(1000)))
2609 err = -ETIMEDOUT;
2610 }
2611 @@ -551,7 +551,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
2612 zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
2613 zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
2614 ZYNQ_QSPI_IXR_RXTX_MASK);
2615 - if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
2616 + if (!wait_for_completion_timeout(&xqspi->data_completion,
2617 msecs_to_jiffies(1000)))
2618 err = -ETIMEDOUT;
2619 }
2620 @@ -567,7 +567,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
2621 zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
2622 zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
2623 ZYNQ_QSPI_IXR_RXTX_MASK);
2624 - if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
2625 + if (!wait_for_completion_timeout(&xqspi->data_completion,
2626 msecs_to_jiffies(1000)))
2627 err = -ETIMEDOUT;
2628
2629 @@ -591,7 +591,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
2630 zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
2631 zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
2632 ZYNQ_QSPI_IXR_RXTX_MASK);
2633 - if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
2634 + if (!wait_for_completion_timeout(&xqspi->data_completion,
2635 msecs_to_jiffies(1000)))
2636 err = -ETIMEDOUT;
2637 }
2638 diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
2639 index b053345dfd1ae..13e705b53217d 100644
2640 --- a/drivers/tty/serial/fsl_lpuart.c
2641 +++ b/drivers/tty/serial/fsl_lpuart.c
2642 @@ -2414,7 +2414,7 @@ static int lpuart_probe(struct platform_device *pdev)
2643 return PTR_ERR(sport->port.membase);
2644
2645 sport->port.membase += sdata->reg_off;
2646 - sport->port.mapbase = res->start;
2647 + sport->port.mapbase = res->start + sdata->reg_off;
2648 sport->port.dev = &pdev->dev;
2649 sport->port.type = PORT_LPUART;
2650 sport->devtype = sdata->devtype;
2651 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
2652 index cee7514c3aaf2..ddfe873b5fccb 100644
2653 --- a/drivers/tty/tty_io.c
2654 +++ b/drivers/tty/tty_io.c
2655 @@ -2176,8 +2176,6 @@ static int tty_fasync(int fd, struct file *filp, int on)
2656 * Locking:
2657 * Called functions take tty_ldiscs_lock
2658 * current->signal->tty check is safe without locks
2659 - *
2660 - * FIXME: may race normal receive processing
2661 */
2662
2663 static int tiocsti(struct tty_struct *tty, char __user *p)
2664 @@ -2193,8 +2191,10 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
2665 ld = tty_ldisc_ref_wait(tty);
2666 if (!ld)
2667 return -EIO;
2668 + tty_buffer_lock_exclusive(tty->port);
2669 if (ld->ops->receive_buf)
2670 ld->ops->receive_buf(tty, &ch, &mbz, 1);
2671 + tty_buffer_unlock_exclusive(tty->port);
2672 tty_ldisc_deref(ld);
2673 return 0;
2674 }
2675 diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
2676 index 194ffb1ed4620..d7714c94b1196 100644
2677 --- a/drivers/usb/gadget/udc/at91_udc.c
2678 +++ b/drivers/usb/gadget/udc/at91_udc.c
2679 @@ -1878,7 +1878,9 @@ static int at91udc_probe(struct platform_device *pdev)
2680 clk_disable(udc->iclk);
2681
2682 /* request UDC and maybe VBUS irqs */
2683 - udc->udp_irq = platform_get_irq(pdev, 0);
2684 + udc->udp_irq = retval = platform_get_irq(pdev, 0);
2685 + if (retval < 0)
2686 + goto err_unprepare_iclk;
2687 retval = devm_request_irq(dev, udc->udp_irq, at91_udc_irq, 0,
2688 driver_name, udc);
2689 if (retval) {
2690 diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
2691 index 3d33499db50b5..845aead48d85b 100644
2692 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c
2693 +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
2694 @@ -565,7 +565,8 @@ static int bdc_probe(struct platform_device *pdev)
2695 if (ret) {
2696 dev_err(dev,
2697 "No suitable DMA config available, abort\n");
2698 - return -ENOTSUPP;
2699 + ret = -ENOTSUPP;
2700 + goto phycleanup;
2701 }
2702 dev_dbg(dev, "Using 32-bit address\n");
2703 }
2704 diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
2705 index 35e02a8d0091a..bdba3f48c0527 100644
2706 --- a/drivers/usb/gadget/udc/mv_u3d_core.c
2707 +++ b/drivers/usb/gadget/udc/mv_u3d_core.c
2708 @@ -1922,14 +1922,6 @@ static int mv_u3d_probe(struct platform_device *dev)
2709 goto err_get_irq;
2710 }
2711 u3d->irq = r->start;
2712 - if (request_irq(u3d->irq, mv_u3d_irq,
2713 - IRQF_SHARED, driver_name, u3d)) {
2714 - u3d->irq = 0;
2715 - dev_err(&dev->dev, "Request irq %d for u3d failed\n",
2716 - u3d->irq);
2717 - retval = -ENODEV;
2718 - goto err_request_irq;
2719 - }
2720
2721 /* initialize gadget structure */
2722 u3d->gadget.ops = &mv_u3d_ops; /* usb_gadget_ops */
2723 @@ -1942,6 +1934,15 @@ static int mv_u3d_probe(struct platform_device *dev)
2724
2725 mv_u3d_eps_init(u3d);
2726
2727 + if (request_irq(u3d->irq, mv_u3d_irq,
2728 + IRQF_SHARED, driver_name, u3d)) {
2729 + u3d->irq = 0;
2730 + dev_err(&dev->dev, "Request irq %d for u3d failed\n",
2731 + u3d->irq);
2732 + retval = -ENODEV;
2733 + goto err_request_irq;
2734 + }
2735 +
2736 /* external vbus detection */
2737 if (u3d->vbus) {
2738 u3d->clock_gating = 1;
2739 @@ -1965,8 +1966,8 @@ static int mv_u3d_probe(struct platform_device *dev)
2740
2741 err_unregister:
2742 free_irq(u3d->irq, u3d);
2743 -err_request_irq:
2744 err_get_irq:
2745 +err_request_irq:
2746 kfree(u3d->status_req);
2747 err_alloc_status_req:
2748 kfree(u3d->eps);
2749 diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
2750 index 08a93cf68efff..b6653bc7acc26 100644
2751 --- a/drivers/usb/gadget/udc/renesas_usb3.c
2752 +++ b/drivers/usb/gadget/udc/renesas_usb3.c
2753 @@ -2692,10 +2692,15 @@ static const struct renesas_usb3_priv renesas_usb3_priv_r8a77990 = {
2754
2755 static const struct of_device_id usb3_of_match[] = {
2756 {
2757 + .compatible = "renesas,r8a774c0-usb3-peri",
2758 + .data = &renesas_usb3_priv_r8a77990,
2759 + }, {
2760 .compatible = "renesas,r8a7795-usb3-peri",
2761 .data = &renesas_usb3_priv_gen3,
2762 - },
2763 - {
2764 + }, {
2765 + .compatible = "renesas,r8a77990-usb3-peri",
2766 + .data = &renesas_usb3_priv_r8a77990,
2767 + }, {
2768 .compatible = "renesas,rcar-gen3-usb3-peri",
2769 .data = &renesas_usb3_priv_gen3,
2770 },
2771 @@ -2704,18 +2709,10 @@ static const struct of_device_id usb3_of_match[] = {
2772 MODULE_DEVICE_TABLE(of, usb3_of_match);
2773
2774 static const struct soc_device_attribute renesas_usb3_quirks_match[] = {
2775 - {
2776 - .soc_id = "r8a774c0",
2777 - .data = &renesas_usb3_priv_r8a77990,
2778 - },
2779 {
2780 .soc_id = "r8a7795", .revision = "ES1.*",
2781 .data = &renesas_usb3_priv_r8a7795_es1,
2782 },
2783 - {
2784 - .soc_id = "r8a77990",
2785 - .data = &renesas_usb3_priv_r8a77990,
2786 - },
2787 { /* sentinel */ },
2788 };
2789
2790 diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
2791 index a319b1df3011c..3626758b3e2aa 100644
2792 --- a/drivers/usb/host/ehci-orion.c
2793 +++ b/drivers/usb/host/ehci-orion.c
2794 @@ -264,8 +264,11 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
2795 * the clock does not exists.
2796 */
2797 priv->clk = devm_clk_get(&pdev->dev, NULL);
2798 - if (!IS_ERR(priv->clk))
2799 - clk_prepare_enable(priv->clk);
2800 + if (!IS_ERR(priv->clk)) {
2801 + err = clk_prepare_enable(priv->clk);
2802 + if (err)
2803 + goto err_put_hcd;
2804 + }
2805
2806 priv->phy = devm_phy_optional_get(&pdev->dev, "usb");
2807 if (IS_ERR(priv->phy)) {
2808 @@ -311,6 +314,7 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
2809 err_dis_clk:
2810 if (!IS_ERR(priv->clk))
2811 clk_disable_unprepare(priv->clk);
2812 +err_put_hcd:
2813 usb_put_hcd(hcd);
2814 err:
2815 dev_err(&pdev->dev, "init %s fail, %d\n",
2816 diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
2817 index fb6f5e9ae5c62..fed43c6dd85cc 100644
2818 --- a/drivers/usb/host/ohci-tmio.c
2819 +++ b/drivers/usb/host/ohci-tmio.c
2820 @@ -202,6 +202,9 @@ static int ohci_hcd_tmio_drv_probe(struct platform_device *dev)
2821 if (!cell)
2822 return -EINVAL;
2823
2824 + if (irq < 0)
2825 + return irq;
2826 +
2827 hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev_name(&dev->dev));
2828 if (!hcd) {
2829 ret = -ENOMEM;
2830 diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
2831 index b451f4695f3f0..446c7bf67873c 100644
2832 --- a/drivers/usb/phy/phy-fsl-usb.c
2833 +++ b/drivers/usb/phy/phy-fsl-usb.c
2834 @@ -873,6 +873,8 @@ int usb_otg_start(struct platform_device *pdev)
2835
2836 /* request irq */
2837 p_otg->irq = platform_get_irq(pdev, 0);
2838 + if (p_otg->irq < 0)
2839 + return p_otg->irq;
2840 status = request_irq(p_otg->irq, fsl_otg_isr,
2841 IRQF_SHARED, driver_name, p_otg);
2842 if (status) {
2843 diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
2844 index baebb1f5a9737..a3e043e3e4aae 100644
2845 --- a/drivers/usb/phy/phy-tahvo.c
2846 +++ b/drivers/usb/phy/phy-tahvo.c
2847 @@ -393,7 +393,9 @@ static int tahvo_usb_probe(struct platform_device *pdev)
2848
2849 dev_set_drvdata(&pdev->dev, tu);
2850
2851 - tu->irq = platform_get_irq(pdev, 0);
2852 + tu->irq = ret = platform_get_irq(pdev, 0);
2853 + if (ret < 0)
2854 + return ret;
2855 ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt,
2856 IRQF_ONESHOT,
2857 "tahvo-vbus", tu);
2858 diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
2859 index 9a7e655d52801..9337c30f0743b 100644
2860 --- a/drivers/usb/phy/phy-twl6030-usb.c
2861 +++ b/drivers/usb/phy/phy-twl6030-usb.c
2862 @@ -348,6 +348,11 @@ static int twl6030_usb_probe(struct platform_device *pdev)
2863 twl->irq2 = platform_get_irq(pdev, 1);
2864 twl->linkstat = MUSB_UNKNOWN;
2865
2866 + if (twl->irq1 < 0)
2867 + return twl->irq1;
2868 + if (twl->irq2 < 0)
2869 + return twl->irq2;
2870 +
2871 twl->comparator.set_vbus = twl6030_set_vbus;
2872 twl->comparator.start_srp = twl6030_start_srp;
2873
2874 diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
2875 index 746eebc411dfa..047f80ee37e81 100644
2876 --- a/drivers/video/backlight/pwm_bl.c
2877 +++ b/drivers/video/backlight/pwm_bl.c
2878 @@ -415,6 +415,33 @@ static bool pwm_backlight_is_linear(struct platform_pwm_backlight_data *data)
2879 static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
2880 {
2881 struct device_node *node = pb->dev->of_node;
2882 + bool active = true;
2883 +
2884 + /*
2885 + * If the enable GPIO is present, observable (either as input
2886 + * or output) and off then the backlight is not currently active.
2887 + * */
2888 + if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0)
2889 + active = false;
2890 +
2891 + if (!regulator_is_enabled(pb->power_supply))
2892 + active = false;
2893 +
2894 + if (!pwm_is_enabled(pb->pwm))
2895 + active = false;
2896 +
2897 + /*
2898 + * Synchronize the enable_gpio with the observed state of the
2899 + * hardware.
2900 + */
2901 + if (pb->enable_gpio)
2902 + gpiod_direction_output(pb->enable_gpio, active);
2903 +
2904 + /*
2905 + * Do not change pb->enabled here! pb->enabled essentially
2906 + * tells us if we own one of the regulator's use counts and
2907 + * right now we do not.
2908 + */
2909
2910 /* Not booted with device tree or no phandle link to the node */
2911 if (!node || !node->phandle)
2912 @@ -426,20 +453,7 @@ static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
2913 * assume that another driver will enable the backlight at the
2914 * appropriate time. Therefore, if it is disabled, keep it so.
2915 */
2916 -
2917 - /* if the enable GPIO is disabled, do not enable the backlight */
2918 - if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0)
2919 - return FB_BLANK_POWERDOWN;
2920 -
2921 - /* The regulator is disabled, do not enable the backlight */
2922 - if (!regulator_is_enabled(pb->power_supply))
2923 - return FB_BLANK_POWERDOWN;
2924 -
2925 - /* The PWM is disabled, keep it like this */
2926 - if (!pwm_is_enabled(pb->pwm))
2927 - return FB_BLANK_POWERDOWN;
2928 -
2929 - return FB_BLANK_UNBLANK;
2930 + return active ? FB_BLANK_UNBLANK: FB_BLANK_POWERDOWN;
2931 }
2932
2933 static int pwm_backlight_probe(struct platform_device *pdev)
2934 @@ -508,18 +522,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
2935 pb->enable_gpio = gpio_to_desc(data->enable_gpio);
2936 }
2937
2938 - /*
2939 - * If the GPIO is not known to be already configured as output, that
2940 - * is, if gpiod_get_direction returns either 1 or -EINVAL, change the
2941 - * direction to output and set the GPIO as active.
2942 - * Do not force the GPIO to active when it was already output as it
2943 - * could cause backlight flickering or we would enable the backlight too
2944 - * early. Leave the decision of the initial backlight state for later.
2945 - */
2946 - if (pb->enable_gpio &&
2947 - gpiod_get_direction(pb->enable_gpio) != 0)
2948 - gpiod_direction_output(pb->enable_gpio, 1);
2949 -
2950 pb->power_supply = devm_regulator_get(&pdev->dev, "power");
2951 if (IS_ERR(pb->power_supply)) {
2952 ret = PTR_ERR(pb->power_supply);
2953 diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
2954 index d87de5d467189..03b1bf994cc90 100644
2955 --- a/drivers/video/fbdev/core/fbmem.c
2956 +++ b/drivers/video/fbdev/core/fbmem.c
2957 @@ -957,6 +957,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
2958 struct fb_var_screeninfo old_var;
2959 struct fb_videomode mode;
2960 struct fb_event event;
2961 + u32 unused;
2962
2963 if (var->activate & FB_ACTIVATE_INV_MODE) {
2964 struct fb_videomode mode1, mode2;
2965 @@ -1003,6 +1004,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
2966 if (var->xres < 8 || var->yres < 8)
2967 return -EINVAL;
2968
2969 + /* Too huge resolution causes multiplication overflow. */
2970 + if (check_mul_overflow(var->xres, var->yres, &unused) ||
2971 + check_mul_overflow(var->xres_virtual, var->yres_virtual, &unused))
2972 + return -EINVAL;
2973 +
2974 ret = info->fbops->fb_check_var(var, info);
2975
2976 if (ret)
2977 diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
2978 index 9bd03a2310328..171ad8b42107e 100644
2979 --- a/fs/cifs/cifs_unicode.c
2980 +++ b/fs/cifs/cifs_unicode.c
2981 @@ -358,14 +358,9 @@ cifs_strndup_from_utf16(const char *src, const int maxlen,
2982 if (!dst)
2983 return NULL;
2984 cifs_from_utf16(dst, (__le16 *) src, len, maxlen, codepage,
2985 - NO_MAP_UNI_RSVD);
2986 + NO_MAP_UNI_RSVD);
2987 } else {
2988 - len = strnlen(src, maxlen);
2989 - len++;
2990 - dst = kmalloc(len, GFP_KERNEL);
2991 - if (!dst)
2992 - return NULL;
2993 - strlcpy(dst, src, len);
2994 + dst = kstrndup(src, maxlen, GFP_KERNEL);
2995 }
2996
2997 return dst;
2998 diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
2999 index 943637298f650..a32c5c7dcfd89 100644
3000 --- a/fs/debugfs/file.c
3001 +++ b/fs/debugfs/file.c
3002 @@ -178,8 +178,10 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
3003 if (!fops_get(real_fops)) {
3004 #ifdef CONFIG_MODULES
3005 if (real_fops->owner &&
3006 - real_fops->owner->state == MODULE_STATE_GOING)
3007 + real_fops->owner->state == MODULE_STATE_GOING) {
3008 + r = -ENXIO;
3009 goto out;
3010 + }
3011 #endif
3012
3013 /* Huh? Module did not clean up after itself at exit? */
3014 @@ -313,8 +315,10 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
3015 if (!fops_get(real_fops)) {
3016 #ifdef CONFIG_MODULES
3017 if (real_fops->owner &&
3018 - real_fops->owner->state == MODULE_STATE_GOING)
3019 + real_fops->owner->state == MODULE_STATE_GOING) {
3020 + r = -ENXIO;
3021 goto out;
3022 + }
3023 #endif
3024
3025 /* Huh? Module did not cleanup after itself at exit? */
3026 diff --git a/fs/fcntl.c b/fs/fcntl.c
3027 index 3dc90e5293e65..fa0fdd829613c 100644
3028 --- a/fs/fcntl.c
3029 +++ b/fs/fcntl.c
3030 @@ -993,13 +993,14 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
3031 {
3032 while (fa) {
3033 struct fown_struct *fown;
3034 + unsigned long flags;
3035
3036 if (fa->magic != FASYNC_MAGIC) {
3037 printk(KERN_ERR "kill_fasync: bad magic number in "
3038 "fasync_struct!\n");
3039 return;
3040 }
3041 - read_lock(&fa->fa_lock);
3042 + read_lock_irqsave(&fa->fa_lock, flags);
3043 if (fa->fa_file) {
3044 fown = &fa->fa_file->f_owner;
3045 /* Don't send SIGURG to processes which have not set a
3046 @@ -1008,7 +1009,7 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
3047 if (!(sig == SIGURG && fown->signum == 0))
3048 send_sigio(fown, fa->fa_fd, band);
3049 }
3050 - read_unlock(&fa->fa_lock);
3051 + read_unlock_irqrestore(&fa->fa_lock, flags);
3052 fa = rcu_dereference(fa->fa_next);
3053 }
3054 }
3055 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
3056 index 0883e5b24c901..1de59998e0e75 100644
3057 --- a/fs/fuse/file.c
3058 +++ b/fs/fuse/file.c
3059 @@ -193,12 +193,11 @@ void fuse_finish_open(struct inode *inode, struct file *file)
3060 struct fuse_file *ff = file->private_data;
3061 struct fuse_conn *fc = get_fuse_conn(inode);
3062
3063 - if (!(ff->open_flags & FOPEN_KEEP_CACHE))
3064 - invalidate_inode_pages2(inode->i_mapping);
3065 if (ff->open_flags & FOPEN_STREAM)
3066 stream_open(inode, file);
3067 else if (ff->open_flags & FOPEN_NONSEEKABLE)
3068 nonseekable_open(inode, file);
3069 +
3070 if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
3071 struct fuse_inode *fi = get_fuse_inode(inode);
3072
3073 @@ -206,10 +205,14 @@ void fuse_finish_open(struct inode *inode, struct file *file)
3074 fi->attr_version = atomic64_inc_return(&fc->attr_version);
3075 i_size_write(inode, 0);
3076 spin_unlock(&fi->lock);
3077 + truncate_pagecache(inode, 0);
3078 fuse_invalidate_attr(inode);
3079 if (fc->writeback_cache)
3080 file_update_time(file);
3081 + } else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) {
3082 + invalidate_inode_pages2(inode->i_mapping);
3083 }
3084 +
3085 if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
3086 fuse_link_write_file(file);
3087 }
3088 @@ -3185,7 +3188,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3089
3090 static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
3091 {
3092 - int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
3093 + int err = filemap_write_and_wait_range(inode->i_mapping, start, -1);
3094
3095 if (!err)
3096 fuse_sync_writes(inode);
3097 diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c
3098 index bd0cc3dcc9807..2d18246f67266 100644
3099 --- a/fs/iomap/swapfile.c
3100 +++ b/fs/iomap/swapfile.c
3101 @@ -30,11 +30,16 @@ static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
3102 {
3103 struct iomap *iomap = &isi->iomap;
3104 unsigned long nr_pages;
3105 + unsigned long max_pages;
3106 uint64_t first_ppage;
3107 uint64_t first_ppage_reported;
3108 uint64_t next_ppage;
3109 int error;
3110
3111 + if (unlikely(isi->nr_pages >= isi->sis->max))
3112 + return 0;
3113 + max_pages = isi->sis->max - isi->nr_pages;
3114 +
3115 /*
3116 * Round the start up and the end down so that the physical
3117 * extent aligns to a page boundary.
3118 @@ -47,6 +52,7 @@ static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
3119 if (first_ppage >= next_ppage)
3120 return 0;
3121 nr_pages = next_ppage - first_ppage;
3122 + nr_pages = min(nr_pages, max_pages);
3123
3124 /*
3125 * Calculate how much swap space we're adding; the first page contains
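
The swapfile hunk stops iomap_swapfile_add_extent() from adding more pages than the swap map has room for: once isi->nr_pages reaches sis->max it adds nothing, and otherwise it clamps the extent to the remaining budget. A hedged sketch of that clamp (the names below are illustrative, not the kernel's):

    #include <stdio.h>

    /* Clamp an extent to the remaining budget, in the spirit of the
     * isi->sis->max / isi->nr_pages check above. */
    static unsigned long add_extent(unsigned long nr_pages,
                                    unsigned long *used, unsigned long budget)
    {
            unsigned long max_pages;

            if (*used >= budget)
                    return 0;               /* budget exhausted: add nothing */
            max_pages = budget - *used;
            if (nr_pages > max_pages)
                    nr_pages = max_pages;   /* clamp instead of overshooting */
            *used += nr_pages;
            return nr_pages;
    }

    int main(void)
    {
            unsigned long used = 0;

            printf("%lu\n", add_extent(40, &used, 100));    /* 40 */
            printf("%lu\n", add_extent(80, &used, 100));    /* clamped to 60 */
            printf("%lu\n", add_extent(10, &used, 100));    /* 0, budget gone */
            return 0;
    }
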
3126 diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
3127 index 62c0462dc89f3..bf30f6ce8dd10 100644
3128 --- a/fs/isofs/inode.c
3129 +++ b/fs/isofs/inode.c
3130 @@ -155,7 +155,6 @@ struct iso9660_options{
3131 unsigned int overriderockperm:1;
3132 unsigned int uid_set:1;
3133 unsigned int gid_set:1;
3134 - unsigned int utf8:1;
3135 unsigned char map;
3136 unsigned char check;
3137 unsigned int blocksize;
3138 @@ -355,7 +354,6 @@ static int parse_options(char *options, struct iso9660_options *popt)
3139 popt->gid = GLOBAL_ROOT_GID;
3140 popt->uid = GLOBAL_ROOT_UID;
3141 popt->iocharset = NULL;
3142 - popt->utf8 = 0;
3143 popt->overriderockperm = 0;
3144 popt->session=-1;
3145 popt->sbsector=-1;
3146 @@ -388,10 +386,13 @@ static int parse_options(char *options, struct iso9660_options *popt)
3147 case Opt_cruft:
3148 popt->cruft = 1;
3149 break;
3150 +#ifdef CONFIG_JOLIET
3151 case Opt_utf8:
3152 - popt->utf8 = 1;
3153 + kfree(popt->iocharset);
3154 + popt->iocharset = kstrdup("utf8", GFP_KERNEL);
3155 + if (!popt->iocharset)
3156 + return 0;
3157 break;
3158 -#ifdef CONFIG_JOLIET
3159 case Opt_iocharset:
3160 kfree(popt->iocharset);
3161 popt->iocharset = match_strdup(&args[0]);
3162 @@ -494,7 +495,6 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
3163 if (sbi->s_nocompress) seq_puts(m, ",nocompress");
3164 if (sbi->s_overriderockperm) seq_puts(m, ",overriderockperm");
3165 if (sbi->s_showassoc) seq_puts(m, ",showassoc");
3166 - if (sbi->s_utf8) seq_puts(m, ",utf8");
3167
3168 if (sbi->s_check) seq_printf(m, ",check=%c", sbi->s_check);
3169 if (sbi->s_mapping) seq_printf(m, ",map=%c", sbi->s_mapping);
3170 @@ -517,9 +517,10 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
3171 seq_printf(m, ",fmode=%o", sbi->s_fmode);
3172
3173 #ifdef CONFIG_JOLIET
3174 - if (sbi->s_nls_iocharset &&
3175 - strcmp(sbi->s_nls_iocharset->charset, CONFIG_NLS_DEFAULT) != 0)
3176 + if (sbi->s_nls_iocharset)
3177 seq_printf(m, ",iocharset=%s", sbi->s_nls_iocharset->charset);
3178 + else
3179 + seq_puts(m, ",iocharset=utf8");
3180 #endif
3181 return 0;
3182 }
3183 @@ -867,14 +868,13 @@ root_found:
3184 sbi->s_nls_iocharset = NULL;
3185
3186 #ifdef CONFIG_JOLIET
3187 - if (joliet_level && opt.utf8 == 0) {
3188 + if (joliet_level) {
3189 char *p = opt.iocharset ? opt.iocharset : CONFIG_NLS_DEFAULT;
3190 - sbi->s_nls_iocharset = load_nls(p);
3191 - if (! sbi->s_nls_iocharset) {
3192 - /* Fail only if explicit charset specified */
3193 - if (opt.iocharset)
3194 + if (strcmp(p, "utf8") != 0) {
3195 + sbi->s_nls_iocharset = opt.iocharset ?
3196 + load_nls(opt.iocharset) : load_nls_default();
3197 + if (!sbi->s_nls_iocharset)
3198 goto out_freesbi;
3199 - sbi->s_nls_iocharset = load_nls_default();
3200 }
3201 }
3202 #endif
3203 @@ -890,7 +890,6 @@ root_found:
3204 sbi->s_gid = opt.gid;
3205 sbi->s_uid_set = opt.uid_set;
3206 sbi->s_gid_set = opt.gid_set;
3207 - sbi->s_utf8 = opt.utf8;
3208 sbi->s_nocompress = opt.nocompress;
3209 sbi->s_overriderockperm = opt.overriderockperm;
3210 /*
3211 diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
3212 index 055ec6c586f7f..dcdc191ed1834 100644
3213 --- a/fs/isofs/isofs.h
3214 +++ b/fs/isofs/isofs.h
3215 @@ -44,7 +44,6 @@ struct isofs_sb_info {
3216 unsigned char s_session;
3217 unsigned int s_high_sierra:1;
3218 unsigned int s_rock:2;
3219 - unsigned int s_utf8:1;
3220 unsigned int s_cruft:1; /* Broken disks with high byte of length
3221 * containing junk */
3222 unsigned int s_nocompress:1;
3223 diff --git a/fs/isofs/joliet.c b/fs/isofs/joliet.c
3224 index be8b6a9d0b926..c0f04a1e7f695 100644
3225 --- a/fs/isofs/joliet.c
3226 +++ b/fs/isofs/joliet.c
3227 @@ -41,14 +41,12 @@ uni16_to_x8(unsigned char *ascii, __be16 *uni, int len, struct nls_table *nls)
3228 int
3229 get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, struct inode * inode)
3230 {
3231 - unsigned char utf8;
3232 struct nls_table *nls;
3233 unsigned char len = 0;
3234
3235 - utf8 = ISOFS_SB(inode->i_sb)->s_utf8;
3236 nls = ISOFS_SB(inode->i_sb)->s_nls_iocharset;
3237
3238 - if (utf8) {
3239 + if (!nls) {
3240 len = utf16s_to_utf8s((const wchar_t *) de->name,
3241 de->name_len[0] >> 1, UTF16_BIG_ENDIAN,
3242 outname, PAGE_SIZE);
3243 diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
3244 index 61d3cc2283dc8..498cb70c2c0d0 100644
3245 --- a/fs/lockd/svclock.c
3246 +++ b/fs/lockd/svclock.c
3247 @@ -634,7 +634,7 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
3248 conflock->caller = "somehost"; /* FIXME */
3249 conflock->len = strlen(conflock->caller);
3250 conflock->oh.len = 0; /* don't return OH info */
3251 - conflock->svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;
3252 + conflock->svid = lock->fl.fl_pid;
3253 conflock->fl.fl_type = lock->fl.fl_type;
3254 conflock->fl.fl_start = lock->fl.fl_start;
3255 conflock->fl.fl_end = lock->fl.fl_end;
3256 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
3257 index 8cb2f744dde6b..3283cc2a4e42c 100644
3258 --- a/fs/nfsd/nfs4state.c
3259 +++ b/fs/nfsd/nfs4state.c
3260 @@ -2572,9 +2572,9 @@ static void force_expire_client(struct nfs4_client *clp)
3261 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
3262 bool already_expired;
3263
3264 - spin_lock(&clp->cl_lock);
3265 + spin_lock(&nn->client_lock);
3266 clp->cl_time = 0;
3267 - spin_unlock(&clp->cl_lock);
3268 + spin_unlock(&nn->client_lock);
3269
3270 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
3271 spin_lock(&nn->client_lock);
3272 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
3273 index 401e64cde1be0..853bcff51043f 100644
3274 --- a/fs/udf/misc.c
3275 +++ b/fs/udf/misc.c
3276 @@ -173,13 +173,22 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
3277 else
3278 offset = le32_to_cpu(eahd->appAttrLocation);
3279
3280 - while (offset < iinfo->i_lenEAttr) {
3281 + while (offset + sizeof(*gaf) < iinfo->i_lenEAttr) {
3282 + uint32_t attrLength;
3283 +
3284 gaf = (struct genericFormat *)&ea[offset];
3285 + attrLength = le32_to_cpu(gaf->attrLength);
3286 +
3287 + /* Detect undersized elements and buffer overflows */
3288 + if ((attrLength < sizeof(*gaf)) ||
3289 + (attrLength > (iinfo->i_lenEAttr - offset)))
3290 + break;
3291 +
3292 if (le32_to_cpu(gaf->attrType) == type &&
3293 gaf->attrSubtype == subtype)
3294 return gaf;
3295 else
3296 - offset += le32_to_cpu(gaf->attrLength);
3297 + offset += attrLength;
3298 }
3299 }
3300
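
The udf change validates each extended-attribute record before trusting its length field: the loop now stops on records smaller than the header or running past i_lenEAttr, instead of advancing the offset by a corrupted amount. A generic sketch of that bounds-checked walk over length-prefixed records (struct and field names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct record {                 /* stand-in for struct genericFormat */
            uint32_t type;
            uint32_t length;        /* total record length, header included */
    };

    /* Walk [buf, buf + len) and return the first record of the wanted type,
     * refusing undersized records and records that overrun the buffer. */
    static const struct record *find_record(const uint8_t *buf, uint32_t len,
                                            uint32_t want)
    {
            uint32_t offset = 0;

            while (offset + sizeof(struct record) <= len) {
                    struct record rec;

                    memcpy(&rec, buf + offset, sizeof(rec));
                    if (rec.length < sizeof(struct record) ||
                        rec.length > len - offset)
                            return NULL;    /* corrupted length: stop walking */
                    if (rec.type == want)
                            return (const struct record *)(buf + offset);
                    offset += rec.length;
            }
            return NULL;
    }

    int main(void)
    {
            uint8_t buf[32] = { 0 };
            struct record r = { .type = 7, .length = 12 };

            memcpy(buf, &r, sizeof(r));
            printf("found: %s\n", find_record(buf, sizeof(buf), 7) ? "yes" : "no");
            return 0;
    }
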
3301 diff --git a/fs/udf/super.c b/fs/udf/super.c
3302 index 8bb001c7927f0..5663bae95700c 100644
3303 --- a/fs/udf/super.c
3304 +++ b/fs/udf/super.c
3305 @@ -108,16 +108,10 @@ struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
3306 return NULL;
3307 lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
3308 partnum = le32_to_cpu(lvid->numOfPartitions);
3309 - if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
3310 - offsetof(struct logicalVolIntegrityDesc, impUse)) /
3311 - (2 * sizeof(uint32_t)) < partnum) {
3312 - udf_err(sb, "Logical volume integrity descriptor corrupted "
3313 - "(numOfPartitions = %u)!\n", partnum);
3314 - return NULL;
3315 - }
3316 /* The offset is to skip freeSpaceTable and sizeTable arrays */
3317 offset = partnum * 2 * sizeof(uint32_t);
3318 - return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
3319 + return (struct logicalVolIntegrityDescImpUse *)
3320 + (((uint8_t *)(lvid + 1)) + offset);
3321 }
3322
3323 /* UDF filesystem type */
3324 @@ -349,10 +343,10 @@ static int udf_show_options(struct seq_file *seq, struct dentry *root)
3325 seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
3326 if (sbi->s_anchor != 0)
3327 seq_printf(seq, ",anchor=%u", sbi->s_anchor);
3328 - if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
3329 - seq_puts(seq, ",utf8");
3330 - if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
3331 + if (sbi->s_nls_map)
3332 seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
3333 + else
3334 + seq_puts(seq, ",iocharset=utf8");
3335
3336 return 0;
3337 }
3338 @@ -557,19 +551,24 @@ static int udf_parse_options(char *options, struct udf_options *uopt,
3339 /* Ignored (never implemented properly) */
3340 break;
3341 case Opt_utf8:
3342 - uopt->flags |= (1 << UDF_FLAG_UTF8);
3343 + if (!remount) {
3344 + unload_nls(uopt->nls_map);
3345 + uopt->nls_map = NULL;
3346 + }
3347 break;
3348 case Opt_iocharset:
3349 if (!remount) {
3350 - if (uopt->nls_map)
3351 - unload_nls(uopt->nls_map);
3352 - /*
3353 - * load_nls() failure is handled later in
3354 - * udf_fill_super() after all options are
3355 - * parsed.
3356 - */
3357 + unload_nls(uopt->nls_map);
3358 + uopt->nls_map = NULL;
3359 + }
3360 + /* When nls_map is not loaded then UTF-8 is used */
3361 + if (!remount && strcmp(args[0].from, "utf8") != 0) {
3362 uopt->nls_map = load_nls(args[0].from);
3363 - uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
3364 + if (!uopt->nls_map) {
3365 + pr_err("iocharset %s not found\n",
3366 + args[0].from);
3367 + return 0;
3368 + }
3369 }
3370 break;
3371 case Opt_uforget:
3372 @@ -1548,6 +1547,7 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
3373 struct udf_sb_info *sbi = UDF_SB(sb);
3374 struct logicalVolIntegrityDesc *lvid;
3375 int indirections = 0;
3376 + u32 parts, impuselen;
3377
3378 while (++indirections <= UDF_MAX_LVID_NESTING) {
3379 final_bh = NULL;
3380 @@ -1574,15 +1574,27 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
3381
3382 lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
3383 if (lvid->nextIntegrityExt.extLength == 0)
3384 - return;
3385 + goto check;
3386
3387 loc = leea_to_cpu(lvid->nextIntegrityExt);
3388 }
3389
3390 udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
3391 UDF_MAX_LVID_NESTING);
3392 +out_err:
3393 brelse(sbi->s_lvid_bh);
3394 sbi->s_lvid_bh = NULL;
3395 + return;
3396 +check:
3397 + parts = le32_to_cpu(lvid->numOfPartitions);
3398 + impuselen = le32_to_cpu(lvid->lengthOfImpUse);
3399 + if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize ||
3400 + sizeof(struct logicalVolIntegrityDesc) + impuselen +
3401 + 2 * parts * sizeof(u32) > sb->s_blocksize) {
3402 + udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), "
3403 + "ignoring.\n", parts, impuselen);
3404 + goto out_err;
3405 + }
3406 }
3407
3408 /*
3409 @@ -2145,21 +2157,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
3410 if (!udf_parse_options((char *)options, &uopt, false))
3411 goto parse_options_failure;
3412
3413 - if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
3414 - uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
3415 - udf_err(sb, "utf8 cannot be combined with iocharset\n");
3416 - goto parse_options_failure;
3417 - }
3418 - if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
3419 - uopt.nls_map = load_nls_default();
3420 - if (!uopt.nls_map)
3421 - uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
3422 - else
3423 - udf_debug("Using default NLS map\n");
3424 - }
3425 - if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
3426 - uopt.flags |= (1 << UDF_FLAG_UTF8);
3427 -
3428 fileset.logicalBlockNum = 0xFFFFFFFF;
3429 fileset.partitionReferenceNum = 0xFFFF;
3430
3431 @@ -2314,8 +2311,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
3432 error_out:
3433 iput(sbi->s_vat_inode);
3434 parse_options_failure:
3435 - if (uopt.nls_map)
3436 - unload_nls(uopt.nls_map);
3437 + unload_nls(uopt.nls_map);
3438 if (lvid_open)
3439 udf_close_lvid(sb);
3440 brelse(sbi->s_lvid_bh);
3441 @@ -2365,8 +2361,7 @@ static void udf_put_super(struct super_block *sb)
3442 sbi = UDF_SB(sb);
3443
3444 iput(sbi->s_vat_inode);
3445 - if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
3446 - unload_nls(sbi->s_nls_map);
3447 + unload_nls(sbi->s_nls_map);
3448 if (!sb_rdonly(sb))
3449 udf_close_lvid(sb);
3450 brelse(sbi->s_lvid_bh);
3451 diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
3452 index 3d83be54c4748..8eace7a633d38 100644
3453 --- a/fs/udf/udf_sb.h
3454 +++ b/fs/udf/udf_sb.h
3455 @@ -20,8 +20,6 @@
3456 #define UDF_FLAG_UNDELETE 6
3457 #define UDF_FLAG_UNHIDE 7
3458 #define UDF_FLAG_VARCONV 8
3459 -#define UDF_FLAG_NLS_MAP 9
3460 -#define UDF_FLAG_UTF8 10
3461 #define UDF_FLAG_UID_FORGET 11 /* save -1 for uid to disk */
3462 #define UDF_FLAG_GID_FORGET 12
3463 #define UDF_FLAG_UID_SET 13
3464 diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
3465 index 5fcfa96463ebb..622569007b530 100644
3466 --- a/fs/udf/unicode.c
3467 +++ b/fs/udf/unicode.c
3468 @@ -177,7 +177,7 @@ static int udf_name_from_CS0(struct super_block *sb,
3469 return 0;
3470 }
3471
3472 - if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
3473 + if (UDF_SB(sb)->s_nls_map)
3474 conv_f = UDF_SB(sb)->s_nls_map->uni2char;
3475 else
3476 conv_f = NULL;
3477 @@ -285,7 +285,7 @@ static int udf_name_to_CS0(struct super_block *sb,
3478 if (ocu_max_len <= 0)
3479 return 0;
3480
3481 - if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
3482 + if (UDF_SB(sb)->s_nls_map)
3483 conv_f = UDF_SB(sb)->s_nls_map->char2uni;
3484 else
3485 conv_f = NULL;
3486 diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
3487 index 22f070085971b..ee10a9f06b97c 100644
3488 --- a/include/linux/bpf_verifier.h
3489 +++ b/include/linux/bpf_verifier.h
3490 @@ -194,6 +194,13 @@ struct bpf_idx_pair {
3491 u32 idx;
3492 };
3493
3494 +struct bpf_id_pair {
3495 + u32 old;
3496 + u32 cur;
3497 +};
3498 +
3499 +/* Maximum number of register states that can exist at once */
3500 +#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
3501 #define MAX_CALL_FRAMES 8
3502 struct bpf_verifier_state {
3503 /* call stack tracking */
3504 @@ -301,8 +308,8 @@ struct bpf_insn_aux_data {
3505 };
3506 };
3507 int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
3508 - int sanitize_stack_off; /* stack slot to be cleared */
3509 bool seen; /* this insn was processed by the verifier */
3510 + bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
3511 bool zext_dst; /* this insn zero extends dst reg */
3512 u8 alu_state; /* used in combination with alu_limit */
3513 bool prune_point;
3514 @@ -364,12 +371,14 @@ struct bpf_verifier_env {
3515 struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
3516 u32 used_map_cnt; /* number of used maps */
3517 u32 id_gen; /* used to generate unique reg IDs */
3518 + bool explore_alu_limits;
3519 bool allow_ptr_leaks;
3520 bool seen_direct_write;
3521 struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
3522 const struct bpf_line_info *prev_linfo;
3523 struct bpf_verifier_log log;
3524 struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
3525 + struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
3526 struct {
3527 int *insn_state;
3528 int *insn_stack;
3529 diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
3530 index 73f8c3cb95888..9ee6ccc18424f 100644
3531 --- a/include/linux/energy_model.h
3532 +++ b/include/linux/energy_model.h
3533 @@ -42,6 +42,22 @@ struct em_perf_domain {
3534
3535 #define EM_CPU_MAX_POWER 0xFFFF
3536
3537 +/*
3538 + * Increase resolution of energy estimation calculations for 64-bit
3539 + * architectures. The extra resolution improves decisions made by EAS for
3540 + * task placement when two Performance Domains might provide similar energy
3541 + * estimation values (w/o better resolution the values could be equal).
3542 + *
3543 + * We increase resolution only if we have enough bits to allow this increased
3544 + * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
3545 + * are pretty high and the returns do not justify the increased costs.
3546 + */
3547 +#ifdef CONFIG_64BIT
3548 +#define em_scale_power(p) ((p) * 1000)
3549 +#else
3550 +#define em_scale_power(p) (p)
3551 +#endif
3552 +
3553 struct em_data_callback {
3554 /**
3555 * active_power() - Provide power at the next capacity state of a CPU
3556 diff --git a/include/linux/filter.h b/include/linux/filter.h
3557 index c53e2fe3c8f7f..c4f89340f4986 100644
3558 --- a/include/linux/filter.h
3559 +++ b/include/linux/filter.h
3560 @@ -68,6 +68,11 @@ struct ctl_table_header;
3561 /* unused opcode to mark call to interpreter with arguments */
3562 #define BPF_CALL_ARGS 0xe0
3563
3564 +/* unused opcode to mark speculation barrier for mitigating
3565 + * Speculative Store Bypass
3566 + */
3567 +#define BPF_NOSPEC 0xc0
3568 +
3569 /* As per nm, we expose JITed images as text (code) section for
3570 * kallsyms. That way, tools like perf can find it to match
3571 * addresses.
3572 @@ -368,6 +373,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
3573 .off = 0, \
3574 .imm = 0 })
3575
3576 +/* Speculation barrier */
3577 +
3578 +#define BPF_ST_NOSPEC() \
3579 + ((struct bpf_insn) { \
3580 + .code = BPF_ST | BPF_NOSPEC, \
3581 + .dst_reg = 0, \
3582 + .src_reg = 0, \
3583 + .off = 0, \
3584 + .imm = 0 })
3585 +
3586 /* Internal classic blocks for direct assignment */
3587
3588 #define __BPF_STMT(CODE, K) \
3589 diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
3590 index 1f98b52118f0a..48be92aded5ee 100644
3591 --- a/include/linux/hrtimer.h
3592 +++ b/include/linux/hrtimer.h
3593 @@ -317,16 +317,12 @@ struct clock_event_device;
3594
3595 extern void hrtimer_interrupt(struct clock_event_device *dev);
3596
3597 -extern void clock_was_set_delayed(void);
3598 -
3599 extern unsigned int hrtimer_resolution;
3600
3601 #else
3602
3603 #define hrtimer_resolution (unsigned int)LOW_RES_NSEC
3604
3605 -static inline void clock_was_set_delayed(void) { }
3606 -
3607 #endif
3608
3609 static inline ktime_t
3610 @@ -350,7 +346,6 @@ hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
3611 timer->base->get_time());
3612 }
3613
3614 -extern void clock_was_set(void);
3615 #ifdef CONFIG_TIMERFD
3616 extern void timerfd_clock_was_set(void);
3617 #else
3618 diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
3619 index 4badd53229490..2f9ff5017f122 100644
3620 --- a/include/linux/power/max17042_battery.h
3621 +++ b/include/linux/power/max17042_battery.h
3622 @@ -69,7 +69,7 @@ enum max17042_register {
3623 MAX17042_RelaxCFG = 0x2A,
3624 MAX17042_MiscCFG = 0x2B,
3625 MAX17042_TGAIN = 0x2C,
3626 - MAx17042_TOFF = 0x2D,
3627 + MAX17042_TOFF = 0x2D,
3628 MAX17042_CGAIN = 0x2E,
3629 MAX17042_COFF = 0x2F,
3630
3631 diff --git a/include/linux/time64.h b/include/linux/time64.h
3632 index 5eab3f2635186..f6059c505986b 100644
3633 --- a/include/linux/time64.h
3634 +++ b/include/linux/time64.h
3635 @@ -33,7 +33,9 @@ struct itimerspec64 {
3636 #define TIME64_MIN (-TIME64_MAX - 1)
3637
3638 #define KTIME_MAX ((s64)~((u64)1 << 63))
3639 +#define KTIME_MIN (-KTIME_MAX - 1)
3640 #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
3641 +#define KTIME_SEC_MIN (KTIME_MIN / NSEC_PER_SEC)
3642
3643 /*
3644 * Limits for settimeofday():
3645 @@ -132,10 +134,13 @@ static inline bool timespec64_valid_settod(const struct timespec64 *ts)
3646 */
3647 static inline s64 timespec64_to_ns(const struct timespec64 *ts)
3648 {
3649 - /* Prevent multiplication overflow */
3650 - if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
3651 + /* Prevent multiplication overflow / underflow */
3652 + if (ts->tv_sec >= KTIME_SEC_MAX)
3653 return KTIME_MAX;
3654
3655 + if (ts->tv_sec <= KTIME_SEC_MIN)
3656 + return KTIME_MIN;
3657 +
3658 return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
3659 }
3660
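
The time64.h hunk makes timespec64_to_ns() saturate in both directions: seconds at or above KTIME_SEC_MAX return KTIME_MAX, and seconds at or below the new KTIME_SEC_MIN return KTIME_MIN, so large negative values can no longer wrap the multiplication. A minimal userspace sketch of the saturating conversion:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC  1000000000LL
    #define KTIME_MAX     ((int64_t)~((uint64_t)1 << 63))
    #define KTIME_MIN     (-KTIME_MAX - 1)
    #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
    #define KTIME_SEC_MIN (KTIME_MIN / NSEC_PER_SEC)

    /* Saturating seconds + nanoseconds -> nanoseconds conversion. */
    static int64_t ts_to_ns(int64_t sec, long nsec)
    {
            if (sec >= KTIME_SEC_MAX)
                    return KTIME_MAX;
            if (sec <= KTIME_SEC_MIN)
                    return KTIME_MIN;
            return sec * NSEC_PER_SEC + nsec;
    }

    int main(void)
    {
            printf("%lld\n", (long long)ts_to_ns(1, 500));           /* 1000000500 */
            printf("%lld\n", (long long)ts_to_ns(INT64_MIN / 2, 0)); /* saturates to KTIME_MIN */
            return 0;
    }
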
3661 diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
3662 index 8649422e760cc..63038eb23560b 100644
3663 --- a/include/uapi/linux/bpf.h
3664 +++ b/include/uapi/linux/bpf.h
3665 @@ -2264,7 +2264,7 @@ union bpf_attr {
3666 * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
3667 * Description
3668 * Select a **SO_REUSEPORT** socket from a
3669 - * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
3670 + * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
3671 * It checks the selected socket is matching the incoming
3672 * request in the socket buffer.
3673 * Return
3674 diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
3675 index 323913ba13b38..d9a3d995bd966 100644
3676 --- a/kernel/bpf/core.c
3677 +++ b/kernel/bpf/core.c
3678 @@ -31,6 +31,7 @@
3679 #include <linux/rcupdate.h>
3680 #include <linux/perf_event.h>
3681
3682 +#include <asm/barrier.h>
3683 #include <asm/unaligned.h>
3684
3685 /* Registers */
3686 @@ -1310,6 +1311,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
3687 /* Non-UAPI available opcodes. */
3688 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
3689 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
3690 + [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
3691 };
3692 #undef BPF_INSN_3_LBL
3693 #undef BPF_INSN_2_LBL
3694 @@ -1550,7 +1552,21 @@ out:
3695 COND_JMP(s, JSGE, >=)
3696 COND_JMP(s, JSLE, <=)
3697 #undef COND_JMP
3698 - /* STX and ST and LDX*/
3699 + /* ST, STX and LDX */
3700 + ST_NOSPEC:
3701 + /* Speculation barrier for mitigating Speculative Store Bypass.
3702 + * In case of arm64, we rely on the firmware mitigation as
3703 + * controlled via the ssbd kernel parameter. Whenever the
3704 + * mitigation is enabled, it works for all of the kernel code
3705 + * with no need to provide any additional instructions here.
3706 + * In case of x86, we use 'lfence' insn for mitigation. We
3707 + * reuse preexisting logic from Spectre v1 mitigation that
3708 + * happens to produce the required code on x86 for v4 as well.
3709 + */
3710 +#ifdef CONFIG_X86
3711 + barrier_nospec();
3712 +#endif
3713 + CONT;
3714 #define LDST(SIZEOP, SIZE) \
3715 STX_MEM_##SIZEOP: \
3716 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
3717 diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
3718 index b44d8c447afd1..ff1dd7d45b58a 100644
3719 --- a/kernel/bpf/disasm.c
3720 +++ b/kernel/bpf/disasm.c
3721 @@ -162,15 +162,17 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
3722 else
3723 verbose(cbs->private_data, "BUG_%02x\n", insn->code);
3724 } else if (class == BPF_ST) {
3725 - if (BPF_MODE(insn->code) != BPF_MEM) {
3726 + if (BPF_MODE(insn->code) == BPF_MEM) {
3727 + verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
3728 + insn->code,
3729 + bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
3730 + insn->dst_reg,
3731 + insn->off, insn->imm);
3732 + } else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) {
3733 + verbose(cbs->private_data, "(%02x) nospec\n", insn->code);
3734 + } else {
3735 verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
3736 - return;
3737 }
3738 - verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
3739 - insn->code,
3740 - bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
3741 - insn->dst_reg,
3742 - insn->off, insn->imm);
3743 } else if (class == BPF_LDX) {
3744 if (BPF_MODE(insn->code) != BPF_MEM) {
3745 verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
3746 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
3747 index 4deaf15b7618b..60383b28549be 100644
3748 --- a/kernel/bpf/verifier.c
3749 +++ b/kernel/bpf/verifier.c
3750 @@ -1920,6 +1920,19 @@ static int check_stack_write(struct bpf_verifier_env *env,
3751 cur = env->cur_state->frame[env->cur_state->curframe];
3752 if (value_regno >= 0)
3753 reg = &cur->regs[value_regno];
3754 + if (!env->allow_ptr_leaks) {
3755 + bool sanitize = reg && is_spillable_regtype(reg->type);
3756 +
3757 + for (i = 0; i < size; i++) {
3758 + if (state->stack[spi].slot_type[i] == STACK_INVALID) {
3759 + sanitize = true;
3760 + break;
3761 + }
3762 + }
3763 +
3764 + if (sanitize)
3765 + env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
3766 + }
3767
3768 if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
3769 !register_is_null(reg) && env->allow_ptr_leaks) {
3770 @@ -1942,47 +1955,10 @@ static int check_stack_write(struct bpf_verifier_env *env,
3771 verbose(env, "invalid size of register spill\n");
3772 return -EACCES;
3773 }
3774 -
3775 if (state != cur && reg->type == PTR_TO_STACK) {
3776 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
3777 return -EINVAL;
3778 }
3779 -
3780 - if (!env->allow_ptr_leaks) {
3781 - bool sanitize = false;
3782 -
3783 - if (state->stack[spi].slot_type[0] == STACK_SPILL &&
3784 - register_is_const(&state->stack[spi].spilled_ptr))
3785 - sanitize = true;
3786 - for (i = 0; i < BPF_REG_SIZE; i++)
3787 - if (state->stack[spi].slot_type[i] == STACK_MISC) {
3788 - sanitize = true;
3789 - break;
3790 - }
3791 - if (sanitize) {
3792 - int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
3793 - int soff = (-spi - 1) * BPF_REG_SIZE;
3794 -
3795 - /* detected reuse of integer stack slot with a pointer
3796 - * which means either llvm is reusing stack slot or
3797 - * an attacker is trying to exploit CVE-2018-3639
3798 - * (speculative store bypass)
3799 - * Have to sanitize that slot with preemptive
3800 - * store of zero.
3801 - */
3802 - if (*poff && *poff != soff) {
3803 - /* disallow programs where single insn stores
3804 - * into two different stack slots, since verifier
3805 - * cannot sanitize them
3806 - */
3807 - verbose(env,
3808 - "insn %d cannot access two stack slots fp%d and fp%d",
3809 - insn_idx, *poff, soff);
3810 - return -EINVAL;
3811 - }
3812 - *poff = soff;
3813 - }
3814 - }
3815 save_register_state(state, spi, reg);
3816 } else {
3817 u8 type = STACK_MISC;
3818 @@ -4473,6 +4449,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3819 alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
3820 alu_state |= ptr_is_dst_reg ?
3821 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
3822 +
3823 + /* Limit pruning on unknown scalars to enable deep search for
3824 + * potential masking differences from other program paths.
3825 + */
3826 + if (!off_is_imm)
3827 + env->explore_alu_limits = true;
3828 }
3829
3830 err = update_alu_sanitation_state(aux, alu_state, alu_limit);
3831 @@ -7000,13 +6982,6 @@ static bool range_within(struct bpf_reg_state *old,
3832 old->smax_value >= cur->smax_value;
3833 }
3834
3835 -/* Maximum number of register states that can exist at once */
3836 -#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
3837 -struct idpair {
3838 - u32 old;
3839 - u32 cur;
3840 -};
3841 -
3842 /* If in the old state two registers had the same id, then they need to have
3843 * the same id in the new state as well. But that id could be different from
3844 * the old state, so we need to track the mapping from old to new ids.
3845 @@ -7017,11 +6992,11 @@ struct idpair {
3846 * So we look through our idmap to see if this old id has been seen before. If
3847 * so, we require the new id to match; otherwise, we add the id pair to the map.
3848 */
3849 -static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
3850 +static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
3851 {
3852 unsigned int i;
3853
3854 - for (i = 0; i < ID_MAP_SIZE; i++) {
3855 + for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
3856 if (!idmap[i].old) {
3857 /* Reached an empty slot; haven't seen this id before */
3858 idmap[i].old = old_id;
3859 @@ -7133,8 +7108,8 @@ next:
3860 }
3861
3862 /* Returns true if (rold safe implies rcur safe) */
3863 -static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
3864 - struct idpair *idmap)
3865 +static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
3866 + struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
3867 {
3868 bool equal;
3869
3870 @@ -7160,6 +7135,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
3871 return false;
3872 switch (rold->type) {
3873 case SCALAR_VALUE:
3874 + if (env->explore_alu_limits)
3875 + return false;
3876 if (rcur->type == SCALAR_VALUE) {
3877 if (!rold->precise && !rcur->precise)
3878 return true;
3879 @@ -7249,9 +7226,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
3880 return false;
3881 }
3882
3883 -static bool stacksafe(struct bpf_func_state *old,
3884 - struct bpf_func_state *cur,
3885 - struct idpair *idmap)
3886 +static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
3887 + struct bpf_func_state *cur, struct bpf_id_pair *idmap)
3888 {
3889 int i, spi;
3890
3891 @@ -7296,9 +7272,8 @@ static bool stacksafe(struct bpf_func_state *old,
3892 continue;
3893 if (old->stack[spi].slot_type[0] != STACK_SPILL)
3894 continue;
3895 - if (!regsafe(&old->stack[spi].spilled_ptr,
3896 - &cur->stack[spi].spilled_ptr,
3897 - idmap))
3898 + if (!regsafe(env, &old->stack[spi].spilled_ptr,
3899 + &cur->stack[spi].spilled_ptr, idmap))
3900 /* when explored and current stack slot are both storing
3901 * spilled registers, check that stored pointers types
3902 * are the same as well.
3903 @@ -7348,32 +7323,24 @@ static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
3904 * whereas register type in current state is meaningful, it means that
3905 * the current state will reach 'bpf_exit' instruction safely
3906 */
3907 -static bool func_states_equal(struct bpf_func_state *old,
3908 +static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
3909 struct bpf_func_state *cur)
3910 {
3911 - struct idpair *idmap;
3912 - bool ret = false;
3913 int i;
3914
3915 - idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
3916 - /* If we failed to allocate the idmap, just say it's not safe */
3917 - if (!idmap)
3918 - return false;
3919 -
3920 - for (i = 0; i < MAX_BPF_REG; i++) {
3921 - if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
3922 - goto out_free;
3923 - }
3924 + memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
3925 + for (i = 0; i < MAX_BPF_REG; i++)
3926 + if (!regsafe(env, &old->regs[i], &cur->regs[i],
3927 + env->idmap_scratch))
3928 + return false;
3929
3930 - if (!stacksafe(old, cur, idmap))
3931 - goto out_free;
3932 + if (!stacksafe(env, old, cur, env->idmap_scratch))
3933 + return false;
3934
3935 if (!refsafe(old, cur))
3936 - goto out_free;
3937 - ret = true;
3938 -out_free:
3939 - kfree(idmap);
3940 - return ret;
3941 + return false;
3942 +
3943 + return true;
3944 }
3945
3946 static bool states_equal(struct bpf_verifier_env *env,
3947 @@ -7400,7 +7367,7 @@ static bool states_equal(struct bpf_verifier_env *env,
3948 for (i = 0; i <= old->curframe; i++) {
3949 if (old->frame[i]->callsite != cur->frame[i]->callsite)
3950 return false;
3951 - if (!func_states_equal(old->frame[i], cur->frame[i]))
3952 + if (!func_states_equal(env, old->frame[i], cur->frame[i]))
3953 return false;
3954 }
3955 return true;
3956 @@ -8401,10 +8368,11 @@ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
3957 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
3958 * [0, off) and [off, end) to new locations, so the patched range stays zero
3959 */
3960 -static int adjust_insn_aux_data(struct bpf_verifier_env *env,
3961 - struct bpf_prog *new_prog, u32 off, u32 cnt)
3962 +static void adjust_insn_aux_data(struct bpf_verifier_env *env,
3963 + struct bpf_insn_aux_data *new_data,
3964 + struct bpf_prog *new_prog, u32 off, u32 cnt)
3965 {
3966 - struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
3967 + struct bpf_insn_aux_data *old_data = env->insn_aux_data;
3968 struct bpf_insn *insn = new_prog->insnsi;
3969 bool old_seen = old_data[off].seen;
3970 u32 prog_len;
3971 @@ -8417,12 +8385,9 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
3972 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
3973
3974 if (cnt == 1)
3975 - return 0;
3976 + return;
3977 prog_len = new_prog->len;
3978 - new_data = vzalloc(array_size(prog_len,
3979 - sizeof(struct bpf_insn_aux_data)));
3980 - if (!new_data)
3981 - return -ENOMEM;
3982 +
3983 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
3984 memcpy(new_data + off + cnt - 1, old_data + off,
3985 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
3986 @@ -8433,7 +8398,6 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
3987 }
3988 env->insn_aux_data = new_data;
3989 vfree(old_data);
3990 - return 0;
3991 }
3992
3993 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
3994 @@ -8454,6 +8418,14 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
3995 const struct bpf_insn *patch, u32 len)
3996 {
3997 struct bpf_prog *new_prog;
3998 + struct bpf_insn_aux_data *new_data = NULL;
3999 +
4000 + if (len > 1) {
4001 + new_data = vzalloc(array_size(env->prog->len + len - 1,
4002 + sizeof(struct bpf_insn_aux_data)));
4003 + if (!new_data)
4004 + return NULL;
4005 + }
4006
4007 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
4008 if (IS_ERR(new_prog)) {
4009 @@ -8461,10 +8433,10 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
4010 verbose(env,
4011 "insn %d cannot be patched due to 16-bit range\n",
4012 env->insn_aux_data[off].orig_idx);
4013 + vfree(new_data);
4014 return NULL;
4015 }
4016 - if (adjust_insn_aux_data(env, new_prog, off, len))
4017 - return NULL;
4018 + adjust_insn_aux_data(env, new_data, new_prog, off, len);
4019 adjust_subprog_starts(env, off, len);
4020 return new_prog;
4021 }
4022 @@ -8849,35 +8821,33 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
4023
4024 for (i = 0; i < insn_cnt; i++, insn++) {
4025 bpf_convert_ctx_access_t convert_ctx_access;
4026 + bool ctx_access;
4027
4028 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
4029 insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
4030 insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
4031 - insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
4032 + insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
4033 type = BPF_READ;
4034 - else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
4035 - insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
4036 - insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
4037 - insn->code == (BPF_STX | BPF_MEM | BPF_DW))
4038 + ctx_access = true;
4039 + } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
4040 + insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
4041 + insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
4042 + insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
4043 + insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
4044 + insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
4045 + insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
4046 + insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
4047 type = BPF_WRITE;
4048 - else
4049 + ctx_access = BPF_CLASS(insn->code) == BPF_STX;
4050 + } else {
4051 continue;
4052 + }
4053
4054 if (type == BPF_WRITE &&
4055 - env->insn_aux_data[i + delta].sanitize_stack_off) {
4056 + env->insn_aux_data[i + delta].sanitize_stack_spill) {
4057 struct bpf_insn patch[] = {
4058 - /* Sanitize suspicious stack slot with zero.
4059 - * There are no memory dependencies for this store,
4060 - * since it's only using frame pointer and immediate
4061 - * constant of zero
4062 - */
4063 - BPF_ST_MEM(BPF_DW, BPF_REG_FP,
4064 - env->insn_aux_data[i + delta].sanitize_stack_off,
4065 - 0),
4066 - /* the original STX instruction will immediately
4067 - * overwrite the same stack slot with appropriate value
4068 - */
4069 *insn,
4070 + BPF_ST_NOSPEC(),
4071 };
4072
4073 cnt = ARRAY_SIZE(patch);
4074 @@ -8891,6 +8861,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
4075 continue;
4076 }
4077
4078 + if (!ctx_access)
4079 + continue;
4080 +
4081 switch (env->insn_aux_data[i + delta].ptr_type) {
4082 case PTR_TO_CTX:
4083 if (!ops->convert_ctx_access)
4084 @@ -8952,6 +8925,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
4085 if (is_narrower_load && size < target_size) {
4086 u8 shift = bpf_ctx_narrow_access_offset(
4087 off, size, size_default) * 8;
4088 + if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
4089 + verbose(env, "bpf verifier narrow ctx load misconfigured\n");
4090 + return -EINVAL;
4091 + }
4092 if (ctx_field_size <= 4) {
4093 if (shift)
4094 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
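
The verifier changes above replace the per-comparison kcalloc()'d idpair array with a fixed idmap_scratch table inside bpf_verifier_env, so func_states_equal() can no longer fail spuriously on an allocation error (and regsafe() now refuses to prune scalar states once explore_alu_limits is set). The id-mapping test itself is unchanged; below is a standalone sketch of that check_ids() consistency check, with an arbitrary table size.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ID_MAP_SIZE 16                  /* stands in for BPF_ID_MAP_SIZE */

    struct id_pair { uint32_t old, cur; };

    /* Each old register id must map onto exactly one current id. */
    static bool check_ids(uint32_t old_id, uint32_t cur_id, struct id_pair *idmap)
    {
            unsigned int i;

            for (i = 0; i < ID_MAP_SIZE; i++) {
                    if (!idmap[i].old) {
                            /* Empty slot: first sighting, record the pairing. */
                            idmap[i].old = old_id;
                            idmap[i].cur = cur_id;
                            return true;
                    }
                    if (idmap[i].old == old_id)
                            return idmap[i].cur == cur_id;
            }
            return false;                   /* table full: conservatively unequal */
    }

    int main(void)
    {
            struct id_pair idmap[ID_MAP_SIZE];

            memset(idmap, 0, sizeof(idmap));        /* reused as scratch per comparison */
            printf("%d\n", check_ids(3, 7, idmap)); /* 1: id 3 now maps to 7 */
            printf("%d\n", check_ids(3, 7, idmap)); /* 1: consistent */
            printf("%d\n", check_ids(3, 9, idmap)); /* 0: conflicting mapping */
            return 0;
    }
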
4095 diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
4096 index bab6a934862e3..badfa8f153599 100644
4097 --- a/kernel/cgroup/cpuset.c
4098 +++ b/kernel/cgroup/cpuset.c
4099 @@ -3166,6 +3166,13 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
4100 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
4101 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
4102
4103 + /*
4104 + * In the rare case that hotplug removes all the cpus in subparts_cpus,
4105 + * we assume that cpus are updated.
4106 + */
4107 + if (!cpus_updated && top_cpuset.nr_subparts_cpus)
4108 + cpus_updated = true;
4109 +
4110 /* synchronize cpus_allowed to cpu_active_mask */
4111 if (cpus_updated) {
4112 spin_lock_irq(&callback_lock);
4113 diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c
4114 index b5985da80acf0..7ccc8edce46dc 100644
4115 --- a/kernel/irq/timings.c
4116 +++ b/kernel/irq/timings.c
4117 @@ -799,12 +799,14 @@ static int __init irq_timings_test_irqs(struct timings_intervals *ti)
4118
4119 __irq_timings_store(irq, irqs, ti->intervals[i]);
4120 if (irqs->circ_timings[i & IRQ_TIMINGS_MASK] != index) {
4121 + ret = -EBADSLT;
4122 pr_err("Failed to store in the circular buffer\n");
4123 goto out;
4124 }
4125 }
4126
4127 if (irqs->count != ti->count) {
4128 + ret = -ERANGE;
4129 pr_err("Count differs\n");
4130 goto out;
4131 }
4132 diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
4133 index c0c7784f074b2..b02fff28221f0 100644
4134 --- a/kernel/locking/mutex.c
4135 +++ b/kernel/locking/mutex.c
4136 @@ -938,7 +938,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
4137 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
4138 {
4139 struct mutex_waiter waiter;
4140 - bool first = false;
4141 struct ww_mutex *ww;
4142 int ret;
4143
4144 @@ -1017,6 +1016,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
4145
4146 set_current_state(state);
4147 for (;;) {
4148 + bool first;
4149 +
4150 /*
4151 * Once we hold wait_lock, we're serialized against
4152 * mutex_unlock() handing the lock off to us, do a trylock
4153 @@ -1045,15 +1046,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
4154 spin_unlock(&lock->wait_lock);
4155 schedule_preempt_disabled();
4156
4157 - /*
4158 - * ww_mutex needs to always recheck its position since its waiter
4159 - * list is not FIFO ordered.
4160 - */
4161 - if (ww_ctx || !first) {
4162 - first = __mutex_waiter_is_first(lock, &waiter);
4163 - if (first)
4164 - __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
4165 - }
4166 + first = __mutex_waiter_is_first(lock, &waiter);
4167 + if (first)
4168 + __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
4169
4170 set_current_state(state);
4171 /*
4172 diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
4173 index 8dac32bd90894..7ef35eb985baf 100644
4174 --- a/kernel/power/energy_model.c
4175 +++ b/kernel/power/energy_model.c
4176 @@ -149,7 +149,9 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
4177 /* Compute the cost of each capacity_state. */
4178 fmax = (u64) table[nr_states - 1].frequency;
4179 for (i = 0; i < nr_states; i++) {
4180 - table[i].cost = div64_u64(fmax * table[i].power,
4181 + unsigned long power_res = em_scale_power(table[i].power);
4182 +
4183 + table[i].cost = div64_u64(fmax * power_res,
4184 table[i].frequency);
4185 }
4186
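
em_create_pd() now computes each state's cost from em_scale_power(power), i.e. power * 1000 on 64-bit builds, before dividing by the state's frequency; the extra digits keep nearby states distinguishable where the unscaled integer division would collapse them to the same cost. A small sketch of the arithmetic with made-up frequency/power values:

    #include <stdint.h>
    #include <stdio.h>

    #define EM_SCALE 1000ULL        /* em_scale_power(p) factor on 64-bit builds */

    int main(void)
    {
            /* Hypothetical capacity states: frequency in kHz, power in mW. */
            uint64_t freq[]  = {  999000, 1200000 };
            uint64_t power[] = {     500,     600 };
            uint64_t fmax = freq[1];
            int i;

            for (i = 0; i < 2; i++) {
                    uint64_t coarse = fmax * power[i] / freq[i];
                    uint64_t fine   = fmax * (power[i] * EM_SCALE) / freq[i];

                    /* Both states cost 600 without scaling; 600600 vs 600000 with it. */
                    printf("state %d: cost %llu -> %llu\n", i,
                           (unsigned long long)coarse, (unsigned long long)fine);
            }
            return 0;
    }
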
4187 diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
4188 index c0b8c458d8a6a..b8c9744ad595b 100644
4189 --- a/kernel/rcu/tree_stall.h
4190 +++ b/kernel/rcu/tree_stall.h
4191 @@ -7,6 +7,8 @@
4192 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
4193 */
4194
4195 +#include <linux/kvm_para.h>
4196 +
4197 //////////////////////////////////////////////////////////////////////////////
4198 //
4199 // Controlling CPU stall warnings, including delay calculation.
4200 @@ -525,6 +527,14 @@ static void check_cpu_stall(struct rcu_data *rdp)
4201 (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
4202 cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
4203
4204 + /*
4205 + * If a virtual machine is stopped by the host it can look to
4206 + * the watchdog like an RCU stall. Check to see if the host
4207 + * stopped the vm.
4208 + */
4209 + if (kvm_check_and_clear_guest_paused())
4210 + return;
4211 +
4212 /* We haven't checked in, so go dump stack. */
4213 print_cpu_stall();
4214 if (rcu_cpu_stall_ftrace_dump)
4215 @@ -534,6 +544,14 @@ static void check_cpu_stall(struct rcu_data *rdp)
4216 ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
4217 cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
4218
4219 + /*
4220 + * If a virtual machine is stopped by the host it can look to
4221 + * the watchdog like an RCU stall. Check to see if the host
4222 + * stopped the vm.
4223 + */
4224 + if (kvm_check_and_clear_guest_paused())
4225 + return;
4226 +
4227 /* They had a few time units to dump stack, so complain. */
4228 print_other_cpu_stall(gs2);
4229 if (rcu_cpu_stall_ftrace_dump)
4230 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
4231 index 8294debf68c4d..5dc43d37e6a2b 100644
4232 --- a/kernel/sched/core.c
4233 +++ b/kernel/sched/core.c
4234 @@ -1110,6 +1110,23 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
4235 uclamp_rq_dec_id(rq, p, clamp_id);
4236 }
4237
4238 +static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
4239 + enum uclamp_id clamp_id)
4240 +{
4241 + if (!p->uclamp[clamp_id].active)
4242 + return;
4243 +
4244 + uclamp_rq_dec_id(rq, p, clamp_id);
4245 + uclamp_rq_inc_id(rq, p, clamp_id);
4246 +
4247 + /*
4248 + * Make sure to clear the idle flag if we've transiently reached 0
4249 + * active tasks on rq.
4250 + */
4251 + if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
4252 + rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
4253 +}
4254 +
4255 static inline void
4256 uclamp_update_active(struct task_struct *p)
4257 {
4258 @@ -1133,12 +1150,8 @@ uclamp_update_active(struct task_struct *p)
4259 * affecting a valid clamp bucket, the next time it's enqueued,
4260 * it will already see the updated clamp bucket value.
4261 */
4262 - for_each_clamp_id(clamp_id) {
4263 - if (p->uclamp[clamp_id].active) {
4264 - uclamp_rq_dec_id(rq, p, clamp_id);
4265 - uclamp_rq_inc_id(rq, p, clamp_id);
4266 - }
4267 - }
4268 + for_each_clamp_id(clamp_id)
4269 + uclamp_rq_reinc_id(rq, p, clamp_id);
4270
4271 task_rq_unlock(rq, p, &rf);
4272 }
4273 diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
4274 index 3cf776d5bce8f..2bda9fdba31c4 100644
4275 --- a/kernel/sched/deadline.c
4276 +++ b/kernel/sched/deadline.c
4277 @@ -1654,6 +1654,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
4278 */
4279 raw_spin_lock(&rq->lock);
4280 if (p->dl.dl_non_contending) {
4281 + update_rq_clock(rq);
4282 sub_running_bw(&p->dl, &rq->dl);
4283 p->dl.dl_non_contending = 0;
4284 /*
4285 @@ -2622,7 +2623,7 @@ void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
4286 dl_se->dl_runtime = attr->sched_runtime;
4287 dl_se->dl_deadline = attr->sched_deadline;
4288 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
4289 - dl_se->flags = attr->sched_flags;
4290 + dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
4291 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
4292 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
4293 }
4294 @@ -2635,7 +2636,8 @@ void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
4295 attr->sched_runtime = dl_se->dl_runtime;
4296 attr->sched_deadline = dl_se->dl_deadline;
4297 attr->sched_period = dl_se->dl_period;
4298 - attr->sched_flags = dl_se->flags;
4299 + attr->sched_flags &= ~SCHED_DL_FLAGS;
4300 + attr->sched_flags |= dl_se->flags;
4301 }
4302
4303 /*
4304 @@ -2710,7 +2712,7 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
4305 if (dl_se->dl_runtime != attr->sched_runtime ||
4306 dl_se->dl_deadline != attr->sched_deadline ||
4307 dl_se->dl_period != attr->sched_period ||
4308 - dl_se->flags != attr->sched_flags)
4309 + dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
4310 return true;
4311
4312 return false;
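
With this change __setparam_dl() stores only the bits covered by SCHED_DL_FLAGS, and __getparam_dl() merges them back into sched_flags instead of overwriting the whole word. A tiny sketch of that mask-and-merge pattern (the flag values below are hypothetical, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_RECLAIM    0x02    /* hypothetical values, for illustration only */
    #define FLAG_DL_OVERRUN 0x04
    #define FLAG_OTHER      0x40    /* unrelated flag that must survive */
    #define DL_FLAGS        (FLAG_RECLAIM | FLAG_DL_OVERRUN)

    int main(void)
    {
            uint64_t attr_flags = FLAG_RECLAIM | FLAG_OTHER;
            uint64_t dl_flags;

            /* setparam: store only the deadline-specific bits. */
            dl_flags = attr_flags & DL_FLAGS;

            /* getparam: merge them back without clobbering other bits. */
            attr_flags &= ~(uint64_t)DL_FLAGS;
            attr_flags |= dl_flags;

            printf("stored 0x%llx, returned 0x%llx\n",
                   (unsigned long long)dl_flags, (unsigned long long)attr_flags);
            return 0;
    }
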
4313 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
4314 index 4e490e3db2f86..fe755c1a0af95 100644
4315 --- a/kernel/sched/sched.h
4316 +++ b/kernel/sched/sched.h
4317 @@ -209,6 +209,8 @@ static inline int task_has_dl_policy(struct task_struct *p)
4318 */
4319 #define SCHED_FLAG_SUGOV 0x10000000
4320
4321 +#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
4322 +
4323 static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
4324 {
4325 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
4326 diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
4327 index 1f3e3a17f67e0..e1e8d5dab0c59 100644
4328 --- a/kernel/time/hrtimer.c
4329 +++ b/kernel/time/hrtimer.c
4330 @@ -759,22 +759,6 @@ static void hrtimer_switch_to_hres(void)
4331 retrigger_next_event(NULL);
4332 }
4333
4334 -static void clock_was_set_work(struct work_struct *work)
4335 -{
4336 - clock_was_set();
4337 -}
4338 -
4339 -static DECLARE_WORK(hrtimer_work, clock_was_set_work);
4340 -
4341 -/*
4342 - * Called from timekeeping and resume code to reprogram the hrtimer
4343 - * interrupt device on all cpus.
4344 - */
4345 -void clock_was_set_delayed(void)
4346 -{
4347 - schedule_work(&hrtimer_work);
4348 -}
4349 -
4350 #else
4351
4352 static inline int hrtimer_is_hres_enabled(void) { return 0; }
4353 @@ -892,6 +876,22 @@ void clock_was_set(void)
4354 timerfd_clock_was_set();
4355 }
4356
4357 +static void clock_was_set_work(struct work_struct *work)
4358 +{
4359 + clock_was_set();
4360 +}
4361 +
4362 +static DECLARE_WORK(hrtimer_work, clock_was_set_work);
4363 +
4364 +/*
4365 + * Called from timekeeping and resume code to reprogram the hrtimer
4366 + * interrupt device on all cpus and to notify timerfd.
4367 + */
4368 +void clock_was_set_delayed(void)
4369 +{
4370 + schedule_work(&hrtimer_work);
4371 +}
4372 +
4373 /*
4374 * During resume we might have to reprogram the high resolution timer
4375 * interrupt on all online CPUs. However, all other CPUs will be
4376 @@ -1031,12 +1031,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
4377 * remove hrtimer, called with base lock held
4378 */
4379 static inline int
4380 -remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
4381 +remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
4382 + bool restart, bool keep_local)
4383 {
4384 u8 state = timer->state;
4385
4386 if (state & HRTIMER_STATE_ENQUEUED) {
4387 - int reprogram;
4388 + bool reprogram;
4389
4390 /*
4391 * Remove the timer and force reprogramming when high
4392 @@ -1049,8 +1050,16 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest
4393 debug_deactivate(timer);
4394 reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
4395
4396 + /*
4397 + * If the timer is not restarted then reprogramming is
4398 + * required if the timer is local. If it is local and about
4399 + * to be restarted, avoid programming it twice (on removal
4400 + * and a moment later when it's requeued).
4401 + */
4402 if (!restart)
4403 state = HRTIMER_STATE_INACTIVE;
4404 + else
4405 + reprogram &= !keep_local;
4406
4407 __remove_hrtimer(timer, base, state, reprogram);
4408 return 1;
4409 @@ -1104,9 +1113,31 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
4410 struct hrtimer_clock_base *base)
4411 {
4412 struct hrtimer_clock_base *new_base;
4413 + bool force_local, first;
4414 +
4415 + /*
4416 + * If the timer is on the local cpu base and is the first expiring
4417 + * timer then this might end up reprogramming the hardware twice
4418 + * (on removal and on enqueue). To avoid that, prevent the
4419 + * reprogram on removal, keep the timer local to the current CPU
4420 + * and enforce reprogramming after it is queued no matter whether
4421 + * it is the new first expiring timer again or not.
4422 + */
4423 + force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
4424 + force_local &= base->cpu_base->next_timer == timer;
4425
4426 - /* Remove an active timer from the queue: */
4427 - remove_hrtimer(timer, base, true);
4428 + /*
4429 + * Remove an active timer from the queue. In case it is not queued
4430 + * on the current CPU, make sure that remove_hrtimer() updates the
4431 + * remote data correctly.
4432 + *
4433 + * If it's on the current CPU and the first expiring timer, then
4434 + * skip reprogramming, keep the timer local and enforce
4435 + * reprogramming later if it was the first expiring timer. This
4436 + * avoids programming the underlying clock event twice (once at
4437 + * removal and once after enqueue).
4438 + */
4439 + remove_hrtimer(timer, base, true, force_local);
4440
4441 if (mode & HRTIMER_MODE_REL)
4442 tim = ktime_add_safe(tim, base->get_time());
4443 @@ -1116,9 +1147,24 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
4444 hrtimer_set_expires_range_ns(timer, tim, delta_ns);
4445
4446 /* Switch the timer base, if necessary: */
4447 - new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
4448 + if (!force_local) {
4449 + new_base = switch_hrtimer_base(timer, base,
4450 + mode & HRTIMER_MODE_PINNED);
4451 + } else {
4452 + new_base = base;
4453 + }
4454 +
4455 + first = enqueue_hrtimer(timer, new_base, mode);
4456 + if (!force_local)
4457 + return first;
4458
4459 - return enqueue_hrtimer(timer, new_base, mode);
4460 + /*
4461 + * Timer was forced to stay on the current CPU to avoid
4462 + * reprogramming on removal and enqueue. Force reprogram the
4463 + * hardware by evaluating the new first expiring timer.
4464 + */
4465 + hrtimer_force_reprogram(new_base->cpu_base, 1);
4466 + return 0;
4467 }
4468
4469 /**
4470 @@ -1184,7 +1230,7 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
4471 base = lock_hrtimer_base(timer, &flags);
4472
4473 if (!hrtimer_callback_running(timer))
4474 - ret = remove_hrtimer(timer, base, false);
4475 + ret = remove_hrtimer(timer, base, false, false);
4476
4477 unlock_hrtimer_base(timer, &flags);
4478
4479 diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
4480 index eacb0ca301932..30e061b210b7c 100644
4481 --- a/kernel/time/posix-cpu-timers.c
4482 +++ b/kernel/time/posix-cpu-timers.c
4483 @@ -1201,8 +1201,6 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
4484 }
4485 }
4486
4487 - if (!*newval)
4488 - return;
4489 *newval += now;
4490 }
4491
4492 diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
4493 index 7b24961367292..5294f5b1f9550 100644
4494 --- a/kernel/time/tick-internal.h
4495 +++ b/kernel/time/tick-internal.h
4496 @@ -165,3 +165,6 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
4497
4498 extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
4499 void timer_clear_idle(void);
4500 +
4501 +void clock_was_set(void);
4502 +void clock_was_set_delayed(void);
4503 diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
4504 index 20ed0f7667871..00825028cc847 100644
4505 --- a/lib/mpi/mpiutil.c
4506 +++ b/lib/mpi/mpiutil.c
4507 @@ -91,7 +91,7 @@ int mpi_resize(MPI a, unsigned nlimbs)
4508 return 0; /* no need to do it */
4509
4510 if (a->d) {
4511 - p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
4512 + p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
4513 if (!p)
4514 return -ENOMEM;
4515 memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
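
Switching mpi_resize() from kmalloc_array() to kcalloc() means the limbs beyond the memcpy()'d region start out zeroed rather than holding stale heap contents. The userspace analogue is calloc() versus malloc(); a minimal sketch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            size_t old_n = 4, new_n = 8, i;
            unsigned long old_limbs[4] = { 1, 2, 3, 4 };

            /* calloc() zero-fills, so limbs [old_n, new_n) are well defined
             * instead of exposing whatever the allocator handed back. */
            unsigned long *p = calloc(new_n, sizeof(*p));

            if (!p)
                    return 1;
            memcpy(p, old_limbs, old_n * sizeof(*p));
            for (i = 0; i < new_n; i++)
                    printf("%lu ", p[i]);   /* 1 2 3 4 0 0 0 0 */
            printf("\n");
            free(p);
            return 0;
    }
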
4516 diff --git a/net/6lowpan/debugfs.c b/net/6lowpan/debugfs.c
4517 index 1c140af06d527..600b9563bfc53 100644
4518 --- a/net/6lowpan/debugfs.c
4519 +++ b/net/6lowpan/debugfs.c
4520 @@ -170,7 +170,8 @@ static void lowpan_dev_debugfs_ctx_init(struct net_device *dev,
4521 struct dentry *root;
4522 char buf[32];
4523
4524 - WARN_ON_ONCE(id > LOWPAN_IPHC_CTX_TABLE_SIZE);
4525 + if (WARN_ON_ONCE(id >= LOWPAN_IPHC_CTX_TABLE_SIZE))
4526 + return;
4527
4528 sprintf(buf, "%d", id);
4529
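
The 6lowpan debugfs helper previously only warned when id equalled the table size and then carried on indexing past the table; the fix treats id >= LOWPAN_IPHC_CTX_TABLE_SIZE as out of range and returns. A trivial sketch of the corrected bound, with an arbitrary table size:

    #include <stdbool.h>
    #include <stdio.h>

    #define TABLE_SIZE 16   /* stands in for LOWPAN_IPHC_CTX_TABLE_SIZE */

    /* Valid slots are 0 .. TABLE_SIZE - 1, so an id equal to the size is
     * already one past the end and must be rejected, not just warned about. */
    static bool id_is_valid(unsigned int id)
    {
            return id < TABLE_SIZE;
    }

    int main(void)
    {
            printf("15: %s\n", id_is_valid(15) ? "ok" : "out of range");
            printf("16: %s\n", id_is_valid(16) ? "ok" : "out of range");
            return 0;
    }
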
4530 diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
4531 index c32638dddbf94..f6b9dc4e408f2 100644
4532 --- a/net/bluetooth/cmtp/cmtp.h
4533 +++ b/net/bluetooth/cmtp/cmtp.h
4534 @@ -26,7 +26,7 @@
4535 #include <linux/types.h>
4536 #include <net/bluetooth/bluetooth.h>
4537
4538 -#define BTNAMSIZ 18
4539 +#define BTNAMSIZ 21
4540
4541 /* CMTP ioctl defines */
4542 #define CMTPCONNADD _IOW('C', 200, int)
4543 diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
4544 index 83a07fca9000f..bdd330527cfa2 100644
4545 --- a/net/bluetooth/hci_core.c
4546 +++ b/net/bluetooth/hci_core.c
4547 @@ -1297,6 +1297,12 @@ int hci_inquiry(void __user *arg)
4548 goto done;
4549 }
4550
4551 + /* Restrict maximum inquiry length to 60 seconds */
4552 + if (ir.length > 60) {
4553 + err = -EINVAL;
4554 + goto done;
4555 + }
4556 +
4557 hci_dev_lock(hdev);
4558 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
4559 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
4560 @@ -1685,6 +1691,14 @@ int hci_dev_do_close(struct hci_dev *hdev)
4561 hci_request_cancel_all(hdev);
4562 hci_req_sync_lock(hdev);
4563
4564 + if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
4565 + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4566 + test_bit(HCI_UP, &hdev->flags)) {
4567 + /* Execute vendor specific shutdown routine */
4568 + if (hdev->shutdown)
4569 + hdev->shutdown(hdev);
4570 + }
4571 +
4572 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
4573 cancel_delayed_work_sync(&hdev->cmd_timer);
4574 hci_req_sync_unlock(hdev);
4575 diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
4576 index b91d6b440fdf6..1b7540cb8e5c4 100644
4577 --- a/net/bluetooth/sco.c
4578 +++ b/net/bluetooth/sco.c
4579 @@ -84,7 +84,6 @@ static void sco_sock_timeout(struct timer_list *t)
4580 sk->sk_state_change(sk);
4581 bh_unlock_sock(sk);
4582
4583 - sco_sock_kill(sk);
4584 sock_put(sk);
4585 }
4586
4587 @@ -176,7 +175,6 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
4588 sco_sock_clear_timer(sk);
4589 sco_chan_del(sk, err);
4590 bh_unlock_sock(sk);
4591 - sco_sock_kill(sk);
4592 sock_put(sk);
4593 }
4594
4595 @@ -393,8 +391,7 @@ static void sco_sock_cleanup_listen(struct sock *parent)
4596 */
4597 static void sco_sock_kill(struct sock *sk)
4598 {
4599 - if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket ||
4600 - sock_flag(sk, SOCK_DEAD))
4601 + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
4602 return;
4603
4604 BT_DBG("sk %p state %d", sk, sk->sk_state);
4605 @@ -446,7 +443,6 @@ static void sco_sock_close(struct sock *sk)
4606 lock_sock(sk);
4607 __sco_sock_close(sk);
4608 release_sock(sk);
4609 - sco_sock_kill(sk);
4610 }
4611
4612 static void sco_sock_init(struct sock *sk, struct sock *parent)
4613 @@ -761,6 +757,11 @@ static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)
4614 cp.max_latency = cpu_to_le16(0xffff);
4615 cp.retrans_effort = 0xff;
4616 break;
4617 + default:
4618 + /* use CVSD settings as fallback */
4619 + cp.max_latency = cpu_to_le16(0xffff);
4620 + cp.retrans_effort = 0xff;
4621 + break;
4622 }
4623
4624 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
4625 diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
4626 index c303873496a34..9bf15512601bf 100644
4627 --- a/net/core/net_namespace.c
4628 +++ b/net/core/net_namespace.c
4629 @@ -211,9 +211,9 @@ static int net_eq_idr(int id, void *net, void *peer)
4630 return 0;
4631 }
4632
4633 -/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
4634 - * is set to true, thus the caller knows that the new id must be notified via
4635 - * rtnl.
4636 +/* Must be called from RCU-critical section or with nsid_lock held. If
4637 + * a new id is assigned, the bool alloc is set to true, thus the
4638 + * caller knows that the new id must be notified via rtnl.
4639 */
4640 static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
4641 {
4642 @@ -237,7 +237,7 @@ static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
4643 return NETNSA_NSID_NOT_ASSIGNED;
4644 }
4645
4646 -/* should be called with nsid_lock held */
4647 +/* Must be called from RCU-critical section or with nsid_lock held */
4648 static int __peernet2id(struct net *net, struct net *peer)
4649 {
4650 bool no = false;
4651 @@ -281,9 +281,10 @@ int peernet2id(struct net *net, struct net *peer)
4652 {
4653 int id;
4654
4655 - spin_lock_bh(&net->nsid_lock);
4656 + rcu_read_lock();
4657 id = __peernet2id(net, peer);
4658 - spin_unlock_bh(&net->nsid_lock);
4659 + rcu_read_unlock();
4660 +
4661 return id;
4662 }
4663 EXPORT_SYMBOL(peernet2id);
4664 @@ -962,6 +963,7 @@ struct rtnl_net_dump_cb {
4665 int s_idx;
4666 };
4667
4668 +/* Runs in RCU-critical section. */
4669 static int rtnl_net_dumpid_one(int id, void *peer, void *data)
4670 {
4671 struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
4672 @@ -1046,19 +1048,9 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
4673 goto end;
4674 }
4675
4676 - spin_lock_bh(&net_cb.tgt_net->nsid_lock);
4677 - if (net_cb.fillargs.add_ref &&
4678 - !net_eq(net_cb.ref_net, net_cb.tgt_net) &&
4679 - !spin_trylock_bh(&net_cb.ref_net->nsid_lock)) {
4680 - spin_unlock_bh(&net_cb.tgt_net->nsid_lock);
4681 - err = -EAGAIN;
4682 - goto end;
4683 - }
4684 + rcu_read_lock();
4685 idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
4686 - if (net_cb.fillargs.add_ref &&
4687 - !net_eq(net_cb.ref_net, net_cb.tgt_net))
4688 - spin_unlock_bh(&net_cb.ref_net->nsid_lock);
4689 - spin_unlock_bh(&net_cb.tgt_net->nsid_lock);
4690 + rcu_read_unlock();
4691
4692 cb->args[0] = net_cb.idx;
4693 end:
4694 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
4695 index 0e976848d4bb9..539492998864e 100644
4696 --- a/net/ipv4/route.c
4697 +++ b/net/ipv4/route.c
4698 @@ -610,18 +610,25 @@ static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
4699 }
4700 }
4701
4702 -static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
4703 +static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
4704 {
4705 - struct fib_nh_exception *fnhe, *oldest;
4706 + struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
4707 + struct fib_nh_exception *fnhe, *oldest = NULL;
4708
4709 - oldest = rcu_dereference(hash->chain);
4710 - for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
4711 - fnhe = rcu_dereference(fnhe->fnhe_next)) {
4712 - if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
4713 + for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
4714 + fnhe = rcu_dereference_protected(*fnhe_p,
4715 + lockdep_is_held(&fnhe_lock));
4716 + if (!fnhe)
4717 + break;
4718 + if (!oldest ||
4719 + time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {
4720 oldest = fnhe;
4721 + oldest_p = fnhe_p;
4722 + }
4723 }
4724 fnhe_flush_routes(oldest);
4725 - return oldest;
4726 + *oldest_p = oldest->fnhe_next;
4727 + kfree_rcu(oldest, rcu);
4728 }
4729
4730 static inline u32 fnhe_hashfun(__be32 daddr)
4731 @@ -700,16 +707,21 @@ static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
4732 if (rt)
4733 fill_route_from_fnhe(rt, fnhe);
4734 } else {
4735 - if (depth > FNHE_RECLAIM_DEPTH)
4736 - fnhe = fnhe_oldest(hash);
4737 - else {
4738 - fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
4739 - if (!fnhe)
4740 - goto out_unlock;
4741 -
4742 - fnhe->fnhe_next = hash->chain;
4743 - rcu_assign_pointer(hash->chain, fnhe);
4744 + /* Randomize max depth to avoid some side channels attacks. */
4745 + int max_depth = FNHE_RECLAIM_DEPTH +
4746 + prandom_u32_max(FNHE_RECLAIM_DEPTH);
4747 +
4748 + while (depth > max_depth) {
4749 + fnhe_remove_oldest(hash);
4750 + depth--;
4751 }
4752 +
4753 + fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
4754 + if (!fnhe)
4755 + goto out_unlock;
4756 +
4757 + fnhe->fnhe_next = hash->chain;
4758 +
4759 fnhe->fnhe_genid = genid;
4760 fnhe->fnhe_daddr = daddr;
4761 fnhe->fnhe_gw = gw;
4762 @@ -717,6 +729,8 @@ static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
4763 fnhe->fnhe_mtu_locked = lock;
4764 fnhe->fnhe_expires = max(1UL, expires);
4765
4766 + rcu_assign_pointer(hash->chain, fnhe);
4767 +
4768 /* Exception created; mark the cached routes for the nexthop
4769 * stale, so anyone caching it rechecks if this exception
4770 * applies to them.
4771 @@ -2990,7 +3004,7 @@ static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
4772 udph = skb_put_zero(skb, sizeof(struct udphdr));
4773 udph->source = sport;
4774 udph->dest = dport;
4775 - udph->len = sizeof(struct udphdr);
4776 + udph->len = htons(sizeof(struct udphdr));
4777 udph->check = 0;
4778 break;
4779 }
4780 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
4781 index 91788ff19a5d4..2ce85e52aea7c 100644
4782 --- a/net/ipv4/tcp_ipv4.c
4783 +++ b/net/ipv4/tcp_ipv4.c
4784 @@ -2304,6 +2304,7 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
4785 static void *tcp_seek_last_pos(struct seq_file *seq)
4786 {
4787 struct tcp_iter_state *st = seq->private;
4788 + int bucket = st->bucket;
4789 int offset = st->offset;
4790 int orig_num = st->num;
4791 void *rc = NULL;
4792 @@ -2314,7 +2315,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
4793 break;
4794 st->state = TCP_SEQ_STATE_LISTENING;
4795 rc = listening_get_next(seq, NULL);
4796 - while (offset-- && rc)
4797 + while (offset-- && rc && bucket == st->bucket)
4798 rc = listening_get_next(seq, rc);
4799 if (rc)
4800 break;
4801 @@ -2325,7 +2326,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
4802 if (st->bucket > tcp_hashinfo.ehash_mask)
4803 break;
4804 rc = established_get_first(seq);
4805 - while (offset-- && rc)
4806 + while (offset-- && rc && bucket == st->bucket)
4807 rc = established_get_next(seq, rc);
4808 }
4809
4810 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4811 index d6fc22f7d7a67..575bd0f1b0089 100644
4812 --- a/net/ipv6/route.c
4813 +++ b/net/ipv6/route.c
4814 @@ -1667,6 +1667,7 @@ static int rt6_insert_exception(struct rt6_info *nrt,
4815 struct in6_addr *src_key = NULL;
4816 struct rt6_exception *rt6_ex;
4817 struct fib6_nh *nh = res->nh;
4818 + int max_depth;
4819 int err = 0;
4820
4821 spin_lock_bh(&rt6_exception_lock);
4822 @@ -1721,7 +1722,9 @@ static int rt6_insert_exception(struct rt6_info *nrt,
4823 bucket->depth++;
4824 net->ipv6.rt6_stats->fib_rt_cache++;
4825
4826 - if (bucket->depth > FIB6_MAX_DEPTH)
4827 + /* Randomize max depth to avoid some side channels attacks. */
4828 + max_depth = FIB6_MAX_DEPTH + prandom_u32_max(FIB6_MAX_DEPTH);
4829 + while (bucket->depth > max_depth)
4830 rt6_exception_remove_oldest(bucket);
4831
4832 out:
4833 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
4834 index 538722522ffe9..4dfac7a25e5ad 100644
4835 --- a/net/mac80211/tx.c
4836 +++ b/net/mac80211/tx.c
4837 @@ -3189,7 +3189,9 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
4838 if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
4839 return true;
4840
4841 - if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
4842 + if (!ieee80211_amsdu_realloc_pad(local, skb,
4843 + sizeof(*amsdu_hdr) +
4844 + local->hw.extra_tx_headroom))
4845 return false;
4846
4847 data = skb_push(skb, sizeof(*amsdu_hdr));
4848 diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
4849 index 4cb43a2c07d14..8cd3daf0e3db6 100644
4850 --- a/net/netlabel/netlabel_cipso_v4.c
4851 +++ b/net/netlabel/netlabel_cipso_v4.c
4852 @@ -187,14 +187,14 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
4853 }
4854 doi_def->map.std->lvl.local = kcalloc(doi_def->map.std->lvl.local_size,
4855 sizeof(u32),
4856 - GFP_KERNEL);
4857 + GFP_KERNEL | __GFP_NOWARN);
4858 if (doi_def->map.std->lvl.local == NULL) {
4859 ret_val = -ENOMEM;
4860 goto add_std_failure;
4861 }
4862 doi_def->map.std->lvl.cipso = kcalloc(doi_def->map.std->lvl.cipso_size,
4863 sizeof(u32),
4864 - GFP_KERNEL);
4865 + GFP_KERNEL | __GFP_NOWARN);
4866 if (doi_def->map.std->lvl.cipso == NULL) {
4867 ret_val = -ENOMEM;
4868 goto add_std_failure;
4869 @@ -263,7 +263,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
4870 doi_def->map.std->cat.local = kcalloc(
4871 doi_def->map.std->cat.local_size,
4872 sizeof(u32),
4873 - GFP_KERNEL);
4874 + GFP_KERNEL | __GFP_NOWARN);
4875 if (doi_def->map.std->cat.local == NULL) {
4876 ret_val = -ENOMEM;
4877 goto add_std_failure;
4878 @@ -271,7 +271,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
4879 doi_def->map.std->cat.cipso = kcalloc(
4880 doi_def->map.std->cat.cipso_size,
4881 sizeof(u32),
4882 - GFP_KERNEL);
4883 + GFP_KERNEL | __GFP_NOWARN);
4884 if (doi_def->map.std->cat.cipso == NULL) {
4885 ret_val = -ENOMEM;
4886 goto add_std_failure;
4887 diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
4888 index 39b427dc75128..e5972889cd81c 100644
4889 --- a/net/sched/sch_cbq.c
4890 +++ b/net/sched/sch_cbq.c
4891 @@ -1614,7 +1614,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
4892 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
4893 if (err) {
4894 kfree(cl);
4895 - return err;
4896 + goto failure;
4897 }
4898
4899 if (tca[TCA_RATE]) {
4900 diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
4901 index d2054bec49094..748f3ee27b23d 100644
4902 --- a/security/integrity/ima/Kconfig
4903 +++ b/security/integrity/ima/Kconfig
4904 @@ -6,7 +6,6 @@ config IMA
4905 select SECURITYFS
4906 select CRYPTO
4907 select CRYPTO_HMAC
4908 - select CRYPTO_MD5
4909 select CRYPTO_SHA1
4910 select CRYPTO_HASH_INFO
4911 select TCG_TPM if HAS_IOMEM && !UML
4912 diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c
4913 index 1e5c019161738..95cc31525c573 100644
4914 --- a/security/integrity/ima/ima_mok.c
4915 +++ b/security/integrity/ima/ima_mok.c
4916 @@ -21,7 +21,7 @@ struct key *ima_blacklist_keyring;
4917 /*
4918 * Allocate the IMA blacklist keyring
4919 */
4920 -__init int ima_mok_init(void)
4921 +static __init int ima_mok_init(void)
4922 {
4923 struct key_restriction *restriction;
4924
4925 diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
4926 index 81906c25e4a87..016aff97e2fb2 100644
4927 --- a/sound/soc/codecs/wcd9335.c
4928 +++ b/sound/soc/codecs/wcd9335.c
4929 @@ -4076,6 +4076,16 @@ static int wcd9335_setup_irqs(struct wcd9335_codec *wcd)
4930 return ret;
4931 }
4932
4933 +static void wcd9335_teardown_irqs(struct wcd9335_codec *wcd)
4934 +{
4935 + int i;
4936 +
4937 + /* disable interrupts on all slave ports */
4938 + for (i = 0; i < WCD9335_SLIM_NUM_PORT_REG; i++)
4939 + regmap_write(wcd->if_regmap, WCD9335_SLIM_PGD_PORT_INT_EN0 + i,
4940 + 0x00);
4941 +}
4942 +
4943 static void wcd9335_cdc_sido_ccl_enable(struct wcd9335_codec *wcd,
4944 bool ccl_flag)
4945 {
4946 @@ -4844,6 +4854,7 @@ static void wcd9335_codec_init(struct snd_soc_component *component)
4947 static int wcd9335_codec_probe(struct snd_soc_component *component)
4948 {
4949 struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
4950 + int ret;
4951 int i;
4952
4953 snd_soc_component_init_regmap(component, wcd->regmap);
4954 @@ -4861,7 +4872,15 @@ static int wcd9335_codec_probe(struct snd_soc_component *component)
4955 for (i = 0; i < NUM_CODEC_DAIS; i++)
4956 INIT_LIST_HEAD(&wcd->dai[i].slim_ch_list);
4957
4958 - return wcd9335_setup_irqs(wcd);
4959 + ret = wcd9335_setup_irqs(wcd);
4960 + if (ret)
4961 + goto free_clsh_ctrl;
4962 +
4963 + return 0;
4964 +
4965 +free_clsh_ctrl:
4966 + wcd_clsh_ctrl_free(wcd->clsh_ctrl);
4967 + return ret;
4968 }
4969
4970 static void wcd9335_codec_remove(struct snd_soc_component *comp)
4971 @@ -4869,7 +4888,7 @@ static void wcd9335_codec_remove(struct snd_soc_component *comp)
4972 struct wcd9335_codec *wcd = dev_get_drvdata(comp->dev);
4973
4974 wcd_clsh_ctrl_free(wcd->clsh_ctrl);
4975 - free_irq(regmap_irq_get_virq(wcd->irq_data, WCD9335_IRQ_SLIMBUS), wcd);
4976 + wcd9335_teardown_irqs(wcd);
4977 }
4978
4979 static int wcd9335_codec_set_sysclk(struct snd_soc_component *comp,
4980 diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
4981 index 1940b17f27efa..254b796e635d1 100644
4982 --- a/sound/soc/intel/skylake/skl-topology.c
4983 +++ b/sound/soc/intel/skylake/skl-topology.c
4984 @@ -113,7 +113,7 @@ static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
4985
4986 static void skl_dump_mconfig(struct skl_dev *skl, struct skl_module_cfg *mcfg)
4987 {
4988 - struct skl_module_iface *iface = &mcfg->module->formats[0];
4989 + struct skl_module_iface *iface = &mcfg->module->formats[mcfg->fmt_idx];
4990
4991 dev_dbg(skl->dev, "Dumping config\n");
4992 dev_dbg(skl->dev, "Input Format:\n");
4993 @@ -195,8 +195,8 @@ static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
4994 struct skl_module_fmt *in_fmt, *out_fmt;
4995
4996 /* Fixups will be applied to pin 0 only */
4997 - in_fmt = &m_cfg->module->formats[0].inputs[0].fmt;
4998 - out_fmt = &m_cfg->module->formats[0].outputs[0].fmt;
4999 + in_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].inputs[0].fmt;
5000 + out_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].outputs[0].fmt;
5001
5002 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
5003 if (is_fe) {
5004 @@ -239,9 +239,9 @@ static void skl_tplg_update_buffer_size(struct skl_dev *skl,
5005 /* Since fixups is applied to pin 0 only, ibs, obs needs
5006 * change for pin 0 only
5007 */
5008 - res = &mcfg->module->resources[0];
5009 - in_fmt = &mcfg->module->formats[0].inputs[0].fmt;
5010 - out_fmt = &mcfg->module->formats[0].outputs[0].fmt;
5011 + res = &mcfg->module->resources[mcfg->res_idx];
5012 + in_fmt = &mcfg->module->formats[mcfg->fmt_idx].inputs[0].fmt;
5013 + out_fmt = &mcfg->module->formats[mcfg->fmt_idx].outputs[0].fmt;
5014
5015 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
5016 multiplier = 5;
5017 @@ -1463,12 +1463,6 @@ static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
5018 struct skl_dev *skl = get_skl_ctx(w->dapm->dev);
5019
5020 if (ac->params) {
5021 - /*
5022 - * Widget data is expected to be stripped of T and L
5023 - */
5024 - size -= 2 * sizeof(unsigned int);
5025 - data += 2;
5026 -
5027 if (size > ac->max)
5028 return -EINVAL;
5029 ac->size = size;
5030 @@ -1637,11 +1631,12 @@ int skl_tplg_update_pipe_params(struct device *dev,
5031 struct skl_module_cfg *mconfig,
5032 struct skl_pipe_params *params)
5033 {
5034 - struct skl_module_res *res = &mconfig->module->resources[0];
5035 + struct skl_module_res *res;
5036 struct skl_dev *skl = get_skl_ctx(dev);
5037 struct skl_module_fmt *format = NULL;
5038 u8 cfg_idx = mconfig->pipe->cur_config_idx;
5039
5040 + res = &mconfig->module->resources[mconfig->res_idx];
5041 skl_tplg_fill_dma_id(mconfig, params);
5042 mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
5043 mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
5044 @@ -1650,9 +1645,9 @@ int skl_tplg_update_pipe_params(struct device *dev,
5045 return 0;
5046
5047 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
5048 - format = &mconfig->module->formats[0].inputs[0].fmt;
5049 + format = &mconfig->module->formats[mconfig->fmt_idx].inputs[0].fmt;
5050 else
5051 - format = &mconfig->module->formats[0].outputs[0].fmt;
5052 + format = &mconfig->module->formats[mconfig->fmt_idx].outputs[0].fmt;
5053
5054 /* set the hw_params */
5055 format->s_freq = params->s_freq;
5056 diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
5057 index 8649422e760cc..63038eb23560b 100644
5058 --- a/tools/include/uapi/linux/bpf.h
5059 +++ b/tools/include/uapi/linux/bpf.h
5060 @@ -2264,7 +2264,7 @@ union bpf_attr {
5061 * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
5062 * Description
5063 * Select a **SO_REUSEPORT** socket from a
5064 - * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
5065 + * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
5066 * It checks the selected socket is matching the incoming
5067 * request in the socket buffer.
5068 * Return