Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0151-5.4.52-all-fixes.patch



Revision 3547
Fri Jul 24 06:59:28 2020 UTC by niro
File size: 178185 bytes
-linux-5.4.52
1 diff --git a/Makefile b/Makefile
2 index 6ac83669e073..435d27be54c9 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 51
10 +SUBLEVEL = 52
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
15 index c77a0e3671ac..0284ace0e1ab 100644
16 --- a/arch/arc/include/asm/elf.h
17 +++ b/arch/arc/include/asm/elf.h
18 @@ -19,7 +19,7 @@
19 #define R_ARC_32_PCREL 0x31
20
21 /*to set parameters in the core dumps */
22 -#define ELF_ARCH EM_ARCOMPACT
23 +#define ELF_ARCH EM_ARC_INUSE
24 #define ELF_CLASS ELFCLASS32
25
26 #ifdef CONFIG_CPU_BIG_ENDIAN
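
The hunk above stops hardcoding the ARCompact machine type into core dumps: EM_ARC_INUSE selects the e_machine value matching the ISA the kernel was built for (ARCompact vs. ARCv2), so tooling such as gdb no longer rejects ARCv2 core files that claimed to be ARCompact. A minimal userspace check of what an ELF core advertises (EM_ARC_COMPACT and EM_ARC_COMPACT2 are the glibc names for these machine numbers):

#include <elf.h>
#include <stdio.h>

/* Print the machine type an ELF file advertises; a core dumped with
 * the wrong ELF_ARCH fails the debugger's architecture check. */
int main(int argc, char **argv)
{
	Elf32_Ehdr eh;
	FILE *f = argc > 1 ? fopen(argv[1], "rb") : NULL;

	if (!f || fread(&eh, sizeof(eh), 1, f) != 1)
		return 1;
	printf("e_machine = %u (ARCompact = %u, ARCv2 = %u)\n",
	       eh.e_machine, EM_ARC_COMPACT, EM_ARC_COMPACT2);
	fclose(f);
	return 0;
}
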
27 diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
28 index 72be01270e24..ea74a1eee5d9 100644
29 --- a/arch/arc/kernel/entry.S
30 +++ b/arch/arc/kernel/entry.S
31 @@ -153,7 +153,6 @@ END(EV_Extension)
32 tracesys:
33 ; save EFA in case tracer wants the PC of traced task
34 ; using ERET won't work since next-PC has already committed
35 - lr r12, [efa]
36 GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
37 st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
38
39 @@ -196,15 +195,9 @@ tracesys_exit:
40 ; Breakpoint TRAP
41 ; ---------------------------------------------
42 trap_with_param:
43 -
44 - ; stop_pc info by gdb needs this info
45 - lr r0, [efa]
46 + mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc
47 mov r1, sp
48
49 - ; Now that we have read EFA, it is safe to do "fake" rtie
50 - ; and get out of CPU exception mode
51 - FAKE_RET_FROM_EXCPN
52 -
53 ; Save callee regs in case gdb wants to have a look
54 ; SP will grow up by size of CALLEE Reg-File
55 ; NOTE: clobbers r12
56 @@ -231,6 +224,10 @@ ENTRY(EV_Trap)
57
58 EXCEPTION_PROLOGUE
59
60 + lr r12, [efa]
61 +
62 + FAKE_RET_FROM_EXCPN
63 +
64 ;============ TRAP 1 :breakpoints
65 ; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR)
66 bmsk.f 0, r10, 7
67 @@ -238,9 +235,6 @@ ENTRY(EV_Trap)
68
69 ;============ TRAP (no param): syscall top level
70
71 - ; First return from Exception to pure K mode (Exception/IRQs renabled)
72 - FAKE_RET_FROM_EXCPN
73 -
74 ; If syscall tracing ongoing, invoke pre-post-hooks
75 GET_CURR_THR_INFO_FLAGS r10
76 btst r10, TIF_SYSCALL_TRACE
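
The three hunks above reorder the trap path: the EFA (exception fault address) register is now read once in the common EV_Trap prologue, before FAKE_RET_FROM_EXCPN leaves exception mode, and both the breakpoint path and the syscall-tracing path consume the saved copy in r12. The hazard being fixed is a read-after-invalidate ordering problem; a rough C analogy, where exception_fault_addr and leave_exception_mode() are stand-ins rather than kernel APIs:

#include <stdio.h>

static int exception_fault_addr = 0x1234;  /* stand-in for the EFA reg */

static void leave_exception_mode(void)     /* stand-in for FAKE_RET_FROM_EXCPN */
{
	exception_fault_addr = 0;          /* leaving the mode can clobber EFA */
}

int main(void)
{
	/* Fixed ordering: capture the volatile value first ... */
	int saved = exception_fault_addr;

	leave_exception_mode();
	/* ... then every later consumer uses the saved copy. */
	printf("stop_pc for gdb: %#x\n", saved);
	return 0;
}
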
77 diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
78 index 82f7ae030600..ab91c4ebb146 100644
79 --- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
80 +++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
81 @@ -13,8 +13,10 @@
82 #interrupt-cells = <2>;
83 #address-cells = <1>;
84 #size-cells = <0>;
85 - spi-max-frequency = <3000000>;
86 + spi-max-frequency = <9600000>;
87 spi-cs-high;
88 + spi-cpol;
89 + spi-cpha;
90
91 cpcap_adc: adc {
92 compatible = "motorola,mapphone-cpcap-adc";
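
Besides raising the CPCAP bus clock from 3 MHz to 9.6 MHz, the hunk adds spi-cpol and spi-cpha, which together select SPI mode 3. These device-tree properties map onto the standard Linux SPI mode bits, as this small check shows:

#include <stdio.h>
#include <linux/spi/spidev.h>

/* spi-cpol + spi-cpha in the device tree correspond to the SPI_CPOL
 * and SPI_CPHA mode bits; together they request SPI mode 3. */
int main(void)
{
	printf("mode = %d (SPI_MODE_3 = %d)\n",
	       SPI_CPOL | SPI_CPHA, SPI_MODE_3);
	return 0;
}
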
93 diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
94 index 1c0ecad3620e..baf3b47601af 100644
95 --- a/arch/arm/mach-imx/pm-imx6.c
96 +++ b/arch/arm/mach-imx/pm-imx6.c
97 @@ -493,14 +493,14 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
98 if (!ocram_pool) {
99 pr_warn("%s: ocram pool unavailable!\n", __func__);
100 ret = -ENODEV;
101 - goto put_node;
102 + goto put_device;
103 }
104
105 ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE);
106 if (!ocram_base) {
107 pr_warn("%s: unable to alloc ocram!\n", __func__);
108 ret = -ENOMEM;
109 - goto put_node;
110 + goto put_device;
111 }
112
113 ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);
114 @@ -523,7 +523,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
115 ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat);
116 if (ret) {
117 pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret);
118 - goto put_node;
119 + goto put_device;
120 }
121
122 ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat);
123 @@ -570,7 +570,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
124 &imx6_suspend,
125 MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info));
126
127 - goto put_node;
128 + goto put_device;
129
130 pl310_cache_map_failed:
131 iounmap(pm_info->gpc_base.vbase);
132 @@ -580,6 +580,8 @@ iomuxc_map_failed:
133 iounmap(pm_info->src_base.vbase);
134 src_map_failed:
135 iounmap(pm_info->mmdc_base.vbase);
136 +put_device:
137 + put_device(&pdev->dev);
138 put_node:
139 of_node_put(node);
140
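
The fix above is purely about the error-path ladder: the function holds a reference on the platform device while it uses the OCRAM pool, so every exit after that point must drop it with put_device() before falling through to of_node_put(). A minimal, self-contained sketch of the same stacked-goto unwind idiom (get_resource_a/b and the put_* helpers are illustrative, not the driver's API):

#include <stdio.h>

static int get_resource_a(void) { return 0; }   /* 0 = success */
static int get_resource_b(void) { return -1; }  /* force a failure */
static void put_device_ref(void) { puts("put_device()"); }
static void put_node_ref(void)   { puts("of_node_put()"); }

static int setup(void)
{
	int ret = 0;

	if (get_resource_a()) {
		ret = -1;
		goto put_node;          /* nothing extra held yet */
	}
	if (get_resource_b()) {
		ret = -1;
		goto put_device;        /* unwind in reverse order */
	}
	/* Success also falls through: as in the patched driver, the
	 * reference is only needed during initialization. */
put_device:
	put_device_ref();
put_node:
	put_node_ref();
	return ret;
}

int main(void) { return setup() ? 1 : 0; }
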
141 diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
142 index 89e4c8b79349..ee9bdaa40532 100644
143 --- a/arch/arm64/include/asm/arch_gicv3.h
144 +++ b/arch/arm64/include/asm/arch_gicv3.h
145 @@ -109,7 +109,7 @@ static inline u32 gic_read_pmr(void)
146 return read_sysreg_s(SYS_ICC_PMR_EL1);
147 }
148
149 -static inline void gic_write_pmr(u32 val)
150 +static __always_inline void gic_write_pmr(u32 val)
151 {
152 write_sysreg_s(val, SYS_ICC_PMR_EL1);
153 }
154 diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
155 index 9cde5d2e768f..10d3048dec7c 100644
156 --- a/arch/arm64/include/asm/cpufeature.h
157 +++ b/arch/arm64/include/asm/cpufeature.h
158 @@ -601,7 +601,7 @@ static inline bool system_supports_generic_auth(void)
159 cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF));
160 }
161
162 -static inline bool system_uses_irq_prio_masking(void)
163 +static __always_inline bool system_uses_irq_prio_masking(void)
164 {
165 return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
166 cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
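
Both hunks above promote a plain inline to __always_inline. In the kernel, `inline` is only a hint and the compiler may emit an out-of-line copy, which is presumably unsafe for helpers like gic_write_pmr() in the early-entry and pseudo-NMI paths that use them. The kernel's __always_inline macro expands to essentially the attribute shown in this standalone illustration:

#include <stdio.h>

/* Forces inlining even when the optimizer would decline. */
#define my_always_inline inline __attribute__((__always_inline__))

static my_always_inline int square(int x)
{
	return x * x;
}

int main(void)
{
	printf("%d\n", square(7));  /* no out-of-line square() is emitted */
	return 0;
}
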
167 diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
168 index baf52baaa2a5..99b0a32e25c1 100644
169 --- a/arch/arm64/include/asm/pgtable-prot.h
170 +++ b/arch/arm64/include/asm/pgtable-prot.h
171 @@ -54,7 +54,7 @@
172 #define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
173 #define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
174 #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
175 -#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
176 +#define PAGE_HYP_DEVICE __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN)
177
178 #define PAGE_S2_MEMATTR(attr) \
179 ({ \
180 diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
181 index 43119922341f..1a157ca33262 100644
182 --- a/arch/arm64/kernel/kgdb.c
183 +++ b/arch/arm64/kernel/kgdb.c
184 @@ -252,7 +252,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
185 if (!kgdb_single_step)
186 return DBG_HOOK_ERROR;
187
188 - kgdb_handle_exception(1, SIGTRAP, 0, regs);
189 + kgdb_handle_exception(0, SIGTRAP, 0, regs);
190 return DBG_HOOK_HANDLED;
191 }
192 NOKPROBE_SYMBOL(kgdb_step_brk_fn);
193 diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
194 index 160be2b4696d..dc41b505507d 100644
195 --- a/arch/arm64/kvm/hyp-init.S
196 +++ b/arch/arm64/kvm/hyp-init.S
197 @@ -136,11 +136,15 @@ ENTRY(__kvm_handle_stub_hvc)
198
199 1: cmp x0, #HVC_RESET_VECTORS
200 b.ne 1f
201 -reset:
202 +
203 /*
204 - * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
205 - * case we coming via HVC_SOFT_RESTART.
206 + * Set the HVC_RESET_VECTORS return code before entering the common
207 + * path so that we do not clobber x0-x2 in case we are coming via
208 + * HVC_SOFT_RESTART.
209 */
210 + mov x0, xzr
211 +reset:
212 + /* Reset kvm back to the hyp stub. */
213 mrs x5, sctlr_el2
214 ldr x6, =SCTLR_ELx_FLAGS
215 bic x5, x5, x6 // Clear SCTL_M and etc
216 @@ -151,7 +155,6 @@ reset:
217 /* Install stub vectors */
218 adr_l x5, __hyp_stub_vectors
219 msr vbar_el2, x5
220 - mov x0, xzr
221 eret
222
223 1: /* Bad stub call */
224 diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
225 index f4a8ae918827..784d485218ca 100644
226 --- a/arch/arm64/kvm/reset.c
227 +++ b/arch/arm64/kvm/reset.c
228 @@ -258,7 +258,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
229 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
230 {
231 const struct kvm_regs *cpu_reset;
232 - int ret = -EINVAL;
233 + int ret;
234 bool loaded;
235
236 /* Reset PMU outside of the non-preemptible section */
237 @@ -281,15 +281,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
238
239 if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
240 test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
241 - if (kvm_vcpu_enable_ptrauth(vcpu))
242 + if (kvm_vcpu_enable_ptrauth(vcpu)) {
243 + ret = -EINVAL;
244 goto out;
245 + }
246 }
247
248 switch (vcpu->arch.target) {
249 default:
250 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
251 - if (!cpu_has_32bit_el1())
252 + if (!cpu_has_32bit_el1()) {
253 + ret = -EINVAL;
254 goto out;
255 + }
256 cpu_reset = &default_regs_reset32;
257 } else {
258 cpu_reset = &default_regs_reset;
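
This change replaces a function-wide `int ret = -EINVAL;` default with an explicit assignment at each failure site. Behavior is unchanged, but every `goto out` now carries a locally visible status, avoiding the pattern where a later refactor reuses `ret` and a failure path silently inherits the wrong value. In miniature:

#include <stdio.h>

#define EINVAL 22

static int check_a(void) { return 1; }  /* nonzero = failure */

static int reset(void)
{
	int ret;                        /* no blanket default */

	if (check_a()) {
		ret = -EINVAL;          /* error chosen where it happens */
		goto out;
	}
	ret = 0;
out:
	return ret;
}

int main(void)
{
	printf("reset() = %d\n", reset());
	return 0;
}
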
259 diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
260 index 43b56f8f6beb..da8375437d16 100644
261 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
262 +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
263 @@ -38,7 +38,8 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
264 /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
265 if (kvmhv_on_pseries())
266 return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
267 - __pa(to), __pa(from), n);
268 + (to != NULL) ? __pa(to): 0,
269 + (from != NULL) ? __pa(from): 0, n);
270
271 quadrant = 1;
272 if (!pid)
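
__kvmhv_copy_tofrom_guest_radix() is called with exactly one of `to`/`from` non-NULL to select the copy direction, and applying __pa() to the NULL side produces a nonsense value rather than 0, so the hcall arguments must be filtered as the hunk does. The shape of the guard in a self-contained sketch, where fake_pa() and hcall_copy() are hypothetical stand-ins:

#include <stdio.h>
#include <stdint.h>

/* fake_pa() mimics a virt-to-phys translation for which NULL input
 * yields garbage, not 0. */
static uintptr_t fake_pa(const void *va)
{
	return (uintptr_t)va + 0x4000000000UL;  /* illustrative offset */
}

static void hcall_copy(uintptr_t to_pa, uintptr_t from_pa, size_t n)
{
	printf("hcall(to=%#lx, from=%#lx, n=%zu)\n",
	       (unsigned long)to_pa, (unsigned long)from_pa, n);
}

int main(void)
{
	char buf[16];
	void *to = buf, *from = NULL;   /* one direction only, as in the hunk */

	/* The fix: pass 0 for the unused direction, never fake_pa(NULL). */
	hcall_copy(to   ? fake_pa(to)   : 0,
	           from ? fake_pa(from) : 0,
	           sizeof(buf));
	return 0;
}
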
273 diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
274 index abe60268335d..0fe5600a037e 100644
275 --- a/arch/s390/include/asm/kvm_host.h
276 +++ b/arch/s390/include/asm/kvm_host.h
277 @@ -31,12 +31,12 @@
278 #define KVM_USER_MEM_SLOTS 32
279
280 /*
281 - * These seem to be used for allocating ->chip in the routing table,
282 - * which we don't use. 4096 is an out-of-thin-air value. If we need
283 - * to look at ->chip later on, we'll need to revisit this.
284 + * These seem to be used for allocating ->chip in the routing table, which we
285 + * don't use. 1 is as small as we can get to reduce the needed memory. If we
286 + * need to look at ->chip later on, we'll need to revisit this.
287 */
288 #define KVM_NR_IRQCHIPS 1
289 -#define KVM_IRQCHIP_NUM_PINS 4096
290 +#define KVM_IRQCHIP_NUM_PINS 1
291 #define KVM_HALT_POLL_NS_DEFAULT 50000
292
293 /* s390-specific vcpu->requests bit members */
294 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
295 index a470f1fa9f2a..324438889fe1 100644
296 --- a/arch/s390/include/asm/uaccess.h
297 +++ b/arch/s390/include/asm/uaccess.h
298 @@ -276,6 +276,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
299 }
300
301 int copy_to_user_real(void __user *dest, void *src, unsigned long count);
302 -void s390_kernel_write(void *dst, const void *src, size_t size);
303 +void *s390_kernel_write(void *dst, const void *src, size_t size);
304
305 #endif /* __S390_UACCESS_H */
306 diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
307 index b432d63d0b37..2531776cf6cf 100644
308 --- a/arch/s390/kernel/early.c
309 +++ b/arch/s390/kernel/early.c
310 @@ -169,6 +169,8 @@ static noinline __init void setup_lowcore_early(void)
311 psw_t psw;
312
313 psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
314 + if (IS_ENABLED(CONFIG_KASAN))
315 + psw.mask |= PSW_MASK_DAT;
316 psw.addr = (unsigned long) s390_base_ext_handler;
317 S390_lowcore.external_new_psw = psw;
318 psw.addr = (unsigned long) s390_base_pgm_handler;
319 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
320 index 4366962f4930..07b2b61a0289 100644
321 --- a/arch/s390/kernel/setup.c
322 +++ b/arch/s390/kernel/setup.c
323 @@ -1120,6 +1120,7 @@ void __init setup_arch(char **cmdline_p)
324 if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
325 nospec_auto_detect();
326
327 + jump_label_init();
328 parse_early_param();
329 #ifdef CONFIG_CRASH_DUMP
330 /* Deactivate elfcorehdr= kernel parameter */
331 diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
332 index 7dfae86afa47..ff8234bca56c 100644
333 --- a/arch/s390/mm/hugetlbpage.c
334 +++ b/arch/s390/mm/hugetlbpage.c
335 @@ -117,7 +117,7 @@ static inline pte_t __rste_to_pte(unsigned long rste)
336 _PAGE_YOUNG);
337 #ifdef CONFIG_MEM_SOFT_DIRTY
338 pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
339 - _PAGE_DIRTY);
340 + _PAGE_SOFT_DIRTY);
341 #endif
342 pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
343 _PAGE_NOEXEC);
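
The one-liner above fixes a copy-paste-style bug: when converting a segment-table entry to a pte, the segment's soft-dirty bit was propagated into _PAGE_DIRTY instead of _PAGE_SOFT_DIRTY, so the pte gained the hardware dirty bit and never the soft-dirty one, breaking soft-dirty tracking for huge pages. A sketch of a bit-propagating helper consistent with how move_set_bit() is used here (the mask values are illustrative, not s390's real ones):

#include <assert.h>

#define SEG_SOFT_DIRTY  0x4000UL  /* illustrative mask values only */
#define PAGE_SOFT_DIRTY 0x020UL
#define PAGE_DIRTY      0x010UL

/* Propagate one flag: if 'from' is set in x, return 'to', else 0. */
static unsigned long move_set_bit(unsigned long x, unsigned long from,
				  unsigned long to)
{
	return (x & from) ? to : 0;
}

int main(void)
{
	unsigned long rste = SEG_SOFT_DIRTY;

	/* Fixed mapping: soft-dirty stays soft-dirty ... */
	assert(move_set_bit(rste, SEG_SOFT_DIRTY, PAGE_SOFT_DIRTY)
	       == PAGE_SOFT_DIRTY);
	/* ... whereas the old code produced the hardware dirty bit: */
	assert(move_set_bit(rste, SEG_SOFT_DIRTY, PAGE_DIRTY) == PAGE_DIRTY);
	return 0;
}
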
344 diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
345 index de7ca4b6718f..1d17413b319a 100644
346 --- a/arch/s390/mm/maccess.c
347 +++ b/arch/s390/mm/maccess.c
348 @@ -55,19 +55,26 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz
349 */
350 static DEFINE_SPINLOCK(s390_kernel_write_lock);
351
352 -void notrace s390_kernel_write(void *dst, const void *src, size_t size)
353 +notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
354 {
355 + void *tmp = dst;
356 unsigned long flags;
357 long copied;
358
359 spin_lock_irqsave(&s390_kernel_write_lock, flags);
360 - while (size) {
361 - copied = s390_kernel_write_odd(dst, src, size);
362 - dst += copied;
363 - src += copied;
364 - size -= copied;
365 + if (!(flags & PSW_MASK_DAT)) {
366 + memcpy(dst, src, size);
367 + } else {
368 + while (size) {
369 + copied = s390_kernel_write_odd(tmp, src, size);
370 + tmp += copied;
371 + src += copied;
372 + size -= copied;
373 + }
374 }
375 spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
376 +
377 + return dst;
378 }
379
380 static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
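
Two coupled changes here: s390_kernel_write() now returns the destination pointer (the memcpy()-style contract declared by the uaccess.h prototype change above), and it falls back to a plain memcpy() when the saved PSW mask shows DAT was off (on s390, the `flags` from spin_lock_irqsave() carry the PSW mask), since the byte-wise s390_kernel_write_odd() machinery exists to bypass DAT write protection. A trivial demonstration of why the memcpy-shaped return is convenient:

#include <stdio.h>
#include <string.h>

/* memcpy-style contract: return dst so the call can be chained or
 * passed straight to another consumer, as the new prototype allows. */
static void *kernel_write_sketch(void *dst, const void *src, size_t size)
{
	return memcpy(dst, src, size);
}

int main(void)
{
	char out[32];

	/* The return value feeds directly into the next call. */
	puts(kernel_write_sketch(out, "patched", sizeof("patched")));
	return 0;
}
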
381 diff --git a/arch/x86/events/Kconfig b/arch/x86/events/Kconfig
382 index 9a7a1446cb3a..4a809c6cbd2f 100644
383 --- a/arch/x86/events/Kconfig
384 +++ b/arch/x86/events/Kconfig
385 @@ -10,11 +10,11 @@ config PERF_EVENTS_INTEL_UNCORE
386 available on NehalemEX and more modern processors.
387
388 config PERF_EVENTS_INTEL_RAPL
389 - tristate "Intel rapl performance events"
390 - depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
391 + tristate "Intel/AMD rapl performance events"
392 + depends on PERF_EVENTS && (CPU_SUP_INTEL || CPU_SUP_AMD) && PCI
393 default y
394 ---help---
395 - Include support for Intel rapl performance events for power
396 + Include support for Intel and AMD rapl performance events for power
397 monitoring on modern processors.
398
399 config PERF_EVENTS_INTEL_CSTATE
400 diff --git a/arch/x86/events/Makefile b/arch/x86/events/Makefile
401 index 9e07f554333f..726e83c0a31a 100644
402 --- a/arch/x86/events/Makefile
403 +++ b/arch/x86/events/Makefile
404 @@ -1,5 +1,6 @@
405 # SPDX-License-Identifier: GPL-2.0-only
406 obj-y += core.o probe.o
407 +obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += rapl.o
408 obj-y += amd/
409 obj-$(CONFIG_X86_LOCAL_APIC) += msr.o
410 obj-$(CONFIG_CPU_SUP_INTEL) += intel/
411 diff --git a/arch/x86/events/intel/Makefile b/arch/x86/events/intel/Makefile
412 index 3468b0c1dc7c..e67a5886336c 100644
413 --- a/arch/x86/events/intel/Makefile
414 +++ b/arch/x86/events/intel/Makefile
415 @@ -2,8 +2,6 @@
416 obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o
417 obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o
418 obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o
419 -obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl-perf.o
420 -intel-rapl-perf-objs := rapl.o
421 obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o
422 intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
423 obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o
424 diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
425 deleted file mode 100644
426 index 5053a403e4ae..000000000000
427 --- a/arch/x86/events/intel/rapl.c
428 +++ /dev/null
429 @@ -1,802 +0,0 @@
430 -// SPDX-License-Identifier: GPL-2.0-only
431 -/*
432 - * Support Intel RAPL energy consumption counters
433 - * Copyright (C) 2013 Google, Inc., Stephane Eranian
434 - *
435 - * Intel RAPL interface is specified in the IA-32 Manual Vol3b
436 - * section 14.7.1 (September 2013)
437 - *
438 - * RAPL provides more controls than just reporting energy consumption
439 - * however here we only expose the 3 energy consumption free running
440 - * counters (pp0, pkg, dram).
441 - *
442 - * Each of those counters increments in a power unit defined by the
443 - * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules
444 - * but it can vary.
445 - *
446 - * Counter to rapl events mappings:
447 - *
448 - * pp0 counter: consumption of all physical cores (power plane 0)
449 - * event: rapl_energy_cores
450 - * perf code: 0x1
451 - *
452 - * pkg counter: consumption of the whole processor package
453 - * event: rapl_energy_pkg
454 - * perf code: 0x2
455 - *
456 - * dram counter: consumption of the dram domain (servers only)
457 - * event: rapl_energy_dram
458 - * perf code: 0x3
459 - *
460 - * gpu counter: consumption of the builtin-gpu domain (client only)
461 - * event: rapl_energy_gpu
462 - * perf code: 0x4
463 - *
464 - * psys counter: consumption of the builtin-psys domain (client only)
465 - * event: rapl_energy_psys
466 - * perf code: 0x5
467 - *
468 - * We manage those counters as free running (read-only). They may be
469 - * use simultaneously by other tools, such as turbostat.
470 - *
471 - * The events only support system-wide mode counting. There is no
472 - * sampling support because it does not make sense and is not
473 - * supported by the RAPL hardware.
474 - *
475 - * Because we want to avoid floating-point operations in the kernel,
476 - * the events are all reported in fixed point arithmetic (32.32).
477 - * Tools must adjust the counts to convert them to Watts using
478 - * the duration of the measurement. Tools may use a function such as
479 - * ldexp(raw_count, -32);
480 - */
481 -
482 -#define pr_fmt(fmt) "RAPL PMU: " fmt
483 -
484 -#include <linux/module.h>
485 -#include <linux/slab.h>
486 -#include <linux/perf_event.h>
487 -#include <linux/nospec.h>
488 -#include <asm/cpu_device_id.h>
489 -#include <asm/intel-family.h>
490 -#include "../perf_event.h"
491 -#include "../probe.h"
492 -
493 -MODULE_LICENSE("GPL");
494 -
495 -/*
496 - * RAPL energy status counters
497 - */
498 -enum perf_rapl_events {
499 - PERF_RAPL_PP0 = 0, /* all cores */
500 - PERF_RAPL_PKG, /* entire package */
501 - PERF_RAPL_RAM, /* DRAM */
502 - PERF_RAPL_PP1, /* gpu */
503 - PERF_RAPL_PSYS, /* psys */
504 -
505 - PERF_RAPL_MAX,
506 - NR_RAPL_DOMAINS = PERF_RAPL_MAX,
507 -};
508 -
509 -static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
510 - "pp0-core",
511 - "package",
512 - "dram",
513 - "pp1-gpu",
514 - "psys",
515 -};
516 -
517 -/*
518 - * event code: LSB 8 bits, passed in attr->config
519 - * any other bit is reserved
520 - */
521 -#define RAPL_EVENT_MASK 0xFFULL
522 -
523 -#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \
524 -static ssize_t __rapl_##_var##_show(struct kobject *kobj, \
525 - struct kobj_attribute *attr, \
526 - char *page) \
527 -{ \
528 - BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
529 - return sprintf(page, _format "\n"); \
530 -} \
531 -static struct kobj_attribute format_attr_##_var = \
532 - __ATTR(_name, 0444, __rapl_##_var##_show, NULL)
533 -
534 -#define RAPL_CNTR_WIDTH 32
535 -
536 -#define RAPL_EVENT_ATTR_STR(_name, v, str) \
537 -static struct perf_pmu_events_attr event_attr_##v = { \
538 - .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
539 - .id = 0, \
540 - .event_str = str, \
541 -};
542 -
543 -struct rapl_pmu {
544 - raw_spinlock_t lock;
545 - int n_active;
546 - int cpu;
547 - struct list_head active_list;
548 - struct pmu *pmu;
549 - ktime_t timer_interval;
550 - struct hrtimer hrtimer;
551 -};
552 -
553 -struct rapl_pmus {
554 - struct pmu pmu;
555 - unsigned int maxdie;
556 - struct rapl_pmu *pmus[];
557 -};
558 -
559 -struct rapl_model {
560 - unsigned long events;
561 - bool apply_quirk;
562 -};
563 -
564 - /* 1/2^hw_unit Joule */
565 -static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
566 -static struct rapl_pmus *rapl_pmus;
567 -static cpumask_t rapl_cpu_mask;
568 -static unsigned int rapl_cntr_mask;
569 -static u64 rapl_timer_ms;
570 -static struct perf_msr rapl_msrs[];
571 -
572 -static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
573 -{
574 - unsigned int dieid = topology_logical_die_id(cpu);
575 -
576 - /*
577 - * The unsigned check also catches the '-1' return value for non
578 - * existent mappings in the topology map.
579 - */
580 - return dieid < rapl_pmus->maxdie ? rapl_pmus->pmus[dieid] : NULL;
581 -}
582 -
583 -static inline u64 rapl_read_counter(struct perf_event *event)
584 -{
585 - u64 raw;
586 - rdmsrl(event->hw.event_base, raw);
587 - return raw;
588 -}
589 -
590 -static inline u64 rapl_scale(u64 v, int cfg)
591 -{
592 - if (cfg > NR_RAPL_DOMAINS) {
593 - pr_warn("Invalid domain %d, failed to scale data\n", cfg);
594 - return v;
595 - }
596 - /*
597 - * scale delta to smallest unit (1/2^32)
598 - * users must then scale back: count * 1/(1e9*2^32) to get Joules
599 - * or use ldexp(count, -32).
600 - * Watts = Joules/Time delta
601 - */
602 - return v << (32 - rapl_hw_unit[cfg - 1]);
603 -}
604 -
605 -static u64 rapl_event_update(struct perf_event *event)
606 -{
607 - struct hw_perf_event *hwc = &event->hw;
608 - u64 prev_raw_count, new_raw_count;
609 - s64 delta, sdelta;
610 - int shift = RAPL_CNTR_WIDTH;
611 -
612 -again:
613 - prev_raw_count = local64_read(&hwc->prev_count);
614 - rdmsrl(event->hw.event_base, new_raw_count);
615 -
616 - if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
617 - new_raw_count) != prev_raw_count) {
618 - cpu_relax();
619 - goto again;
620 - }
621 -
622 - /*
623 - * Now we have the new raw value and have updated the prev
624 - * timestamp already. We can now calculate the elapsed delta
625 - * (event-)time and add that to the generic event.
626 - *
627 - * Careful, not all hw sign-extends above the physical width
628 - * of the count.
629 - */
630 - delta = (new_raw_count << shift) - (prev_raw_count << shift);
631 - delta >>= shift;
632 -
633 - sdelta = rapl_scale(delta, event->hw.config);
634 -
635 - local64_add(sdelta, &event->count);
636 -
637 - return new_raw_count;
638 -}
639 -
640 -static void rapl_start_hrtimer(struct rapl_pmu *pmu)
641 -{
642 - hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
643 - HRTIMER_MODE_REL_PINNED);
644 -}
645 -
646 -static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
647 -{
648 - struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
649 - struct perf_event *event;
650 - unsigned long flags;
651 -
652 - if (!pmu->n_active)
653 - return HRTIMER_NORESTART;
654 -
655 - raw_spin_lock_irqsave(&pmu->lock, flags);
656 -
657 - list_for_each_entry(event, &pmu->active_list, active_entry)
658 - rapl_event_update(event);
659 -
660 - raw_spin_unlock_irqrestore(&pmu->lock, flags);
661 -
662 - hrtimer_forward_now(hrtimer, pmu->timer_interval);
663 -
664 - return HRTIMER_RESTART;
665 -}
666 -
667 -static void rapl_hrtimer_init(struct rapl_pmu *pmu)
668 -{
669 - struct hrtimer *hr = &pmu->hrtimer;
670 -
671 - hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
672 - hr->function = rapl_hrtimer_handle;
673 -}
674 -
675 -static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
676 - struct perf_event *event)
677 -{
678 - if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
679 - return;
680 -
681 - event->hw.state = 0;
682 -
683 - list_add_tail(&event->active_entry, &pmu->active_list);
684 -
685 - local64_set(&event->hw.prev_count, rapl_read_counter(event));
686 -
687 - pmu->n_active++;
688 - if (pmu->n_active == 1)
689 - rapl_start_hrtimer(pmu);
690 -}
691 -
692 -static void rapl_pmu_event_start(struct perf_event *event, int mode)
693 -{
694 - struct rapl_pmu *pmu = event->pmu_private;
695 - unsigned long flags;
696 -
697 - raw_spin_lock_irqsave(&pmu->lock, flags);
698 - __rapl_pmu_event_start(pmu, event);
699 - raw_spin_unlock_irqrestore(&pmu->lock, flags);
700 -}
701 -
702 -static void rapl_pmu_event_stop(struct perf_event *event, int mode)
703 -{
704 - struct rapl_pmu *pmu = event->pmu_private;
705 - struct hw_perf_event *hwc = &event->hw;
706 - unsigned long flags;
707 -
708 - raw_spin_lock_irqsave(&pmu->lock, flags);
709 -
710 - /* mark event as deactivated and stopped */
711 - if (!(hwc->state & PERF_HES_STOPPED)) {
712 - WARN_ON_ONCE(pmu->n_active <= 0);
713 - pmu->n_active--;
714 - if (pmu->n_active == 0)
715 - hrtimer_cancel(&pmu->hrtimer);
716 -
717 - list_del(&event->active_entry);
718 -
719 - WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
720 - hwc->state |= PERF_HES_STOPPED;
721 - }
722 -
723 - /* check if update of sw counter is necessary */
724 - if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
725 - /*
726 - * Drain the remaining delta count out of a event
727 - * that we are disabling:
728 - */
729 - rapl_event_update(event);
730 - hwc->state |= PERF_HES_UPTODATE;
731 - }
732 -
733 - raw_spin_unlock_irqrestore(&pmu->lock, flags);
734 -}
735 -
736 -static int rapl_pmu_event_add(struct perf_event *event, int mode)
737 -{
738 - struct rapl_pmu *pmu = event->pmu_private;
739 - struct hw_perf_event *hwc = &event->hw;
740 - unsigned long flags;
741 -
742 - raw_spin_lock_irqsave(&pmu->lock, flags);
743 -
744 - hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
745 -
746 - if (mode & PERF_EF_START)
747 - __rapl_pmu_event_start(pmu, event);
748 -
749 - raw_spin_unlock_irqrestore(&pmu->lock, flags);
750 -
751 - return 0;
752 -}
753 -
754 -static void rapl_pmu_event_del(struct perf_event *event, int flags)
755 -{
756 - rapl_pmu_event_stop(event, PERF_EF_UPDATE);
757 -}
758 -
759 -static int rapl_pmu_event_init(struct perf_event *event)
760 -{
761 - u64 cfg = event->attr.config & RAPL_EVENT_MASK;
762 - int bit, ret = 0;
763 - struct rapl_pmu *pmu;
764 -
765 - /* only look at RAPL events */
766 - if (event->attr.type != rapl_pmus->pmu.type)
767 - return -ENOENT;
768 -
769 - /* check only supported bits are set */
770 - if (event->attr.config & ~RAPL_EVENT_MASK)
771 - return -EINVAL;
772 -
773 - if (event->cpu < 0)
774 - return -EINVAL;
775 -
776 - event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
777 -
778 - if (!cfg || cfg >= NR_RAPL_DOMAINS + 1)
779 - return -EINVAL;
780 -
781 - cfg = array_index_nospec((long)cfg, NR_RAPL_DOMAINS + 1);
782 - bit = cfg - 1;
783 -
784 - /* check event supported */
785 - if (!(rapl_cntr_mask & (1 << bit)))
786 - return -EINVAL;
787 -
788 - /* unsupported modes and filters */
789 - if (event->attr.sample_period) /* no sampling */
790 - return -EINVAL;
791 -
792 - /* must be done before validate_group */
793 - pmu = cpu_to_rapl_pmu(event->cpu);
794 - if (!pmu)
795 - return -EINVAL;
796 - event->cpu = pmu->cpu;
797 - event->pmu_private = pmu;
798 - event->hw.event_base = rapl_msrs[bit].msr;
799 - event->hw.config = cfg;
800 - event->hw.idx = bit;
801 -
802 - return ret;
803 -}
804 -
805 -static void rapl_pmu_event_read(struct perf_event *event)
806 -{
807 - rapl_event_update(event);
808 -}
809 -
810 -static ssize_t rapl_get_attr_cpumask(struct device *dev,
811 - struct device_attribute *attr, char *buf)
812 -{
813 - return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
814 -}
815 -
816 -static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);
817 -
818 -static struct attribute *rapl_pmu_attrs[] = {
819 - &dev_attr_cpumask.attr,
820 - NULL,
821 -};
822 -
823 -static struct attribute_group rapl_pmu_attr_group = {
824 - .attrs = rapl_pmu_attrs,
825 -};
826 -
827 -RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
828 -RAPL_EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02");
829 -RAPL_EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03");
830 -RAPL_EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04");
831 -RAPL_EVENT_ATTR_STR(energy-psys, rapl_psys, "event=0x05");
832 -
833 -RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
834 -RAPL_EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules");
835 -RAPL_EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules");
836 -RAPL_EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules");
837 -RAPL_EVENT_ATTR_STR(energy-psys.unit, rapl_psys_unit, "Joules");
838 -
839 -/*
840 - * we compute in 0.23 nJ increments regardless of MSR
841 - */
842 -RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
843 -RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
844 -RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
845 -RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
846 -RAPL_EVENT_ATTR_STR(energy-psys.scale, rapl_psys_scale, "2.3283064365386962890625e-10");
847 -
848 -/*
849 - * There are no default events, but we need to create
850 - * "events" group (with empty attrs) before updating
851 - * it with detected events.
852 - */
853 -static struct attribute *attrs_empty[] = {
854 - NULL,
855 -};
856 -
857 -static struct attribute_group rapl_pmu_events_group = {
858 - .name = "events",
859 - .attrs = attrs_empty,
860 -};
861 -
862 -DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
863 -static struct attribute *rapl_formats_attr[] = {
864 - &format_attr_event.attr,
865 - NULL,
866 -};
867 -
868 -static struct attribute_group rapl_pmu_format_group = {
869 - .name = "format",
870 - .attrs = rapl_formats_attr,
871 -};
872 -
873 -static const struct attribute_group *rapl_attr_groups[] = {
874 - &rapl_pmu_attr_group,
875 - &rapl_pmu_format_group,
876 - &rapl_pmu_events_group,
877 - NULL,
878 -};
879 -
880 -static struct attribute *rapl_events_cores[] = {
881 - EVENT_PTR(rapl_cores),
882 - EVENT_PTR(rapl_cores_unit),
883 - EVENT_PTR(rapl_cores_scale),
884 - NULL,
885 -};
886 -
887 -static struct attribute_group rapl_events_cores_group = {
888 - .name = "events",
889 - .attrs = rapl_events_cores,
890 -};
891 -
892 -static struct attribute *rapl_events_pkg[] = {
893 - EVENT_PTR(rapl_pkg),
894 - EVENT_PTR(rapl_pkg_unit),
895 - EVENT_PTR(rapl_pkg_scale),
896 - NULL,
897 -};
898 -
899 -static struct attribute_group rapl_events_pkg_group = {
900 - .name = "events",
901 - .attrs = rapl_events_pkg,
902 -};
903 -
904 -static struct attribute *rapl_events_ram[] = {
905 - EVENT_PTR(rapl_ram),
906 - EVENT_PTR(rapl_ram_unit),
907 - EVENT_PTR(rapl_ram_scale),
908 - NULL,
909 -};
910 -
911 -static struct attribute_group rapl_events_ram_group = {
912 - .name = "events",
913 - .attrs = rapl_events_ram,
914 -};
915 -
916 -static struct attribute *rapl_events_gpu[] = {
917 - EVENT_PTR(rapl_gpu),
918 - EVENT_PTR(rapl_gpu_unit),
919 - EVENT_PTR(rapl_gpu_scale),
920 - NULL,
921 -};
922 -
923 -static struct attribute_group rapl_events_gpu_group = {
924 - .name = "events",
925 - .attrs = rapl_events_gpu,
926 -};
927 -
928 -static struct attribute *rapl_events_psys[] = {
929 - EVENT_PTR(rapl_psys),
930 - EVENT_PTR(rapl_psys_unit),
931 - EVENT_PTR(rapl_psys_scale),
932 - NULL,
933 -};
934 -
935 -static struct attribute_group rapl_events_psys_group = {
936 - .name = "events",
937 - .attrs = rapl_events_psys,
938 -};
939 -
940 -static bool test_msr(int idx, void *data)
941 -{
942 - return test_bit(idx, (unsigned long *) data);
943 -}
944 -
945 -static struct perf_msr rapl_msrs[] = {
946 - [PERF_RAPL_PP0] = { MSR_PP0_ENERGY_STATUS, &rapl_events_cores_group, test_msr },
947 - [PERF_RAPL_PKG] = { MSR_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr },
948 - [PERF_RAPL_RAM] = { MSR_DRAM_ENERGY_STATUS, &rapl_events_ram_group, test_msr },
949 - [PERF_RAPL_PP1] = { MSR_PP1_ENERGY_STATUS, &rapl_events_gpu_group, test_msr },
950 - [PERF_RAPL_PSYS] = { MSR_PLATFORM_ENERGY_STATUS, &rapl_events_psys_group, test_msr },
951 -};
952 -
953 -static int rapl_cpu_offline(unsigned int cpu)
954 -{
955 - struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
956 - int target;
957 -
958 - /* Check if exiting cpu is used for collecting rapl events */
959 - if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
960 - return 0;
961 -
962 - pmu->cpu = -1;
963 - /* Find a new cpu to collect rapl events */
964 - target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
965 -
966 - /* Migrate rapl events to the new target */
967 - if (target < nr_cpu_ids) {
968 - cpumask_set_cpu(target, &rapl_cpu_mask);
969 - pmu->cpu = target;
970 - perf_pmu_migrate_context(pmu->pmu, cpu, target);
971 - }
972 - return 0;
973 -}
974 -
975 -static int rapl_cpu_online(unsigned int cpu)
976 -{
977 - struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
978 - int target;
979 -
980 - if (!pmu) {
981 - pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
982 - if (!pmu)
983 - return -ENOMEM;
984 -
985 - raw_spin_lock_init(&pmu->lock);
986 - INIT_LIST_HEAD(&pmu->active_list);
987 - pmu->pmu = &rapl_pmus->pmu;
988 - pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
989 - rapl_hrtimer_init(pmu);
990 -
991 - rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu;
992 - }
993 -
994 - /*
995 - * Check if there is an online cpu in the package which collects rapl
996 - * events already.
997 - */
998 - target = cpumask_any_and(&rapl_cpu_mask, topology_die_cpumask(cpu));
999 - if (target < nr_cpu_ids)
1000 - return 0;
1001 -
1002 - cpumask_set_cpu(cpu, &rapl_cpu_mask);
1003 - pmu->cpu = cpu;
1004 - return 0;
1005 -}
1006 -
1007 -static int rapl_check_hw_unit(bool apply_quirk)
1008 -{
1009 - u64 msr_rapl_power_unit_bits;
1010 - int i;
1011 -
1012 - /* protect rdmsrl() to handle virtualization */
1013 - if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
1014 - return -1;
1015 - for (i = 0; i < NR_RAPL_DOMAINS; i++)
1016 - rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
1017 -
1018 - /*
1019 - * DRAM domain on HSW server and KNL has fixed energy unit which can be
1020 - * different than the unit from power unit MSR. See
1021 - * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
1022 - * of 2. Datasheet, September 2014, Reference Number: 330784-001 "
1023 - */
1024 - if (apply_quirk)
1025 - rapl_hw_unit[PERF_RAPL_RAM] = 16;
1026 -
1027 - /*
1028 - * Calculate the timer rate:
1029 - * Use reference of 200W for scaling the timeout to avoid counter
1030 - * overflows. 200W = 200 Joules/sec
1031 - * Divide interval by 2 to avoid lockstep (2 * 100)
1032 - * if hw unit is 32, then we use 2 ms 1/200/2
1033 - */
1034 - rapl_timer_ms = 2;
1035 - if (rapl_hw_unit[0] < 32) {
1036 - rapl_timer_ms = (1000 / (2 * 100));
1037 - rapl_timer_ms *= (1ULL << (32 - rapl_hw_unit[0] - 1));
1038 - }
1039 - return 0;
1040 -}
1041 -
1042 -static void __init rapl_advertise(void)
1043 -{
1044 - int i;
1045 -
1046 - pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n",
1047 - hweight32(rapl_cntr_mask), rapl_timer_ms);
1048 -
1049 - for (i = 0; i < NR_RAPL_DOMAINS; i++) {
1050 - if (rapl_cntr_mask & (1 << i)) {
1051 - pr_info("hw unit of domain %s 2^-%d Joules\n",
1052 - rapl_domain_names[i], rapl_hw_unit[i]);
1053 - }
1054 - }
1055 -}
1056 -
1057 -static void cleanup_rapl_pmus(void)
1058 -{
1059 - int i;
1060 -
1061 - for (i = 0; i < rapl_pmus->maxdie; i++)
1062 - kfree(rapl_pmus->pmus[i]);
1063 - kfree(rapl_pmus);
1064 -}
1065 -
1066 -static const struct attribute_group *rapl_attr_update[] = {
1067 - &rapl_events_cores_group,
1068 - &rapl_events_pkg_group,
1069 - &rapl_events_ram_group,
1070 - &rapl_events_gpu_group,
1071 - &rapl_events_gpu_group,
1072 - NULL,
1073 -};
1074 -
1075 -static int __init init_rapl_pmus(void)
1076 -{
1077 - int maxdie = topology_max_packages() * topology_max_die_per_package();
1078 - size_t size;
1079 -
1080 - size = sizeof(*rapl_pmus) + maxdie * sizeof(struct rapl_pmu *);
1081 - rapl_pmus = kzalloc(size, GFP_KERNEL);
1082 - if (!rapl_pmus)
1083 - return -ENOMEM;
1084 -
1085 - rapl_pmus->maxdie = maxdie;
1086 - rapl_pmus->pmu.attr_groups = rapl_attr_groups;
1087 - rapl_pmus->pmu.attr_update = rapl_attr_update;
1088 - rapl_pmus->pmu.task_ctx_nr = perf_invalid_context;
1089 - rapl_pmus->pmu.event_init = rapl_pmu_event_init;
1090 - rapl_pmus->pmu.add = rapl_pmu_event_add;
1091 - rapl_pmus->pmu.del = rapl_pmu_event_del;
1092 - rapl_pmus->pmu.start = rapl_pmu_event_start;
1093 - rapl_pmus->pmu.stop = rapl_pmu_event_stop;
1094 - rapl_pmus->pmu.read = rapl_pmu_event_read;
1095 - rapl_pmus->pmu.module = THIS_MODULE;
1096 - rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
1097 - return 0;
1098 -}
1099 -
1100 -#define X86_RAPL_MODEL_MATCH(model, init) \
1101 - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
1102 -
1103 -static struct rapl_model model_snb = {
1104 - .events = BIT(PERF_RAPL_PP0) |
1105 - BIT(PERF_RAPL_PKG) |
1106 - BIT(PERF_RAPL_PP1),
1107 - .apply_quirk = false,
1108 -};
1109 -
1110 -static struct rapl_model model_snbep = {
1111 - .events = BIT(PERF_RAPL_PP0) |
1112 - BIT(PERF_RAPL_PKG) |
1113 - BIT(PERF_RAPL_RAM),
1114 - .apply_quirk = false,
1115 -};
1116 -
1117 -static struct rapl_model model_hsw = {
1118 - .events = BIT(PERF_RAPL_PP0) |
1119 - BIT(PERF_RAPL_PKG) |
1120 - BIT(PERF_RAPL_RAM) |
1121 - BIT(PERF_RAPL_PP1),
1122 - .apply_quirk = false,
1123 -};
1124 -
1125 -static struct rapl_model model_hsx = {
1126 - .events = BIT(PERF_RAPL_PP0) |
1127 - BIT(PERF_RAPL_PKG) |
1128 - BIT(PERF_RAPL_RAM),
1129 - .apply_quirk = true,
1130 -};
1131 -
1132 -static struct rapl_model model_knl = {
1133 - .events = BIT(PERF_RAPL_PKG) |
1134 - BIT(PERF_RAPL_RAM),
1135 - .apply_quirk = true,
1136 -};
1137 -
1138 -static struct rapl_model model_skl = {
1139 - .events = BIT(PERF_RAPL_PP0) |
1140 - BIT(PERF_RAPL_PKG) |
1141 - BIT(PERF_RAPL_RAM) |
1142 - BIT(PERF_RAPL_PP1) |
1143 - BIT(PERF_RAPL_PSYS),
1144 - .apply_quirk = false,
1145 -};
1146 -
1147 -static const struct x86_cpu_id rapl_model_match[] __initconst = {
1148 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, model_snb),
1149 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, model_snbep),
1150 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, model_snb),
1151 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, model_snbep),
1152 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL, model_hsw),
1153 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X, model_hsx),
1154 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_L, model_hsw),
1155 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_G, model_hsw),
1156 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL, model_hsw),
1157 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_G, model_hsw),
1158 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, model_hsx),
1159 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_D, model_hsx),
1160 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, model_knl),
1161 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, model_knl),
1162 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_L, model_skl),
1163 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE, model_skl),
1164 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, model_hsx),
1165 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_L, model_skl),
1166 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE, model_skl),
1167 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_L, model_skl),
1168 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, model_hsw),
1169 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_D, model_hsw),
1170 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, model_hsw),
1171 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_L, model_skl),
1172 - X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE, model_skl),
1173 - {},
1174 -};
1175 -
1176 -MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
1177 -
1178 -static int __init rapl_pmu_init(void)
1179 -{
1180 - const struct x86_cpu_id *id;
1181 - struct rapl_model *rm;
1182 - int ret;
1183 -
1184 - id = x86_match_cpu(rapl_model_match);
1185 - if (!id)
1186 - return -ENODEV;
1187 -
1188 - rm = (struct rapl_model *) id->driver_data;
1189 - rapl_cntr_mask = perf_msr_probe(rapl_msrs, PERF_RAPL_MAX,
1190 - false, (void *) &rm->events);
1191 -
1192 - ret = rapl_check_hw_unit(rm->apply_quirk);
1193 - if (ret)
1194 - return ret;
1195 -
1196 - ret = init_rapl_pmus();
1197 - if (ret)
1198 - return ret;
1199 -
1200 - /*
1201 - * Install callbacks. Core will call them for each online cpu.
1202 - */
1203 - ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
1204 - "perf/x86/rapl:online",
1205 - rapl_cpu_online, rapl_cpu_offline);
1206 - if (ret)
1207 - goto out;
1208 -
1209 - ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
1210 - if (ret)
1211 - goto out1;
1212 -
1213 - rapl_advertise();
1214 - return 0;
1215 -
1216 -out1:
1217 - cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
1218 -out:
1219 - pr_warn("Initialization failed (%d), disabled\n", ret);
1220 - cleanup_rapl_pmus();
1221 - return ret;
1222 -}
1223 -module_init(rapl_pmu_init);
1224 -
1225 -static void __exit intel_rapl_exit(void)
1226 -{
1227 - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
1228 - perf_pmu_unregister(&rapl_pmus->pmu);
1229 - cleanup_rapl_pmus();
1230 -}
1231 -module_exit(intel_rapl_exit);
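
The driver is moved wholesale from arch/x86/events/intel/rapl.c to arch/x86/events/rapl.c (next hunk) so it can serve AMD Fam17h as well as Intel; judging from the visible diff, the code is unchanged apart from the header comment and the include paths losing their ../ prefix. Its fixed-point contract is worth spelling out: counts are exported in 32.32 fixed point, and userspace converts with ldexp(count, -32), which is exactly the 2.3283064365386962890625e-10 "scale" string published in sysfs. A runnable check (build with cc -lm):

#include <math.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t raw = 1ULL << 32;              /* one full Joule in 32.32 */

	printf("%f J\n", ldexp((double)raw, -32));
	/* The sysfs scale string is just 2^-32 written out in decimal. */
	printf("%.22e\n", ldexp(1.0, -32));
	return 0;
}
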
1232 diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
1233 new file mode 100644
1234 index 000000000000..3c222d6fdee3
1235 --- /dev/null
1236 +++ b/arch/x86/events/rapl.c
1237 @@ -0,0 +1,805 @@
1238 +// SPDX-License-Identifier: GPL-2.0-only
1239 +/*
1240 + * Support Intel/AMD RAPL energy consumption counters
1241 + * Copyright (C) 2013 Google, Inc., Stephane Eranian
1242 + *
1243 + * Intel RAPL interface is specified in the IA-32 Manual Vol3b
1244 + * section 14.7.1 (September 2013)
1245 + *
1246 + * AMD RAPL interface for Fam17h is described in the public PPR:
1247 + * https://bugzilla.kernel.org/show_bug.cgi?id=206537
1248 + *
1249 + * RAPL provides more controls than just reporting energy consumption
1250 + * however here we only expose the 3 energy consumption free running
1251 + * counters (pp0, pkg, dram).
1252 + *
1253 + * Each of those counters increments in a power unit defined by the
1254 + * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules
1255 + * but it can vary.
1256 + *
1257 + * Counter to rapl events mappings:
1258 + *
1259 + * pp0 counter: consumption of all physical cores (power plane 0)
1260 + * event: rapl_energy_cores
1261 + * perf code: 0x1
1262 + *
1263 + * pkg counter: consumption of the whole processor package
1264 + * event: rapl_energy_pkg
1265 + * perf code: 0x2
1266 + *
1267 + * dram counter: consumption of the dram domain (servers only)
1268 + * event: rapl_energy_dram
1269 + * perf code: 0x3
1270 + *
1271 + * gpu counter: consumption of the builtin-gpu domain (client only)
1272 + * event: rapl_energy_gpu
1273 + * perf code: 0x4
1274 + *
1275 + * psys counter: consumption of the builtin-psys domain (client only)
1276 + * event: rapl_energy_psys
1277 + * perf code: 0x5
1278 + *
1279 + * We manage those counters as free running (read-only). They may be
1280 + * use simultaneously by other tools, such as turbostat.
1281 + *
1282 + * The events only support system-wide mode counting. There is no
1283 + * sampling support because it does not make sense and is not
1284 + * supported by the RAPL hardware.
1285 + *
1286 + * Because we want to avoid floating-point operations in the kernel,
1287 + * the events are all reported in fixed point arithmetic (32.32).
1288 + * Tools must adjust the counts to convert them to Watts using
1289 + * the duration of the measurement. Tools may use a function such as
1290 + * ldexp(raw_count, -32);
1291 + */
1292 +
1293 +#define pr_fmt(fmt) "RAPL PMU: " fmt
1294 +
1295 +#include <linux/module.h>
1296 +#include <linux/slab.h>
1297 +#include <linux/perf_event.h>
1298 +#include <linux/nospec.h>
1299 +#include <asm/cpu_device_id.h>
1300 +#include <asm/intel-family.h>
1301 +#include "perf_event.h"
1302 +#include "probe.h"
1303 +
1304 +MODULE_LICENSE("GPL");
1305 +
1306 +/*
1307 + * RAPL energy status counters
1308 + */
1309 +enum perf_rapl_events {
1310 + PERF_RAPL_PP0 = 0, /* all cores */
1311 + PERF_RAPL_PKG, /* entire package */
1312 + PERF_RAPL_RAM, /* DRAM */
1313 + PERF_RAPL_PP1, /* gpu */
1314 + PERF_RAPL_PSYS, /* psys */
1315 +
1316 + PERF_RAPL_MAX,
1317 + NR_RAPL_DOMAINS = PERF_RAPL_MAX,
1318 +};
1319 +
1320 +static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
1321 + "pp0-core",
1322 + "package",
1323 + "dram",
1324 + "pp1-gpu",
1325 + "psys",
1326 +};
1327 +
1328 +/*
1329 + * event code: LSB 8 bits, passed in attr->config
1330 + * any other bit is reserved
1331 + */
1332 +#define RAPL_EVENT_MASK 0xFFULL
1333 +
1334 +#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \
1335 +static ssize_t __rapl_##_var##_show(struct kobject *kobj, \
1336 + struct kobj_attribute *attr, \
1337 + char *page) \
1338 +{ \
1339 + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
1340 + return sprintf(page, _format "\n"); \
1341 +} \
1342 +static struct kobj_attribute format_attr_##_var = \
1343 + __ATTR(_name, 0444, __rapl_##_var##_show, NULL)
1344 +
1345 +#define RAPL_CNTR_WIDTH 32
1346 +
1347 +#define RAPL_EVENT_ATTR_STR(_name, v, str) \
1348 +static struct perf_pmu_events_attr event_attr_##v = { \
1349 + .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
1350 + .id = 0, \
1351 + .event_str = str, \
1352 +};
1353 +
1354 +struct rapl_pmu {
1355 + raw_spinlock_t lock;
1356 + int n_active;
1357 + int cpu;
1358 + struct list_head active_list;
1359 + struct pmu *pmu;
1360 + ktime_t timer_interval;
1361 + struct hrtimer hrtimer;
1362 +};
1363 +
1364 +struct rapl_pmus {
1365 + struct pmu pmu;
1366 + unsigned int maxdie;
1367 + struct rapl_pmu *pmus[];
1368 +};
1369 +
1370 +struct rapl_model {
1371 + unsigned long events;
1372 + bool apply_quirk;
1373 +};
1374 +
1375 + /* 1/2^hw_unit Joule */
1376 +static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
1377 +static struct rapl_pmus *rapl_pmus;
1378 +static cpumask_t rapl_cpu_mask;
1379 +static unsigned int rapl_cntr_mask;
1380 +static u64 rapl_timer_ms;
1381 +static struct perf_msr rapl_msrs[];
1382 +
1383 +static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
1384 +{
1385 + unsigned int dieid = topology_logical_die_id(cpu);
1386 +
1387 + /*
1388 + * The unsigned check also catches the '-1' return value for non
1389 + * existent mappings in the topology map.
1390 + */
1391 + return dieid < rapl_pmus->maxdie ? rapl_pmus->pmus[dieid] : NULL;
1392 +}
1393 +
1394 +static inline u64 rapl_read_counter(struct perf_event *event)
1395 +{
1396 + u64 raw;
1397 + rdmsrl(event->hw.event_base, raw);
1398 + return raw;
1399 +}
1400 +
1401 +static inline u64 rapl_scale(u64 v, int cfg)
1402 +{
1403 + if (cfg > NR_RAPL_DOMAINS) {
1404 + pr_warn("Invalid domain %d, failed to scale data\n", cfg);
1405 + return v;
1406 + }
1407 + /*
1408 + * scale delta to smallest unit (1/2^32)
1409 + * users must then scale back: count * 1/(1e9*2^32) to get Joules
1410 + * or use ldexp(count, -32).
1411 + * Watts = Joules/Time delta
1412 + */
1413 + return v << (32 - rapl_hw_unit[cfg - 1]);
1414 +}
1415 +
1416 +static u64 rapl_event_update(struct perf_event *event)
1417 +{
1418 + struct hw_perf_event *hwc = &event->hw;
1419 + u64 prev_raw_count, new_raw_count;
1420 + s64 delta, sdelta;
1421 + int shift = RAPL_CNTR_WIDTH;
1422 +
1423 +again:
1424 + prev_raw_count = local64_read(&hwc->prev_count);
1425 + rdmsrl(event->hw.event_base, new_raw_count);
1426 +
1427 + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
1428 + new_raw_count) != prev_raw_count) {
1429 + cpu_relax();
1430 + goto again;
1431 + }
1432 +
1433 + /*
1434 + * Now we have the new raw value and have updated the prev
1435 + * timestamp already. We can now calculate the elapsed delta
1436 + * (event-)time and add that to the generic event.
1437 + *
1438 + * Careful, not all hw sign-extends above the physical width
1439 + * of the count.
1440 + */
1441 + delta = (new_raw_count << shift) - (prev_raw_count << shift);
1442 + delta >>= shift;
1443 +
1444 + sdelta = rapl_scale(delta, event->hw.config);
1445 +
1446 + local64_add(sdelta, &event->count);
1447 +
1448 + return new_raw_count;
1449 +}
1450 +
1451 +static void rapl_start_hrtimer(struct rapl_pmu *pmu)
1452 +{
1453 + hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
1454 + HRTIMER_MODE_REL_PINNED);
1455 +}
1456 +
1457 +static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
1458 +{
1459 + struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
1460 + struct perf_event *event;
1461 + unsigned long flags;
1462 +
1463 + if (!pmu->n_active)
1464 + return HRTIMER_NORESTART;
1465 +
1466 + raw_spin_lock_irqsave(&pmu->lock, flags);
1467 +
1468 + list_for_each_entry(event, &pmu->active_list, active_entry)
1469 + rapl_event_update(event);
1470 +
1471 + raw_spin_unlock_irqrestore(&pmu->lock, flags);
1472 +
1473 + hrtimer_forward_now(hrtimer, pmu->timer_interval);
1474 +
1475 + return HRTIMER_RESTART;
1476 +}
1477 +
1478 +static void rapl_hrtimer_init(struct rapl_pmu *pmu)
1479 +{
1480 + struct hrtimer *hr = &pmu->hrtimer;
1481 +
1482 + hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1483 + hr->function = rapl_hrtimer_handle;
1484 +}
1485 +
1486 +static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
1487 + struct perf_event *event)
1488 +{
1489 + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1490 + return;
1491 +
1492 + event->hw.state = 0;
1493 +
1494 + list_add_tail(&event->active_entry, &pmu->active_list);
1495 +
1496 + local64_set(&event->hw.prev_count, rapl_read_counter(event));
1497 +
1498 + pmu->n_active++;
1499 + if (pmu->n_active == 1)
1500 + rapl_start_hrtimer(pmu);
1501 +}
1502 +
1503 +static void rapl_pmu_event_start(struct perf_event *event, int mode)
1504 +{
1505 + struct rapl_pmu *pmu = event->pmu_private;
1506 + unsigned long flags;
1507 +
1508 + raw_spin_lock_irqsave(&pmu->lock, flags);
1509 + __rapl_pmu_event_start(pmu, event);
1510 + raw_spin_unlock_irqrestore(&pmu->lock, flags);
1511 +}
1512 +
1513 +static void rapl_pmu_event_stop(struct perf_event *event, int mode)
1514 +{
1515 + struct rapl_pmu *pmu = event->pmu_private;
1516 + struct hw_perf_event *hwc = &event->hw;
1517 + unsigned long flags;
1518 +
1519 + raw_spin_lock_irqsave(&pmu->lock, flags);
1520 +
1521 + /* mark event as deactivated and stopped */
1522 + if (!(hwc->state & PERF_HES_STOPPED)) {
1523 + WARN_ON_ONCE(pmu->n_active <= 0);
1524 + pmu->n_active--;
1525 + if (pmu->n_active == 0)
1526 + hrtimer_cancel(&pmu->hrtimer);
1527 +
1528 + list_del(&event->active_entry);
1529 +
1530 + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1531 + hwc->state |= PERF_HES_STOPPED;
1532 + }
1533 +
1534 + /* check if update of sw counter is necessary */
1535 + if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1536 + /*
1537 + * Drain the remaining delta count out of a event
1538 + * that we are disabling:
1539 + */
1540 + rapl_event_update(event);
1541 + hwc->state |= PERF_HES_UPTODATE;
1542 + }
1543 +
1544 + raw_spin_unlock_irqrestore(&pmu->lock, flags);
1545 +}
1546 +
1547 +static int rapl_pmu_event_add(struct perf_event *event, int mode)
1548 +{
1549 + struct rapl_pmu *pmu = event->pmu_private;
1550 + struct hw_perf_event *hwc = &event->hw;
1551 + unsigned long flags;
1552 +
1553 + raw_spin_lock_irqsave(&pmu->lock, flags);
1554 +
1555 + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1556 +
1557 + if (mode & PERF_EF_START)
1558 + __rapl_pmu_event_start(pmu, event);
1559 +
1560 + raw_spin_unlock_irqrestore(&pmu->lock, flags);
1561 +
1562 + return 0;
1563 +}
1564 +
1565 +static void rapl_pmu_event_del(struct perf_event *event, int flags)
1566 +{
1567 + rapl_pmu_event_stop(event, PERF_EF_UPDATE);
1568 +}
1569 +
1570 +static int rapl_pmu_event_init(struct perf_event *event)
1571 +{
1572 + u64 cfg = event->attr.config & RAPL_EVENT_MASK;
1573 + int bit, ret = 0;
1574 + struct rapl_pmu *pmu;
1575 +
1576 + /* only look at RAPL events */
1577 + if (event->attr.type != rapl_pmus->pmu.type)
1578 + return -ENOENT;
1579 +
1580 + /* check only supported bits are set */
1581 + if (event->attr.config & ~RAPL_EVENT_MASK)
1582 + return -EINVAL;
1583 +
1584 + if (event->cpu < 0)
1585 + return -EINVAL;
1586 +
1587 + event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
1588 +
1589 + if (!cfg || cfg >= NR_RAPL_DOMAINS + 1)
1590 + return -EINVAL;
1591 +
1592 + cfg = array_index_nospec((long)cfg, NR_RAPL_DOMAINS + 1);
1593 + bit = cfg - 1;
1594 +
1595 + /* check event supported */
1596 + if (!(rapl_cntr_mask & (1 << bit)))
1597 + return -EINVAL;
1598 +
1599 + /* unsupported modes and filters */
1600 + if (event->attr.sample_period) /* no sampling */
1601 + return -EINVAL;
1602 +
1603 + /* must be done before validate_group */
1604 + pmu = cpu_to_rapl_pmu(event->cpu);
1605 + if (!pmu)
1606 + return -EINVAL;
1607 + event->cpu = pmu->cpu;
1608 + event->pmu_private = pmu;
1609 + event->hw.event_base = rapl_msrs[bit].msr;
1610 + event->hw.config = cfg;
1611 + event->hw.idx = bit;
1612 +
1613 + return ret;
1614 +}
1615 +
1616 +static void rapl_pmu_event_read(struct perf_event *event)
1617 +{
1618 + rapl_event_update(event);
1619 +}
1620 +
1621 +static ssize_t rapl_get_attr_cpumask(struct device *dev,
1622 + struct device_attribute *attr, char *buf)
1623 +{
1624 + return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
1625 +}
1626 +
1627 +static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);
1628 +
1629 +static struct attribute *rapl_pmu_attrs[] = {
1630 + &dev_attr_cpumask.attr,
1631 + NULL,
1632 +};
1633 +
1634 +static struct attribute_group rapl_pmu_attr_group = {
1635 + .attrs = rapl_pmu_attrs,
1636 +};
1637 +
1638 +RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
1639 +RAPL_EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02");
1640 +RAPL_EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03");
1641 +RAPL_EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04");
1642 +RAPL_EVENT_ATTR_STR(energy-psys, rapl_psys, "event=0x05");
1643 +
1644 +RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
1645 +RAPL_EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules");
1646 +RAPL_EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules");
1647 +RAPL_EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules");
1648 +RAPL_EVENT_ATTR_STR(energy-psys.unit, rapl_psys_unit, "Joules");
1649 +
1650 +/*
1651 + * we compute in 0.23 nJ increments regardless of MSR
1652 + */
1653 +RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
1654 +RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
1655 +RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
1656 +RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
1657 +RAPL_EVENT_ATTR_STR(energy-psys.scale, rapl_psys_scale, "2.3283064365386962890625e-10");
1658 +
1659 +/*
1660 + * There are no default events, but we need to create
1661 + * "events" group (with empty attrs) before updating
1662 + * it with detected events.
1663 + */
1664 +static struct attribute *attrs_empty[] = {
1665 + NULL,
1666 +};
1667 +
1668 +static struct attribute_group rapl_pmu_events_group = {
1669 + .name = "events",
1670 + .attrs = attrs_empty,
1671 +};
1672 +
1673 +DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
1674 +static struct attribute *rapl_formats_attr[] = {
1675 + &format_attr_event.attr,
1676 + NULL,
1677 +};
1678 +
1679 +static struct attribute_group rapl_pmu_format_group = {
1680 + .name = "format",
1681 + .attrs = rapl_formats_attr,
1682 +};
1683 +
1684 +static const struct attribute_group *rapl_attr_groups[] = {
1685 + &rapl_pmu_attr_group,
1686 + &rapl_pmu_format_group,
1687 + &rapl_pmu_events_group,
1688 + NULL,
1689 +};
1690 +
1691 +static struct attribute *rapl_events_cores[] = {
1692 + EVENT_PTR(rapl_cores),
1693 + EVENT_PTR(rapl_cores_unit),
1694 + EVENT_PTR(rapl_cores_scale),
1695 + NULL,
1696 +};
1697 +
1698 +static struct attribute_group rapl_events_cores_group = {
1699 + .name = "events",
1700 + .attrs = rapl_events_cores,
1701 +};
1702 +
1703 +static struct attribute *rapl_events_pkg[] = {
1704 + EVENT_PTR(rapl_pkg),
1705 + EVENT_PTR(rapl_pkg_unit),
1706 + EVENT_PTR(rapl_pkg_scale),
1707 + NULL,
1708 +};
1709 +
1710 +static struct attribute_group rapl_events_pkg_group = {
1711 + .name = "events",
1712 + .attrs = rapl_events_pkg,
1713 +};
1714 +
1715 +static struct attribute *rapl_events_ram[] = {
1716 + EVENT_PTR(rapl_ram),
1717 + EVENT_PTR(rapl_ram_unit),
1718 + EVENT_PTR(rapl_ram_scale),
1719 + NULL,
1720 +};
1721 +
1722 +static struct attribute_group rapl_events_ram_group = {
1723 + .name = "events",
1724 + .attrs = rapl_events_ram,
1725 +};
1726 +
1727 +static struct attribute *rapl_events_gpu[] = {
1728 + EVENT_PTR(rapl_gpu),
1729 + EVENT_PTR(rapl_gpu_unit),
1730 + EVENT_PTR(rapl_gpu_scale),
1731 + NULL,
1732 +};
1733 +
1734 +static struct attribute_group rapl_events_gpu_group = {
1735 + .name = "events",
1736 + .attrs = rapl_events_gpu,
1737 +};
1738 +
1739 +static struct attribute *rapl_events_psys[] = {
1740 + EVENT_PTR(rapl_psys),
1741 + EVENT_PTR(rapl_psys_unit),
1742 + EVENT_PTR(rapl_psys_scale),
1743 + NULL,
1744 +};
1745 +
1746 +static struct attribute_group rapl_events_psys_group = {
1747 + .name = "events",
1748 + .attrs = rapl_events_psys,
1749 +};
1750 +
1751 +static bool test_msr(int idx, void *data)
1752 +{
1753 + return test_bit(idx, (unsigned long *) data);
1754 +}
1755 +
1756 +static struct perf_msr rapl_msrs[] = {
1757 + [PERF_RAPL_PP0] = { MSR_PP0_ENERGY_STATUS, &rapl_events_cores_group, test_msr },
1758 + [PERF_RAPL_PKG] = { MSR_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr },
1759 + [PERF_RAPL_RAM] = { MSR_DRAM_ENERGY_STATUS, &rapl_events_ram_group, test_msr },
1760 + [PERF_RAPL_PP1] = { MSR_PP1_ENERGY_STATUS, &rapl_events_gpu_group, test_msr },
1761 + [PERF_RAPL_PSYS] = { MSR_PLATFORM_ENERGY_STATUS, &rapl_events_psys_group, test_msr },
1762 +};
1763 +
1764 +static int rapl_cpu_offline(unsigned int cpu)
1765 +{
1766 + struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
1767 + int target;
1768 +
1769 + /* Check if exiting cpu is used for collecting rapl events */
1770 + if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
1771 + return 0;
1772 +
1773 + pmu->cpu = -1;
1774 + /* Find a new cpu to collect rapl events */
1775 + target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
1776 +
1777 + /* Migrate rapl events to the new target */
1778 + if (target < nr_cpu_ids) {
1779 + cpumask_set_cpu(target, &rapl_cpu_mask);
1780 + pmu->cpu = target;
1781 + perf_pmu_migrate_context(pmu->pmu, cpu, target);
1782 + }
1783 + return 0;
1784 +}
1785 +
1786 +static int rapl_cpu_online(unsigned int cpu)
1787 +{
1788 + struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
1789 + int target;
1790 +
1791 + if (!pmu) {
1792 + pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
1793 + if (!pmu)
1794 + return -ENOMEM;
1795 +
1796 + raw_spin_lock_init(&pmu->lock);
1797 + INIT_LIST_HEAD(&pmu->active_list);
1798 + pmu->pmu = &rapl_pmus->pmu;
1799 + pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
1800 + rapl_hrtimer_init(pmu);
1801 +
1802 + rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu;
1803 + }
1804 +
1805 + /*
1806 + * Check if there is an online cpu in the package which collects rapl
1807 + * events already.
1808 + */
1809 + target = cpumask_any_and(&rapl_cpu_mask, topology_die_cpumask(cpu));
1810 + if (target < nr_cpu_ids)
1811 + return 0;
1812 +
1813 + cpumask_set_cpu(cpu, &rapl_cpu_mask);
1814 + pmu->cpu = cpu;
1815 + return 0;
1816 +}
1817 +
1818 +static int rapl_check_hw_unit(bool apply_quirk)
1819 +{
1820 + u64 msr_rapl_power_unit_bits;
1821 + int i;
1822 +
1823 +	/* use rdmsrl_safe(): the MSR may be missing under virtualization */
1824 + if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
1825 + return -1;
1826 + for (i = 0; i < NR_RAPL_DOMAINS; i++)
1827 + rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
1828 +
1829 + /*
1830 +	 * DRAM domain on HSW server and KNL has a fixed energy unit which can
1831 +	 * be different from the unit from the power unit MSR. See
1832 +	 * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, Vol. 2
1833 +	 * of 2, Datasheet, September 2014, Reference Number: 330784-001"
1834 + */
1835 + if (apply_quirk)
1836 + rapl_hw_unit[PERF_RAPL_RAM] = 16;
1837 +
1838 + /*
1839 + * Calculate the timer rate:
1840 +	 * Use a reference of 200W for scaling the timeout to avoid counter
1841 +	 * overflows; 200W = 200 Joules/sec.
1842 +	 * Divide the interval by 2 to avoid lockstep (2 * 100).
1843 +	 * If the hw unit is 32, then we use 2 ms (1/200/2 s, rounded down).
1844 + */
1845 + rapl_timer_ms = 2;
1846 + if (rapl_hw_unit[0] < 32) {
1847 + rapl_timer_ms = (1000 / (2 * 100));
1848 + rapl_timer_ms *= (1ULL << (32 - rapl_hw_unit[0] - 1));
1849 + }
1850 + return 0;
1851 +}
1852 +
1853 +static void __init rapl_advertise(void)
1854 +{
1855 + int i;
1856 +
1857 + pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n",
1858 + hweight32(rapl_cntr_mask), rapl_timer_ms);
1859 +
1860 + for (i = 0; i < NR_RAPL_DOMAINS; i++) {
1861 + if (rapl_cntr_mask & (1 << i)) {
1862 + pr_info("hw unit of domain %s 2^-%d Joules\n",
1863 + rapl_domain_names[i], rapl_hw_unit[i]);
1864 + }
1865 + }
1866 +}
1867 +
1868 +static void cleanup_rapl_pmus(void)
1869 +{
1870 + int i;
1871 +
1872 + for (i = 0; i < rapl_pmus->maxdie; i++)
1873 + kfree(rapl_pmus->pmus[i]);
1874 + kfree(rapl_pmus);
1875 +}
1876 +
1877 +static const struct attribute_group *rapl_attr_update[] = {
1878 + &rapl_events_cores_group,
1879 + &rapl_events_pkg_group,
1880 + &rapl_events_ram_group,
1881 + &rapl_events_gpu_group,
1882 +	&rapl_events_psys_group,
1883 + NULL,
1884 +};
1885 +
1886 +static int __init init_rapl_pmus(void)
1887 +{
1888 + int maxdie = topology_max_packages() * topology_max_die_per_package();
1889 + size_t size;
1890 +
1891 + size = sizeof(*rapl_pmus) + maxdie * sizeof(struct rapl_pmu *);
1892 + rapl_pmus = kzalloc(size, GFP_KERNEL);
1893 + if (!rapl_pmus)
1894 + return -ENOMEM;
1895 +
1896 + rapl_pmus->maxdie = maxdie;
1897 + rapl_pmus->pmu.attr_groups = rapl_attr_groups;
1898 + rapl_pmus->pmu.attr_update = rapl_attr_update;
1899 + rapl_pmus->pmu.task_ctx_nr = perf_invalid_context;
1900 + rapl_pmus->pmu.event_init = rapl_pmu_event_init;
1901 + rapl_pmus->pmu.add = rapl_pmu_event_add;
1902 + rapl_pmus->pmu.del = rapl_pmu_event_del;
1903 + rapl_pmus->pmu.start = rapl_pmu_event_start;
1904 + rapl_pmus->pmu.stop = rapl_pmu_event_stop;
1905 + rapl_pmus->pmu.read = rapl_pmu_event_read;
1906 + rapl_pmus->pmu.module = THIS_MODULE;
1907 + rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
1908 + return 0;
1909 +}
1910 +
1911 +#define X86_RAPL_MODEL_MATCH(model, init) \
1912 + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
1913 +
1914 +static struct rapl_model model_snb = {
1915 + .events = BIT(PERF_RAPL_PP0) |
1916 + BIT(PERF_RAPL_PKG) |
1917 + BIT(PERF_RAPL_PP1),
1918 + .apply_quirk = false,
1919 +};
1920 +
1921 +static struct rapl_model model_snbep = {
1922 + .events = BIT(PERF_RAPL_PP0) |
1923 + BIT(PERF_RAPL_PKG) |
1924 + BIT(PERF_RAPL_RAM),
1925 + .apply_quirk = false,
1926 +};
1927 +
1928 +static struct rapl_model model_hsw = {
1929 + .events = BIT(PERF_RAPL_PP0) |
1930 + BIT(PERF_RAPL_PKG) |
1931 + BIT(PERF_RAPL_RAM) |
1932 + BIT(PERF_RAPL_PP1),
1933 + .apply_quirk = false,
1934 +};
1935 +
1936 +static struct rapl_model model_hsx = {
1937 + .events = BIT(PERF_RAPL_PP0) |
1938 + BIT(PERF_RAPL_PKG) |
1939 + BIT(PERF_RAPL_RAM),
1940 + .apply_quirk = true,
1941 +};
1942 +
1943 +static struct rapl_model model_knl = {
1944 + .events = BIT(PERF_RAPL_PKG) |
1945 + BIT(PERF_RAPL_RAM),
1946 + .apply_quirk = true,
1947 +};
1948 +
1949 +static struct rapl_model model_skl = {
1950 + .events = BIT(PERF_RAPL_PP0) |
1951 + BIT(PERF_RAPL_PKG) |
1952 + BIT(PERF_RAPL_RAM) |
1953 + BIT(PERF_RAPL_PP1) |
1954 + BIT(PERF_RAPL_PSYS),
1955 + .apply_quirk = false,
1956 +};
1957 +
1958 +static const struct x86_cpu_id rapl_model_match[] __initconst = {
1959 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, model_snb),
1960 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, model_snbep),
1961 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, model_snb),
1962 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, model_snbep),
1963 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL, model_hsw),
1964 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X, model_hsx),
1965 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_L, model_hsw),
1966 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_G, model_hsw),
1967 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL, model_hsw),
1968 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_G, model_hsw),
1969 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, model_hsx),
1970 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_D, model_hsx),
1971 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, model_knl),
1972 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, model_knl),
1973 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_L, model_skl),
1974 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE, model_skl),
1975 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, model_hsx),
1976 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_L, model_skl),
1977 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE, model_skl),
1978 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_L, model_skl),
1979 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, model_hsw),
1980 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_D, model_hsw),
1981 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, model_hsw),
1982 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_L, model_skl),
1983 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE, model_skl),
1984 + {},
1985 +};
1986 +
1987 +MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
1988 +
1989 +static int __init rapl_pmu_init(void)
1990 +{
1991 + const struct x86_cpu_id *id;
1992 + struct rapl_model *rm;
1993 + int ret;
1994 +
1995 + id = x86_match_cpu(rapl_model_match);
1996 + if (!id)
1997 + return -ENODEV;
1998 +
1999 + rm = (struct rapl_model *) id->driver_data;
2000 + rapl_cntr_mask = perf_msr_probe(rapl_msrs, PERF_RAPL_MAX,
2001 + false, (void *) &rm->events);
2002 +
2003 + ret = rapl_check_hw_unit(rm->apply_quirk);
2004 + if (ret)
2005 + return ret;
2006 +
2007 + ret = init_rapl_pmus();
2008 + if (ret)
2009 + return ret;
2010 +
2011 + /*
2012 + * Install callbacks. Core will call them for each online cpu.
2013 + */
2014 + ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
2015 + "perf/x86/rapl:online",
2016 + rapl_cpu_online, rapl_cpu_offline);
2017 + if (ret)
2018 + goto out;
2019 +
2020 + ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
2021 + if (ret)
2022 + goto out1;
2023 +
2024 + rapl_advertise();
2025 + return 0;
2026 +
2027 +out1:
2028 + cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
2029 +out:
2030 + pr_warn("Initialization failed (%d), disabled\n", ret);
2031 + cleanup_rapl_pmus();
2032 + return ret;
2033 +}
2034 +module_init(rapl_pmu_init);
2035 +
2036 +static void __exit intel_rapl_exit(void)
2037 +{
2038 + cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
2039 + perf_pmu_unregister(&rapl_pmus->pmu);
2040 + cleanup_rapl_pmus();
2041 +}
2042 +module_exit(intel_rapl_exit);
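
The timer-rate comment in rapl_check_hw_unit() above compresses the overflow arithmetic quite a bit. A minimal userspace sketch of the same computation, under the assumption of a few example unit values (they are hypothetical, not read from MSR_RAPL_POWER_UNIT):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical energy-status units: one counter increment = 2^-unit J */
	int units[] = { 32, 16, 14 };

	for (unsigned int i = 0; i < sizeof(units) / sizeof(units[0]); i++) {
		int unit = units[i];
		uint64_t timer_ms = 2;	/* unit == 32: 1/200/2 s, rounded down */

		if (unit < 32) {
			/* at 200 W, one Joule is consumed every 1000/200 = 5 ms */
			timer_ms = 1000 / (2 * 100);
			/* times half the 32-bit counter range in Joules */
			timer_ms *= 1ULL << (32 - unit - 1);
		}
		/* e.g. unit = 14 (~61 uJ/count): 5 * 2^17 = 655360 ms; the
		 * counter overflows at 200 W after ~22 minutes, so polling at
		 * half that interval keeps the 32-bit delta unambiguous */
		printf("unit 2^-%d J -> timer %llu ms\n",
		       unit, (unsigned long long)timer_ms);
	}
	return 0;
}

For unit = 16 this yields 5 * 2^15 = 163840 ms, which is the value the "%llu ms ovfl timer" line in rapl_advertise() would report at init.
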
2043 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
2044 index 54f5d54280f6..a07dfdf7759e 100644
2045 --- a/arch/x86/include/asm/processor.h
2046 +++ b/arch/x86/include/asm/processor.h
2047 @@ -334,7 +334,7 @@ struct x86_hw_tss {
2048 #define INVALID_IO_BITMAP_OFFSET 0x8000
2049
2050 struct entry_stack {
2051 - unsigned long words[64];
2052 + char stack[PAGE_SIZE];
2053 };
2054
2055 struct entry_stack_page {
2056 diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
2057 index 1cc6c47dc77e..341f58a01de0 100644
2058 --- a/arch/x86/kvm/kvm_cache_regs.h
2059 +++ b/arch/x86/kvm/kvm_cache_regs.h
2060 @@ -7,7 +7,7 @@
2061 #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
2062 #define KVM_POSSIBLE_CR4_GUEST_BITS \
2063 (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
2064 - | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
2065 + | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
2066
2067 #define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
2068 static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
2069 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
2070 index aab02ea2d2cb..342d9ddf35c3 100644
2071 --- a/arch/x86/kvm/mmu.c
2072 +++ b/arch/x86/kvm/mmu.c
2073 @@ -4580,7 +4580,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
2074 nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
2075 rsvd_bits(maxphyaddr, 51);
2076 rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
2077 - nonleaf_bit8_rsvd | gbpages_bit_rsvd |
2078 + gbpages_bit_rsvd |
2079 rsvd_bits(maxphyaddr, 51);
2080 rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2081 rsvd_bits(maxphyaddr, 51);
2082 diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
2083 index 10e6471896cd..3be65495aeb8 100644
2084 --- a/arch/x86/kvm/vmx/vmx.c
2085 +++ b/arch/x86/kvm/vmx/vmx.c
2086 @@ -3913,6 +3913,8 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
2087
2088 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
2089 {
2090 + BUILD_BUG_ON(KVM_CR4_GUEST_OWNED_BITS & ~KVM_POSSIBLE_CR4_GUEST_BITS);
2091 +
2092 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
2093 if (enable_ept)
2094 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
2095 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
2096 index eed1866ae4d3..38b2df0e7109 100644
2097 --- a/arch/x86/kvm/x86.c
2098 +++ b/arch/x86/kvm/x86.c
2099 @@ -980,6 +980,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
2100 if (is_long_mode(vcpu)) {
2101 if (!(cr4 & X86_CR4_PAE))
2102 return 1;
2103 + if ((cr4 ^ old_cr4) & X86_CR4_LA57)
2104 + return 1;
2105 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
2106 && ((cr4 ^ old_cr4) & pdptr_bits)
2107 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
2108 diff --git a/block/bio-integrity.c b/block/bio-integrity.c
2109 index ae07dd78e951..c9dc2b17ce25 100644
2110 --- a/block/bio-integrity.c
2111 +++ b/block/bio-integrity.c
2112 @@ -24,6 +24,18 @@ void blk_flush_integrity(void)
2113 flush_workqueue(kintegrityd_wq);
2114 }
2115
2116 +void __bio_integrity_free(struct bio_set *bs, struct bio_integrity_payload *bip)
2117 +{
2118 + if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
2119 + if (bip->bip_vec)
2120 + bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
2121 + bip->bip_slab);
2122 + mempool_free(bip, &bs->bio_integrity_pool);
2123 + } else {
2124 + kfree(bip);
2125 + }
2126 +}
2127 +
2128 /**
2129 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
2130 * @bio: bio to attach integrity metadata to
2131 @@ -75,7 +87,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
2132
2133 return bip;
2134 err:
2135 - mempool_free(bip, &bs->bio_integrity_pool);
2136 + __bio_integrity_free(bs, bip);
2137 return ERR_PTR(-ENOMEM);
2138 }
2139 EXPORT_SYMBOL(bio_integrity_alloc);
2140 @@ -96,14 +108,7 @@ void bio_integrity_free(struct bio *bio)
2141 kfree(page_address(bip->bip_vec->bv_page) +
2142 bip->bip_vec->bv_offset);
2143
2144 - if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
2145 - bvec_free(&bs->bvec_integrity_pool, bip->bip_vec, bip->bip_slab);
2146 -
2147 - mempool_free(bip, &bs->bio_integrity_pool);
2148 - } else {
2149 - kfree(bip);
2150 - }
2151 -
2152 + __bio_integrity_free(bs, bip);
2153 bio->bi_integrity = NULL;
2154 bio->bi_opf &= ~REQ_INTEGRITY;
2155 }
2156 diff --git a/block/blk-mq.c b/block/blk-mq.c
2157 index f1b930a300a3..ae7d31cb5a4e 100644
2158 --- a/block/blk-mq.c
2159 +++ b/block/blk-mq.c
2160 @@ -829,10 +829,10 @@ static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
2161 void *priv, bool reserved)
2162 {
2163 /*
2164 - * If we find a request that is inflight and the queue matches,
2165 + * If we find a request that isn't idle and the queue matches,
2166 * we know the queue is busy. Return false to stop the iteration.
2167 */
2168 - if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
2169 + if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
2170 bool *busy = priv;
2171
2172 *busy = true;
2173 diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
2174 index 508bbd6ea439..320d23de02c2 100644
2175 --- a/drivers/base/regmap/regmap.c
2176 +++ b/drivers/base/regmap/regmap.c
2177 @@ -17,6 +17,7 @@
2178 #include <linux/delay.h>
2179 #include <linux/log2.h>
2180 #include <linux/hwspinlock.h>
2181 +#include <asm/unaligned.h>
2182
2183 #define CREATE_TRACE_POINTS
2184 #include "trace.h"
2185 @@ -249,22 +250,20 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
2186
2187 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
2188 {
2189 - __be16 *b = buf;
2190 -
2191 - b[0] = cpu_to_be16(val << shift);
2192 + put_unaligned_be16(val << shift, buf);
2193 }
2194
2195 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
2196 {
2197 - __le16 *b = buf;
2198 -
2199 - b[0] = cpu_to_le16(val << shift);
2200 + put_unaligned_le16(val << shift, buf);
2201 }
2202
2203 static void regmap_format_16_native(void *buf, unsigned int val,
2204 unsigned int shift)
2205 {
2206 - *(u16 *)buf = val << shift;
2207 + u16 v = val << shift;
2208 +
2209 + memcpy(buf, &v, sizeof(v));
2210 }
2211
2212 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
2213 @@ -280,43 +279,39 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
2214
2215 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
2216 {
2217 - __be32 *b = buf;
2218 -
2219 - b[0] = cpu_to_be32(val << shift);
2220 + put_unaligned_be32(val << shift, buf);
2221 }
2222
2223 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
2224 {
2225 - __le32 *b = buf;
2226 -
2227 - b[0] = cpu_to_le32(val << shift);
2228 + put_unaligned_le32(val << shift, buf);
2229 }
2230
2231 static void regmap_format_32_native(void *buf, unsigned int val,
2232 unsigned int shift)
2233 {
2234 - *(u32 *)buf = val << shift;
2235 + u32 v = val << shift;
2236 +
2237 + memcpy(buf, &v, sizeof(v));
2238 }
2239
2240 #ifdef CONFIG_64BIT
2241 static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
2242 {
2243 - __be64 *b = buf;
2244 -
2245 - b[0] = cpu_to_be64((u64)val << shift);
2246 + put_unaligned_be64((u64) val << shift, buf);
2247 }
2248
2249 static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
2250 {
2251 - __le64 *b = buf;
2252 -
2253 - b[0] = cpu_to_le64((u64)val << shift);
2254 + put_unaligned_le64((u64) val << shift, buf);
2255 }
2256
2257 static void regmap_format_64_native(void *buf, unsigned int val,
2258 unsigned int shift)
2259 {
2260 - *(u64 *)buf = (u64)val << shift;
2261 + u64 v = (u64) val << shift;
2262 +
2263 + memcpy(buf, &v, sizeof(v));
2264 }
2265 #endif
2266
2267 @@ -333,35 +328,34 @@ static unsigned int regmap_parse_8(const void *buf)
2268
2269 static unsigned int regmap_parse_16_be(const void *buf)
2270 {
2271 - const __be16 *b = buf;
2272 -
2273 - return be16_to_cpu(b[0]);
2274 + return get_unaligned_be16(buf);
2275 }
2276
2277 static unsigned int regmap_parse_16_le(const void *buf)
2278 {
2279 - const __le16 *b = buf;
2280 -
2281 - return le16_to_cpu(b[0]);
2282 + return get_unaligned_le16(buf);
2283 }
2284
2285 static void regmap_parse_16_be_inplace(void *buf)
2286 {
2287 - __be16 *b = buf;
2288 + u16 v = get_unaligned_be16(buf);
2289
2290 - b[0] = be16_to_cpu(b[0]);
2291 + memcpy(buf, &v, sizeof(v));
2292 }
2293
2294 static void regmap_parse_16_le_inplace(void *buf)
2295 {
2296 - __le16 *b = buf;
2297 + u16 v = get_unaligned_le16(buf);
2298
2299 - b[0] = le16_to_cpu(b[0]);
2300 + memcpy(buf, &v, sizeof(v));
2301 }
2302
2303 static unsigned int regmap_parse_16_native(const void *buf)
2304 {
2305 - return *(u16 *)buf;
2306 + u16 v;
2307 +
2308 + memcpy(&v, buf, sizeof(v));
2309 + return v;
2310 }
2311
2312 static unsigned int regmap_parse_24(const void *buf)
2313 @@ -376,69 +370,67 @@ static unsigned int regmap_parse_24(const void *buf)
2314
2315 static unsigned int regmap_parse_32_be(const void *buf)
2316 {
2317 - const __be32 *b = buf;
2318 -
2319 - return be32_to_cpu(b[0]);
2320 + return get_unaligned_be32(buf);
2321 }
2322
2323 static unsigned int regmap_parse_32_le(const void *buf)
2324 {
2325 - const __le32 *b = buf;
2326 -
2327 - return le32_to_cpu(b[0]);
2328 + return get_unaligned_le32(buf);
2329 }
2330
2331 static void regmap_parse_32_be_inplace(void *buf)
2332 {
2333 - __be32 *b = buf;
2334 + u32 v = get_unaligned_be32(buf);
2335
2336 - b[0] = be32_to_cpu(b[0]);
2337 + memcpy(buf, &v, sizeof(v));
2338 }
2339
2340 static void regmap_parse_32_le_inplace(void *buf)
2341 {
2342 - __le32 *b = buf;
2343 + u32 v = get_unaligned_le32(buf);
2344
2345 - b[0] = le32_to_cpu(b[0]);
2346 + memcpy(buf, &v, sizeof(v));
2347 }
2348
2349 static unsigned int regmap_parse_32_native(const void *buf)
2350 {
2351 - return *(u32 *)buf;
2352 + u32 v;
2353 +
2354 + memcpy(&v, buf, sizeof(v));
2355 + return v;
2356 }
2357
2358 #ifdef CONFIG_64BIT
2359 static unsigned int regmap_parse_64_be(const void *buf)
2360 {
2361 - const __be64 *b = buf;
2362 -
2363 - return be64_to_cpu(b[0]);
2364 + return get_unaligned_be64(buf);
2365 }
2366
2367 static unsigned int regmap_parse_64_le(const void *buf)
2368 {
2369 - const __le64 *b = buf;
2370 -
2371 - return le64_to_cpu(b[0]);
2372 + return get_unaligned_le64(buf);
2373 }
2374
2375 static void regmap_parse_64_be_inplace(void *buf)
2376 {
2377 - __be64 *b = buf;
2378 + u64 v = get_unaligned_be64(buf);
2379
2380 - b[0] = be64_to_cpu(b[0]);
2381 + memcpy(buf, &v, sizeof(v));
2382 }
2383
2384 static void regmap_parse_64_le_inplace(void *buf)
2385 {
2386 - __le64 *b = buf;
2387 + u64 v = get_unaligned_le64(buf);
2388
2389 - b[0] = le64_to_cpu(b[0]);
2390 + memcpy(buf, &v, sizeof(v));
2391 }
2392
2393 static unsigned int regmap_parse_64_native(const void *buf)
2394 {
2395 - return *(u64 *)buf;
2396 + u64 v;
2397 +
2398 + memcpy(&v, buf, sizeof(v));
2399 + return v;
2400 }
2401 #endif
2402
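
The regmap conversions above all move in one direction: casted typed loads and stores become get_unaligned_*/put_unaligned_* helpers, and the native-endian variants become memcpy(). The reason is that a dereference such as *(u32 *)buf is undefined behaviour when buf is not 4-byte aligned and can fault outright on strict-alignment architectures, whereas a fixed-size memcpy() compiles down to a safe (and on x86 equally fast) access. A small userspace sketch of the native-endian pair, with a deliberately misaligned buffer (the names are illustrative, not the driver's):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* analogue of regmap_format_32_native(): store without assuming alignment */
static void format_32_native(void *buf, unsigned int val, unsigned int shift)
{
	uint32_t v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

/* analogue of regmap_parse_32_native(): load without assuming alignment */
static unsigned int parse_32_native(const void *buf)
{
	uint32_t v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

int main(void)
{
	char raw[8] = { 0 };

	/* raw + 1 is misaligned for a 4-byte access; memcpy() is still fine */
	format_32_native(raw + 1, 0xdeadbeef, 0);
	printf("0x%x\n", parse_32_native(raw + 1));
	return 0;
}
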
2403 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
2404 index 78181908f0df..7b61d53ba050 100644
2405 --- a/drivers/block/nbd.c
2406 +++ b/drivers/block/nbd.c
2407 @@ -1022,25 +1022,26 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
2408 test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
2409 dev_err(disk_to_dev(nbd->disk),
2410 "Device being setup by another task");
2411 - sockfd_put(sock);
2412 - return -EBUSY;
2413 + err = -EBUSY;
2414 + goto put_socket;
2415 + }
2416 +
2417 + nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
2418 + if (!nsock) {
2419 + err = -ENOMEM;
2420 + goto put_socket;
2421 }
2422
2423 socks = krealloc(config->socks, (config->num_connections + 1) *
2424 sizeof(struct nbd_sock *), GFP_KERNEL);
2425 if (!socks) {
2426 - sockfd_put(sock);
2427 - return -ENOMEM;
2428 + kfree(nsock);
2429 + err = -ENOMEM;
2430 + goto put_socket;
2431 }
2432
2433 config->socks = socks;
2434
2435 - nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
2436 - if (!nsock) {
2437 - sockfd_put(sock);
2438 - return -ENOMEM;
2439 - }
2440 -
2441 nsock->fallback_index = -1;
2442 nsock->dead = false;
2443 mutex_init(&nsock->tx_lock);
2444 @@ -1052,6 +1053,10 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
2445 atomic_inc(&config->live_connections);
2446
2447 return 0;
2448 +
2449 +put_socket:
2450 + sockfd_put(sock);
2451 + return err;
2452 }
2453
2454 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
2455 diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
2456 index 29ba26742c8f..81f5103dccb6 100644
2457 --- a/drivers/gpio/gpio-pca953x.c
2458 +++ b/drivers/gpio/gpio-pca953x.c
2459 @@ -107,6 +107,84 @@ static const struct i2c_device_id pca953x_id[] = {
2460 };
2461 MODULE_DEVICE_TABLE(i2c, pca953x_id);
2462
2463 +#ifdef CONFIG_GPIO_PCA953X_IRQ
2464 +
2465 +#include <linux/dmi.h>
2466 +#include <linux/gpio.h>
2467 +#include <linux/list.h>
2468 +
2469 +static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = {
2470 + {
2471 + /*
2472 + * On Intel Galileo Gen 2 board the IRQ pin of one of
2473 + * the I²C GPIO expanders, which has GpioInt() resource,
2474 + * is provided as an absolute number instead of being
2475 +		 * relative. Since the first controller (gpio-sch.c) and
2476 +		 * the second (gpio-dwapb.c) have fixed GPIO bases, we may
2477 +		 * safely refer to the number in the global GPIO space to
2478 +		 * get an IRQ out of it.
2479 + */
2480 + .matches = {
2481 + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
2482 + },
2483 + },
2484 + {}
2485 +};
2486 +
2487 +#ifdef CONFIG_ACPI
2488 +static int pca953x_acpi_get_pin(struct acpi_resource *ares, void *data)
2489 +{
2490 + struct acpi_resource_gpio *agpio;
2491 + int *pin = data;
2492 +
2493 + if (acpi_gpio_get_irq_resource(ares, &agpio))
2494 + *pin = agpio->pin_table[0];
2495 + return 1;
2496 +}
2497 +
2498 +static int pca953x_acpi_find_pin(struct device *dev)
2499 +{
2500 + struct acpi_device *adev = ACPI_COMPANION(dev);
2501 + int pin = -ENOENT, ret;
2502 + LIST_HEAD(r);
2503 +
2504 + ret = acpi_dev_get_resources(adev, &r, pca953x_acpi_get_pin, &pin);
2505 + acpi_dev_free_resource_list(&r);
2506 + if (ret < 0)
2507 + return ret;
2508 +
2509 + return pin;
2510 +}
2511 +#else
2512 +static inline int pca953x_acpi_find_pin(struct device *dev) { return -ENXIO; }
2513 +#endif
2514 +
2515 +static int pca953x_acpi_get_irq(struct device *dev)
2516 +{
2517 + int pin, ret;
2518 +
2519 + pin = pca953x_acpi_find_pin(dev);
2520 + if (pin < 0)
2521 + return pin;
2522 +
2523 + dev_info(dev, "Applying ACPI interrupt quirk (GPIO %d)\n", pin);
2524 +
2525 + if (!gpio_is_valid(pin))
2526 + return -EINVAL;
2527 +
2528 + ret = gpio_request(pin, "pca953x interrupt");
2529 + if (ret)
2530 + return ret;
2531 +
2532 + ret = gpio_to_irq(pin);
2533 +
2534 + /* When pin is used as an IRQ, no need to keep it requested */
2535 + gpio_free(pin);
2536 +
2537 + return ret;
2538 +}
2539 +#endif
2540 +
2541 static const struct acpi_device_id pca953x_acpi_ids[] = {
2542 { "INT3491", 16 | PCA953X_TYPE | PCA_LATCH_INT, },
2543 { }
2544 @@ -772,6 +850,12 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
2545 u8 reg_direction[MAX_BANK];
2546 int ret, i;
2547
2548 + if (dmi_first_match(pca953x_dmi_acpi_irq_info)) {
2549 + ret = pca953x_acpi_get_irq(&client->dev);
2550 + if (ret > 0)
2551 + client->irq = ret;
2552 + }
2553 +
2554 if (!client->irq)
2555 return 0;
2556
2557 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
2558 index 96b2a31ccfed..f06a5142d66e 100644
2559 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
2560 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
2561 @@ -36,7 +36,8 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
2562
2563 memset(&ti, 0, sizeof(struct amdgpu_task_info));
2564
2565 - if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
2566 + if (amdgpu_gpu_recovery &&
2567 + amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
2568 DRM_ERROR("ring %s timeout, but soft recovered\n",
2569 s_job->sched->name);
2570 return;
2571 diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
2572 index ffd95bfeaa94..d00ea384dcbf 100644
2573 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
2574 +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
2575 @@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data {
2576 int orientation;
2577 };
2578
2579 -static const struct drm_dmi_panel_orientation_data acer_s1003 = {
2580 - .width = 800,
2581 - .height = 1280,
2582 - .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
2583 -};
2584 -
2585 static const struct drm_dmi_panel_orientation_data asus_t100ha = {
2586 .width = 800,
2587 .height = 1280,
2588 @@ -114,13 +108,19 @@ static const struct dmi_system_id orientation_data[] = {
2589 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
2590 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
2591 },
2592 - .driver_data = (void *)&acer_s1003,
2593 + .driver_data = (void *)&lcd800x1280_rightside_up,
2594 }, { /* Asus T100HA */
2595 .matches = {
2596 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
2597 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
2598 },
2599 .driver_data = (void *)&asus_t100ha,
2600 + }, { /* Asus T101HA */
2601 + .matches = {
2602 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
2603 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
2604 + },
2605 + .driver_data = (void *)&lcd800x1280_rightside_up,
2606 }, { /* GPD MicroPC (generic strings, also match on bios date) */
2607 .matches = {
2608 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
2609 diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
2610 index 5649887d2b90..16e5fb9ec784 100644
2611 --- a/drivers/gpu/drm/mcde/mcde_drv.c
2612 +++ b/drivers/gpu/drm/mcde/mcde_drv.c
2613 @@ -215,7 +215,6 @@ static int mcde_modeset_init(struct drm_device *drm)
2614
2615 drm_mode_config_reset(drm);
2616 drm_kms_helper_poll_init(drm);
2617 - drm_fbdev_generic_setup(drm, 32);
2618
2619 return 0;
2620
2621 @@ -282,6 +281,8 @@ static int mcde_drm_bind(struct device *dev)
2622 if (ret < 0)
2623 goto unbind;
2624
2625 + drm_fbdev_generic_setup(drm, 32);
2626 +
2627 return 0;
2628
2629 unbind:
2630 diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
2631 index 584a9ecadce6..b7592b16ea94 100644
2632 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
2633 +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
2634 @@ -101,6 +101,16 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
2635 true, true);
2636 }
2637
2638 +static void mtk_plane_atomic_disable(struct drm_plane *plane,
2639 + struct drm_plane_state *old_state)
2640 +{
2641 + struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
2642 +
2643 + state->pending.enable = false;
2644 + wmb(); /* Make sure the above parameter is set before update */
2645 + state->pending.dirty = true;
2646 +}
2647 +
2648 static void mtk_plane_atomic_update(struct drm_plane *plane,
2649 struct drm_plane_state *old_state)
2650 {
2651 @@ -115,6 +125,11 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
2652 if (!crtc || WARN_ON(!fb))
2653 return;
2654
2655 + if (!plane->state->visible) {
2656 + mtk_plane_atomic_disable(plane, old_state);
2657 + return;
2658 + }
2659 +
2660 gem = fb->obj[0];
2661 mtk_gem = to_mtk_gem_obj(gem);
2662 addr = mtk_gem->dma_addr;
2663 @@ -136,16 +151,6 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
2664 state->pending.dirty = true;
2665 }
2666
2667 -static void mtk_plane_atomic_disable(struct drm_plane *plane,
2668 - struct drm_plane_state *old_state)
2669 -{
2670 - struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
2671 -
2672 - state->pending.enable = false;
2673 - wmb(); /* Make sure the above parameter is set before update */
2674 - state->pending.dirty = true;
2675 -}
2676 -
2677 static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
2678 .prepare_fb = drm_gem_fb_prepare_fb,
2679 .atomic_check = mtk_plane_atomic_check,
2680 diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
2681 index c6fd123f60b5..f9685cce1652 100644
2682 --- a/drivers/gpu/drm/radeon/ci_dpm.c
2683 +++ b/drivers/gpu/drm/radeon/ci_dpm.c
2684 @@ -5578,6 +5578,7 @@ static int ci_parse_power_table(struct radeon_device *rdev)
2685 if (!rdev->pm.dpm.ps)
2686 return -ENOMEM;
2687 power_state_offset = (u8 *)state_array->states;
2688 + rdev->pm.dpm.num_ps = 0;
2689 for (i = 0; i < state_array->ucNumEntries; i++) {
2690 u8 *idx;
2691 power_state = (union pplib_power_state *)power_state_offset;
2692 @@ -5587,10 +5588,8 @@ static int ci_parse_power_table(struct radeon_device *rdev)
2693 if (!rdev->pm.power_state[i].clock_info)
2694 return -EINVAL;
2695 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
2696 - if (ps == NULL) {
2697 - kfree(rdev->pm.dpm.ps);
2698 + if (ps == NULL)
2699 return -ENOMEM;
2700 - }
2701 rdev->pm.dpm.ps[i].ps_priv = ps;
2702 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
2703 non_clock_info,
2704 @@ -5612,8 +5611,8 @@ static int ci_parse_power_table(struct radeon_device *rdev)
2705 k++;
2706 }
2707 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
2708 + rdev->pm.dpm.num_ps = i + 1;
2709 }
2710 - rdev->pm.dpm.num_ps = state_array->ucNumEntries;
2711
2712 /* fill in the vce power states */
2713 for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
2714 diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
2715 index 839b49c40e51..767fb440a79d 100644
2716 --- a/drivers/gpu/drm/tegra/hub.c
2717 +++ b/drivers/gpu/drm/tegra/hub.c
2718 @@ -141,7 +141,9 @@ int tegra_display_hub_prepare(struct tegra_display_hub *hub)
2719 for (i = 0; i < hub->soc->num_wgrps; i++) {
2720 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
2721
2722 - tegra_windowgroup_enable(wgrp);
2723 + /* Skip orphaned window group whose parent DC is disabled */
2724 + if (wgrp->parent)
2725 + tegra_windowgroup_enable(wgrp);
2726 }
2727
2728 return 0;
2729 @@ -158,7 +160,9 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
2730 for (i = 0; i < hub->soc->num_wgrps; i++) {
2731 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
2732
2733 - tegra_windowgroup_disable(wgrp);
2734 + /* Skip orphaned window group whose parent DC is disabled */
2735 + if (wgrp->parent)
2736 + tegra_windowgroup_disable(wgrp);
2737 }
2738 }
2739
2740 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
2741 index abf165b2f64f..3ce8ad7603c7 100644
2742 --- a/drivers/gpu/drm/ttm/ttm_bo.c
2743 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
2744 @@ -941,8 +941,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
2745 if (!fence)
2746 return 0;
2747
2748 - if (no_wait_gpu)
2749 + if (no_wait_gpu) {
2750 + dma_fence_put(fence);
2751 return -EBUSY;
2752 + }
2753
2754 dma_resv_add_shared_fence(bo->base.resv, fence);
2755
2756 diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
2757 index 742aa9ff21b8..fcda8621ae6f 100644
2758 --- a/drivers/gpu/host1x/bus.c
2759 +++ b/drivers/gpu/host1x/bus.c
2760 @@ -686,8 +686,17 @@ EXPORT_SYMBOL(host1x_driver_register_full);
2761 */
2762 void host1x_driver_unregister(struct host1x_driver *driver)
2763 {
2764 + struct host1x *host1x;
2765 +
2766 driver_unregister(&driver->driver);
2767
2768 + mutex_lock(&devices_lock);
2769 +
2770 + list_for_each_entry(host1x, &devices, list)
2771 + host1x_detach_driver(host1x, driver);
2772 +
2773 + mutex_unlock(&devices_lock);
2774 +
2775 mutex_lock(&drivers_lock);
2776 list_del_init(&driver->list);
2777 mutex_unlock(&drivers_lock);
2778 diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
2779 index bddb5434fbed..d2d70c89193f 100644
2780 --- a/drivers/infiniband/core/sa_query.c
2781 +++ b/drivers/infiniband/core/sa_query.c
2782 @@ -829,13 +829,20 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
2783 return len;
2784 }
2785
2786 -static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
2787 +static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
2788 {
2789 struct sk_buff *skb = NULL;
2790 struct nlmsghdr *nlh;
2791 void *data;
2792 struct ib_sa_mad *mad;
2793 int len;
2794 + unsigned long flags;
2795 + unsigned long delay;
2796 + gfp_t gfp_flag;
2797 + int ret;
2798 +
2799 + INIT_LIST_HEAD(&query->list);
2800 + query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
2801
2802 mad = query->mad_buf->mad;
2803 len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
2804 @@ -860,36 +867,25 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
2805 /* Repair the nlmsg header length */
2806 nlmsg_end(skb, nlh);
2807
2808 - return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
2809 -}
2810 + gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
2811 + GFP_NOWAIT;
2812
2813 -static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
2814 -{
2815 - unsigned long flags;
2816 - unsigned long delay;
2817 - int ret;
2818 + spin_lock_irqsave(&ib_nl_request_lock, flags);
2819 + ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
2820
2821 - INIT_LIST_HEAD(&query->list);
2822 - query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
2823 + if (ret)
2824 + goto out;
2825
2826 - /* Put the request on the list first.*/
2827 - spin_lock_irqsave(&ib_nl_request_lock, flags);
2828 +	/* Put the request on the list. */
2829 delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
2830 query->timeout = delay + jiffies;
2831 list_add_tail(&query->list, &ib_nl_request_list);
2832 /* Start the timeout if this is the only request */
2833 if (ib_nl_request_list.next == &query->list)
2834 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
2835 - spin_unlock_irqrestore(&ib_nl_request_lock, flags);
2836
2837 - ret = ib_nl_send_msg(query, gfp_mask);
2838 - if (ret) {
2839 - ret = -EIO;
2840 - /* Remove the request */
2841 - spin_lock_irqsave(&ib_nl_request_lock, flags);
2842 - list_del(&query->list);
2843 - spin_unlock_irqrestore(&ib_nl_request_lock, flags);
2844 - }
2845 +out:
2846 + spin_unlock_irqrestore(&ib_nl_request_lock, flags);
2847
2848 return ret;
2849 }
2850 diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
2851 index 26b792bb1027..fbff6b2f00e7 100644
2852 --- a/drivers/infiniband/hw/hfi1/init.c
2853 +++ b/drivers/infiniband/hw/hfi1/init.c
2854 @@ -844,6 +844,29 @@ wq_error:
2855 return -ENOMEM;
2856 }
2857
2858 +/**
2859 + * destroy_workqueues - destroy per port workqueues
2860 + * @dd: the hfi1_ib device
2861 + */
2862 +static void destroy_workqueues(struct hfi1_devdata *dd)
2863 +{
2864 + int pidx;
2865 + struct hfi1_pportdata *ppd;
2866 +
2867 + for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2868 + ppd = dd->pport + pidx;
2869 +
2870 + if (ppd->hfi1_wq) {
2871 + destroy_workqueue(ppd->hfi1_wq);
2872 + ppd->hfi1_wq = NULL;
2873 + }
2874 + if (ppd->link_wq) {
2875 + destroy_workqueue(ppd->link_wq);
2876 + ppd->link_wq = NULL;
2877 + }
2878 + }
2879 +}
2880 +
2881 /**
2882 * enable_general_intr() - Enable the IRQs that will be handled by the
2883 * general interrupt handler.
2884 @@ -1117,15 +1140,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
2885 * We can't count on interrupts since we are stopping.
2886 */
2887 hfi1_quiet_serdes(ppd);
2888 -
2889 - if (ppd->hfi1_wq) {
2890 - destroy_workqueue(ppd->hfi1_wq);
2891 - ppd->hfi1_wq = NULL;
2892 - }
2893 - if (ppd->link_wq) {
2894 - destroy_workqueue(ppd->link_wq);
2895 - ppd->link_wq = NULL;
2896 - }
2897 + if (ppd->hfi1_wq)
2898 + flush_workqueue(ppd->hfi1_wq);
2899 + if (ppd->link_wq)
2900 + flush_workqueue(ppd->link_wq);
2901 }
2902 sdma_exit(dd);
2903 }
2904 @@ -1814,6 +1832,7 @@ static void remove_one(struct pci_dev *pdev)
2905 * clear dma engines, etc.
2906 */
2907 shutdown_device(dd);
2908 + destroy_workqueues(dd);
2909
2910 stop_timers(dd);
2911
2912 diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
2913 index f8e733aa3bb8..acd4400b0092 100644
2914 --- a/drivers/infiniband/hw/hfi1/qp.c
2915 +++ b/drivers/infiniband/hw/hfi1/qp.c
2916 @@ -381,7 +381,10 @@ bool _hfi1_schedule_send(struct rvt_qp *qp)
2917 struct hfi1_ibport *ibp =
2918 to_iport(qp->ibqp.device, qp->port_num);
2919 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2920 - struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
2921 + struct hfi1_devdata *dd = ppd->dd;
2922 +
2923 + if (dd->flags & HFI1_SHUTDOWN)
2924 + return true;
2925
2926 return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
2927 priv->s_sde ?
2928 diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
2929 index 8a2e0d9351e9..7c6fd720fb2e 100644
2930 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c
2931 +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
2932 @@ -5406,7 +5406,10 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
2933 struct hfi1_ibport *ibp =
2934 to_iport(qp->ibqp.device, qp->port_num);
2935 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2936 - struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
2937 + struct hfi1_devdata *dd = ppd->dd;
2938 +
2939 +	if (dd->flags & HFI1_SHUTDOWN)
2940 + return true;
2941
2942 return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
2943 priv->s_sde ?
2944 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
2945 index 4f44a731a48e..b781ad74e6de 100644
2946 --- a/drivers/infiniband/hw/mlx5/main.c
2947 +++ b/drivers/infiniband/hw/mlx5/main.c
2948 @@ -517,7 +517,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
2949 mdev_port_num);
2950 if (err)
2951 goto out;
2952 - ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
2953 + ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
2954 eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
2955
2956 props->active_width = IB_WIDTH_4X;
2957 diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
2958 index 130b1e31b978..fb66d6757278 100644
2959 --- a/drivers/infiniband/sw/siw/siw_main.c
2960 +++ b/drivers/infiniband/sw/siw/siw_main.c
2961 @@ -66,12 +66,13 @@ static int siw_device_register(struct siw_device *sdev, const char *name)
2962 static int dev_id = 1;
2963 int rv;
2964
2965 + sdev->vendor_part_id = dev_id++;
2966 +
2967 rv = ib_register_device(base_dev, name);
2968 if (rv) {
2969 pr_warn("siw: device registration error %d\n", rv);
2970 return rv;
2971 }
2972 - sdev->vendor_part_id = dev_id++;
2973
2974 siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr);
2975
2976 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
2977 index 6366b5fbb3a4..cdc1f4736a11 100644
2978 --- a/drivers/iommu/intel-iommu.c
2979 +++ b/drivers/iommu/intel-iommu.c
2980 @@ -5962,6 +5962,23 @@ static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
2981 return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
2982 }
2983
2984 +/*
2985 + * Check that the device does not live on an external-facing PCI port that is
2986 + * marked as untrusted. Such devices should not be able to apply quirks and
2987 + * thus not be able to bypass the IOMMU restrictions.
2988 + */
2989 +static bool risky_device(struct pci_dev *pdev)
2990 +{
2991 + if (pdev->untrusted) {
2992 + pci_info(pdev,
2993 + "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
2994 + pdev->vendor, pdev->device);
2995 + pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
2996 + return true;
2997 + }
2998 + return false;
2999 +}
3000 +
3001 const struct iommu_ops intel_iommu_ops = {
3002 .capable = intel_iommu_capable,
3003 .domain_alloc = intel_iommu_domain_alloc,
3004 @@ -5990,6 +6007,9 @@ const struct iommu_ops intel_iommu_ops = {
3005
3006 static void quirk_iommu_igfx(struct pci_dev *dev)
3007 {
3008 + if (risky_device(dev))
3009 + return;
3010 +
3011 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
3012 dmar_map_gfx = 0;
3013 }
3014 @@ -6031,6 +6051,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
3015
3016 static void quirk_iommu_rwbf(struct pci_dev *dev)
3017 {
3018 + if (risky_device(dev))
3019 + return;
3020 +
3021 /*
3022 * Mobile 4 Series Chipset neglects to set RWBF capability,
3023 * but needs it. Same seems to hold for the desktop versions.
3024 @@ -6061,6 +6084,9 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
3025 {
3026 unsigned short ggc;
3027
3028 + if (risky_device(dev))
3029 + return;
3030 +
3031 if (pci_read_config_word(dev, GGC, &ggc))
3032 return;
3033
3034 @@ -6094,6 +6120,12 @@ static void __init check_tylersburg_isoch(void)
3035 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
3036 if (!pdev)
3037 return;
3038 +
3039 + if (risky_device(pdev)) {
3040 + pci_dev_put(pdev);
3041 + return;
3042 + }
3043 +
3044 pci_dev_put(pdev);
3045
3046 /* System Management Registers. Might be hidden, in which case
3047 @@ -6103,6 +6135,11 @@ static void __init check_tylersburg_isoch(void)
3048 if (!pdev)
3049 return;
3050
3051 + if (risky_device(pdev)) {
3052 + pci_dev_put(pdev);
3053 + return;
3054 + }
3055 +
3056 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
3057 pci_dev_put(pdev);
3058 return;
3059 diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
3060 index 67eb4e972cc3..ed2f711c24c4 100644
3061 --- a/drivers/md/dm-writecache.c
3062 +++ b/drivers/md/dm-writecache.c
3063 @@ -2104,6 +2104,12 @@ invalid_optional:
3064 }
3065
3066 if (WC_MODE_PMEM(wc)) {
3067 + if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
3068 + r = -EOPNOTSUPP;
3069 + ti->error = "Asynchronous persistent memory not supported as pmem cache";
3070 + goto bad;
3071 + }
3072 +
3073 r = persistent_memory_claim(wc);
3074 if (r) {
3075 ti->error = "Unable to map persistent memory for cache";
3076 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
3077 index 1e6e0c970e19..915019ec0e25 100644
3078 --- a/drivers/md/dm.c
3079 +++ b/drivers/md/dm.c
3080 @@ -12,6 +12,7 @@
3081 #include <linux/init.h>
3082 #include <linux/module.h>
3083 #include <linux/mutex.h>
3084 +#include <linux/sched/mm.h>
3085 #include <linux/sched/signal.h>
3086 #include <linux/blkpg.h>
3087 #include <linux/bio.h>
3088 @@ -2886,17 +2887,25 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
3089 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
3090 unsigned cookie)
3091 {
3092 + int r;
3093 + unsigned noio_flag;
3094 char udev_cookie[DM_COOKIE_LENGTH];
3095 char *envp[] = { udev_cookie, NULL };
3096
3097 + noio_flag = memalloc_noio_save();
3098 +
3099 if (!cookie)
3100 - return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
3101 + r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
3102 else {
3103 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
3104 DM_COOKIE_ENV_VAR_NAME, cookie);
3105 - return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
3106 - action, envp);
3107 + r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
3108 + action, envp);
3109 }
3110 +
3111 + memalloc_noio_restore(noio_flag);
3112 +
3113 + return r;
3114 }
3115
3116 uint32_t dm_next_uevent_seq(struct mapped_device *md)
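
The dm.c hunk above wraps both kobject_uevent() calls in a memalloc_noio_save()/memalloc_noio_restore() pair. Inside that scope every allocation is implicitly degraded to GFP_NOIO, so an event emitted on behalf of a block device cannot recurse into that device's own I/O path and deadlock under memory pressure. A minimal kernel-style sketch of the same scope, assuming a hypothetical caller (demo_send_event is not dm code):

#include <linux/kobject.h>
#include <linux/sched/mm.h>

/* hypothetical helper: fire a change uevent without risking I/O recursion */
static int demo_send_event(struct kobject *kobj)
{
	unsigned int noio_flag;
	int r;

	noio_flag = memalloc_noio_save();	/* allocations below become GFP_NOIO */
	r = kobject_uevent(kobj, KOBJ_CHANGE);	/* allocates internally */
	memalloc_noio_restore(noio_flag);	/* leave the NOIO scope */

	return r;
}
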
3117 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
3118 index f0737c57ed5f..1491561d2e5c 100644
3119 --- a/drivers/message/fusion/mptscsih.c
3120 +++ b/drivers/message/fusion/mptscsih.c
3121 @@ -118,8 +118,6 @@ int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state);
3122 int mptscsih_resume(struct pci_dev *pdev);
3123 #endif
3124
3125 -#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE
3126 -
3127
3128 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3129 /*
3130 @@ -2422,7 +2420,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
3131 /* Copy the sense received into the scsi command block. */
3132 req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
3133 sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC));
3134 - memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc));
3135 + memcpy(sc->sense_buffer, sense_data, MPT_SENSE_BUFFER_ALLOC);
3136
3137 /* Log SMART data (asc = 0x5D, non-IM case only) if required.
3138 */
3139 diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
3140 index e712315c7e8d..545c3f2f8a06 100644
3141 --- a/drivers/mmc/host/meson-gx-mmc.c
3142 +++ b/drivers/mmc/host/meson-gx-mmc.c
3143 @@ -1151,9 +1151,11 @@ static int meson_mmc_probe(struct platform_device *pdev)
3144
3145 mmc->caps |= MMC_CAP_CMD23;
3146 if (host->dram_access_quirk) {
3147 + /* Limit segments to 1 due to low available sram memory */
3148 + mmc->max_segs = 1;
3149 /* Limit to the available sram memory */
3150 - mmc->max_segs = SD_EMMC_SRAM_DATA_BUF_LEN / mmc->max_blk_size;
3151 - mmc->max_blk_count = mmc->max_segs;
3152 + mmc->max_blk_count = SD_EMMC_SRAM_DATA_BUF_LEN /
3153 + mmc->max_blk_size;
3154 } else {
3155 mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
3156 mmc->max_segs = SD_EMMC_DESC_BUF_LEN /
3157 diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
3158 index 24a5e99f7fd5..84c4319e3b31 100644
3159 --- a/drivers/net/dsa/microchip/ksz8795.c
3160 +++ b/drivers/net/dsa/microchip/ksz8795.c
3161 @@ -1267,6 +1267,9 @@ static int ksz8795_switch_init(struct ksz_device *dev)
3162 return -ENOMEM;
3163 }
3164
3165 + /* set the real number of ports */
3166 + dev->ds->num_ports = dev->port_cnt;
3167 +
3168 return 0;
3169 }
3170
3171 diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
3172 index 50ffc63d6231..3afb596d8e43 100644
3173 --- a/drivers/net/dsa/microchip/ksz9477.c
3174 +++ b/drivers/net/dsa/microchip/ksz9477.c
3175 @@ -1587,6 +1587,9 @@ static int ksz9477_switch_init(struct ksz_device *dev)
3176 return -ENOMEM;
3177 }
3178
3179 + /* set the real number of ports */
3180 + dev->ds->num_ports = dev->port_cnt;
3181 +
3182 return 0;
3183 }
3184
3185 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
3186 index 1046b22220a3..452be9749827 100644
3187 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
3188 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
3189 @@ -398,6 +398,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp)
3190 }
3191 }
3192
3193 + bp->pf.active_vfs = 0;
3194 kfree(bp->pf.vf);
3195 bp->pf.vf = NULL;
3196 }
3197 @@ -833,7 +834,6 @@ void bnxt_sriov_disable(struct bnxt *bp)
3198
3199 bnxt_free_vf_resources(bp);
3200
3201 - bp->pf.active_vfs = 0;
3202 /* Reclaim all resources for the PF. */
3203 rtnl_lock();
3204 bnxt_restore_pf_fw_resources(bp);
3205 diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
3206 index 1ec19d9fab00..01ed4d4296db 100644
3207 --- a/drivers/net/ethernet/cadence/macb_main.c
3208 +++ b/drivers/net/ethernet/cadence/macb_main.c
3209 @@ -4260,7 +4260,7 @@ static int macb_probe(struct platform_device *pdev)
3210 bp->wol = 0;
3211 if (of_get_property(np, "magic-packet", NULL))
3212 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
3213 - device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
3214 + device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
3215
3216 spin_lock_init(&bp->lock);
3217
3218 @@ -4453,7 +4453,8 @@ static int __maybe_unused macb_suspend(struct device *dev)
3219 netif_carrier_off(netdev);
3220 if (bp->ptp_info)
3221 bp->ptp_info->ptp_remove(netdev);
3222 - pm_runtime_force_suspend(dev);
3223 + if (!device_may_wakeup(dev))
3224 + pm_runtime_force_suspend(dev);
3225
3226 return 0;
3227 }
3228 @@ -4468,7 +4469,8 @@ static int __maybe_unused macb_resume(struct device *dev)
3229 if (!netif_running(netdev))
3230 return 0;
3231
3232 - pm_runtime_force_resume(dev);
3233 + if (!device_may_wakeup(dev))
3234 + pm_runtime_force_resume(dev);
3235
3236 if (bp->wol & MACB_WOL_ENABLED) {
3237 macb_writel(bp, IDR, MACB_BIT(WOL));
3238 @@ -4507,7 +4509,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
3239 struct net_device *netdev = dev_get_drvdata(dev);
3240 struct macb *bp = netdev_priv(netdev);
3241
3242 - if (!(device_may_wakeup(&bp->dev->dev))) {
3243 + if (!(device_may_wakeup(dev))) {
3244 clk_disable_unprepare(bp->tx_clk);
3245 clk_disable_unprepare(bp->hclk);
3246 clk_disable_unprepare(bp->pclk);
3247 @@ -4523,7 +4525,7 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
3248 struct net_device *netdev = dev_get_drvdata(dev);
3249 struct macb *bp = netdev_priv(netdev);
3250
3251 - if (!(device_may_wakeup(&bp->dev->dev))) {
3252 + if (!(device_may_wakeup(dev))) {
3253 clk_prepare_enable(bp->pclk);
3254 clk_prepare_enable(bp->hclk);
3255 clk_prepare_enable(bp->tx_clk);
3256 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
3257 index 375e1be6a2d8..f459313357c7 100644
3258 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
3259 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
3260 @@ -839,16 +839,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family)
3261 struct in_addr *addr;
3262
3263 addr = (struct in_addr *)ipmask;
3264 - if (ntohl(addr->s_addr) == 0xffffffff)
3265 + if (addr->s_addr == htonl(0xffffffff))
3266 return true;
3267 } else if (family == AF_INET6) {
3268 struct in6_addr *addr6;
3269
3270 addr6 = (struct in6_addr *)ipmask;
3271 - if (ntohl(addr6->s6_addr32[0]) == 0xffffffff &&
3272 - ntohl(addr6->s6_addr32[1]) == 0xffffffff &&
3273 - ntohl(addr6->s6_addr32[2]) == 0xffffffff &&
3274 - ntohl(addr6->s6_addr32[3]) == 0xffffffff)
3275 + if (addr6->s6_addr32[0] == htonl(0xffffffff) &&
3276 + addr6->s6_addr32[1] == htonl(0xffffffff) &&
3277 + addr6->s6_addr32[2] == htonl(0xffffffff) &&
3278 + addr6->s6_addr32[3] == htonl(0xffffffff))
3279 return true;
3280 }
3281 return false;
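
For an all-ones mask the cxgb4 change above is behaviourally a no-op, since 0xffffffff is invariant under byte swapping; what it buys is endian-clean typing: the big-endian address words are compared in network byte order against htonl() of a constant, which the compiler folds at build time, instead of byte-swapping every word at run time. The same idiom in a small userspace sketch (the helper is illustrative, not driver code):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* compare in network byte order: htonl() of a constant folds at build time */
static int ipv4_mask_is_all_ones(uint32_t mask_be)
{
	return mask_be == htonl(0xffffffff);
}

int main(void)
{
	struct in_addr mask;

	inet_pton(AF_INET, "255.255.255.255", &mask);
	printf("all ones: %d\n", ipv4_mask_is_all_ones(mask.s_addr));
	return 0;
}
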
3282 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
3283 index 31fcfc58e337..588b63473c47 100644
3284 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
3285 +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
3286 @@ -3499,7 +3499,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
3287 drv_fw = &fw_info->fw_hdr;
3288
3289 /* Read the header of the firmware on the card */
3290 - ret = -t4_read_flash(adap, FLASH_FW_START,
3291 + ret = t4_read_flash(adap, FLASH_FW_START,
3292 sizeof(*card_fw) / sizeof(uint32_t),
3293 (uint32_t *)card_fw, 1);
3294 if (ret == 0) {
3295 @@ -3528,8 +3528,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
3296 should_install_fs_fw(adap, card_fw_usable,
3297 be32_to_cpu(fs_fw->fw_ver),
3298 be32_to_cpu(card_fw->fw_ver))) {
3299 - ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
3300 - fw_size, 0);
3301 + ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
3302 + fw_size, 0);
3303 if (ret != 0) {
3304 dev_err(adap->pdev_dev,
3305 "failed to install firmware: %d\n", ret);
3306 @@ -3560,7 +3560,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
3307 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
3308 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3309 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3310 - ret = EINVAL;
3311 + ret = -EINVAL;
3312 goto bye;
3313 }
3314
3315 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3316 index 403e0f089f2a..37537c302080 100644
3317 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3318 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3319 @@ -3993,9 +3993,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3320
3321 hns3_put_ring_config(priv);
3322
3323 - hns3_dbg_uninit(handle);
3324 -
3325 out_netdev_free:
3326 + hns3_dbg_uninit(handle);
3327 free_netdev(netdev);
3328 }
3329
3330 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
3331 index 52c9d204fe3d..34e5448d59f6 100644
3332 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
3333 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
3334 @@ -174,18 +174,21 @@ static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring,
3335 {
3336 struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
3337 unsigned char *packet = skb->data;
3338 + u32 len = skb_headlen(skb);
3339 u32 i;
3340
3341 - for (i = 0; i < skb->len; i++)
3342 + len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE);
3343 +
3344 + for (i = 0; i < len; i++)
3345 if (packet[i] != (unsigned char)(i & 0xff))
3346 break;
3347
3348 /* The packet is correctly received */
3349 - if (i == skb->len)
3350 + if (i == HNS3_NIC_LB_TEST_PACKET_SIZE)
3351 tqp_vector->rx_group.total_packets++;
3352 else
3353 print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1,
3354 - skb->data, skb->len, true);
3355 + skb->data, len, true);
3356
3357 dev_kfree_skb_any(skb);
3358 }
3359 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
3360 index 4f503b9a674c..d58597360699 100644
3361 --- a/drivers/net/ethernet/ibm/ibmvnic.c
3362 +++ b/drivers/net/ethernet/ibm/ibmvnic.c
3363 @@ -1878,13 +1878,18 @@ static int do_reset(struct ibmvnic_adapter *adapter,
3364 release_sub_crqs(adapter, 1);
3365 } else {
3366 rc = ibmvnic_reset_crq(adapter);
3367 - if (!rc)
3368 + if (rc == H_CLOSED || rc == H_SUCCESS) {
3369 rc = vio_enable_interrupts(adapter->vdev);
3370 + if (rc)
3371 + netdev_err(adapter->netdev,
3372 + "Reset failed to enable interrupts. rc=%d\n",
3373 + rc);
3374 + }
3375 }
3376
3377 if (rc) {
3378 netdev_err(adapter->netdev,
3379 - "Couldn't initialize crq. rc=%d\n", rc);
3380 + "Reset couldn't initialize crq. rc=%d\n", rc);
3381 goto out;
3382 }
3383
3384 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
3385 index a8dd0228b678..095ed81cc0ba 100644
3386 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
3387 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
3388 @@ -458,11 +458,15 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
3389 i40e_get_netdev_stats_struct_tx(ring, stats);
3390
3391 if (i40e_enabled_xdp_vsi(vsi)) {
3392 - ring++;
3393 + ring = READ_ONCE(vsi->xdp_rings[i]);
3394 + if (!ring)
3395 + continue;
3396 i40e_get_netdev_stats_struct_tx(ring, stats);
3397 }
3398
3399 - ring++;
3400 + ring = READ_ONCE(vsi->rx_rings[i]);
3401 + if (!ring)
3402 + continue;
3403 do {
3404 start = u64_stats_fetch_begin_irq(&ring->syncp);
3405 packets = ring->stats.packets;
3406 @@ -806,6 +810,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
3407 for (q = 0; q < vsi->num_queue_pairs; q++) {
3408 /* locate Tx ring */
3409 p = READ_ONCE(vsi->tx_rings[q]);
3410 + if (!p)
3411 + continue;
3412
3413 do {
3414 start = u64_stats_fetch_begin_irq(&p->syncp);
3415 @@ -819,8 +825,11 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
3416 tx_linearize += p->tx_stats.tx_linearize;
3417 tx_force_wb += p->tx_stats.tx_force_wb;
3418
3419 - /* Rx queue is part of the same block as Tx queue */
3420 - p = &p[1];
3421 + /* locate Rx ring */
3422 + p = READ_ONCE(vsi->rx_rings[q]);
3423 + if (!p)
3424 + continue;
3425 +
3426 do {
3427 start = u64_stats_fetch_begin_irq(&p->syncp);
3428 packets = p->stats.packets;
3429 @@ -10816,10 +10825,10 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
3430 if (vsi->tx_rings && vsi->tx_rings[0]) {
3431 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
3432 kfree_rcu(vsi->tx_rings[i], rcu);
3433 - vsi->tx_rings[i] = NULL;
3434 - vsi->rx_rings[i] = NULL;
3435 + WRITE_ONCE(vsi->tx_rings[i], NULL);
3436 + WRITE_ONCE(vsi->rx_rings[i], NULL);
3437 if (vsi->xdp_rings)
3438 - vsi->xdp_rings[i] = NULL;
3439 + WRITE_ONCE(vsi->xdp_rings[i], NULL);
3440 }
3441 }
3442 }
3443 @@ -10853,7 +10862,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
3444 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
3445 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
3446 ring->itr_setting = pf->tx_itr_default;
3447 - vsi->tx_rings[i] = ring++;
3448 + WRITE_ONCE(vsi->tx_rings[i], ring++);
3449
3450 if (!i40e_enabled_xdp_vsi(vsi))
3451 goto setup_rx;
3452 @@ -10871,7 +10880,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
3453 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
3454 set_ring_xdp(ring);
3455 ring->itr_setting = pf->tx_itr_default;
3456 - vsi->xdp_rings[i] = ring++;
3457 + WRITE_ONCE(vsi->xdp_rings[i], ring++);
3458
3459 setup_rx:
3460 ring->queue_index = i;
3461 @@ -10884,7 +10893,7 @@ setup_rx:
3462 ring->size = 0;
3463 ring->dcb_tc = 0;
3464 ring->itr_setting = pf->rx_itr_default;
3465 - vsi->rx_rings[i] = ring;
3466 + WRITE_ONCE(vsi->rx_rings[i], ring);
3467 }
3468
3469 return 0;
3470 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
3471 index cc3196ae5aea..636e6e840afa 100644
3472 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
3473 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
3474 @@ -923,7 +923,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
3475 ring->queue_index = txr_idx;
3476
3477 /* assign ring to adapter */
3478 - adapter->tx_ring[txr_idx] = ring;
3479 + WRITE_ONCE(adapter->tx_ring[txr_idx], ring);
3480
3481 /* update count and index */
3482 txr_count--;
3483 @@ -950,7 +950,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
3484 set_ring_xdp(ring);
3485
3486 /* assign ring to adapter */
3487 - adapter->xdp_ring[xdp_idx] = ring;
3488 + WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
3489
3490 /* update count and index */
3491 xdp_count--;
3492 @@ -993,7 +993,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
3493 ring->queue_index = rxr_idx;
3494
3495 /* assign ring to adapter */
3496 - adapter->rx_ring[rxr_idx] = ring;
3497 + WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);
3498
3499 /* update count and index */
3500 rxr_count--;
3501 @@ -1022,13 +1022,13 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
3502
3503 ixgbe_for_each_ring(ring, q_vector->tx) {
3504 if (ring_is_xdp(ring))
3505 - adapter->xdp_ring[ring->queue_index] = NULL;
3506 + WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
3507 else
3508 - adapter->tx_ring[ring->queue_index] = NULL;
3509 + WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
3510 }
3511
3512 ixgbe_for_each_ring(ring, q_vector->rx)
3513 - adapter->rx_ring[ring->queue_index] = NULL;
3514 + WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
3515
3516 adapter->q_vector[v_idx] = NULL;
3517 napi_hash_del(&q_vector->napi);
3518 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
3519 index edaa0bffa5c3..5336bfcd2d70 100644
3520 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
3521 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
3522 @@ -7064,7 +7064,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
3523 }
3524
3525 for (i = 0; i < adapter->num_rx_queues; i++) {
3526 - struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
3527 + struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
3528 +
3529 + if (!rx_ring)
3530 + continue;
3531 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
3532 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
3533 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
3534 @@ -7085,15 +7088,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
3535 packets = 0;
3536 /* gather some stats to the adapter struct that are per queue */
3537 for (i = 0; i < adapter->num_tx_queues; i++) {
3538 - struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
3539 + struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
3540 +
3541 + if (!tx_ring)
3542 + continue;
3543 restart_queue += tx_ring->tx_stats.restart_queue;
3544 tx_busy += tx_ring->tx_stats.tx_busy;
3545 bytes += tx_ring->stats.bytes;
3546 packets += tx_ring->stats.packets;
3547 }
3548 for (i = 0; i < adapter->num_xdp_queues; i++) {
3549 - struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
3550 + struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
3551
3552 + if (!xdp_ring)
3553 + continue;
3554 restart_queue += xdp_ring->tx_stats.restart_queue;
3555 tx_busy += xdp_ring->tx_stats.tx_busy;
3556 bytes += xdp_ring->stats.bytes;
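
The i40e and ixgbe changes above all follow one pattern: ring pointers that can be retired and republished while stats readers walk the arrays are now stored with WRITE_ONCE() and loaded with READ_ONCE() plus a NULL check, so a reader never tears the load or walks a stale `ring++` into freed memory (the freeing itself in i40e is deferred via kfree_rcu()). A compact userspace analogue, using C11 relaxed atomics in place of the kernel macros:

    #include <stdatomic.h>
    #include <stddef.h>

    struct ring {
        unsigned long packets;
    };

    #define NUM_RINGS 8

    static _Atomic(struct ring *) rings[NUM_RINGS];

    /* Writer: publish a new ring, or retire a slot by storing NULL
     * (standing in for WRITE_ONCE(vsi->rx_rings[i], ...)). */
    static void publish_ring(int i, struct ring *r)
    {
        atomic_store_explicit(&rings[i], r, memory_order_relaxed);
    }

    /* Reader: snapshot the pointer once, NULL-check, then use only the
     * snapshot (standing in for "ring = READ_ONCE(...); if (!ring)"). */
    static unsigned long total_packets(void)
    {
        unsigned long sum = 0;

        for (int i = 0; i < NUM_RINGS; i++) {
            struct ring *r = atomic_load_explicit(&rings[i],
                                                  memory_order_relaxed);
            if (!r)
                continue;
            sum += r->packets;
        }
        return sum;
    }

Each reader thus sees either the old ring or NULL, never a half-written pointer, and never dereferences a slot that the writer has already cleared.
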
3557 diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
3558 index a10ae28ebc8a..ffdb7b113f17 100644
3559 --- a/drivers/net/ethernet/marvell/mvneta.c
3560 +++ b/drivers/net/ethernet/marvell/mvneta.c
3561 @@ -104,9 +104,11 @@
3562 #define MVNETA_TX_IN_PRGRS BIT(1)
3563 #define MVNETA_TX_FIFO_EMPTY BIT(8)
3564 #define MVNETA_RX_MIN_FRAME_SIZE 0x247c
3565 +/* Only exists on Armada XP and Armada 370 */
3566 #define MVNETA_SERDES_CFG 0x24A0
3567 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7
3568 #define MVNETA_QSGMII_SERDES_PROTO 0x0667
3569 +#define MVNETA_HSGMII_SERDES_PROTO 0x1107
3570 #define MVNETA_TYPE_PRIO 0x24bc
3571 #define MVNETA_FORCE_UNI BIT(21)
3572 #define MVNETA_TXQ_CMD_1 0x24e4
3573 @@ -3164,26 +3166,60 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
3574 return 0;
3575 }
3576
3577 -static int mvneta_comphy_init(struct mvneta_port *pp)
3578 +static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
3579 {
3580 int ret;
3581
3582 - if (!pp->comphy)
3583 - return 0;
3584 -
3585 - ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET,
3586 - pp->phy_interface);
3587 + ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
3588 if (ret)
3589 return ret;
3590
3591 return phy_power_on(pp->comphy);
3592 }
3593
3594 +static int mvneta_config_interface(struct mvneta_port *pp,
3595 + phy_interface_t interface)
3596 +{
3597 + int ret = 0;
3598 +
3599 + if (pp->comphy) {
3600 + if (interface == PHY_INTERFACE_MODE_SGMII ||
3601 + interface == PHY_INTERFACE_MODE_1000BASEX ||
3602 + interface == PHY_INTERFACE_MODE_2500BASEX) {
3603 + ret = mvneta_comphy_init(pp, interface);
3604 + }
3605 + } else {
3606 + switch (interface) {
3607 + case PHY_INTERFACE_MODE_QSGMII:
3608 + mvreg_write(pp, MVNETA_SERDES_CFG,
3609 + MVNETA_QSGMII_SERDES_PROTO);
3610 + break;
3611 +
3612 + case PHY_INTERFACE_MODE_SGMII:
3613 + case PHY_INTERFACE_MODE_1000BASEX:
3614 + mvreg_write(pp, MVNETA_SERDES_CFG,
3615 + MVNETA_SGMII_SERDES_PROTO);
3616 + break;
3617 +
3618 + case PHY_INTERFACE_MODE_2500BASEX:
3619 + mvreg_write(pp, MVNETA_SERDES_CFG,
3620 + MVNETA_HSGMII_SERDES_PROTO);
3621 + break;
3622 + default:
3623 + return -EINVAL;
3624 + }
3625 + }
3626 +
3627 + pp->phy_interface = interface;
3628 +
3629 + return ret;
3630 +}
3631 +
3632 static void mvneta_start_dev(struct mvneta_port *pp)
3633 {
3634 int cpu;
3635
3636 - WARN_ON(mvneta_comphy_init(pp));
3637 + WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
3638
3639 mvneta_max_rx_size_set(pp, pp->pkt_size);
3640 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3641 @@ -3558,17 +3594,13 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
3642 /* When at 2.5G, the link partner can send frames with shortened
3643 * preambles.
3644 */
3645 - if (state->speed == SPEED_2500)
3646 + if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
3647 new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
3648
3649 - if (pp->comphy && pp->phy_interface != state->interface &&
3650 - (state->interface == PHY_INTERFACE_MODE_SGMII ||
3651 - state->interface == PHY_INTERFACE_MODE_1000BASEX ||
3652 - state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
3653 - pp->phy_interface = state->interface;
3654 -
3655 - WARN_ON(phy_power_off(pp->comphy));
3656 - WARN_ON(mvneta_comphy_init(pp));
3657 + if (pp->phy_interface != state->interface) {
3658 + if (pp->comphy)
3659 + WARN_ON(phy_power_off(pp->comphy));
3660 + WARN_ON(mvneta_config_interface(pp, state->interface));
3661 }
3662
3663 if (new_ctrl0 != gmac_ctrl0)
3664 @@ -4464,20 +4496,10 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
3665 }
3666
3667 /* Power up the port */
3668 -static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
3669 +static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
3670 {
3671 /* MAC Cause register should be cleared */
3672 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
3673 -
3674 - if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
3675 - mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
3676 - else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
3677 - phy_interface_mode_is_8023z(phy_mode))
3678 - mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
3679 - else if (!phy_interface_mode_is_rgmii(phy_mode))
3680 - return -EINVAL;
3681 -
3682 - return 0;
3683 }
3684
3685 /* Device initialization routine */
3686 @@ -4661,11 +4683,7 @@ static int mvneta_probe(struct platform_device *pdev)
3687 if (err < 0)
3688 goto err_netdev;
3689
3690 - err = mvneta_port_power_up(pp, phy_mode);
3691 - if (err < 0) {
3692 - dev_err(&pdev->dev, "can't power up port\n");
3693 - goto err_netdev;
3694 - }
3695 + mvneta_port_power_up(pp, phy_mode);
3696
3697 /* Armada3700 network controller does not support per-cpu
3698 * operation, so only single NAPI should be initialized.
3699 @@ -4818,11 +4836,7 @@ static int mvneta_resume(struct device *device)
3700 }
3701 }
3702 mvneta_defaults_set(pp);
3703 - err = mvneta_port_power_up(pp, pp->phy_interface);
3704 - if (err < 0) {
3705 - dev_err(device, "can't power up port\n");
3706 - return err;
3707 - }
3708 + mvneta_port_power_up(pp, pp->phy_interface);
3709
3710 netif_device_attach(dev);
3711
3712 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
3713 index fce6eccdcf8b..fa81a97f6ba9 100644
3714 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
3715 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
3716 @@ -78,11 +78,26 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
3717 [MLX5E_400GAUI_8] = 400000,
3718 };
3719
3720 +bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev)
3721 +{
3722 + struct mlx5e_port_eth_proto eproto;
3723 + int err;
3724 +
3725 + if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet))
3726 + return true;
3727 +
3728 + err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto);
3729 + if (err)
3730 + return false;
3731 +
3732 + return !!eproto.cap;
3733 +}
3734 +
3735 static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
3736 const u32 **arr, u32 *size,
3737 bool force_legacy)
3738 {
3739 - bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
3740 + bool ext = force_legacy ? false : mlx5e_ptys_ext_supported(mdev);
3741
3742 *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
3743 ARRAY_SIZE(mlx5e_link_speed);
3744 @@ -177,7 +192,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
3745 bool ext;
3746 int err;
3747
3748 - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
3749 + ext = mlx5e_ptys_ext_supported(mdev);
3750 err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
3751 if (err)
3752 goto out;
3753 @@ -205,7 +220,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
3754 int err;
3755 int i;
3756
3757 - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
3758 + ext = mlx5e_ptys_ext_supported(mdev);
3759 err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
3760 if (err)
3761 return err;
3762 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
3763 index 4a7f4497692b..e196888f7056 100644
3764 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
3765 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
3766 @@ -54,7 +54,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
3767 int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
3768 u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
3769 bool force_legacy);
3770 -
3771 +bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev);
3772 int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
3773 int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
3774 int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
3775 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
3776 index 39ee32518b10..8cd529556b21 100644
3777 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
3778 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
3779 @@ -200,7 +200,7 @@ static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
3780 struct ptys2ethtool_config **arr,
3781 u32 *size)
3782 {
3783 - bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
3784 + bool ext = mlx5e_ptys_ext_supported(mdev);
3785
3786 *arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
3787 *size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
3788 @@ -871,7 +871,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
3789 struct ethtool_link_ksettings *link_ksettings)
3790 {
3791 unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
3792 - bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
3793 + bool ext = mlx5e_ptys_ext_supported(mdev);
3794
3795 ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
3796 }
3797 @@ -900,7 +900,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
3798 __func__, err);
3799 goto err_query_regs;
3800 }
3801 - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
3802 + ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
3803 eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
3804 eth_proto_capability);
3805 eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
3806 @@ -1052,7 +1052,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
3807 autoneg = link_ksettings->base.autoneg;
3808 speed = link_ksettings->base.speed;
3809
3810 - ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
3811 + ext_supported = mlx5e_ptys_ext_supported(mdev);
3812 ext = ext_requested(autoneg, adver, ext_supported);
3813 if (!ext_supported && ext)
3814 return -EOPNOTSUPP;
3815 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
3816 index cc262b30aed5..dc589322940c 100644
3817 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
3818 +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
3819 @@ -293,7 +293,40 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
3820 return 0;
3821 }
3822
3823 -static int mlx5_eeprom_page(int offset)
3824 +static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
3825 + u8 *module_id)
3826 +{
3827 + u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
3828 + u32 out[MLX5_ST_SZ_DW(mcia_reg)];
3829 + int err, status;
3830 + u8 *ptr;
3831 +
3832 + MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW);
3833 + MLX5_SET(mcia_reg, in, module, module_num);
3834 + MLX5_SET(mcia_reg, in, device_address, 0);
3835 + MLX5_SET(mcia_reg, in, page_number, 0);
3836 + MLX5_SET(mcia_reg, in, size, 1);
3837 + MLX5_SET(mcia_reg, in, l, 0);
3838 +
3839 + err = mlx5_core_access_reg(dev, in, sizeof(in), out,
3840 + sizeof(out), MLX5_REG_MCIA, 0, 0);
3841 + if (err)
3842 + return err;
3843 +
3844 + status = MLX5_GET(mcia_reg, out, status);
3845 + if (status) {
3846 + mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
3847 + status);
3848 + return -EIO;
3849 + }
3850 + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
3851 +
3852 + *module_id = ptr[0];
3853 +
3854 + return 0;
3855 +}
3856 +
3857 +static int mlx5_qsfp_eeprom_page(u16 offset)
3858 {
3859 if (offset < MLX5_EEPROM_PAGE_LENGTH)
3860 /* Addresses between 0-255 - page 00 */
3861 @@ -307,7 +340,7 @@ static int mlx5_eeprom_page(int offset)
3862 MLX5_EEPROM_HIGH_PAGE_LENGTH);
3863 }
3864
3865 -static int mlx5_eeprom_high_page_offset(int page_num)
3866 +static int mlx5_qsfp_eeprom_high_page_offset(int page_num)
3867 {
3868 if (!page_num) /* Page 0 always start from low page */
3869 return 0;
3870 @@ -316,35 +349,62 @@ static int mlx5_eeprom_high_page_offset(int page_num)
3871 return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH;
3872 }
3873
3874 +static void mlx5_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
3875 +{
3876 + *i2c_addr = MLX5_I2C_ADDR_LOW;
3877 + *page_num = mlx5_qsfp_eeprom_page(*offset);
3878 + *offset -= mlx5_qsfp_eeprom_high_page_offset(*page_num);
3879 +}
3880 +
3881 +static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
3882 +{
3883 + *i2c_addr = MLX5_I2C_ADDR_LOW;
3884 + *page_num = 0;
3885 +
3886 + if (*offset < MLX5_EEPROM_PAGE_LENGTH)
3887 + return;
3888 +
3889 + *i2c_addr = MLX5_I2C_ADDR_HIGH;
3890 + *offset -= MLX5_EEPROM_PAGE_LENGTH;
3891 +}
3892 +
3893 int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
3894 u16 offset, u16 size, u8 *data)
3895 {
3896 - int module_num, page_num, status, err;
3897 + int module_num, status, err, page_num = 0;
3898 + u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
3899 u32 out[MLX5_ST_SZ_DW(mcia_reg)];
3900 - u32 in[MLX5_ST_SZ_DW(mcia_reg)];
3901 - u16 i2c_addr;
3902 - void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
3903 + u16 i2c_addr = 0;
3904 + u8 module_id;
3905 + void *ptr;
3906
3907 err = mlx5_query_module_num(dev, &module_num);
3908 if (err)
3909 return err;
3910
3911 - memset(in, 0, sizeof(in));
3912 - size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
3913 -
3914 - /* Get the page number related to the given offset */
3915 - page_num = mlx5_eeprom_page(offset);
3916 + err = mlx5_query_module_id(dev, module_num, &module_id);
3917 + if (err)
3918 + return err;
3919
3920 - /* Set the right offset according to the page number,
3921 - * For page_num > 0, relative offset is always >= 128 (high page).
3922 - */
3923 - offset -= mlx5_eeprom_high_page_offset(page_num);
3924 + switch (module_id) {
3925 + case MLX5_MODULE_ID_SFP:
3926 + mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
3927 + break;
3928 + case MLX5_MODULE_ID_QSFP:
3929 + case MLX5_MODULE_ID_QSFP_PLUS:
3930 + case MLX5_MODULE_ID_QSFP28:
3931 + mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
3932 + break;
3933 + default:
3934 + mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
3935 + return -EINVAL;
3936 + }
3937
3938 if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
3939 /* Cross pages read, read until offset 256 in low page */
3940 size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
3941
3942 - i2c_addr = MLX5_I2C_ADDR_LOW;
3943 + size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
3944
3945 MLX5_SET(mcia_reg, in, l, 0);
3946 MLX5_SET(mcia_reg, in, module, module_num);
3947 @@ -365,6 +425,7 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
3948 return -EIO;
3949 }
3950
3951 + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
3952 memcpy(data, ptr, size);
3953
3954 return size;
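
The mlx5 EEPROM rework first reads byte 0 of the module EEPROM to learn the module type, then maps the flat offset differently per type: QSFP banks everything above byte 255 into 128-byte high pages behind one I2C address, while SFP has no pages and instead exposes bytes 256-511 at a second I2C address. A sketch of the two mappings (the 0x50/0x51 addresses and lengths follow the usual SFF conventions and are assumptions here, not values copied from the driver):

    #include <stdint.h>

    #define PAGE_LEN      256 /* bytes reachable without paging */
    #define HIGH_PAGE_LEN 128 /* banked window: bytes 128..255 */

    /* QSFP: one I2C address; offsets above 255 select a high page
     * whose data appears in the 128..255 window. */
    static void qsfp_map(uint16_t *offset, int *page)
    {
        if (*offset < PAGE_LEN) {
            *page = 0;
            return;
        }
        *page = (*offset - PAGE_LEN) / HIGH_PAGE_LEN + 1;
        *offset -= *page * HIGH_PAGE_LEN; /* now in 128..255 */
    }

    /* SFP: no pages; bytes 256..511 live at a second I2C address. */
    static void sfp_map(uint16_t *offset, uint8_t *i2c_addr)
    {
        *i2c_addr = 0x50;
        if (*offset >= PAGE_LEN) {
            *i2c_addr = 0x51;
            *offset -= PAGE_LEN;
        }
    }

For example, flat offset 300 on a QSFP maps to page 1, window offset 172, while the same offset on an SFP maps to address 0x51, offset 44.
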
3955 diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
3956 index f3d1f9411d10..aa4fef789084 100644
3957 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
3958 +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
3959 @@ -1401,23 +1401,12 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
3960 u16 num_pages;
3961 int err;
3962
3963 - mutex_init(&mlxsw_pci->cmd.lock);
3964 - init_waitqueue_head(&mlxsw_pci->cmd.wait);
3965 -
3966 mlxsw_pci->core = mlxsw_core;
3967
3968 mbox = mlxsw_cmd_mbox_alloc();
3969 if (!mbox)
3970 return -ENOMEM;
3971
3972 - err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
3973 - if (err)
3974 - goto mbox_put;
3975 -
3976 - err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
3977 - if (err)
3978 - goto err_out_mbox_alloc;
3979 -
3980 err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
3981 if (err)
3982 goto err_sw_reset;
3983 @@ -1524,9 +1513,6 @@ err_query_fw:
3984 mlxsw_pci_free_irq_vectors(mlxsw_pci);
3985 err_alloc_irq:
3986 err_sw_reset:
3987 - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
3988 -err_out_mbox_alloc:
3989 - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
3990 mbox_put:
3991 mlxsw_cmd_mbox_free(mbox);
3992 return err;
3993 @@ -1540,8 +1526,6 @@ static void mlxsw_pci_fini(void *bus_priv)
3994 mlxsw_pci_aqs_fini(mlxsw_pci);
3995 mlxsw_pci_fw_area_fini(mlxsw_pci);
3996 mlxsw_pci_free_irq_vectors(mlxsw_pci);
3997 - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
3998 - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
3999 }
4000
4001 static struct mlxsw_pci_queue *
4002 @@ -1755,6 +1739,37 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
4003 .features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
4004 };
4005
4006 +static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
4007 +{
4008 + int err;
4009 +
4010 + mutex_init(&mlxsw_pci->cmd.lock);
4011 + init_waitqueue_head(&mlxsw_pci->cmd.wait);
4012 +
4013 + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
4014 + if (err)
4015 + goto err_in_mbox_alloc;
4016 +
4017 + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
4018 + if (err)
4019 + goto err_out_mbox_alloc;
4020 +
4021 + return 0;
4022 +
4023 +err_out_mbox_alloc:
4024 + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
4025 +err_in_mbox_alloc:
4026 + mutex_destroy(&mlxsw_pci->cmd.lock);
4027 + return err;
4028 +}
4029 +
4030 +static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
4031 +{
4032 + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
4033 + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
4034 + mutex_destroy(&mlxsw_pci->cmd.lock);
4035 +}
4036 +
4037 static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4038 {
4039 const char *driver_name = pdev->driver->name;
4040 @@ -1810,6 +1825,10 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4041 mlxsw_pci->pdev = pdev;
4042 pci_set_drvdata(pdev, mlxsw_pci);
4043
4044 + err = mlxsw_pci_cmd_init(mlxsw_pci);
4045 + if (err)
4046 + goto err_pci_cmd_init;
4047 +
4048 mlxsw_pci->bus_info.device_kind = driver_name;
4049 mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
4050 mlxsw_pci->bus_info.dev = &pdev->dev;
4051 @@ -1827,6 +1846,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4052 return 0;
4053
4054 err_bus_device_register:
4055 + mlxsw_pci_cmd_fini(mlxsw_pci);
4056 +err_pci_cmd_init:
4057 iounmap(mlxsw_pci->hw_addr);
4058 err_ioremap:
4059 err_pci_resource_len_check:
4060 @@ -1844,6 +1865,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
4061 struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
4062
4063 mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
4064 + mlxsw_pci_cmd_fini(mlxsw_pci);
4065 iounmap(mlxsw_pci->hw_addr);
4066 pci_release_regions(mlxsw_pci->pdev);
4067 pci_disable_device(mlxsw_pci->pdev);
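
The mlxsw change hoists the command-interface setup into a dedicated mlxsw_pci_cmd_init()/mlxsw_pci_cmd_fini() pair called from probe/remove, so the mailboxes exist for the device's whole lifetime instead of being torn down on every bus fini. The error unwinding inside the new init is the standard goto-ladder idiom; a minimal sketch of that idiom (resource names made up):

    #include <errno.h>
    #include <stdlib.h>

    struct cmd {
        void *in_mbox;
        void *out_mbox;
    };

    /* Acquire resources in order; on failure, release in reverse. */
    static int cmd_init(struct cmd *c)
    {
        c->in_mbox = malloc(4096);
        if (!c->in_mbox)
            goto err_in_mbox;

        c->out_mbox = malloc(4096);
        if (!c->out_mbox)
            goto err_out_mbox;

        return 0;

    err_out_mbox:
        free(c->in_mbox);
    err_in_mbox:
        return -ENOMEM;
    }

    /* fini mirrors init exactly, in reverse order. */
    static void cmd_fini(struct cmd *c)
    {
        free(c->out_mbox);
        free(c->in_mbox);
    }

Keeping init and fini as exact mirrors is what lets probe/remove and the error ladder share one teardown order without leaks or double frees.
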
4068 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
4069 index efdf8cb5114c..2f013fc71698 100644
4070 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
4071 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
4072 @@ -6287,7 +6287,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
4073 }
4074
4075 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
4076 - if (WARN_ON(!fib_work))
4077 + if (!fib_work)
4078 return NOTIFY_BAD;
4079
4080 fib_work->mlxsw_sp = router->mlxsw_sp;
4081 diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
4082 index 859caa6c1a1f..8e7be214f959 100644
4083 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
4084 +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
4085 @@ -8197,6 +8197,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
4086 DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
4087 }
4088
4089 + /* Re-populate nvm attribute info */
4090 + qed_mcp_nvm_info_free(p_hwfn);
4091 + qed_mcp_nvm_info_populate(p_hwfn);
4092 +
4093 /* nvm cfg1 */
4094 rc = qed_dbg_nvm_image(cdev,
4095 (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
4096 diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
4097 index ecd14474a603..638047b937c6 100644
4098 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
4099 +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
4100 @@ -4423,12 +4423,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
4101 return 0;
4102 }
4103
4104 -static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
4105 -{
4106 - kfree(p_hwfn->nvm_info.image_att);
4107 - p_hwfn->nvm_info.image_att = NULL;
4108 -}
4109 -
4110 static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
4111 void __iomem *p_regview,
4112 void __iomem *p_doorbells,
4113 @@ -4513,7 +4507,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
4114 return rc;
4115 err3:
4116 if (IS_LEAD_HWFN(p_hwfn))
4117 - qed_nvm_info_free(p_hwfn);
4118 + qed_mcp_nvm_info_free(p_hwfn);
4119 err2:
4120 if (IS_LEAD_HWFN(p_hwfn))
4121 qed_iov_free_hw_info(p_hwfn->cdev);
4122 @@ -4574,7 +4568,7 @@ int qed_hw_prepare(struct qed_dev *cdev,
4123 if (rc) {
4124 if (IS_PF(cdev)) {
4125 qed_init_free(p_hwfn);
4126 - qed_nvm_info_free(p_hwfn);
4127 + qed_mcp_nvm_info_free(p_hwfn);
4128 qed_mcp_free(p_hwfn);
4129 qed_hw_hwfn_free(p_hwfn);
4130 }
4131 @@ -4608,7 +4602,7 @@ void qed_hw_remove(struct qed_dev *cdev)
4132
4133 qed_iov_free_hw_info(cdev);
4134
4135 - qed_nvm_info_free(p_hwfn);
4136 + qed_mcp_nvm_info_free(p_hwfn);
4137 }
4138
4139 static void qed_chain_free_next_ptr(struct qed_dev *cdev,
4140 diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
4141 index 36ddb89856a8..9401b49275f0 100644
4142 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
4143 +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
4144 @@ -3149,6 +3149,13 @@ err0:
4145 return rc;
4146 }
4147
4148 +void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
4149 +{
4150 + kfree(p_hwfn->nvm_info.image_att);
4151 + p_hwfn->nvm_info.image_att = NULL;
4152 + p_hwfn->nvm_info.valid = false;
4153 +}
4154 +
4155 int
4156 qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
4157 enum qed_nvm_images image_id,
4158 diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
4159 index 9c4c2763de8d..e38297383b00 100644
4160 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
4161 +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
4162 @@ -1192,6 +1192,13 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
4163 */
4164 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
4165
4166 +/**
4167 + * @brief Delete nvm info shadow in the given hardware function
4168 + *
4169 + * @param p_hwfn
4170 + */
4171 +void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn);
4172 +
4173 /**
4174 * @brief Get the engine affinity configuration.
4175 *
4176 diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
4177 index 3cf4dc3433f9..bb4ccbda031a 100644
4178 --- a/drivers/net/usb/smsc95xx.c
4179 +++ b/drivers/net/usb/smsc95xx.c
4180 @@ -1287,11 +1287,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
4181
4182 /* Init all registers */
4183 ret = smsc95xx_reset(dev);
4184 + if (ret)
4185 + goto free_pdata;
4186
4187 /* detect device revision as different features may be available */
4188 ret = smsc95xx_read_reg(dev, ID_REV, &val);
4189 if (ret < 0)
4190 - return ret;
4191 + goto free_pdata;
4192 +
4193 val >>= 16;
4194 pdata->chip_id = val;
4195 pdata->mdix_ctrl = get_mdix_status(dev->net);
4196 @@ -1317,6 +1320,10 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
4197 schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
4198
4199 return 0;
4200 +
4201 +free_pdata:
4202 + kfree(pdata);
4203 + return ret;
4204 }
4205
4206 static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
4207 diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
4208 index 4ed21dad6a8e..6049d3766c64 100644
4209 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c
4210 +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
4211 @@ -643,9 +643,9 @@ err:
4212
4213 static void ath9k_hif_usb_rx_cb(struct urb *urb)
4214 {
4215 - struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
4216 - struct hif_device_usb *hif_dev = rx_buf->hif_dev;
4217 - struct sk_buff *skb = rx_buf->skb;
4218 + struct sk_buff *skb = (struct sk_buff *) urb->context;
4219 + struct hif_device_usb *hif_dev =
4220 + usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
4221 int ret;
4222
4223 if (!skb)
4224 @@ -685,15 +685,14 @@ resubmit:
4225 return;
4226 free:
4227 kfree_skb(skb);
4228 - kfree(rx_buf);
4229 }
4230
4231 static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
4232 {
4233 - struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
4234 - struct hif_device_usb *hif_dev = rx_buf->hif_dev;
4235 - struct sk_buff *skb = rx_buf->skb;
4236 + struct sk_buff *skb = (struct sk_buff *) urb->context;
4237 struct sk_buff *nskb;
4238 + struct hif_device_usb *hif_dev =
4239 + usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
4240 int ret;
4241
4242 if (!skb)
4243 @@ -751,7 +750,6 @@ resubmit:
4244 return;
4245 free:
4246 kfree_skb(skb);
4247 - kfree(rx_buf);
4248 urb->context = NULL;
4249 }
4250
4251 @@ -797,7 +795,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
4252 init_usb_anchor(&hif_dev->mgmt_submitted);
4253
4254 for (i = 0; i < MAX_TX_URB_NUM; i++) {
4255 - tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL);
4256 + tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
4257 if (!tx_buf)
4258 goto err;
4259
4260 @@ -834,9 +832,8 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
4261
4262 static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
4263 {
4264 - struct rx_buf *rx_buf = NULL;
4265 - struct sk_buff *skb = NULL;
4266 struct urb *urb = NULL;
4267 + struct sk_buff *skb = NULL;
4268 int i, ret;
4269
4270 init_usb_anchor(&hif_dev->rx_submitted);
4271 @@ -844,12 +841,6 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
4272
4273 for (i = 0; i < MAX_RX_URB_NUM; i++) {
4274
4275 - rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
4276 - if (!rx_buf) {
4277 - ret = -ENOMEM;
4278 - goto err_rxb;
4279 - }
4280 -
4281 /* Allocate URB */
4282 urb = usb_alloc_urb(0, GFP_KERNEL);
4283 if (urb == NULL) {
4284 @@ -864,14 +855,11 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
4285 goto err_skb;
4286 }
4287
4288 - rx_buf->hif_dev = hif_dev;
4289 - rx_buf->skb = skb;
4290 -
4291 usb_fill_bulk_urb(urb, hif_dev->udev,
4292 usb_rcvbulkpipe(hif_dev->udev,
4293 USB_WLAN_RX_PIPE),
4294 skb->data, MAX_RX_BUF_SIZE,
4295 - ath9k_hif_usb_rx_cb, rx_buf);
4296 + ath9k_hif_usb_rx_cb, skb);
4297
4298 /* Anchor URB */
4299 usb_anchor_urb(urb, &hif_dev->rx_submitted);
4300 @@ -897,8 +885,6 @@ err_submit:
4301 err_skb:
4302 usb_free_urb(urb);
4303 err_urb:
4304 - kfree(rx_buf);
4305 -err_rxb:
4306 ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
4307 return ret;
4308 }
4309 @@ -910,21 +896,14 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)
4310
4311 static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
4312 {
4313 - struct rx_buf *rx_buf = NULL;
4314 - struct sk_buff *skb = NULL;
4315 struct urb *urb = NULL;
4316 + struct sk_buff *skb = NULL;
4317 int i, ret;
4318
4319 init_usb_anchor(&hif_dev->reg_in_submitted);
4320
4321 for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {
4322
4323 - rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
4324 - if (!rx_buf) {
4325 - ret = -ENOMEM;
4326 - goto err_rxb;
4327 - }
4328 -
4329 /* Allocate URB */
4330 urb = usb_alloc_urb(0, GFP_KERNEL);
4331 if (urb == NULL) {
4332 @@ -939,14 +918,11 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
4333 goto err_skb;
4334 }
4335
4336 - rx_buf->hif_dev = hif_dev;
4337 - rx_buf->skb = skb;
4338 -
4339 usb_fill_int_urb(urb, hif_dev->udev,
4340 usb_rcvintpipe(hif_dev->udev,
4341 USB_REG_IN_PIPE),
4342 skb->data, MAX_REG_IN_BUF_SIZE,
4343 - ath9k_hif_usb_reg_in_cb, rx_buf, 1);
4344 + ath9k_hif_usb_reg_in_cb, skb, 1);
4345
4346 /* Anchor URB */
4347 usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
4348 @@ -972,8 +948,6 @@ err_submit:
4349 err_skb:
4350 usb_free_urb(urb);
4351 err_urb:
4352 - kfree(rx_buf);
4353 -err_rxb:
4354 ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
4355 return ret;
4356 }
4357 diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
4358 index 5985aa15ca93..a94e7e1c86e9 100644
4359 --- a/drivers/net/wireless/ath/ath9k/hif_usb.h
4360 +++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
4361 @@ -86,11 +86,6 @@ struct tx_buf {
4362 struct list_head list;
4363 };
4364
4365 -struct rx_buf {
4366 - struct sk_buff *skb;
4367 - struct hif_device_usb *hif_dev;
4368 -};
4369 -
4370 #define HIF_USB_TX_STOP BIT(0)
4371 #define HIF_USB_TX_FLUSH BIT(1)
4372
4373 diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
4374 index 73e8475ddc8a..cd0d49978190 100644
4375 --- a/drivers/nvme/host/rdma.c
4376 +++ b/drivers/nvme/host/rdma.c
4377 @@ -451,7 +451,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
4378 * Spread I/O queues completion vectors according their queue index.
4379 * Admin queues can always go on completion vector 0.
4380 */
4381 - comp_vector = idx == 0 ? idx : idx - 1;
4382 + comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
4383
4384 /* Polling queues need direct cq polling context */
4385 if (nvme_rdma_poll_queue(queue))
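
The nvme-rdma one-liner folds the computed completion-vector index back into the device's real vector range; without the modulo, an adapter exposing fewer vectors than queues made queue creation fail. As a sketch (parameter spelling illustrative):

    /* Queue 0 (admin) shares vector 0 with the first I/O queue; the
     * modulo keeps the index valid on devices with few vectors. */
    static int pick_comp_vector(int idx, int num_comp_vectors)
    {
        return (idx == 0 ? 0 : idx - 1) % num_comp_vectors;
    }

With 3 vectors, queue indexes 0..5 map to vectors 0, 0, 1, 2, 0, 1.
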
4386 diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
4387 index d0f5c69930d0..77c28313e95f 100644
4388 --- a/drivers/pwm/pwm-jz4740.c
4389 +++ b/drivers/pwm/pwm-jz4740.c
4390 @@ -92,11 +92,12 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
4391 {
4392 struct jz4740_pwm_chip *jz4740 = to_jz4740(pwm->chip);
4393 unsigned long long tmp;
4394 - unsigned long period, duty;
4395 + unsigned long rate, period, duty;
4396 unsigned int prescaler = 0;
4397 uint16_t ctrl;
4398
4399 - tmp = (unsigned long long)clk_get_rate(jz4740->clk) * state->period;
4400 + rate = clk_get_rate(jz4740->clk);
4401 + tmp = rate * state->period;
4402 do_div(tmp, 1000000000);
4403 period = tmp;
4404
4405 diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
4406 index 9a06818d2816..3e0e27731922 100644
4407 --- a/drivers/spi/spi-fsl-dspi.c
4408 +++ b/drivers/spi/spi-fsl-dspi.c
4409 @@ -1,6 +1,7 @@
4410 // SPDX-License-Identifier: GPL-2.0+
4411 //
4412 // Copyright 2013 Freescale Semiconductor, Inc.
4413 +// Copyright 2020 NXP
4414 //
4415 // Freescale DSPI driver
4416 // This file contains a driver for the Freescale DSPI
4417 @@ -33,6 +34,9 @@
4418 #define SPI_MCR_CLR_TXF BIT(11)
4419 #define SPI_MCR_CLR_RXF BIT(10)
4420 #define SPI_MCR_XSPI BIT(3)
4421 +#define SPI_MCR_DIS_TXF BIT(13)
4422 +#define SPI_MCR_DIS_RXF BIT(12)
4423 +#define SPI_MCR_HALT BIT(0)
4424
4425 #define SPI_TCR 0x08
4426 #define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16)
4427 @@ -1160,15 +1164,42 @@ static int dspi_remove(struct platform_device *pdev)
4428 struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
4429
4430 /* Disconnect from the SPI framework */
4431 + spi_unregister_controller(dspi->ctlr);
4432 +
4433 + /* Disable RX and TX */
4434 + regmap_update_bits(dspi->regmap, SPI_MCR,
4435 + SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
4436 + SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF);
4437 +
4438 + /* Stop Running */
4439 + regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);
4440 +
4441 dspi_release_dma(dspi);
4442 if (dspi->irq)
4443 free_irq(dspi->irq, dspi);
4444 clk_disable_unprepare(dspi->clk);
4445 - spi_unregister_controller(dspi->ctlr);
4446
4447 return 0;
4448 }
4449
4450 +static void dspi_shutdown(struct platform_device *pdev)
4451 +{
4452 + struct spi_controller *ctlr = platform_get_drvdata(pdev);
4453 + struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
4454 +
4455 + /* Disable RX and TX */
4456 + regmap_update_bits(dspi->regmap, SPI_MCR,
4457 + SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
4458 + SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF);
4459 +
4460 + /* Stop Running */
4461 + regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);
4462 +
4463 + dspi_release_dma(dspi);
4464 + clk_disable_unprepare(dspi->clk);
4465 + spi_unregister_controller(dspi->ctlr);
4466 +}
4467 +
4468 static struct platform_driver fsl_dspi_driver = {
4469 .driver.name = DRIVER_NAME,
4470 .driver.of_match_table = fsl_dspi_dt_ids,
4471 @@ -1176,6 +1207,7 @@ static struct platform_driver fsl_dspi_driver = {
4472 .driver.pm = &dspi_pm,
4473 .probe = dspi_probe,
4474 .remove = dspi_remove,
4475 + .shutdown = dspi_shutdown,
4476 };
4477 module_platform_driver(fsl_dspi_driver);
4478
4479 diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
4480 index ab2c3848f5bf..ac6bf1fbbfe6 100644
4481 --- a/drivers/spi/spidev.c
4482 +++ b/drivers/spi/spidev.c
4483 @@ -605,15 +605,20 @@ err_find_dev:
4484 static int spidev_release(struct inode *inode, struct file *filp)
4485 {
4486 struct spidev_data *spidev;
4487 + int dofree;
4488
4489 mutex_lock(&device_list_lock);
4490 spidev = filp->private_data;
4491 filp->private_data = NULL;
4492
4493 + spin_lock_irq(&spidev->spi_lock);
4494 + /* ... after we unbound from the underlying device? */
4495 + dofree = (spidev->spi == NULL);
4496 + spin_unlock_irq(&spidev->spi_lock);
4497 +
4498 /* last close? */
4499 spidev->users--;
4500 if (!spidev->users) {
4501 - int dofree;
4502
4503 kfree(spidev->tx_buffer);
4504 spidev->tx_buffer = NULL;
4505 @@ -621,19 +626,14 @@ static int spidev_release(struct inode *inode, struct file *filp)
4506 kfree(spidev->rx_buffer);
4507 spidev->rx_buffer = NULL;
4508
4509 - spin_lock_irq(&spidev->spi_lock);
4510 - if (spidev->spi)
4511 - spidev->speed_hz = spidev->spi->max_speed_hz;
4512 -
4513 - /* ... after we unbound from the underlying device? */
4514 - dofree = (spidev->spi == NULL);
4515 - spin_unlock_irq(&spidev->spi_lock);
4516 -
4517 if (dofree)
4518 kfree(spidev);
4519 + else
4520 + spidev->speed_hz = spidev->spi->max_speed_hz;
4521 }
4522 #ifdef CONFIG_SPI_SLAVE
4523 - spi_slave_abort(spidev->spi);
4524 + if (!dofree)
4525 + spi_slave_abort(spidev->spi);
4526 #endif
4527 mutex_unlock(&device_list_lock);
4528
4529 @@ -783,13 +783,13 @@ static int spidev_remove(struct spi_device *spi)
4530 {
4531 struct spidev_data *spidev = spi_get_drvdata(spi);
4532
4533 + /* prevent new opens */
4534 + mutex_lock(&device_list_lock);
4535 /* make sure ops on existing fds can abort cleanly */
4536 spin_lock_irq(&spidev->spi_lock);
4537 spidev->spi = NULL;
4538 spin_unlock_irq(&spidev->spi_lock);
4539
4540 - /* prevent new opens */
4541 - mutex_lock(&device_list_lock);
4542 list_del(&spidev->device_entry);
4543 device_destroy(spidev_class, spidev->devt);
4544 clear_bit(MINOR(spidev->devt), minors);
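
The spidev reordering samples spidev->spi under spi_lock once, early in release, and then uses that snapshot (dofree) for every later decision: whether the struct may be freed, and whether spi_slave_abort() may still be called. Calling the abort on a device that had already been unbound (spi == NULL) was the bug. A minimal sketch of snapshotting state under a lock before acting on it (a pthread mutex stands in for the spinlock):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct dev {
        pthread_mutex_t lock;
        void *spi; /* NULL once the underlying device is unbound */
    };

    static void release(struct dev *d)
    {
        bool unbound;

        /* Snapshot the binding state once, under the lock... */
        pthread_mutex_lock(&d->lock);
        unbound = (d->spi == NULL);
        pthread_mutex_unlock(&d->lock);

        /* ...then every later decision uses the snapshot, so we never
         * dereference d->spi after having observed it as NULL. */
        if (!unbound) {
            /* safe to talk to the device here */
        }
    }

The companion spidev_remove() change takes device_list_lock before clearing spidev->spi, closing the window against concurrent opens as well.
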
4545 diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
4546 index b67372737dc9..96c05b121fac 100644
4547 --- a/drivers/usb/dwc3/dwc3-pci.c
4548 +++ b/drivers/usb/dwc3/dwc3-pci.c
4549 @@ -206,8 +206,10 @@ static void dwc3_pci_resume_work(struct work_struct *work)
4550 int ret;
4551
4552 ret = pm_runtime_get_sync(&dwc3->dev);
4553 - if (ret)
4554 + if (ret) {
4555 + pm_runtime_put_sync_autosuspend(&dwc3->dev);
4556 return;
4557 + }
4558
4559 pm_runtime_mark_last_busy(&dwc3->dev);
4560 pm_runtime_put_sync_autosuspend(&dwc3->dev);
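
The dwc3-pci fix reflects a pm_runtime_get_sync() property that is easy to miss: the call bumps the device's usage count even when it fails, so the error path must drop that reference or the device can never autosuspend again. A toy model of the get/put balance (the counter stands in for the runtime-PM usage count):

    static int usage_count;

    /* Like pm_runtime_get_sync(): bumps the count even on failure. */
    static int get_sync(int fail)
    {
        usage_count++;
        return fail ? -1 : 0;
    }

    static void put(void)
    {
        usage_count--;
    }

    static void resume_work(int fail)
    {
        if (get_sync(fail)) {
            put();      /* must drop the reference taken above */
            return;
        }
        /* ... do the actual work ... */
        put();
    }
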
4561 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
4562 index 8aab286f2028..9b214b14a3aa 100644
4563 --- a/fs/btrfs/extent_io.c
4564 +++ b/fs/btrfs/extent_io.c
4565 @@ -5025,25 +5025,28 @@ struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4566 static void check_buffer_tree_ref(struct extent_buffer *eb)
4567 {
4568 int refs;
4569 - /* the ref bit is tricky. We have to make sure it is set
4570 - * if we have the buffer dirty. Otherwise the
4571 - * code to free a buffer can end up dropping a dirty
4572 - * page
4573 + /*
4574 + * The TREE_REF bit is first set when the extent_buffer is added
4575 + * to the radix tree. It is also reset, if unset, when a new reference
4576 + * is created by find_extent_buffer.
4577 *
4578 - * Once the ref bit is set, it won't go away while the
4579 - * buffer is dirty or in writeback, and it also won't
4580 - * go away while we have the reference count on the
4581 - * eb bumped.
4582 + * It is only cleared in two cases: freeing the last non-tree
4583 + * reference to the extent_buffer when its STALE bit is set or
4584 + * calling releasepage when the tree reference is the only reference.
4585 *
4586 - * We can't just set the ref bit without bumping the
4587 - * ref on the eb because free_extent_buffer might
4588 - * see the ref bit and try to clear it. If this happens
4589 - * free_extent_buffer might end up dropping our original
4590 - * ref by mistake and freeing the page before we are able
4591 - * to add one more ref.
4592 + * In both cases, care is taken to ensure that the extent_buffer's
4593 + * pages are not under io. However, releasepage can be concurrently
4594 + * called with creating new references, which is prone to race
4595 + * conditions between the calls to check_buffer_tree_ref in those
4596 + * codepaths and clearing TREE_REF in try_release_extent_buffer.
4597 *
4598 - * So bump the ref count first, then set the bit. If someone
4599 - * beat us to it, drop the ref we added.
4600 + * The actual lifetime of the extent_buffer in the radix tree is
4601 + * adequately protected by the refcount, but the TREE_REF bit and
4602 + * its corresponding reference are not. To protect against this
4603 + * class of races, we call check_buffer_tree_ref from the codepaths
4604 + * which trigger io after they set eb->io_pages. Note that once io is
4605 + * initiated, TREE_REF can no longer be cleared, so that is the
4606 + * moment at which any such race is best fixed.
4607 */
4608 refs = atomic_read(&eb->refs);
4609 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4610 @@ -5493,6 +5496,11 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
4611 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4612 eb->read_mirror = 0;
4613 atomic_set(&eb->io_pages, num_reads);
4614 + /*
4615 + * It is possible for releasepage to clear the TREE_REF bit before we
4616 + * set io_pages. See check_buffer_tree_ref for a more detailed comment.
4617 + */
4618 + check_buffer_tree_ref(eb);
4619 for (i = 0; i < num_pages; i++) {
4620 page = eb->pages[i];
4621
4622 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
4623 index 280c45c91ddc..e408181a5eba 100644
4624 --- a/fs/btrfs/inode.c
4625 +++ b/fs/btrfs/inode.c
4626 @@ -1657,12 +1657,8 @@ out_check:
4627 ret = fallback_to_cow(inode, locked_page, cow_start,
4628 found_key.offset - 1,
4629 page_started, nr_written);
4630 - if (ret) {
4631 - if (nocow)
4632 - btrfs_dec_nocow_writers(fs_info,
4633 - disk_bytenr);
4634 + if (ret)
4635 goto error;
4636 - }
4637 cow_start = (u64)-1;
4638 }
4639
4640 @@ -1678,9 +1674,6 @@ out_check:
4641 ram_bytes, BTRFS_COMPRESS_NONE,
4642 BTRFS_ORDERED_PREALLOC);
4643 if (IS_ERR(em)) {
4644 - if (nocow)
4645 - btrfs_dec_nocow_writers(fs_info,
4646 - disk_bytenr);
4647 ret = PTR_ERR(em);
4648 goto error;
4649 }
4650 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
4651 index 6045b4868275..5ae458505f63 100644
4652 --- a/fs/cifs/inode.c
4653 +++ b/fs/cifs/inode.c
4654 @@ -2270,6 +2270,15 @@ set_size_out:
4655 if (rc == 0) {
4656 cifsInode->server_eof = attrs->ia_size;
4657 cifs_setsize(inode, attrs->ia_size);
4658 +
4659 + /*
4660 + * The man page of truncate says if the size changed,
4661 + * then the st_ctime and st_mtime fields for the file
4662 + * are updated.
4663 + */
4664 + attrs->ia_ctime = attrs->ia_mtime = current_time(inode);
4665 + attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME;
4666 +
4667 cifs_truncate_page(inode->i_mapping, inode->i_size);
4668 }
4669
4670 diff --git a/include/linux/filter.h b/include/linux/filter.h
4671 index 3bbc72dbc69e..79830bc9e45c 100644
4672 --- a/include/linux/filter.h
4673 +++ b/include/linux/filter.h
4674 @@ -853,12 +853,12 @@ void bpf_jit_compile(struct bpf_prog *prog);
4675 bool bpf_jit_needs_zext(void);
4676 bool bpf_helper_changes_pkt_data(void *func);
4677
4678 -static inline bool bpf_dump_raw_ok(void)
4679 +static inline bool bpf_dump_raw_ok(const struct cred *cred)
4680 {
4681 /* Reconstruction of call-sites is dependent on kallsyms,
4682 * thus make dump the same restriction.
4683 */
4684 - return kallsyms_show_value() == 1;
4685 + return kallsyms_show_value(cred);
4686 }
4687
4688 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
4689 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
4690 index 657a83b943f0..1f96ce2b47df 100644
4691 --- a/include/linux/kallsyms.h
4692 +++ b/include/linux/kallsyms.h
4693 @@ -18,6 +18,7 @@
4694 #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
4695 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
4696
4697 +struct cred;
4698 struct module;
4699
4700 static inline int is_kernel_inittext(unsigned long addr)
4701 @@ -98,7 +99,7 @@ int lookup_symbol_name(unsigned long addr, char *symname);
4702 int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
4703
4704 /* How and when do we show kallsyms values? */
4705 -extern int kallsyms_show_value(void);
4706 +extern bool kallsyms_show_value(const struct cred *cred);
4707
4708 #else /* !CONFIG_KALLSYMS */
4709
4710 @@ -158,7 +159,7 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
4711 return -ERANGE;
4712 }
4713
4714 -static inline int kallsyms_show_value(void)
4715 +static inline bool kallsyms_show_value(const struct cred *cred)
4716 {
4717 return false;
4718 }
4719 diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
4720 index bc88d6f964da..006f01922439 100644
4721 --- a/include/sound/compress_driver.h
4722 +++ b/include/sound/compress_driver.h
4723 @@ -59,6 +59,7 @@ struct snd_compr_runtime {
4724 * @direction: stream direction, playback/recording
4725 * @metadata_set: metadata set flag, true when set
4726 * @next_track: has userspace signal next track transition, true when set
4727 + * @partial_drain: undergoing partial_drain for stream, true when set
4728 * @private_data: pointer to DSP private data
4729 */
4730 struct snd_compr_stream {
4731 @@ -70,6 +71,7 @@ struct snd_compr_stream {
4732 enum snd_compr_direction direction;
4733 bool metadata_set;
4734 bool next_track;
4735 + bool partial_drain;
4736 void *private_data;
4737 };
4738
4739 @@ -173,7 +175,13 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
4740 if (snd_BUG_ON(!stream))
4741 return;
4742
4743 - stream->runtime->state = SNDRV_PCM_STATE_SETUP;
4744 + /* for partial_drain case we are back to running state on success */
4745 + if (stream->partial_drain) {
4746 + stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
4747 + stream->partial_drain = false; /* clear this flag as well */
4748 + } else {
4749 + stream->runtime->state = SNDRV_PCM_STATE_SETUP;
4750 + }
4751
4752 wake_up(&stream->runtime->sleep);
4753 }
4754 diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
4755 index e7af1ac69d75..8bc904f9badb 100644
4756 --- a/kernel/bpf/syscall.c
4757 +++ b/kernel/bpf/syscall.c
4758 @@ -2248,7 +2248,8 @@ static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
4759 return NULL;
4760 }
4761
4762 -static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
4763 +static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
4764 + const struct cred *f_cred)
4765 {
4766 const struct bpf_map *map;
4767 struct bpf_insn *insns;
4768 @@ -2271,7 +2272,7 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
4769 insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
4770 if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
4771 insns[i].code = BPF_JMP | BPF_CALL;
4772 - if (!bpf_dump_raw_ok())
4773 + if (!bpf_dump_raw_ok(f_cred))
4774 insns[i].imm = 0;
4775 continue;
4776 }
4777 @@ -2323,7 +2324,8 @@ static int set_info_rec_size(struct bpf_prog_info *info)
4778 return 0;
4779 }
4780
4781 -static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
4782 +static int bpf_prog_get_info_by_fd(struct file *file,
4783 + struct bpf_prog *prog,
4784 const union bpf_attr *attr,
4785 union bpf_attr __user *uattr)
4786 {
4787 @@ -2392,11 +2394,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
4788 struct bpf_insn *insns_sanitized;
4789 bool fault;
4790
4791 - if (prog->blinded && !bpf_dump_raw_ok()) {
4792 + if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
4793 info.xlated_prog_insns = 0;
4794 goto done;
4795 }
4796 - insns_sanitized = bpf_insn_prepare_dump(prog);
4797 + insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
4798 if (!insns_sanitized)
4799 return -ENOMEM;
4800 uinsns = u64_to_user_ptr(info.xlated_prog_insns);
4801 @@ -2430,7 +2432,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
4802 }
4803
4804 if (info.jited_prog_len && ulen) {
4805 - if (bpf_dump_raw_ok()) {
4806 + if (bpf_dump_raw_ok(file->f_cred)) {
4807 uinsns = u64_to_user_ptr(info.jited_prog_insns);
4808 ulen = min_t(u32, info.jited_prog_len, ulen);
4809
4810 @@ -2465,7 +2467,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
4811 ulen = info.nr_jited_ksyms;
4812 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
4813 if (ulen) {
4814 - if (bpf_dump_raw_ok()) {
4815 + if (bpf_dump_raw_ok(file->f_cred)) {
4816 unsigned long ksym_addr;
4817 u64 __user *user_ksyms;
4818 u32 i;
4819 @@ -2496,7 +2498,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
4820 ulen = info.nr_jited_func_lens;
4821 info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
4822 if (ulen) {
4823 - if (bpf_dump_raw_ok()) {
4824 + if (bpf_dump_raw_ok(file->f_cred)) {
4825 u32 __user *user_lens;
4826 u32 func_len, i;
4827
4828 @@ -2553,7 +2555,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
4829 else
4830 info.nr_jited_line_info = 0;
4831 if (info.nr_jited_line_info && ulen) {
4832 - if (bpf_dump_raw_ok()) {
4833 + if (bpf_dump_raw_ok(file->f_cred)) {
4834 __u64 __user *user_linfo;
4835 u32 i;
4836
4837 @@ -2599,7 +2601,8 @@ done:
4838 return 0;
4839 }
4840
4841 -static int bpf_map_get_info_by_fd(struct bpf_map *map,
4842 +static int bpf_map_get_info_by_fd(struct file *file,
4843 + struct bpf_map *map,
4844 const union bpf_attr *attr,
4845 union bpf_attr __user *uattr)
4846 {
4847 @@ -2641,7 +2644,8 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map,
4848 return 0;
4849 }
4850
4851 -static int bpf_btf_get_info_by_fd(struct btf *btf,
4852 +static int bpf_btf_get_info_by_fd(struct file *file,
4853 + struct btf *btf,
4854 const union bpf_attr *attr,
4855 union bpf_attr __user *uattr)
4856 {
4857 @@ -2673,13 +2677,13 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
4858 return -EBADFD;
4859
4860 if (f.file->f_op == &bpf_prog_fops)
4861 - err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
4862 + err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
4863 uattr);
4864 else if (f.file->f_op == &bpf_map_fops)
4865 - err = bpf_map_get_info_by_fd(f.file->private_data, attr,
4866 + err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
4867 uattr);
4868 else if (f.file->f_op == &btf_fops)
4869 - err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
4870 + err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
4871 else
4872 err = -EINVAL;
4873
4874 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
4875 index 136ce049c4ad..61f9d781f70a 100644
4876 --- a/kernel/kallsyms.c
4877 +++ b/kernel/kallsyms.c
4878 @@ -645,19 +645,20 @@ static inline int kallsyms_for_perf(void)
4879 * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
4880 * block even that).
4881 */
4882 -int kallsyms_show_value(void)
4883 +bool kallsyms_show_value(const struct cred *cred)
4884 {
4885 switch (kptr_restrict) {
4886 case 0:
4887 if (kallsyms_for_perf())
4888 - return 1;
4889 + return true;
4890 /* fallthrough */
4891 case 1:
4892 - if (has_capability_noaudit(current, CAP_SYSLOG))
4893 - return 1;
4894 + if (security_capable(cred, &init_user_ns, CAP_SYSLOG,
4895 + CAP_OPT_NOAUDIT) == 0)
4896 + return true;
4897 /* fallthrough */
4898 default:
4899 - return 0;
4900 + return false;
4901 }
4902 }
4903
4904 @@ -674,7 +675,11 @@ static int kallsyms_open(struct inode *inode, struct file *file)
4905 return -ENOMEM;
4906 reset_iter(iter, 0);
4907
4908 - iter->show_value = kallsyms_show_value();
4909 + /*
4910 + * Instead of checking this on every s_show() call, cache
4911 + * the result here at open time.
4912 + */
4913 + iter->show_value = kallsyms_show_value(file->f_cred);
4914 return 0;
4915 }
4916
4917 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
4918 index 950a5cfd262c..0a967db226d8 100644
4919 --- a/kernel/kprobes.c
4920 +++ b/kernel/kprobes.c
4921 @@ -2362,7 +2362,7 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
4922 else
4923 kprobe_type = "k";
4924
4925 - if (!kallsyms_show_value())
4926 + if (!kallsyms_show_value(pi->file->f_cred))
4927 addr = NULL;
4928
4929 if (sym)
4930 @@ -2463,7 +2463,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
4931 * If /proc/kallsyms is not showing kernel address, we won't
4932 * show them here either.
4933 */
4934 - if (!kallsyms_show_value())
4935 + if (!kallsyms_show_value(m->file->f_cred))
4936 seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
4937 (void *)ent->start_addr);
4938 else
4939 diff --git a/kernel/module.c b/kernel/module.c
4940 index a2a47f4a33a7..6baa1080cdb7 100644
4941 --- a/kernel/module.c
4942 +++ b/kernel/module.c
4943 @@ -1507,8 +1507,7 @@ static inline bool sect_empty(const Elf_Shdr *sect)
4944 }
4945
4946 struct module_sect_attr {
4947 - struct module_attribute mattr;
4948 - char *name;
4949 + struct bin_attribute battr;
4950 unsigned long address;
4951 };
4952
4953 @@ -1518,13 +1517,18 @@ struct module_sect_attrs {
4954 struct module_sect_attr attrs[0];
4955 };
4956
4957 -static ssize_t module_sect_show(struct module_attribute *mattr,
4958 - struct module_kobject *mk, char *buf)
4959 +static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
4960 + struct bin_attribute *battr,
4961 + char *buf, loff_t pos, size_t count)
4962 {
4963 struct module_sect_attr *sattr =
4964 - container_of(mattr, struct module_sect_attr, mattr);
4965 - return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
4966 - (void *)sattr->address : NULL);
4967 + container_of(battr, struct module_sect_attr, battr);
4968 +
4969 + if (pos != 0)
4970 + return -EINVAL;
4971 +
4972 + return sprintf(buf, "0x%px\n",
4973 + kallsyms_show_value(file->f_cred) ? (void *)sattr->address : NULL);
4974 }
4975
4976 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
4977 @@ -1532,7 +1536,7 @@ static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
4978 unsigned int section;
4979
4980 for (section = 0; section < sect_attrs->nsections; section++)
4981 - kfree(sect_attrs->attrs[section].name);
4982 + kfree(sect_attrs->attrs[section].battr.attr.name);
4983 kfree(sect_attrs);
4984 }
4985
4986 @@ -1541,42 +1545,41 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
4987 unsigned int nloaded = 0, i, size[2];
4988 struct module_sect_attrs *sect_attrs;
4989 struct module_sect_attr *sattr;
4990 - struct attribute **gattr;
4991 + struct bin_attribute **gattr;
4992
4993 /* Count loaded sections and allocate structures */
4994 for (i = 0; i < info->hdr->e_shnum; i++)
4995 if (!sect_empty(&info->sechdrs[i]))
4996 nloaded++;
4997 size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),
4998 - sizeof(sect_attrs->grp.attrs[0]));
4999 - size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
5000 + sizeof(sect_attrs->grp.bin_attrs[0]));
5001 + size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
5002 sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
5003 if (sect_attrs == NULL)
5004 return;
5005
5006 /* Setup section attributes. */
5007 sect_attrs->grp.name = "sections";
5008 - sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
5009 + sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0];
5010
5011 sect_attrs->nsections = 0;
5012 sattr = &sect_attrs->attrs[0];
5013 - gattr = &sect_attrs->grp.attrs[0];
5014 + gattr = &sect_attrs->grp.bin_attrs[0];
5015 for (i = 0; i < info->hdr->e_shnum; i++) {
5016 Elf_Shdr *sec = &info->sechdrs[i];
5017 if (sect_empty(sec))
5018 continue;
5019 + sysfs_bin_attr_init(&sattr->battr);
5020 sattr->address = sec->sh_addr;
5021 - sattr->name = kstrdup(info->secstrings + sec->sh_name,
5022 - GFP_KERNEL);
5023 - if (sattr->name == NULL)
5024 + sattr->battr.attr.name =
5025 + kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
5026 + if (sattr->battr.attr.name == NULL)
5027 goto out;
5028 sect_attrs->nsections++;
5029 - sysfs_attr_init(&sattr->mattr.attr);
5030 - sattr->mattr.show = module_sect_show;
5031 - sattr->mattr.store = NULL;
5032 - sattr->mattr.attr.name = sattr->name;
5033 - sattr->mattr.attr.mode = S_IRUSR;
5034 - *(gattr++) = &(sattr++)->mattr.attr;
5035 + sattr->battr.read = module_sect_read;
5036 + sattr->battr.size = 3 /* "0x", "\n" */ + (BITS_PER_LONG / 4);
5037 + sattr->battr.attr.mode = 0400;
5038 + *(gattr++) = &(sattr++)->battr;
5039 }
5040 *gattr = NULL;
5041
5042 @@ -1666,7 +1669,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
5043 continue;
5044 if (info->sechdrs[i].sh_type == SHT_NOTE) {
5045 sysfs_bin_attr_init(nattr);
5046 - nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
5047 + nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;
5048 nattr->attr.mode = S_IRUGO;
5049 nattr->size = info->sechdrs[i].sh_size;
5050 nattr->private = (void *) info->sechdrs[i].sh_addr;
5051 @@ -4391,7 +4394,7 @@ static int modules_open(struct inode *inode, struct file *file)
5052
5053 if (!err) {
5054 struct seq_file *m = file->private_data;
5055 - m->private = kallsyms_show_value() ? NULL : (void *)8ul;
5056 + m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul;
5057 }
5058
5059 return err;
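
The module.c hunks serve the same goal: a module_attribute show() callback never sees the reader, so the per-section sysfs files become bin_attributes, whose read() callback receives the struct file and therefore file->f_cred, and module_sect_read() applies the kallsyms_show_value() policy to it. A userspace sketch of the effect; the "loop" module name is an assumption (any loaded module works), and the files are mode 0400, so it needs root.

#include <stdio.h>

int main(void)
{
	char addr[32];
	FILE *f = fopen("/sys/module/loop/sections/.text", "r");

	if (!f) {
		perror("fopen (needs root and a loaded module)");
		return 1;
	}
	if (fgets(addr, sizeof(addr), f))
		printf(".text at %s", addr);	/* "0x..." or zeroed */
	fclose(f);
	return 0;
}
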
5060 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
5061 index 7238ef445daf..8b3e99d095ae 100644
5062 --- a/kernel/sched/core.c
5063 +++ b/kernel/sched/core.c
5064 @@ -1649,7 +1649,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
5065 goto out;
5066 }
5067
5068 - if (cpumask_equal(p->cpus_ptr, new_mask))
5069 + if (cpumask_equal(&p->cpus_mask, new_mask))
5070 goto out;
5071
5072 dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
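
The one-line scheduler change makes the early-exit test in __set_cpus_allowed_ptr() compare the request against the task's stored affinity mask (cpus_mask) rather than going through the cpus_ptr indirection, which is not guaranteed to point at that mask. A trivial exercise of the syscall path that reaches this code, illustrative only:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);	/* pin this process to CPU 0 */
	if (sched_setaffinity(0, sizeof(set), &set)) {
		perror("sched_setaffinity");
		return 1;
	}
	/* Repeating the identical request exercises the early-exit
	 * comparison the hunk changes. */
	if (sched_setaffinity(0, sizeof(set), &set)) {
		perror("sched_setaffinity (repeat)");
		return 1;
	}
	puts("affinity applied twice");
	return 0;
}
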
5073 diff --git a/net/core/skmsg.c b/net/core/skmsg.c
5074 index 0536ea9298e4..118cf1ace43a 100644
5075 --- a/net/core/skmsg.c
5076 +++ b/net/core/skmsg.c
5077 @@ -687,7 +687,7 @@ static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
5078 return container_of(parser, struct sk_psock, parser);
5079 }
5080
5081 -static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb)
5082 +static void sk_psock_skb_redirect(struct sk_buff *skb)
5083 {
5084 struct sk_psock *psock_other;
5085 struct sock *sk_other;
5086 @@ -719,12 +719,11 @@ static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb)
5087 }
5088 }
5089
5090 -static void sk_psock_tls_verdict_apply(struct sk_psock *psock,
5091 - struct sk_buff *skb, int verdict)
5092 +static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict)
5093 {
5094 switch (verdict) {
5095 case __SK_REDIRECT:
5096 - sk_psock_skb_redirect(psock, skb);
5097 + sk_psock_skb_redirect(skb);
5098 break;
5099 case __SK_PASS:
5100 case __SK_DROP:
5101 @@ -745,8 +744,8 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
5102 ret = sk_psock_bpf_run(psock, prog, skb);
5103 ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
5104 }
5105 + sk_psock_tls_verdict_apply(skb, ret);
5106 rcu_read_unlock();
5107 - sk_psock_tls_verdict_apply(psock, skb, ret);
5108 return ret;
5109 }
5110 EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
5111 @@ -774,7 +773,7 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
5112 }
5113 goto out_free;
5114 case __SK_REDIRECT:
5115 - sk_psock_skb_redirect(psock, skb);
5116 + sk_psock_skb_redirect(skb);
5117 break;
5118 case __SK_DROP:
5119 /* fall-through */
5120 @@ -786,11 +785,18 @@ out_free:
5121
5122 static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
5123 {
5124 - struct sk_psock *psock = sk_psock_from_strp(strp);
5125 + struct sk_psock *psock;
5126 struct bpf_prog *prog;
5127 int ret = __SK_DROP;
5128 + struct sock *sk;
5129
5130 rcu_read_lock();
5131 + sk = strp->sk;
5132 + psock = sk_psock(sk);
5133 + if (unlikely(!psock)) {
5134 + kfree_skb(skb);
5135 + goto out;
5136 + }
5137 prog = READ_ONCE(psock->progs.skb_verdict);
5138 if (likely(prog)) {
5139 skb_orphan(skb);
5140 @@ -798,8 +804,9 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
5141 ret = sk_psock_bpf_run(psock, prog, skb);
5142 ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
5143 }
5144 - rcu_read_unlock();
5145 sk_psock_verdict_apply(psock, skb, ret);
5146 +out:
5147 + rcu_read_unlock();
5148 }
5149
5150 static int sk_psock_strp_read_done(struct strparser *strp, int err)
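
The skmsg hunks tighten RCU discipline: sk_psock_tls_strp_read() now applies the verdict before rcu_read_unlock(), and sk_psock_strp_read() looks the psock up under the lock and handles a NULL result instead of trusting container_of() on a socket whose psock may already have been torn down. A userspace analogy, with a pthread mutex standing in for the RCU read side and illustrative names throughout:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *psock;	/* may be detached and freed concurrently */

static void reader(void)
{
	pthread_mutex_lock(&lock);
	if (psock)	/* the NULL check the patch adds */
		printf("verdict on psock %d\n", *psock);
	else
		puts("psock gone, drop");
	pthread_mutex_unlock(&lock);	/* unlock only after the last use */
}

static void teardown(void)
{
	pthread_mutex_lock(&lock);
	free(psock);
	psock = NULL;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	psock = malloc(sizeof(*psock));
	if (!psock)
		return 1;
	*psock = 42;
	reader();
	teardown();
	reader();	/* now sees NULL instead of crashing */
	return 0;
}
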
5151 diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
5152 index 9f9e00ba3ad7..669cbe1609d9 100644
5153 --- a/net/core/sysctl_net_core.c
5154 +++ b/net/core/sysctl_net_core.c
5155 @@ -277,7 +277,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
5156 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5157 if (write && !ret) {
5158 if (jit_enable < 2 ||
5159 - (jit_enable == 2 && bpf_dump_raw_ok())) {
5160 + (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) {
5161 *(int *)table->data = jit_enable;
5162 if (jit_enable == 2)
5163 pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
5164 diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
5165 index d934384f31ad..6e3cf4d19ce8 100644
5166 --- a/net/netfilter/ipset/ip_set_bitmap_ip.c
5167 +++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
5168 @@ -314,7 +314,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
5169 set->variant = &bitmap_ip;
5170 if (!init_map_ip(set, map, first_ip, last_ip,
5171 elements, hosts, netmask)) {
5172 - kfree(map);
5173 + ip_set_free(map);
5174 return -ENOMEM;
5175 }
5176 if (tb[IPSET_ATTR_TIMEOUT]) {
5177 diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
5178 index e8532783b43a..ae7cdc0d0f29 100644
5179 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
5180 +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
5181 @@ -363,7 +363,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
5182 map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
5183 set->variant = &bitmap_ipmac;
5184 if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
5185 - kfree(map);
5186 + ip_set_free(map);
5187 return -ENOMEM;
5188 }
5189 if (tb[IPSET_ATTR_TIMEOUT]) {
5190 diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
5191 index e3ac914fff1a..d4a14750f5c4 100644
5192 --- a/net/netfilter/ipset/ip_set_bitmap_port.c
5193 +++ b/net/netfilter/ipset/ip_set_bitmap_port.c
5194 @@ -247,7 +247,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
5195 map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
5196 set->variant = &bitmap_port;
5197 if (!init_map_port(set, map, first_port, last_port)) {
5198 - kfree(map);
5199 + ip_set_free(map);
5200 return -ENOMEM;
5201 }
5202 if (tb[IPSET_ATTR_TIMEOUT]) {
5203 diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
5204 index 2389c9f89e48..a7a982a3e676 100644
5205 --- a/net/netfilter/ipset/ip_set_hash_gen.h
5206 +++ b/net/netfilter/ipset/ip_set_hash_gen.h
5207 @@ -682,7 +682,7 @@ retry:
5208 }
5209 t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits));
5210 if (!t->hregion) {
5211 - kfree(t);
5212 + ip_set_free(t);
5213 ret = -ENOMEM;
5214 goto out;
5215 }
5216 @@ -1533,7 +1533,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
5217 }
5218 t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits));
5219 if (!t->hregion) {
5220 - kfree(t);
5221 + ip_set_free(t);
5222 kfree(h);
5223 return -ENOMEM;
5224 }
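
All four ipset hunks fix the same asymmetry: these buffers come from ip_set_alloc(), which may fall back to vmalloc(), so error paths must release them through ip_set_free() (kvfree()) rather than kfree(). A self-contained userspace model of that pairing rule, with malloc()/mmap() standing in for kmalloc()/vmalloc():

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define BIG (1 << 20)

static void *xalloc(size_t n, int *mapped)	/* plays ip_set_alloc() */
{
	void *p;

	if (n < BIG) {
		*mapped = 0;
		return malloc(n);
	}
	*mapped = 1;
	p = mmap(NULL, n, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}

static void xfree(void *p, size_t n, int mapped)	/* plays ip_set_free() */
{
	if (mapped)
		munmap(p, n);	/* a raw free() here would be the old bug */
	else
		free(p);
}

int main(void)
{
	int mapped;
	void *p = xalloc(2 * BIG, &mapped);

	if (!p)
		return 1;
	xfree(p, 2 * BIG, mapped);
	puts("released through the matching path");
	return 0;
}
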
5225 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
5226 index 48db4aec02de..200cdad3ff3a 100644
5227 --- a/net/netfilter/nf_conntrack_core.c
5228 +++ b/net/netfilter/nf_conntrack_core.c
5229 @@ -2012,6 +2012,8 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
5230 err = __nf_conntrack_update(net, skb, ct, ctinfo);
5231 if (err < 0)
5232 return err;
5233 +
5234 + ct = nf_ct_get(skb, &ctinfo);
5235 }
5236
5237 return nf_confirm_cthelper(skb, ct, ctinfo);
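
The conntrack hunk re-reads ct after __nf_conntrack_update(), since the update may detach the skb's conntrack entry and attach a fresh one, leaving the caller's cached pointer stale. The shape of that fix, modelled in plain C with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct pkt { void *attached; };

/* May replace the attached object, like __nf_conntrack_update(). */
static void update(struct pkt *p)
{
	free(p->attached);
	p->attached = calloc(1, 16);
}

int main(void)
{
	struct pkt pkt = { .attached = calloc(1, 16) };
	void *obj = pkt.attached;

	update(&pkt);
	obj = pkt.attached;	/* the re-fetch the patch adds */
	printf("using %p\n", obj);
	free(pkt.attached);
	return 0;
}
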
5238 diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
5239 index 2a4d50e04441..14c101e104ce 100644
5240 --- a/net/qrtr/qrtr.c
5241 +++ b/net/qrtr/qrtr.c
5242 @@ -259,7 +259,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
5243 unsigned int ver;
5244 size_t hdrlen;
5245
5246 - if (len & 3)
5247 + if (len == 0 || len & 3)
5248 return -EINVAL;
5249
5250 skb = netdev_alloc_skb(NULL, len);
5251 @@ -273,6 +273,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
5252
5253 switch (ver) {
5254 case QRTR_PROTO_VER_1:
5255 + if (len < sizeof(*v1))
5256 + goto err;
5257 v1 = data;
5258 hdrlen = sizeof(*v1);
5259
5260 @@ -286,6 +288,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
5261 size = le32_to_cpu(v1->size);
5262 break;
5263 case QRTR_PROTO_VER_2:
5264 + if (len < sizeof(*v2))
5265 + goto err;
5266 v2 = data;
5267 hdrlen = sizeof(*v2) + v2->optlen;
5268
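
The qrtr hunks add the missing length validation: reject empty input up front, and confirm the buffer actually covers the version-specific header before it is cast and dereferenced. The same pattern in a self-contained sketch (struct hdr_v1 is a stand-in, not the real qrtr header layout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr_v1 {		/* stand-in layout */
	uint32_t version;
	uint32_t type;
	uint32_t src_node;
	uint32_t size;
};

static int parse(const void *data, size_t len)
{
	struct hdr_v1 h;

	if (len == 0 || len & 3)	/* empty or unaligned: reject */
		return -1;
	if (len < sizeof(h))		/* the bounds check the patch adds */
		return -1;
	memcpy(&h, data, sizeof(h));
	return (int)h.type;
}

int main(void)
{
	uint8_t short_pkt[4] = { 1, 0, 0, 0 };

	printf("4-byte packet -> %d\n", parse(short_pkt, sizeof(short_pkt)));
	return 0;
}
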
5269 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5270 index b65180e874fb..a34bbca80f49 100644
5271 --- a/net/wireless/nl80211.c
5272 +++ b/net/wireless/nl80211.c
5273 @@ -4798,7 +4798,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
5274 err = nl80211_parse_he_obss_pd(
5275 info->attrs[NL80211_ATTR_HE_OBSS_PD],
5276 &params.he_obss_pd);
5277 - goto out;
5278 + if (err)
5279 + goto out;
5280 }
5281
5282 nl80211_calculate_ap_params(&params);
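
The nl80211 fix is an inverted error path: the old code jumped to out unconditionally after parsing NL80211_ATTR_HE_OBSS_PD, so even a successful parse aborted nl80211_start_ap(). The bug and its fix in miniature:

#include <stdio.h>

static int parse_attr(int present)
{
	return present ? 0 : -22;	/* 0 or -EINVAL */
}

static int start_ap(int have_obss_pd)
{
	int err = 0;

	if (have_obss_pd) {
		err = parse_attr(1);
		if (err)	/* the check the patch adds */
			goto out;
	}
	puts("AP started");	/* ... rest of AP bring-up ... */
out:
	return err;
}

int main(void)
{
	return start_ap(1);
}
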
5283 diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
5284 index f34ce564d92c..1afa06b80f06 100644
5285 --- a/sound/core/compress_offload.c
5286 +++ b/sound/core/compress_offload.c
5287 @@ -722,6 +722,9 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
5288
5289 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
5290 if (!retval) {
5291 + /* clear flags and stop any drain wait */
5292 + stream->partial_drain = false;
5293 + stream->metadata_set = false;
5294 snd_compr_drain_notify(stream);
5295 stream->runtime->total_bytes_available = 0;
5296 stream->runtime->total_bytes_transferred = 0;
5297 @@ -879,6 +882,7 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
5298 if (stream->next_track == false)
5299 return -EPERM;
5300
5301 + stream->partial_drain = true;
5302 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
5303 if (retval) {
5304 pr_debug("Partial drain returned failure\n");
5305 diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c
5306 index e69a4ef0d6bd..08c10ac9d6c8 100644
5307 --- a/sound/drivers/opl3/opl3_synth.c
5308 +++ b/sound/drivers/opl3/opl3_synth.c
5309 @@ -91,6 +91,8 @@ int snd_opl3_ioctl(struct snd_hwdep * hw, struct file *file,
5310 {
5311 struct snd_dm_fm_info info;
5312
5313 + memset(&info, 0, sizeof(info));
5314 +
5315 info.fm_mode = opl3->fm_mode;
5316 info.rhythm = opl3->rhythm;
5317 if (copy_to_user(argp, &info, sizeof(struct snd_dm_fm_info)))
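
The opl3 hunk zeroes struct snd_dm_fm_info before filling it, because copy_to_user() copies the whole struct, padding included, and uninitialized padding leaks kernel stack contents. A userspace illustration of what padding can hold (the layout is illustrative, not snd_dm_fm_info's, and the unzeroed bytes are indeterminate):

#include <stdio.h>
#include <string.h>

struct info {
	char mode;	/* typically followed by 3 bytes of padding */
	int rhythm;
};

static void dump(const char *tag, const struct info *p)
{
	const unsigned char *b = (const unsigned char *)p;
	size_t i;

	printf("%s:", tag);
	for (i = 0; i < sizeof(*p); i++)
		printf(" %02x", b[i]);
	putchar('\n');
}

int main(void)
{
	struct info leaky, clean;

	leaky.mode = 1;
	leaky.rhythm = 2;	/* padding bytes left as found on the stack */
	dump("no memset", &leaky);

	memset(&clean, 0, sizeof(clean));
	clean.mode = 1;
	clean.rhythm = 2;	/* padding provably zero */
	dump("memset   ", &clean);
	return 0;
}
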
5318 diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
5319 index 2c6d2becfe1a..824f4ac1a8ce 100644
5320 --- a/sound/pci/hda/hda_auto_parser.c
5321 +++ b/sound/pci/hda/hda_auto_parser.c
5322 @@ -72,6 +72,12 @@ static int compare_input_type(const void *ap, const void *bp)
5323 if (a->type != b->type)
5324 return (int)(a->type - b->type);
5325
5326 + /* If has both hs_mic and hp_mic, pick the hs_mic ahead of hp_mic. */
5327 + if (a->is_headset_mic && b->is_headphone_mic)
5328 + return -1; /* don't swap */
5329 + else if (a->is_headphone_mic && b->is_headset_mic)
5330 + return 1; /* swap */
5331 +
5332 /* In case one has boost and the other one has not,
5333 pick the one with boost first. */
5334 return (int)(b->has_boost_on_pin - a->has_boost_on_pin);
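
compare_input_type() is a sort comparator, and the hunk adds a tie-breaker: when two inputs share a type, a headset mic now sorts ahead of a headphone mic. The same comparator shape, runnable with qsort():

#include <stdio.h>
#include <stdlib.h>

struct input { int type, is_headset_mic, is_headphone_mic; };

static int cmp(const void *ap, const void *bp)
{
	const struct input *a = ap, *b = bp;

	if (a->type != b->type)
		return a->type - b->type;
	if (a->is_headset_mic && b->is_headphone_mic)
		return -1;	/* don't swap: headset mic stays first */
	if (a->is_headphone_mic && b->is_headset_mic)
		return 1;	/* swap: headset mic moves first */
	return 0;
}

int main(void)
{
	struct input in[2] = {
		{ .type = 1, .is_headphone_mic = 1 },
		{ .type = 1, .is_headset_mic = 1 },
	};

	qsort(in, 2, sizeof(in[0]), cmp);
	printf("first entry: %s mic\n",
	       in[0].is_headset_mic ? "headset" : "headphone");
	return 0;
}
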
5335 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5336 index 612441508e80..95b0fdffc504 100644
5337 --- a/sound/pci/hda/hda_intel.c
5338 +++ b/sound/pci/hda/hda_intel.c
5339 @@ -2433,6 +2433,9 @@ static const struct pci_device_id azx_ids[] = {
5340 /* Icelake */
5341 { PCI_DEVICE(0x8086, 0x34c8),
5342 .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
5343 + /* Icelake-H */
5344 + { PCI_DEVICE(0x8086, 0x3dc8),
5345 + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
5346 /* Jasperlake */
5347 { PCI_DEVICE(0x8086, 0x38c8),
5348 .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
5349 @@ -2441,9 +2444,14 @@ static const struct pci_device_id azx_ids[] = {
5350 /* Tigerlake */
5351 { PCI_DEVICE(0x8086, 0xa0c8),
5352 .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
5353 + /* Tigerlake-H */
5354 + { PCI_DEVICE(0x8086, 0x43c8),
5355 + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
5356 /* Elkhart Lake */
5357 { PCI_DEVICE(0x8086, 0x4b55),
5358 .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
5359 + { PCI_DEVICE(0x8086, 0x4b58),
5360 + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
5361 /* Broxton-P(Apollolake) */
5362 { PCI_DEVICE(0x8086, 0x5a98),
5363 .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON },
5364 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5365 index 34868459104d..160e00e8007a 100644
5366 --- a/sound/pci/hda/patch_realtek.c
5367 +++ b/sound/pci/hda/patch_realtek.c
5368 @@ -6114,6 +6114,9 @@ enum {
5369 ALC236_FIXUP_HP_MUTE_LED,
5370 ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
5371 ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
5372 + ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
5373 + ALC269VC_FIXUP_ACER_HEADSET_MIC,
5374 + ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE,
5375 };
5376
5377 static const struct hda_fixup alc269_fixups[] = {
5378 @@ -7292,6 +7295,35 @@ static const struct hda_fixup alc269_fixups[] = {
5379 .chained = true,
5380 .chain_id = ALC269_FIXUP_HEADSET_MODE
5381 },
5382 + [ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS] = {
5383 + .type = HDA_FIXUP_PINS,
5384 + .v.pins = (const struct hda_pintbl[]) {
5385 + { 0x14, 0x90100120 }, /* use as internal speaker */
5386 + { 0x18, 0x02a111f0 }, /* use as headset mic, without its own jack detect */
5387 + { 0x1a, 0x01011020 }, /* use as line out */
5388 + { },
5389 + },
5390 + .chained = true,
5391 + .chain_id = ALC269_FIXUP_HEADSET_MIC
5392 + },
5393 + [ALC269VC_FIXUP_ACER_HEADSET_MIC] = {
5394 + .type = HDA_FIXUP_PINS,
5395 + .v.pins = (const struct hda_pintbl[]) {
5396 + { 0x18, 0x02a11030 }, /* use as headset mic */
5397 + { }
5398 + },
5399 + .chained = true,
5400 + .chain_id = ALC269_FIXUP_HEADSET_MIC
5401 + },
5402 + [ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE] = {
5403 + .type = HDA_FIXUP_PINS,
5404 + .v.pins = (const struct hda_pintbl[]) {
5405 + { 0x18, 0x01a11130 }, /* use as headset mic, without its own jack detect */
5406 + { }
5407 + },
5408 + .chained = true,
5409 + .chain_id = ALC269_FIXUP_HEADSET_MIC
5410 + },
5411 };
5412
5413 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5414 @@ -7307,10 +7339,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5415 SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
5416 SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
5417 SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
5418 + SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
5419 SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
5420 SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
5421 SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
5422 SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
5423 + SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
5424 + SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE),
5425 SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
5426 SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
5427 SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
5428 @@ -7536,8 +7571,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5429 SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
5430 SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
5431 SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5432 - SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
5433 - SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
5434 + SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
5435 SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
5436 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
5437 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
5438 diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
5439 index d66412a77873..3f79cd03507c 100644
5440 --- a/sound/soc/sof/sof-pci-dev.c
5441 +++ b/sound/soc/sof/sof-pci-dev.c
5442 @@ -420,6 +420,8 @@ static const struct pci_device_id sof_pci_ids[] = {
5443 #if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
5444 { PCI_DEVICE(0x8086, 0x06c8),
5445 .driver_data = (unsigned long)&cml_desc},
5446 + { PCI_DEVICE(0x8086, 0xa3f0), /* CML-S */
5447 + .driver_data = (unsigned long)&cml_desc},
5448 #endif
5449 #if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
5450 { PCI_DEVICE(0x8086, 0xa0c8),
5451 diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
5452 index 426c55b45e79..d5706b8b68a1 100644
5453 --- a/sound/usb/pcm.c
5454 +++ b/sound/usb/pcm.c
5455 @@ -350,6 +350,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
5456 goto add_sync_ep_from_ifnum;
5457 case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
5458 case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
5459 + case USB_ID(0x0d9a, 0x00df): /* RTX6001 */
5460 ep = 0x81;
5461 ifnum = 2;
5462 goto add_sync_ep_from_ifnum;
5463 diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
5464 index 042a5e8eb79d..8d1805d9e5a7 100644
5465 --- a/sound/usb/quirks-table.h
5466 +++ b/sound/usb/quirks-table.h
5467 @@ -3695,4 +3695,56 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
5468 }
5469 },
5470
5471 +/*
5472 + * MacroSilicon MS2109 based HDMI capture cards
5473 + *
5474 + * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch.
5475 + * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if
5476 + * they pretend to be 96kHz mono as a workaround for stereo being broken
5477 + * by that...
5478 + *
5479 + * They also have swapped L-R channels, but that's for userspace to deal
5480 + * with.
5481 + */
5482 +{
5483 + USB_DEVICE(0x534d, 0x2109),
5484 + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
5485 + .vendor_name = "MacroSilicon",
5486 + .product_name = "MS2109",
5487 + .ifnum = QUIRK_ANY_INTERFACE,
5488 + .type = QUIRK_COMPOSITE,
5489 + .data = &(const struct snd_usb_audio_quirk[]) {
5490 + {
5491 + .ifnum = 2,
5492 + .type = QUIRK_AUDIO_ALIGN_TRANSFER,
5493 + },
5494 + {
5495 + .ifnum = 2,
5496 + .type = QUIRK_AUDIO_STANDARD_MIXER,
5497 + },
5498 + {
5499 + .ifnum = 3,
5500 + .type = QUIRK_AUDIO_FIXED_ENDPOINT,
5501 + .data = &(const struct audioformat) {
5502 + .formats = SNDRV_PCM_FMTBIT_S16_LE,
5503 + .channels = 2,
5504 + .iface = 3,
5505 + .altsetting = 1,
5506 + .altset_idx = 1,
5507 + .attributes = 0,
5508 + .endpoint = 0x82,
5509 + .ep_attr = USB_ENDPOINT_XFER_ISOC |
5510 + USB_ENDPOINT_SYNC_ASYNC,
5511 + .rates = SNDRV_PCM_RATE_CONTINUOUS,
5512 + .rate_min = 48000,
5513 + .rate_max = 48000,
5514 + }
5515 + },
5516 + {
5517 + .ifnum = -1
5518 + }
5519 + }
5520 + }
5521 +},
5522 +
5523 #undef USB_DEVICE_VENDOR_SPEC
5524 diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
5525 index d43f9dec6998..e768c02ef2ab 100644
5526 --- a/tools/perf/arch/x86/util/intel-pt.c
5527 +++ b/tools/perf/arch/x86/util/intel-pt.c
5528 @@ -596,6 +596,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
5529 }
5530 evsel->core.attr.freq = 0;
5531 evsel->core.attr.sample_period = 1;
5532 + evsel->no_aux_samples = true;
5533 intel_pt_evsel = evsel;
5534 opts->full_auxtrace = true;
5535 }
5536 diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
5537 index 7bd73a904b4e..d187e46c2683 100644
5538 --- a/tools/perf/scripts/python/export-to-postgresql.py
5539 +++ b/tools/perf/scripts/python/export-to-postgresql.py
5540 @@ -1055,7 +1055,7 @@ def cbr(id, raw_buf):
5541 cbr = data[0]
5542 MHz = (data[4] + 500) / 1000
5543 percent = ((cbr * 1000 / data[2]) + 5) / 10
5544 - value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, MHz, 4, percent)
5545 + value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, int(MHz), 4, int(percent))
5546 cbr_file.write(value)
5547
5548 def mwait(id, raw_buf):
5549 diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
5550 index 4b28c9d08d5a..04217e8f535a 100755
5551 --- a/tools/perf/scripts/python/exported-sql-viewer.py
5552 +++ b/tools/perf/scripts/python/exported-sql-viewer.py
5553 @@ -756,7 +756,8 @@ class CallGraphModel(CallGraphModelBase):
5554 " FROM calls"
5555 " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
5556 " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
5557 - " WHERE symbols.name" + match +
5558 + " WHERE calls.id <> 0"
5559 + " AND symbols.name" + match +
5560 " GROUP BY comm_id, thread_id, call_path_id"
5561 " ORDER BY comm_id, thread_id, call_path_id")
5562
5563 @@ -950,7 +951,8 @@ class CallTreeModel(CallGraphModelBase):
5564 " FROM calls"
5565 " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
5566 " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
5567 - " WHERE symbols.name" + match +
5568 + " WHERE calls.id <> 0"
5569 + " AND symbols.name" + match +
5570 " ORDER BY comm_id, thread_id, call_time, calls.id")
5571
5572 def FindPath(self, query):
5573 @@ -1016,6 +1018,7 @@ class TreeWindowBase(QMdiSubWindow):
5574 child = self.model.index(row, 0, parent)
5575 if child.internalPointer().dbid == dbid:
5576 found = True
5577 + self.view.setExpanded(parent, True)
5578 self.view.setCurrentIndex(child)
5579 parent = child
5580 break
5581 diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
5582 index 88c3df24b748..514cef3a17b4 100644
5583 --- a/tools/perf/ui/browsers/hists.c
5584 +++ b/tools/perf/ui/browsers/hists.c
5585 @@ -2224,6 +2224,11 @@ static struct thread *hist_browser__selected_thread(struct hist_browser *browser
5586 return browser->he_selection->thread;
5587 }
5588
5589 +static struct res_sample *hist_browser__selected_res_sample(struct hist_browser *browser)
5590 +{
5591 + return browser->he_selection ? browser->he_selection->res_samples : NULL;
5592 +}
5593 +
5594 /* Check whether the browser is for 'top' or 'report' */
5595 static inline bool is_report_browser(void *timer)
5596 {
5597 @@ -3170,16 +3175,16 @@ skip_annotation:
5598 &options[nr_options], NULL, NULL, evsel);
5599 nr_options += add_res_sample_opt(browser, &actions[nr_options],
5600 &options[nr_options],
5601 - hist_browser__selected_entry(browser)->res_samples,
5602 - evsel, A_NORMAL);
5603 + hist_browser__selected_res_sample(browser),
5604 + evsel, A_NORMAL);
5605 nr_options += add_res_sample_opt(browser, &actions[nr_options],
5606 &options[nr_options],
5607 - hist_browser__selected_entry(browser)->res_samples,
5608 - evsel, A_ASM);
5609 + hist_browser__selected_res_sample(browser),
5610 + evsel, A_ASM);
5611 nr_options += add_res_sample_opt(browser, &actions[nr_options],
5612 &options[nr_options],
5613 - hist_browser__selected_entry(browser)->res_samples,
5614 - evsel, A_SOURCE);
5615 + hist_browser__selected_res_sample(browser),
5616 + evsel, A_SOURCE);
5617 nr_options += add_switch_opt(browser, &actions[nr_options],
5618 &options[nr_options]);
5619 skip_scripting:
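
The hist-browser hunks replace direct hist_browser__selected_entry(browser)->res_samples dereferences with a helper that returns NULL when nothing is selected, turning a potential NULL-pointer crash in the options menu into a graceful no-op. The pattern in miniature, with illustrative types:

#include <stdio.h>

struct entry { int res_samples; };
struct browser { struct entry *he_selection; };

/* NULL-safe accessor, same shape as hist_browser__selected_res_sample(). */
static int *selected_res_sample(struct browser *b)
{
	return b->he_selection ? &b->he_selection->res_samples : NULL;
}

int main(void)
{
	struct browser empty = { .he_selection = NULL };

	printf("no selection -> %p\n", (void *)selected_res_sample(&empty));
	return 0;
}
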
5620 diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
5621 index abc7fda4a0fe..a844715a352d 100644
5622 --- a/tools/perf/util/evsel.c
5623 +++ b/tools/perf/util/evsel.c
5624 @@ -1028,12 +1028,12 @@ void perf_evsel__config(struct evsel *evsel, struct record_opts *opts,
5625 if (callchain && callchain->enabled && !evsel->no_aux_samples)
5626 perf_evsel__config_callchain(evsel, opts, callchain);
5627
5628 - if (opts->sample_intr_regs) {
5629 + if (opts->sample_intr_regs && !evsel->no_aux_samples) {
5630 attr->sample_regs_intr = opts->sample_intr_regs;
5631 perf_evsel__set_sample_bit(evsel, REGS_INTR);
5632 }
5633
5634 - if (opts->sample_user_regs) {
5635 + if (opts->sample_user_regs && !evsel->no_aux_samples) {
5636 attr->sample_regs_user |= opts->sample_user_regs;
5637 perf_evsel__set_sample_bit(evsel, REGS_USER);
5638 }
5639 diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
5640 index a1c9eb6d4f40..c5cce3a60476 100644
5641 --- a/tools/perf/util/intel-pt.c
5642 +++ b/tools/perf/util/intel-pt.c
5643 @@ -1707,6 +1707,7 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
5644 u64 sample_type = evsel->core.attr.sample_type;
5645 u64 id = evsel->core.id[0];
5646 u8 cpumode;
5647 + u64 regs[8 * sizeof(sample.intr_regs.mask)];
5648
5649 if (intel_pt_skip_event(pt))
5650 return 0;
5651 @@ -1756,8 +1757,8 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
5652 }
5653
5654 if (sample_type & PERF_SAMPLE_REGS_INTR &&
5655 - items->mask[INTEL_PT_GP_REGS_POS]) {
5656 - u64 regs[sizeof(sample.intr_regs.mask)];
5657 + (items->mask[INTEL_PT_GP_REGS_POS] ||
5658 + items->mask[INTEL_PT_XMM_POS])) {
5659 u64 regs_mask = evsel->core.attr.sample_regs_intr;
5660 u64 *pos;
5661