Magellan Linux

Contents of /trunk/kernel-alx/patches-4.14/0152-4.14.53-all-fixes.patch

Parent Directory | Revision Log


Revision 3238 - (show annotations) (download)
Fri Nov 9 12:14:58 2018 UTC (5 years, 5 months ago) by niro
File size: 298151 byte(s)
-added up to patches-4.14.79
1 diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
2 index 640f65e79ef1..267920a1874b 100644
3 --- a/Documentation/ABI/testing/sysfs-class-cxl
4 +++ b/Documentation/ABI/testing/sysfs-class-cxl
5 @@ -69,7 +69,9 @@ Date: September 2014
6 Contact: linuxppc-dev@lists.ozlabs.org
7 Description: read/write
8 Set the mode for prefaulting in segments into the segment table
9 - when performing the START_WORK ioctl. Possible values:
10 + when performing the START_WORK ioctl. Only applicable when
11 + running under hashed page table mmu.
12 + Possible values:
13 none: No prefaulting (default)
14 work_element_descriptor: Treat the work element
15 descriptor as an effective address and
16 diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
17 index 361789df51ec..d1aecf53badb 100644
18 --- a/Documentation/printk-formats.txt
19 +++ b/Documentation/printk-formats.txt
20 @@ -397,11 +397,10 @@ struct clk
21
22 %pC pll1
23 %pCn pll1
24 - %pCr 1560000000
25
26 For printing struct clk structures. ``%pC`` and ``%pCn`` print the name
27 (Common Clock Framework) or address (legacy clock framework) of the
28 -structure; ``%pCr`` prints the current clock rate.
29 +structure.
30
31 Passed by reference.
32
33 diff --git a/Makefile b/Makefile
34 index e2e4009bbfed..fb66998408f4 100644
35 --- a/Makefile
36 +++ b/Makefile
37 @@ -1,7 +1,7 @@
38 # SPDX-License-Identifier: GPL-2.0
39 VERSION = 4
40 PATCHLEVEL = 14
41 -SUBLEVEL = 52
42 +SUBLEVEL = 53
43 EXTRAVERSION =
44 NAME = Petit Gorille
45
46 diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi
47 index 36983a7d7cfd..185357323572 100644
48 --- a/arch/arm/boot/dts/mt7623.dtsi
49 +++ b/arch/arm/boot/dts/mt7623.dtsi
50 @@ -22,11 +22,12 @@
51 #include <dt-bindings/phy/phy.h>
52 #include <dt-bindings/reset/mt2701-resets.h>
53 #include <dt-bindings/thermal/thermal.h>
54 -#include "skeleton64.dtsi"
55
56 / {
57 compatible = "mediatek,mt7623";
58 interrupt-parent = <&sysirq>;
59 + #address-cells = <2>;
60 + #size-cells = <2>;
61
62 cpu_opp_table: opp_table {
63 compatible = "operating-points-v2";
64 diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
65 index 7de704575aee..e96c0ca97972 100644
66 --- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
67 +++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
68 @@ -100,6 +100,7 @@
69 };
70
71 memory@80000000 {
72 + device_type = "memory";
73 reg = <0 0x80000000 0 0x40000000>;
74 };
75 };
76 diff --git a/arch/arm/boot/dts/mt7623n-rfb.dtsi b/arch/arm/boot/dts/mt7623n-rfb.dtsi
77 index 256c5fd947bf..43c9d7ca23a0 100644
78 --- a/arch/arm/boot/dts/mt7623n-rfb.dtsi
79 +++ b/arch/arm/boot/dts/mt7623n-rfb.dtsi
80 @@ -47,6 +47,7 @@
81 };
82
83 memory@80000000 {
84 + device_type = "memory";
85 reg = <0 0x80000000 0 0x40000000>;
86 };
87
88 diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
89 index 8d9f42a422cb..10d2fa183a9f 100644
90 --- a/arch/arm/boot/dts/socfpga.dtsi
91 +++ b/arch/arm/boot/dts/socfpga.dtsi
92 @@ -744,13 +744,13 @@
93 nand0: nand@ff900000 {
94 #address-cells = <0x1>;
95 #size-cells = <0x1>;
96 - compatible = "denali,denali-nand-dt";
97 + compatible = "altr,socfpga-denali-nand";
98 reg = <0xff900000 0x100000>,
99 <0xffb80000 0x10000>;
100 reg-names = "nand_data", "denali_reg";
101 interrupts = <0x0 0x90 0x4>;
102 dma-mask = <0xffffffff>;
103 - clocks = <&nand_clk>;
104 + clocks = <&nand_x_clk>;
105 status = "disabled";
106 };
107
108 diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
109 index bead79e4b2aa..791ca15c799e 100644
110 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi
111 +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
112 @@ -593,8 +593,7 @@
113 #size-cells = <0>;
114 reg = <0xffda5000 0x100>;
115 interrupts = <0 102 4>;
116 - num-chipselect = <4>;
117 - bus-num = <0>;
118 + num-cs = <4>;
119 /*32bit_access;*/
120 tx-dma-channel = <&pdma 16>;
121 rx-dma-channel = <&pdma 17>;
122 @@ -633,7 +632,7 @@
123 nand: nand@ffb90000 {
124 #address-cells = <1>;
125 #size-cells = <1>;
126 - compatible = "denali,denali-nand-dt", "altr,socfpga-denali-nand";
127 + compatible = "altr,socfpga-denali-nand";
128 reg = <0xffb90000 0x72000>,
129 <0xffb80000 0x10000>;
130 reg-names = "nand_data", "denali_reg";
131 diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
132 index 3b73fdcf3627..8de1100d1067 100644
133 --- a/arch/arm/include/asm/kgdb.h
134 +++ b/arch/arm/include/asm/kgdb.h
135 @@ -77,7 +77,7 @@ extern int kgdb_fault_expected;
136
137 #define KGDB_MAX_NO_CPUS 1
138 #define BUFMAX 400
139 -#define NUMREGBYTES (DBG_MAX_REG_NUM << 2)
140 +#define NUMREGBYTES (GDB_MAX_REGS << 2)
141 #define NUMCRITREGBYTES (32 << 2)
142
143 #define _R0 0
144 diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
145 index 64c54c92e214..d71cbf596d1f 100644
146 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
147 +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
148 @@ -205,9 +205,6 @@
149
150 bus-width = <4>;
151 cap-sd-highspeed;
152 - sd-uhs-sdr12;
153 - sd-uhs-sdr25;
154 - sd-uhs-sdr50;
155 max-frequency = <100000000>;
156 disable-wp;
157
158 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
159 index 345d4e521191..718822ab6e4b 100644
160 --- a/arch/arm64/kernel/cpufeature.c
161 +++ b/arch/arm64/kernel/cpufeature.c
162 @@ -877,7 +877,7 @@ static int __init parse_kpti(char *str)
163 __kpti_forced = enabled ? 1 : -1;
164 return 0;
165 }
166 -__setup("kpti=", parse_kpti);
167 +early_param("kpti", parse_kpti);
168 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
169
170 static const struct arm64_cpu_capabilities arm64_features[] = {
171 diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
172 index 0bdc96c61bc0..43442b3a463f 100644
173 --- a/arch/arm64/kernel/signal.c
174 +++ b/arch/arm64/kernel/signal.c
175 @@ -676,11 +676,12 @@ static void do_signal(struct pt_regs *regs)
176 unsigned long continue_addr = 0, restart_addr = 0;
177 int retval = 0;
178 struct ksignal ksig;
179 + bool syscall = in_syscall(regs);
180
181 /*
182 * If we were from a system call, check for system call restarting...
183 */
184 - if (in_syscall(regs)) {
185 + if (syscall) {
186 continue_addr = regs->pc;
187 restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
188 retval = regs->regs[0];
189 @@ -732,7 +733,7 @@ static void do_signal(struct pt_regs *regs)
190 * Handle restarting a different system call. As above, if a debugger
191 * has chosen to restart at a different PC, ignore the restart.
192 */
193 - if (in_syscall(regs) && regs->pc == restart_addr) {
194 + if (syscall && regs->pc == restart_addr) {
195 if (retval == -ERESTART_RESTARTBLOCK)
196 setup_restart_syscall(regs);
197 user_rewind_single_step(current);
198 diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
199 index e338165000e6..bf0821b7b1ab 100644
200 --- a/arch/arm64/mm/proc.S
201 +++ b/arch/arm64/mm/proc.S
202 @@ -196,8 +196,9 @@ ENDPROC(idmap_cpu_replace_ttbr1)
203
204 .macro __idmap_kpti_put_pgtable_ent_ng, type
205 orr \type, \type, #PTE_NG // Same bit for blocks and pages
206 - str \type, [cur_\()\type\()p] // Update the entry and ensure it
207 - dc civac, cur_\()\type\()p // is visible to all CPUs.
208 + str \type, [cur_\()\type\()p] // Update the entry and ensure
209 + dmb sy // that it is visible to all
210 + dc civac, cur_\()\type\()p // CPUs.
211 .endm
212
213 /*
214 diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
215 index 22123f7e8f75..2004b3f72d80 100644
216 --- a/arch/m68k/mac/config.c
217 +++ b/arch/m68k/mac/config.c
218 @@ -1017,7 +1017,7 @@ int __init mac_platform_init(void)
219 struct resource swim_rsrc = {
220 .flags = IORESOURCE_MEM,
221 .start = (resource_size_t)swim_base,
222 - .end = (resource_size_t)swim_base + 0x2000,
223 + .end = (resource_size_t)swim_base + 0x1FFF,
224 };
225
226 platform_device_register_simple("swim", -1, &swim_rsrc, 1);
227 diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
228 index c2a38321c96d..3b420f6d8822 100644
229 --- a/arch/m68k/mm/kmap.c
230 +++ b/arch/m68k/mm/kmap.c
231 @@ -89,7 +89,8 @@ static inline void free_io_area(void *addr)
232 for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
233 if (tmp->addr == addr) {
234 *p = tmp->next;
235 - __iounmap(tmp->addr, tmp->size);
236 + /* remove gap added in get_io_area() */
237 + __iounmap(tmp->addr, tmp->size - IO_SIZE);
238 kfree(tmp);
239 return;
240 }
241 diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
242 index 6054d49e608e..8c9cbf13d32a 100644
243 --- a/arch/mips/bcm47xx/setup.c
244 +++ b/arch/mips/bcm47xx/setup.c
245 @@ -212,6 +212,12 @@ static int __init bcm47xx_cpu_fixes(void)
246 */
247 if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
248 cpu_wait = NULL;
249 +
250 + /*
251 + * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
252 + * Enable ExternalSync for sync instruction to take effect
253 + */
254 + set_c0_config7(MIPS_CONF7_ES);
255 break;
256 #endif
257 }
258 diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
259 index a7d0b836f2f7..cea8ad864b3f 100644
260 --- a/arch/mips/include/asm/io.h
261 +++ b/arch/mips/include/asm/io.h
262 @@ -414,6 +414,8 @@ static inline type pfx##in##bwlq##p(unsigned long port) \
263 __val = *__addr; \
264 slow; \
265 \
266 + /* prevent prefetching of coherent DMA data prematurely */ \
267 + rmb(); \
268 return pfx##ioswab##bwlq(__addr, __val); \
269 }
270
271 diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
272 index a6810923b3f0..60c787d943b0 100644
273 --- a/arch/mips/include/asm/mipsregs.h
274 +++ b/arch/mips/include/asm/mipsregs.h
275 @@ -680,6 +680,8 @@
276 #define MIPS_CONF7_WII (_ULCAST_(1) << 31)
277
278 #define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
279 +/* ExternalSync */
280 +#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
281
282 #define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
283 #define MIPS_CONF7_AR (_ULCAST_(1) << 16)
284 @@ -2745,6 +2747,7 @@ __BUILD_SET_C0(status)
285 __BUILD_SET_C0(cause)
286 __BUILD_SET_C0(config)
287 __BUILD_SET_C0(config5)
288 +__BUILD_SET_C0(config7)
289 __BUILD_SET_C0(intcontrol)
290 __BUILD_SET_C0(intctl)
291 __BUILD_SET_C0(srsmap)
292 diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
293 index f2ee7e1e3342..cff52b283e03 100644
294 --- a/arch/mips/kernel/mcount.S
295 +++ b/arch/mips/kernel/mcount.S
296 @@ -119,10 +119,20 @@ NESTED(_mcount, PT_SIZE, ra)
297 EXPORT_SYMBOL(_mcount)
298 PTR_LA t1, ftrace_stub
299 PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
300 - bne t1, t2, static_trace
301 + beq t1, t2, fgraph_trace
302 nop
303
304 + MCOUNT_SAVE_REGS
305 +
306 + move a0, ra /* arg1: self return address */
307 + jalr t2 /* (1) call *ftrace_trace_function */
308 + move a1, AT /* arg2: parent's return address */
309 +
310 + MCOUNT_RESTORE_REGS
311 +
312 +fgraph_trace:
313 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
314 + PTR_LA t1, ftrace_stub
315 PTR_L t3, ftrace_graph_return
316 bne t1, t3, ftrace_graph_caller
317 nop
318 @@ -131,24 +141,11 @@ EXPORT_SYMBOL(_mcount)
319 bne t1, t3, ftrace_graph_caller
320 nop
321 #endif
322 - b ftrace_stub
323 -#ifdef CONFIG_32BIT
324 - addiu sp, sp, 8
325 -#else
326 - nop
327 -#endif
328
329 -static_trace:
330 - MCOUNT_SAVE_REGS
331 -
332 - move a0, ra /* arg1: self return address */
333 - jalr t2 /* (1) call *ftrace_trace_function */
334 - move a1, AT /* arg2: parent's return address */
335 -
336 - MCOUNT_RESTORE_REGS
337 #ifdef CONFIG_32BIT
338 addiu sp, sp, 8
339 #endif
340 +
341 .globl ftrace_stub
342 ftrace_stub:
343 RETURN_BACK
344 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
345 index 6f07c687fc05..c194f4c8e66b 100644
346 --- a/arch/powerpc/kernel/entry_64.S
347 +++ b/arch/powerpc/kernel/entry_64.S
348 @@ -597,6 +597,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
349 * actually hit this code path.
350 */
351
352 + isync
353 slbie r6
354 slbie r6 /* Workaround POWER5 < DD2.1 issue */
355 slbmte r7,r0
356 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
357 index 29d2b6050140..d0020bc1f209 100644
358 --- a/arch/powerpc/kernel/fadump.c
359 +++ b/arch/powerpc/kernel/fadump.c
360 @@ -1155,6 +1155,9 @@ void fadump_cleanup(void)
361 init_fadump_mem_struct(&fdm,
362 be64_to_cpu(fdm_active->cpu_state_data.destination_address));
363 fadump_invalidate_dump(&fdm);
364 + } else if (fw_dump.dump_registered) {
365 + /* Un-register Firmware-assisted dump if it was registered. */
366 + fadump_unregister_dump(&fdm);
367 }
368 }
369
370 diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
371 index 53b9c1dfd7d9..ceafad83ef50 100644
372 --- a/arch/powerpc/kernel/hw_breakpoint.c
373 +++ b/arch/powerpc/kernel/hw_breakpoint.c
374 @@ -175,8 +175,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
375 if (cpu_has_feature(CPU_FTR_DAWR)) {
376 length_max = 512 ; /* 64 doublewords */
377 /* DAWR region can't cross 512 boundary */
378 - if ((bp->attr.bp_addr >> 10) !=
379 - ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
380 + if ((bp->attr.bp_addr >> 9) !=
381 + ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9))
382 return -EINVAL;
383 }
384 if (info->len >
385 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
386 index f52ad5bb7109..81750d9624ab 100644
387 --- a/arch/powerpc/kernel/ptrace.c
388 +++ b/arch/powerpc/kernel/ptrace.c
389 @@ -2362,6 +2362,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
390 /* Create a new breakpoint request if one doesn't exist already */
391 hw_breakpoint_init(&attr);
392 attr.bp_addr = hw_brk.address;
393 + attr.bp_len = 8;
394 arch_bp_generic_fields(hw_brk.type,
395 &attr.bp_type);
396
397 diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
398 index da6ba9ba73ed..b73961b95c34 100644
399 --- a/arch/powerpc/perf/imc-pmu.c
400 +++ b/arch/powerpc/perf/imc-pmu.c
401 @@ -1131,7 +1131,7 @@ static int init_nest_pmu_ref(void)
402
403 static void cleanup_all_core_imc_memory(void)
404 {
405 - int i, nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
406 + int i, nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
407 struct imc_mem_info *ptr = core_imc_pmu->mem_info;
408 int size = core_imc_pmu->counter_mem_size;
409
410 @@ -1239,7 +1239,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
411 if (!pmu_ptr->pmu.name)
412 return -ENOMEM;
413
414 - nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
415 + nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
416 pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
417 GFP_KERNEL);
418
419 diff --git a/arch/powerpc/platforms/powernv/copy-paste.h b/arch/powerpc/platforms/powernv/copy-paste.h
420 index c9a503623431..e9a6c35f8a29 100644
421 --- a/arch/powerpc/platforms/powernv/copy-paste.h
422 +++ b/arch/powerpc/platforms/powernv/copy-paste.h
423 @@ -42,5 +42,6 @@ static inline int vas_paste(void *paste_address, int offset)
424 : "b" (offset), "b" (paste_address)
425 : "memory", "cr0");
426
427 - return (cr >> CR0_SHIFT) & CR0_MASK;
428 + /* We mask with 0xE to ignore SO */
429 + return (cr >> CR0_SHIFT) & 0xE;
430 }
431 diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
432 index 443d5ca71995..028d6d12ba32 100644
433 --- a/arch/powerpc/platforms/powernv/idle.c
434 +++ b/arch/powerpc/platforms/powernv/idle.c
435 @@ -78,7 +78,7 @@ static int pnv_save_sprs_for_deep_states(void)
436 uint64_t msr_val = MSR_IDLE;
437 uint64_t psscr_val = pnv_deepest_stop_psscr_val;
438
439 - for_each_possible_cpu(cpu) {
440 + for_each_present_cpu(cpu) {
441 uint64_t pir = get_hard_smp_processor_id(cpu);
442 uint64_t hsprg0_val = (uint64_t)&paca[cpu];
443
444 @@ -741,7 +741,7 @@ static int __init pnv_init_idle_states(void)
445 int cpu;
446
447 pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n");
448 - for_each_possible_cpu(cpu) {
449 + for_each_present_cpu(cpu) {
450 int base_cpu = cpu_first_thread_sibling(cpu);
451 int idx = cpu_thread_in_core(cpu);
452 int i;
453 diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
454 index 57f9e55f4352..677b29ef4532 100644
455 --- a/arch/powerpc/platforms/powernv/pci-ioda.c
456 +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
457 @@ -3591,7 +3591,6 @@ static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
458 WARN_ON(pe->table_group.group);
459 }
460
461 - pnv_pci_ioda2_table_free_pages(tbl);
462 iommu_tce_table_put(tbl);
463 }
464
465 diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
466 index 8243fdbb9b9c..2dae3f585c01 100644
467 --- a/arch/x86/events/intel/uncore_snbep.c
468 +++ b/arch/x86/events/intel/uncore_snbep.c
469 @@ -3035,11 +3035,19 @@ static struct intel_uncore_type *bdx_msr_uncores[] = {
470 NULL,
471 };
472
473 +/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
474 +static struct event_constraint bdx_uncore_pcu_constraints[] = {
475 + EVENT_CONSTRAINT(0x80, 0xe, 0x80),
476 + EVENT_CONSTRAINT_END
477 +};
478 +
479 void bdx_uncore_cpu_init(void)
480 {
481 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
482 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
483 uncore_msr_uncores = bdx_msr_uncores;
484 +
485 + hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
486 }
487
488 static struct intel_uncore_type bdx_uncore_ha = {
489 diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
490 index 4db77731e130..a04f0c242a28 100644
491 --- a/arch/x86/include/asm/barrier.h
492 +++ b/arch/x86/include/asm/barrier.h
493 @@ -38,7 +38,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
494 {
495 unsigned long mask;
496
497 - asm ("cmp %1,%2; sbb %0,%0;"
498 + asm volatile ("cmp %1,%2; sbb %0,%0;"
499 :"=r" (mask)
500 :"g"(size),"r" (index)
501 :"cc");
502 diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
503 index 4b8187639c2d..c51353569492 100644
504 --- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
505 +++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
506 @@ -143,6 +143,11 @@ static struct severity {
507 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
508 USER
509 ),
510 + MCESEV(
511 + PANIC, "Data load in unrecoverable area of kernel",
512 + SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
513 + KERNEL
514 + ),
515 #endif
516 MCESEV(
517 PANIC, "Action required: unknown MCACOD",
518 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
519 index 28d27de08545..58f887f5e036 100644
520 --- a/arch/x86/kernel/cpu/mcheck/mce.c
521 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
522 @@ -760,23 +760,25 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
523 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
524 struct pt_regs *regs)
525 {
526 - int i, ret = 0;
527 char *tmp;
528 + int i;
529
530 for (i = 0; i < mca_cfg.banks; i++) {
531 m->status = mce_rdmsrl(msr_ops.status(i));
532 - if (m->status & MCI_STATUS_VAL) {
533 - __set_bit(i, validp);
534 - if (quirk_no_way_out)
535 - quirk_no_way_out(i, m, regs);
536 - }
537 + if (!(m->status & MCI_STATUS_VAL))
538 + continue;
539 +
540 + __set_bit(i, validp);
541 + if (quirk_no_way_out)
542 + quirk_no_way_out(i, m, regs);
543
544 if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
545 + mce_read_aux(m, i);
546 *msg = tmp;
547 - ret = 1;
548 + return 1;
549 }
550 }
551 - return ret;
552 + return 0;
553 }
554
555 /*
556 @@ -1205,13 +1207,18 @@ void do_machine_check(struct pt_regs *regs, long error_code)
557 lmce = m.mcgstatus & MCG_STATUS_LMCES;
558
559 /*
560 + * Local machine check may already know that we have to panic.
561 + * Broadcast machine check begins rendezvous in mce_start()
562 * Go through all banks in exclusion of the other CPUs. This way we
563 * don't report duplicated events on shared banks because the first one
564 - * to see it will clear it. If this is a Local MCE, then no need to
565 - * perform rendezvous.
566 + * to see it will clear it.
567 */
568 - if (!lmce)
569 + if (lmce) {
570 + if (no_way_out)
571 + mce_panic("Fatal local machine check", &m, msg);
572 + } else {
573 order = mce_start(&no_way_out);
574 + }
575
576 for (i = 0; i < cfg->banks; i++) {
577 __clear_bit(i, toclear);
578 @@ -1287,12 +1294,17 @@ void do_machine_check(struct pt_regs *regs, long error_code)
579 no_way_out = worst >= MCE_PANIC_SEVERITY;
580 } else {
581 /*
582 - * Local MCE skipped calling mce_reign()
583 - * If we found a fatal error, we need to panic here.
584 + * If there was a fatal machine check we should have
585 + * already called mce_panic earlier in this function.
586 + * Since we re-read the banks, we might have found
587 + * something new. Check again to see if we found a
588 + * fatal error. We call "mce_severity()" again to
589 + * make sure we have the right "msg".
590 */
591 - if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
592 - mce_panic("Machine check from unknown source",
593 - NULL, NULL);
594 + if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
595 + mce_severity(&m, cfg->tolerant, &msg, true);
596 + mce_panic("Local fatal machine check!", &m, msg);
597 + }
598 }
599
600 /*
601 diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
602 index 697a4ce04308..736348ead421 100644
603 --- a/arch/x86/kernel/quirks.c
604 +++ b/arch/x86/kernel/quirks.c
605 @@ -645,12 +645,19 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
606 /* Skylake */
607 static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
608 {
609 - u32 capid0;
610 + u32 capid0, capid5;
611
612 pci_read_config_dword(pdev, 0x84, &capid0);
613 + pci_read_config_dword(pdev, 0x98, &capid5);
614
615 - if ((capid0 & 0xc0) == 0xc0)
616 + /*
617 + * CAPID0{7:6} indicate whether this is an advanced RAS SKU
618 + * CAPID5{8:5} indicate that various NVDIMM usage modes are
619 + * enabled, so memory machine check recovery is also enabled.
620 + */
621 + if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
622 static_branch_inc(&mcsafe_key);
623 +
624 }
625 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
626 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
627 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
628 index ef4efb931efa..ed8d78fd4f8c 100644
629 --- a/arch/x86/kernel/traps.c
630 +++ b/arch/x86/kernel/traps.c
631 @@ -828,16 +828,18 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
632 char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
633 "simd exception";
634
635 - if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
636 - return;
637 cond_local_irq_enable(regs);
638
639 if (!user_mode(regs)) {
640 - if (!fixup_exception(regs, trapnr)) {
641 - task->thread.error_code = error_code;
642 - task->thread.trap_nr = trapnr;
643 + if (fixup_exception(regs, trapnr))
644 + return;
645 +
646 + task->thread.error_code = error_code;
647 + task->thread.trap_nr = trapnr;
648 +
649 + if (notify_die(DIE_TRAP, str, regs, error_code,
650 + trapnr, SIGFPE) != NOTIFY_STOP)
651 die(str, regs, error_code);
652 - }
653 return;
654 }
655
656 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
657 index 82f5252c723a..071cbbbb60d9 100644
658 --- a/arch/x86/mm/init.c
659 +++ b/arch/x86/mm/init.c
660 @@ -706,7 +706,9 @@ void __init init_mem_mapping(void)
661 */
662 int devmem_is_allowed(unsigned long pagenr)
663 {
664 - if (page_is_ram(pagenr)) {
665 + if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
666 + IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
667 + != REGION_DISJOINT) {
668 /*
669 * For disallowed memory regions in the low 1MB range,
670 * request that the page be shown as all zeros.
671 diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
672 index f7af598c4f55..ae369c2bbc3e 100644
673 --- a/arch/x86/platform/efi/efi_64.c
674 +++ b/arch/x86/platform/efi/efi_64.c
675 @@ -166,14 +166,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
676 pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
677 set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
678
679 - if (!(pgd_val(*pgd) & _PAGE_PRESENT))
680 + if (!pgd_present(*pgd))
681 continue;
682
683 for (i = 0; i < PTRS_PER_P4D; i++) {
684 p4d = p4d_offset(pgd,
685 pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
686
687 - if (!(p4d_val(*p4d) & _PAGE_PRESENT))
688 + if (!p4d_present(*p4d))
689 continue;
690
691 pud = (pud_t *)p4d_page_vaddr(*p4d);
692 diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
693 index c0c756c76afe..db6d90e451de 100644
694 --- a/arch/x86/xen/smp_pv.c
695 +++ b/arch/x86/xen/smp_pv.c
696 @@ -32,6 +32,7 @@
697 #include <xen/interface/vcpu.h>
698 #include <xen/interface/xenpmu.h>
699
700 +#include <asm/spec-ctrl.h>
701 #include <asm/xen/interface.h>
702 #include <asm/xen/hypercall.h>
703
704 @@ -70,6 +71,8 @@ static void cpu_bringup(void)
705 cpu_data(cpu).x86_max_cores = 1;
706 set_cpu_sibling_map(cpu);
707
708 + speculative_store_bypass_ht_init();
709 +
710 xen_setup_cpu_clockevents();
711
712 notify_cpu_starting(cpu);
713 @@ -250,6 +253,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
714 }
715 set_cpu_sibling_map(0);
716
717 + speculative_store_bypass_ht_init();
718 +
719 xen_pmu_init(0);
720
721 if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
722 diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
723 index bae697a06a98..2986bc88a18e 100644
724 --- a/arch/xtensa/kernel/traps.c
725 +++ b/arch/xtensa/kernel/traps.c
726 @@ -336,7 +336,7 @@ do_unaligned_user (struct pt_regs *regs)
727 info.si_errno = 0;
728 info.si_code = BUS_ADRALN;
729 info.si_addr = (void *) regs->excvaddr;
730 - force_sig_info(SIGSEGV, &info, current);
731 + force_sig_info(SIGBUS, &info, current);
732
733 }
734 #endif
735 diff --git a/block/blk-core.c b/block/blk-core.c
736 index 1feeb1a8aad9..6f6e21821d2d 100644
737 --- a/block/blk-core.c
738 +++ b/block/blk-core.c
739 @@ -3150,6 +3150,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
740 dst->cpu = src->cpu;
741 dst->__sector = blk_rq_pos(src);
742 dst->__data_len = blk_rq_bytes(src);
743 + if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
744 + dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
745 + dst->special_vec = src->special_vec;
746 + }
747 dst->nr_phys_segments = src->nr_phys_segments;
748 dst->ioprio = src->ioprio;
749 dst->extra_len = src->extra_len;
750 diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
751 index ce2df8c9c583..7e6a43ffdcbe 100644
752 --- a/crypto/asymmetric_keys/x509_cert_parser.c
753 +++ b/crypto/asymmetric_keys/x509_cert_parser.c
754 @@ -249,6 +249,15 @@ int x509_note_signature(void *context, size_t hdrlen,
755 return -EINVAL;
756 }
757
758 + if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) {
759 + /* Discard the BIT STRING metadata */
760 + if (vlen < 1 || *(const u8 *)value != 0)
761 + return -EBADMSG;
762 +
763 + value++;
764 + vlen--;
765 + }
766 +
767 ctx->cert->raw_sig = value;
768 ctx->cert->raw_sig_size = vlen;
769 return 0;
770 diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
771 index a2be3fd2c72b..602ae58ee2d8 100644
772 --- a/drivers/acpi/acpi_lpss.c
773 +++ b/drivers/acpi/acpi_lpss.c
774 @@ -229,11 +229,13 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = {
775
776 static const struct lpss_device_desc byt_pwm_dev_desc = {
777 .flags = LPSS_SAVE_CTX,
778 + .prv_offset = 0x800,
779 .setup = byt_pwm_setup,
780 };
781
782 static const struct lpss_device_desc bsw_pwm_dev_desc = {
783 .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
784 + .prv_offset = 0x800,
785 .setup = bsw_pwm_setup,
786 };
787
788 diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
789 index 2c2ed9cf8796..f9413755177b 100644
790 --- a/drivers/auxdisplay/Kconfig
791 +++ b/drivers/auxdisplay/Kconfig
792 @@ -14,9 +14,6 @@ menuconfig AUXDISPLAY
793
794 If you say N, all options in this submenu will be skipped and disabled.
795
796 -config CHARLCD
797 - tristate "Character LCD core support" if COMPILE_TEST
798 -
799 if AUXDISPLAY
800
801 config HD44780
802 @@ -157,8 +154,6 @@ config HT16K33
803 Say yes here to add support for Holtek HT16K33, RAM mapping 16*8
804 LED controller driver with keyscan.
805
806 -endif # AUXDISPLAY
807 -
808 config ARM_CHARLCD
809 bool "ARM Ltd. Character LCD Driver"
810 depends on PLAT_VERSATILE
811 @@ -169,6 +164,8 @@ config ARM_CHARLCD
812 line and the Linux version on the second line, but that's
813 still useful.
814
815 +endif # AUXDISPLAY
816 +
817 config PANEL
818 tristate "Parallel port LCD/Keypad Panel support"
819 depends on PARPORT
820 @@ -448,3 +445,6 @@ config PANEL_BOOT_MESSAGE
821 printf()-formatted message is valid with newline and escape codes.
822
823 endif # PANEL
824 +
825 +config CHARLCD
826 + tristate "Character LCD core support" if COMPILE_TEST
827 diff --git a/drivers/base/core.c b/drivers/base/core.c
828 index a359934ffd85..b054cb2fd2b9 100644
829 --- a/drivers/base/core.c
830 +++ b/drivers/base/core.c
831 @@ -217,6 +217,13 @@ struct device_link *device_link_add(struct device *consumer,
832 link->rpm_active = true;
833 }
834 pm_runtime_new_link(consumer);
835 + /*
836 + * If the link is being added by the consumer driver at probe
837 + * time, balance the decrementation of the supplier's runtime PM
838 + * usage counter after consumer probe in driver_probe_device().
839 + */
840 + if (consumer->links.status == DL_DEV_PROBING)
841 + pm_runtime_get_noresume(supplier);
842 }
843 get_device(supplier);
844 link->supplier = supplier;
845 @@ -235,12 +242,12 @@ struct device_link *device_link_add(struct device *consumer,
846 switch (consumer->links.status) {
847 case DL_DEV_PROBING:
848 /*
849 - * Balance the decrementation of the supplier's
850 - * runtime PM usage counter after consumer probe
851 - * in driver_probe_device().
852 + * Some callers expect the link creation during
853 + * consumer driver probe to resume the supplier
854 + * even without DL_FLAG_RPM_ACTIVE.
855 */
856 if (flags & DL_FLAG_PM_RUNTIME)
857 - pm_runtime_get_sync(supplier);
858 + pm_runtime_resume(supplier);
859
860 link->status = DL_STATE_CONSUMER_PROBE;
861 break;
862 diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
863 index b3b78079aa9f..c276ba1c0a19 100644
864 --- a/drivers/base/power/domain.c
865 +++ b/drivers/base/power/domain.c
866 @@ -2162,6 +2162,9 @@ int genpd_dev_pm_attach(struct device *dev)
867 genpd_lock(pd);
868 ret = genpd_power_on(pd, 0);
869 genpd_unlock(pd);
870 +
871 + if (ret)
872 + genpd_remove_device(pd, dev);
873 out:
874 return ret ? -EPROBE_DEFER : 0;
875 }
876 diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
877 index 0459b1204694..d4862775b9f6 100644
878 --- a/drivers/base/power/opp/core.c
879 +++ b/drivers/base/power/opp/core.c
880 @@ -552,7 +552,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
881 }
882
883 /* Scaling up? Scale voltage before frequency */
884 - if (freq > old_freq) {
885 + if (freq >= old_freq) {
886 ret = _set_opp_voltage(dev, reg, new_supply);
887 if (ret)
888 goto restore_voltage;
889 diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
890 index fe4fd8aee19f..9057dad2a64c 100644
891 --- a/drivers/block/rbd.c
892 +++ b/drivers/block/rbd.c
893 @@ -3841,7 +3841,6 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev)
894 {
895 dout("%s rbd_dev %p\n", __func__, rbd_dev);
896
897 - cancel_delayed_work_sync(&rbd_dev->watch_dwork);
898 cancel_work_sync(&rbd_dev->acquired_lock_work);
899 cancel_work_sync(&rbd_dev->released_lock_work);
900 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
901 @@ -3859,6 +3858,7 @@ static void rbd_unregister_watch(struct rbd_device *rbd_dev)
902 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
903 mutex_unlock(&rbd_dev->watch_mutex);
904
905 + cancel_delayed_work_sync(&rbd_dev->watch_dwork);
906 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
907 }
908
909 diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
910 index c9f0ac083a3e..6f4ebd5e54c8 100644
911 --- a/drivers/bluetooth/hci_qca.c
912 +++ b/drivers/bluetooth/hci_qca.c
913 @@ -936,6 +936,12 @@ static int qca_setup(struct hci_uart *hu)
914 } else if (ret == -ENOENT) {
915 /* No patch/nvm-config found, run with original fw/config */
916 ret = 0;
917 + } else if (ret == -EAGAIN) {
918 + /*
919 + * Userspace firmware loader will return -EAGAIN in case no
920 + * patch/nvm-config is found, so run with original fw/config.
921 + */
922 + ret = 0;
923 }
924
925 /* Setup bdaddr */
926 diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
927 index feafdab734ae..4835b588b783 100644
928 --- a/drivers/char/ipmi/ipmi_bt_sm.c
929 +++ b/drivers/char/ipmi/ipmi_bt_sm.c
930 @@ -522,11 +522,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
931 if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
932 BT_CONTROL(BT_H_BUSY);
933
934 + bt->timeout = bt->BT_CAP_req2rsp;
935 +
936 /* Read BT capabilities if it hasn't been done yet */
937 if (!bt->BT_CAP_outreqs)
938 BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
939 SI_SM_CALL_WITHOUT_DELAY);
940 - bt->timeout = bt->BT_CAP_req2rsp;
941 BT_SI_SM_RETURN(SI_SM_IDLE);
942
943 case BT_STATE_XACTION_START:
944 diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
945 index 461bf0b8a094..98cf36fb068d 100644
946 --- a/drivers/char/tpm/tpm-dev-common.c
947 +++ b/drivers/char/tpm/tpm-dev-common.c
948 @@ -37,7 +37,7 @@ static void timeout_work(struct work_struct *work)
949 struct file_priv *priv = container_of(work, struct file_priv, work);
950
951 mutex_lock(&priv->buffer_mutex);
952 - atomic_set(&priv->data_pending, 0);
953 + priv->data_pending = 0;
954 memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
955 mutex_unlock(&priv->buffer_mutex);
956 }
957 @@ -46,7 +46,6 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip,
958 struct file_priv *priv)
959 {
960 priv->chip = chip;
961 - atomic_set(&priv->data_pending, 0);
962 mutex_init(&priv->buffer_mutex);
963 setup_timer(&priv->user_read_timer, user_reader_timeout,
964 (unsigned long)priv);
965 @@ -59,29 +58,24 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
966 size_t size, loff_t *off)
967 {
968 struct file_priv *priv = file->private_data;
969 - ssize_t ret_size;
970 - ssize_t orig_ret_size;
971 + ssize_t ret_size = 0;
972 int rc;
973
974 del_singleshot_timer_sync(&priv->user_read_timer);
975 flush_work(&priv->work);
976 - ret_size = atomic_read(&priv->data_pending);
977 - if (ret_size > 0) { /* relay data */
978 - orig_ret_size = ret_size;
979 - if (size < ret_size)
980 - ret_size = size;
981 + mutex_lock(&priv->buffer_mutex);
982
983 - mutex_lock(&priv->buffer_mutex);
984 + if (priv->data_pending) {
985 + ret_size = min_t(ssize_t, size, priv->data_pending);
986 rc = copy_to_user(buf, priv->data_buffer, ret_size);
987 - memset(priv->data_buffer, 0, orig_ret_size);
988 + memset(priv->data_buffer, 0, priv->data_pending);
989 if (rc)
990 ret_size = -EFAULT;
991
992 - mutex_unlock(&priv->buffer_mutex);
993 + priv->data_pending = 0;
994 }
995
996 - atomic_set(&priv->data_pending, 0);
997 -
998 + mutex_unlock(&priv->buffer_mutex);
999 return ret_size;
1000 }
1001
1002 @@ -92,17 +86,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
1003 size_t in_size = size;
1004 ssize_t out_size;
1005
1006 + if (in_size > TPM_BUFSIZE)
1007 + return -E2BIG;
1008 +
1009 + mutex_lock(&priv->buffer_mutex);
1010 +
1011 /* Cannot perform a write until the read has cleared either via
1012 * tpm_read or a user_read_timer timeout. This also prevents split
1013 * buffered writes from blocking here.
1014 */
1015 - if (atomic_read(&priv->data_pending) != 0)
1016 + if (priv->data_pending != 0) {
1017 + mutex_unlock(&priv->buffer_mutex);
1018 return -EBUSY;
1019 -
1020 - if (in_size > TPM_BUFSIZE)
1021 - return -E2BIG;
1022 -
1023 - mutex_lock(&priv->buffer_mutex);
1024 + }
1025
1026 if (copy_from_user
1027 (priv->data_buffer, (void __user *) buf, in_size)) {
1028 @@ -133,7 +129,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
1029 return out_size;
1030 }
1031
1032 - atomic_set(&priv->data_pending, out_size);
1033 + priv->data_pending = out_size;
1034 mutex_unlock(&priv->buffer_mutex);
1035
1036 /* Set a timeout by which the reader must come claim the result */
1037 @@ -150,5 +146,5 @@ void tpm_common_release(struct file *file, struct file_priv *priv)
1038 del_singleshot_timer_sync(&priv->user_read_timer);
1039 flush_work(&priv->work);
1040 file->private_data = NULL;
1041 - atomic_set(&priv->data_pending, 0);
1042 + priv->data_pending = 0;
1043 }
1044 diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h
1045 index ba3b6f9dacf7..b24cfb4d3ee1 100644
1046 --- a/drivers/char/tpm/tpm-dev.h
1047 +++ b/drivers/char/tpm/tpm-dev.h
1048 @@ -8,7 +8,7 @@ struct file_priv {
1049 struct tpm_chip *chip;
1050
1051 /* Data passed to and from the tpm via the read/write calls */
1052 - atomic_t data_pending;
1053 + size_t data_pending;
1054 struct mutex buffer_mutex;
1055
1056 struct timer_list user_read_timer; /* user needs to claim result */
1057 diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
1058 index e2e059d8ffec..d26ea7513226 100644
1059 --- a/drivers/char/tpm/tpm2-space.c
1060 +++ b/drivers/char/tpm/tpm2-space.c
1061 @@ -102,8 +102,9 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
1062 * TPM_RC_REFERENCE_H0 means the session has been
1063 * flushed outside the space
1064 */
1065 - rc = -ENOENT;
1066 + *handle = 0;
1067 tpm_buf_destroy(&tbuf);
1068 + return -ENOENT;
1069 } else if (rc > 0) {
1070 dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
1071 __func__, rc);
1072 diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
1073 index 7d3223fc7161..72b6091eb7b9 100644
1074 --- a/drivers/clk/at91/clk-pll.c
1075 +++ b/drivers/clk/at91/clk-pll.c
1076 @@ -132,19 +132,8 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
1077 unsigned long parent_rate)
1078 {
1079 struct clk_pll *pll = to_clk_pll(hw);
1080 - unsigned int pllr;
1081 - u16 mul;
1082 - u8 div;
1083 -
1084 - regmap_read(pll->regmap, PLL_REG(pll->id), &pllr);
1085 -
1086 - div = PLL_DIV(pllr);
1087 - mul = PLL_MUL(pllr, pll->layout);
1088 -
1089 - if (!div || !mul)
1090 - return 0;
1091
1092 - return (parent_rate / div) * (mul + 1);
1093 + return (parent_rate / pll->div) * (pll->mul + 1);
1094 }
1095
1096 static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
1097 diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
1098 index e580a5e6346c..30c23b882675 100644
1099 --- a/drivers/clk/renesas/renesas-cpg-mssr.c
1100 +++ b/drivers/clk/renesas/renesas-cpg-mssr.c
1101 @@ -248,8 +248,9 @@ struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
1102 dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
1103 PTR_ERR(clk));
1104 else
1105 - dev_dbg(dev, "clock (%u, %u) is %pC at %pCr Hz\n",
1106 - clkspec->args[0], clkspec->args[1], clk, clk);
1107 + dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
1108 + clkspec->args[0], clkspec->args[1], clk,
1109 + clk_get_rate(clk));
1110 return clk;
1111 }
1112
1113 @@ -314,7 +315,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
1114 if (IS_ERR_OR_NULL(clk))
1115 goto fail;
1116
1117 - dev_dbg(dev, "Core clock %pC at %pCr Hz\n", clk, clk);
1118 + dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
1119 priv->clks[id] = clk;
1120 return;
1121
1122 @@ -380,7 +381,7 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
1123 if (IS_ERR(clk))
1124 goto fail;
1125
1126 - dev_dbg(dev, "Module clock %pC at %pCr Hz\n", clk, clk);
1127 + dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
1128 priv->clks[id] = clk;
1129 return;
1130
1131 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
1132 index 20226d4243f2..a905bbb45667 100644
1133 --- a/drivers/cpufreq/intel_pstate.c
1134 +++ b/drivers/cpufreq/intel_pstate.c
1135 @@ -285,6 +285,7 @@ struct pstate_funcs {
1136 static struct pstate_funcs pstate_funcs __read_mostly;
1137
1138 static int hwp_active __read_mostly;
1139 +static int hwp_mode_bdw __read_mostly;
1140 static bool per_cpu_limits __read_mostly;
1141
1142 static struct cpufreq_driver *intel_pstate_driver __read_mostly;
1143 @@ -1371,7 +1372,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
1144 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
1145 cpu->pstate.scaling = pstate_funcs.get_scaling();
1146 cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
1147 - cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1148 +
1149 + if (hwp_active && !hwp_mode_bdw) {
1150 + unsigned int phy_max, current_max;
1151 +
1152 + intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
1153 + cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
1154 + } else {
1155 + cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1156 + }
1157
1158 if (pstate_funcs.get_aperf_mperf_shift)
1159 cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
1160 @@ -2261,28 +2270,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
1161 static inline void intel_pstate_request_control_from_smm(void) {}
1162 #endif /* CONFIG_ACPI */
1163
1164 +#define INTEL_PSTATE_HWP_BROADWELL 0x01
1165 +
1166 +#define ICPU_HWP(model, hwp_mode) \
1167 + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
1168 +
1169 static const struct x86_cpu_id hwp_support_ids[] __initconst = {
1170 - { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
1171 + ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
1172 + ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL),
1173 + ICPU_HWP(X86_MODEL_ANY, 0),
1174 {}
1175 };
1176
1177 static int __init intel_pstate_init(void)
1178 {
1179 + const struct x86_cpu_id *id;
1180 int rc;
1181
1182 if (no_load)
1183 return -ENODEV;
1184
1185 - if (x86_match_cpu(hwp_support_ids)) {
1186 + id = x86_match_cpu(hwp_support_ids);
1187 + if (id) {
1188 copy_cpu_funcs(&core_funcs);
1189 if (!no_hwp) {
1190 hwp_active++;
1191 + hwp_mode_bdw = id->driver_data;
1192 intel_pstate.attr = hwp_cpufreq_attrs;
1193 goto hwp_cpu_matched;
1194 }
1195 } else {
1196 - const struct x86_cpu_id *id;
1197 -
1198 id = x86_match_cpu(intel_pstate_cpu_ids);
1199 if (!id)
1200 return -ENODEV;
1201 diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
1202 index e06605b21841..1d7d5d121d55 100644
1203 --- a/drivers/cpuidle/cpuidle-powernv.c
1204 +++ b/drivers/cpuidle/cpuidle-powernv.c
1205 @@ -43,9 +43,31 @@ struct stop_psscr_table {
1206
1207 static struct stop_psscr_table stop_psscr_table[CPUIDLE_STATE_MAX] __read_mostly;
1208
1209 -static u64 snooze_timeout __read_mostly;
1210 +static u64 default_snooze_timeout __read_mostly;
1211 static bool snooze_timeout_en __read_mostly;
1212
1213 +static u64 get_snooze_timeout(struct cpuidle_device *dev,
1214 + struct cpuidle_driver *drv,
1215 + int index)
1216 +{
1217 + int i;
1218 +
1219 + if (unlikely(!snooze_timeout_en))
1220 + return default_snooze_timeout;
1221 +
1222 + for (i = index + 1; i < drv->state_count; i++) {
1223 + struct cpuidle_state *s = &drv->states[i];
1224 + struct cpuidle_state_usage *su = &dev->states_usage[i];
1225 +
1226 + if (s->disabled || su->disable)
1227 + continue;
1228 +
1229 + return s->target_residency * tb_ticks_per_usec;
1230 + }
1231 +
1232 + return default_snooze_timeout;
1233 +}
1234 +
1235 static int snooze_loop(struct cpuidle_device *dev,
1236 struct cpuidle_driver *drv,
1237 int index)
1238 @@ -56,7 +78,7 @@ static int snooze_loop(struct cpuidle_device *dev,
1239
1240 local_irq_enable();
1241
1242 - snooze_exit_time = get_tb() + snooze_timeout;
1243 + snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index);
1244 ppc64_runlatch_off();
1245 HMT_very_low();
1246 while (!need_resched()) {
1247 @@ -463,11 +485,9 @@ static int powernv_idle_probe(void)
1248 cpuidle_state_table = powernv_states;
1249 /* Device tree can indicate more idle states */
1250 max_idle_state = powernv_add_idle_states();
1251 - if (max_idle_state > 1) {
1252 + default_snooze_timeout = TICK_USEC * tb_ticks_per_usec;
1253 + if (max_idle_state > 1)
1254 snooze_timeout_en = true;
1255 - snooze_timeout = powernv_states[1].target_residency *
1256 - tb_ticks_per_usec;
1257 - }
1258 } else
1259 return -ENODEV;
1260
1261 diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
1262 index 39ab210c44f6..565f7d8d3304 100644
1263 --- a/drivers/iio/accel/sca3000.c
1264 +++ b/drivers/iio/accel/sca3000.c
1265 @@ -1277,7 +1277,7 @@ static int sca3000_configure_ring(struct iio_dev *indio_dev)
1266 {
1267 struct iio_buffer *buffer;
1268
1269 - buffer = iio_kfifo_allocate();
1270 + buffer = devm_iio_kfifo_allocate(&indio_dev->dev);
1271 if (!buffer)
1272 return -ENOMEM;
1273
1274 @@ -1287,11 +1287,6 @@ static int sca3000_configure_ring(struct iio_dev *indio_dev)
1275 return 0;
1276 }
1277
1278 -static void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
1279 -{
1280 - iio_kfifo_free(indio_dev->buffer);
1281 -}
1282 -
1283 static inline
1284 int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
1285 {
1286 @@ -1547,8 +1542,6 @@ static int sca3000_remove(struct spi_device *spi)
1287 if (spi->irq)
1288 free_irq(spi->irq, indio_dev);
1289
1290 - sca3000_unconfigure_ring(indio_dev);
1291 -
1292 return 0;
1293 }
1294
1295 diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
1296 index 34e353c43ac8..677f812f372a 100644
1297 --- a/drivers/iio/adc/ad7791.c
1298 +++ b/drivers/iio/adc/ad7791.c
1299 @@ -244,58 +244,9 @@ static int ad7791_read_raw(struct iio_dev *indio_dev,
1300 return -EINVAL;
1301 }
1302
1303 -static const char * const ad7791_sample_freq_avail[] = {
1304 - [AD7791_FILTER_RATE_120] = "120",
1305 - [AD7791_FILTER_RATE_100] = "100",
1306 - [AD7791_FILTER_RATE_33_3] = "33.3",
1307 - [AD7791_FILTER_RATE_20] = "20",
1308 - [AD7791_FILTER_RATE_16_6] = "16.6",
1309 - [AD7791_FILTER_RATE_16_7] = "16.7",
1310 - [AD7791_FILTER_RATE_13_3] = "13.3",
1311 - [AD7791_FILTER_RATE_9_5] = "9.5",
1312 -};
1313 -
1314 -static ssize_t ad7791_read_frequency(struct device *dev,
1315 - struct device_attribute *attr, char *buf)
1316 -{
1317 - struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1318 - struct ad7791_state *st = iio_priv(indio_dev);
1319 - unsigned int rate = st->filter & AD7791_FILTER_RATE_MASK;
1320 -
1321 - return sprintf(buf, "%s\n", ad7791_sample_freq_avail[rate]);
1322 -}
1323 -
1324 -static ssize_t ad7791_write_frequency(struct device *dev,
1325 - struct device_attribute *attr, const char *buf, size_t len)
1326 -{
1327 - struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1328 - struct ad7791_state *st = iio_priv(indio_dev);
1329 - int i, ret;
1330 -
1331 - i = sysfs_match_string(ad7791_sample_freq_avail, buf);
1332 - if (i < 0)
1333 - return i;
1334 -
1335 - ret = iio_device_claim_direct_mode(indio_dev);
1336 - if (ret)
1337 - return ret;
1338 - st->filter &= ~AD7791_FILTER_RATE_MASK;
1339 - st->filter |= i;
1340 - ad_sd_write_reg(&st->sd, AD7791_REG_FILTER, sizeof(st->filter),
1341 - st->filter);
1342 - iio_device_release_direct_mode(indio_dev);
1343 -
1344 - return len;
1345 -}
1346 -
1347 -static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
1348 - ad7791_read_frequency,
1349 - ad7791_write_frequency);
1350 -
1351 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("120 100 33.3 20 16.7 16.6 13.3 9.5");
1352
1353 static struct attribute *ad7791_attributes[] = {
1354 - &iio_dev_attr_sampling_frequency.dev_attr.attr,
1355 &iio_const_attr_sampling_frequency_available.dev_attr.attr,
1356 NULL
1357 };
1358 diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
1359 index 2b6c9b516070..d76455edd292 100644
1360 --- a/drivers/infiniband/core/umem.c
1361 +++ b/drivers/infiniband/core/umem.c
1362 @@ -119,16 +119,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
1363 umem->length = size;
1364 umem->address = addr;
1365 umem->page_shift = PAGE_SHIFT;
1366 - /*
1367 - * We ask for writable memory if any of the following
1368 - * access flags are set. "Local write" and "remote write"
1369 - * obviously require write access. "Remote atomic" can do
1370 - * things like fetch and add, which will modify memory, and
1371 - * "MW bind" can change permissions by binding a window.
1372 - */
1373 - umem->writable = !!(access &
1374 - (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
1375 - IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
1376 + umem->writable = ib_access_writable(access);
1377
1378 if (access & IB_ACCESS_ON_DEMAND) {
1379 ret = ib_umem_odp_get(context, umem, access);
1380 diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
1381 index 259562282668..33cf1734c4e5 100644
1382 --- a/drivers/infiniband/hw/hfi1/chip.c
1383 +++ b/drivers/infiniband/hw/hfi1/chip.c
1384 @@ -6829,7 +6829,7 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
1385 }
1386 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
1387 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
1388 - rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
1389 + rcvmask |= rcd->rcvhdrtail_kvaddr ?
1390 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
1391 hfi1_rcvctrl(dd, rcvmask, rcd);
1392 hfi1_rcd_put(rcd);
1393 @@ -8341,7 +8341,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
1394 u32 tail;
1395 int present;
1396
1397 - if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
1398 + if (!rcd->rcvhdrtail_kvaddr)
1399 present = (rcd->seq_cnt ==
1400 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
1401 else /* is RDMA rtail */
1402 @@ -11813,7 +11813,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
1403 /* reset the tail and hdr addresses, and sequence count */
1404 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
1405 rcd->rcvhdrq_dma);
1406 - if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
1407 + if (rcd->rcvhdrtail_kvaddr)
1408 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
1409 rcd->rcvhdrqtailaddr_dma);
1410 rcd->seq_cnt = 1;
1411 @@ -11893,7 +11893,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
1412 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
1413 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
1414 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
1415 - if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
1416 + if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
1417 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
1418 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
1419 /* See comment on RcvCtxtCtrl.TailUpd above */
1420 diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
1421 index 36ae1fd86502..f661b387e916 100644
1422 --- a/drivers/infiniband/hw/hfi1/debugfs.c
1423 +++ b/drivers/infiniband/hw/hfi1/debugfs.c
1424 @@ -1179,7 +1179,8 @@ DEBUGFS_FILE_OPS(fault_stats);
1425
1426 static void fault_exit_opcode_debugfs(struct hfi1_ibdev *ibd)
1427 {
1428 - debugfs_remove_recursive(ibd->fault_opcode->dir);
1429 + if (ibd->fault_opcode)
1430 + debugfs_remove_recursive(ibd->fault_opcode->dir);
1431 kfree(ibd->fault_opcode);
1432 ibd->fault_opcode = NULL;
1433 }
1434 @@ -1207,6 +1208,7 @@ static int fault_init_opcode_debugfs(struct hfi1_ibdev *ibd)
1435 &ibd->fault_opcode->attr);
1436 if (IS_ERR(ibd->fault_opcode->dir)) {
1437 kfree(ibd->fault_opcode);
1438 + ibd->fault_opcode = NULL;
1439 return -ENOENT;
1440 }
1441
1442 @@ -1230,7 +1232,8 @@ static int fault_init_opcode_debugfs(struct hfi1_ibdev *ibd)
1443
1444 static void fault_exit_packet_debugfs(struct hfi1_ibdev *ibd)
1445 {
1446 - debugfs_remove_recursive(ibd->fault_packet->dir);
1447 + if (ibd->fault_packet)
1448 + debugfs_remove_recursive(ibd->fault_packet->dir);
1449 kfree(ibd->fault_packet);
1450 ibd->fault_packet = NULL;
1451 }
1452 @@ -1256,6 +1259,7 @@ static int fault_init_packet_debugfs(struct hfi1_ibdev *ibd)
1453 &ibd->fault_opcode->attr);
1454 if (IS_ERR(ibd->fault_packet->dir)) {
1455 kfree(ibd->fault_packet);
1456 + ibd->fault_packet = NULL;
1457 return -ENOENT;
1458 }
1459
1460 diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
1461 index ee2253d06984..9abc5a9c47a0 100644
1462 --- a/drivers/infiniband/hw/hfi1/file_ops.c
1463 +++ b/drivers/infiniband/hw/hfi1/file_ops.c
1464 @@ -622,7 +622,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
1465 ret = -EINVAL;
1466 goto done;
1467 }
1468 - if (flags & VM_WRITE) {
1469 + if ((flags & VM_WRITE) || !uctxt->rcvhdrtail_kvaddr) {
1470 ret = -EPERM;
1471 goto done;
1472 }
1473 @@ -807,8 +807,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
1474 * checks to default and disable the send context.
1475 */
1476 if (uctxt->sc) {
1477 - set_pio_integrity(uctxt->sc);
1478 sc_disable(uctxt->sc);
1479 + set_pio_integrity(uctxt->sc);
1480 }
1481
1482 hfi1_free_ctxt_rcv_groups(uctxt);
1483 diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
1484 index ccc7b9b8637e..13a7bcaa58e6 100644
1485 --- a/drivers/infiniband/hw/hfi1/hfi.h
1486 +++ b/drivers/infiniband/hw/hfi1/hfi.h
1487 @@ -1851,6 +1851,7 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
1488 #define HFI1_HAS_SDMA_TIMEOUT 0x8
1489 #define HFI1_HAS_SEND_DMA 0x10 /* Supports Send DMA */
1490 #define HFI1_FORCED_FREEZE 0x80 /* driver forced freeze mode */
1491 +#define HFI1_SHUTDOWN 0x100 /* device is shutting down */
1492
1493 /* IB dword length mask in PBC (lower 11 bits); same for all chips */
1494 #define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)
1495 diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
1496 index 918dbd350c71..ee5cbdfeb3ab 100644
1497 --- a/drivers/infiniband/hw/hfi1/init.c
1498 +++ b/drivers/infiniband/hw/hfi1/init.c
1499 @@ -1029,6 +1029,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
1500 unsigned pidx;
1501 int i;
1502
1503 + if (dd->flags & HFI1_SHUTDOWN)
1504 + return;
1505 + dd->flags |= HFI1_SHUTDOWN;
1506 +
1507 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1508 ppd = dd->pport + pidx;
1509
1510 @@ -1353,6 +1357,7 @@ void hfi1_disable_after_error(struct hfi1_devdata *dd)
1511
1512 static void remove_one(struct pci_dev *);
1513 static int init_one(struct pci_dev *, const struct pci_device_id *);
1514 +static void shutdown_one(struct pci_dev *);
1515
1516 #define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
1517 #define PFX DRIVER_NAME ": "
1518 @@ -1369,6 +1374,7 @@ static struct pci_driver hfi1_pci_driver = {
1519 .name = DRIVER_NAME,
1520 .probe = init_one,
1521 .remove = remove_one,
1522 + .shutdown = shutdown_one,
1523 .id_table = hfi1_pci_tbl,
1524 .err_handler = &hfi1_pci_err_handler,
1525 };
1526 @@ -1780,6 +1786,13 @@ static void remove_one(struct pci_dev *pdev)
1527 postinit_cleanup(dd);
1528 }
1529
1530 +static void shutdown_one(struct pci_dev *pdev)
1531 +{
1532 + struct hfi1_devdata *dd = pci_get_drvdata(pdev);
1533 +
1534 + shutdown_device(dd);
1535 +}
1536 +
1537 /**
1538 * hfi1_create_rcvhdrq - create a receive header queue
1539 * @dd: the hfi1_ib device
1540 @@ -1795,7 +1808,6 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1541 u64 reg;
1542
1543 if (!rcd->rcvhdrq) {
1544 - dma_addr_t dma_hdrqtail;
1545 gfp_t gfp_flags;
1546
1547 /*
1548 @@ -1821,13 +1833,13 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1549 goto bail;
1550 }
1551
1552 - if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
1553 + if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
1554 + HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
1555 rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
1556 - &dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
1557 - gfp_flags);
1558 + &dd->pcidev->dev, PAGE_SIZE,
1559 + &rcd->rcvhdrqtailaddr_dma, gfp_flags);
1560 if (!rcd->rcvhdrtail_kvaddr)
1561 goto bail_free;
1562 - rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
1563 }
1564
1565 rcd->rcvhdrq_size = amt;
1566 diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
1567 index 7108a4b5e94c..a95ac6246559 100644
1568 --- a/drivers/infiniband/hw/hfi1/pio.c
1569 +++ b/drivers/infiniband/hw/hfi1/pio.c
1570 @@ -50,8 +50,6 @@
1571 #include "qp.h"
1572 #include "trace.h"
1573
1574 -#define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */
1575 -
1576 #define SC(name) SEND_CTXT_##name
1577 /*
1578 * Send Context functions
1579 @@ -977,15 +975,40 @@ void sc_disable(struct send_context *sc)
1580 }
1581
1582 /* return SendEgressCtxtStatus.PacketOccupancy */
1583 -#define packet_occupancy(r) \
1584 - (((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\
1585 - >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)
1586 +static u64 packet_occupancy(u64 reg)
1587 +{
1588 + return (reg &
1589 + SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)
1590 + >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT;
1591 +}
1592
1593 /* is egress halted on the context? */
1594 -#define egress_halted(r) \
1595 - ((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)
1596 +static bool egress_halted(u64 reg)
1597 +{
1598 + return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK);
1599 +}
1600
1601 -/* wait for packet egress, optionally pause for credit return */
1602 +/* is the send context halted? */
1603 +static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
1604 +{
1605 + return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) &
1606 + SC(STATUS_CTXT_HALTED_SMASK));
1607 +}
1608 +
1609 +/**
1610 + * sc_wait_for_packet_egress
1611 + * @sc: valid send context
1612 + * @pause: wait for credit return
1613 + *
1614 + * Wait for packet egress, optionally pause for credit return
1615 + *
1616 + * Egress halt and Context halt are not necessarily the same thing, so
1617 + * check for both.
1618 + *
1619 + * NOTE: The context halt bit may not be set immediately. Because of this,
1620 + * it is necessary to check the SW SFC_HALTED bit (set in the IRQ) and the HW
1621 + * context bit to determine if the context is halted.
1622 + */
1623 static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
1624 {
1625 struct hfi1_devdata *dd = sc->dd;
1626 @@ -997,8 +1020,9 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
1627 reg_prev = reg;
1628 reg = read_csr(dd, sc->hw_context * 8 +
1629 SEND_EGRESS_CTXT_STATUS);
1630 - /* done if egress is stopped */
1631 - if (egress_halted(reg))
1632 + /* done if any halt bits, SW or HW are set */
1633 + if (sc->flags & SCF_HALTED ||
1634 + is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
1635 break;
1636 reg = packet_occupancy(reg);
1637 if (reg == 0)
1638 diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
1639 index 0793a21d76f4..d604b3d5aa3e 100644
1640 --- a/drivers/infiniband/hw/mlx4/mad.c
1641 +++ b/drivers/infiniband/hw/mlx4/mad.c
1642 @@ -1934,7 +1934,6 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
1643 "buf:%lld\n", wc.wr_id);
1644 break;
1645 default:
1646 - BUG_ON(1);
1647 break;
1648 }
1649 } else {
1650 diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
1651 index e80a7f764a74..1587cedee13e 100644
1652 --- a/drivers/infiniband/hw/mlx4/mr.c
1653 +++ b/drivers/infiniband/hw/mlx4/mr.c
1654 @@ -131,6 +131,40 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
1655 return err;
1656 }
1657
1658 +static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
1659 + u64 length, u64 virt_addr,
1660 + int access_flags)
1661 +{
1662 + /*
1663 + * Force registering the memory as writable if the underlying pages
1664 + * are writable. This is so rereg can change the access permissions
1665 + * from readable to writable without having to run through ib_umem_get
1666 + * again
1667 + */
1668 + if (!ib_access_writable(access_flags)) {
1669 + struct vm_area_struct *vma;
1670 +
1671 + down_read(&current->mm->mmap_sem);
1672 + /*
1673 + * FIXME: Ideally this would iterate over all the vmas that
1674 + * cover the memory, but for now it requires a single vma to
1675 + * entirely cover the MR to support RO mappings.
1676 + */
1677 + vma = find_vma(current->mm, start);
1678 + if (vma && vma->vm_end >= start + length &&
1679 + vma->vm_start <= start) {
1680 + if (vma->vm_flags & VM_WRITE)
1681 + access_flags |= IB_ACCESS_LOCAL_WRITE;
1682 + } else {
1683 + access_flags |= IB_ACCESS_LOCAL_WRITE;
1684 + }
1685 +
1686 + up_read(&current->mm->mmap_sem);
1687 + }
1688 +
1689 + return ib_umem_get(context, start, length, access_flags, 0);
1690 +}
1691 +
1692 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1693 u64 virt_addr, int access_flags,
1694 struct ib_udata *udata)
1695 @@ -145,10 +179,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1696 if (!mr)
1697 return ERR_PTR(-ENOMEM);
1698
1699 - /* Force registering the memory as writable. */
1700 - /* Used for memory re-registeration. HCA protects the access */
1701 - mr->umem = ib_umem_get(pd->uobject->context, start, length,
1702 - access_flags | IB_ACCESS_LOCAL_WRITE, 0);
1703 + mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
1704 + virt_addr, access_flags);
1705 if (IS_ERR(mr->umem)) {
1706 err = PTR_ERR(mr->umem);
1707 goto err_free;
1708 @@ -215,6 +247,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
1709 }
1710
1711 if (flags & IB_MR_REREG_ACCESS) {
1712 + if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
1713 + return -EPERM;
1714 +
1715 err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
1716 convert_access(mr_access_flags));
1717
1718 @@ -228,10 +263,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
1719
1720 mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
1721 ib_umem_release(mmr->umem);
1722 - mmr->umem = ib_umem_get(mr->uobject->context, start, length,
1723 - mr_access_flags |
1724 - IB_ACCESS_LOCAL_WRITE,
1725 - 0);
1726 + mmr->umem =
1727 + mlx4_get_umem_mr(mr->uobject->context, start, length,
1728 + virt_addr, mr_access_flags);
1729 if (IS_ERR(mmr->umem)) {
1730 err = PTR_ERR(mmr->umem);
1731 /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
1732 diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
1733 index d804880d637a..be6612fc33ac 100644
1734 --- a/drivers/infiniband/hw/mlx5/cq.c
1735 +++ b/drivers/infiniband/hw/mlx5/cq.c
1736 @@ -646,7 +646,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
1737 }
1738
1739 static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
1740 - struct ib_wc *wc)
1741 + struct ib_wc *wc, bool is_fatal_err)
1742 {
1743 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
1744 struct mlx5_ib_wc *soft_wc, *next;
1745 @@ -659,6 +659,10 @@ static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
1746 mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
1747 cq->mcq.cqn);
1748
1749 + if (unlikely(is_fatal_err)) {
1750 + soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
1751 + soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
1752 + }
1753 wc[npolled++] = soft_wc->wc;
1754 list_del(&soft_wc->list);
1755 kfree(soft_wc);
1756 @@ -679,12 +683,17 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
1757
1758 spin_lock_irqsave(&cq->lock, flags);
1759 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
1760 - mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
1761 + /* make sure no soft wqe's are waiting */
1762 + if (unlikely(!list_empty(&cq->wc_list)))
1763 + soft_polled = poll_soft_wc(cq, num_entries, wc, true);
1764 +
1765 + mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
1766 + wc + soft_polled, &npolled);
1767 goto out;
1768 }
1769
1770 if (unlikely(!list_empty(&cq->wc_list)))
1771 - soft_polled = poll_soft_wc(cq, num_entries, wc);
1772 + soft_polled = poll_soft_wc(cq, num_entries, wc, false);
1773
1774 for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
1775 if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
1776 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
1777 index f9e1c69603a5..1dda4a2623c9 100644
1778 --- a/drivers/infiniband/hw/qib/qib.h
1779 +++ b/drivers/infiniband/hw/qib/qib.h
1780 @@ -1250,6 +1250,7 @@ static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
1781 #define QIB_BADINTR 0x8000 /* severe interrupt problems */
1782 #define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
1783 #define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
1784 +#define QIB_SHUTDOWN 0x40000 /* device is shutting down */
1785
1786 /*
1787 * values for ppd->lflags (_ib_port_ related flags)
1788 @@ -1448,8 +1449,7 @@ u64 qib_sps_ints(void);
1789 /*
1790 * dma_addr wrappers - all 0's invalid for hw
1791 */
1792 -dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
1793 - size_t, int);
1794 +int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
1795 const char *qib_get_unit_name(int unit);
1796 const char *qib_get_card_name(struct rvt_dev_info *rdi);
1797 struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
1798 diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
1799 index 9396c1807cc3..40efc9151ec4 100644
1800 --- a/drivers/infiniband/hw/qib/qib_file_ops.c
1801 +++ b/drivers/infiniband/hw/qib/qib_file_ops.c
1802 @@ -364,6 +364,8 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
1803 goto done;
1804 }
1805 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
1806 + dma_addr_t daddr;
1807 +
1808 for (; ntids--; tid++) {
1809 if (tid == tidcnt)
1810 tid = 0;
1811 @@ -380,12 +382,14 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
1812 ret = -ENOMEM;
1813 break;
1814 }
1815 + ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
1816 + if (ret)
1817 + break;
1818 +
1819 tidlist[i] = tid + tidoff;
1820 /* we "know" system pages and TID pages are same size */
1821 dd->pageshadow[ctxttid + tid] = pagep[i];
1822 - dd->physshadow[ctxttid + tid] =
1823 - qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
1824 - PCI_DMA_FROMDEVICE);
1825 + dd->physshadow[ctxttid + tid] = daddr;
1826 /*
1827 * don't need atomic or it's overhead
1828 */
1829 diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
1830 index c5a4c65636d6..7ba7d2122f3b 100644
1831 --- a/drivers/infiniband/hw/qib/qib_init.c
1832 +++ b/drivers/infiniband/hw/qib/qib_init.c
1833 @@ -850,6 +850,10 @@ static void qib_shutdown_device(struct qib_devdata *dd)
1834 struct qib_pportdata *ppd;
1835 unsigned pidx;
1836
1837 + if (dd->flags & QIB_SHUTDOWN)
1838 + return;
1839 + dd->flags |= QIB_SHUTDOWN;
1840 +
1841 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1842 ppd = dd->pport + pidx;
1843
1844 @@ -1189,6 +1193,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
1845
1846 static void qib_remove_one(struct pci_dev *);
1847 static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
1848 +static void qib_shutdown_one(struct pci_dev *);
1849
1850 #define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
1851 #define PFX QIB_DRV_NAME ": "
1852 @@ -1206,6 +1211,7 @@ static struct pci_driver qib_driver = {
1853 .name = QIB_DRV_NAME,
1854 .probe = qib_init_one,
1855 .remove = qib_remove_one,
1856 + .shutdown = qib_shutdown_one,
1857 .id_table = qib_pci_tbl,
1858 .err_handler = &qib_pci_err_handler,
1859 };
1860 @@ -1556,6 +1562,13 @@ static void qib_remove_one(struct pci_dev *pdev)
1861 qib_postinit_cleanup(dd);
1862 }
1863
1864 +static void qib_shutdown_one(struct pci_dev *pdev)
1865 +{
1866 + struct qib_devdata *dd = pci_get_drvdata(pdev);
1867 +
1868 + qib_shutdown_device(dd);
1869 +}
1870 +
1871 /**
1872 * qib_create_rcvhdrq - create a receive header queue
1873 * @dd: the qlogic_ib device
1874 diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
1875 index ce83ba9a12ef..16543d5e80c3 100644
1876 --- a/drivers/infiniband/hw/qib/qib_user_pages.c
1877 +++ b/drivers/infiniband/hw/qib/qib_user_pages.c
1878 @@ -99,23 +99,27 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
1879 *
1880 * I'm sure we won't be so lucky with other iommu's, so FIXME.
1881 */
1882 -dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
1883 - unsigned long offset, size_t size, int direction)
1884 +int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
1885 {
1886 dma_addr_t phys;
1887
1888 - phys = pci_map_page(hwdev, page, offset, size, direction);
1889 + phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
1890 + if (pci_dma_mapping_error(hwdev, phys))
1891 + return -ENOMEM;
1892
1893 - if (phys == 0) {
1894 - pci_unmap_page(hwdev, phys, size, direction);
1895 - phys = pci_map_page(hwdev, page, offset, size, direction);
1896 + if (!phys) {
1897 + pci_unmap_page(hwdev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
1898 + phys = pci_map_page(hwdev, page, 0, PAGE_SIZE,
1899 + PCI_DMA_FROMDEVICE);
1900 + if (pci_dma_mapping_error(hwdev, phys))
1901 + return -ENOMEM;
1902 /*
1903 * FIXME: If we get 0 again, we should keep this page,
1904 * map another, then free the 0 page.
1905 */
1906 }
1907 -
1908 - return phys;
1909 + *daddr = phys;
1910 + return 0;
1911 }
1912
1913 /**
1914 diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
1915 index 88fa4d44ab5f..76a86f805233 100644
1916 --- a/drivers/infiniband/sw/rdmavt/cq.c
1917 +++ b/drivers/infiniband/sw/rdmavt/cq.c
1918 @@ -121,17 +121,20 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
1919 if (cq->notify == IB_CQ_NEXT_COMP ||
1920 (cq->notify == IB_CQ_SOLICITED &&
1921 (solicited || entry->status != IB_WC_SUCCESS))) {
1922 + struct kthread_worker *worker;
1923 +
1924 /*
1925 * This will cause send_complete() to be called in
1926 * another thread.
1927 */
1928 - spin_lock(&cq->rdi->n_cqs_lock);
1929 - if (likely(cq->rdi->worker)) {
1930 + rcu_read_lock();
1931 + worker = rcu_dereference(cq->rdi->worker);
1932 + if (likely(worker)) {
1933 cq->notify = RVT_CQ_NONE;
1934 cq->triggered++;
1935 - kthread_queue_work(cq->rdi->worker, &cq->comptask);
1936 + kthread_queue_work(worker, &cq->comptask);
1937 }
1938 - spin_unlock(&cq->rdi->n_cqs_lock);
1939 + rcu_read_unlock();
1940 }
1941
1942 spin_unlock_irqrestore(&cq->lock, flags);
1943 @@ -513,7 +516,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
1944 int cpu;
1945 struct kthread_worker *worker;
1946
1947 - if (rdi->worker)
1948 + if (rcu_access_pointer(rdi->worker))
1949 return 0;
1950
1951 spin_lock_init(&rdi->n_cqs_lock);
1952 @@ -525,7 +528,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
1953 return PTR_ERR(worker);
1954
1955 set_user_nice(worker->task, MIN_NICE);
1956 - rdi->worker = worker;
1957 + RCU_INIT_POINTER(rdi->worker, worker);
1958 return 0;
1959 }
1960
1961 @@ -537,15 +540,19 @@ void rvt_cq_exit(struct rvt_dev_info *rdi)
1962 {
1963 struct kthread_worker *worker;
1964
1965 - /* block future queuing from send_complete() */
1966 - spin_lock_irq(&rdi->n_cqs_lock);
1967 - worker = rdi->worker;
1968 + if (!rcu_access_pointer(rdi->worker))
1969 + return;
1970 +
1971 + spin_lock(&rdi->n_cqs_lock);
1972 + worker = rcu_dereference_protected(rdi->worker,
1973 + lockdep_is_held(&rdi->n_cqs_lock));
1974 if (!worker) {
1975 - spin_unlock_irq(&rdi->n_cqs_lock);
1976 + spin_unlock(&rdi->n_cqs_lock);
1977 return;
1978 }
1979 - rdi->worker = NULL;
1980 - spin_unlock_irq(&rdi->n_cqs_lock);
1981 + RCU_INIT_POINTER(rdi->worker, NULL);
1982 + spin_unlock(&rdi->n_cqs_lock);
1983 + synchronize_rcu();
1984
1985 kthread_destroy_worker(worker);
1986 }
1987 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1988 index e770c17cbca9..ee3f630c9217 100644
1989 --- a/drivers/infiniband/ulp/isert/ib_isert.c
1990 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
1991 @@ -885,15 +885,9 @@ isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_des
1992 }
1993
1994 static void
1995 -isert_create_send_desc(struct isert_conn *isert_conn,
1996 - struct isert_cmd *isert_cmd,
1997 - struct iser_tx_desc *tx_desc)
1998 +__isert_create_send_desc(struct isert_device *device,
1999 + struct iser_tx_desc *tx_desc)
2000 {
2001 - struct isert_device *device = isert_conn->device;
2002 - struct ib_device *ib_dev = device->ib_device;
2003 -
2004 - ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
2005 - ISER_HEADERS_LEN, DMA_TO_DEVICE);
2006
2007 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
2008 tx_desc->iser_header.flags = ISCSI_CTRL;
2009 @@ -906,6 +900,20 @@ isert_create_send_desc(struct isert_conn *isert_conn,
2010 }
2011 }
2012
2013 +static void
2014 +isert_create_send_desc(struct isert_conn *isert_conn,
2015 + struct isert_cmd *isert_cmd,
2016 + struct iser_tx_desc *tx_desc)
2017 +{
2018 + struct isert_device *device = isert_conn->device;
2019 + struct ib_device *ib_dev = device->ib_device;
2020 +
2021 + ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
2022 + ISER_HEADERS_LEN, DMA_TO_DEVICE);
2023 +
2024 + __isert_create_send_desc(device, tx_desc);
2025 +}
2026 +
2027 static int
2028 isert_init_tx_hdrs(struct isert_conn *isert_conn,
2029 struct iser_tx_desc *tx_desc)
2030 @@ -993,7 +1001,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
2031 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
2032 int ret;
2033
2034 - isert_create_send_desc(isert_conn, NULL, tx_desc);
2035 + __isert_create_send_desc(device, tx_desc);
2036
2037 memcpy(&tx_desc->iscsi_header, &login->rsp[0],
2038 sizeof(struct iscsi_hdr));
2039 @@ -2108,7 +2116,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2040
2041 sig_attrs->check_mask =
2042 (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
2043 - (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
2044 + (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG ? 0x30 : 0) |
2045 (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
2046 return 0;
2047 }
2048 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
2049 index 466cef930bf1..53f775c41cd1 100644
2050 --- a/drivers/input/joystick/xpad.c
2051 +++ b/drivers/input/joystick/xpad.c
2052 @@ -126,7 +126,7 @@ static const struct xpad_device {
2053 u8 mapping;
2054 u8 xtype;
2055 } xpad_device[] = {
2056 - { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
2057 + { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
2058 { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
2059 { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
2060 { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
2061 diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
2062 index 599544c1a91c..243e0fa6e3e3 100644
2063 --- a/drivers/input/mouse/elan_i2c.h
2064 +++ b/drivers/input/mouse/elan_i2c.h
2065 @@ -27,6 +27,8 @@
2066 #define ETP_DISABLE_POWER 0x0001
2067 #define ETP_PRESSURE_OFFSET 25
2068
2069 +#define ETP_CALIBRATE_MAX_LEN 3
2070 +
2071 /* IAP Firmware handling */
2072 #define ETP_PRODUCT_ID_FORMAT_STRING "%d.0"
2073 #define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
2074 diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
2075 index c4926645c779..7b5fa501bbcf 100644
2076 --- a/drivers/input/mouse/elan_i2c_core.c
2077 +++ b/drivers/input/mouse/elan_i2c_core.c
2078 @@ -610,7 +610,7 @@ static ssize_t calibrate_store(struct device *dev,
2079 int tries = 20;
2080 int retval;
2081 int error;
2082 - u8 val[3];
2083 + u8 val[ETP_CALIBRATE_MAX_LEN];
2084
2085 retval = mutex_lock_interruptible(&data->sysfs_mutex);
2086 if (retval)
2087 @@ -1261,6 +1261,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
2088 { "ELAN060C", 0 },
2089 { "ELAN0611", 0 },
2090 { "ELAN0612", 0 },
2091 + { "ELAN0618", 0 },
2092 { "ELAN1000", 0 },
2093 { }
2094 };
2095 diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
2096 index cfcb32559925..c060d270bc4d 100644
2097 --- a/drivers/input/mouse/elan_i2c_smbus.c
2098 +++ b/drivers/input/mouse/elan_i2c_smbus.c
2099 @@ -56,7 +56,7 @@
2100 static int elan_smbus_initialize(struct i2c_client *client)
2101 {
2102 u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
2103 - u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
2104 + u8 values[I2C_SMBUS_BLOCK_MAX] = {0};
2105 int len, error;
2106
2107 /* Get hello packet */
2108 @@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client)
2109 static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
2110 {
2111 int error;
2112 + u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
2113 +
2114 + BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf));
2115
2116 error = i2c_smbus_read_block_data(client,
2117 - ETP_SMBUS_CALIBRATE_QUERY, val);
2118 + ETP_SMBUS_CALIBRATE_QUERY, buf);
2119 if (error < 0)
2120 return error;
2121
2122 + memcpy(val, buf, ETP_CALIBRATE_MAX_LEN);
2123 return 0;
2124 }
2125
2126 @@ -472,6 +476,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
2127 {
2128 int len;
2129
2130 + BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN);
2131 +
2132 len = i2c_smbus_read_block_data(client,
2133 ETP_SMBUS_PACKET_QUERY,
2134 &report[ETP_SMBUS_REPORT_OFFSET]);
2135 diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
2136 index a4aaa748e987..a250f433eb96 100644
2137 --- a/drivers/input/mouse/elantech.c
2138 +++ b/drivers/input/mouse/elantech.c
2139 @@ -804,7 +804,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
2140 else if (ic_version == 7 && etd->samples[1] == 0x2A)
2141 sanity_check = ((packet[3] & 0x1c) == 0x10);
2142 else
2143 - sanity_check = ((packet[0] & 0x0c) == 0x04 &&
2144 + sanity_check = ((packet[0] & 0x08) == 0x00 &&
2145 (packet[3] & 0x1c) == 0x10);
2146
2147 if (!sanity_check)
2148 @@ -1177,6 +1177,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
2149 { }
2150 };
2151
2152 +static const char * const middle_button_pnp_ids[] = {
2153 + "LEN2131", /* ThinkPad P52 w/ NFC */
2154 + "LEN2132", /* ThinkPad P52 */
2155 + NULL
2156 +};
2157 +
2158 /*
2159 * Set the appropriate event bits for the input subsystem
2160 */
2161 @@ -1196,7 +1202,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
2162 __clear_bit(EV_REL, dev->evbit);
2163
2164 __set_bit(BTN_LEFT, dev->keybit);
2165 - if (dmi_check_system(elantech_dmi_has_middle_button))
2166 + if (dmi_check_system(elantech_dmi_has_middle_button) ||
2167 + psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids))
2168 __set_bit(BTN_MIDDLE, dev->keybit);
2169 __set_bit(BTN_RIGHT, dev->keybit);
2170
2171 diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
2172 index 13f195c9743e..2ea39a83737f 100644
2173 --- a/drivers/irqchip/irq-gic-v3-its.c
2174 +++ b/drivers/irqchip/irq-gic-v3-its.c
2175 @@ -2221,7 +2221,14 @@ static void its_irq_domain_activate(struct irq_domain *domain,
2176 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
2177
2178 /* Bind the LPI to the first possible CPU */
2179 - cpu = cpumask_first(cpu_mask);
2180 + cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
2181 + if (cpu >= nr_cpu_ids) {
2182 + if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
2183 + return;
2184 +
2185 + cpu = cpumask_first(cpu_online_mask);
2186 + }
2187 +
2188 its_dev->event_map.col_map[event] = cpu;
2189 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2190
2191 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
2192 index 02e42ba2ecbc..72ae5dc50532 100644
2193 --- a/drivers/md/dm-thin.c
2194 +++ b/drivers/md/dm-thin.c
2195 @@ -1380,6 +1380,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
2196
2197 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
2198
2199 +static void requeue_bios(struct pool *pool);
2200 +
2201 static void check_for_space(struct pool *pool)
2202 {
2203 int r;
2204 @@ -1392,8 +1394,10 @@ static void check_for_space(struct pool *pool)
2205 if (r)
2206 return;
2207
2208 - if (nr_free)
2209 + if (nr_free) {
2210 set_pool_mode(pool, PM_WRITE);
2211 + requeue_bios(pool);
2212 + }
2213 }
2214
2215 /*
2216 @@ -1470,7 +1474,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
2217
2218 r = dm_pool_alloc_data_block(pool->pmd, result);
2219 if (r) {
2220 - metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
2221 + if (r == -ENOSPC)
2222 + set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
2223 + else
2224 + metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
2225 return r;
2226 }
2227
2228 diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
2229 index 6d7bda6f8190..ba6b0a90ecfb 100644
2230 --- a/drivers/md/dm-zoned-target.c
2231 +++ b/drivers/md/dm-zoned-target.c
2232 @@ -788,7 +788,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2233
2234 /* Chunk BIO work */
2235 mutex_init(&dmz->chunk_lock);
2236 - INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
2237 + INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
2238 dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
2239 0, dev->name);
2240 if (!dmz->chunk_wq) {
2241 diff --git a/drivers/md/md.c b/drivers/md/md.c
2242 index 24e64b04424a..7143c8b9284b 100644
2243 --- a/drivers/md/md.c
2244 +++ b/drivers/md/md.c
2245 @@ -2823,7 +2823,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
2246 err = 0;
2247 }
2248 } else if (cmd_match(buf, "re-add")) {
2249 - if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
2250 + if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
2251 + rdev->saved_raid_disk >= 0) {
2252 /* clear_bit is performed _after_ all the devices
2253 * have their local Faulty bit cleared. If any writes
2254 * happen in the meantime in the local node, they
2255 @@ -8594,6 +8595,7 @@ static int remove_and_add_spares(struct mddev *mddev,
2256 if (mddev->pers->hot_remove_disk(
2257 mddev, rdev) == 0) {
2258 sysfs_unlink_rdev(mddev, rdev);
2259 + rdev->saved_raid_disk = rdev->raid_disk;
2260 rdev->raid_disk = -1;
2261 removed++;
2262 }
2263 diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
2264 index 33d844fe2e70..f7d4ec37fdbc 100644
2265 --- a/drivers/media/dvb-core/dvb_frontend.c
2266 +++ b/drivers/media/dvb-core/dvb_frontend.c
2267 @@ -275,8 +275,20 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe,
2268 wake_up_interruptible (&events->wait_queue);
2269 }
2270
2271 +static int dvb_frontend_test_event(struct dvb_frontend_private *fepriv,
2272 + struct dvb_fe_events *events)
2273 +{
2274 + int ret;
2275 +
2276 + up(&fepriv->sem);
2277 + ret = events->eventw != events->eventr;
2278 + down(&fepriv->sem);
2279 +
2280 + return ret;
2281 +}
2282 +
2283 static int dvb_frontend_get_event(struct dvb_frontend *fe,
2284 - struct dvb_frontend_event *event, int flags)
2285 + struct dvb_frontend_event *event, int flags)
2286 {
2287 struct dvb_frontend_private *fepriv = fe->frontend_priv;
2288 struct dvb_fe_events *events = &fepriv->events;
2289 @@ -294,13 +306,8 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe,
2290 if (flags & O_NONBLOCK)
2291 return -EWOULDBLOCK;
2292
2293 - up(&fepriv->sem);
2294 -
2295 - ret = wait_event_interruptible (events->wait_queue,
2296 - events->eventw != events->eventr);
2297 -
2298 - if (down_interruptible (&fepriv->sem))
2299 - return -ERESTARTSYS;
2300 + ret = wait_event_interruptible(events->wait_queue,
2301 + dvb_frontend_test_event(fepriv, events));
2302
2303 if (ret < 0)
2304 return ret;
2305 diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
2306 index c2d3b8f0f487..93f69b3ac911 100644
2307 --- a/drivers/media/platform/vsp1/vsp1_video.c
2308 +++ b/drivers/media/platform/vsp1/vsp1_video.c
2309 @@ -849,9 +849,8 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
2310 return 0;
2311 }
2312
2313 -static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
2314 +static void vsp1_video_release_buffers(struct vsp1_video *video)
2315 {
2316 - struct vsp1_video *video = pipe->output->video;
2317 struct vsp1_vb2_buffer *buffer;
2318 unsigned long flags;
2319
2320 @@ -861,12 +860,18 @@ static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
2321 vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
2322 INIT_LIST_HEAD(&video->irqqueue);
2323 spin_unlock_irqrestore(&video->irqlock, flags);
2324 +}
2325 +
2326 +static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
2327 +{
2328 + lockdep_assert_held(&pipe->lock);
2329
2330 /* Release our partition table allocation */
2331 - mutex_lock(&pipe->lock);
2332 kfree(pipe->part_table);
2333 pipe->part_table = NULL;
2334 - mutex_unlock(&pipe->lock);
2335 +
2336 + vsp1_dl_list_put(pipe->dl);
2337 + pipe->dl = NULL;
2338 }
2339
2340 static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
2341 @@ -881,8 +886,9 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
2342 if (pipe->stream_count == pipe->num_inputs) {
2343 ret = vsp1_video_setup_pipeline(pipe);
2344 if (ret < 0) {
2345 - mutex_unlock(&pipe->lock);
2346 + vsp1_video_release_buffers(video);
2347 vsp1_video_cleanup_pipeline(pipe);
2348 + mutex_unlock(&pipe->lock);
2349 return ret;
2350 }
2351
2352 @@ -932,13 +938,12 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
2353 if (ret == -ETIMEDOUT)
2354 dev_err(video->vsp1->dev, "pipeline stop timeout\n");
2355
2356 - vsp1_dl_list_put(pipe->dl);
2357 - pipe->dl = NULL;
2358 + vsp1_video_cleanup_pipeline(pipe);
2359 }
2360 mutex_unlock(&pipe->lock);
2361
2362 media_pipeline_stop(&video->video.entity);
2363 - vsp1_video_cleanup_pipeline(pipe);
2364 + vsp1_video_release_buffers(video);
2365 vsp1_video_pipeline_put(pipe);
2366 }
2367
2368 diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
2369 index 9b742d569fb5..c30cb0fb165d 100644
2370 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c
2371 +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
2372 @@ -918,6 +918,9 @@ struct usb_device_id cx231xx_id_table[] = {
2373 .driver_info = CX231XX_BOARD_CNXT_RDE_250},
2374 {USB_DEVICE(0x0572, 0x58A0),
2375 .driver_info = CX231XX_BOARD_CNXT_RDU_250},
2376 + /* AverMedia DVD EZMaker 7 */
2377 + {USB_DEVICE(0x07ca, 0xc039),
2378 + .driver_info = CX231XX_BOARD_CNXT_VIDEO_GRABBER},
2379 {USB_DEVICE(0x2040, 0xb110),
2380 .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL},
2381 {USB_DEVICE(0x2040, 0xb111),
2382 diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
2383 index 6730fd08ef03..e159dfc21279 100644
2384 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
2385 +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
2386 @@ -871,7 +871,7 @@ static int put_v4l2_ext_controls32(struct file *file,
2387 get_user(kcontrols, &kp->controls))
2388 return -EFAULT;
2389
2390 - if (!count)
2391 + if (!count || count > (U32_MAX/sizeof(*ucontrols)))
2392 return 0;
2393 if (get_user(p, &up->controls))
2394 return -EFAULT;
2395 diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
2396 index d1c46de89eb4..d9ae983095c5 100644
2397 --- a/drivers/mfd/intel-lpss-pci.c
2398 +++ b/drivers/mfd/intel-lpss-pci.c
2399 @@ -124,6 +124,11 @@ static const struct intel_lpss_platform_info apl_i2c_info = {
2400 .properties = apl_i2c_properties,
2401 };
2402
2403 +static const struct intel_lpss_platform_info cnl_i2c_info = {
2404 + .clk_rate = 216000000,
2405 + .properties = spt_i2c_properties,
2406 +};
2407 +
2408 static const struct pci_device_id intel_lpss_pci_ids[] = {
2409 /* BXT A-Step */
2410 { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
2411 @@ -207,13 +212,13 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
2412 { PCI_VDEVICE(INTEL, 0x9daa), (kernel_ulong_t)&spt_info },
2413 { PCI_VDEVICE(INTEL, 0x9dab), (kernel_ulong_t)&spt_info },
2414 { PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&spt_info },
2415 - { PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&spt_i2c_info },
2416 - { PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&spt_i2c_info },
2417 + { PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&cnl_i2c_info },
2418 + { PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&cnl_i2c_info },
2419 { PCI_VDEVICE(INTEL, 0x9dc7), (kernel_ulong_t)&spt_uart_info },
2420 - { PCI_VDEVICE(INTEL, 0x9de8), (kernel_ulong_t)&spt_i2c_info },
2421 - { PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&spt_i2c_info },
2422 - { PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&spt_i2c_info },
2423 - { PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&spt_i2c_info },
2424 + { PCI_VDEVICE(INTEL, 0x9de8), (kernel_ulong_t)&cnl_i2c_info },
2425 + { PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&cnl_i2c_info },
2426 + { PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&cnl_i2c_info },
2427 + { PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&cnl_i2c_info },
2428 /* SPT-H */
2429 { PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info },
2430 { PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info },
2431 @@ -240,10 +245,10 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
2432 { PCI_VDEVICE(INTEL, 0xa32b), (kernel_ulong_t)&spt_info },
2433 { PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&spt_info },
2434 { PCI_VDEVICE(INTEL, 0xa347), (kernel_ulong_t)&spt_uart_info },
2435 - { PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&spt_i2c_info },
2436 - { PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&spt_i2c_info },
2437 - { PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&spt_i2c_info },
2438 - { PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&spt_i2c_info },
2439 + { PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&cnl_i2c_info },
2440 + { PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&cnl_i2c_info },
2441 + { PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&cnl_i2c_info },
2442 + { PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&cnl_i2c_info },
2443 { }
2444 };
2445 MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
2446 diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
2447 index 0e0ab9bb1530..40e8d9b59d07 100644
2448 --- a/drivers/mfd/intel-lpss.c
2449 +++ b/drivers/mfd/intel-lpss.c
2450 @@ -275,11 +275,11 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss)
2451
2452 intel_lpss_deassert_reset(lpss);
2453
2454 + intel_lpss_set_remap_addr(lpss);
2455 +
2456 if (!intel_lpss_has_idma(lpss))
2457 return;
2458
2459 - intel_lpss_set_remap_addr(lpss);
2460 -
2461 /* Make sure that SPI multiblock DMA transfers are re-enabled */
2462 if (lpss->type == LPSS_DEV_SPI)
2463 writel(value, lpss->priv + LPSS_PRIV_SSP_REG);
2464 diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
2465 index a8b6d6a635e9..393a80bdb846 100644
2466 --- a/drivers/misc/cxl/sysfs.c
2467 +++ b/drivers/misc/cxl/sysfs.c
2468 @@ -331,12 +331,20 @@ static ssize_t prefault_mode_store(struct device *device,
2469 struct cxl_afu *afu = to_cxl_afu(device);
2470 enum prefault_modes mode = -1;
2471
2472 - if (!strncmp(buf, "work_element_descriptor", 23))
2473 - mode = CXL_PREFAULT_WED;
2474 - if (!strncmp(buf, "all", 3))
2475 - mode = CXL_PREFAULT_ALL;
2476 if (!strncmp(buf, "none", 4))
2477 mode = CXL_PREFAULT_NONE;
2478 + else {
2479 + if (!radix_enabled()) {
2480 +
2481 + /* only allowed when not in radix mode */
2482 + if (!strncmp(buf, "work_element_descriptor", 23))
2483 + mode = CXL_PREFAULT_WED;
2484 + if (!strncmp(buf, "all", 3))
2485 + mode = CXL_PREFAULT_ALL;
2486 + } else {
2487 + dev_err(device, "Cannot prefault with radix enabled\n");
2488 + }
2489 + }
2490
2491 if (mode == -1)
2492 return -EINVAL;
2493 diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
2494 index d524a64ed754..ac76c10c042f 100644
2495 --- a/drivers/mtd/chips/cfi_cmdset_0002.c
2496 +++ b/drivers/mtd/chips/cfi_cmdset_0002.c
2497 @@ -1880,7 +1880,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
2498 if (time_after(jiffies, timeo) && !chip_ready(map, adr))
2499 break;
2500
2501 - if (chip_ready(map, adr)) {
2502 + if (chip_good(map, adr, datum)) {
2503 xip_enable(map, chip, adr);
2504 goto op_done;
2505 }
2506 @@ -2535,7 +2535,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2507
2508 struct ppb_lock {
2509 struct flchip *chip;
2510 - loff_t offset;
2511 + unsigned long adr;
2512 int locked;
2513 };
2514
2515 @@ -2553,8 +2553,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2516 unsigned long timeo;
2517 int ret;
2518
2519 + adr += chip->start;
2520 mutex_lock(&chip->mutex);
2521 - ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2522 + ret = get_chip(map, chip, adr, FL_LOCKING);
2523 if (ret) {
2524 mutex_unlock(&chip->mutex);
2525 return ret;
2526 @@ -2572,8 +2573,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2527
2528 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2529 chip->state = FL_LOCKING;
2530 - map_write(map, CMD(0xA0), chip->start + adr);
2531 - map_write(map, CMD(0x00), chip->start + adr);
2532 + map_write(map, CMD(0xA0), adr);
2533 + map_write(map, CMD(0x00), adr);
2534 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2535 /*
2536 * Unlocking of one specific sector is not supported, so we
2537 @@ -2611,7 +2612,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2538 map_write(map, CMD(0x00), chip->start);
2539
2540 chip->state = FL_READY;
2541 - put_chip(map, chip, adr + chip->start);
2542 + put_chip(map, chip, adr);
2543 mutex_unlock(&chip->mutex);
2544
2545 return ret;
2546 @@ -2668,9 +2669,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2547 * sectors shall be unlocked, so lets keep their locking
2548 * status at "unlocked" (locked=0) for the final re-locking.
2549 */
2550 - if ((adr < ofs) || (adr >= (ofs + len))) {
2551 + if ((offset < ofs) || (offset >= (ofs + len))) {
2552 sect[sectors].chip = &cfi->chips[chipnum];
2553 - sect[sectors].offset = offset;
2554 + sect[sectors].adr = adr;
2555 sect[sectors].locked = do_ppb_xxlock(
2556 map, &cfi->chips[chipnum], adr, 0,
2557 DO_XXLOCK_ONEBLOCK_GETLOCK);
2558 @@ -2684,6 +2685,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2559 i++;
2560
2561 if (adr >> cfi->chipshift) {
2562 + if (offset >= (ofs + len))
2563 + break;
2564 adr = 0;
2565 chipnum++;
2566
2567 @@ -2714,7 +2717,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2568 */
2569 for (i = 0; i < sectors; i++) {
2570 if (sect[i].locked)
2571 - do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
2572 + do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
2573 DO_XXLOCK_ONEBLOCK_LOCK);
2574 }
2575
2576 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
2577 index defb1cd8d2e1..18a72da759a0 100644
2578 --- a/drivers/mtd/ubi/build.c
2579 +++ b/drivers/mtd/ubi/build.c
2580 @@ -1082,6 +1082,9 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
2581 if (ubi->bgt_thread)
2582 kthread_stop(ubi->bgt_thread);
2583
2584 +#ifdef CONFIG_MTD_UBI_FASTMAP
2585 + cancel_work_sync(&ubi->fm_work);
2586 +#endif
2587 ubi_debugfs_exit_dev(ubi);
2588 uif_close(ubi);
2589
2590 diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
2591 index 388e46be6ad9..d0884bd9d955 100644
2592 --- a/drivers/mtd/ubi/eba.c
2593 +++ b/drivers/mtd/ubi/eba.c
2594 @@ -490,6 +490,82 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
2595 return err;
2596 }
2597
2598 +#ifdef CONFIG_MTD_UBI_FASTMAP
2599 +/**
2600 + * check_mapping - check and fixup a mapping
2601 + * @ubi: UBI device description object
2602 + * @vol: volume description object
2603 + * @lnum: logical eraseblock number
2604 + * @pnum: physical eraseblock number
2605 + *
2606 + * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
2607 + * operations, if such an operation is interrupted the mapping still looks
2608 + * good, but upon first read an ECC is reported to the upper layer.
2609 + * Normaly during the full-scan at attach time this is fixed, for Fastmap
2610 + * we have to deal with it while reading.
2611 + * If the PEB behind a LEB shows this symthom we change the mapping to
2612 + * %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
2613 + *
2614 + * Returns 0 on success, negative error code in case of failure.
2615 + */
2616 +static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
2617 + int *pnum)
2618 +{
2619 + int err;
2620 + struct ubi_vid_io_buf *vidb;
2621 +
2622 + if (!ubi->fast_attach)
2623 + return 0;
2624 +
2625 + vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
2626 + if (!vidb)
2627 + return -ENOMEM;
2628 +
2629 + err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
2630 + if (err > 0 && err != UBI_IO_BITFLIPS) {
2631 + int torture = 0;
2632 +
2633 + switch (err) {
2634 + case UBI_IO_FF:
2635 + case UBI_IO_FF_BITFLIPS:
2636 + case UBI_IO_BAD_HDR:
2637 + case UBI_IO_BAD_HDR_EBADMSG:
2638 + break;
2639 + default:
2640 + ubi_assert(0);
2641 + }
2642 +
2643 + if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
2644 + torture = 1;
2645 +
2646 + down_read(&ubi->fm_eba_sem);
2647 + vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
2648 + up_read(&ubi->fm_eba_sem);
2649 + ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
2650 +
2651 + *pnum = UBI_LEB_UNMAPPED;
2652 + } else if (err < 0) {
2653 + ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
2654 + *pnum, err);
2655 +
2656 + goto out_free;
2657 + }
2658 +
2659 + err = 0;
2660 +
2661 +out_free:
2662 + ubi_free_vid_buf(vidb);
2663 +
2664 + return err;
2665 +}
2666 +#else
2667 +static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
2668 + int *pnum)
2669 +{
2670 + return 0;
2671 +}
2672 +#endif
2673 +
2674 /**
2675 * ubi_eba_read_leb - read data.
2676 * @ubi: UBI device description object
2677 @@ -522,7 +598,13 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
2678 return err;
2679
2680 pnum = vol->eba_tbl->entries[lnum].pnum;
2681 - if (pnum < 0) {
2682 + if (pnum >= 0) {
2683 + err = check_mapping(ubi, vol, lnum, &pnum);
2684 + if (err < 0)
2685 + goto out_unlock;
2686 + }
2687 +
2688 + if (pnum == UBI_LEB_UNMAPPED) {
2689 /*
2690 * The logical eraseblock is not mapped, fill the whole buffer
2691 * with 0xFF bytes. The exception is static volumes for which
2692 @@ -930,6 +1012,12 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
2693 return err;
2694
2695 pnum = vol->eba_tbl->entries[lnum].pnum;
2696 + if (pnum >= 0) {
2697 + err = check_mapping(ubi, vol, lnum, &pnum);
2698 + if (err < 0)
2699 + goto out;
2700 + }
2701 +
2702 if (pnum >= 0) {
2703 dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
2704 len, offset, vol_id, lnum, pnum);
2705 diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
2706 index 668b46202507..23a6986d512b 100644
2707 --- a/drivers/mtd/ubi/wl.c
2708 +++ b/drivers/mtd/ubi/wl.c
2709 @@ -1505,6 +1505,7 @@ int ubi_thread(void *u)
2710 }
2711
2712 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
2713 + ubi->thread_enabled = 0;
2714 return 0;
2715 }
2716
2717 @@ -1514,9 +1515,6 @@ int ubi_thread(void *u)
2718 */
2719 static void shutdown_work(struct ubi_device *ubi)
2720 {
2721 -#ifdef CONFIG_MTD_UBI_FASTMAP
2722 - flush_work(&ubi->fm_work);
2723 -#endif
2724 while (!list_empty(&ubi->works)) {
2725 struct ubi_work *wrk;
2726
2727 diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
2728 index baf283986a7e..2fffd42767c7 100644
2729 --- a/drivers/nvdimm/bus.c
2730 +++ b/drivers/nvdimm/bus.c
2731 @@ -565,14 +565,18 @@ int nvdimm_revalidate_disk(struct gendisk *disk)
2732 {
2733 struct device *dev = disk_to_dev(disk)->parent;
2734 struct nd_region *nd_region = to_nd_region(dev->parent);
2735 - const char *pol = nd_region->ro ? "only" : "write";
2736 + int disk_ro = get_disk_ro(disk);
2737
2738 - if (nd_region->ro == get_disk_ro(disk))
2739 + /*
2740 + * Upgrade to read-only if the region is read-only preserve as
2741 + * read-only if the disk is already read-only.
2742 + */
2743 + if (disk_ro || nd_region->ro == disk_ro)
2744 return 0;
2745
2746 - dev_info(dev, "%s read-%s, marking %s read-%s\n",
2747 - dev_name(&nd_region->dev), pol, disk->disk_name, pol);
2748 - set_disk_ro(disk, nd_region->ro);
2749 + dev_info(dev, "%s read-only, marking %s read-only\n",
2750 + dev_name(&nd_region->dev), disk->disk_name);
2751 + set_disk_ro(disk, 1);
2752
2753 return 0;
2754
2755 diff --git a/drivers/of/platform.c b/drivers/of/platform.c
2756 index ac15d0e3d27d..0f49718c6012 100644
2757 --- a/drivers/of/platform.c
2758 +++ b/drivers/of/platform.c
2759 @@ -533,6 +533,9 @@ int of_platform_device_destroy(struct device *dev, void *data)
2760 if (of_node_check_flag(dev->of_node, OF_POPULATED_BUS))
2761 device_for_each_child(dev, NULL, of_platform_device_destroy);
2762
2763 + of_node_clear_flag(dev->of_node, OF_POPULATED);
2764 + of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
2765 +
2766 if (dev->bus == &platform_bus_type)
2767 platform_device_unregister(to_platform_device(dev));
2768 #ifdef CONFIG_ARM_AMBA
2769 @@ -540,8 +543,6 @@ int of_platform_device_destroy(struct device *dev, void *data)
2770 amba_device_unregister(to_amba_device(dev));
2771 #endif
2772
2773 - of_node_clear_flag(dev->of_node, OF_POPULATED);
2774 - of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
2775 return 0;
2776 }
2777 EXPORT_SYMBOL_GPL(of_platform_device_destroy);
2778 diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
2779 index 99309cb7d372..3bf4b6489fd0 100644
2780 --- a/drivers/of/resolver.c
2781 +++ b/drivers/of/resolver.c
2782 @@ -129,6 +129,11 @@ static int update_usages_of_a_phandle_reference(struct device_node *overlay,
2783 goto err_fail;
2784 }
2785
2786 + if (offset < 0 || offset + sizeof(__be32) > prop->length) {
2787 + err = -EINVAL;
2788 + goto err_fail;
2789 + }
2790 +
2791 *(__be32 *)(prop->value + offset) = cpu_to_be32(phandle);
2792 }
2793
2794 diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
2795 index 168ef0bbabde..985a85f281a8 100644
2796 --- a/drivers/of/unittest.c
2797 +++ b/drivers/of/unittest.c
2798 @@ -164,20 +164,20 @@ static void __init of_unittest_dynamic(void)
2799 /* Add a new property - should pass*/
2800 prop->name = "new-property";
2801 prop->value = "new-property-data";
2802 - prop->length = strlen(prop->value);
2803 + prop->length = strlen(prop->value) + 1;
2804 unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n");
2805
2806 /* Try to add an existing property - should fail */
2807 prop++;
2808 prop->name = "new-property";
2809 prop->value = "new-property-data-should-fail";
2810 - prop->length = strlen(prop->value);
2811 + prop->length = strlen(prop->value) + 1;
2812 unittest(of_add_property(np, prop) != 0,
2813 "Adding an existing property should have failed\n");
2814
2815 /* Try to modify an existing property - should pass */
2816 prop->value = "modify-property-data-should-pass";
2817 - prop->length = strlen(prop->value);
2818 + prop->length = strlen(prop->value) + 1;
2819 unittest(of_update_property(np, prop) == 0,
2820 "Updating an existing property should have passed\n");
2821
2822 @@ -185,7 +185,7 @@ static void __init of_unittest_dynamic(void)
2823 prop++;
2824 prop->name = "modify-property";
2825 prop->value = "modify-missing-property-data-should-pass";
2826 - prop->length = strlen(prop->value);
2827 + prop->length = strlen(prop->value) + 1;
2828 unittest(of_update_property(np, prop) == 0,
2829 "Updating a missing property should have passed\n");
2830
2831 diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
2832 index 0b750228ad70..caea7c618207 100644
2833 --- a/drivers/pci/host/pci-hyperv.c
2834 +++ b/drivers/pci/host/pci-hyperv.c
2835 @@ -1610,17 +1610,6 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
2836 get_pcichild(hpdev, hv_pcidev_ref_childlist);
2837 spin_lock_irqsave(&hbus->device_list_lock, flags);
2838
2839 - /*
2840 - * When a device is being added to the bus, we set the PCI domain
2841 - * number to be the device serial number, which is non-zero and
2842 - * unique on the same VM. The serial numbers start with 1, and
2843 - * increase by 1 for each device. So device names including this
2844 - * can have shorter names than based on the bus instance UUID.
2845 - * Only the first device serial number is used for domain, so the
2846 - * domain number will not change after the first device is added.
2847 - */
2848 - if (list_empty(&hbus->children))
2849 - hbus->sysdata.domain = desc->ser;
2850 list_add_tail(&hpdev->list_entry, &hbus->children);
2851 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2852 return hpdev;
2853 diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
2854 index 06109d40c4ac..e7d6cfaf3865 100644
2855 --- a/drivers/pci/hotplug/pciehp.h
2856 +++ b/drivers/pci/hotplug/pciehp.h
2857 @@ -134,7 +134,7 @@ struct controller *pcie_init(struct pcie_device *dev);
2858 int pcie_init_notification(struct controller *ctrl);
2859 int pciehp_enable_slot(struct slot *p_slot);
2860 int pciehp_disable_slot(struct slot *p_slot);
2861 -void pcie_enable_notification(struct controller *ctrl);
2862 +void pcie_reenable_notification(struct controller *ctrl);
2863 int pciehp_power_on_slot(struct slot *slot);
2864 void pciehp_power_off_slot(struct slot *slot);
2865 void pciehp_get_power_status(struct slot *slot, u8 *status);
2866 diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
2867 index 35d84845d5af..1288289cc85d 100644
2868 --- a/drivers/pci/hotplug/pciehp_core.c
2869 +++ b/drivers/pci/hotplug/pciehp_core.c
2870 @@ -297,7 +297,7 @@ static int pciehp_resume(struct pcie_device *dev)
2871 ctrl = get_service_data(dev);
2872
2873 /* reinitialize the chipset's event detection logic */
2874 - pcie_enable_notification(ctrl);
2875 + pcie_reenable_notification(ctrl);
2876
2877 slot = ctrl->slot;
2878
2879 diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
2880 index e5d5ce9e3010..05832b597e53 100644
2881 --- a/drivers/pci/hotplug/pciehp_hpc.c
2882 +++ b/drivers/pci/hotplug/pciehp_hpc.c
2883 @@ -676,7 +676,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
2884 return handled;
2885 }
2886
2887 -void pcie_enable_notification(struct controller *ctrl)
2888 +static void pcie_enable_notification(struct controller *ctrl)
2889 {
2890 u16 cmd, mask;
2891
2892 @@ -714,6 +714,17 @@ void pcie_enable_notification(struct controller *ctrl)
2893 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
2894 }
2895
2896 +void pcie_reenable_notification(struct controller *ctrl)
2897 +{
2898 + /*
2899 + * Clear both Presence and Data Link Layer Changed to make sure
2900 + * those events still fire after we have re-enabled them.
2901 + */
2902 + pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
2903 + PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
2904 + pcie_enable_notification(ctrl);
2905 +}
2906 +
2907 static void pcie_disable_notification(struct controller *ctrl)
2908 {
2909 u16 mask;
2910 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2911 index 929d68f744af..ec2911c4ee42 100644
2912 --- a/drivers/pci/quirks.c
2913 +++ b/drivers/pci/quirks.c
2914 @@ -4377,11 +4377,29 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
2915 * 0xa290-0xa29f PCI Express Root port #{0-16}
2916 * 0xa2e7-0xa2ee PCI Express Root port #{17-24}
2917 *
2918 + * Mobile chipsets are also affected, 7th & 8th Generation
2919 + * Specification update confirms ACS errata 22, status no fix: (7th Generation
2920 + * Intel Processor Family I/O for U/Y Platforms and 8th Generation Intel
2921 + * Processor Family I/O for U Quad Core Platforms Specification Update,
2922 + * August 2017, Revision 002, Document#: 334660-002)[6]
2923 + * Device IDs from I/O datasheet: (7th Generation Intel Processor Family I/O
2924 + * for U/Y Platforms and 8th Generation Intel ® Processor Family I/O for U
2925 + * Quad Core Platforms, Vol 1 of 2, August 2017, Document#: 334658-003)[7]
2926 + *
2927 + * 0x9d10-0x9d1b PCI Express Root port #{1-12}
2928 + *
2929 + * The 300 series chipset suffers from the same bug so include those root
2930 + * ports here as well.
2931 + *
2932 + * 0xa32c-0xa343 PCI Express Root port #{0-24}
2933 + *
2934 * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
2935 * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
2936 * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
2937 * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
2938 * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
2939 + * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html
2940 + * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html
2941 */
2942 static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
2943 {
2944 @@ -4391,6 +4409,8 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
2945 switch (dev->device) {
2946 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
2947 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
2948 + case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
2949 + case 0xa32c ... 0xa343: /* 300 series */
2950 return true;
2951 }
2952
2953 diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
2954 index b601039d6c69..c4aa411f5935 100644
2955 --- a/drivers/pinctrl/devicetree.c
2956 +++ b/drivers/pinctrl/devicetree.c
2957 @@ -101,10 +101,11 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
2958 }
2959
2960 static int dt_to_map_one_config(struct pinctrl *p,
2961 - struct pinctrl_dev *pctldev,
2962 + struct pinctrl_dev *hog_pctldev,
2963 const char *statename,
2964 struct device_node *np_config)
2965 {
2966 + struct pinctrl_dev *pctldev = NULL;
2967 struct device_node *np_pctldev;
2968 const struct pinctrl_ops *ops;
2969 int ret;
2970 @@ -123,8 +124,10 @@ static int dt_to_map_one_config(struct pinctrl *p,
2971 return -EPROBE_DEFER;
2972 }
2973 /* If we're creating a hog we can use the passed pctldev */
2974 - if (pctldev && (np_pctldev == p->dev->of_node))
2975 + if (hog_pctldev && (np_pctldev == p->dev->of_node)) {
2976 + pctldev = hog_pctldev;
2977 break;
2978 + }
2979 pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
2980 if (pctldev)
2981 break;
2982 diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
2983 index 92aeea174a56..afeb4876ffb2 100644
2984 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
2985 +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
2986 @@ -110,12 +110,12 @@ static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {
2987 EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38),
2988 EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c),
2989 EXYNOS_PIN_BANK_EINTG(7, 0x200, "gpg3", 0x40),
2990 - EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"),
2991 EXYNOS_PIN_BANK_EINTG(8, 0x240, "gpj0", 0x44),
2992 EXYNOS_PIN_BANK_EINTG(6, 0x260, "gpj1", 0x48),
2993 EXYNOS_PIN_BANK_EINTG(8, 0x280, "gpj2", 0x4c),
2994 EXYNOS_PIN_BANK_EINTG(8, 0x2a0, "gpj3", 0x50),
2995 EXYNOS_PIN_BANK_EINTG(5, 0x2c0, "gpj4", 0x54),
2996 + EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"),
2997 EXYNOS_PIN_BANK_EINTN(8, 0x2e0, "mp01"),
2998 EXYNOS_PIN_BANK_EINTN(4, 0x300, "mp02"),
2999 EXYNOS_PIN_BANK_EINTN(8, 0x320, "mp03"),
3000 @@ -635,7 +635,6 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst =
3001 EXYNOS_PIN_BANK_EINTG(4, 0x100, "gpc3", 0x20),
3002 EXYNOS_PIN_BANK_EINTG(7, 0x120, "gpc1", 0x24),
3003 EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpc2", 0x28),
3004 - EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"),
3005 EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpd1", 0x2c),
3006 EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpe0", 0x30),
3007 EXYNOS_PIN_BANK_EINTG(2, 0x1C0, "gpe1", 0x34),
3008 @@ -646,6 +645,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst =
3009 EXYNOS_PIN_BANK_EINTG(2, 0x260, "gpg2", 0x48),
3010 EXYNOS_PIN_BANK_EINTG(4, 0x280, "gph0", 0x4c),
3011 EXYNOS_PIN_BANK_EINTG(8, 0x2A0, "gph1", 0x50),
3012 + EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"),
3013 EXYNOS_PIN_BANK_EINTN(8, 0x2C0, "gpm7"),
3014 EXYNOS_PIN_BANK_EINTN(6, 0x2E0, "gpy0"),
3015 EXYNOS_PIN_BANK_EINTN(4, 0x300, "gpy1"),
3016 diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
3017 index 5d6ed1507d29..5561b9e190f8 100644
3018 --- a/drivers/pwm/pwm-lpss-platform.c
3019 +++ b/drivers/pwm/pwm-lpss-platform.c
3020 @@ -74,6 +74,10 @@ static int pwm_lpss_remove_platform(struct platform_device *pdev)
3021 return pwm_lpss_remove(lpwm);
3022 }
3023
3024 +static SIMPLE_DEV_PM_OPS(pwm_lpss_platform_pm_ops,
3025 + pwm_lpss_suspend,
3026 + pwm_lpss_resume);
3027 +
3028 static const struct acpi_device_id pwm_lpss_acpi_match[] = {
3029 { "80860F09", (unsigned long)&pwm_lpss_byt_info },
3030 { "80862288", (unsigned long)&pwm_lpss_bsw_info },
3031 @@ -86,6 +90,7 @@ static struct platform_driver pwm_lpss_driver_platform = {
3032 .driver = {
3033 .name = "pwm-lpss",
3034 .acpi_match_table = pwm_lpss_acpi_match,
3035 + .pm = &pwm_lpss_platform_pm_ops,
3036 },
3037 .probe = pwm_lpss_probe_platform,
3038 .remove = pwm_lpss_remove_platform,
3039 diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
3040 index 8db0d40ccacd..4721a264bac2 100644
3041 --- a/drivers/pwm/pwm-lpss.c
3042 +++ b/drivers/pwm/pwm-lpss.c
3043 @@ -32,10 +32,13 @@
3044 /* Size of each PWM register space if multiple */
3045 #define PWM_SIZE 0x400
3046
3047 +#define MAX_PWMS 4
3048 +
3049 struct pwm_lpss_chip {
3050 struct pwm_chip chip;
3051 void __iomem *regs;
3052 const struct pwm_lpss_boardinfo *info;
3053 + u32 saved_ctrl[MAX_PWMS];
3054 };
3055
3056 static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip)
3057 @@ -177,6 +180,9 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
3058 unsigned long c;
3059 int ret;
3060
3061 + if (WARN_ON(info->npwm > MAX_PWMS))
3062 + return ERR_PTR(-ENODEV);
3063 +
3064 lpwm = devm_kzalloc(dev, sizeof(*lpwm), GFP_KERNEL);
3065 if (!lpwm)
3066 return ERR_PTR(-ENOMEM);
3067 @@ -212,6 +218,30 @@ int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
3068 }
3069 EXPORT_SYMBOL_GPL(pwm_lpss_remove);
3070
3071 +int pwm_lpss_suspend(struct device *dev)
3072 +{
3073 + struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
3074 + int i;
3075 +
3076 + for (i = 0; i < lpwm->info->npwm; i++)
3077 + lpwm->saved_ctrl[i] = readl(lpwm->regs + i * PWM_SIZE + PWM);
3078 +
3079 + return 0;
3080 +}
3081 +EXPORT_SYMBOL_GPL(pwm_lpss_suspend);
3082 +
3083 +int pwm_lpss_resume(struct device *dev)
3084 +{
3085 + struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
3086 + int i;
3087 +
3088 + for (i = 0; i < lpwm->info->npwm; i++)
3089 + writel(lpwm->saved_ctrl[i], lpwm->regs + i * PWM_SIZE + PWM);
3090 +
3091 + return 0;
3092 +}
3093 +EXPORT_SYMBOL_GPL(pwm_lpss_resume);
3094 +
3095 MODULE_DESCRIPTION("PWM driver for Intel LPSS");
3096 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
3097 MODULE_LICENSE("GPL v2");
3098 diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
3099 index 98306bb02cfe..7a4238ad1fcb 100644
3100 --- a/drivers/pwm/pwm-lpss.h
3101 +++ b/drivers/pwm/pwm-lpss.h
3102 @@ -28,5 +28,7 @@ struct pwm_lpss_boardinfo {
3103 struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
3104 const struct pwm_lpss_boardinfo *info);
3105 int pwm_lpss_remove(struct pwm_lpss_chip *lpwm);
3106 +int pwm_lpss_suspend(struct device *dev);
3107 +int pwm_lpss_resume(struct device *dev);
3108
3109 #endif /* __PWM_LPSS_H */
3110 diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
3111 index b01774e9fac0..f1a2147a6d84 100644
3112 --- a/drivers/rpmsg/qcom_smd.c
3113 +++ b/drivers/rpmsg/qcom_smd.c
3114 @@ -1043,12 +1043,12 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
3115 void *info;
3116 int ret;
3117
3118 - channel = devm_kzalloc(&edge->dev, sizeof(*channel), GFP_KERNEL);
3119 + channel = kzalloc(sizeof(*channel), GFP_KERNEL);
3120 if (!channel)
3121 return ERR_PTR(-ENOMEM);
3122
3123 channel->edge = edge;
3124 - channel->name = devm_kstrdup(&edge->dev, name, GFP_KERNEL);
3125 + channel->name = kstrdup(name, GFP_KERNEL);
3126 if (!channel->name)
3127 return ERR_PTR(-ENOMEM);
3128
3129 @@ -1098,8 +1098,8 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
3130 return channel;
3131
3132 free_name_and_channel:
3133 - devm_kfree(&edge->dev, channel->name);
3134 - devm_kfree(&edge->dev, channel);
3135 + kfree(channel->name);
3136 + kfree(channel);
3137
3138 return ERR_PTR(ret);
3139 }
3140 @@ -1320,13 +1320,13 @@ static int qcom_smd_parse_edge(struct device *dev,
3141 */
3142 static void qcom_smd_edge_release(struct device *dev)
3143 {
3144 - struct qcom_smd_channel *channel;
3145 + struct qcom_smd_channel *channel, *tmp;
3146 struct qcom_smd_edge *edge = to_smd_edge(dev);
3147
3148 - list_for_each_entry(channel, &edge->channels, list) {
3149 - SET_RX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
3150 - SET_RX_CHANNEL_INFO(channel, head, 0);
3151 - SET_RX_CHANNEL_INFO(channel, tail, 0);
3152 + list_for_each_entry_safe(channel, tmp, &edge->channels, list) {
3153 + list_del(&channel->list);
3154 + kfree(channel->name);
3155 + kfree(channel);
3156 }
3157
3158 kfree(edge);
3159 diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
3160 index 3d2216ccd860..8eb2b6dd36fe 100644
3161 --- a/drivers/rtc/rtc-sun6i.c
3162 +++ b/drivers/rtc/rtc-sun6i.c
3163 @@ -74,7 +74,7 @@
3164 #define SUN6I_ALARM_CONFIG_WAKEUP BIT(0)
3165
3166 #define SUN6I_LOSC_OUT_GATING 0x0060
3167 -#define SUN6I_LOSC_OUT_GATING_EN BIT(0)
3168 +#define SUN6I_LOSC_OUT_GATING_EN_OFFSET 0
3169
3170 /*
3171 * Get date values
3172 @@ -253,7 +253,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
3173 &clkout_name);
3174 rtc->ext_losc = clk_register_gate(NULL, clkout_name, rtc->hw.init->name,
3175 0, rtc->base + SUN6I_LOSC_OUT_GATING,
3176 - SUN6I_LOSC_OUT_GATING_EN, 0,
3177 + SUN6I_LOSC_OUT_GATING_EN_OFFSET, 0,
3178 &rtc->lock);
3179 if (IS_ERR(rtc->ext_losc)) {
3180 pr_crit("Couldn't register the LOSC external gate\n");
3181 diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
3182 index 18c4f933e8b9..b415ba42ca73 100644
3183 --- a/drivers/s390/scsi/zfcp_dbf.c
3184 +++ b/drivers/s390/scsi/zfcp_dbf.c
3185 @@ -664,6 +664,46 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
3186 spin_unlock_irqrestore(&dbf->scsi_lock, flags);
3187 }
3188
3189 +/**
3190 + * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
3191 + * @tag: Identifier for event.
3192 + * @adapter: Pointer to zfcp adapter as context for this event.
3193 + * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
3194 + * @ret: Return value of calling function.
3195 + *
3196 + * This SCSI trace variant does not depend on any of:
3197 + * scsi_cmnd, zfcp_fsf_req, scsi_device.
3198 + */
3199 +void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
3200 + unsigned int scsi_id, int ret)
3201 +{
3202 + struct zfcp_dbf *dbf = adapter->dbf;
3203 + struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
3204 + unsigned long flags;
3205 + static int const level = 1;
3206 +
3207 + if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
3208 + return;
3209 +
3210 + spin_lock_irqsave(&dbf->scsi_lock, flags);
3211 + memset(rec, 0, sizeof(*rec));
3212 +
3213 + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
3214 + rec->id = ZFCP_DBF_SCSI_CMND;
3215 + rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
3216 + rec->scsi_retries = ~0;
3217 + rec->scsi_allowed = ~0;
3218 + rec->fcp_rsp_info = ~0;
3219 + rec->scsi_id = scsi_id;
3220 + rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
3221 + rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
3222 + rec->host_scribble = ~0;
3223 + memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
3224 +
3225 + debug_event(dbf->scsi, level, rec, sizeof(*rec));
3226 + spin_unlock_irqrestore(&dbf->scsi_lock, flags);
3227 +}
3228 +
3229 static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
3230 {
3231 struct debug_info *d;
3232 diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
3233 index cbb8156bf5e0..7aa243a6cdbf 100644
3234 --- a/drivers/s390/scsi/zfcp_erp.c
3235 +++ b/drivers/s390/scsi/zfcp_erp.c
3236 @@ -35,11 +35,28 @@ enum zfcp_erp_steps {
3237 ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
3238 };
3239
3240 +/**
3241 + * enum zfcp_erp_act_type - Type of ERP action object.
3242 + * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
3243 + * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
3244 + * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
3245 + * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
3246 + * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
3247 + * either of the first four enum values.
3248 + * Used to indicate that an ERP action could not be
3249 + * set up despite a detected need for some recovery.
3250 + * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
3251 + * either of the first four enum values.
3252 + * Used to indicate that ERP not needed because
3253 + * the object has ZFCP_STATUS_COMMON_ERP_FAILED.
3254 + */
3255 enum zfcp_erp_act_type {
3256 ZFCP_ERP_ACTION_REOPEN_LUN = 1,
3257 ZFCP_ERP_ACTION_REOPEN_PORT = 2,
3258 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
3259 ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
3260 + ZFCP_ERP_ACTION_NONE = 0xc0,
3261 + ZFCP_ERP_ACTION_FAILED = 0xe0,
3262 };
3263
3264 enum zfcp_erp_act_state {
3265 @@ -126,6 +143,49 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3266 }
3267 }
3268
3269 +static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
3270 + struct zfcp_port *port,
3271 + struct scsi_device *sdev)
3272 +{
3273 + int need = want;
3274 + struct zfcp_scsi_dev *zsdev;
3275 +
3276 + switch (want) {
3277 + case ZFCP_ERP_ACTION_REOPEN_LUN:
3278 + zsdev = sdev_to_zfcp(sdev);
3279 + if (atomic_read(&zsdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
3280 + need = 0;
3281 + break;
3282 + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
3283 + if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
3284 + need = 0;
3285 + break;
3286 + case ZFCP_ERP_ACTION_REOPEN_PORT:
3287 + if (atomic_read(&port->status) &
3288 + ZFCP_STATUS_COMMON_ERP_FAILED) {
3289 + need = 0;
3290 + /* ensure propagation of failed status to new devices */
3291 + zfcp_erp_set_port_status(
3292 + port, ZFCP_STATUS_COMMON_ERP_FAILED);
3293 + }
3294 + break;
3295 + case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
3296 + if (atomic_read(&adapter->status) &
3297 + ZFCP_STATUS_COMMON_ERP_FAILED) {
3298 + need = 0;
3299 + /* ensure propagation of failed status to new devices */
3300 + zfcp_erp_set_adapter_status(
3301 + adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
3302 + }
3303 + break;
3304 + default:
3305 + need = 0;
3306 + break;
3307 + }
3308 +
3309 + return need;
3310 +}
3311 +
3312 static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
3313 struct zfcp_port *port,
3314 struct scsi_device *sdev)
3315 @@ -249,16 +309,27 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
3316 int retval = 1, need;
3317 struct zfcp_erp_action *act;
3318
3319 - if (!adapter->erp_thread)
3320 - return -EIO;
3321 + need = zfcp_erp_handle_failed(want, adapter, port, sdev);
3322 + if (!need) {
3323 + need = ZFCP_ERP_ACTION_FAILED; /* marker for trace */
3324 + goto out;
3325 + }
3326 +
3327 + if (!adapter->erp_thread) {
3328 + need = ZFCP_ERP_ACTION_NONE; /* marker for trace */
3329 + retval = -EIO;
3330 + goto out;
3331 + }
3332
3333 need = zfcp_erp_required_act(want, adapter, port, sdev);
3334 if (!need)
3335 goto out;
3336
3337 act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
3338 - if (!act)
3339 + if (!act) {
3340 + need |= ZFCP_ERP_ACTION_NONE; /* marker for trace */
3341 goto out;
3342 + }
3343 atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
3344 ++adapter->erp_total_count;
3345 list_add_tail(&act->list, &adapter->erp_ready_head);
3346 @@ -269,18 +340,32 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
3347 return retval;
3348 }
3349
3350 +void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
3351 + u64 port_name, u32 port_id)
3352 +{
3353 + unsigned long flags;
3354 + static /* don't waste stack */ struct zfcp_port tmpport;
3355 +
3356 + write_lock_irqsave(&adapter->erp_lock, flags);
3357 + /* Stand-in zfcp port with fields just good enough for
3358 + * zfcp_dbf_rec_trig() and zfcp_dbf_set_common().
3359 + * Under lock because tmpport is static.
3360 + */
3361 + atomic_set(&tmpport.status, -1); /* unknown */
3362 + tmpport.wwpn = port_name;
3363 + tmpport.d_id = port_id;
3364 + zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL,
3365 + ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
3366 + ZFCP_ERP_ACTION_NONE);
3367 + write_unlock_irqrestore(&adapter->erp_lock, flags);
3368 +}
3369 +
3370 static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
3371 int clear_mask, char *id)
3372 {
3373 zfcp_erp_adapter_block(adapter, clear_mask);
3374 zfcp_scsi_schedule_rports_block(adapter);
3375
3376 - /* ensure propagation of failed status to new devices */
3377 - if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
3378 - zfcp_erp_set_adapter_status(adapter,
3379 - ZFCP_STATUS_COMMON_ERP_FAILED);
3380 - return -EIO;
3381 - }
3382 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
3383 adapter, NULL, NULL, id, 0);
3384 }
3385 @@ -299,12 +384,8 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
3386 zfcp_scsi_schedule_rports_block(adapter);
3387
3388 write_lock_irqsave(&adapter->erp_lock, flags);
3389 - if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
3390 - zfcp_erp_set_adapter_status(adapter,
3391 - ZFCP_STATUS_COMMON_ERP_FAILED);
3392 - else
3393 - zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
3394 - NULL, NULL, id, 0);
3395 + zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
3396 + NULL, NULL, id, 0);
3397 write_unlock_irqrestore(&adapter->erp_lock, flags);
3398 }
3399
3400 @@ -345,9 +426,6 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
3401 zfcp_erp_port_block(port, clear);
3402 zfcp_scsi_schedule_rport_block(port);
3403
3404 - if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
3405 - return;
3406 -
3407 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
3408 port->adapter, port, NULL, id, 0);
3409 }
3410 @@ -373,12 +451,6 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
3411 zfcp_erp_port_block(port, clear);
3412 zfcp_scsi_schedule_rport_block(port);
3413
3414 - if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
3415 - /* ensure propagation of failed status to new devices */
3416 - zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
3417 - return -EIO;
3418 - }
3419 -
3420 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
3421 port->adapter, port, NULL, id, 0);
3422 }
3423 @@ -418,9 +490,6 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
3424
3425 zfcp_erp_lun_block(sdev, clear);
3426
3427 - if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
3428 - return;
3429 -
3430 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
3431 zfcp_sdev->port, sdev, id, act_status);
3432 }
3433 diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
3434 index b1cbb14fb2ae..c1092a11e728 100644
3435 --- a/drivers/s390/scsi/zfcp_ext.h
3436 +++ b/drivers/s390/scsi/zfcp_ext.h
3437 @@ -52,10 +52,15 @@ extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
3438 extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
3439 extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
3440 struct zfcp_fsf_req *);
3441 +extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
3442 + unsigned int scsi_id, int ret);
3443
3444 /* zfcp_erp.c */
3445 extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
3446 extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
3447 +extern void zfcp_erp_port_forced_no_port_dbf(char *id,
3448 + struct zfcp_adapter *adapter,
3449 + u64 port_name, u32 port_id);
3450 extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
3451 extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
3452 extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
3453 diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
3454 index 22f9562f415c..0b6f51424745 100644
3455 --- a/drivers/s390/scsi/zfcp_scsi.c
3456 +++ b/drivers/s390/scsi/zfcp_scsi.c
3457 @@ -181,6 +181,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
3458 if (abrt_req)
3459 break;
3460
3461 + zfcp_dbf_scsi_abort("abrt_wt", scpnt, NULL);
3462 zfcp_erp_wait(adapter);
3463 ret = fc_block_scsi_eh(scpnt);
3464 if (ret) {
3465 @@ -277,6 +278,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
3466 if (fsf_req)
3467 break;
3468
3469 + zfcp_dbf_scsi_devreset("wait", scpnt, tm_flags, NULL);
3470 zfcp_erp_wait(adapter);
3471 ret = fc_block_scsi_eh(scpnt);
3472 if (ret) {
3473 @@ -323,15 +325,16 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
3474 {
3475 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
3476 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
3477 - int ret;
3478 + int ret = SUCCESS, fc_ret;
3479
3480 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
3481 zfcp_erp_wait(adapter);
3482 - ret = fc_block_scsi_eh(scpnt);
3483 - if (ret)
3484 - return ret;
3485 + fc_ret = fc_block_scsi_eh(scpnt);
3486 + if (fc_ret)
3487 + ret = fc_ret;
3488
3489 - return SUCCESS;
3490 + zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
3491 + return ret;
3492 }
3493
3494 struct scsi_transport_template *zfcp_scsi_transport_template;
3495 @@ -602,6 +605,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
3496 if (port) {
3497 zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
3498 put_device(&port->dev);
3499 + } else {
3500 + zfcp_erp_port_forced_no_port_dbf(
3501 + "sctrpin", adapter,
3502 + rport->port_name /* zfcp_scsi_rport_register */,
3503 + rport->port_id /* zfcp_scsi_rport_register */);
3504 }
3505 }
3506
3507 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
3508 index 5fbaf13781b6..604a39dba5d0 100644
3509 --- a/drivers/scsi/hpsa.c
3510 +++ b/drivers/scsi/hpsa.c
3511 @@ -8638,7 +8638,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
3512 kfree(options);
3513 }
3514
3515 -static void hpsa_shutdown(struct pci_dev *pdev)
3516 +static void __hpsa_shutdown(struct pci_dev *pdev)
3517 {
3518 struct ctlr_info *h;
3519
3520 @@ -8653,6 +8653,12 @@ static void hpsa_shutdown(struct pci_dev *pdev)
3521 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
3522 }
3523
3524 +static void hpsa_shutdown(struct pci_dev *pdev)
3525 +{
3526 + __hpsa_shutdown(pdev);
3527 + pci_disable_device(pdev);
3528 +}
3529 +
3530 static void hpsa_free_device_info(struct ctlr_info *h)
3531 {
3532 int i;
3533 @@ -8696,7 +8702,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
3534 scsi_remove_host(h->scsi_host); /* init_one 8 */
3535 /* includes hpsa_free_irqs - init_one 4 */
3536 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
3537 - hpsa_shutdown(pdev);
3538 + __hpsa_shutdown(pdev);
3539
3540 hpsa_free_device_info(h); /* scan */
3541
3542 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
3543 index e24f57946a17..bcde6130f121 100644
3544 --- a/drivers/scsi/qla2xxx/qla_init.c
3545 +++ b/drivers/scsi/qla2xxx/qla_init.c
3546 @@ -4627,7 +4627,8 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3547 return;
3548
3549 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
3550 - fcport->fp_speed > ha->link_data_rate)
3551 + fcport->fp_speed > ha->link_data_rate ||
3552 + !ha->flags.gpsc_supported)
3553 return;
3554
3555 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
3556 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
3557 index 13a00a42b3ca..e073eb16f8a4 100644
3558 --- a/drivers/scsi/qla2xxx/qla_isr.c
3559 +++ b/drivers/scsi/qla2xxx/qla_isr.c
3560 @@ -2454,8 +2454,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
3561 ox_id = le16_to_cpu(sts24->ox_id);
3562 par_sense_len = sizeof(sts24->data);
3563 /* Valid values of the retry delay timer are 0x1-0xffef */
3564 - if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
3565 - retry_delay = sts24->retry_delay;
3566 + if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) {
3567 + retry_delay = sts24->retry_delay & 0x3fff;
3568 + ql_dbg(ql_dbg_io, sp->vha, 0x3033,
3569 + "%s: scope=%#x retry_delay=%#x\n", __func__,
3570 + sts24->retry_delay >> 14, retry_delay);
3571 + }
3572 } else {
3573 if (scsi_status & SS_SENSE_LEN_VALID)
3574 sense_len = le16_to_cpu(sts->req_sense_length);
3575 diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
3576 index 40b75748835f..ba009bb9d82b 100644
3577 --- a/drivers/soc/rockchip/pm_domains.c
3578 +++ b/drivers/soc/rockchip/pm_domains.c
3579 @@ -255,7 +255,7 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
3580 return;
3581 else if (pd->info->pwr_w_mask)
3582 regmap_write(pmu->regmap, pmu->info->pwr_offset,
3583 - on ? pd->info->pwr_mask :
3584 + on ? pd->info->pwr_w_mask :
3585 (pd->info->pwr_mask | pd->info->pwr_w_mask));
3586 else
3587 regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
3588 diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
3589 index a4d6a0e2e993..23ad4f9f2143 100644
3590 --- a/drivers/thermal/broadcom/bcm2835_thermal.c
3591 +++ b/drivers/thermal/broadcom/bcm2835_thermal.c
3592 @@ -213,8 +213,8 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
3593 rate = clk_get_rate(data->clk);
3594 if ((rate < 1920000) || (rate > 5000000))
3595 dev_warn(&pdev->dev,
3596 - "Clock %pCn running at %pCr Hz is outside of the recommended range: 1.92 to 5MHz\n",
3597 - data->clk, data->clk);
3598 + "Clock %pCn running at %lu Hz is outside of the recommended range: 1.92 to 5MHz\n",
3599 + data->clk, rate);
3600
3601 /* register of thermal sensor and get info from DT */
3602 tz = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
3603 diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
3604 index c6daa315ee4e..8bc8fe2b75f7 100644
3605 --- a/drivers/tty/serial/sh-sci.c
3606 +++ b/drivers/tty/serial/sh-sci.c
3607 @@ -2854,16 +2854,15 @@ static void serial_console_write(struct console *co, const char *s,
3608 unsigned long flags;
3609 int locked = 1;
3610
3611 - local_irq_save(flags);
3612 #if defined(SUPPORT_SYSRQ)
3613 if (port->sysrq)
3614 locked = 0;
3615 else
3616 #endif
3617 if (oops_in_progress)
3618 - locked = spin_trylock(&port->lock);
3619 + locked = spin_trylock_irqsave(&port->lock, flags);
3620 else
3621 - spin_lock(&port->lock);
3622 + spin_lock_irqsave(&port->lock, flags);
3623
3624 /* first save SCSCR then disable interrupts, keep clock source */
3625 ctrl = serial_port_in(port, SCSCR);
3626 @@ -2883,8 +2882,7 @@ static void serial_console_write(struct console *co, const char *s,
3627 serial_port_out(port, SCSCR, ctrl);
3628
3629 if (locked)
3630 - spin_unlock(&port->lock);
3631 - local_irq_restore(flags);
3632 + spin_unlock_irqrestore(&port->lock, flags);
3633 }
3634
3635 static int serial_console_setup(struct console *co, char *options)
3636 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
3637 index 442be7f312f6..e5f77e611451 100644
3638 --- a/drivers/usb/core/hub.c
3639 +++ b/drivers/usb/core/hub.c
3640 @@ -4519,7 +4519,9 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
3641 * reset. But only on the first attempt,
3642 * lest we get into a time out/reset loop
3643 */
3644 - if (r == 0 || (r == -ETIMEDOUT && retries == 0))
3645 + if (r == 0 || (r == -ETIMEDOUT &&
3646 + retries == 0 &&
3647 + udev->speed > USB_SPEED_FULL))
3648 break;
3649 }
3650 udev->descriptor.bMaxPacketSize0 =
3651 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3652 index d8b185b0d0f9..e5bccc6d49cf 100644
3653 --- a/drivers/usb/host/xhci.c
3654 +++ b/drivers/usb/host/xhci.c
3655 @@ -3551,6 +3551,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3656 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3657 }
3658
3659 + virt_dev->udev = NULL;
3660 xhci_disable_slot(xhci, udev->slot_id);
3661 /*
3662 * Event command completion handler will free any data structures
3663 diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
3664 index 734a9158946b..e55304d5cf07 100644
3665 --- a/drivers/video/backlight/as3711_bl.c
3666 +++ b/drivers/video/backlight/as3711_bl.c
3667 @@ -262,10 +262,10 @@ static int as3711_bl_register(struct platform_device *pdev,
3668 static int as3711_backlight_parse_dt(struct device *dev)
3669 {
3670 struct as3711_bl_pdata *pdata = dev_get_platdata(dev);
3671 - struct device_node *bl =
3672 - of_find_node_by_name(dev->parent->of_node, "backlight"), *fb;
3673 + struct device_node *bl, *fb;
3674 int ret;
3675
3676 + bl = of_get_child_by_name(dev->parent->of_node, "backlight");
3677 if (!bl) {
3678 dev_dbg(dev, "backlight node not found\n");
3679 return -ENODEV;
3680 @@ -279,7 +279,7 @@ static int as3711_backlight_parse_dt(struct device *dev)
3681 if (pdata->su1_max_uA <= 0)
3682 ret = -EINVAL;
3683 if (ret < 0)
3684 - return ret;
3685 + goto err_put_bl;
3686 }
3687
3688 fb = of_parse_phandle(bl, "su2-dev", 0);
3689 @@ -292,7 +292,7 @@ static int as3711_backlight_parse_dt(struct device *dev)
3690 if (pdata->su2_max_uA <= 0)
3691 ret = -EINVAL;
3692 if (ret < 0)
3693 - return ret;
3694 + goto err_put_bl;
3695
3696 if (of_find_property(bl, "su2-feedback-voltage", NULL)) {
3697 pdata->su2_feedback = AS3711_SU2_VOLTAGE;
3698 @@ -314,8 +314,10 @@ static int as3711_backlight_parse_dt(struct device *dev)
3699 pdata->su2_feedback = AS3711_SU2_CURR_AUTO;
3700 count++;
3701 }
3702 - if (count != 1)
3703 - return -EINVAL;
3704 + if (count != 1) {
3705 + ret = -EINVAL;
3706 + goto err_put_bl;
3707 + }
3708
3709 count = 0;
3710 if (of_find_property(bl, "su2-fbprot-lx-sd4", NULL)) {
3711 @@ -334,8 +336,10 @@ static int as3711_backlight_parse_dt(struct device *dev)
3712 pdata->su2_fbprot = AS3711_SU2_GPIO4;
3713 count++;
3714 }
3715 - if (count != 1)
3716 - return -EINVAL;
3717 + if (count != 1) {
3718 + ret = -EINVAL;
3719 + goto err_put_bl;
3720 + }
3721
3722 count = 0;
3723 if (of_find_property(bl, "su2-auto-curr1", NULL)) {
3724 @@ -355,11 +359,20 @@ static int as3711_backlight_parse_dt(struct device *dev)
3725 * At least one su2-auto-curr* must be specified iff
3726 * AS3711_SU2_CURR_AUTO is used
3727 */
3728 - if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO))
3729 - return -EINVAL;
3730 + if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO)) {
3731 + ret = -EINVAL;
3732 + goto err_put_bl;
3733 + }
3734 }
3735
3736 + of_node_put(bl);
3737 +
3738 return 0;
3739 +
3740 +err_put_bl:
3741 + of_node_put(bl);
3742 +
3743 + return ret;
3744 }
3745
3746 static int as3711_backlight_probe(struct platform_device *pdev)
3747 diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
3748 index 7b738d60ecc2..f3aa6088f1d9 100644
3749 --- a/drivers/video/backlight/max8925_bl.c
3750 +++ b/drivers/video/backlight/max8925_bl.c
3751 @@ -116,7 +116,7 @@ static void max8925_backlight_dt_init(struct platform_device *pdev)
3752 if (!pdata)
3753 return;
3754
3755 - np = of_find_node_by_name(nproot, "backlight");
3756 + np = of_get_child_by_name(nproot, "backlight");
3757 if (!np) {
3758 dev_err(&pdev->dev, "failed to find backlight node\n");
3759 return;
3760 @@ -125,6 +125,8 @@ static void max8925_backlight_dt_init(struct platform_device *pdev)
3761 if (!of_property_read_u32(np, "maxim,max8925-dual-string", &val))
3762 pdata->dual_string = val;
3763
3764 + of_node_put(np);
3765 +
3766 pdev->dev.platform_data = pdata;
3767 }
3768
3769 diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
3770 index fd524ad860a5..f45d0c9467db 100644
3771 --- a/drivers/video/backlight/tps65217_bl.c
3772 +++ b/drivers/video/backlight/tps65217_bl.c
3773 @@ -184,11 +184,11 @@ static struct tps65217_bl_pdata *
3774 tps65217_bl_parse_dt(struct platform_device *pdev)
3775 {
3776 struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
3777 - struct device_node *node = of_node_get(tps->dev->of_node);
3778 + struct device_node *node;
3779 struct tps65217_bl_pdata *pdata, *err;
3780 u32 val;
3781
3782 - node = of_find_node_by_name(node, "backlight");
3783 + node = of_get_child_by_name(tps->dev->of_node, "backlight");
3784 if (!node)
3785 return ERR_PTR(-ENODEV);
3786
3787 diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
3788 index 73676eb0244a..c592ca513115 100644
3789 --- a/drivers/video/fbdev/uvesafb.c
3790 +++ b/drivers/video/fbdev/uvesafb.c
3791 @@ -1044,7 +1044,8 @@ static int uvesafb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
3792 info->cmap.len || cmap->start < info->cmap.start)
3793 return -EINVAL;
3794
3795 - entries = kmalloc(sizeof(*entries) * cmap->len, GFP_KERNEL);
3796 + entries = kmalloc_array(cmap->len, sizeof(*entries),
3797 + GFP_KERNEL);
3798 if (!entries)
3799 return -ENOMEM;
3800
3801 diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
3802 index 0c2a5a8327bd..6f9e9505b34c 100644
3803 --- a/drivers/w1/w1.c
3804 +++ b/drivers/w1/w1.c
3805 @@ -750,7 +750,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
3806
3807 /* slave modules need to be loaded in a context with unlocked mutex */
3808 mutex_unlock(&dev->mutex);
3809 - request_module("w1-family-0x%02x", rn->family);
3810 + request_module("w1-family-0x%02X", rn->family);
3811 mutex_lock(&dev->mutex);
3812
3813 spin_lock(&w1_flock);
3814 diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
3815 index 762378f1811c..08e4af04d6f2 100644
3816 --- a/drivers/xen/events/events_base.c
3817 +++ b/drivers/xen/events/events_base.c
3818 @@ -628,8 +628,6 @@ static void __unbind_from_irq(unsigned int irq)
3819 xen_irq_info_cleanup(info);
3820 }
3821
3822 - BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
3823 -
3824 xen_free_irq(irq);
3825 }
3826
3827 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3828 index 3a07900971c3..f5b90dc137ec 100644
3829 --- a/fs/btrfs/inode.c
3830 +++ b/fs/btrfs/inode.c
3831 @@ -9769,6 +9769,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
3832 u64 new_idx = 0;
3833 u64 root_objectid;
3834 int ret;
3835 + int ret2;
3836 bool root_log_pinned = false;
3837 bool dest_log_pinned = false;
3838
3839 @@ -9965,7 +9966,8 @@ static int btrfs_rename_exchange(struct inode *old_dir,
3840 dest_log_pinned = false;
3841 }
3842 }
3843 - ret = btrfs_end_transaction(trans);
3844 + ret2 = btrfs_end_transaction(trans);
3845 + ret = ret ? ret : ret2;
3846 out_notrans:
3847 if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
3848 up_read(&fs_info->subvol_sem);
3849 diff --git a/fs/fuse/control.c b/fs/fuse/control.c
3850 index b9ea99c5b5b3..5be0339dcceb 100644
3851 --- a/fs/fuse/control.c
3852 +++ b/fs/fuse/control.c
3853 @@ -211,10 +211,11 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
3854 if (!dentry)
3855 return NULL;
3856
3857 - fc->ctl_dentry[fc->ctl_ndents++] = dentry;
3858 inode = new_inode(fuse_control_sb);
3859 - if (!inode)
3860 + if (!inode) {
3861 + dput(dentry);
3862 return NULL;
3863 + }
3864
3865 inode->i_ino = get_next_ino();
3866 inode->i_mode = mode;
3867 @@ -228,6 +229,9 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
3868 set_nlink(inode, nlink);
3869 inode->i_private = fc;
3870 d_add(dentry, inode);
3871 +
3872 + fc->ctl_dentry[fc->ctl_ndents++] = dentry;
3873 +
3874 return dentry;
3875 }
3876
3877 @@ -284,7 +288,10 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc)
3878 for (i = fc->ctl_ndents - 1; i >= 0; i--) {
3879 struct dentry *dentry = fc->ctl_dentry[i];
3880 d_inode(dentry)->i_private = NULL;
3881 - d_drop(dentry);
3882 + if (!i) {
3883 + /* Get rid of submounts: */
3884 + d_invalidate(dentry);
3885 + }
3886 dput(dentry);
3887 }
3888 drop_nlink(d_inode(fuse_control_sb->s_root));
3889 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
3890 index 13c65dd2d37d..261fd13a75c6 100644
3891 --- a/fs/fuse/dev.c
3892 +++ b/fs/fuse/dev.c
3893 @@ -381,8 +381,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
3894 if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
3895 wake_up(&fc->blocked_waitq);
3896
3897 - if (fc->num_background == fc->congestion_threshold &&
3898 - fc->connected && fc->sb) {
3899 + if (fc->num_background == fc->congestion_threshold && fc->sb) {
3900 clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
3901 clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
3902 }
3903 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
3904 index 24967382a7b1..7a980b4462d9 100644
3905 --- a/fs/fuse/dir.c
3906 +++ b/fs/fuse/dir.c
3907 @@ -1629,8 +1629,19 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
3908 return err;
3909
3910 if (attr->ia_valid & ATTR_OPEN) {
3911 - if (fc->atomic_o_trunc)
3912 + /* This is coming from open(..., ... | O_TRUNC); */
3913 + WARN_ON(!(attr->ia_valid & ATTR_SIZE));
3914 + WARN_ON(attr->ia_size != 0);
3915 + if (fc->atomic_o_trunc) {
3916 + /*
3917 + * No need to send request to userspace, since actual
3918 + * truncation has already been done by OPEN. But still
3919 + * need to truncate page cache.
3920 + */
3921 + i_size_write(inode, 0);
3922 + truncate_pagecache(inode, 0);
3923 return 0;
3924 + }
3925 file = NULL;
3926 }
3927
3928 diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
3929 index 94a745acaef8..a13ecefa9cd1 100644
3930 --- a/fs/fuse/inode.c
3931 +++ b/fs/fuse/inode.c
3932 @@ -1176,6 +1176,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
3933 fuse_dev_free(fud);
3934 err_put_conn:
3935 fuse_conn_put(fc);
3936 + sb->s_fs_info = NULL;
3937 err_fput:
3938 fput(file);
3939 err:
3940 diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
3941 index 19151f6c0e97..516b2248cafe 100644
3942 --- a/fs/nfs/callback_proc.c
3943 +++ b/fs/nfs/callback_proc.c
3944 @@ -420,11 +420,8 @@ validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
3945 return htonl(NFS4ERR_SEQ_FALSE_RETRY);
3946 }
3947
3948 - /* Wraparound */
3949 - if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) {
3950 - if (args->csa_sequenceid == 1)
3951 - return htonl(NFS4_OK);
3952 - } else if (likely(args->csa_sequenceid == slot->seq_nr + 1))
3953 + /* Note: wraparound relies on seq_nr being of type u32 */
3954 + if (likely(args->csa_sequenceid == slot->seq_nr + 1))
3955 return htonl(NFS4_OK);
3956
3957 /* Misordered request */
3958 diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
3959 index 22dc30a679a0..b6f9d84ba19b 100644
3960 --- a/fs/nfs/nfs4idmap.c
3961 +++ b/fs/nfs/nfs4idmap.c
3962 @@ -343,7 +343,7 @@ static ssize_t nfs_idmap_lookup_name(__u32 id, const char *type, char *buf,
3963 int id_len;
3964 ssize_t ret;
3965
3966 - id_len = snprintf(id_str, sizeof(id_str), "%u", id);
3967 + id_len = nfs_map_numeric_to_string(id, id_str, sizeof(id_str));
3968 ret = nfs_idmap_get_key(id_str, id_len, type, buf, buflen, idmap);
3969 if (ret < 0)
3970 return -EINVAL;
3971 @@ -627,7 +627,8 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
3972 if (strcmp(upcall->im_name, im->im_name) != 0)
3973 break;
3974 /* Note: here we store the NUL terminator too */
3975 - len = sprintf(id_str, "%d", im->im_id) + 1;
3976 + len = 1 + nfs_map_numeric_to_string(im->im_id, id_str,
3977 + sizeof(id_str));
3978 ret = nfs_idmap_instantiate(key, authkey, id_str, len);
3979 break;
3980 case IDMAP_CONV_IDTONAME:
3981 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3982 index 8ff98bbe479b..928bbc397818 100644
3983 --- a/fs/nfs/nfs4proc.c
3984 +++ b/fs/nfs/nfs4proc.c
3985 @@ -750,7 +750,7 @@ static int nfs41_sequence_process(struct rpc_task *task,
3986 * The slot id we used was probably retired. Try again
3987 * using a different slot id.
3988 */
3989 - if (slot->seq_nr < slot->table->target_highest_slotid)
3990 + if (slot->slot_nr < slot->table->target_highest_slotid)
3991 goto session_recover;
3992 goto retry_nowait;
3993 case -NFS4ERR_SEQ_MISORDERED:
3994 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
3995 index df2b8849a63b..f6588cc6816c 100644
3996 --- a/fs/nfsd/nfs4xdr.c
3997 +++ b/fs/nfsd/nfs4xdr.c
3998 @@ -3645,7 +3645,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
3999 nfserr = nfserr_resource;
4000 goto err_no_verf;
4001 }
4002 - maxcount = min_t(u32, readdir->rd_maxcount, INT_MAX);
4003 + maxcount = svc_max_payload(resp->rqstp);
4004 + maxcount = min_t(u32, readdir->rd_maxcount, maxcount);
4005 /*
4006 * Note the rfc defines rd_maxcount as the size of the
4007 * READDIR4resok structure, which includes the verifier above
4008 @@ -3659,7 +3660,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
4009
4010 /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
4011 if (!readdir->rd_dircount)
4012 - readdir->rd_dircount = INT_MAX;
4013 + readdir->rd_dircount = svc_max_payload(resp->rqstp);
4014
4015 readdir->xdr = xdr;
4016 readdir->rd_maxcount = maxcount;
4017 diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
4018 index 04c4ec6483e5..8ae1cd8611cc 100644
4019 --- a/fs/ubifs/journal.c
4020 +++ b/fs/ubifs/journal.c
4021 @@ -1283,10 +1283,11 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
4022 int *new_len)
4023 {
4024 void *buf;
4025 - int err, dlen, compr_type, out_len, old_dlen;
4026 + int err, compr_type;
4027 + u32 dlen, out_len, old_dlen;
4028
4029 out_len = le32_to_cpu(dn->size);
4030 - buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
4031 + buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
4032 if (!buf)
4033 return -ENOMEM;
4034
4035 diff --git a/fs/udf/directory.c b/fs/udf/directory.c
4036 index 7aa48bd7cbaf..a636b3b17219 100644
4037 --- a/fs/udf/directory.c
4038 +++ b/fs/udf/directory.c
4039 @@ -151,6 +151,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
4040 sizeof(struct fileIdentDesc));
4041 }
4042 }
4043 + /* Got last entry outside of dir size - fs is corrupted! */
4044 + if (*nf_pos > dir->i_size)
4045 + return NULL;
4046 return fi;
4047 }
4048
4049 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
4050 index 6362e3606aa5..4d4af0e94059 100644
4051 --- a/include/linux/blkdev.h
4052 +++ b/include/linux/blkdev.h
4053 @@ -1088,8 +1088,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
4054 if (!q->limits.chunk_sectors)
4055 return q->limits.max_sectors;
4056
4057 - return q->limits.chunk_sectors -
4058 - (offset & (q->limits.chunk_sectors - 1));
4059 + return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
4060 + (offset & (q->limits.chunk_sectors - 1))));
4061 }
4062
4063 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
4064 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
4065 index 853929f98962..a704d032713b 100644
4066 --- a/include/linux/compiler.h
4067 +++ b/include/linux/compiler.h
4068 @@ -21,7 +21,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
4069 #define unlikely_notrace(x) __builtin_expect(!!(x), 0)
4070
4071 #define __branch_check__(x, expect, is_constant) ({ \
4072 - int ______r; \
4073 + long ______r; \
4074 static struct ftrace_likely_data \
4075 __attribute__((__aligned__(4))) \
4076 __attribute__((section("_ftrace_annotated_branch"))) \
4077 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
4078 index 39fa09bcde23..2038ab531616 100644
4079 --- a/include/linux/slub_def.h
4080 +++ b/include/linux/slub_def.h
4081 @@ -151,8 +151,12 @@ struct kmem_cache {
4082
4083 #ifdef CONFIG_SYSFS
4084 #define SLAB_SUPPORTS_SYSFS
4085 +void sysfs_slab_unlink(struct kmem_cache *);
4086 void sysfs_slab_release(struct kmem_cache *);
4087 #else
4088 +static inline void sysfs_slab_unlink(struct kmem_cache *s)
4089 +{
4090 +}
4091 static inline void sysfs_slab_release(struct kmem_cache *s)
4092 {
4093 }
4094 diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
4095 index 08f3d8699a27..5a24b4c700e5 100644
4096 --- a/include/rdma/ib_verbs.h
4097 +++ b/include/rdma/ib_verbs.h
4098 @@ -3558,6 +3558,20 @@ static inline int ib_check_mr_access(int flags)
4099 return 0;
4100 }
4101
4102 +static inline bool ib_access_writable(int access_flags)
4103 +{
4104 + /*
4105 + * We have writable memory backing the MR if any of the following
4106 + * access flags are set. "Local write" and "remote write" obviously
4107 + * require write access. "Remote atomic" can do things like fetch and
4108 + * add, which will modify memory, and "MW bind" can change permissions
4109 + * by binding a window.
4110 + */
4111 + return access_flags &
4112 + (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
4113 + IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4114 +}
4115 +
4116 /**
4117 * ib_check_mr_status: lightweight check of MR status.
4118 * This routine may provide status checks on a selected
4119 diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
4120 index 1ba84a78f1c5..c653af91da16 100644
4121 --- a/include/rdma/rdma_vt.h
4122 +++ b/include/rdma/rdma_vt.h
4123 @@ -409,7 +409,7 @@ struct rvt_dev_info {
4124 spinlock_t pending_lock; /* protect pending mmap list */
4125
4126 /* CQ */
4127 - struct kthread_worker *worker; /* per device cq worker */
4128 + struct kthread_worker __rcu *worker; /* per device cq worker */
4129 u32 n_cqs_allocated; /* number of CQs allocated for device */
4130 spinlock_t n_cqs_lock; /* protect count of in use cqs */
4131
4132 diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
4133 index 3cdaeaef9ce1..d989cc238198 100644
4134 --- a/kernel/printk/printk_safe.c
4135 +++ b/kernel/printk/printk_safe.c
4136 @@ -85,6 +85,7 @@ static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
4137 {
4138 int add;
4139 size_t len;
4140 + va_list ap;
4141
4142 again:
4143 len = atomic_read(&s->len);
4144 @@ -103,7 +104,9 @@ static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
4145 if (!len)
4146 smp_rmb();
4147
4148 - add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
4149 + va_copy(ap, args);
4150 + add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
4151 + va_end(ap);
4152 if (!add)
4153 return 0;
4154
4155 diff --git a/kernel/time/time.c b/kernel/time/time.c
4156 index 44a8c1402133..319935af02fb 100644
4157 --- a/kernel/time/time.c
4158 +++ b/kernel/time/time.c
4159 @@ -28,6 +28,7 @@
4160 */
4161
4162 #include <linux/export.h>
4163 +#include <linux/kernel.h>
4164 #include <linux/timex.h>
4165 #include <linux/capability.h>
4166 #include <linux/timekeeper_internal.h>
4167 @@ -348,9 +349,10 @@ unsigned int jiffies_to_msecs(const unsigned long j)
4168 return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
4169 #else
4170 # if BITS_PER_LONG == 32
4171 - return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
4172 + return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
4173 + HZ_TO_MSEC_SHR32;
4174 # else
4175 - return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
4176 + return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
4177 # endif
4178 #endif
4179 }
4180 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
4181 index 86c3385b9eb3..4a990f3fd345 100644
4182 --- a/lib/vsprintf.c
4183 +++ b/lib/vsprintf.c
4184 @@ -1392,9 +1392,6 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
4185 return string(buf, end, NULL, spec);
4186
4187 switch (fmt[1]) {
4188 - case 'r':
4189 - return number(buf, end, clk_get_rate(clk), spec);
4190 -
4191 case 'n':
4192 default:
4193 #ifdef CONFIG_COMMON_CLK
4194 diff --git a/mm/gup.c b/mm/gup.c
4195 index d2ba0be71441..72c921da0f3b 100644
4196 --- a/mm/gup.c
4197 +++ b/mm/gup.c
4198 @@ -1469,32 +1469,48 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
4199 return 1;
4200 }
4201
4202 -static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
4203 +static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
4204 unsigned long end, struct page **pages, int *nr)
4205 {
4206 unsigned long fault_pfn;
4207 + int nr_start = *nr;
4208 +
4209 + fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
4210 + if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
4211 + return 0;
4212
4213 - fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
4214 - return __gup_device_huge(fault_pfn, addr, end, pages, nr);
4215 + if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
4216 + undo_dev_pagemap(nr, nr_start, pages);
4217 + return 0;
4218 + }
4219 + return 1;
4220 }
4221
4222 -static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
4223 +static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
4224 unsigned long end, struct page **pages, int *nr)
4225 {
4226 unsigned long fault_pfn;
4227 + int nr_start = *nr;
4228 +
4229 + fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
4230 + if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
4231 + return 0;
4232
4233 - fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
4234 - return __gup_device_huge(fault_pfn, addr, end, pages, nr);
4235 + if (unlikely(pud_val(orig) != pud_val(*pudp))) {
4236 + undo_dev_pagemap(nr, nr_start, pages);
4237 + return 0;
4238 + }
4239 + return 1;
4240 }
4241 #else
4242 -static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
4243 +static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
4244 unsigned long end, struct page **pages, int *nr)
4245 {
4246 BUILD_BUG();
4247 return 0;
4248 }
4249
4250 -static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
4251 +static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
4252 unsigned long end, struct page **pages, int *nr)
4253 {
4254 BUILD_BUG();
4255 @@ -1512,7 +1528,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
4256 return 0;
4257
4258 if (pmd_devmap(orig))
4259 - return __gup_device_huge_pmd(orig, addr, end, pages, nr);
4260 + return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
4261
4262 refs = 0;
4263 page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
4264 @@ -1550,7 +1566,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
4265 return 0;
4266
4267 if (pud_devmap(orig))
4268 - return __gup_device_huge_pud(orig, addr, end, pages, nr);
4269 + return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
4270
4271 refs = 0;
4272 page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
4273 diff --git a/mm/ksm.c b/mm/ksm.c
4274 index fdc8746ebcb4..f50cc573815f 100644
4275 --- a/mm/ksm.c
4276 +++ b/mm/ksm.c
4277 @@ -199,6 +199,8 @@ struct rmap_item {
4278 #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
4279 #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
4280 #define STABLE_FLAG 0x200 /* is listed from the stable tree */
4281 +#define KSM_FLAG_MASK (SEQNR_MASK|UNSTABLE_FLAG|STABLE_FLAG)
4282 + /* to mask all the flags */
4283
4284 /* The stable and unstable tree heads */
4285 static struct rb_root one_stable_tree[1] = { RB_ROOT };
4286 @@ -2562,10 +2564,15 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
4287 anon_vma_lock_read(anon_vma);
4288 anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
4289 0, ULONG_MAX) {
4290 + unsigned long addr;
4291 +
4292 cond_resched();
4293 vma = vmac->vma;
4294 - if (rmap_item->address < vma->vm_start ||
4295 - rmap_item->address >= vma->vm_end)
4296 +
4297 + /* Ignore the stable/unstable/sqnr flags */
4298 + addr = rmap_item->address & ~KSM_FLAG_MASK;
4299 +
4300 + if (addr < vma->vm_start || addr >= vma->vm_end)
4301 continue;
4302 /*
4303 * Initially we examine only the vma which covers this
4304 @@ -2579,8 +2586,7 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
4305 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
4306 continue;
4307
4308 - if (!rwc->rmap_one(page, vma,
4309 - rmap_item->address, rwc->arg)) {
4310 + if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
4311 anon_vma_unlock_read(anon_vma);
4312 return;
4313 }
4314 diff --git a/mm/slab_common.c b/mm/slab_common.c
4315 index 65212caa1f2a..91d271b90600 100644
4316 --- a/mm/slab_common.c
4317 +++ b/mm/slab_common.c
4318 @@ -546,10 +546,14 @@ static int shutdown_cache(struct kmem_cache *s)
4319 list_del(&s->list);
4320
4321 if (s->flags & SLAB_TYPESAFE_BY_RCU) {
4322 +#ifdef SLAB_SUPPORTS_SYSFS
4323 + sysfs_slab_unlink(s);
4324 +#endif
4325 list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
4326 schedule_work(&slab_caches_to_rcu_destroy_work);
4327 } else {
4328 #ifdef SLAB_SUPPORTS_SYSFS
4329 + sysfs_slab_unlink(s);
4330 sysfs_slab_release(s);
4331 #else
4332 slab_kmem_cache_release(s);
4333 diff --git a/mm/slub.c b/mm/slub.c
4334 index 41c01690d116..c38e71cea6d3 100644
4335 --- a/mm/slub.c
4336 +++ b/mm/slub.c
4337 @@ -5660,7 +5660,6 @@ static void sysfs_slab_remove_workfn(struct work_struct *work)
4338 kset_unregister(s->memcg_kset);
4339 #endif
4340 kobject_uevent(&s->kobj, KOBJ_REMOVE);
4341 - kobject_del(&s->kobj);
4342 out:
4343 kobject_put(&s->kobj);
4344 }
4345 @@ -5745,6 +5744,12 @@ static void sysfs_slab_remove(struct kmem_cache *s)
4346 schedule_work(&s->kobj_remove_work);
4347 }
4348
4349 +void sysfs_slab_unlink(struct kmem_cache *s)
4350 +{
4351 + if (slab_state >= FULL)
4352 + kobject_del(&s->kobj);
4353 +}
4354 +
4355 void sysfs_slab_release(struct kmem_cache *s)
4356 {
4357 if (slab_state >= FULL)
4358 diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
4359 index 491ae9fc561f..991d5a96f35b 100644
4360 --- a/net/sunrpc/xprtrdma/rpc_rdma.c
4361 +++ b/net/sunrpc/xprtrdma/rpc_rdma.c
4362 @@ -229,7 +229,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
4363 */
4364 *ppages = alloc_page(GFP_ATOMIC);
4365 if (!*ppages)
4366 - return -EAGAIN;
4367 + return -ENOBUFS;
4368 }
4369 seg->mr_page = *ppages;
4370 seg->mr_offset = (char *)page_base;
4371 diff --git a/sound/core/timer.c b/sound/core/timer.c
4372 index 4fdc9e11e832..2c0f292226d7 100644
4373 --- a/sound/core/timer.c
4374 +++ b/sound/core/timer.c
4375 @@ -1514,7 +1514,7 @@ static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
4376 } else {
4377 if (id.subdevice < 0)
4378 id.subdevice = 0;
4379 - else
4380 + else if (id.subdevice < INT_MAX)
4381 id.subdevice++;
4382 }
4383 }
4384 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4385 index 2a8aa2bc5c30..02157e3d82bb 100644
4386 --- a/sound/pci/hda/patch_realtek.c
4387 +++ b/sound/pci/hda/patch_realtek.c
4388 @@ -2518,6 +2518,7 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
4389 SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110),
4390 SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ),
4391 SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN),
4392 + SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270),
4393 SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270),
4394 SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000),
4395 SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ),
4396 @@ -4844,7 +4845,6 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
4397 struct alc_spec *spec = codec->spec;
4398
4399 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
4400 - spec->shutup = alc_no_shutup; /* reduce click noise */
4401 spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
4402 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
4403 codec->power_save_node = 0; /* avoid click noises */
4404 @@ -5243,6 +5243,13 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
4405 /* for hda_fixup_thinkpad_acpi() */
4406 #include "thinkpad_helper.c"
4407
4408 +static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
4409 + const struct hda_fixup *fix, int action)
4410 +{
4411 + alc_fixup_no_shutup(codec, fix, action); /* reduce click noise */
4412 + hda_fixup_thinkpad_acpi(codec, fix, action);
4413 +}
4414 +
4415 /* for dell wmi mic mute led */
4416 #include "dell_wmi_helper.c"
4417
4418 @@ -5786,7 +5793,7 @@ static const struct hda_fixup alc269_fixups[] = {
4419 },
4420 [ALC269_FIXUP_THINKPAD_ACPI] = {
4421 .type = HDA_FIXUP_FUNC,
4422 - .v.func = hda_fixup_thinkpad_acpi,
4423 + .v.func = alc_fixup_thinkpad_acpi,
4424 .chained = true,
4425 .chain_id = ALC269_FIXUP_SKU_IGNORE,
4426 },
4427 @@ -6436,8 +6443,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4428 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
4429 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
4430 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
4431 + SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
4432 SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
4433 - SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
4434 + SND_PCI_QUIRK(0x17aa, 0x3136, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
4435 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
4436 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
4437 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
4438 @@ -6614,6 +6622,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4439 {0x14, 0x90170110},
4440 {0x19, 0x02a11030},
4441 {0x21, 0x02211020}),
4442 + SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
4443 + {0x14, 0x90170110},
4444 + {0x19, 0x02a11030},
4445 + {0x1a, 0x02a11040},
4446 + {0x1b, 0x01014020},
4447 + {0x21, 0x0221101f}),
4448 SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4449 {0x12, 0x90a60140},
4450 {0x14, 0x90170150},
4451 diff --git a/sound/soc/cirrus/edb93xx.c b/sound/soc/cirrus/edb93xx.c
4452 index c53bd6f2c2d7..3d011abaa266 100644
4453 --- a/sound/soc/cirrus/edb93xx.c
4454 +++ b/sound/soc/cirrus/edb93xx.c
4455 @@ -67,7 +67,7 @@ static struct snd_soc_dai_link edb93xx_dai = {
4456 .cpu_dai_name = "ep93xx-i2s",
4457 .codec_name = "spi0.0",
4458 .codec_dai_name = "cs4271-hifi",
4459 - .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF |
4460 + .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
4461 SND_SOC_DAIFMT_CBS_CFS,
4462 .ops = &edb93xx_ops,
4463 };
4464 diff --git a/sound/soc/cirrus/ep93xx-i2s.c b/sound/soc/cirrus/ep93xx-i2s.c
4465 index 934f8aefdd90..0dc3852c4621 100644
4466 --- a/sound/soc/cirrus/ep93xx-i2s.c
4467 +++ b/sound/soc/cirrus/ep93xx-i2s.c
4468 @@ -51,7 +51,9 @@
4469 #define EP93XX_I2S_WRDLEN_24 (1 << 0)
4470 #define EP93XX_I2S_WRDLEN_32 (2 << 0)
4471
4472 -#define EP93XX_I2S_LINCTRLDATA_R_JUST (1 << 2) /* Right justify */
4473 +#define EP93XX_I2S_RXLINCTRLDATA_R_JUST BIT(1) /* Right justify */
4474 +
4475 +#define EP93XX_I2S_TXLINCTRLDATA_R_JUST BIT(2) /* Right justify */
4476
4477 #define EP93XX_I2S_CLKCFG_LRS (1 << 0) /* lrclk polarity */
4478 #define EP93XX_I2S_CLKCFG_CKP (1 << 1) /* Bit clock polarity */
4479 @@ -170,25 +172,25 @@ static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
4480 unsigned int fmt)
4481 {
4482 struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(cpu_dai);
4483 - unsigned int clk_cfg, lin_ctrl;
4484 + unsigned int clk_cfg;
4485 + unsigned int txlin_ctrl = 0;
4486 + unsigned int rxlin_ctrl = 0;
4487
4488 clk_cfg = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXCLKCFG);
4489 - lin_ctrl = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXLINCTRLDATA);
4490
4491 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
4492 case SND_SOC_DAIFMT_I2S:
4493 clk_cfg |= EP93XX_I2S_CLKCFG_REL;
4494 - lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST;
4495 break;
4496
4497 case SND_SOC_DAIFMT_LEFT_J:
4498 clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
4499 - lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST;
4500 break;
4501
4502 case SND_SOC_DAIFMT_RIGHT_J:
4503 clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
4504 - lin_ctrl |= EP93XX_I2S_LINCTRLDATA_R_JUST;
4505 + rxlin_ctrl |= EP93XX_I2S_RXLINCTRLDATA_R_JUST;
4506 + txlin_ctrl |= EP93XX_I2S_TXLINCTRLDATA_R_JUST;
4507 break;
4508
4509 default:
4510 @@ -213,32 +215,32 @@ static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
4511 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
4512 case SND_SOC_DAIFMT_NB_NF:
4513 /* Negative bit clock, lrclk low on left word */
4514 - clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_REL);
4515 + clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS);
4516 break;
4517
4518 case SND_SOC_DAIFMT_NB_IF:
4519 /* Negative bit clock, lrclk low on right word */
4520 clk_cfg &= ~EP93XX_I2S_CLKCFG_CKP;
4521 - clk_cfg |= EP93XX_I2S_CLKCFG_REL;
4522 + clk_cfg |= EP93XX_I2S_CLKCFG_LRS;
4523 break;
4524
4525 case SND_SOC_DAIFMT_IB_NF:
4526 /* Positive bit clock, lrclk low on left word */
4527 clk_cfg |= EP93XX_I2S_CLKCFG_CKP;
4528 - clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
4529 + clk_cfg &= ~EP93XX_I2S_CLKCFG_LRS;
4530 break;
4531
4532 case SND_SOC_DAIFMT_IB_IF:
4533 /* Positive bit clock, lrclk low on right word */
4534 - clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_REL;
4535 + clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS;
4536 break;
4537 }
4538
4539 /* Write new register values */
4540 ep93xx_i2s_write_reg(info, EP93XX_I2S_RXCLKCFG, clk_cfg);
4541 ep93xx_i2s_write_reg(info, EP93XX_I2S_TXCLKCFG, clk_cfg);
4542 - ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, lin_ctrl);
4543 - ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, lin_ctrl);
4544 + ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, rxlin_ctrl);
4545 + ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, txlin_ctrl);
4546 return 0;
4547 }
4548
4549 diff --git a/sound/soc/cirrus/snappercl15.c b/sound/soc/cirrus/snappercl15.c
4550 index 2334ec19e7eb..11ff7b2672b2 100644
4551 --- a/sound/soc/cirrus/snappercl15.c
4552 +++ b/sound/soc/cirrus/snappercl15.c
4553 @@ -72,7 +72,7 @@ static struct snd_soc_dai_link snappercl15_dai = {
4554 .codec_dai_name = "tlv320aic23-hifi",
4555 .codec_name = "tlv320aic23-codec.0-001a",
4556 .platform_name = "ep93xx-i2s",
4557 - .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF |
4558 + .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
4559 SND_SOC_DAIFMT_CBS_CFS,
4560 .ops = &snappercl15_ops,
4561 };
4562 diff --git a/sound/soc/codecs/cs35l35.c b/sound/soc/codecs/cs35l35.c
4563 index 129978d1243e..51ce53e23599 100644
4564 --- a/sound/soc/codecs/cs35l35.c
4565 +++ b/sound/soc/codecs/cs35l35.c
4566 @@ -1106,6 +1106,7 @@ static struct regmap_config cs35l35_regmap = {
4567 .readable_reg = cs35l35_readable_register,
4568 .precious_reg = cs35l35_precious_register,
4569 .cache_type = REGCACHE_RBTREE,
4570 + .use_single_rw = true,
4571 };
4572
4573 static irqreturn_t cs35l35_irq(int irq, void *data)
4574 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
4575 index dcef67a9bd48..1c9f6a0d234f 100644
4576 --- a/sound/soc/soc-dapm.c
4577 +++ b/sound/soc/soc-dapm.c
4578 @@ -430,6 +430,8 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
4579 static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
4580 {
4581 struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
4582 +
4583 + list_del(&data->paths);
4584 kfree(data->wlist);
4585 kfree(data);
4586 }
4587 diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
4588 new file mode 100644
4589 index 000000000000..b4791b443a66
4590 --- /dev/null
4591 +++ b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
4592 @@ -0,0 +1,1453 @@
4593 +[
4594 + {
4595 + "CollectPEBSRecord": "1",
4596 + "PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
4597 + "EventCode": "0x2E",
4598 + "Counter": "0,1,2,3",
4599 + "UMask": "0x41",
4600 + "PEBScounters": "0,1,2,3",
4601 + "EventName": "LONGEST_LAT_CACHE.MISS",
4602 + "PDIR_COUNTER": "na",
4603 + "SampleAfterValue": "200003",
4604 + "BriefDescription": "L2 cache request misses"
4605 + },
4606 + {
4607 + "CollectPEBSRecord": "1",
4608 + "PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
4609 + "EventCode": "0x2E",
4610 + "Counter": "0,1,2,3",
4611 + "UMask": "0x4f",
4612 + "PEBScounters": "0,1,2,3",
4613 + "EventName": "LONGEST_LAT_CACHE.REFERENCE",
4614 + "PDIR_COUNTER": "na",
4615 + "SampleAfterValue": "200003",
4616 + "BriefDescription": "L2 cache requests"
4617 + },
4618 + {
4619 + "CollectPEBSRecord": "1",
4620 + "PublicDescription": "Counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the intra-die interconnect (IDI) fabric. The XQ may reject transactions from the L2Q (non-cacheable requests), L2 misses and L2 write-back victims.",
4621 + "EventCode": "0x30",
4622 + "Counter": "0,1,2,3",
4623 + "UMask": "0x0",
4624 + "PEBScounters": "0,1,2,3",
4625 + "EventName": "L2_REJECT_XQ.ALL",
4626 + "PDIR_COUNTER": "na",
4627 + "SampleAfterValue": "200003",
4628 + "BriefDescription": "Requests rejected by the XQ"
4629 + },
4630 + {
4631 + "CollectPEBSRecord": "1",
4632 + "PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
4633 + "EventCode": "0x31",
4634 + "Counter": "0,1,2,3",
4635 + "UMask": "0x0",
4636 + "PEBScounters": "0,1,2,3",
4637 + "EventName": "CORE_REJECT_L2Q.ALL",
4638 + "PDIR_COUNTER": "na",
4639 + "SampleAfterValue": "200003",
4640 + "BriefDescription": "Requests rejected by the L2Q"
4641 + },
4642 + {
4643 + "CollectPEBSRecord": "1",
4644 + "PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
4645 + "EventCode": "0x51",
4646 + "Counter": "0,1,2,3",
4647 + "UMask": "0x1",
4648 + "PEBScounters": "0,1,2,3",
4649 + "EventName": "DL1.REPLACEMENT",
4650 + "PDIR_COUNTER": "na",
4651 + "SampleAfterValue": "200003",
4652 + "BriefDescription": "L1 Cache evictions for dirty data"
4653 + },
4654 + {
4655 + "CollectPEBSRecord": "1",
4656 + "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.",
4657 + "EventCode": "0x86",
4658 + "Counter": "0,1,2,3",
4659 + "UMask": "0x2",
4660 + "PEBScounters": "0,1,2,3",
4661 + "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
4662 + "PDIR_COUNTER": "na",
4663 + "SampleAfterValue": "200003",
4664 + "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss."
4665 + },
4666 + {
4667 + "CollectPEBSRecord": "1",
4668 + "EventCode": "0xB7",
4669 + "Counter": "0,1,2,3",
4670 + "UMask": "0x1",
4671 + "PEBScounters": "0,1,2,3",
4672 + "EventName": "OFFCORE_RESPONSE",
4673 + "PDIR_COUNTER": "na",
4674 + "SampleAfterValue": "100007",
4675 + "BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)"
4676 + },
4677 + {
4678 + "PEBS": "2",
4679 + "CollectPEBSRecord": "2",
4680 + "PublicDescription": "Counts locked memory uops retired. This includes regular locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. See the SDM for a complete description of which memory load accesses are locks.",
4681 + "EventCode": "0xD0",
4682 + "Counter": "0,1,2,3",
4683 + "UMask": "0x21",
4684 + "PEBScounters": "0,1,2,3",
4685 + "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
4686 + "SampleAfterValue": "200003",
4687 + "BriefDescription": "Locked load uops retired (Precise event capable)"
4688 + },
4689 + {
4690 + "PEBS": "2",
4691 + "CollectPEBSRecord": "2",
4692 + "PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.",
4693 + "EventCode": "0xD0",
4694 + "Counter": "0,1,2,3",
4695 + "UMask": "0x41",
4696 + "PEBScounters": "0,1,2,3",
4697 + "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
4698 + "SampleAfterValue": "200003",
4699 + "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
4700 + },
4701 + {
4702 + "PEBS": "2",
4703 + "CollectPEBSRecord": "2",
4704 + "PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.",
4705 + "EventCode": "0xD0",
4706 + "Counter": "0,1,2,3",
4707 + "UMask": "0x42",
4708 + "PEBScounters": "0,1,2,3",
4709 + "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
4710 + "SampleAfterValue": "200003",
4711 + "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
4712 + },
4713 + {
4714 + "PEBS": "2",
4715 + "CollectPEBSRecord": "2",
4716 + "PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.",
4717 + "EventCode": "0xD0",
4718 + "Counter": "0,1,2,3",
4719 + "UMask": "0x43",
4720 + "PEBScounters": "0,1,2,3",
4721 + "EventName": "MEM_UOPS_RETIRED.SPLIT",
4722 + "SampleAfterValue": "200003",
4723 + "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
4724 + },
4725 + {
4726 + "PEBS": "2",
4727 + "CollectPEBSRecord": "2",
4728 + "PublicDescription": "Counts the number of load uops retired.",
4729 + "EventCode": "0xD0",
4730 + "Counter": "0,1,2,3",
4731 + "UMask": "0x81",
4732 + "PEBScounters": "0,1,2,3",
4733 + "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
4734 + "SampleAfterValue": "200003",
4735 + "BriefDescription": "Load uops retired (Precise event capable)"
4736 + },
4737 + {
4738 + "PEBS": "2",
4739 + "CollectPEBSRecord": "2",
4740 + "PublicDescription": "Counts the number of store uops retired.",
4741 + "EventCode": "0xD0",
4742 + "Counter": "0,1,2,3",
4743 + "UMask": "0x82",
4744 + "PEBScounters": "0,1,2,3",
4745 + "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
4746 + "SampleAfterValue": "200003",
4747 + "BriefDescription": "Store uops retired (Precise event capable)"
4748 + },
4749 + {
4750 + "PEBS": "2",
4751 + "CollectPEBSRecord": "2",
4752 + "PublicDescription": "Counts the number of memory uops retired that is either a loads or a store or both.",
4753 + "EventCode": "0xD0",
4754 + "Counter": "0,1,2,3",
4755 + "UMask": "0x83",
4756 + "PEBScounters": "0,1,2,3",
4757 + "EventName": "MEM_UOPS_RETIRED.ALL",
4758 + "SampleAfterValue": "200003",
4759 + "BriefDescription": "Memory uops retired (Precise event capable)"
4760 + },
4761 + {
4762 + "PEBS": "2",
4763 + "CollectPEBSRecord": "2",
4764 + "PublicDescription": "Counts load uops retired that hit the L1 data cache.",
4765 + "EventCode": "0xD1",
4766 + "Counter": "0,1,2,3",
4767 + "UMask": "0x1",
4768 + "PEBScounters": "0,1,2,3",
4769 + "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
4770 + "SampleAfterValue": "200003",
4771 + "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)"
4772 + },
4773 + {
4774 + "PEBS": "2",
4775 + "CollectPEBSRecord": "2",
4776 + "PublicDescription": "Counts load uops retired that hit in the L2 cache.",
4777 + "EventCode": "0xD1",
4778 + "Counter": "0,1,2,3",
4779 + "UMask": "0x2",
4780 + "PEBScounters": "0,1,2,3",
4781 + "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
4782 + "SampleAfterValue": "200003",
4783 + "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
4784 + },
4785 + {
4786 + "PEBS": "2",
4787 + "CollectPEBSRecord": "2",
4788 + "PublicDescription": "Counts load uops retired that miss the L1 data cache.",
4789 + "EventCode": "0xD1",
4790 + "Counter": "0,1,2,3",
4791 + "UMask": "0x8",
4792 + "PEBScounters": "0,1,2,3",
4793 + "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
4794 + "SampleAfterValue": "200003",
4795 + "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
4796 + },
4797 + {
4798 + "PEBS": "2",
4799 + "CollectPEBSRecord": "2",
4800 + "PublicDescription": "Counts load uops retired that miss in the L2 cache.",
4801 + "EventCode": "0xD1",
4802 + "Counter": "0,1,2,3",
4803 + "UMask": "0x10",
4804 + "PEBScounters": "0,1,2,3",
4805 + "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
4806 + "SampleAfterValue": "200003",
4807 + "BriefDescription": "Load uops retired that missed L2 (Precise event capable)"
4808 + },
4809 + {
4810 + "PEBS": "2",
4811 + "CollectPEBSRecord": "2",
4812 + "PublicDescription": "Counts load uops retired where the cache line containing the data was in the modified state of another core or modules cache (HITM). More specifically, this means that when the load address was checked by other caching agents (typically another processor) in the system, one of those caching agents indicated that they had a dirty copy of the data. Loads that obtain a HITM response incur greater latency than most is typical for a load. In addition, since HITM indicates that some other processor had this data in its cache, it implies that the data was shared between processors, or potentially was a lock or semaphore value. This event is useful for locating sharing, false sharing, and contended locks.",
4813 + "EventCode": "0xD1",
4814 + "Counter": "0,1,2,3",
4815 + "UMask": "0x20",
4816 + "PEBScounters": "0,1,2,3",
4817 + "EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
4818 + "SampleAfterValue": "200003",
4819 + "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)"
4820 + },
4821 + {
4822 + "PEBS": "2",
4823 + "CollectPEBSRecord": "2",
4824 + "PublicDescription": "Counts memory load uops retired where the data is retrieved from the WCB (or fill buffer), indicating that the load found its data while that data was in the process of being brought into the L1 cache. Typically a load will receive this indication when some other load or prefetch missed the L1 cache and was in the process of retrieving the cache line containing the data, but that process had not yet finished (and written the data back to the cache). For example, consider load X and Y, both referencing the same cache line that is not in the L1 cache. If load X misses cache first, it obtains and WCB (or fill buffer) and begins the process of requesting the data. When load Y requests the data, it will either hit the WCB, or the L1 cache, depending on exactly what time the request to Y occurs.",
4825 + "EventCode": "0xD1",
4826 + "Counter": "0,1,2,3",
4827 + "UMask": "0x40",
4828 + "PEBScounters": "0,1,2,3",
4829 + "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
4830 + "SampleAfterValue": "200003",
4831 + "BriefDescription": "Loads retired that hit WCB (Precise event capable)"
4832 + },
4833 + {
4834 + "PEBS": "2",
4835 + "CollectPEBSRecord": "2",
4836 + "PublicDescription": "Counts memory load uops retired where the data is retrieved from DRAM. Event is counted at retirement, so the speculative loads are ignored. A memory load can hit (or miss) the L1 cache, hit (or miss) the L2 cache, hit DRAM, hit in the WCB or receive a HITM response.",
4837 + "EventCode": "0xD1",
4838 + "Counter": "0,1,2,3",
4839 + "UMask": "0x80",
4840 + "PEBScounters": "0,1,2,3",
4841 + "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
4842 + "SampleAfterValue": "200003",
4843 + "BriefDescription": "Loads retired that came from DRAM (Precise event capable)"
4844 + },
4845 + {
4846 + "CollectPEBSRecord": "1",
4847 + "PublicDescription": "Counts demand cacheable data reads of full cache lines have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
4848 + "EventCode": "0xB7",
4849 + "MSRValue": "0x0000010001",
4850 + "Counter": "0,1,2,3",
4851 + "UMask": "0x1",
4852 + "PEBScounters": "0,1,2,3",
4853 + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
4854 + "PDIR_COUNTER": "na",
4855 + "MSRIndex": "0x1a6, 0x1a7",
4856 + "SampleAfterValue": "100007",
4857 + "BriefDescription": "Counts demand cacheable data reads of full cache lines have any transaction responses from the uncore subsystem.",
4858 + "Offcore": "1"
4859 + },
4860 + {
4861 + "CollectPEBSRecord": "1",
4862 + "PublicDescription": "Counts demand cacheable data reads of full cache lines hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
4863 + "EventCode": "0xB7",
4864 + "MSRValue": "0x0000040001",
4865 + "Counter": "0,1,2,3",
4866 + "UMask": "0x1",
4867 + "PEBScounters": "0,1,2,3",
4868 + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT",
4869 + "PDIR_COUNTER": "na",
4870 + "MSRIndex": "0x1a6, 0x1a7",
4871 + "SampleAfterValue": "100007",
4872 + "BriefDescription": "Counts demand cacheable data reads of full cache lines hit the L2 cache.",
4873 + "Offcore": "1"
4874 + },
4875 + {
4876 + "CollectPEBSRecord": "1",
4877 + "PublicDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
4878 + "EventCode": "0xB7",
4879 + "MSRValue": "0x0200000001",
4880 + "Counter": "0,1,2,3",
4881 + "UMask": "0x1",
4882 + "PEBScounters": "0,1,2,3",
4883 + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
4884 + "PDIR_COUNTER": "na",
4885 + "MSRIndex": "0x1a6, 0x1a7",
4886 + "SampleAfterValue": "100007",
4887 + "BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module. ",
4888 + "Offcore": "1"
4889 + },
4890 + {
4891 + "CollectPEBSRecord": "1",
4892 + "PublicDescription": "Counts demand cacheable data reads of full cache lines miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
4893 + "EventCode": "0xB7",
4894 + "MSRValue": "0x1000000001",
4895 + "Counter": "0,1,2,3",
4896 + "UMask": "0x1",
4897 + "PEBScounters": "0,1,2,3",
4898 + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
4899 + "PDIR_COUNTER": "na",
4900 + "MSRIndex": "0x1a6, 0x1a7",
4901 + "SampleAfterValue": "100007",
4902 + "BriefDescription": "Counts demand cacheable data reads of full cache lines miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
4903 + "Offcore": "1"
4904 + },
4905 + {
4906 + "CollectPEBSRecord": "1",
4907 + "PublicDescription": "Counts demand cacheable data reads of full cache lines outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
4908 + "EventCode": "0xB7",
4909 + "MSRValue": "0x4000000001",
4910 + "Counter": "0,1,2,3",
4911 + "UMask": "0x1",
4912 + "PEBScounters": "0,1,2,3",
4913 + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
4914 + "PDIR_COUNTER": "na",
4915 + "MSRIndex": "0x1a6",
4916 + "SampleAfterValue": "100007",
4917 + "BriefDescription": "Counts demand cacheable data reads of full cache lines outstanding, per cycle, from the time of the L2 miss to when any response is received.",
4918 + "Offcore": "1"
4919 + },
4920 + {
4921 + "CollectPEBSRecord": "1",
4922 + "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
4923 + "EventCode": "0xB7",
4924 + "MSRValue": "0x0000010002",
4925 + "Counter": "0,1,2,3",
4926 + "UMask": "0x1",
4927 + "PEBScounters": "0,1,2,3",
4928 + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
4929 + "PDIR_COUNTER": "na",
4930 + "MSRIndex": "0x1a6, 0x1a7",
4931 + "SampleAfterValue": "100007",
4932 + "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line have any transaction responses from the uncore subsystem.",
4933 + "Offcore": "1"
4934 + },
4935 + {
4936 + "CollectPEBSRecord": "1",
4937 + "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
4938 + "EventCode": "0xB7",
4939 + "MSRValue": "0x0000040002",
4940 + "Counter": "0,1,2,3",
4941 + "UMask": "0x1",
4942 + "PEBScounters": "0,1,2,3",
4943 + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT",
4944 + "PDIR_COUNTER": "na",
4945 + "MSRIndex": "0x1a6, 0x1a7",
4946 + "SampleAfterValue": "100007",
4947 + "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line hit the L2 cache.",
4948 + "Offcore": "1"
4949 + },
4950 + {
4951 + "CollectPEBSRecord": "1",
4952 + "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
4953 + "EventCode": "0xB7",
4954 + "MSRValue": "0x0200000002",
4955 + "Counter": "0,1,2,3",
4956 + "UMask": "0x1",
4957 + "PEBScounters": "0,1,2,3",
4958 + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
4959 + "PDIR_COUNTER": "na",
4960 + "MSRIndex": "0x1a6, 0x1a7",
4961 + "SampleAfterValue": "100007",
4962 + "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module. ",
4963 + "Offcore": "1"
4964 + },
4965 + {
4966 + "CollectPEBSRecord": "1",
4967 + "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
4968 + "EventCode": "0xB7",
4969 + "MSRValue": "0x1000000002",
4970 + "Counter": "0,1,2,3",
4971 + "UMask": "0x1",
4972 + "PEBScounters": "0,1,2,3",
4973 + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
4974 + "PDIR_COUNTER": "na",
4975 + "MSRIndex": "0x1a6, 0x1a7",
4976 + "SampleAfterValue": "100007",
4977 + "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
4978 + "Offcore": "1"
4979 + },
4980 + {
4981 + "CollectPEBSRecord": "1",
4982 + "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
4983 + "EventCode": "0xB7",
4984 + "MSRValue": "0x4000000002",
4985 + "Counter": "0,1,2,3",
4986 + "UMask": "0x1",
4987 + "PEBScounters": "0,1,2,3",
4988 + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
4989 + "PDIR_COUNTER": "na",
4990 + "MSRIndex": "0x1a6",
4991 + "SampleAfterValue": "100007",
4992 + "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line outstanding, per cycle, from the time of the L2 miss to when any response is received.",
4993 + "Offcore": "1"
4994 + },
4995 + {
4996 + "CollectPEBSRecord": "1",
4997 + "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
4998 + "EventCode": "0xB7",
4999 + "MSRValue": "0x0000010004",
5000 + "Counter": "0,1,2,3",
5001 + "UMask": "0x1",
5002 + "PEBScounters": "0,1,2,3",
5003 + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
5004 + "PDIR_COUNTER": "na",
5005 + "MSRIndex": "0x1a6, 0x1a7",
5006 + "SampleAfterValue": "100007",
5007 + "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache have any transaction responses from the uncore subsystem.",
5008 + "Offcore": "1"
5009 + },
5010 + {
5011 + "CollectPEBSRecord": "1",
5012 + "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5013 + "EventCode": "0xB7",
5014 + "MSRValue": "0x0000040004",
5015 + "Counter": "0,1,2,3",
5016 + "UMask": "0x1",
5017 + "PEBScounters": "0,1,2,3",
5018 + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT",
5019 + "PDIR_COUNTER": "na",
5020 + "MSRIndex": "0x1a6, 0x1a7",
5021 + "SampleAfterValue": "100007",
5022 + "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache hit the L2 cache.",
5023 + "Offcore": "1"
5024 + },
5025 + {
5026 + "CollectPEBSRecord": "1",
5027 + "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5028 + "EventCode": "0xB7",
5029 + "MSRValue": "0x0200000004",
5030 + "Counter": "0,1,2,3",
5031 + "UMask": "0x1",
5032 + "PEBScounters": "0,1,2,3",
5033 + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
5034 + "PDIR_COUNTER": "na",
5035 + "MSRIndex": "0x1a6, 0x1a7",
5036 + "SampleAfterValue": "100007",
5037 + "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module. ",
5038 + "Offcore": "1"
5039 + },
5040 + {
5041 + "CollectPEBSRecord": "1",
5042 + "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5043 + "EventCode": "0xB7",
5044 + "MSRValue": "0x1000000004",
5045 + "Counter": "0,1,2,3",
5046 + "UMask": "0x1",
5047 + "PEBScounters": "0,1,2,3",
5048 + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HITM_OTHER_CORE",
5049 + "PDIR_COUNTER": "na",
5050 + "MSRIndex": "0x1a6, 0x1a7",
5051 + "SampleAfterValue": "100007",
5052 + "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
5053 + "Offcore": "1"
5054 + },
5055 + {
5056 + "CollectPEBSRecord": "1",
5057 + "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5058 + "EventCode": "0xB7",
5059 + "MSRValue": "0x4000000004",
5060 + "Counter": "0,1,2,3",
5061 + "UMask": "0x1",
5062 + "PEBScounters": "0,1,2,3",
5063 + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
5064 + "PDIR_COUNTER": "na",
5065 + "MSRIndex": "0x1a6",
5066 + "SampleAfterValue": "100007",
5067 + "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache outstanding, per cycle, from the time of the L2 miss to when any response is received.",
5068 + "Offcore": "1"
5069 + },
5070 + {
5071 + "CollectPEBSRecord": "1",
5072 + "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5073 + "EventCode": "0xB7",
5074 + "MSRValue": "0x0000010008",
5075 + "Counter": "0,1,2,3",
5076 + "UMask": "0x1",
5077 + "PEBScounters": "0,1,2,3",
5078 + "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
5079 + "PDIR_COUNTER": "na",
5080 + "MSRIndex": "0x1a6, 0x1a7",
5081 + "SampleAfterValue": "100007",
5082 + "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions have any transaction responses from the uncore subsystem.",
5083 + "Offcore": "1"
5084 + },
5085 + {
5086 + "CollectPEBSRecord": "1",
5087 + "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5088 + "EventCode": "0xB7",
5089 + "MSRValue": "0x0000040008",
5090 + "Counter": "0,1,2,3",
5091 + "UMask": "0x1",
5092 + "PEBScounters": "0,1,2,3",
5093 + "EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT",
5094 + "PDIR_COUNTER": "na",
5095 + "MSRIndex": "0x1a6, 0x1a7",
5096 + "SampleAfterValue": "100007",
5097 + "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions hit the L2 cache.",
5098 + "Offcore": "1"
5099 + },
5100 + {
5101 + "CollectPEBSRecord": "1",
5102 + "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5103 + "EventCode": "0xB7",
5104 + "MSRValue": "0x0200000008",
5105 + "Counter": "0,1,2,3",
5106 + "UMask": "0x1",
5107 + "PEBScounters": "0,1,2,3",
5108 + "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
5109 + "PDIR_COUNTER": "na",
5110 + "MSRIndex": "0x1a6, 0x1a7",
5111 + "SampleAfterValue": "100007",
5112 + "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module. ",
5113 + "Offcore": "1"
5114 + },
5115 + {
5116 + "CollectPEBSRecord": "1",
5117 + "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5118 + "EventCode": "0xB7",
5119 + "MSRValue": "0x1000000008",
5120 + "Counter": "0,1,2,3",
5121 + "UMask": "0x1",
5122 + "PEBScounters": "0,1,2,3",
5123 + "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE",
5124 + "PDIR_COUNTER": "na",
5125 + "MSRIndex": "0x1a6, 0x1a7",
5126 + "SampleAfterValue": "100007",
5127 + "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
5128 + "Offcore": "1"
5129 + },
5130 + {
5131 + "CollectPEBSRecord": "1",
5132 + "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5133 + "EventCode": "0xB7",
5134 + "MSRValue": "0x4000000008",
5135 + "Counter": "0,1,2,3",
5136 + "UMask": "0x1",
5137 + "PEBScounters": "0,1,2,3",
5138 + "EventName": "OFFCORE_RESPONSE.COREWB.OUTSTANDING",
5139 + "PDIR_COUNTER": "na",
5140 + "MSRIndex": "0x1a6",
5141 + "SampleAfterValue": "100007",
5142 + "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions outstanding, per cycle, from the time of the L2 miss to when any response is received.",
5143 + "Offcore": "1"
5144 + },
5145 + {
5146 + "CollectPEBSRecord": "1",
5147 + "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5148 + "EventCode": "0xB7",
5149 + "MSRValue": "0x0000010010",
5150 + "Counter": "0,1,2,3",
5151 + "UMask": "0x1",
5152 + "PEBScounters": "0,1,2,3",
5153 + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
5154 + "PDIR_COUNTER": "na",
5155 + "MSRIndex": "0x1a6, 0x1a7",
5156 + "SampleAfterValue": "100007",
5157 + "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher have any transaction responses from the uncore subsystem.",
5158 + "Offcore": "1"
5159 + },
5160 + {
5161 + "CollectPEBSRecord": "1",
5162 + "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5163 + "EventCode": "0xB7",
5164 + "MSRValue": "0x0000040010",
5165 + "Counter": "0,1,2,3",
5166 + "UMask": "0x1",
5167 + "PEBScounters": "0,1,2,3",
5168 + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT",
5169 + "PDIR_COUNTER": "na",
5170 + "MSRIndex": "0x1a6, 0x1a7",
5171 + "SampleAfterValue": "100007",
5172 + "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher hit the L2 cache.",
5173 + "Offcore": "1"
5174 + },
5175 + {
5176 + "CollectPEBSRecord": "1",
5177 + "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5178 + "EventCode": "0xB7",
5179 + "MSRValue": "0x0200000010",
5180 + "Counter": "0,1,2,3",
5181 + "UMask": "0x1",
5182 + "PEBScounters": "0,1,2,3",
5183 + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
5184 + "PDIR_COUNTER": "na",
5185 + "MSRIndex": "0x1a6, 0x1a7",
5186 + "SampleAfterValue": "100007",
5187 + "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ",
5188 + "Offcore": "1"
5189 + },
5190 + {
5191 + "CollectPEBSRecord": "1",
5192 + "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5193 + "EventCode": "0xB7",
5194 + "MSRValue": "0x1000000010",
5195 + "Counter": "0,1,2,3",
5196 + "UMask": "0x1",
5197 + "PEBScounters": "0,1,2,3",
5198 + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
5199 + "PDIR_COUNTER": "na",
5200 + "MSRIndex": "0x1a6, 0x1a7",
5201 + "SampleAfterValue": "100007",
5202 + "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
5203 + "Offcore": "1"
5204 + },
5205 + {
5206 + "CollectPEBSRecord": "1",
5207 + "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5208 + "EventCode": "0xB7",
5209 + "MSRValue": "0x4000000010",
5210 + "Counter": "0,1,2,3",
5211 + "UMask": "0x1",
5212 + "PEBScounters": "0,1,2,3",
5213 + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.OUTSTANDING",
5214 + "PDIR_COUNTER": "na",
5215 + "MSRIndex": "0x1a6",
5216 + "SampleAfterValue": "100007",
5217 + "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
5218 + "Offcore": "1"
5219 + },
5220 + {
5221 + "CollectPEBSRecord": "1",
5222 + "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5223 + "EventCode": "0xB7",
5224 + "MSRValue": "0x0000010020",
5225 + "Counter": "0,1,2,3",
5226 + "UMask": "0x1",
5227 + "PEBScounters": "0,1,2,3",
5228 + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
5229 + "PDIR_COUNTER": "na",
5230 + "MSRIndex": "0x1a6, 0x1a7",
5231 + "SampleAfterValue": "100007",
5232 + "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher have any transaction responses from the uncore subsystem.",
5233 + "Offcore": "1"
5234 + },
5235 + {
5236 + "CollectPEBSRecord": "1",
5237 + "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5238 + "EventCode": "0xB7",
5239 + "MSRValue": "0x0000040020",
5240 + "Counter": "0,1,2,3",
5241 + "UMask": "0x1",
5242 + "PEBScounters": "0,1,2,3",
5243 + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT",
5244 + "PDIR_COUNTER": "na",
5245 + "MSRIndex": "0x1a6, 0x1a7",
5246 + "SampleAfterValue": "100007",
5247 + "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher hit the L2 cache.",
5248 + "Offcore": "1"
5249 + },
5250 + {
5251 + "CollectPEBSRecord": "1",
5252 + "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5253 + "EventCode": "0xB7",
5254 + "MSRValue": "0x0200000020",
5255 + "Counter": "0,1,2,3",
5256 + "UMask": "0x1",
5257 + "PEBScounters": "0,1,2,3",
5258 + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
5259 + "PDIR_COUNTER": "na",
5260 + "MSRIndex": "0x1a6, 0x1a7",
5261 + "SampleAfterValue": "100007",
5262 + "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ",
5263 + "Offcore": "1"
5264 + },
5265 + {
5266 + "CollectPEBSRecord": "1",
5267 + "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5268 + "EventCode": "0xB7",
5269 + "MSRValue": "0x1000000020",
5270 + "Counter": "0,1,2,3",
5271 + "UMask": "0x1",
5272 + "PEBScounters": "0,1,2,3",
5273 + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
5274 + "PDIR_COUNTER": "na",
5275 + "MSRIndex": "0x1a6, 0x1a7",
5276 + "SampleAfterValue": "100007",
5277 + "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
5278 + "Offcore": "1"
5279 + },
5280 + {
5281 + "CollectPEBSRecord": "1",
5282 + "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5283 + "EventCode": "0xB7",
5284 + "MSRValue": "0x4000000020",
5285 + "Counter": "0,1,2,3",
5286 + "UMask": "0x1",
5287 + "PEBScounters": "0,1,2,3",
5288 + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.OUTSTANDING",
5289 + "PDIR_COUNTER": "na",
5290 + "MSRIndex": "0x1a6",
5291 + "SampleAfterValue": "100007",
5292 + "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
5293 + "Offcore": "1"
5294 + },
5295 + {
5296 + "CollectPEBSRecord": "1",
5297 + "PublicDescription": "Counts bus lock and split lock requests have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5298 + "EventCode": "0xB7",
5299 + "MSRValue": "0x0000010400",
5300 + "Counter": "0,1,2,3",
5301 + "UMask": "0x1",
5302 + "PEBScounters": "0,1,2,3",
5303 + "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
5304 + "PDIR_COUNTER": "na",
5305 + "MSRIndex": "0x1a6, 0x1a7",
5306 + "SampleAfterValue": "100007",
5307 + "BriefDescription": "Counts bus lock and split lock requests have any transaction responses from the uncore subsystem.",
5308 + "Offcore": "1"
5309 + },
5310 + {
5311 + "CollectPEBSRecord": "1",
5312 + "PublicDescription": "Counts bus lock and split lock requests hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5313 + "EventCode": "0xB7",
5314 + "MSRValue": "0x0000040400",
5315 + "Counter": "0,1,2,3",
5316 + "UMask": "0x1",
5317 + "PEBScounters": "0,1,2,3",
5318 + "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT",
5319 + "PDIR_COUNTER": "na",
5320 + "MSRIndex": "0x1a6, 0x1a7",
5321 + "SampleAfterValue": "100007",
5322 + "BriefDescription": "Counts bus lock and split lock requests hit the L2 cache.",
5323 + "Offcore": "1"
5324 + },
5325 + {
5326 + "CollectPEBSRecord": "1",
5327 + "PublicDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5328 + "EventCode": "0xB7",
5329 + "MSRValue": "0x0200000400",
5330 + "Counter": "0,1,2,3",
5331 + "UMask": "0x1",
5332 + "PEBScounters": "0,1,2,3",
5333 + "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
5334 + "PDIR_COUNTER": "na",
5335 + "MSRIndex": "0x1a6, 0x1a7",
5336 + "SampleAfterValue": "100007",
5337 + "BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module. ",
5338 + "Offcore": "1"
5339 + },
5340 + {
5341 + "CollectPEBSRecord": "1",
5342 + "PublicDescription": "Counts bus lock and split lock requests miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5343 + "EventCode": "0xB7",
5344 + "MSRValue": "0x1000000400",
5345 + "Counter": "0,1,2,3",
5346 + "UMask": "0x1",
5347 + "PEBScounters": "0,1,2,3",
5348 + "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HITM_OTHER_CORE",
5349 + "PDIR_COUNTER": "na",
5350 + "MSRIndex": "0x1a6, 0x1a7",
5351 + "SampleAfterValue": "100007",
5352 + "BriefDescription": "Counts bus lock and split lock requests miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
5353 + "Offcore": "1"
5354 + },
5355 + {
5356 + "CollectPEBSRecord": "1",
5357 + "PublicDescription": "Counts bus lock and split lock requests outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5358 + "EventCode": "0xB7",
5359 + "MSRValue": "0x4000000400",
5360 + "Counter": "0,1,2,3",
5361 + "UMask": "0x1",
5362 + "PEBScounters": "0,1,2,3",
5363 + "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
5364 + "PDIR_COUNTER": "na",
5365 + "MSRIndex": "0x1a6",
5366 + "SampleAfterValue": "100007",
5367 + "BriefDescription": "Counts bus lock and split lock requests outstanding, per cycle, from the time of the L2 miss to when any response is received.",
5368 + "Offcore": "1"
5369 + },
5370 + {
5371 + "CollectPEBSRecord": "1",
5372 + "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5373 + "EventCode": "0xB7",
5374 + "MSRValue": "0x0000010800",
5375 + "Counter": "0,1,2,3",
5376 + "UMask": "0x1",
5377 + "PEBScounters": "0,1,2,3",
5378 + "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
5379 + "PDIR_COUNTER": "na",
5380 + "MSRIndex": "0x1a6, 0x1a7",
5381 + "SampleAfterValue": "100007",
5382 + "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes have any transaction responses from the uncore subsystem.",
5383 + "Offcore": "1"
5384 + },
5385 + {
5386 + "CollectPEBSRecord": "1",
5387 + "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5388 + "EventCode": "0xB7",
5389 + "MSRValue": "0x0000040800",
5390 + "Counter": "0,1,2,3",
5391 + "UMask": "0x1",
5392 + "PEBScounters": "0,1,2,3",
5393 + "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT",
5394 + "PDIR_COUNTER": "na",
5395 + "MSRIndex": "0x1a6, 0x1a7",
5396 + "SampleAfterValue": "100007",
5397 + "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes hit the L2 cache.",
5398 + "Offcore": "1"
5399 + },
5400 + {
5401 + "CollectPEBSRecord": "1",
5402 + "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5403 + "EventCode": "0xB7",
5404 + "MSRValue": "0x0200000800",
5405 + "Counter": "0,1,2,3",
5406 + "UMask": "0x1",
5407 + "PEBScounters": "0,1,2,3",
5408 + "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
5409 + "PDIR_COUNTER": "na",
5410 + "MSRIndex": "0x1a6, 0x1a7",
5411 + "SampleAfterValue": "100007",
5412 + "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module. ",
5413 + "Offcore": "1"
5414 + },
5415 + {
5416 + "CollectPEBSRecord": "1",
5417 + "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5418 + "EventCode": "0xB7",
5419 + "MSRValue": "0x1000000800",
5420 + "Counter": "0,1,2,3",
5421 + "UMask": "0x1",
5422 + "PEBScounters": "0,1,2,3",
5423 + "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
5424 + "PDIR_COUNTER": "na",
5425 + "MSRIndex": "0x1a6, 0x1a7",
5426 + "SampleAfterValue": "100007",
5427 + "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
5428 + "Offcore": "1"
5429 + },
5430 + {
5431 + "CollectPEBSRecord": "1",
5432 + "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5433 + "EventCode": "0xB7",
5434 + "MSRValue": "0x4000000800",
5435 + "Counter": "0,1,2,3",
5436 + "UMask": "0x1",
5437 + "PEBScounters": "0,1,2,3",
5438 + "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.OUTSTANDING",
5439 + "PDIR_COUNTER": "na",
5440 + "MSRIndex": "0x1a6",
5441 + "SampleAfterValue": "100007",
5442 + "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes outstanding, per cycle, from the time of the L2 miss to when any response is received.",
5443 + "Offcore": "1"
5444 + },
5445 + {
5446 + "CollectPEBSRecord": "1",
5447 + "PublicDescription": "Counts data cache lines requests by software prefetch instructions have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5448 + "EventCode": "0xB7",
5449 + "MSRValue": "0x0000011000",
5450 + "Counter": "0,1,2,3",
5451 + "UMask": "0x1",
5452 + "PEBScounters": "0,1,2,3",
5453 + "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.ANY_RESPONSE",
5454 + "PDIR_COUNTER": "na",
5455 + "MSRIndex": "0x1a6, 0x1a7",
5456 + "SampleAfterValue": "100007",
5457 + "BriefDescription": "Counts data cache lines requests by software prefetch instructions have any transaction responses from the uncore subsystem.",
5458 + "Offcore": "1"
5459 + },
5460 + {
5461 + "CollectPEBSRecord": "1",
5462 + "PublicDescription": "Counts data cache lines requests by software prefetch instructions hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5463 + "EventCode": "0xB7",
5464 + "MSRValue": "0x0000041000",
5465 + "Counter": "0,1,2,3",
5466 + "UMask": "0x1",
5467 + "PEBScounters": "0,1,2,3",
5468 + "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT",
5469 + "PDIR_COUNTER": "na",
5470 + "MSRIndex": "0x1a6, 0x1a7",
5471 + "SampleAfterValue": "100007",
5472 + "BriefDescription": "Counts data cache lines requests by software prefetch instructions hit the L2 cache.",
5473 + "Offcore": "1"
5474 + },
5475 + {
5476 + "CollectPEBSRecord": "1",
5477 + "PublicDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5478 + "EventCode": "0xB7",
5479 + "MSRValue": "0x0200001000",
5480 + "Counter": "0,1,2,3",
5481 + "UMask": "0x1",
5482 + "PEBScounters": "0,1,2,3",
5483 + "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
5484 + "PDIR_COUNTER": "na",
5485 + "MSRIndex": "0x1a6, 0x1a7",
5486 + "SampleAfterValue": "100007",
5487 + "BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module. ",
5488 + "Offcore": "1"
5489 + },
5490 + {
5491 + "CollectPEBSRecord": "1",
5492 + "PublicDescription": "Counts data cache lines requests by software prefetch instructions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5493 + "EventCode": "0xB7",
5494 + "MSRValue": "0x1000001000",
5495 + "Counter": "0,1,2,3",
5496 + "UMask": "0x1",
5497 + "PEBScounters": "0,1,2,3",
5498 + "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE",
5499 + "PDIR_COUNTER": "na",
5500 + "MSRIndex": "0x1a6, 0x1a7",
5501 + "SampleAfterValue": "100007",
5502 + "BriefDescription": "Counts data cache lines requests by software prefetch instructions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
5503 + "Offcore": "1"
5504 + },
5505 + {
5506 + "CollectPEBSRecord": "1",
5507 + "PublicDescription": "Counts data cache lines requests by software prefetch instructions outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5508 + "EventCode": "0xB7",
5509 + "MSRValue": "0x4000001000",
5510 + "Counter": "0,1,2,3",
5511 + "UMask": "0x1",
5512 + "PEBScounters": "0,1,2,3",
5513 + "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.OUTSTANDING",
5514 + "PDIR_COUNTER": "na",
5515 + "MSRIndex": "0x1a6",
5516 + "SampleAfterValue": "100007",
5517 + "BriefDescription": "Counts data cache lines requests by software prefetch instructions outstanding, per cycle, from the time of the L2 miss to when any response is received.",
5518 + "Offcore": "1"
5519 + },
5520 + {
5521 + "CollectPEBSRecord": "1",
5522 + "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5523 + "EventCode": "0xB7",
5524 + "MSRValue": "0x0000012000",
5525 + "Counter": "0,1,2,3",
5526 + "UMask": "0x1",
5527 + "PEBScounters": "0,1,2,3",
5528 + "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
5529 + "PDIR_COUNTER": "na",
5530 + "MSRIndex": "0x1a6, 0x1a7",
5531 + "SampleAfterValue": "100007",
5532 + "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher have any transaction responses from the uncore subsystem.",
5533 + "Offcore": "1"
5534 + },
5535 + {
5536 + "CollectPEBSRecord": "1",
5537 + "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5538 + "EventCode": "0xB7",
5539 + "MSRValue": "0x0000042000",
5540 + "Counter": "0,1,2,3",
5541 + "UMask": "0x1",
5542 + "PEBScounters": "0,1,2,3",
5543 + "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT",
5544 + "PDIR_COUNTER": "na",
5545 + "MSRIndex": "0x1a6, 0x1a7",
5546 + "SampleAfterValue": "100007",
5547 + "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher hit the L2 cache.",
5548 + "Offcore": "1"
5549 + },
5550 + {
5551 + "CollectPEBSRecord": "1",
5552 + "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5553 + "EventCode": "0xB7",
5554 + "MSRValue": "0x0200002000",
5555 + "Counter": "0,1,2,3",
5556 + "UMask": "0x1",
5557 + "PEBScounters": "0,1,2,3",
5558 + "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
5559 + "PDIR_COUNTER": "na",
5560 + "MSRIndex": "0x1a6, 0x1a7",
5561 + "SampleAfterValue": "100007",
5562 + "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ",
5563 + "Offcore": "1"
5564 + },
5565 + {
5566 + "CollectPEBSRecord": "1",
5567 + "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5568 + "EventCode": "0xB7",
5569 + "MSRValue": "0x1000002000",
5570 + "Counter": "0,1,2,3",
5571 + "UMask": "0x1",
5572 + "PEBScounters": "0,1,2,3",
5573 + "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
5574 + "PDIR_COUNTER": "na",
5575 + "MSRIndex": "0x1a6, 0x1a7",
5576 + "SampleAfterValue": "100007",
5577 + "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
5578 + "Offcore": "1"
5579 + },
5580 + {
5581 + "CollectPEBSRecord": "1",
5582 + "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5583 + "EventCode": "0xB7",
5584 + "MSRValue": "0x4000002000",
5585 + "Counter": "0,1,2,3",
5586 + "UMask": "0x1",
5587 + "PEBScounters": "0,1,2,3",
5588 + "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
5589 + "PDIR_COUNTER": "na",
5590 + "MSRIndex": "0x1a6",
5591 + "SampleAfterValue": "100007",
5592 + "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
5593 + "Offcore": "1"
5594 + },
5595 + {
5596 + "CollectPEBSRecord": "1",
5597 + "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5598 + "EventCode": "0xB7",
5599 + "MSRValue": "0x0000014800",
5600 + "Counter": "0,1,2,3",
5601 + "UMask": "0x1",
5602 + "PEBScounters": "0,1,2,3",
5603 + "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
5604 + "PDIR_COUNTER": "na",
5605 + "MSRIndex": "0x1a6, 0x1a7",
5606 + "SampleAfterValue": "100007",
5607 + "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region have any transaction responses from the uncore subsystem.",
5608 + "Offcore": "1"
5609 + },
5610 + {
5611 + "CollectPEBSRecord": "1",
5612 + "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5613 + "EventCode": "0xB7",
5614 + "MSRValue": "0x0000044800",
5615 + "Counter": "0,1,2,3",
5616 + "UMask": "0x1",
5617 + "PEBScounters": "0,1,2,3",
5618 + "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT",
5619 + "PDIR_COUNTER": "na",
5620 + "MSRIndex": "0x1a6, 0x1a7",
5621 + "SampleAfterValue": "100007",
5622 + "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region hit the L2 cache.",
5623 + "Offcore": "1"
5624 + },
5625 + {
5626 + "CollectPEBSRecord": "1",
5627 + "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5628 + "EventCode": "0xB7",
5629 + "MSRValue": "0x0200004800",
5630 + "Counter": "0,1,2,3",
5631 + "UMask": "0x1",
5632 + "PEBScounters": "0,1,2,3",
5633 + "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
5634 + "PDIR_COUNTER": "na",
5635 + "MSRIndex": "0x1a6, 0x1a7",
5636 + "SampleAfterValue": "100007",
5637 + "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module. ",
5638 + "Offcore": "1"
5639 + },
5640 + {
5641 + "CollectPEBSRecord": "1",
5642 + "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5643 + "EventCode": "0xB7",
5644 + "MSRValue": "0x1000004800",
5645 + "Counter": "0,1,2,3",
5646 + "UMask": "0x1",
5647 + "PEBScounters": "0,1,2,3",
5648 + "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
5649 + "PDIR_COUNTER": "na",
5650 + "MSRIndex": "0x1a6, 0x1a7",
5651 + "SampleAfterValue": "100007",
5652 + "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
5653 + "Offcore": "1"
5654 + },
5655 + {
5656 + "CollectPEBSRecord": "1",
5657 + "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5658 + "EventCode": "0xB7",
5659 + "MSRValue": "0x4000004800",
5660 + "Counter": "0,1,2,3",
5661 + "UMask": "0x1",
5662 + "PEBScounters": "0,1,2,3",
5663 + "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.OUTSTANDING",
5664 + "PDIR_COUNTER": "na",
5665 + "MSRIndex": "0x1a6",
5666 + "SampleAfterValue": "100007",
5667 + "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region outstanding, per cycle, from the time of the L2 miss to when any response is received.",
5668 + "Offcore": "1"
5669 + },
5670 + {
5671 + "CollectPEBSRecord": "1",
5672 + "PublicDescription": "Counts requests to the uncore subsystem have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5673 + "EventCode": "0xB7",
5674 + "MSRValue": "0x0000018000",
5675 + "Counter": "0,1,2,3",
5676 + "UMask": "0x1",
5677 + "PEBScounters": "0,1,2,3",
5678 + "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
5679 + "PDIR_COUNTER": "na",
5680 + "MSRIndex": "0x1a6, 0x1a7",
5681 + "SampleAfterValue": "100007",
5682 + "BriefDescription": "Counts requests to the uncore subsystem have any transaction responses from the uncore subsystem.",
5683 + "Offcore": "1"
5684 + },
5685 + {
5686 + "CollectPEBSRecord": "1",
5687 + "PublicDescription": "Counts requests to the uncore subsystem hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5688 + "EventCode": "0xB7",
5689 + "MSRValue": "0x0000048000",
5690 + "Counter": "0,1,2,3",
5691 + "UMask": "0x1",
5692 + "PEBScounters": "0,1,2,3",
5693 + "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT",
5694 + "PDIR_COUNTER": "na",
5695 + "MSRIndex": "0x1a6, 0x1a7",
5696 + "SampleAfterValue": "100007",
5697 + "BriefDescription": "Counts requests to the uncore subsystem hit the L2 cache.",
5698 + "Offcore": "1"
5699 + },
5700 + {
5701 + "CollectPEBSRecord": "1",
5702 + "PublicDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5703 + "EventCode": "0xB7",
5704 + "MSRValue": "0x0200008000",
5705 + "Counter": "0,1,2,3",
5706 + "UMask": "0x1",
5707 + "PEBScounters": "0,1,2,3",
5708 + "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
5709 + "PDIR_COUNTER": "na",
5710 + "MSRIndex": "0x1a6, 0x1a7",
5711 + "SampleAfterValue": "100007",
5712 + "BriefDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module. ",
5713 + "Offcore": "1"
5714 + },
5715 + {
5716 + "CollectPEBSRecord": "1",
5717 + "PublicDescription": "Counts requests to the uncore subsystem miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5718 + "EventCode": "0xB7",
5719 + "MSRValue": "0x1000008000",
5720 + "Counter": "0,1,2,3",
5721 + "UMask": "0x1",
5722 + "PEBScounters": "0,1,2,3",
5723 + "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
5724 + "PDIR_COUNTER": "na",
5725 + "MSRIndex": "0x1a6, 0x1a7",
5726 + "SampleAfterValue": "100007",
5727 + "BriefDescription": "Counts requests to the uncore subsystem miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
5728 + "Offcore": "1"
5729 + },
5730 + {
5731 + "CollectPEBSRecord": "1",
5732 + "PublicDescription": "Counts requests to the uncore subsystem outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5733 + "EventCode": "0xB7",
5734 + "MSRValue": "0x4000008000",
5735 + "Counter": "0,1,2,3",
5736 + "UMask": "0x1",
5737 + "PEBScounters": "0,1,2,3",
5738 + "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
5739 + "PDIR_COUNTER": "na",
5740 + "MSRIndex": "0x1a6",
5741 + "SampleAfterValue": "100007",
5742 + "BriefDescription": "Counts requests to the uncore subsystem outstanding, per cycle, from the time of the L2 miss to when any response is received.",
5743 + "Offcore": "1"
5744 + },
5745 + {
5746 + "CollectPEBSRecord": "1",
5747 + "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5748 + "EventCode": "0xB7",
5749 + "MSRValue": "0x0000013010",
5750 + "Counter": "0,1,2,3",
5751 + "UMask": "0x1",
5752 + "PEBScounters": "0,1,2,3",
5753 + "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.ANY_RESPONSE",
5754 + "PDIR_COUNTER": "na",
5755 + "MSRIndex": "0x1a6, 0x1a7",
5756 + "SampleAfterValue": "100007",
5757 + "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers have any transaction responses from the uncore subsystem.",
5758 + "Offcore": "1"
5759 + },
5760 + {
5761 + "CollectPEBSRecord": "1",
5762 + "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5763 + "EventCode": "0xB7",
5764 + "MSRValue": "0x0000043010",
5765 + "Counter": "0,1,2,3",
5766 + "UMask": "0x1",
5767 + "PEBScounters": "0,1,2,3",
5768 + "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT",
5769 + "PDIR_COUNTER": "na",
5770 + "MSRIndex": "0x1a6, 0x1a7",
5771 + "SampleAfterValue": "100007",
5772 + "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers hit the L2 cache.",
5773 + "Offcore": "1"
5774 + },
5775 + {
5776 + "CollectPEBSRecord": "1",
5777 + "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5778 + "EventCode": "0xB7",
5779 + "MSRValue": "0x0200003010",
5780 + "Counter": "0,1,2,3",
5781 + "UMask": "0x1",
5782 + "PEBScounters": "0,1,2,3",
5783 + "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
5784 + "PDIR_COUNTER": "na",
5785 + "MSRIndex": "0x1a6, 0x1a7",
5786 + "SampleAfterValue": "100007",
5787 + "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module. ",
5788 + "Offcore": "1"
5789 + },
5790 + {
5791 + "CollectPEBSRecord": "1",
5792 + "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5793 + "EventCode": "0xB7",
5794 + "MSRValue": "0x1000003010",
5795 + "Counter": "0,1,2,3",
5796 + "UMask": "0x1",
5797 + "PEBScounters": "0,1,2,3",
5798 + "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE",
5799 + "PDIR_COUNTER": "na",
5800 + "MSRIndex": "0x1a6, 0x1a7",
5801 + "SampleAfterValue": "100007",
5802 + "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
5803 + "Offcore": "1"
5804 + },
5805 + {
5806 + "CollectPEBSRecord": "1",
5807 + "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5808 + "EventCode": "0xB7",
5809 + "MSRValue": "0x4000003010",
5810 + "Counter": "0,1,2,3",
5811 + "UMask": "0x1",
5812 + "PEBScounters": "0,1,2,3",
5813 + "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.OUTSTANDING",
5814 + "PDIR_COUNTER": "na",
5815 + "MSRIndex": "0x1a6",
5816 + "SampleAfterValue": "100007",
5817 + "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers outstanding, per cycle, from the time of the L2 miss to when any response is received.",
5818 + "Offcore": "1"
5819 + },
5820 + {
5821 + "CollectPEBSRecord": "1",
5822 + "PublicDescription": "Counts data reads (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5823 + "EventCode": "0xB7",
5824 + "MSRValue": "0x0000013091",
5825 + "Counter": "0,1,2,3",
5826 + "UMask": "0x1",
5827 + "PEBScounters": "0,1,2,3",
5828 + "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
5829 + "PDIR_COUNTER": "na",
5830 + "MSRIndex": "0x1a6, 0x1a7",
5831 + "SampleAfterValue": "100007",
5832 + "BriefDescription": "Counts data reads (demand & prefetch) have any transaction responses from the uncore subsystem.",
5833 + "Offcore": "1"
5834 + },
5835 + {
5836 + "CollectPEBSRecord": "1",
5837 + "PublicDescription": "Counts data reads (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5838 + "EventCode": "0xB7",
5839 + "MSRValue": "0x0000043091",
5840 + "Counter": "0,1,2,3",
5841 + "UMask": "0x1",
5842 + "PEBScounters": "0,1,2,3",
5843 + "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT",
5844 + "PDIR_COUNTER": "na",
5845 + "MSRIndex": "0x1a6, 0x1a7",
5846 + "SampleAfterValue": "100007",
5847 + "BriefDescription": "Counts data reads (demand & prefetch) hit the L2 cache.",
5848 + "Offcore": "1"
5849 + },
5850 + {
5851 + "CollectPEBSRecord": "1",
5852 + "PublicDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5853 + "EventCode": "0xB7",
5854 + "MSRValue": "0x0200003091",
5855 + "Counter": "0,1,2,3",
5856 + "UMask": "0x1",
5857 + "PEBScounters": "0,1,2,3",
5858 + "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
5859 + "PDIR_COUNTER": "na",
5860 + "MSRIndex": "0x1a6, 0x1a7",
5861 + "SampleAfterValue": "100007",
5862 + "BriefDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ",
5863 + "Offcore": "1"
5864 + },
5865 + {
5866 + "CollectPEBSRecord": "1",
5867 + "PublicDescription": "Counts data reads (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5868 + "EventCode": "0xB7",
5869 + "MSRValue": "0x1000003091",
5870 + "Counter": "0,1,2,3",
5871 + "UMask": "0x1",
5872 + "PEBScounters": "0,1,2,3",
5873 + "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
5874 + "PDIR_COUNTER": "na",
5875 + "MSRIndex": "0x1a6, 0x1a7",
5876 + "SampleAfterValue": "100007",
5877 + "BriefDescription": "Counts data reads (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
5878 + "Offcore": "1"
5879 + },
5880 + {
5881 + "CollectPEBSRecord": "1",
5882 + "PublicDescription": "Counts data reads (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5883 + "EventCode": "0xB7",
5884 + "MSRValue": "0x4000003091",
5885 + "Counter": "0,1,2,3",
5886 + "UMask": "0x1",
5887 + "PEBScounters": "0,1,2,3",
5888 + "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
5889 + "PDIR_COUNTER": "na",
5890 + "MSRIndex": "0x1a6",
5891 + "SampleAfterValue": "100007",
5892 + "BriefDescription": "Counts data reads (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
5893 + "Offcore": "1"
5894 + },
5895 + {
5896 + "CollectPEBSRecord": "1",
5897 + "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5898 + "EventCode": "0xB7",
5899 + "MSRValue": "0x0000010022",
5900 + "Counter": "0,1,2,3",
5901 + "UMask": "0x1",
5902 + "PEBScounters": "0,1,2,3",
5903 + "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
5904 + "PDIR_COUNTER": "na",
5905 + "MSRIndex": "0x1a6, 0x1a7",
5906 + "SampleAfterValue": "100007",
5907 + "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem.",
5908 + "Offcore": "1"
5909 + },
5910 + {
5911 + "CollectPEBSRecord": "1",
5912 + "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5913 + "EventCode": "0xB7",
5914 + "MSRValue": "0x0000040022",
5915 + "Counter": "0,1,2,3",
5916 + "UMask": "0x1",
5917 + "PEBScounters": "0,1,2,3",
5918 + "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT",
5919 + "PDIR_COUNTER": "na",
5920 + "MSRIndex": "0x1a6, 0x1a7",
5921 + "SampleAfterValue": "100007",
5922 + "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) hit the L2 cache.",
5923 + "Offcore": "1"
5924 + },
5925 + {
5926 + "CollectPEBSRecord": "1",
5927 + "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5928 + "EventCode": "0xB7",
5929 + "MSRValue": "0x0200000022",
5930 + "Counter": "0,1,2,3",
5931 + "UMask": "0x1",
5932 + "PEBScounters": "0,1,2,3",
5933 + "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
5934 + "PDIR_COUNTER": "na",
5935 + "MSRIndex": "0x1a6, 0x1a7",
5936 + "SampleAfterValue": "100007",
5937 + "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ",
5938 + "Offcore": "1"
5939 + },
5940 + {
5941 + "CollectPEBSRecord": "1",
5942 + "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5943 + "EventCode": "0xB7",
5944 + "MSRValue": "0x1000000022",
5945 + "Counter": "0,1,2,3",
5946 + "UMask": "0x1",
5947 + "PEBScounters": "0,1,2,3",
5948 + "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
5949 + "PDIR_COUNTER": "na",
5950 + "MSRIndex": "0x1a6, 0x1a7",
5951 + "SampleAfterValue": "100007",
5952 + "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
5953 + "Offcore": "1"
5954 + },
5955 + {
5956 + "CollectPEBSRecord": "1",
5957 + "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5958 + "EventCode": "0xB7",
5959 + "MSRValue": "0x4000000022",
5960 + "Counter": "0,1,2,3",
5961 + "UMask": "0x1",
5962 + "PEBScounters": "0,1,2,3",
5963 + "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
5964 + "PDIR_COUNTER": "na",
5965 + "MSRIndex": "0x1a6",
5966 + "SampleAfterValue": "100007",
5967 + "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
5968 + "Offcore": "1"
5969 + },
5970 + {
5971 + "CollectPEBSRecord": "1",
5972 + "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5973 + "EventCode": "0xB7",
5974 + "MSRValue": "0x00000132b7",
5975 + "Counter": "0,1,2,3",
5976 + "UMask": "0x1",
5977 + "PEBScounters": "0,1,2,3",
5978 + "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
5979 + "PDIR_COUNTER": "na",
5980 + "MSRIndex": "0x1a6, 0x1a7",
5981 + "SampleAfterValue": "100007",
5982 + "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem.",
5983 + "Offcore": "1"
5984 + },
5985 + {
5986 + "CollectPEBSRecord": "1",
5987 + "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
5988 + "EventCode": "0xB7",
5989 + "MSRValue": "0x00000432b7",
5990 + "Counter": "0,1,2,3",
5991 + "UMask": "0x1",
5992 + "PEBScounters": "0,1,2,3",
5993 + "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT",
5994 + "PDIR_COUNTER": "na",
5995 + "MSRIndex": "0x1a6, 0x1a7",
5996 + "SampleAfterValue": "100007",
5997 + "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) hit the L2 cache.",
5998 + "Offcore": "1"
5999 + },
6000 + {
6001 + "CollectPEBSRecord": "1",
6002 + "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
6003 + "EventCode": "0xB7",
6004 + "MSRValue": "0x02000032b7",
6005 + "Counter": "0,1,2,3",
6006 + "UMask": "0x1",
6007 + "PEBScounters": "0,1,2,3",
6008 + "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
6009 + "PDIR_COUNTER": "na",
6010 + "MSRIndex": "0x1a6, 0x1a7",
6011 + "SampleAfterValue": "100007",
6012 + "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ",
6013 + "Offcore": "1"
6014 + },
6015 + {
6016 + "CollectPEBSRecord": "1",
6017 + "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
6018 + "EventCode": "0xB7",
6019 + "MSRValue": "0x10000032b7",
6020 + "Counter": "0,1,2,3",
6021 + "UMask": "0x1",
6022 + "PEBScounters": "0,1,2,3",
6023 + "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE",
6024 + "PDIR_COUNTER": "na",
6025 + "MSRIndex": "0x1a6, 0x1a7",
6026 + "SampleAfterValue": "100007",
6027 + "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
6028 + "Offcore": "1"
6029 + },
6030 + {
6031 + "CollectPEBSRecord": "1",
6032 + "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
6033 + "EventCode": "0xB7",
6034 + "MSRValue": "0x40000032b7",
6035 + "Counter": "0,1,2,3",
6036 + "UMask": "0x1",
6037 + "PEBScounters": "0,1,2,3",
6038 + "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
6039 + "PDIR_COUNTER": "na",
6040 + "MSRIndex": "0x1a6",
6041 + "SampleAfterValue": "100007",
6042 + "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
6043 + "Offcore": "1"
6044 + }
6045 +]
6046 \ No newline at end of file
6047 diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json b/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json
6048 new file mode 100644
6049 index 000000000000..a7878965ceab
6050 --- /dev/null
6051 +++ b/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json
6052 @@ -0,0 +1,62 @@
6053 +[
6054 + {
6055 + "CollectPEBSRecord": "1",
6056 + "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is in the ICache (hit). The event strives to count on a cache line basis, so that multiple accesses which hit in a single cache line count as one ICACHE.HIT. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
6057 + "EventCode": "0x80",
6058 + "Counter": "0,1,2,3",
6059 + "UMask": "0x1",
6060 + "PEBScounters": "0,1,2,3",
6061 + "EventName": "ICACHE.HIT",
6062 + "PDIR_COUNTER": "na",
6063 + "SampleAfterValue": "200003",
6064 + "BriefDescription": "References per ICache line that are available in the ICache (hit). This event counts differently than Intel processors based on Silvermont microarchitecture"
6065 + },
6066 + {
6067 + "CollectPEBSRecord": "1",
6068 + "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is not in the ICache (miss). The event strives to count on a cache line basis, so that multiple accesses which miss in a single cache line count as one ICACHE.MISS. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is not in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
6069 + "EventCode": "0x80",
6070 + "Counter": "0,1,2,3",
6071 + "UMask": "0x2",
6072 + "PEBScounters": "0,1,2,3",
6073 + "EventName": "ICACHE.MISSES",
6074 + "PDIR_COUNTER": "na",
6075 + "SampleAfterValue": "200003",
6076 + "BriefDescription": "References per ICache line that are not available in the ICache (miss). This event counts differently than Intel processors based on Silvermont microarchitecture"
6077 + },
6078 + {
6079 + "CollectPEBSRecord": "1",
6080 + "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line. The event strives to count on a cache line basis, so that multiple fetches to a single cache line count as one ICACHE.ACCESS. Specifically, the event counts when accesses from straight line code crosses the cache line boundary, or when a branch target is to a new line.\r\nThis event counts differently than Intel processors based on Silvermont microarchitecture.",
6081 + "EventCode": "0x80",
6082 + "Counter": "0,1,2,3",
6083 + "UMask": "0x3",
6084 + "PEBScounters": "0,1,2,3",
6085 + "EventName": "ICACHE.ACCESSES",
6086 + "PDIR_COUNTER": "na",
6087 + "SampleAfterValue": "200003",
6088 + "BriefDescription": "References per ICache line. This event counts differently than Intel processors based on Silvermont microarchitecture"
6089 + },
6090 + {
6091 + "CollectPEBSRecord": "1",
6092 + "PublicDescription": "Counts the number of times the Microcode Sequencer (MS) starts a flow of uops from the MSROM. It does not count every time a uop is read from the MSROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort that initiates a flow of uops. The event will count MS startups for uops that are speculative, and subsequently cleared by branch mispredict or a machine clear.",
6093 + "EventCode": "0xE7",
6094 + "Counter": "0,1,2,3",
6095 + "UMask": "0x1",
6096 + "PEBScounters": "0,1,2,3",
6097 + "EventName": "MS_DECODED.MS_ENTRY",
6098 + "PDIR_COUNTER": "na",
6099 + "SampleAfterValue": "200003",
6100 + "BriefDescription": "MS decode starts"
6101 + },
6102 + {
6103 + "CollectPEBSRecord": "1",
6104 + "PublicDescription": "Counts the number of times the prediction (from the predecode cache) for instruction length is incorrect.",
6105 + "EventCode": "0xE9",
6106 + "Counter": "0,1,2,3",
6107 + "UMask": "0x1",
6108 + "PEBScounters": "0,1,2,3",
6109 + "EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
6110 + "PDIR_COUNTER": "na",
6111 + "SampleAfterValue": "200003",
6112 + "BriefDescription": "Decode restrictions due to predicting wrong instruction length"
6113 + }
6114 +]
6115 \ No newline at end of file
6116 diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json
6117 new file mode 100644
6118 index 000000000000..91e0815f3ffb
6119 --- /dev/null
6120 +++ b/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json
6121 @@ -0,0 +1,38 @@
6122 +[
6123 + {
6124 + "PEBS": "2",
6125 + "CollectPEBSRecord": "2",
6126 + "PublicDescription": "Counts when a memory load uop that spans a page boundary (a split) is retired.",
6127 + "EventCode": "0x13",
6128 + "Counter": "0,1,2,3",
6129 + "UMask": "0x2",
6130 + "PEBScounters": "0,1,2,3",
6131 + "EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
6132 + "SampleAfterValue": "200003",
6133 + "BriefDescription": "Load uops that split a page (Precise event capable)"
6134 + },
6135 + {
6136 + "PEBS": "2",
6137 + "CollectPEBSRecord": "2",
6138 + "PublicDescription": "Counts when a memory store uop that spans a page boundary (a split) is retired.",
6139 + "EventCode": "0x13",
6140 + "Counter": "0,1,2,3",
6141 + "UMask": "0x4",
6142 + "PEBScounters": "0,1,2,3",
6143 + "EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
6144 + "SampleAfterValue": "200003",
6145 + "BriefDescription": "Store uops that split a page (Precise event capable)"
6146 + },
6147 + {
6148 + "CollectPEBSRecord": "1",
6149 + "PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved - as another core is in the process of modifying the data.",
6150 + "EventCode": "0xC3",
6151 + "Counter": "0,1,2,3",
6152 + "UMask": "0x2",
6153 + "PEBScounters": "0,1,2,3",
6154 + "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
6155 + "PDIR_COUNTER": "na",
6156 + "SampleAfterValue": "20003",
6157 + "BriefDescription": "Machine clears due to memory ordering issue"
6158 + }
6159 +]
6160 \ No newline at end of file
6161 diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/other.json b/tools/perf/pmu-events/arch/x86/goldmontplus/other.json
6162 new file mode 100644
6163 index 000000000000..b860374418ab
6164 --- /dev/null
6165 +++ b/tools/perf/pmu-events/arch/x86/goldmontplus/other.json
6166 @@ -0,0 +1,98 @@
6167 +[
6168 + {
6169 + "CollectPEBSRecord": "1",
6170 + "PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
6171 + "EventCode": "0x86",
6172 + "Counter": "0,1,2,3",
6173 + "UMask": "0x0",
6174 + "PEBScounters": "0,1,2,3",
6175 + "EventName": "FETCH_STALL.ALL",
6176 + "PDIR_COUNTER": "na",
6177 + "SampleAfterValue": "200003",
6178 + "BriefDescription": "Cycles code-fetch stalled due to any reason."
6179 + },
6180 + {
6181 + "CollectPEBSRecord": "1",
6182 + "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
6183 + "EventCode": "0x86",
6184 + "Counter": "0,1,2,3",
6185 + "UMask": "0x1",
6186 + "PEBScounters": "0,1,2,3",
6187 + "EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
6188 + "PDIR_COUNTER": "na",
6189 + "SampleAfterValue": "200003",
6190 + "BriefDescription": "Cycles the code-fetch stalls and an ITLB miss is outstanding."
6191 + },
6192 + {
6193 + "CollectPEBSRecord": "1",
6194 + "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
6195 + "EventCode": "0xCA",
6196 + "Counter": "0,1,2,3",
6197 + "UMask": "0x0",
6198 + "PEBScounters": "0,1,2,3",
6199 + "EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
6200 + "PDIR_COUNTER": "na",
6201 + "SampleAfterValue": "200003",
6202 + "BriefDescription": "Unfilled issue slots per cycle"
6203 + },
6204 + {
6205 + "CollectPEBSRecord": "1",
6206 + "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
6207 + "EventCode": "0xCA",
6208 + "Counter": "0,1,2,3",
6209 + "UMask": "0x1",
6210 + "PEBScounters": "0,1,2,3",
6211 + "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
6212 + "PDIR_COUNTER": "na",
6213 + "SampleAfterValue": "200003",
6214 + "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend"
6215 + },
6216 + {
6217 + "CollectPEBSRecord": "1",
6218 + "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
6219 + "EventCode": "0xCA",
6220 + "Counter": "0,1,2,3",
6221 + "UMask": "0x2",
6222 + "PEBScounters": "0,1,2,3",
6223 + "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
6224 + "PDIR_COUNTER": "na",
6225 + "SampleAfterValue": "200003",
6226 + "BriefDescription": "Unfilled issue slots per cycle to recover"
6227 + },
6228 + {
6229 + "CollectPEBSRecord": "2",
6230 + "PublicDescription": "Counts hardware interrupts received by the processor.",
6231 + "EventCode": "0xCB",
6232 + "Counter": "0,1,2,3",
6233 + "UMask": "0x1",
6234 + "PEBScounters": "0,1,2,3",
6235 + "EventName": "HW_INTERRUPTS.RECEIVED",
6236 + "PDIR_COUNTER": "na",
6237 + "SampleAfterValue": "203",
6238 + "BriefDescription": "Hardware interrupts received"
6239 + },
6240 + {
6241 + "CollectPEBSRecord": "2",
6242 + "PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
6243 + "EventCode": "0xCB",
6244 + "Counter": "0,1,2,3",
6245 + "UMask": "0x2",
6246 + "PEBScounters": "0,1,2,3",
6247 + "EventName": "HW_INTERRUPTS.MASKED",
6248 + "PDIR_COUNTER": "na",
6249 + "SampleAfterValue": "200003",
6250 + "BriefDescription": "Cycles hardware interrupts are masked"
6251 + },
6252 + {
6253 + "CollectPEBSRecord": "2",
6254 + "PublicDescription": "Counts core cycles during which there are pending interrupts, but interrupts are masked (EFLAGS.IF = 0).",
6255 + "EventCode": "0xCB",
6256 + "Counter": "0,1,2,3",
6257 + "UMask": "0x4",
6258 + "PEBScounters": "0,1,2,3",
6259 + "EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
6260 + "PDIR_COUNTER": "na",
6261 + "SampleAfterValue": "200003",
6262 + "BriefDescription": "Cycles pending interrupts are masked"
6263 + }
6264 +]
6265 \ No newline at end of file
6266 diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
6267 new file mode 100644
6268 index 000000000000..ccf1aed69197
6269 --- /dev/null
6270 +++ b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
6271 @@ -0,0 +1,544 @@
6272 +[
6273 + {
6274 + "PEBS": "2",
6275 + "CollectPEBSRecord": "1",
6276 + "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
6277 + "EventCode": "0x00",
6278 + "Counter": "Fixed counter 0",
6279 + "UMask": "0x1",
6280 + "PEBScounters": "32",
6281 + "EventName": "INST_RETIRED.ANY",
6282 + "PDIR_COUNTER": "na",
6283 + "SampleAfterValue": "2000003",
6284 + "BriefDescription": "Instructions retired (Fixed event)"
6285 + },
6286 + {
6287 + "CollectPEBSRecord": "1",
6288 + "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
6289 + "EventCode": "0x00",
6290 + "Counter": "Fixed counter 1",
6291 + "UMask": "0x2",
6292 + "PEBScounters": "33",
6293 + "EventName": "CPU_CLK_UNHALTED.CORE",
6294 + "PDIR_COUNTER": "na",
6295 + "SampleAfterValue": "2000003",
6296 + "BriefDescription": "Core cycles when core is not halted (Fixed event)"
6297 + },
6298 + {
6299 + "CollectPEBSRecord": "1",
6300 + "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
6301 + "EventCode": "0x00",
6302 + "Counter": "Fixed counter 2",
6303 + "UMask": "0x3",
6304 + "PEBScounters": "34",
6305 + "EventName": "CPU_CLK_UNHALTED.REF_TSC",
6306 + "PDIR_COUNTER": "na",
6307 + "SampleAfterValue": "2000003",
6308 + "BriefDescription": "Reference cycles when core is not halted (Fixed event)"
6309 + },
6310 + {
6311 + "PEBS": "2",
6312 + "CollectPEBSRecord": "2",
6313 + "PublicDescription": "Counts a load blocked from using a store forward, but did not occur because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
6314 + "EventCode": "0x03",
6315 + "Counter": "0,1,2,3",
6316 + "UMask": "0x1",
6317 + "PEBScounters": "0,1,2,3",
6318 + "EventName": "LD_BLOCKS.DATA_UNKNOWN",
6319 + "SampleAfterValue": "200003",
6320 + "BriefDescription": "Loads blocked due to store data not ready (Precise event capable)"
6321 + },
6322 + {
6323 + "PEBS": "2",
6324 + "CollectPEBSRecord": "2",
6325 + "PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch, only one of the loads blocked from each store will be counted.",
6326 + "EventCode": "0x03",
6327 + "Counter": "0,1,2,3",
6328 + "UMask": "0x2",
6329 + "PEBScounters": "0,1,2,3",
6330 + "EventName": "LD_BLOCKS.STORE_FORWARD",
6331 + "SampleAfterValue": "200003",
6332 + "BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)"
6333 + },
6334 + {
6335 + "PEBS": "2",
6336 + "CollectPEBSRecord": "2",
6337 + "PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
6338 + "EventCode": "0x03",
6339 + "Counter": "0,1,2,3",
6340 + "UMask": "0x4",
6341 + "PEBScounters": "0,1,2,3",
6342 + "EventName": "LD_BLOCKS.4K_ALIAS",
6343 + "SampleAfterValue": "200003",
6344 + "BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)"
6345 + },
6346 + {
6347 + "PEBS": "2",
6348 + "CollectPEBSRecord": "2",
6349 + "PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
6350 + "EventCode": "0x03",
6351 + "Counter": "0,1,2,3",
6352 + "UMask": "0x8",
6353 + "PEBScounters": "0,1,2,3",
6354 + "EventName": "LD_BLOCKS.UTLB_MISS",
6355 + "SampleAfterValue": "200003",
6356 + "BriefDescription": "Loads blocked because address is not in the UTLB (Precise event capable)"
6357 + },
6358 + {
6359 + "PEBS": "2",
6360 + "CollectPEBSRecord": "2",
6361 + "PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
6362 + "EventCode": "0x03",
6363 + "Counter": "0,1,2,3",
6364 + "UMask": "0x10",
6365 + "PEBScounters": "0,1,2,3",
6366 + "EventName": "LD_BLOCKS.ALL_BLOCK",
6367 + "SampleAfterValue": "200003",
6368 + "BriefDescription": "Loads blocked (Precise event capable)"
6369 + },
6370 + {
6371 + "CollectPEBSRecord": "1",
6372 + "PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
6373 + "EventCode": "0x0E",
6374 + "Counter": "0,1,2,3",
6375 + "UMask": "0x0",
6376 + "PEBScounters": "0,1,2,3",
6377 + "EventName": "UOPS_ISSUED.ANY",
6378 + "PDIR_COUNTER": "na",
6379 + "SampleAfterValue": "200003",
6380 + "BriefDescription": "Uops issued to the back end per cycle"
6381 + },
6382 + {
6383 + "CollectPEBSRecord": "1",
6384 + "PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
6385 + "EventCode": "0x3C",
6386 + "Counter": "0,1,2,3",
6387 + "UMask": "0x0",
6388 + "PEBScounters": "0,1,2,3",
6389 + "EventName": "CPU_CLK_UNHALTED.CORE_P",
6390 + "PDIR_COUNTER": "na",
6391 + "SampleAfterValue": "2000003",
6392 + "BriefDescription": "Core cycles when core is not halted"
6393 + },
6394 + {
6395 + "CollectPEBSRecord": "1",
6396 + "PublicDescription": "Reference cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
6397 + "EventCode": "0x3C",
6398 + "Counter": "0,1,2,3",
6399 + "UMask": "0x1",
6400 + "PEBScounters": "0,1,2,3",
6401 + "EventName": "CPU_CLK_UNHALTED.REF",
6402 + "PDIR_COUNTER": "na",
6403 + "SampleAfterValue": "2000003",
6404 + "BriefDescription": "Reference cycles when core is not halted"
6405 + },
6406 + {
6407 + "CollectPEBSRecord": "1",
6408 + "PublicDescription": "This event is used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end inefficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. 
For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
6409 + "EventCode": "0x9C",
6410 + "Counter": "0,1,2,3",
6411 + "UMask": "0x0",
6412 + "PEBScounters": "0,1,2,3",
6413 + "EventName": "UOPS_NOT_DELIVERED.ANY",
6414 + "PDIR_COUNTER": "na",
6415 + "SampleAfterValue": "200003",
6416 + "BriefDescription": "Uops requested but not-delivered to the back-end per cycle"
6417 + },
6418 + {
6419 + "PEBS": "2",
6420 + "CollectPEBSRecord": "1",
6421 + "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
6422 + "EventCode": "0xC0",
6423 + "Counter": "0,1,2,3",
6424 + "UMask": "0x0",
6425 + "PEBScounters": "0,1,2,3",
6426 + "EventName": "INST_RETIRED.ANY_P",
6427 + "SampleAfterValue": "2000003",
6428 + "BriefDescription": "Instructions retired (Precise event capable)"
6429 + },
6430 + {
6431 + "PEBS": "2",
6432 + "CollectPEBSRecord": "2",
6433 + "PublicDescription": "Counts INST_RETIRED.ANY using the Reduced Skid PEBS feature that reduces the shadow in which events aren't counted allowing for a more unbiased distribution of samples across instructions retired.",
6434 + "EventCode": "0xC0",
6435 + "Counter": "0,1,2,3",
6436 + "UMask": "0x0",
6437 + "EventName": "INST_RETIRED.PREC_DIST",
6438 + "SampleAfterValue": "2000003",
6439 + "BriefDescription": "Instructions retired - using Reduced Skid PEBS feature"
6440 + },
6441 + {
6442 + "PEBS": "2",
6443 + "CollectPEBSRecord": "2",
6444 + "PublicDescription": "Counts uops which retired.",
6445 + "EventCode": "0xC2",
6446 + "Counter": "0,1,2,3",
6447 + "UMask": "0x0",
6448 + "PEBScounters": "0,1,2,3",
6449 + "EventName": "UOPS_RETIRED.ANY",
6450 + "PDIR_COUNTER": "na",
6451 + "SampleAfterValue": "2000003",
6452 + "BriefDescription": "Uops retired (Precise event capable)"
6453 + },
6454 + {
6455 + "PEBS": "2",
6456 + "CollectPEBSRecord": "2",
6457 + "PublicDescription": "Counts uops retired that are from the complex flows issued by the micro-sequencer (MS). Counts both the uops from a micro-coded instruction, and the uops that might be generated from a micro-coded assist.",
6458 + "EventCode": "0xC2",
6459 + "Counter": "0,1,2,3",
6460 + "UMask": "0x1",
6461 + "PEBScounters": "0,1,2,3",
6462 + "EventName": "UOPS_RETIRED.MS",
6463 + "PDIR_COUNTER": "na",
6464 + "SampleAfterValue": "2000003",
6465 + "BriefDescription": "MS uops retired (Precise event capable)"
6466 + },
6467 + {
6468 + "PEBS": "2",
6469 + "CollectPEBSRecord": "1",
6470 + "PublicDescription": "Counts the number of floating point divide uops retired.",
6471 + "EventCode": "0xC2",
6472 + "Counter": "0,1,2,3",
6473 + "UMask": "0x8",
6474 + "PEBScounters": "0,1,2,3",
6475 + "EventName": "UOPS_RETIRED.FPDIV",
6476 + "SampleAfterValue": "2000003",
6477 + "BriefDescription": "Floating point divide uops retired (Precise Event Capable)"
6478 + },
6479 + {
6480 + "PEBS": "2",
6481 + "CollectPEBSRecord": "1",
6482 + "PublicDescription": "Counts the number of integer divide uops retired.",
6483 + "EventCode": "0xC2",
6484 + "Counter": "0,1,2,3",
6485 + "UMask": "0x10",
6486 + "PEBScounters": "0,1,2,3",
6487 + "EventName": "UOPS_RETIRED.IDIV",
6488 + "SampleAfterValue": "2000003",
6489 + "BriefDescription": "Integer divide uops retired (Precise Event Capable)"
6490 + },
6491 + {
6492 + "CollectPEBSRecord": "1",
6493 + "PublicDescription": "Counts machine clears for any reason.",
6494 + "EventCode": "0xC3",
6495 + "Counter": "0,1,2,3",
6496 + "UMask": "0x0",
6497 + "PEBScounters": "0,1,2,3",
6498 + "EventName": "MACHINE_CLEARS.ALL",
6499 + "PDIR_COUNTER": "na",
6500 + "SampleAfterValue": "20003",
6501 + "BriefDescription": "All machine clears"
6502 + },
6503 + {
6504 + "CollectPEBSRecord": "1",
6505 + "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
6506 + "EventCode": "0xC3",
6507 + "Counter": "0,1,2,3",
6508 + "UMask": "0x1",
6509 + "PEBScounters": "0,1,2,3",
6510 + "EventName": "MACHINE_CLEARS.SMC",
6511 + "PDIR_COUNTER": "na",
6512 + "SampleAfterValue": "20003",
6513 + "BriefDescription": "Self-Modifying Code detected"
6514 + },
6515 + {
6516 + "CollectPEBSRecord": "1",
6517 + "PublicDescription": "Counts machine clears due to floating point (FP) operations needing assists. For instance, if the result was a floating point denormal, the hardware clears the pipeline and reissues uops to produce the correct IEEE compliant denormal result.",
6518 + "EventCode": "0xC3",
6519 + "Counter": "0,1,2,3",
6520 + "UMask": "0x4",
6521 + "PEBScounters": "0,1,2,3",
6522 + "EventName": "MACHINE_CLEARS.FP_ASSIST",
6523 + "PDIR_COUNTER": "na",
6524 + "SampleAfterValue": "20003",
6525 + "BriefDescription": "Machine clears due to FP assists"
6526 + },
6527 + {
6528 + "CollectPEBSRecord": "1",
6529 + "PublicDescription": "Counts machine clears due to memory disambiguation. Memory disambiguation happens when a load which has been issued conflicts with a previous unretired store in the pipeline whose address was not known at issue time, but is later resolved to be the same as the load address.",
6530 + "EventCode": "0xC3",
6531 + "Counter": "0,1,2,3",
6532 + "UMask": "0x8",
6533 + "PEBScounters": "0,1,2,3",
6534 + "EventName": "MACHINE_CLEARS.DISAMBIGUATION",
6535 + "PDIR_COUNTER": "na",
6536 + "SampleAfterValue": "20003",
6537 + "BriefDescription": "Machine clears due to memory disambiguation"
6538 + },
6539 + {
6540 + "CollectPEBSRecord": "1",
6541 + "PublicDescription": "Counts the number of times that the machines clears due to a page fault. Covers both I-side and D-side(Loads/Stores) page faults. A page fault occurs when either page is not present, or an access violation",
6542 + "EventCode": "0xC3",
6543 + "Counter": "0,1,2,3",
6544 + "UMask": "0x20",
6545 + "PEBScounters": "0,1,2,3",
6546 + "EventName": "MACHINE_CLEARS.PAGE_FAULT",
6547 + "PDIR_COUNTER": "na",
6548 + "SampleAfterValue": "20003",
6549 + "BriefDescription": "Machines clear due to a page fault"
6550 + },
6551 + {
6552 + "PEBS": "2",
6553 + "CollectPEBSRecord": "2",
6554 + "PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
6555 + "EventCode": "0xC4",
6556 + "Counter": "0,1,2,3",
6557 + "UMask": "0x0",
6558 + "PEBScounters": "0,1,2,3",
6559 + "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
6560 + "SampleAfterValue": "200003",
6561 + "BriefDescription": "Retired branch instructions (Precise event capable)"
6562 + },
6563 + {
6564 + "PEBS": "2",
6565 + "CollectPEBSRecord": "2",
6566 + "PublicDescription": "Counts retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
6567 + "EventCode": "0xC4",
6568 + "Counter": "0,1,2,3",
6569 + "UMask": "0x7e",
6570 + "PEBScounters": "0,1,2,3",
6571 + "EventName": "BR_INST_RETIRED.JCC",
6572 + "SampleAfterValue": "200003",
6573 + "BriefDescription": "Retired conditional branch instructions (Precise event capable)"
6574 + },
6575 + {
6576 + "PEBS": "2",
6577 + "CollectPEBSRecord": "2",
6578 + "PublicDescription": "Counts the number of taken branch instructions retired.",
6579 + "EventCode": "0xC4",
6580 + "Counter": "0,1,2,3",
6581 + "UMask": "0x80",
6582 + "PEBScounters": "0,1,2,3",
6583 + "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
6584 + "SampleAfterValue": "200003",
6585 + "BriefDescription": "Retired taken branch instructions (Precise event capable)"
6586 + },
6587 + {
6588 + "PEBS": "2",
6589 + "CollectPEBSRecord": "2",
6590 + "PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
6591 + "EventCode": "0xC4",
6592 + "Counter": "0,1,2,3",
6593 + "UMask": "0xbf",
6594 + "PEBScounters": "0,1,2,3",
6595 + "EventName": "BR_INST_RETIRED.FAR_BRANCH",
6596 + "SampleAfterValue": "200003",
6597 + "BriefDescription": "Retired far branch instructions (Precise event capable)"
6598 + },
6599 + {
6600 + "PEBS": "2",
6601 + "CollectPEBSRecord": "2",
6602 + "PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
6603 + "EventCode": "0xC4",
6604 + "Counter": "0,1,2,3",
6605 + "UMask": "0xeb",
6606 + "PEBScounters": "0,1,2,3",
6607 + "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
6608 + "SampleAfterValue": "200003",
6609 + "BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)"
6610 + },
6611 + {
6612 + "PEBS": "2",
6613 + "CollectPEBSRecord": "2",
6614 + "PublicDescription": "Counts near return branch instructions retired.",
6615 + "EventCode": "0xC4",
6616 + "Counter": "0,1,2,3",
6617 + "UMask": "0xf7",
6618 + "PEBScounters": "0,1,2,3",
6619 + "EventName": "BR_INST_RETIRED.RETURN",
6620 + "SampleAfterValue": "200003",
6621 + "BriefDescription": "Retired near return instructions (Precise event capable)"
6622 + },
6623 + {
6624 + "PEBS": "2",
6625 + "CollectPEBSRecord": "2",
6626 + "PublicDescription": "Counts near CALL branch instructions retired.",
6627 + "EventCode": "0xC4",
6628 + "Counter": "0,1,2,3",
6629 + "UMask": "0xf9",
6630 + "PEBScounters": "0,1,2,3",
6631 + "EventName": "BR_INST_RETIRED.CALL",
6632 + "SampleAfterValue": "200003",
6633 + "BriefDescription": "Retired near call instructions (Precise event capable)"
6634 + },
6635 + {
6636 + "PEBS": "2",
6637 + "CollectPEBSRecord": "2",
6638 + "PublicDescription": "Counts near indirect CALL branch instructions retired.",
6639 + "EventCode": "0xC4",
6640 + "Counter": "0,1,2,3",
6641 + "UMask": "0xfb",
6642 + "PEBScounters": "0,1,2,3",
6643 + "EventName": "BR_INST_RETIRED.IND_CALL",
6644 + "SampleAfterValue": "200003",
6645 + "BriefDescription": "Retired near indirect call instructions (Precise event capable)"
6646 + },
6647 + {
6648 + "PEBS": "2",
6649 + "CollectPEBSRecord": "2",
6650 + "PublicDescription": "Counts near relative CALL branch instructions retired.",
6651 + "EventCode": "0xC4",
6652 + "Counter": "0,1,2,3",
6653 + "UMask": "0xfd",
6654 + "PEBScounters": "0,1,2,3",
6655 + "EventName": "BR_INST_RETIRED.REL_CALL",
6656 + "SampleAfterValue": "200003",
6657 + "BriefDescription": "Retired near relative call instructions (Precise event capable)"
6658 + },
6659 + {
6660 + "PEBS": "2",
6661 + "CollectPEBSRecord": "2",
6662 + "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction were not taken.",
6663 + "EventCode": "0xC4",
6664 + "Counter": "0,1,2,3",
6665 + "UMask": "0xfe",
6666 + "PEBScounters": "0,1,2,3",
6667 + "EventName": "BR_INST_RETIRED.TAKEN_JCC",
6668 + "SampleAfterValue": "200003",
6669 + "BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)"
6670 + },
6671 + {
6672 + "PEBS": "2",
6673 + "CollectPEBSRecord": "2",
6674 + "PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
6675 + "EventCode": "0xC5",
6676 + "Counter": "0,1,2,3",
6677 + "UMask": "0x0",
6678 + "PEBScounters": "0,1,2,3",
6679 + "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
6680 + "SampleAfterValue": "200003",
6681 + "BriefDescription": "Retired mispredicted branch instructions (Precise event capable)"
6682 + },
6683 + {
6684 + "PEBS": "2",
6685 + "CollectPEBSRecord": "2",
6686 + "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
6687 + "EventCode": "0xC5",
6688 + "Counter": "0,1,2,3",
6689 + "UMask": "0x7e",
6690 + "PEBScounters": "0,1,2,3",
6691 + "EventName": "BR_MISP_RETIRED.JCC",
6692 + "SampleAfterValue": "200003",
6693 + "BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)"
6694 + },
6695 + {
6696 + "PEBS": "2",
6697 + "CollectPEBSRecord": "2",
6698 + "PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
6699 + "EventCode": "0xC5",
6700 + "Counter": "0,1,2,3",
6701 + "UMask": "0xeb",
6702 + "PEBScounters": "0,1,2,3",
6703 + "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
6704 + "SampleAfterValue": "200003",
6705 + "BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call (Precise event capable)"
6706 + },
6707 + {
6708 + "PEBS": "2",
6709 + "CollectPEBSRecord": "2",
6710 + "PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
6711 + "EventCode": "0xC5",
6712 + "Counter": "0,1,2,3",
6713 + "UMask": "0xf7",
6714 + "PEBScounters": "0,1,2,3",
6715 + "EventName": "BR_MISP_RETIRED.RETURN",
6716 + "SampleAfterValue": "200003",
6717 + "BriefDescription": "Retired mispredicted near return instructions (Precise event capable)"
6718 + },
6719 + {
6720 + "PEBS": "2",
6721 + "CollectPEBSRecord": "2",
6722 + "PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
6723 + "EventCode": "0xC5",
6724 + "Counter": "0,1,2,3",
6725 + "UMask": "0xfb",
6726 + "PEBScounters": "0,1,2,3",
6727 + "EventName": "BR_MISP_RETIRED.IND_CALL",
6728 + "SampleAfterValue": "200003",
6729 + "BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)"
6730 + },
6731 + {
6732 + "PEBS": "2",
6733 + "CollectPEBSRecord": "2",
6734 + "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that it would not be taken.",
6735 + "EventCode": "0xC5",
6736 + "Counter": "0,1,2,3",
6737 + "UMask": "0xfe",
6738 + "PEBScounters": "0,1,2,3",
6739 + "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
6740 + "SampleAfterValue": "200003",
6741 + "BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)"
6742 + },
6743 + {
6744 + "CollectPEBSRecord": "1",
6745 + "PublicDescription": "Counts core cycles if either divide unit is busy.",
6746 + "EventCode": "0xCD",
6747 + "Counter": "0,1,2,3",
6748 + "UMask": "0x0",
6749 + "PEBScounters": "0,1,2,3",
6750 + "EventName": "CYCLES_DIV_BUSY.ALL",
6751 + "PDIR_COUNTER": "na",
6752 + "SampleAfterValue": "2000003",
6753 + "BriefDescription": "Cycles a divider is busy"
6754 + },
6755 + {
6756 + "CollectPEBSRecord": "1",
6757 + "PublicDescription": "Counts core cycles the integer divide unit is busy.",
6758 + "EventCode": "0xCD",
6759 + "Counter": "0,1,2,3",
6760 + "UMask": "0x1",
6761 + "PEBScounters": "0,1,2,3",
6762 + "EventName": "CYCLES_DIV_BUSY.IDIV",
6763 + "PDIR_COUNTER": "na",
6764 + "SampleAfterValue": "200003",
6765 + "BriefDescription": "Cycles the integer divide unit is busy"
6766 + },
6767 + {
6768 + "CollectPEBSRecord": "1",
6769 + "PublicDescription": "Counts core cycles the floating point divide unit is busy.",
6770 + "EventCode": "0xCD",
6771 + "Counter": "0,1,2,3",
6772 + "UMask": "0x2",
6773 + "PEBScounters": "0,1,2,3",
6774 + "EventName": "CYCLES_DIV_BUSY.FPDIV",
6775 + "PDIR_COUNTER": "na",
6776 + "SampleAfterValue": "200003",
6777 + "BriefDescription": "Cycles the FP divide unit is busy"
6778 + },
6779 + {
6780 + "CollectPEBSRecord": "1",
6781 + "PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
6782 + "EventCode": "0xE6",
6783 + "Counter": "0,1,2,3",
6784 + "UMask": "0x1",
6785 + "PEBScounters": "0,1,2,3",
6786 + "EventName": "BACLEARS.ALL",
6787 + "PDIR_COUNTER": "na",
6788 + "SampleAfterValue": "200003",
6789 + "BriefDescription": "BACLEARs asserted for any branch type"
6790 + },
6791 + {
6792 + "CollectPEBSRecord": "1",
6793 + "PublicDescription": "Counts BACLEARS on return instructions.",
6794 + "EventCode": "0xE6",
6795 + "Counter": "0,1,2,3",
6796 + "UMask": "0x8",
6797 + "PEBScounters": "0,1,2,3",
6798 + "EventName": "BACLEARS.RETURN",
6799 + "PDIR_COUNTER": "na",
6800 + "SampleAfterValue": "200003",
6801 + "BriefDescription": "BACLEARs asserted for return branch"
6802 + },
6803 + {
6804 + "CollectPEBSRecord": "1",
6805 + "PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
6806 + "EventCode": "0xE6",
6807 + "Counter": "0,1,2,3",
6808 + "UMask": "0x10",
6809 + "PEBScounters": "0,1,2,3",
6810 + "EventName": "BACLEARS.COND",
6811 + "PDIR_COUNTER": "na",
6812 + "SampleAfterValue": "200003",
6813 + "BriefDescription": "BACLEARs asserted for conditional branch"
6814 + }
6815 +]
6816 \ No newline at end of file
6817 diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
6818 new file mode 100644
6819 index 000000000000..0b53a3b0dfb8
6820 --- /dev/null
6821 +++ b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
6822 @@ -0,0 +1,218 @@
6823 +[
6824 + {
6825 + "CollectPEBSRecord": "1",
6826 + "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 4K pages. The page walks can end with or without a page fault.",
6827 + "EventCode": "0x08",
6828 + "Counter": "0,1,2,3",
6829 + "UMask": "0x2",
6830 + "PEBScounters": "0,1,2,3",
6831 + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
6832 + "PDIR_COUNTER": "na",
6833 + "SampleAfterValue": "200003",
6834 + "BriefDescription": "Page walk completed due to a demand load to a 4K page"
6835 + },
6836 + {
6837 + "CollectPEBSRecord": "1",
6838 + "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
6839 + "EventCode": "0x08",
6840 + "Counter": "0,1,2,3",
6841 + "UMask": "0x4",
6842 + "PEBScounters": "0,1,2,3",
6843 + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
6844 + "PDIR_COUNTER": "na",
6845 + "SampleAfterValue": "200003",
6846 + "BriefDescription": "Page walk completed due to a demand load to a 2M or 4M page"
6847 + },
6848 + {
6849 + "CollectPEBSRecord": "1",
6850 + "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 1GB pages. The page walks can end with or without a page fault.",
6851 + "EventCode": "0x08",
6852 + "Counter": "0,1,2,3",
6853 + "UMask": "0x8",
6854 + "PEBScounters": "0,1,2,3",
6855 + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1GB",
6856 + "PDIR_COUNTER": "na",
6857 + "SampleAfterValue": "200003",
6858 + "BriefDescription": "Page walk completed due to a demand load to a 1GB page"
6859 + },
6860 + {
6861 + "CollectPEBSRecord": "1",
6862 + "PublicDescription": "Counts once per cycle for each page walk occurring due to a load (demand data loads or SW prefetches). Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
6863 + "EventCode": "0x08",
6864 + "Counter": "0,1,2,3",
6865 + "UMask": "0x10",
6866 + "PEBScounters": "0,1,2,3",
6867 + "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
6868 + "PDIR_COUNTER": "na",
6869 + "SampleAfterValue": "200003",
6870 + "BriefDescription": "Page walks outstanding due to a demand load every cycle."
6871 + },
6872 + {
6873 + "CollectPEBSRecord": "1",
6874 + "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
6875 + "EventCode": "0x49",
6876 + "Counter": "0,1,2,3",
6877 + "UMask": "0x2",
6878 + "PEBScounters": "0,1,2,3",
6879 + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
6880 + "PDIR_COUNTER": "na",
6881 + "SampleAfterValue": "2000003",
6882 + "BriefDescription": "Page walk completed due to a demand data store to a 4K page"
6883 + },
6884 + {
6885 + "CollectPEBSRecord": "1",
6886 + "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
6887 + "EventCode": "0x49",
6888 + "Counter": "0,1,2,3",
6889 + "UMask": "0x4",
6890 + "PEBScounters": "0,1,2,3",
6891 + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
6892 + "PDIR_COUNTER": "na",
6893 + "SampleAfterValue": "2000003",
6894 + "BriefDescription": "Page walk completed due to a demand data store to a 2M or 4M page"
6895 + },
6896 + {
6897 + "CollectPEBSRecord": "1",
6898 + "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
6899 + "EventCode": "0x49",
6900 + "Counter": "0,1,2,3",
6901 + "UMask": "0x8",
6902 + "PEBScounters": "0,1,2,3",
6903 + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1GB",
6904 + "PDIR_COUNTER": "na",
6905 + "SampleAfterValue": "2000003",
6906 + "BriefDescription": "Page walk completed due to a demand data store to a 1GB page"
6907 + },
6908 + {
6909 + "CollectPEBSRecord": "1",
6910 + "PublicDescription": "Counts once per cycle for each page walk occurring due to a demand data store. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
6911 + "EventCode": "0x49",
6912 + "Counter": "0,1,2,3",
6913 + "UMask": "0x10",
6914 + "PEBScounters": "0,1,2,3",
6915 + "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
6916 + "PDIR_COUNTER": "na",
6917 + "SampleAfterValue": "200003",
6918 + "BriefDescription": "Page walks outstanding due to a demand data store every cycle."
6919 + },
6920 + {
6921 + "CollectPEBSRecord": "1",
6922 + "PublicDescription": "Counts once per cycle for each page walk only while traversing the Extended Page Table (EPT), and does not count during the rest of the translation. The EPT is used for translating Guest-Physical Addresses to Physical Addresses for Virtual Machine Monitors (VMMs). Average cycles per walk can be calculated by dividing the count by number of walks.",
6923 + "EventCode": "0x4F",
6924 + "Counter": "0,1,2,3",
6925 + "UMask": "0x10",
6926 + "PEBScounters": "0,1,2,3",
6927 + "EventName": "EPT.WALK_PENDING",
6928 + "PDIR_COUNTER": "na",
6929 + "SampleAfterValue": "200003",
6930 + "BriefDescription": "Page walks outstanding due to walking the EPT every cycle"
6931 + },
6932 + {
6933 + "CollectPEBSRecord": "1",
6934 + "PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translation are filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
6935 + "EventCode": "0x81",
6936 + "Counter": "0,1,2,3",
6937 + "UMask": "0x4",
6938 + "PEBScounters": "0,1,2,3",
6939 + "EventName": "ITLB.MISS",
6940 + "PDIR_COUNTER": "na",
6941 + "SampleAfterValue": "200003",
6942 + "BriefDescription": "ITLB misses"
6943 + },
6944 + {
6945 + "CollectPEBSRecord": "1",
6946 + "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
6947 + "EventCode": "0x85",
6948 + "Counter": "0,1,2,3",
6949 + "UMask": "0x2",
6950 + "PEBScounters": "0,1,2,3",
6951 + "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
6952 + "PDIR_COUNTER": "na",
6953 + "SampleAfterValue": "2000003",
6954 + "BriefDescription": "Page walk completed due to an instruction fetch in a 4K page"
6955 + },
6956 + {
6957 + "CollectPEBSRecord": "1",
6958 + "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
6959 + "EventCode": "0x85",
6960 + "Counter": "0,1,2,3",
6961 + "UMask": "0x4",
6962 + "PEBScounters": "0,1,2,3",
6963 + "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
6964 + "PDIR_COUNTER": "na",
6965 + "SampleAfterValue": "2000003",
6966 + "BriefDescription": "Page walk completed due to an instruction fetch in a 2M or 4M page"
6967 + },
6968 + {
6969 + "CollectPEBSRecord": "1",
6970 + "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
6971 + "EventCode": "0x85",
6972 + "Counter": "0,1,2,3",
6973 + "UMask": "0x8",
6974 + "PEBScounters": "0,1,2,3",
6975 + "EventName": "ITLB_MISSES.WALK_COMPLETED_1GB",
6976 + "PDIR_COUNTER": "na",
6977 + "SampleAfterValue": "2000003",
6978 + "BriefDescription": "Page walk completed due to an instruction fetch in a 1GB page"
6979 + },
6980 + {
6981 + "CollectPEBSRecord": "1",
6982 + "PublicDescription": "Counts once per cycle for each page walk occurring due to an instruction fetch. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
6983 + "EventCode": "0x85",
6984 + "Counter": "0,1,2,3",
6985 + "UMask": "0x10",
6986 + "PEBScounters": "0,1,2,3",
6987 + "EventName": "ITLB_MISSES.WALK_PENDING",
6988 + "PDIR_COUNTER": "na",
6989 + "SampleAfterValue": "200003",
6990 + "BriefDescription": "Page walks outstanding due to an instruction fetch every cycle."
6991 + },
6992 + {
6993 + "CollectPEBSRecord": "1",
6994 + "PublicDescription": "Counts STLB flushes. The TLBs are flushed on instructions like INVLPG and MOV to CR3.",
6995 + "EventCode": "0xBD",
6996 + "Counter": "0,1,2,3",
6997 + "UMask": "0x20",
6998 + "PEBScounters": "0,1,2,3",
6999 + "EventName": "TLB_FLUSHES.STLB_ANY",
7000 + "PDIR_COUNTER": "na",
7001 + "SampleAfterValue": "20003",
7002 + "BriefDescription": "STLB flushes"
7003 + },
7004 + {
7005 + "PEBS": "2",
7006 + "CollectPEBSRecord": "2",
7007 + "PublicDescription": "Counts load uops retired that caused a DTLB miss.",
7008 + "EventCode": "0xD0",
7009 + "Counter": "0,1,2,3",
7010 + "UMask": "0x11",
7011 + "PEBScounters": "0,1,2,3",
7012 + "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
7013 + "SampleAfterValue": "200003",
7014 + "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)"
7015 + },
7016 + {
7017 + "PEBS": "2",
7018 + "CollectPEBSRecord": "2",
7019 + "PublicDescription": "Counts store uops retired that caused a DTLB miss.",
7020 + "EventCode": "0xD0",
7021 + "Counter": "0,1,2,3",
7022 + "UMask": "0x12",
7023 + "PEBScounters": "0,1,2,3",
7024 + "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
7025 + "SampleAfterValue": "200003",
7026 + "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)"
7027 + },
7028 + {
7029 + "PEBS": "2",
7030 + "CollectPEBSRecord": "2",
7031 + "PublicDescription": "Counts uops retired that had a DTLB miss on load, store or either. Note that when two distinct memory operations to the same page miss the DTLB, only one of them will be recorded as a DTLB miss.",
7032 + "EventCode": "0xD0",
7033 + "Counter": "0,1,2,3",
7034 + "UMask": "0x13",
7035 + "PEBScounters": "0,1,2,3",
7036 + "EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
7037 + "SampleAfterValue": "200003",
7038 + "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)"
7039 + }
7040 +]
7041 \ No newline at end of file
7042 diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
7043 index 4ea068366c3e..fe1a2c47cabf 100644
7044 --- a/tools/perf/pmu-events/arch/x86/mapfile.csv
7045 +++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
7046 @@ -9,6 +9,7 @@ GenuineIntel-6-27,v4,bonnell,core
7047 GenuineIntel-6-36,v4,bonnell,core
7048 GenuineIntel-6-35,v4,bonnell,core
7049 GenuineIntel-6-5C,v8,goldmont,core
7050 +GenuineIntel-6-7A,v1,goldmontplus,core
7051 GenuineIntel-6-3C,v24,haswell,core
7052 GenuineIntel-6-45,v24,haswell,core
7053 GenuineIntel-6-46,v24,haswell,core
7054 diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
7055 index 00c98c968cb1..505c13bf7e30 100644
7056 --- a/tools/perf/util/dso.c
7057 +++ b/tools/perf/util/dso.c
7058 @@ -352,6 +352,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
7059 if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
7060 (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
7061 (strncmp(name, "[vdso]", 6) == 0) ||
7062 + (strncmp(name, "[vdso32]", 8) == 0) ||
7063 + (strncmp(name, "[vdsox32]", 9) == 0) ||
7064 (strncmp(name, "[vsyscall]", 10) == 0)) {
7065 m->kmod = false;
7066
7067 diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
7068 index f9157aed1289..d404bed7003a 100644
7069 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
7070 +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
7071 @@ -113,6 +113,7 @@ struct intel_pt_decoder {
7072 bool have_cyc;
7073 bool fixup_last_mtc;
7074 bool have_last_ip;
7075 + enum intel_pt_param_flags flags;
7076 uint64_t pos;
7077 uint64_t last_ip;
7078 uint64_t ip;
7079 @@ -226,6 +227,8 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
7080 decoder->return_compression = params->return_compression;
7081 decoder->branch_enable = params->branch_enable;
7082
7083 + decoder->flags = params->flags;
7084 +
7085 decoder->period = params->period;
7086 decoder->period_type = params->period_type;
7087
7088 @@ -1097,6 +1100,15 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
7089 return ret;
7090 }
7091
7092 +static inline bool intel_pt_fup_with_nlip(struct intel_pt_decoder *decoder,
7093 + struct intel_pt_insn *intel_pt_insn,
7094 + uint64_t ip, int err)
7095 +{
7096 + return decoder->flags & INTEL_PT_FUP_WITH_NLIP && !err &&
7097 + intel_pt_insn->branch == INTEL_PT_BR_INDIRECT &&
7098 + ip == decoder->ip + intel_pt_insn->length;
7099 +}
7100 +
7101 static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
7102 {
7103 struct intel_pt_insn intel_pt_insn;
7104 @@ -1109,10 +1121,11 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
7105 err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip);
7106 if (err == INTEL_PT_RETURN)
7107 return 0;
7108 - if (err == -EAGAIN) {
7109 + if (err == -EAGAIN ||
7110 + intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
7111 if (intel_pt_fup_event(decoder))
7112 return 0;
7113 - return err;
7114 + return -EAGAIN;
7115 }
7116 decoder->set_fup_tx_flags = false;
7117 if (err)
7118 @@ -1376,7 +1389,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
7119 {
7120 intel_pt_log("ERROR: Buffer overflow\n");
7121 intel_pt_clear_tx_flags(decoder);
7122 - decoder->have_tma = false;
7123 decoder->cbr = 0;
7124 decoder->timestamp_insn_cnt = 0;
7125 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
7126 @@ -1604,7 +1616,6 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
7127 case INTEL_PT_PSB:
7128 case INTEL_PT_TSC:
7129 case INTEL_PT_TMA:
7130 - case INTEL_PT_CBR:
7131 case INTEL_PT_MODE_TSX:
7132 case INTEL_PT_BAD:
7133 case INTEL_PT_PSBEND:
7134 @@ -1620,6 +1631,10 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
7135 decoder->pkt_step = 0;
7136 return -ENOENT;
7137
7138 + case INTEL_PT_CBR:
7139 + intel_pt_calc_cbr(decoder);
7140 + break;
7141 +
7142 case INTEL_PT_OVF:
7143 return intel_pt_overflow(decoder);
7144
7145 diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
7146 index fc1752d50019..51c18d67f4ca 100644
7147 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
7148 +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
7149 @@ -60,6 +60,14 @@ enum {
7150 INTEL_PT_ERR_MAX,
7151 };
7152
7153 +enum intel_pt_param_flags {
7154 + /*
7155 + * FUP packet can contain next linear instruction pointer instead of
7156 + * current linear instruction pointer.
7157 + */
7158 + INTEL_PT_FUP_WITH_NLIP = 1 << 0,
7159 +};
7160 +
7161 struct intel_pt_state {
7162 enum intel_pt_sample_type type;
7163 int err;
7164 @@ -106,6 +114,7 @@ struct intel_pt_params {
7165 unsigned int mtc_period;
7166 uint32_t tsc_ctc_ratio_n;
7167 uint32_t tsc_ctc_ratio_d;
7168 + enum intel_pt_param_flags flags;
7169 };
7170
7171 struct intel_pt_decoder;
7172 diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
7173 index ba4c9dd18643..d426761a549d 100644
7174 --- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
7175 +++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
7176 @@ -366,7 +366,7 @@ static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf,
7177 if (len < offs)
7178 return INTEL_PT_NEED_MORE_BYTES;
7179 byte = buf[offs++];
7180 - payload |= (byte >> 1) << shift;
7181 + payload |= ((uint64_t)byte >> 1) << shift;
7182 }
7183
7184 packet->type = INTEL_PT_CYC;
7185 diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
7186 index d9573c1fa555..3b118fa9da89 100644
7187 --- a/tools/perf/util/intel-pt.c
7188 +++ b/tools/perf/util/intel-pt.c
7189 @@ -784,6 +784,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
7190 unsigned int queue_nr)
7191 {
7192 struct intel_pt_params params = { .get_trace = 0, };
7193 + struct perf_env *env = pt->machine->env;
7194 struct intel_pt_queue *ptq;
7195
7196 ptq = zalloc(sizeof(struct intel_pt_queue));
7197 @@ -865,6 +866,9 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
7198 }
7199 }
7200
7201 + if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
7202 + params.flags |= INTEL_PT_FUP_WITH_NLIP;
7203 +
7204 ptq->decoder = intel_pt_decoder_new(&params);
7205 if (!ptq->decoder)
7206 goto out_free;
7207 @@ -1560,6 +1564,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
7208
7209 if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
7210 switch (ptq->switch_state) {
7211 + case INTEL_PT_SS_NOT_TRACING:
7212 case INTEL_PT_SS_UNKNOWN:
7213 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
7214 err = intel_pt_next_tid(pt, ptq);
7215 diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
7216 index f2019b37370d..6a4982d029bf 100644
7217 --- a/tools/testing/selftests/ftrace/test.d/functions
7218 +++ b/tools/testing/selftests/ftrace/test.d/functions
7219 @@ -15,14 +15,29 @@ reset_tracer() { # reset the current tracer
7220 echo nop > current_tracer
7221 }
7222
7223 -reset_trigger() { # reset all current setting triggers
7224 - grep -v ^# events/*/*/trigger |
7225 +reset_trigger_file() {
7226 + # remove action triggers first
7227 + grep -H ':on[^:]*(' $@ |
7228 + while read line; do
7229 + cmd=`echo $line | cut -f2- -d: | cut -f1 -d" "`
7230 + file=`echo $line | cut -f1 -d:`
7231 + echo "!$cmd" >> $file
7232 + done
7233 + grep -Hv ^# $@ |
7234 while read line; do
7235 cmd=`echo $line | cut -f2- -d: | cut -f1 -d" "`
7236 - echo "!$cmd" > `echo $line | cut -f1 -d:`
7237 + file=`echo $line | cut -f1 -d:`
7238 + echo "!$cmd" > $file
7239 done
7240 }
7241
7242 +reset_trigger() { # reset all current setting triggers
7243 + if [ -d events/synthetic ]; then
7244 + reset_trigger_file events/synthetic/*/trigger
7245 + fi
7246 + reset_trigger_file events/*/*/trigger
7247 +}
7248 +
7249 reset_events_filter() { # reset all current setting filters
7250 grep -v ^none events/*/*/filter |
7251 while read line; do