Magellan Linux

Annotation of /trunk/kernel-magellan/patches-5.3/0105-5.3.6-all-fixes.patch



Revision 3459
Thu Oct 17 06:32:44 2019 UTC by niro
File size: 197275 bytes
-linux-5.3.6
1 niro 3459 diff --git a/Makefile b/Makefile
2     index bf03c110ed9b..d7469f0926a6 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 5
8     PATCHLEVEL = 3
9     -SUBLEVEL = 5
10     +SUBLEVEL = 6
11     EXTRAVERSION =
12     NAME = Bobtail Squid
13    
14     diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
15     index b295f6fad2a5..954c216140ad 100644
16     --- a/arch/arm/boot/dts/omap3-gta04.dtsi
17     +++ b/arch/arm/boot/dts/omap3-gta04.dtsi
18     @@ -120,6 +120,7 @@
19     spi-max-frequency = <100000>;
20     spi-cpol;
21     spi-cpha;
22     + spi-cs-high;
23    
24     backlight= <&backlight>;
25     label = "lcd";
26     diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
27     index 6998a9796499..4e2bea8875f5 100644
28     --- a/arch/mips/include/asm/cpu-features.h
29     +++ b/arch/mips/include/asm/cpu-features.h
30     @@ -397,6 +397,22 @@
31     #define cpu_has_dsp3 __ase(MIPS_ASE_DSP3)
32     #endif
33    
34     +#ifndef cpu_has_loongson_mmi
35     +#define cpu_has_loongson_mmi __ase(MIPS_ASE_LOONGSON_MMI)
36     +#endif
37     +
38     +#ifndef cpu_has_loongson_cam
39     +#define cpu_has_loongson_cam __ase(MIPS_ASE_LOONGSON_CAM)
40     +#endif
41     +
42     +#ifndef cpu_has_loongson_ext
43     +#define cpu_has_loongson_ext __ase(MIPS_ASE_LOONGSON_EXT)
44     +#endif
45     +
46     +#ifndef cpu_has_loongson_ext2
47     +#define cpu_has_loongson_ext2 __ase(MIPS_ASE_LOONGSON_EXT2)
48     +#endif
49     +
50     #ifndef cpu_has_mipsmt
51     #define cpu_has_mipsmt __isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
52     #endif
53     diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
54     index 290369fa44a4..1e3526efca1b 100644
55     --- a/arch/mips/include/asm/cpu.h
56     +++ b/arch/mips/include/asm/cpu.h
57     @@ -433,5 +433,9 @@ enum cpu_type_enum {
58     #define MIPS_ASE_MSA 0x00000100 /* MIPS SIMD Architecture */
59     #define MIPS_ASE_DSP3 0x00000200 /* Signal Processing ASE Rev 3*/
60     #define MIPS_ASE_MIPS16E2 0x00000400 /* MIPS16e2 */
61     +#define MIPS_ASE_LOONGSON_MMI 0x00000800 /* Loongson MultiMedia extensions Instructions */
62     +#define MIPS_ASE_LOONGSON_CAM 0x00001000 /* Loongson CAM */
63     +#define MIPS_ASE_LOONGSON_EXT 0x00002000 /* Loongson EXTensions */
64     +#define MIPS_ASE_LOONGSON_EXT2 0x00004000 /* Loongson EXTensions R2 */
65    
66     #endif /* _ASM_CPU_H */
67     diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
68     index e654ffc1c8a0..e698a20017c1 100644
69     --- a/arch/mips/kernel/cpu-probe.c
70     +++ b/arch/mips/kernel/cpu-probe.c
71     @@ -1573,6 +1573,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
72     __cpu_name[cpu] = "ICT Loongson-3";
73     set_elf_platform(cpu, "loongson3a");
74     set_isa(c, MIPS_CPU_ISA_M64R1);
75     + c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
76     + MIPS_ASE_LOONGSON_EXT);
77     break;
78     case PRID_REV_LOONGSON3B_R1:
79     case PRID_REV_LOONGSON3B_R2:
80     @@ -1580,6 +1582,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
81     __cpu_name[cpu] = "ICT Loongson-3";
82     set_elf_platform(cpu, "loongson3b");
83     set_isa(c, MIPS_CPU_ISA_M64R1);
84     + c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
85     + MIPS_ASE_LOONGSON_EXT);
86     break;
87     }
88    
89     @@ -1946,6 +1950,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
90     decode_configs(c);
91     c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
92     c->writecombine = _CACHE_UNCACHED_ACCELERATED;
93     + c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
94     + MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
95     break;
96     default:
97     panic("Unknown Loongson Processor ID!");
98     diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
99     index b2de408a259e..f8d36710cd58 100644
100     --- a/arch/mips/kernel/proc.c
101     +++ b/arch/mips/kernel/proc.c
102     @@ -124,6 +124,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
103     if (cpu_has_eva) seq_printf(m, "%s", " eva");
104     if (cpu_has_htw) seq_printf(m, "%s", " htw");
105     if (cpu_has_xpa) seq_printf(m, "%s", " xpa");
106     + if (cpu_has_loongson_mmi) seq_printf(m, "%s", " loongson-mmi");
107     + if (cpu_has_loongson_cam) seq_printf(m, "%s", " loongson-cam");
108     + if (cpu_has_loongson_ext) seq_printf(m, "%s", " loongson-ext");
109     + if (cpu_has_loongson_ext2) seq_printf(m, "%s", " loongson-ext2");
110     seq_printf(m, "\n");
111    
112     if (cpu_has_mmips) {
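[Note: the MIPS hunks above add four Loongson ASE flag bits and print them from show_cpuinfo(). A minimal userspace sketch (plain C, not kernel code) of the same bitmask-and-print pattern, reusing the flag values the patch defines in arch/mips/include/asm/cpu.h:]

    /* Userspace model of the new ASE reporting; the mask values are
     * exactly those added to cpu.h above. */
    #include <stdio.h>

    #define MIPS_ASE_LOONGSON_MMI  0x00000800 /* Loongson MultiMedia Instructions */
    #define MIPS_ASE_LOONGSON_CAM  0x00001000 /* Loongson CAM */
    #define MIPS_ASE_LOONGSON_EXT  0x00002000 /* Loongson EXTensions */
    #define MIPS_ASE_LOONGSON_EXT2 0x00004000 /* Loongson EXTensions R2 */

    int main(void)
    {
        /* e.g. a Loongson-3A R1: MMI + CAM + EXT, but not EXT2 */
        unsigned int ases = MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
                            MIPS_ASE_LOONGSON_EXT;

        printf("ASEs implemented:");
        if (ases & MIPS_ASE_LOONGSON_MMI)  printf(" loongson-mmi");
        if (ases & MIPS_ASE_LOONGSON_CAM)  printf(" loongson-cam");
        if (ases & MIPS_ASE_LOONGSON_EXT)  printf(" loongson-ext");
        if (ases & MIPS_ASE_LOONGSON_EXT2) printf(" loongson-ext2");
        printf("\n");
        return 0;
    }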
113     diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
114     index d05f0c28e515..f43ff5a00d38 100644
115     --- a/arch/powerpc/include/asm/cputable.h
116     +++ b/arch/powerpc/include/asm/cputable.h
117     @@ -213,8 +213,9 @@ static inline void cpu_feature_keys_init(void) { }
118     #define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x0000080000000000)
119     #define CPU_FTR_P9_TM_HV_ASSIST LONG_ASM_CONST(0x0000100000000000)
120     #define CPU_FTR_P9_TM_XER_SO_BUG LONG_ASM_CONST(0x0000200000000000)
121     -#define CPU_FTR_P9_TLBIE_BUG LONG_ASM_CONST(0x0000400000000000)
122     +#define CPU_FTR_P9_TLBIE_STQ_BUG LONG_ASM_CONST(0x0000400000000000)
123     #define CPU_FTR_P9_TIDR LONG_ASM_CONST(0x0000800000000000)
124     +#define CPU_FTR_P9_TLBIE_ERAT_BUG LONG_ASM_CONST(0x0001000000000000)
125    
126     #ifndef __ASSEMBLY__
127    
128     @@ -461,7 +462,7 @@ static inline void cpu_feature_keys_init(void) { }
129     CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
130     CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
131     CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
132     - CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR)
133     + CPU_FTR_P9_TLBIE_STQ_BUG | CPU_FTR_P9_TLBIE_ERAT_BUG | CPU_FTR_P9_TIDR)
134     #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
135     #define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
136     #define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
137     diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
138     index 2484e6a8f5ca..8e8514efb124 100644
139     --- a/arch/powerpc/include/asm/kvm_ppc.h
140     +++ b/arch/powerpc/include/asm/kvm_ppc.h
141     @@ -598,6 +598,7 @@ extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
142     union kvmppc_one_reg *val);
143     extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
144     union kvmppc_one_reg *val);
145     +extern bool kvmppc_xive_native_supported(void);
146    
147     #else
148     static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
149     diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
150     index e4016985764e..818989e11678 100644
151     --- a/arch/powerpc/include/asm/xive.h
152     +++ b/arch/powerpc/include/asm/xive.h
153     @@ -46,7 +46,15 @@ struct xive_irq_data {
154    
155     /* Setup/used by frontend */
156     int target;
157     + /*
158     + * saved_p means that there is a queue entry for this interrupt
159     + * in some CPU's queue (not including guest vcpu queues), even
160     + * if P is not set in the source ESB.
161     + * stale_p means that there is no queue entry for this interrupt
162     + * in some CPU's queue, even if P is set in the source ESB.
163     + */
164     bool saved_p;
165     + bool stale_p;
166     };
167     #define XIVE_IRQ_FLAG_STORE_EOI 0x01
168     #define XIVE_IRQ_FLAG_LSI 0x02
169     @@ -127,6 +135,7 @@ extern int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
170     extern int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
171     u32 qindex);
172     extern int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
173     +extern bool xive_native_has_queue_state_support(void);
174    
175     #else
176    
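[Note: the saved_p/stale_p pair added above tracks whether a queue entry exists for an interrupt independently of the P bit in the source ESB. A toy model (hypothetical struct toy_irq_data, not the kernel type) of the invariant that the xive_get_irqchip_state() hunk later in this patch checks:]

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_irq_data {
        bool p_bit;    /* P as read from the source ESB */
        bool saved_p;  /* a queue entry exists even if P is 0 */
        bool stale_p;  /* no queue entry even though P is 1 */
    };

    /* Mirrors the IRQCHIP_STATE_ACTIVE computation added below:
     * active iff not stale and either saved_p or the ESB P bit. */
    static bool queue_entry_exists(const struct toy_irq_data *xd)
    {
        return !xd->stale_p && (xd->saved_p || xd->p_bit);
    }

    int main(void)
    {
        struct toy_irq_data xd = { .p_bit = true, .stale_p = true };
        printf("entry exists: %d\n", queue_entry_exists(&xd)); /* 0 */
        return 0;
    }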
177     diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
178     index bd95318d2202..864cc55fa03c 100644
179     --- a/arch/powerpc/kernel/dt_cpu_ftrs.c
180     +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
181     @@ -691,9 +691,37 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
182     return true;
183     }
184    
185     +/*
186     + * Handle POWER9 broadcast tlbie invalidation issue using
187     + * cpu feature flag.
188     + */
189     +static __init void update_tlbie_feature_flag(unsigned long pvr)
190     +{
191     + if (PVR_VER(pvr) == PVR_POWER9) {
192     + /*
193     + * Set the tlbie feature flag for anything below
194     + * Nimbus DD 2.3 and Cumulus DD 1.3
195     + */
196     + if ((pvr & 0xe000) == 0) {
197     + /* Nimbus */
198     + if ((pvr & 0xfff) < 0x203)
199     + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
200     + } else if ((pvr & 0xc000) == 0) {
201     + /* Cumulus */
202     + if ((pvr & 0xfff) < 0x103)
203     + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
204     + } else {
205     + WARN_ONCE(1, "Unknown PVR");
206     + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
207     + }
208     +
209     + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_ERAT_BUG;
210     + }
211     +}
212     +
213     static __init void cpufeatures_cpu_quirks(void)
214     {
215     - int version = mfspr(SPRN_PVR);
216     + unsigned long version = mfspr(SPRN_PVR);
217    
218     /*
219     * Not all quirks can be derived from the cpufeatures device tree.
220     @@ -712,10 +740,10 @@ static __init void cpufeatures_cpu_quirks(void)
221    
222     if ((version & 0xffff0000) == 0x004e0000) {
223     cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
224     - cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
225     cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
226     }
227    
228     + update_tlbie_feature_flag(version);
229     /*
230     * PKEY was not in the initial base or feature node
231     * specification, but it should become optional in the next
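[Note: update_tlbie_feature_flag() above keys the STQ workaround off PVR fields: the 0xe000/0xc000 bits distinguish Nimbus from Cumulus POWER9 parts, and the low 12 bits encode the DD revision. A userspace sketch of that decoding, assuming the caller has already matched PVR_VER(pvr) == PVR_POWER9:]

    #include <stdbool.h>
    #include <stdio.h>

    /* True when the STQ erratum workaround is needed, per the cutoffs
     * above: below Nimbus DD2.3 or Cumulus DD1.3, and defensively on
     * unknown parts. */
    static bool p9_needs_tlbie_stq_fixup(unsigned long pvr)
    {
        if ((pvr & 0xe000) == 0)          /* Nimbus */
            return (pvr & 0xfff) < 0x203;
        else if ((pvr & 0xc000) == 0)     /* Cumulus */
            return (pvr & 0xfff) < 0x103;
        return true;                      /* unknown PVR: assume affected */
    }

    int main(void)
    {
        printf("Nimbus DD2.2 (0x0202): %d\n", p9_needs_tlbie_stq_fixup(0x0202)); /* 1 */
        printf("Nimbus DD2.3 (0x0203): %d\n", p9_needs_tlbie_stq_fixup(0x0203)); /* 0 */
        return 0;
    }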
232     diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
233     index f255e22184b4..9e6f01abb31e 100644
234     --- a/arch/powerpc/kernel/head_32.S
235     +++ b/arch/powerpc/kernel/head_32.S
236     @@ -557,9 +557,9 @@ DataStoreTLBMiss:
237     cmplw 0,r1,r3
238     mfspr r2, SPRN_SPRG_PGDIR
239     #ifdef CONFIG_SWAP
240     - li r1, _PAGE_RW | _PAGE_PRESENT | _PAGE_ACCESSED
241     + li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
242     #else
243     - li r1, _PAGE_RW | _PAGE_PRESENT
244     + li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT
245     #endif
246     bge- 112f
247     lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
248     @@ -897,9 +897,11 @@ start_here:
249     bl machine_init
250     bl __save_cpu_setup
251     bl MMU_init
252     +#ifdef CONFIG_KASAN
253     BEGIN_MMU_FTR_SECTION
254     bl MMU_init_hw_patch
255     END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
256     +#endif
257    
258     /*
259     * Go back to running unmapped so we can load up new values
260     diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
261     index b18df633eae9..cff31d4a501f 100644
262     --- a/arch/powerpc/kernel/mce.c
263     +++ b/arch/powerpc/kernel/mce.c
264     @@ -33,6 +33,7 @@ static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
265     mce_ue_event_queue);
266    
267     static void machine_check_process_queued_event(struct irq_work *work);
268     +static void machine_check_ue_irq_work(struct irq_work *work);
269     void machine_check_ue_event(struct machine_check_event *evt);
270     static void machine_process_ue_event(struct work_struct *work);
271    
272     @@ -40,6 +41,10 @@ static struct irq_work mce_event_process_work = {
273     .func = machine_check_process_queued_event,
274     };
275    
276     +static struct irq_work mce_ue_event_irq_work = {
277     + .func = machine_check_ue_irq_work,
278     +};
279     +
280     DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
281    
282     static void mce_set_error_info(struct machine_check_event *mce,
283     @@ -199,6 +204,10 @@ void release_mce_event(void)
284     get_mce_event(NULL, true);
285     }
286    
287     +static void machine_check_ue_irq_work(struct irq_work *work)
288     +{
289     + schedule_work(&mce_ue_event_work);
290     +}
291    
292     /*
293     * Queue up the MCE event which then can be handled later.
294     @@ -216,7 +225,7 @@ void machine_check_ue_event(struct machine_check_event *evt)
295     memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));
296    
297     /* Queue work to process this event later. */
298     - schedule_work(&mce_ue_event_work);
299     + irq_work_queue(&mce_ue_event_irq_work);
300     }
301    
302     /*
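[Note: the mce.c hunks above defer UE handling through an irq_work because schedule_work() is not safe from machine-check context; only the irq_work handler, running with interrupts enabled, may schedule the workqueue item. A toy chain with function pointers standing in for irq_work_queue()/schedule_work(), not the kernel API:]

    #include <stdio.h>

    struct work { void (*func)(struct work *); };

    static void process_ue_event(struct work *w)
    {
        printf("workqueue: handling UE event\n");   /* sleepable context */
    }

    static struct work mce_ue_event_work = { .func = process_ue_event };

    static void ue_irq_work(struct work *w)
    {
        /* irq context: queueing workqueue items is allowed here */
        mce_ue_event_work.func(&mce_ue_event_work); /* stands in for schedule_work() */
    }

    static struct work mce_ue_event_irq_work = { .func = ue_irq_work };

    int main(void)
    {
        /* machine-check context: only this queueing step is permitted;
         * it stands in for irq_work_queue() */
        mce_ue_event_irq_work.func(&mce_ue_event_irq_work);
        return 0;
    }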
303     diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
304     index a814d2dfb5b0..714a98e0927f 100644
305     --- a/arch/powerpc/kernel/mce_power.c
306     +++ b/arch/powerpc/kernel/mce_power.c
307     @@ -26,6 +26,7 @@
308     unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
309     {
310     pte_t *ptep;
311     + unsigned int shift;
312     unsigned long flags;
313     struct mm_struct *mm;
314    
315     @@ -35,13 +36,18 @@ unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
316     mm = &init_mm;
317    
318     local_irq_save(flags);
319     - if (mm == current->mm)
320     - ptep = find_current_mm_pte(mm->pgd, addr, NULL, NULL);
321     - else
322     - ptep = find_init_mm_pte(addr, NULL);
323     + ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
324     local_irq_restore(flags);
325     +
326     if (!ptep || pte_special(*ptep))
327     return ULONG_MAX;
328     +
329     + if (shift > PAGE_SHIFT) {
330     + unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
331     +
332     + return pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
333     + }
334     +
335     return pte_pfn(*ptep);
336     }
337    
338     @@ -344,7 +350,7 @@ static const struct mce_derror_table mce_p9_derror_table[] = {
339     MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
340     { 0, false, 0, 0, 0, 0, 0 } };
341    
342     -static int mce_find_instr_ea_and_pfn(struct pt_regs *regs, uint64_t *addr,
343     +static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
344     uint64_t *phys_addr)
345     {
346     /*
347     @@ -541,7 +547,8 @@ static int mce_handle_derror(struct pt_regs *regs,
348     * kernel/exception-64s.h
349     */
350     if (get_paca()->in_mce < MAX_MCE_DEPTH)
351     - mce_find_instr_ea_and_pfn(regs, addr, phys_addr);
352     + mce_find_instr_ea_and_phys(regs, addr,
353     + phys_addr);
354     }
355     found = 1;
356     }
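[Note: the addr_to_pfn() hunk above ORs the sub-hugepage address bits into the PTE's RPN so the returned pfn targets the exact small page inside a huge mapping. A plain-C arithmetic sketch of that rpnmask computation, 64K PAGE_SHIFT assumed:]

    #include <stdio.h>

    #define PAGE_SHIFT 16                       /* 64K pages, as on ppc64 */
    #define PAGE_SIZE  (1ul << PAGE_SHIFT)

    /* For a mapping of size 1 << shift, the address bits between
     * PAGE_SHIFT and shift select the small page within it. */
    static unsigned long pfn_in_hugepage(unsigned long base_pa,
                                         unsigned long addr, unsigned int shift)
    {
        unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
        return (base_pa | (addr & rpnmask)) >> PAGE_SHIFT;
    }

    int main(void)
    {
        /* 16M hugepage (shift 24) at PA 0x1000000, fault at offset 0x30000 */
        unsigned long pfn = pfn_in_hugepage(0x1000000, 0x1030000, 24);
        printf("pfn = 0x%lx\n", pfn);           /* 0x103: third 64K page */
        return 0;
    }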
357     diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
358     index 9524d92bc45d..d7fcdfa7fee4 100644
359     --- a/arch/powerpc/kvm/book3s.c
360     +++ b/arch/powerpc/kvm/book3s.c
361     @@ -1083,9 +1083,11 @@ static int kvmppc_book3s_init(void)
362     if (xics_on_xive()) {
363     kvmppc_xive_init_module();
364     kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
365     - kvmppc_xive_native_init_module();
366     - kvm_register_device_ops(&kvm_xive_native_ops,
367     - KVM_DEV_TYPE_XIVE);
368     + if (kvmppc_xive_native_supported()) {
369     + kvmppc_xive_native_init_module();
370     + kvm_register_device_ops(&kvm_xive_native_ops,
371     + KVM_DEV_TYPE_XIVE);
372     + }
373     } else
374     #endif
375     kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
376     diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
377     index cde3f5a4b3e4..f8975c620f41 100644
378     --- a/arch/powerpc/kvm/book3s_hv.c
379     +++ b/arch/powerpc/kvm/book3s_hv.c
380     @@ -1678,7 +1678,14 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
381     *val = get_reg_val(id, vcpu->arch.pspb);
382     break;
383     case KVM_REG_PPC_DPDES:
384     - *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
385     + /*
386     + * On POWER9, where we are emulating msgsndp etc.,
387     + * we return 1 bit for each vcpu, which can come from
388     + * either vcore->dpdes or doorbell_request.
389     + * On POWER8, doorbell_request is 0.
390     + */
391     + *val = get_reg_val(id, vcpu->arch.vcore->dpdes |
392     + vcpu->arch.doorbell_request);
393     break;
394     case KVM_REG_PPC_VTB:
395     *val = get_reg_val(id, vcpu->arch.vcore->vtb);
396     @@ -2860,7 +2867,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
397     if (!spin_trylock(&pvc->lock))
398     continue;
399     prepare_threads(pvc);
400     - if (!pvc->n_runnable) {
401     + if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
402     list_del_init(&pvc->preempt_list);
403     if (pvc->runner == NULL) {
404     pvc->vcore_state = VCORE_INACTIVE;
405     @@ -2881,15 +2888,20 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
406     spin_unlock(&lp->lock);
407     }
408    
409     -static bool recheck_signals(struct core_info *cip)
410     +static bool recheck_signals_and_mmu(struct core_info *cip)
411     {
412     int sub, i;
413     struct kvm_vcpu *vcpu;
414     + struct kvmppc_vcore *vc;
415    
416     - for (sub = 0; sub < cip->n_subcores; ++sub)
417     - for_each_runnable_thread(i, vcpu, cip->vc[sub])
418     + for (sub = 0; sub < cip->n_subcores; ++sub) {
419     + vc = cip->vc[sub];
420     + if (!vc->kvm->arch.mmu_ready)
421     + return true;
422     + for_each_runnable_thread(i, vcpu, vc)
423     if (signal_pending(vcpu->arch.run_task))
424     return true;
425     + }
426     return false;
427     }
428    
429     @@ -3119,7 +3131,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
430     local_irq_disable();
431     hard_irq_disable();
432     if (lazy_irq_pending() || need_resched() ||
433     - recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) {
434     + recheck_signals_and_mmu(&core_info)) {
435     local_irq_enable();
436     vc->vcore_state = VCORE_INACTIVE;
437     /* Unlock all except the primary vcore */
438     diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
439     index 63e0ce91e29d..47f86252e8a1 100644
440     --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
441     +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
442     @@ -433,6 +433,37 @@ static inline int is_mmio_hpte(unsigned long v, unsigned long r)
443     (HPTE_R_KEY_HI | HPTE_R_KEY_LO));
444     }
445    
446     +static inline void fixup_tlbie_lpid(unsigned long rb_value, unsigned long lpid)
447     +{
448     +
449     + if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
450     + /* Radix flush for a hash guest */
451     +
452     + unsigned long rb,rs,prs,r,ric;
453     +
454     + rb = PPC_BIT(52); /* IS = 2 */
455     + rs = 0; /* lpid = 0 */
456     + prs = 0; /* partition scoped */
457     + r = 1; /* radix format */
458     + ric = 0; /* RIC_FLSUH_TLB */
459     +
460     + /*
461     + * Need the extra ptesync to make sure we don't
462     + * re-order the tlbie
463     + */
464     + asm volatile("ptesync": : :"memory");
465     + asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
466     + : : "r"(rb), "i"(r), "i"(prs),
467     + "i"(ric), "r"(rs) : "memory");
468     + }
469     +
470     + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
471     + asm volatile("ptesync": : :"memory");
472     + asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
473     + "r" (rb_value), "r" (lpid));
474     + }
475     +}
476     +
477     static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
478     long npages, int global, bool need_sync)
479     {
480     @@ -451,16 +482,7 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
481     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
482     }
483    
484     - if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
485     - /*
486     - * Need the extra ptesync to make sure we don't
487     - * re-order the tlbie
488     - */
489     - asm volatile("ptesync": : :"memory");
490     - asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
491     - "r" (rbvalues[0]), "r" (kvm->arch.lpid));
492     - }
493     -
494     + fixup_tlbie_lpid(rbvalues[i - 1], kvm->arch.lpid);
495     asm volatile("eieio; tlbsync; ptesync" : : : "memory");
496     } else {
497     if (need_sync)
498     diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
499     index 337e64468d78..07181d0dfcb7 100644
500     --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
501     +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
502     @@ -942,6 +942,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
503     ld r11, VCPU_XIVE_SAVED_STATE(r4)
504     li r9, TM_QW1_OS
505     lwz r8, VCPU_XIVE_CAM_WORD(r4)
506     + cmpwi r8, 0
507     + beq no_xive
508     li r7, TM_QW1_OS + TM_WORD2
509     mfmsr r0
510     andi. r0, r0, MSR_DR /* in real mode? */
511     @@ -2831,29 +2833,39 @@ kvm_cede_prodded:
512     kvm_cede_exit:
513     ld r9, HSTATE_KVM_VCPU(r13)
514     #ifdef CONFIG_KVM_XICS
515     - /* Abort if we still have a pending escalation */
516     + /* are we using XIVE with single escalation? */
517     + ld r10, VCPU_XIVE_ESC_VADDR(r9)
518     + cmpdi r10, 0
519     + beq 3f
520     + li r6, XIVE_ESB_SET_PQ_00
521     + /*
522     + * If we still have a pending escalation, abort the cede,
523     + * and we must set PQ to 10 rather than 00 so that we don't
524     + * potentially end up with two entries for the escalation
525     + * interrupt in the XIVE interrupt queue. In that case
526     + * we also don't want to set xive_esc_on to 1 here in
527     + * case we race with xive_esc_irq().
528     + */
529     lbz r5, VCPU_XIVE_ESC_ON(r9)
530     cmpwi r5, 0
531     - beq 1f
532     + beq 4f
533     li r0, 0
534     stb r0, VCPU_CEDED(r9)
535     -1: /* Enable XIVE escalation */
536     - li r5, XIVE_ESB_SET_PQ_00
537     + li r6, XIVE_ESB_SET_PQ_10
538     + b 5f
539     +4: li r0, 1
540     + stb r0, VCPU_XIVE_ESC_ON(r9)
541     + /* make sure store to xive_esc_on is seen before xive_esc_irq runs */
542     + sync
543     +5: /* Enable XIVE escalation */
544     mfmsr r0
545     andi. r0, r0, MSR_DR /* in real mode? */
546     beq 1f
547     - ld r10, VCPU_XIVE_ESC_VADDR(r9)
548     - cmpdi r10, 0
549     - beq 3f
550     - ldx r0, r10, r5
551     + ldx r0, r10, r6
552     b 2f
553     1: ld r10, VCPU_XIVE_ESC_RADDR(r9)
554     - cmpdi r10, 0
555     - beq 3f
556     - ldcix r0, r10, r5
557     + ldcix r0, r10, r6
558     2: sync
559     - li r0, 1
560     - stb r0, VCPU_XIVE_ESC_ON(r9)
561     #endif /* CONFIG_KVM_XICS */
562     3: b guest_exit_cont
563    
564     diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
565     index e3ba67095895..591bfb4bfd0f 100644
566     --- a/arch/powerpc/kvm/book3s_xive.c
567     +++ b/arch/powerpc/kvm/book3s_xive.c
568     @@ -67,8 +67,14 @@ void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
569     void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
570     u64 pq;
571    
572     - if (!tima)
573     + /*
574     + * Nothing to do if the platform doesn't have a XIVE
575     + * or this vCPU doesn't have its own XIVE context
576     + * (e.g. because it's not using an in-kernel interrupt controller).
577     + */
578     + if (!tima || !vcpu->arch.xive_cam_word)
579     return;
580     +
581     eieio();
582     __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
583     __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
584     @@ -160,6 +166,9 @@ static irqreturn_t xive_esc_irq(int irq, void *data)
585     */
586     vcpu->arch.xive_esc_on = false;
587    
588     + /* This orders xive_esc_on = false vs. subsequent stale_p = true */
589     + smp_wmb(); /* goes with smp_mb() in cleanup_single_escalation */
590     +
591     return IRQ_HANDLED;
592     }
593    
594     @@ -1113,6 +1122,31 @@ void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
595     vcpu->arch.xive_esc_raddr = 0;
596     }
597    
598     +/*
599     + * In single escalation mode, the escalation interrupt is marked so
600     + * that EOI doesn't re-enable it, but just sets the stale_p flag to
601     + * indicate that the P bit has already been dealt with. However, the
602     + * assembly code that enters the guest sets PQ to 00 without clearing
603     + * stale_p (because it has no easy way to address it). Hence we have
604     + * to adjust stale_p before shutting down the interrupt.
605     + */
606     +void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
607     + struct kvmppc_xive_vcpu *xc, int irq)
608     +{
609     + struct irq_data *d = irq_get_irq_data(irq);
610     + struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
611     +
612     + /*
613     + * This slightly odd sequence gives the right result
614     + * (i.e. stale_p set if xive_esc_on is false) even if
615     + * we race with xive_esc_irq() and xive_irq_eoi().
616     + */
617     + xd->stale_p = false;
618     + smp_mb(); /* paired with smb_wmb in xive_esc_irq */
619     + if (!vcpu->arch.xive_esc_on)
620     + xd->stale_p = true;
621     +}
622     +
623     void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
624     {
625     struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
626     @@ -1134,20 +1168,28 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
627     /* Mask the VP IPI */
628     xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
629    
630     - /* Disable the VP */
631     - xive_native_disable_vp(xc->vp_id);
632     -
633     - /* Free the queues & associated interrupts */
634     + /* Free escalations */
635     for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
636     - struct xive_q *q = &xc->queues[i];
637     -
638     - /* Free the escalation irq */
639     if (xc->esc_virq[i]) {
640     + if (xc->xive->single_escalation)
641     + xive_cleanup_single_escalation(vcpu, xc,
642     + xc->esc_virq[i]);
643     free_irq(xc->esc_virq[i], vcpu);
644     irq_dispose_mapping(xc->esc_virq[i]);
645     kfree(xc->esc_virq_names[i]);
646     }
647     - /* Free the queue */
648     + }
649     +
650     + /* Disable the VP */
651     + xive_native_disable_vp(xc->vp_id);
652     +
653     + /* Clear the cam word so guest entry won't try to push context */
654     + vcpu->arch.xive_cam_word = 0;
655     +
656     + /* Free the queues */
657     + for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
658     + struct xive_q *q = &xc->queues[i];
659     +
660     xive_native_disable_queue(xc->vp_id, q, i);
661     if (q->qpage) {
662     free_pages((unsigned long)q->qpage,
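[Note: the smp_wmb() added to xive_esc_irq() pairs with the smp_mb() in the new xive_cleanup_single_escalation(): the cleanup side writes stale_p = false, orders that against its re-read of xive_esc_on, and only then sets stale_p = true. A C11-atomics model of that pairing, using toy globals rather than the kernel API:]

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool xive_esc_on = true;
    static atomic_bool stale_p = false;

    static void esc_irq_handler(void)            /* xive_esc_irq() side */
    {
        atomic_store_explicit(&xive_esc_on, false, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);   /* smp_wmb() */
    }

    static void cleanup_single_escalation(void)  /* cleanup side */
    {
        atomic_store_explicit(&stale_p, false, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);   /* smp_mb() */
        if (!atomic_load_explicit(&xive_esc_on, memory_order_relaxed))
            atomic_store_explicit(&stale_p, true, memory_order_relaxed);
    }

    int main(void)
    {
        esc_irq_handler();
        cleanup_single_escalation();    /* stale_p ends up true */
        return (int)!atomic_load(&stale_p);
    }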
663     diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
664     index 50494d0ee375..955b820ffd6d 100644
665     --- a/arch/powerpc/kvm/book3s_xive.h
666     +++ b/arch/powerpc/kvm/book3s_xive.h
667     @@ -282,6 +282,8 @@ int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio);
668     int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
669     bool single_escalation);
670     struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type);
671     +void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
672     + struct kvmppc_xive_vcpu *xc, int irq);
673    
674     #endif /* CONFIG_KVM_XICS */
675     #endif /* _KVM_PPC_BOOK3S_XICS_H */
676     diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
677     index a998823f68a3..248c1ea9e788 100644
678     --- a/arch/powerpc/kvm/book3s_xive_native.c
679     +++ b/arch/powerpc/kvm/book3s_xive_native.c
680     @@ -67,20 +67,28 @@ void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
681     xc->valid = false;
682     kvmppc_xive_disable_vcpu_interrupts(vcpu);
683    
684     - /* Disable the VP */
685     - xive_native_disable_vp(xc->vp_id);
686     -
687     - /* Free the queues & associated interrupts */
688     + /* Free escalations */
689     for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
690     /* Free the escalation irq */
691     if (xc->esc_virq[i]) {
692     + if (xc->xive->single_escalation)
693     + xive_cleanup_single_escalation(vcpu, xc,
694     + xc->esc_virq[i]);
695     free_irq(xc->esc_virq[i], vcpu);
696     irq_dispose_mapping(xc->esc_virq[i]);
697     kfree(xc->esc_virq_names[i]);
698     xc->esc_virq[i] = 0;
699     }
700     + }
701     +
702     + /* Disable the VP */
703     + xive_native_disable_vp(xc->vp_id);
704     +
705     + /* Clear the cam word so guest entry won't try to push context */
706     + vcpu->arch.xive_cam_word = 0;
707    
708     - /* Free the queue */
709     + /* Free the queues */
710     + for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
711     kvmppc_xive_native_cleanup_queue(vcpu, i);
712     }
713    
714     @@ -1171,6 +1179,11 @@ int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
715     return 0;
716     }
717    
718     +bool kvmppc_xive_native_supported(void)
719     +{
720     + return xive_native_has_queue_state_support();
721     +}
722     +
723     static int xive_native_debug_show(struct seq_file *m, void *private)
724     {
725     struct kvmppc_xive *xive = m->private;
726     diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
727     index 3e566c2e6066..3a77bb643452 100644
728     --- a/arch/powerpc/kvm/powerpc.c
729     +++ b/arch/powerpc/kvm/powerpc.c
730     @@ -561,7 +561,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
731     * a POWER9 processor) and the PowerNV platform, as
732     * nested is not yet supported.
733     */
734     - r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE);
735     + r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
736     + kvmppc_xive_native_supported();
737     break;
738     #endif
739    
740     diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
741     index e249fbf6b9c3..8d68f03bf5a4 100644
742     --- a/arch/powerpc/mm/book3s32/mmu.c
743     +++ b/arch/powerpc/mm/book3s32/mmu.c
744     @@ -358,6 +358,15 @@ void __init MMU_init_hw(void)
745     hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
746     if (lg_n_hpteg > 16)
747     hash_mb2 = 16 - LG_HPTEG_SIZE;
748     +
749     + /*
750     + * When KASAN is selected, there is already an early temporary hash
751     + * table and the switch to the final hash table is done later.
752     + */
753     + if (IS_ENABLED(CONFIG_KASAN))
754     + return;
755     +
756     + MMU_init_hw_patch();
757     }
758    
759     void __init MMU_init_hw_patch(void)
760     diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
761     index 90ab4f31e2b3..523e42eb11da 100644
762     --- a/arch/powerpc/mm/book3s64/hash_native.c
763     +++ b/arch/powerpc/mm/book3s64/hash_native.c
764     @@ -197,9 +197,32 @@ static inline unsigned long ___tlbie(unsigned long vpn, int psize,
765     return va;
766     }
767    
768     -static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
769     +static inline void fixup_tlbie_vpn(unsigned long vpn, int psize,
770     + int apsize, int ssize)
771     {
772     - if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
773     + if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
774     + /* Radix flush for a hash guest */
775     +
776     + unsigned long rb,rs,prs,r,ric;
777     +
778     + rb = PPC_BIT(52); /* IS = 2 */
779     + rs = 0; /* lpid = 0 */
780     + prs = 0; /* partition scoped */
781     + r = 1; /* radix format */
782     + ric = 0; /* RIC_FLUSH_TLB */
783     +
784     + /*
785     + * Need the extra ptesync to make sure we don't
786     + * re-order the tlbie
787     + */
788     + asm volatile("ptesync": : :"memory");
789     + asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
790     + : : "r"(rb), "i"(r), "i"(prs),
791     + "i"(ric), "r"(rs) : "memory");
792     + }
793     +
794     +
795     + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
796     /* Need the extra ptesync to ensure we don't reorder tlbie*/
797     asm volatile("ptesync": : :"memory");
798     ___tlbie(vpn, psize, apsize, ssize);
799     @@ -283,7 +306,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
800     asm volatile("ptesync": : :"memory");
801     } else {
802     __tlbie(vpn, psize, apsize, ssize);
803     - fixup_tlbie(vpn, psize, apsize, ssize);
804     + fixup_tlbie_vpn(vpn, psize, apsize, ssize);
805     asm volatile("eieio; tlbsync; ptesync": : :"memory");
806     }
807     if (lock_tlbie && !use_local)
808     @@ -856,7 +879,7 @@ static void native_flush_hash_range(unsigned long number, int local)
809     /*
810     * Just do one more with the last used values.
811     */
812     - fixup_tlbie(vpn, psize, psize, ssize);
813     + fixup_tlbie_vpn(vpn, psize, psize, ssize);
814     asm volatile("eieio; tlbsync; ptesync":::"memory");
815    
816     if (lock_tlbie)
817     diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
818     index b8ad14bb1170..17b0885581e4 100644
819     --- a/arch/powerpc/mm/book3s64/hash_utils.c
820     +++ b/arch/powerpc/mm/book3s64/hash_utils.c
821     @@ -34,6 +34,7 @@
822     #include <linux/libfdt.h>
823     #include <linux/pkeys.h>
824     #include <linux/hugetlb.h>
825     +#include <linux/cpu.h>
826    
827     #include <asm/debugfs.h>
828     #include <asm/processor.h>
829     @@ -1931,10 +1932,16 @@ static int hpt_order_get(void *data, u64 *val)
830    
831     static int hpt_order_set(void *data, u64 val)
832     {
833     + int ret;
834     +
835     if (!mmu_hash_ops.resize_hpt)
836     return -ENODEV;
837    
838     - return mmu_hash_ops.resize_hpt(val);
839     + cpus_read_lock();
840     + ret = mmu_hash_ops.resize_hpt(val);
841     + cpus_read_unlock();
842     +
843     + return ret;
844     }
845    
846     DEFINE_DEBUGFS_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n");
847     diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
848     index 71f7fede2fa4..e66a77bdc657 100644
849     --- a/arch/powerpc/mm/book3s64/radix_tlb.c
850     +++ b/arch/powerpc/mm/book3s64/radix_tlb.c
851     @@ -211,22 +211,83 @@ static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid
852     trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
853     }
854    
855     -static inline void fixup_tlbie(void)
856     +
857     +static inline void fixup_tlbie_va(unsigned long va, unsigned long pid,
858     + unsigned long ap)
859     +{
860     + if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
861     + asm volatile("ptesync": : :"memory");
862     + __tlbie_va(va, 0, ap, RIC_FLUSH_TLB);
863     + }
864     +
865     + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
866     + asm volatile("ptesync": : :"memory");
867     + __tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
868     + }
869     +}
870     +
871     +static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,
872     + unsigned long ap)
873     {
874     - unsigned long pid = 0;
875     + if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
876     + asm volatile("ptesync": : :"memory");
877     + __tlbie_pid(0, RIC_FLUSH_TLB);
878     + }
879     +
880     + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
881     + asm volatile("ptesync": : :"memory");
882     + __tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
883     + }
884     +}
885     +
886     +static inline void fixup_tlbie_pid(unsigned long pid)
887     +{
888     + /*
889     + * We can use any address for the invalidation, pick one which is
890     + * probably unused as an optimisation.
891     + */
892     unsigned long va = ((1UL << 52) - 1);
893    
894     - if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
895     + if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
896     + asm volatile("ptesync": : :"memory");
897     + __tlbie_pid(0, RIC_FLUSH_TLB);
898     + }
899     +
900     + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
901     asm volatile("ptesync": : :"memory");
902     __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
903     }
904     }
905    
906     +
907     +static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid,
908     + unsigned long ap)
909     +{
910     + if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
911     + asm volatile("ptesync": : :"memory");
912     + __tlbie_lpid_va(va, 0, ap, RIC_FLUSH_TLB);
913     + }
914     +
915     + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
916     + asm volatile("ptesync": : :"memory");
917     + __tlbie_lpid_va(va, lpid, ap, RIC_FLUSH_TLB);
918     + }
919     +}
920     +
921     static inline void fixup_tlbie_lpid(unsigned long lpid)
922     {
923     + /*
924     + * We can use any address for the invalidation, pick one which is
925     + * probably unused as an optimisation.
926     + */
927     unsigned long va = ((1UL << 52) - 1);
928    
929     - if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
930     + if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
931     + asm volatile("ptesync": : :"memory");
932     + __tlbie_lpid(0, RIC_FLUSH_TLB);
933     + }
934     +
935     + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
936     asm volatile("ptesync": : :"memory");
937     __tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
938     }
939     @@ -273,6 +334,7 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
940     switch (ric) {
941     case RIC_FLUSH_TLB:
942     __tlbie_pid(pid, RIC_FLUSH_TLB);
943     + fixup_tlbie_pid(pid);
944     break;
945     case RIC_FLUSH_PWC:
946     __tlbie_pid(pid, RIC_FLUSH_PWC);
947     @@ -280,8 +342,8 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
948     case RIC_FLUSH_ALL:
949     default:
950     __tlbie_pid(pid, RIC_FLUSH_ALL);
951     + fixup_tlbie_pid(pid);
952     }
953     - fixup_tlbie();
954     asm volatile("eieio; tlbsync; ptesync": : :"memory");
955     }
956    
957     @@ -325,6 +387,7 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
958     switch (ric) {
959     case RIC_FLUSH_TLB:
960     __tlbie_lpid(lpid, RIC_FLUSH_TLB);
961     + fixup_tlbie_lpid(lpid);
962     break;
963     case RIC_FLUSH_PWC:
964     __tlbie_lpid(lpid, RIC_FLUSH_PWC);
965     @@ -332,8 +395,8 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
966     case RIC_FLUSH_ALL:
967     default:
968     __tlbie_lpid(lpid, RIC_FLUSH_ALL);
969     + fixup_tlbie_lpid(lpid);
970     }
971     - fixup_tlbie_lpid(lpid);
972     asm volatile("eieio; tlbsync; ptesync": : :"memory");
973     }
974    
975     @@ -407,6 +470,8 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
976    
977     for (addr = start; addr < end; addr += page_size)
978     __tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
979     +
980     + fixup_tlbie_va_range(addr - page_size, pid, ap);
981     }
982    
983     static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
984     @@ -416,7 +481,7 @@ static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
985    
986     asm volatile("ptesync": : :"memory");
987     __tlbie_va(va, pid, ap, ric);
988     - fixup_tlbie();
989     + fixup_tlbie_va(va, pid, ap);
990     asm volatile("eieio; tlbsync; ptesync": : :"memory");
991     }
992    
993     @@ -427,7 +492,7 @@ static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
994    
995     asm volatile("ptesync": : :"memory");
996     __tlbie_lpid_va(va, lpid, ap, ric);
997     - fixup_tlbie_lpid(lpid);
998     + fixup_tlbie_lpid_va(va, lpid, ap);
999     asm volatile("eieio; tlbsync; ptesync": : :"memory");
1000     }
1001    
1002     @@ -439,7 +504,6 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end,
1003     if (also_pwc)
1004     __tlbie_pid(pid, RIC_FLUSH_PWC);
1005     __tlbie_va_range(start, end, pid, page_size, psize);
1006     - fixup_tlbie();
1007     asm volatile("eieio; tlbsync; ptesync": : :"memory");
1008     }
1009    
1010     @@ -775,7 +839,7 @@ is_local:
1011     if (gflush)
1012     __tlbie_va_range(gstart, gend, pid,
1013     PUD_SIZE, MMU_PAGE_1G);
1014     - fixup_tlbie();
1015     +
1016     asm volatile("eieio; tlbsync; ptesync": : :"memory");
1017     }
1018     }
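[Note: __tlbie_va_range() above now ends with a fixup issued for the last address the loop touched (addr - page_size once the loop exits), rather than a single global fixup afterwards. A plain-C sketch of that ordering, with a stub standing in for the tlbie helpers:]

    #include <stdio.h>

    static void tlbie_va(unsigned long va) { printf("tlbie va=0x%lx\n", va); }

    static void tlbie_va_range(unsigned long start, unsigned long end,
                               unsigned long page_size)
    {
        unsigned long addr;

        for (addr = start; addr < end; addr += page_size)
            tlbie_va(addr);

        /* fixup: repeat with the last used value, as the erratum requires */
        tlbie_va(addr - page_size);
    }

    int main(void)
    {
        tlbie_va_range(0x10000, 0x13000, 0x1000); /* flushes 3 pages + fixup */
        return 0;
    }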
1019     diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
1020     index a44f6281ca3a..4e08246acd79 100644
1021     --- a/arch/powerpc/mm/init_64.c
1022     +++ b/arch/powerpc/mm/init_64.c
1023     @@ -172,6 +172,21 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
1024     vmemmap_list = vmem_back;
1025     }
1026    
1027     +static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
1028     + unsigned long page_size)
1029     +{
1030     + unsigned long nr_pfn = page_size / sizeof(struct page);
1031     + unsigned long start_pfn = page_to_pfn((struct page *)start);
1032     +
1033     + if ((start_pfn + nr_pfn) > altmap->end_pfn)
1034     + return true;
1035     +
1036     + if (start_pfn < altmap->base_pfn)
1037     + return true;
1038     +
1039     + return false;
1040     +}
1041     +
1042     int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
1043     struct vmem_altmap *altmap)
1044     {
1045     @@ -194,7 +209,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
1046     * fail due to alignment issues when using 16MB hugepages, so
1047     * fall back to system memory if the altmap allocation fail.
1048     */
1049     - if (altmap) {
1050     + if (altmap && !altmap_cross_boundary(altmap, start, page_size)) {
1051     p = altmap_alloc_block_buf(page_size, altmap);
1052     if (!p)
1053     pr_debug("altmap block allocation failed, falling back to system memory");
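[Note: altmap_cross_boundary() above rejects vmemmap chunks whose pfn range spills outside the altmap window, falling back to system memory in that case. A self-contained sketch of the same check, with a hypothetical struct toy_altmap in place of struct vmem_altmap:]

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_altmap { unsigned long base_pfn, end_pfn; };

    /* Mirrors the two comparisons in altmap_cross_boundary(). */
    static bool crosses_boundary(const struct toy_altmap *a,
                                 unsigned long start_pfn, unsigned long nr_pfn)
    {
        return start_pfn + nr_pfn > a->end_pfn || start_pfn < a->base_pfn;
    }

    int main(void)
    {
        struct toy_altmap a = { .base_pfn = 0x1000, .end_pfn = 0x2000 };
        printf("%d\n", crosses_boundary(&a, 0x1800, 0x900)); /* 1: spills past end */
        printf("%d\n", crosses_boundary(&a, 0x1000, 0x800)); /* 0: fits */
        return 0;
    }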
1054     diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
1055     index 74f4555a62ba..0e6ed4413eea 100644
1056     --- a/arch/powerpc/mm/kasan/kasan_init_32.c
1057     +++ b/arch/powerpc/mm/kasan/kasan_init_32.c
1058     @@ -5,12 +5,21 @@
1059     #include <linux/kasan.h>
1060     #include <linux/printk.h>
1061     #include <linux/memblock.h>
1062     +#include <linux/moduleloader.h>
1063     #include <linux/sched/task.h>
1064     #include <linux/vmalloc.h>
1065     #include <asm/pgalloc.h>
1066     #include <asm/code-patching.h>
1067     #include <mm/mmu_decl.h>
1068    
1069     +static pgprot_t kasan_prot_ro(void)
1070     +{
1071     + if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
1072     + return PAGE_READONLY;
1073     +
1074     + return PAGE_KERNEL_RO;
1075     +}
1076     +
1077     static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
1078     {
1079     unsigned long va = (unsigned long)kasan_early_shadow_page;
1080     @@ -25,6 +34,7 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned l
1081     {
1082     pmd_t *pmd;
1083     unsigned long k_cur, k_next;
1084     + pgprot_t prot = slab_is_available() ? kasan_prot_ro() : PAGE_KERNEL;
1085    
1086     pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);
1087    
1088     @@ -42,11 +52,20 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned l
1089    
1090     if (!new)
1091     return -ENOMEM;
1092     - if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
1093     - kasan_populate_pte(new, PAGE_READONLY);
1094     - else
1095     - kasan_populate_pte(new, PAGE_KERNEL_RO);
1096     - pmd_populate_kernel(&init_mm, pmd, new);
1097     + kasan_populate_pte(new, prot);
1098     +
1099     + smp_wmb(); /* See comment in __pte_alloc */
1100     +
1101     + spin_lock(&init_mm.page_table_lock);
1102     + /* Has another populated it ? */
1103     + if (likely((void *)pmd_page_vaddr(*pmd) == kasan_early_shadow_pte)) {
1104     + pmd_populate_kernel(&init_mm, pmd, new);
1105     + new = NULL;
1106     + }
1107     + spin_unlock(&init_mm.page_table_lock);
1108     +
1109     + if (new && slab_is_available())
1110     + pte_free_kernel(&init_mm, new);
1111     }
1112     return 0;
1113     }
1114     @@ -74,7 +93,7 @@ static int __ref kasan_init_region(void *start, size_t size)
1115     if (!slab_is_available())
1116     block = memblock_alloc(k_end - k_start, PAGE_SIZE);
1117    
1118     - for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE) {
1119     + for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
1120     pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
1121     void *va = block ? block + k_cur - k_start : kasan_get_one_page();
1122     pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
1123     @@ -90,11 +109,23 @@ static int __ref kasan_init_region(void *start, size_t size)
1124    
1125     static void __init kasan_remap_early_shadow_ro(void)
1126     {
1127     - if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
1128     - kasan_populate_pte(kasan_early_shadow_pte, PAGE_READONLY);
1129     - else
1130     - kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL_RO);
1131     + pgprot_t prot = kasan_prot_ro();
1132     + unsigned long k_start = KASAN_SHADOW_START;
1133     + unsigned long k_end = KASAN_SHADOW_END;
1134     + unsigned long k_cur;
1135     + phys_addr_t pa = __pa(kasan_early_shadow_page);
1136     +
1137     + kasan_populate_pte(kasan_early_shadow_pte, prot);
1138     +
1139     + for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
1140     + pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
1141     + pte_t *ptep = pte_offset_kernel(pmd, k_cur);
1142    
1143     + if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
1144     + continue;
1145     +
1146     + __set_pte_at(&init_mm, k_cur, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
1147     + }
1148     flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
1149     }
1150    
1151     @@ -137,7 +168,11 @@ void __init kasan_init(void)
1152     #ifdef CONFIG_MODULES
1153     void *module_alloc(unsigned long size)
1154     {
1155     - void *base = vmalloc_exec(size);
1156     + void *base;
1157     +
1158     + base = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, VMALLOC_END,
1159     + GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
1160     + NUMA_NO_NODE, __builtin_return_address(0));
1161    
1162     if (!base)
1163     return NULL;
1164     diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
1165     index 5d6111a9ee0e..74ff2bff4ea0 100644
1166     --- a/arch/powerpc/mm/ptdump/ptdump.c
1167     +++ b/arch/powerpc/mm/ptdump/ptdump.c
1168     @@ -27,7 +27,7 @@
1169     #include "ptdump.h"
1170    
1171     #ifdef CONFIG_PPC32
1172     -#define KERN_VIRT_START PAGE_OFFSET
1173     +#define KERN_VIRT_START 0
1174     #endif
1175    
1176     /*
1177     diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
1178     index aba443be7daa..d271accf224b 100644
1179     --- a/arch/powerpc/platforms/powernv/opal.c
1180     +++ b/arch/powerpc/platforms/powernv/opal.c
1181     @@ -705,7 +705,10 @@ static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
1182     bin_attr->size);
1183     }
1184    
1185     -static BIN_ATTR_RO(symbol_map, 0);
1186     +static struct bin_attribute symbol_map_attr = {
1187     + .attr = {.name = "symbol_map", .mode = 0400},
1188     + .read = symbol_map_read
1189     +};
1190    
1191     static void opal_export_symmap(void)
1192     {
1193     @@ -722,10 +725,10 @@ static void opal_export_symmap(void)
1194     return;
1195    
1196     /* Setup attributes */
1197     - bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
1198     - bin_attr_symbol_map.size = be64_to_cpu(syms[1]);
1199     + symbol_map_attr.private = __va(be64_to_cpu(syms[0]));
1200     + symbol_map_attr.size = be64_to_cpu(syms[1]);
1201    
1202     - rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
1203     + rc = sysfs_create_bin_file(opal_kobj, &symbol_map_attr);
1204     if (rc)
1205     pr_warn("Error %d creating OPAL symbols file\n", rc);
1206     }
1207     diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
1208     index c75ec37bf0cd..a0b9c0c23ed2 100644
1209     --- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
1210     +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
1211     @@ -49,6 +49,9 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
1212     return addr;
1213     }
1214    
1215     +static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
1216     + unsigned long size, unsigned int levels);
1217     +
1218     static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
1219     {
1220     __be64 *tmp = user ? tbl->it_userspace : (__be64 *) tbl->it_base;
1221     @@ -58,9 +61,9 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
1222    
1223     while (level) {
1224     int n = (idx & mask) >> (level * shift);
1225     - unsigned long tce;
1226     + unsigned long oldtce, tce = be64_to_cpu(READ_ONCE(tmp[n]));
1227    
1228     - if (tmp[n] == 0) {
1229     + if (!tce) {
1230     __be64 *tmp2;
1231    
1232     if (!alloc)
1233     @@ -71,10 +74,15 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
1234     if (!tmp2)
1235     return NULL;
1236    
1237     - tmp[n] = cpu_to_be64(__pa(tmp2) |
1238     - TCE_PCI_READ | TCE_PCI_WRITE);
1239     + tce = __pa(tmp2) | TCE_PCI_READ | TCE_PCI_WRITE;
1240     + oldtce = be64_to_cpu(cmpxchg(&tmp[n], 0,
1241     + cpu_to_be64(tce)));
1242     + if (oldtce) {
1243     + pnv_pci_ioda2_table_do_free_pages(tmp2,
1244     + ilog2(tbl->it_level_size) + 3, 1);
1245     + tce = oldtce;
1246     + }
1247     }
1248     - tce = be64_to_cpu(tmp[n]);
1249    
1250     tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
1251     idx &= ~mask;
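[Note: pnv_tce() above now installs new TCE table levels with cmpxchg() and frees its own allocation if another CPU raced ahead. A C11 model of that lock-free populate pattern; calloc()/free() stand in for pnv_alloc_tce_level() and the free path:]

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    static void *install_level(_Atomic(uintptr_t) *slot)
    {
        void *cand = calloc(1, 4096);        /* candidate table level */
        uintptr_t expected = 0;

        if (!atomic_compare_exchange_strong(slot, &expected, (uintptr_t)cand)) {
            free(cand);                      /* lost the race: use the winner's table */
            return (void *)expected;
        }
        return cand;
    }

    int main(void)
    {
        _Atomic(uintptr_t) slot = 0;
        void *a = install_level(&slot);
        void *b = install_level(&slot);      /* second call reuses the first table */
        return a == b ? 0 : 1;
    }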
1252     diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
1253     index 09bb878c21e0..4f76e5f30c97 100644
1254     --- a/arch/powerpc/platforms/pseries/lpar.c
1255     +++ b/arch/powerpc/platforms/pseries/lpar.c
1256     @@ -1413,7 +1413,10 @@ static int pseries_lpar_resize_hpt_commit(void *data)
1257     return 0;
1258     }
1259    
1260     -/* Must be called in user context */
1261     +/*
1262     + * Must be called in process context. The caller must hold the
1263     + * cpus_lock.
1264     + */
1265     static int pseries_lpar_resize_hpt(unsigned long shift)
1266     {
1267     struct hpt_resize_state state = {
1268     @@ -1467,7 +1470,8 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
1269    
1270     t1 = ktime_get();
1271    
1272     - rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL);
1273     + rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit,
1274     + &state, NULL);
1275    
1276     t2 = ktime_get();
1277    
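[Note: the hash_utils.c and lpar.c hunks above move CPU-hotplug locking to the caller: hpt_order_set() takes cpus_read_lock()/cpus_read_unlock() around the resize, so the pseries implementation must use stop_machine_cpuslocked() to avoid re-acquiring the lock. A pthread sketch of that caller-holds-the-lock split, with a toy rwlock rather than the kernel API:]

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t cpu_hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* _cpuslocked variant: assumes the caller already holds the lock */
    static int resize_hpt_cpuslocked(unsigned long shift)
    {
        printf("resizing HPT to order %lu with hotplug locked\n", shift);
        return 0;
    }

    static int hpt_order_set(unsigned long val)   /* debugfs write path */
    {
        int ret;

        pthread_rwlock_rdlock(&cpu_hotplug_lock); /* cpus_read_lock() */
        ret = resize_hpt_cpuslocked(val);
        pthread_rwlock_unlock(&cpu_hotplug_lock); /* cpus_read_unlock() */
        return ret;
    }

    int main(void) { return hpt_order_set(18); }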
1278     diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
1279     index 1cdb39575eae..be86fce1a84e 100644
1280     --- a/arch/powerpc/sysdev/xive/common.c
1281     +++ b/arch/powerpc/sysdev/xive/common.c
1282     @@ -135,7 +135,7 @@ static u32 xive_read_eq(struct xive_q *q, bool just_peek)
1283     static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
1284     {
1285     u32 irq = 0;
1286     - u8 prio;
1287     + u8 prio = 0;
1288    
1289     /* Find highest pending priority */
1290     while (xc->pending_prio != 0) {
1291     @@ -148,8 +148,19 @@ static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
1292     irq = xive_read_eq(&xc->queue[prio], just_peek);
1293    
1294     /* Found something ? That's it */
1295     - if (irq)
1296     - break;
1297     + if (irq) {
1298     + if (just_peek || irq_to_desc(irq))
1299     + break;
1300     + /*
1301     + * We should never get here; if we do then we must
1302     + * have failed to synchronize the interrupt properly
1303     + * when shutting it down.
1304     + */
1305     + pr_crit("xive: got interrupt %d without descriptor, dropping\n",
1306     + irq);
1307     + WARN_ON(1);
1308     + continue;
1309     + }
1310    
1311     /* Clear pending bits */
1312     xc->pending_prio &= ~(1 << prio);
1313     @@ -307,6 +318,7 @@ static void xive_do_queue_eoi(struct xive_cpu *xc)
1314     */
1315     static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
1316     {
1317     + xd->stale_p = false;
1318     /* If the XIVE supports the new "store EOI facility, use it */
1319     if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
1320     xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
1321     @@ -350,7 +362,7 @@ static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
1322     }
1323     }
1324    
1325     -/* irq_chip eoi callback */
1326     +/* irq_chip eoi callback, called with irq descriptor lock held */
1327     static void xive_irq_eoi(struct irq_data *d)
1328     {
1329     struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
1330     @@ -366,6 +378,8 @@ static void xive_irq_eoi(struct irq_data *d)
1331     if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
1332     !(xd->flags & XIVE_IRQ_NO_EOI))
1333     xive_do_source_eoi(irqd_to_hwirq(d), xd);
1334     + else
1335     + xd->stale_p = true;
1336    
1337     /*
1338     * Clear saved_p to indicate that it's no longer occupying
1339     @@ -397,11 +411,16 @@ static void xive_do_source_set_mask(struct xive_irq_data *xd,
1340     */
1341     if (mask) {
1342     val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
1343     - xd->saved_p = !!(val & XIVE_ESB_VAL_P);
1344     - } else if (xd->saved_p)
1345     + if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
1346     + xd->saved_p = true;
1347     + xd->stale_p = false;
1348     + } else if (xd->saved_p) {
1349     xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
1350     - else
1351     + xd->saved_p = false;
1352     + } else {
1353     xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
1354     + xd->stale_p = false;
1355     + }
1356     }
1357    
1358     /*
1359     @@ -541,6 +560,8 @@ static unsigned int xive_irq_startup(struct irq_data *d)
1360     unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
1361     int target, rc;
1362    
1363     + xd->saved_p = false;
1364     + xd->stale_p = false;
1365     pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
1366     d->irq, hw_irq, d);
1367    
1368     @@ -587,6 +608,7 @@ static unsigned int xive_irq_startup(struct irq_data *d)
1369     return 0;
1370     }
1371    
1372     +/* called with irq descriptor lock held */
1373     static void xive_irq_shutdown(struct irq_data *d)
1374     {
1375     struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
1376     @@ -601,16 +623,6 @@ static void xive_irq_shutdown(struct irq_data *d)
1377     /* Mask the interrupt at the source */
1378     xive_do_source_set_mask(xd, true);
1379    
1380     - /*
1381     - * The above may have set saved_p. We clear it otherwise it
1382     - * will prevent re-enabling later on. It is ok to forget the
1383     - * fact that the interrupt might be in a queue because we are
1384     - * accounting that already in xive_dec_target_count() and will
1385     - * be re-routing it to a new queue with proper accounting when
1386     - * it's started up again
1387     - */
1388     - xd->saved_p = false;
1389     -
1390     /*
1391     * Mask the interrupt in HW in the IVT/EAS and set the number
1392     * to be the "bad" IRQ number
1393     @@ -797,6 +809,10 @@ static int xive_irq_retrigger(struct irq_data *d)
1394     return 1;
1395     }
1396    
1397     +/*
1398     + * Caller holds the irq descriptor lock, so this won't be called
1399     + * concurrently with xive_get_irqchip_state on the same interrupt.
1400     + */
1401     static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
1402     {
1403     struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
1404     @@ -820,6 +836,10 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
1405    
1406     /* Set it to PQ=10 state to prevent further sends */
1407     pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
1408     + if (!xd->stale_p) {
1409     + xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
1410     + xd->stale_p = !xd->saved_p;
1411     + }
1412    
1413     /* No target ? nothing to do */
1414     if (xd->target == XIVE_INVALID_TARGET) {
1415     @@ -827,7 +847,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
1416     * An untargetted interrupt should have been
1417     * also masked at the source
1418     */
1419     - WARN_ON(pq & 2);
1420     + WARN_ON(xd->saved_p);
1421    
1422     return 0;
1423     }
1424     @@ -847,9 +867,8 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
1425     * This saved_p is cleared by the host EOI, when we know
1426     * for sure the queue slot is no longer in use.
1427     */
1428     - if (pq & 2) {
1429     - pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
1430     - xd->saved_p = true;
1431     + if (xd->saved_p) {
1432     + xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
1433    
1434     /*
1435     * Sync the XIVE source HW to ensure the interrupt
1436     @@ -862,8 +881,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
1437     */
1438     if (xive_ops->sync_source)
1439     xive_ops->sync_source(hw_irq);
1440     - } else
1441     - xd->saved_p = false;
1442     + }
1443     } else {
1444     irqd_clr_forwarded_to_vcpu(d);
1445    
1446     @@ -914,6 +932,23 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
1447     return 0;
1448     }
1449    
1450     +/* Called with irq descriptor lock held. */
1451     +static int xive_get_irqchip_state(struct irq_data *data,
1452     + enum irqchip_irq_state which, bool *state)
1453     +{
1454     + struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
1455     +
1456     + switch (which) {
1457     + case IRQCHIP_STATE_ACTIVE:
1458     + *state = !xd->stale_p &&
1459     + (xd->saved_p ||
1460     + !!(xive_esb_read(xd, XIVE_ESB_GET) & XIVE_ESB_VAL_P));
1461     + return 0;
1462     + default:
1463     + return -EINVAL;
1464     + }
1465     +}
1466     +
1467     static struct irq_chip xive_irq_chip = {
1468     .name = "XIVE-IRQ",
1469     .irq_startup = xive_irq_startup,
1470     @@ -925,6 +960,7 @@ static struct irq_chip xive_irq_chip = {
1471     .irq_set_type = xive_irq_set_type,
1472     .irq_retrigger = xive_irq_retrigger,
1473     .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
1474     + .irq_get_irqchip_state = xive_get_irqchip_state,
1475     };
1476    
1477     bool is_xive_irq(struct irq_chip *chip)
1478     @@ -1337,6 +1373,11 @@ static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
1479     raw_spin_lock(&desc->lock);
1480     xd = irq_desc_get_handler_data(desc);
1481    
1482     + /*
1483     + * Clear saved_p to indicate that it's no longer pending
1484     + */
1485     + xd->saved_p = false;
1486     +
1487     /*
1488     * For LSIs, we EOI, this will cause a resend if it's
1489     * still asserted. Otherwise do an MSI retrigger.
1490     diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
1491     index cf156aadefe9..ad8ee7dd7f57 100644
1492     --- a/arch/powerpc/sysdev/xive/native.c
1493     +++ b/arch/powerpc/sysdev/xive/native.c
1494     @@ -811,6 +811,13 @@ int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
1495     }
1496     EXPORT_SYMBOL_GPL(xive_native_set_queue_state);
1497    
1498     +bool xive_native_has_queue_state_support(void)
1499     +{
1500     + return opal_check_token(OPAL_XIVE_GET_QUEUE_STATE) &&
1501     + opal_check_token(OPAL_XIVE_SET_QUEUE_STATE);
1502     +}
1503     +EXPORT_SYMBOL_GPL(xive_native_has_queue_state_support);
1504     +
1505     int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
1506     {
1507     __be64 state;
1508     diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
1509     index bc7a56e1ca6f..9b60878a4469 100644
1510     --- a/arch/riscv/kernel/entry.S
1511     +++ b/arch/riscv/kernel/entry.S
1512     @@ -166,9 +166,13 @@ ENTRY(handle_exception)
1513     move a0, sp /* pt_regs */
1514     tail do_IRQ
1515     1:
1516     - /* Exceptions run with interrupts enabled */
1517     + /* Exceptions run with interrupts enabled or disabled
1518     + depending on the state of sstatus.SR_SPIE */
1519     + andi t0, s1, SR_SPIE
1520     + beqz t0, 1f
1521     csrs sstatus, SR_SIE
1522    
1523     +1:
1524     /* Handle syscalls */
1525     li t0, EXC_SYSCALL
1526     beq s4, t0, handle_syscall
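Note: the entry.S change stops unconditionally enabling interrupts for exception handling; SR_SIE is now set only when the saved sstatus shows interrupts were on when the trap was taken (SR_SPIE). A C rendering of the same decision, assuming the standard RISC-V sstatus bit positions (SIE at bit 1, SPIE at bit 5):

#include <stdio.h>
#include <stdint.h>

#define SR_SIE	(1UL << 1)	/* supervisor interrupt enable */
#define SR_SPIE	(1UL << 5)	/* SIE value prior to the trap */

/* Nonzero when the handler may run with interrupts enabled. */
static int handler_irqs_on(uint64_t saved_sstatus)
{
	return (saved_sstatus & SR_SPIE) != 0;
}

int main(void)
{
	printf("%d\n", handler_irqs_on(SR_SPIE));	/* 1: re-enable */
	printf("%d\n", handler_irqs_on(0));		/* 0: stay disabled */
	return 0;
}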
1527     diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
1528     index 63873aa6693f..9f2727bf3cbe 100644
1529     --- a/arch/s390/kernel/process.c
1530     +++ b/arch/s390/kernel/process.c
1531     @@ -184,20 +184,30 @@ unsigned long get_wchan(struct task_struct *p)
1532    
1533     if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
1534     return 0;
1535     +
1536     + if (!try_get_task_stack(p))
1537     + return 0;
1538     +
1539     low = task_stack_page(p);
1540     high = (struct stack_frame *) task_pt_regs(p);
1541     sf = (struct stack_frame *) p->thread.ksp;
1542     - if (sf <= low || sf > high)
1543     - return 0;
1544     + if (sf <= low || sf > high) {
1545     + return_address = 0;
1546     + goto out;
1547     + }
1548     for (count = 0; count < 16; count++) {
1549     sf = (struct stack_frame *) sf->back_chain;
1550     - if (sf <= low || sf > high)
1551     - return 0;
1552     + if (sf <= low || sf > high) {
1553     + return_address = 0;
1554     + goto out;
1555     + }
1556     return_address = sf->gprs[8];
1557     if (!in_sched_functions(return_address))
1558     - return return_address;
1559     + goto out;
1560     }
1561     - return 0;
1562     +out:
1563     + put_task_stack(p);
1564     + return return_address;
1565     }
1566    
1567     unsigned long arch_align_stack(unsigned long sp)
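Note: the reworked get_wchan() follows the common pin/walk/unpin pattern: try_get_task_stack() pins the stack so it cannot be freed mid-walk, and every exit funnels through one label that drops the pin. A condensed sketch of the control shape, not a standalone program; frame_in_bounds() and found_wchan() are invented stand-ins for the low/high bounds checks and the in_sched_functions() test:

unsigned long get_wchan_shape(struct task_struct *p)
{
	unsigned long return_address = 0;
	int count;

	if (!try_get_task_stack(p))	/* stack may already be freed */
		return 0;

	for (count = 0; count < 16; count++) {
		if (!frame_in_bounds(p))	/* stand-in for the sf checks */
			goto out;
		if (found_wchan(p, &return_address))
			goto out;
	}
out:
	put_task_stack(p);	/* every exit path drops the pin */
	return return_address;
}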
1568     diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
1569     index 2db6fb405a9a..3627953007ed 100644
1570     --- a/arch/s390/kernel/topology.c
1571     +++ b/arch/s390/kernel/topology.c
1572     @@ -311,7 +311,8 @@ int arch_update_cpu_topology(void)
1573     on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
1574     for_each_online_cpu(cpu) {
1575     dev = get_cpu_device(cpu);
1576     - kobject_uevent(&dev->kobj, KOBJ_CHANGE);
1577     + if (dev)
1578     + kobject_uevent(&dev->kobj, KOBJ_CHANGE);
1579     }
1580     return rc;
1581     }
1582     diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
1583     index 39cff07bf2eb..7d955dbf9e6d 100644
1584     --- a/arch/s390/kvm/kvm-s390.c
1585     +++ b/arch/s390/kvm/kvm-s390.c
1586     @@ -332,7 +332,7 @@ static inline int plo_test_bit(unsigned char nr)
1587     return cc == 0;
1588     }
1589    
1590     -static inline void __insn32_query(unsigned int opcode, u8 query[32])
1591     +static inline void __insn32_query(unsigned int opcode, u8 *query)
1592     {
1593     register unsigned long r0 asm("0") = 0; /* query function */
1594     register unsigned long r1 asm("1") = (unsigned long) query;
1595     @@ -340,9 +340,9 @@ static inline void __insn32_query(unsigned int opcode, u8 query[32])
1596     asm volatile(
1597     /* Parameter regs are ignored */
1598     " .insn rrf,%[opc] << 16,2,4,6,0\n"
1599     - : "=m" (*query)
1600     + :
1601     : "d" (r0), "a" (r1), [opc] "i" (opcode)
1602     - : "cc");
1603     + : "cc", "memory");
1604     }
1605    
1606     #define INSN_SORTL 0xb938
1607     @@ -4257,7 +4257,7 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
1608     const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
1609     | KVM_S390_MEMOP_F_CHECK_ONLY;
1610    
1611     - if (mop->flags & ~supported_flags)
1612     + if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
1613     return -EINVAL;
1614    
1615     if (mop->size > MEM_OP_MAX_SIZE)
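Note: the __insn32_query() hunk is an inline-asm constraint fix. Once the array parameter decays to a plain pointer, "=m" (*query) describes only the first byte, so the compiler may assume the remaining 31 bytes are untouched; the "memory" clobber makes the instruction's full 32-byte store visible. A userspace illustration of the idiom, with memset() standing in for the instruction's stores:

#include <stdio.h>
#include <string.h>

/* Pretend the asm writes 32 bytes through buf, like the query insn.
 * The "memory" clobber is what stops the compiler from caching or
 * reordering reads of buf around the statement. */
static void fake_query(unsigned char *buf)
{
	memset(buf, 0xab, 32);	/* stand-in for the instruction's effect */
	asm volatile("" : : "r"(buf) : "memory");
}

int main(void)
{
	unsigned char q[32] = { 0 };

	fake_query(q);
	printf("q[31]=0x%02x\n", q[31]);	/* 0xab */
	return 0;
}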
1616     diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
1617     index a3cba321b5c5..61aa9421e27a 100644
1618     --- a/arch/x86/kvm/vmx/nested.c
1619     +++ b/arch/x86/kvm/vmx/nested.c
1620     @@ -2584,7 +2584,7 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
1621    
1622     /* VM-entry exception error code */
1623     if (has_error_code &&
1624     - vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
1625     + vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))
1626     return -EINVAL;
1627    
1628     /* VM-entry interruption-info field: reserved bits */
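Note: the nested-VMX hunk tightens a reserved-bit check. The VM-entry exception error code is architecturally a 16-bit value, so only bits 31:16 are reserved; GENMASK(31, 15) also rejected bit 15, which is a legal error-code bit. A self-contained comparison of the two masks:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	uint32_t err = 0x8000;	/* bit 15: valid in a 16-bit error code */

	printf("old mask rejects: %d\n", !!(err & GENMASK(31, 15)));	/* 1 */
	printf("new mask rejects: %d\n", !!(err & GENMASK(31, 16)));	/* 0 */
	return 0;
}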
1629     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1630     index 350adc83eb50..e5ccfb33dbea 100644
1631     --- a/arch/x86/kvm/x86.c
1632     +++ b/arch/x86/kvm/x86.c
1633     @@ -884,34 +884,42 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
1634     }
1635     EXPORT_SYMBOL_GPL(kvm_set_xcr);
1636    
1637     -int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1638     +static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1639     {
1640     - unsigned long old_cr4 = kvm_read_cr4(vcpu);
1641     - unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
1642     - X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
1643     -
1644     if (cr4 & CR4_RESERVED_BITS)
1645     - return 1;
1646     + return -EINVAL;
1647    
1648     if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
1649     - return 1;
1650     + return -EINVAL;
1651    
1652     if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
1653     - return 1;
1654     + return -EINVAL;
1655    
1656     if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
1657     - return 1;
1658     + return -EINVAL;
1659    
1660     if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
1661     - return 1;
1662     + return -EINVAL;
1663    
1664     if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
1665     - return 1;
1666     + return -EINVAL;
1667    
1668     if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
1669     - return 1;
1670     + return -EINVAL;
1671    
1672     if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
1673     + return -EINVAL;
1674     +
1675     + return 0;
1676     +}
1677     +
1678     +int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1679     +{
1680     + unsigned long old_cr4 = kvm_read_cr4(vcpu);
1681     + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
1682     + X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
1683     +
1684     + if (kvm_valid_cr4(vcpu, cr4))
1685     return 1;
1686    
1687     if (is_long_mode(vcpu)) {
1688     @@ -8598,10 +8606,6 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
1689    
1690     static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1691     {
1692     - if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
1693     - (sregs->cr4 & X86_CR4_OSXSAVE))
1694     - return -EINVAL;
1695     -
1696     if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
1697     /*
1698     * When EFER.LME and CR0.PG are set, the processor is in
1699     @@ -8620,7 +8624,7 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1700     return -EINVAL;
1701     }
1702    
1703     - return 0;
1704     + return kvm_valid_cr4(vcpu, sregs->cr4);
1705     }
1706    
1707     static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
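Note: the x86.c hunks split CR4 handling into a pure checker, kvm_valid_cr4(), and a committing caller, so kvm_valid_sregs() can reuse the full validation instead of the single OSXSAVE test it previously open-coded. The validate-then-commit shape, reduced to a runnable toy (CR4_RESERVED here is a placeholder mask, not the real one):

#include <stdio.h>

#define EINVAL		22
#define CR4_RESERVED	0xffff0000u	/* placeholder mask */

static int valid_cr4(unsigned int cr4)
{
	if (cr4 & CR4_RESERVED)
		return -EINVAL;		/* reject, change nothing */
	return 0;
}

static int set_cr4(unsigned int *cr4, unsigned int val)
{
	if (valid_cr4(val))
		return 1;		/* same checker the sregs path uses */
	*cr4 = val;			/* commit only after validation */
	return 0;
}

int main(void)
{
	unsigned int cr4 = 0;

	printf("bad:  %d cr4=%#x\n", set_cr4(&cr4, 0xffff0000u), cr4);
	printf("good: %d cr4=%#x\n", set_cr4(&cr4, 0x20), cr4);
	return 0;
}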
1708     diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
1709     index 10fb42da0007..b81b5172cf99 100644
1710     --- a/arch/x86/purgatory/Makefile
1711     +++ b/arch/x86/purgatory/Makefile
1712     @@ -23,6 +23,7 @@ KCOV_INSTRUMENT := n
1713    
1714     PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
1715     PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
1716     +PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN)
1717    
1718     # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
1719     # in turn leaves some undefined symbols like __fentry__ in purgatory and not
1720     diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
1721     index c9d183d6c499..ca22afd47b3d 100644
1722     --- a/block/blk-mq-sched.c
1723     +++ b/block/blk-mq-sched.c
1724     @@ -555,8 +555,6 @@ void blk_mq_sched_free_requests(struct request_queue *q)
1725     struct blk_mq_hw_ctx *hctx;
1726     int i;
1727    
1728     - lockdep_assert_held(&q->sysfs_lock);
1729     -
1730     queue_for_each_hw_ctx(q, hctx, i) {
1731     if (hctx->sched_tags)
1732     blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
1733     diff --git a/block/blk.h b/block/blk.h
1734     index d5edfd73d45e..0685c45e3d96 100644
1735     --- a/block/blk.h
1736     +++ b/block/blk.h
1737     @@ -201,6 +201,8 @@ void elv_unregister_queue(struct request_queue *q);
1738     static inline void elevator_exit(struct request_queue *q,
1739     struct elevator_queue *e)
1740     {
1741     + lockdep_assert_held(&q->sysfs_lock);
1742     +
1743     blk_mq_sched_free_requests(q);
1744     __elevator_exit(q, e);
1745     }
1746     diff --git a/crypto/skcipher.c b/crypto/skcipher.c
1747     index 5d836fc3df3e..22753c1c7202 100644
1748     --- a/crypto/skcipher.c
1749     +++ b/crypto/skcipher.c
1750     @@ -90,7 +90,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
1751     return max(start, end_page);
1752     }
1753    
1754     -static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
1755     +static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
1756     {
1757     u8 *addr;
1758    
1759     @@ -98,19 +98,21 @@ static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
1760     addr = skcipher_get_spot(addr, bsize);
1761     scatterwalk_copychunks(addr, &walk->out, bsize,
1762     (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
1763     + return 0;
1764     }
1765    
1766     int skcipher_walk_done(struct skcipher_walk *walk, int err)
1767     {
1768     - unsigned int n; /* bytes processed */
1769     - bool more;
1770     + unsigned int n = walk->nbytes;
1771     + unsigned int nbytes = 0;
1772    
1773     - if (unlikely(err < 0))
1774     + if (!n)
1775     goto finish;
1776    
1777     - n = walk->nbytes - err;
1778     - walk->total -= n;
1779     - more = (walk->total != 0);
1780     + if (likely(err >= 0)) {
1781     + n -= err;
1782     + nbytes = walk->total - n;
1783     + }
1784    
1785     if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
1786     SKCIPHER_WALK_SLOW |
1787     @@ -126,7 +128,7 @@ unmap_src:
1788     memcpy(walk->dst.virt.addr, walk->page, n);
1789     skcipher_unmap_dst(walk);
1790     } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
1791     - if (err) {
1792     + if (err > 0) {
1793     /*
1794     * Didn't process all bytes. Either the algorithm is
1795     * broken, or this was the last step and it turned out
1796     @@ -134,27 +136,29 @@ unmap_src:
1797     * the algorithm requires it.
1798     */
1799     err = -EINVAL;
1800     - goto finish;
1801     - }
1802     - skcipher_done_slow(walk, n);
1803     - goto already_advanced;
1804     + nbytes = 0;
1805     + } else
1806     + n = skcipher_done_slow(walk, n);
1807     }
1808    
1809     + if (err > 0)
1810     + err = 0;
1811     +
1812     + walk->total = nbytes;
1813     + walk->nbytes = 0;
1814     +
1815     scatterwalk_advance(&walk->in, n);
1816     scatterwalk_advance(&walk->out, n);
1817     -already_advanced:
1818     - scatterwalk_done(&walk->in, 0, more);
1819     - scatterwalk_done(&walk->out, 1, more);
1820     + scatterwalk_done(&walk->in, 0, nbytes);
1821     + scatterwalk_done(&walk->out, 1, nbytes);
1822    
1823     - if (more) {
1824     + if (nbytes) {
1825     crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
1826     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
1827     return skcipher_walk_next(walk);
1828     }
1829     - err = 0;
1830     -finish:
1831     - walk->nbytes = 0;
1832    
1833     +finish:
1834     /* Short-circuit for the common/fast path. */
1835     if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
1836     goto out;
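Note: the reworked skcipher_walk_done() replaces the processed/more pair with explicit byte accounting: n starts as the bytes mapped for this step, a non-negative err (the bytes the step left unprocessed) reduces it, and nbytes becomes the remaining total, with zero steering into the finish path. A tiny model of just that arithmetic:

#include <stdio.h>

struct walk { unsigned int nbytes, total; };

/* Models the accounting at the top of skcipher_walk_done(). */
static unsigned int account(struct walk *w, int err, unsigned int *advance)
{
	unsigned int n = w->nbytes;	/* bytes handed to this step */
	unsigned int nbytes = 0;	/* bytes still to do afterwards */

	if (err >= 0) {
		n -= err;		/* err = bytes the step left over */
		nbytes = w->total - n;
	}				/* err < 0: abort, nbytes stays 0 */
	*advance = n;
	return nbytes;
}

int main(void)
{
	struct walk w = { .nbytes = 64, .total = 192 };
	unsigned int adv;

	printf("remaining=%u advanced=%u\n", account(&w, 0, &adv), adv);
	return 0;
}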
1837     diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
1838     index a69a90ad9208..0b727f7432f9 100644
1839     --- a/drivers/block/nbd.c
1840     +++ b/drivers/block/nbd.c
1841     @@ -108,6 +108,7 @@ struct nbd_device {
1842     struct nbd_config *config;
1843     struct mutex config_lock;
1844     struct gendisk *disk;
1845     + struct workqueue_struct *recv_workq;
1846    
1847     struct list_head list;
1848     struct task_struct *task_recv;
1849     @@ -138,7 +139,6 @@ static struct dentry *nbd_dbg_dir;
1850    
1851     static unsigned int nbds_max = 16;
1852     static int max_part = 16;
1853     -static struct workqueue_struct *recv_workqueue;
1854     static int part_shift;
1855    
1856     static int nbd_dev_dbg_init(struct nbd_device *nbd);
1857     @@ -1038,7 +1038,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
1858     /* We take the tx_mutex in an error path in the recv_work, so we
1859     * need to queue_work outside of the tx_mutex.
1860     */
1861     - queue_work(recv_workqueue, &args->work);
1862     + queue_work(nbd->recv_workq, &args->work);
1863    
1864     atomic_inc(&config->live_connections);
1865     wake_up(&config->conn_wait);
1866     @@ -1139,6 +1139,10 @@ static void nbd_config_put(struct nbd_device *nbd)
1867     kfree(nbd->config);
1868     nbd->config = NULL;
1869    
1870     + if (nbd->recv_workq)
1871     + destroy_workqueue(nbd->recv_workq);
1872     + nbd->recv_workq = NULL;
1873     +
1874     nbd->tag_set.timeout = 0;
1875     nbd->disk->queue->limits.discard_granularity = 0;
1876     nbd->disk->queue->limits.discard_alignment = 0;
1877     @@ -1167,6 +1171,14 @@ static int nbd_start_device(struct nbd_device *nbd)
1878     return -EINVAL;
1879     }
1880    
1881     + nbd->recv_workq = alloc_workqueue("knbd%d-recv",
1882     + WQ_MEM_RECLAIM | WQ_HIGHPRI |
1883     + WQ_UNBOUND, 0, nbd->index);
1884     + if (!nbd->recv_workq) {
1885     + dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
1886     + return -ENOMEM;
1887     + }
1888     +
1889     blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
1890     nbd->task_recv = current;
1891    
1892     @@ -1197,7 +1209,7 @@ static int nbd_start_device(struct nbd_device *nbd)
1893     INIT_WORK(&args->work, recv_work);
1894     args->nbd = nbd;
1895     args->index = i;
1896     - queue_work(recv_workqueue, &args->work);
1897     + queue_work(nbd->recv_workq, &args->work);
1898     }
1899     nbd_size_update(nbd);
1900     return error;
1901     @@ -1217,8 +1229,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
1902     mutex_unlock(&nbd->config_lock);
1903     ret = wait_event_interruptible(config->recv_wq,
1904     atomic_read(&config->recv_threads) == 0);
1905     - if (ret)
1906     + if (ret) {
1907     sock_shutdown(nbd);
1908     + flush_workqueue(nbd->recv_workq);
1909     + }
1910     mutex_lock(&nbd->config_lock);
1911     nbd_bdev_reset(bdev);
1912     /* user requested, ignore socket errors */
1913     @@ -1877,6 +1891,12 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
1914     nbd_disconnect(nbd);
1915     nbd_clear_sock(nbd);
1916     mutex_unlock(&nbd->config_lock);
1917     + /*
1918     + * Make sure recv thread has finished, so it does not drop the last
1919     + * config ref and try to destroy the workqueue from inside the work
1920     + * queue.
1921     + */
1922     + flush_workqueue(nbd->recv_workq);
1923     if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
1924     &nbd->config->runtime_flags))
1925     nbd_config_put(nbd);
1926     @@ -2263,20 +2283,12 @@ static int __init nbd_init(void)
1927    
1928     if (nbds_max > 1UL << (MINORBITS - part_shift))
1929     return -EINVAL;
1930     - recv_workqueue = alloc_workqueue("knbd-recv",
1931     - WQ_MEM_RECLAIM | WQ_HIGHPRI |
1932     - WQ_UNBOUND, 0);
1933     - if (!recv_workqueue)
1934     - return -ENOMEM;
1935    
1936     - if (register_blkdev(NBD_MAJOR, "nbd")) {
1937     - destroy_workqueue(recv_workqueue);
1938     + if (register_blkdev(NBD_MAJOR, "nbd"))
1939     return -EIO;
1940     - }
1941    
1942     if (genl_register_family(&nbd_genl_family)) {
1943     unregister_blkdev(NBD_MAJOR, "nbd");
1944     - destroy_workqueue(recv_workqueue);
1945     return -EINVAL;
1946     }
1947     nbd_dbg_init();
1948     @@ -2318,7 +2330,6 @@ static void __exit nbd_cleanup(void)
1949    
1950     idr_destroy(&nbd_index_idr);
1951     genl_unregister_family(&nbd_genl_family);
1952     - destroy_workqueue(recv_workqueue);
1953     unregister_blkdev(NBD_MAJOR, "nbd");
1954     }
1955    
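Note: the nbd hunks move the receive workqueue from a module global to a per-device field, which lets the driver flush only its own receive work and tie the queue's lifetime to the device configuration. The teardown ordering the hunks rely on, in outline (kernel APIs exactly as used above):

/* nbd_disconnect_and_put(): let in-flight receive work finish first,
 * so it cannot drop the last config ref from inside its own queue. */
flush_workqueue(nbd->recv_workq);

/* nbd_config_put(): safe to destroy once nothing can queue to it. */
if (nbd->recv_workq)
	destroy_workqueue(nbd->recv_workq);
nbd->recv_workq = NULL;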
1956     diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
1957     index 72531837571e..28ecef7a481c 100644
1958     --- a/drivers/crypto/caam/caamalg_desc.c
1959     +++ b/drivers/crypto/caam/caamalg_desc.c
1960     @@ -503,6 +503,7 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
1961     const bool is_qi, int era)
1962     {
1963     u32 geniv, moveiv;
1964     + u32 *wait_cmd;
1965    
1966     /* Note: Context registers are saved. */
1967     init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
1968     @@ -598,6 +599,14 @@ copy_iv:
1969    
1970     /* Will read cryptlen */
1971     append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1972     +
1973     + /*
1974     + * Wait for IV transfer (ofifo -> class2) to finish before starting
1975     + * ciphertext transfer (ofifo -> external memory).
1976     + */
1977     + wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
1978     + set_jump_tgt_here(desc, wait_cmd);
1979     +
1980     append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
1981     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
1982     append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
1983     diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
1984     index da4a4ee60c80..706007624d82 100644
1985     --- a/drivers/crypto/caam/caamalg_desc.h
1986     +++ b/drivers/crypto/caam/caamalg_desc.h
1987     @@ -12,7 +12,7 @@
1988     #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
1989     #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
1990     #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
1991     -#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
1992     +#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ)
1993     #define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
1994     #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
1995     #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
1996     diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
1997     index 4f0d45865aa2..95da6ae43482 100644
1998     --- a/drivers/crypto/caam/error.c
1999     +++ b/drivers/crypto/caam/error.c
2000     @@ -118,6 +118,7 @@ static const struct {
2001     u8 value;
2002     const char *error_text;
2003     } qi_error_list[] = {
2004     + { 0x00, "No error" },
2005     { 0x1F, "Job terminated by FQ or ICID flush" },
2006     { 0x20, "FD format error"},
2007     { 0x21, "FD command format error"},
2008     diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
2009     index 0fe618e3804a..19a378bdf331 100644
2010     --- a/drivers/crypto/caam/qi.c
2011     +++ b/drivers/crypto/caam/qi.c
2012     @@ -163,7 +163,10 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
2013     dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
2014     sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
2015    
2016     - drv_req->cbk(drv_req, -EIO);
2017     + if (fd->status)
2018     + drv_req->cbk(drv_req, be32_to_cpu(fd->status));
2019     + else
2020     + drv_req->cbk(drv_req, JRSTA_SSRC_QI);
2021     }
2022    
2023     static struct qman_fq *create_caam_req_fq(struct device *qidev,
2024     diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
2025     index 8591914d5c51..7c7ea8af6a48 100644
2026     --- a/drivers/crypto/caam/regs.h
2027     +++ b/drivers/crypto/caam/regs.h
2028     @@ -641,6 +641,7 @@ struct caam_job_ring {
2029     #define JRSTA_SSRC_CCB_ERROR 0x20000000
2030     #define JRSTA_SSRC_JUMP_HALT_USER 0x30000000
2031     #define JRSTA_SSRC_DECO 0x40000000
2032     +#define JRSTA_SSRC_QI 0x50000000
2033     #define JRSTA_SSRC_JRERROR 0x60000000
2034     #define JRSTA_SSRC_JUMP_HALT_CC 0x70000000
2035    
2036     diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
2037     index a8447a3cf366..194624b4855b 100644
2038     --- a/drivers/crypto/cavium/zip/zip_main.c
2039     +++ b/drivers/crypto/cavium/zip/zip_main.c
2040     @@ -593,6 +593,7 @@ static const struct file_operations zip_stats_fops = {
2041     .owner = THIS_MODULE,
2042     .open = zip_stats_open,
2043     .read = seq_read,
2044     + .release = single_release,
2045     };
2046    
2047     static int zip_clear_open(struct inode *inode, struct file *file)
2048     @@ -604,6 +605,7 @@ static const struct file_operations zip_clear_fops = {
2049     .owner = THIS_MODULE,
2050     .open = zip_clear_open,
2051     .read = seq_read,
2052     + .release = single_release,
2053     };
2054    
2055     static int zip_regs_open(struct inode *inode, struct file *file)
2056     @@ -615,6 +617,7 @@ static const struct file_operations zip_regs_fops = {
2057     .owner = THIS_MODULE,
2058     .open = zip_regs_open,
2059     .read = seq_read,
2060     + .release = single_release,
2061     };
2062    
2063     /* Root directory for thunderx_zip debugfs entry */
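Note: the three one-line zip_main.c additions plug the same leak. These debugfs files are opened through single_open(), which allocates per-open seq_file state that only single_release() frees; with no .release hook, every open leaked that allocation. The standard pairing (demo_stats_open is a placeholder wrapping single_open()):

static const struct file_operations demo_stats_fops = {
	.owner	 = THIS_MODULE,
	.open	 = demo_stats_open,	/* wraps single_open() */
	.read	 = seq_read,
	.release = single_release,	/* frees what single_open() allocated */
};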
2064     diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
2065     index 7aa4cbe19a86..29bf397cf0c1 100644
2066     --- a/drivers/crypto/ccree/cc_aead.c
2067     +++ b/drivers/crypto/ccree/cc_aead.c
2068     @@ -236,7 +236,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
2069     /* In case of payload authentication failure, MUST NOT
2070     * revealed the decrypted message --> zero its memory.
2071     */
2072     - cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
2073     + cc_zero_sgl(areq->dst, areq->cryptlen);
2074     err = -EBADMSG;
2075     }
2076     } else { /*ENCRYPT*/
2077     diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
2078     index 5ad3ffb7acaa..040e09c0e1af 100644
2079     --- a/drivers/crypto/ccree/cc_fips.c
2080     +++ b/drivers/crypto/ccree/cc_fips.c
2081     @@ -21,7 +21,13 @@ static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
2082     u32 reg;
2083    
2084     reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
2085     - return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
2086     + /* Did the TEE report status? */
2087     + if (reg & CC_FIPS_SYNC_TEE_STATUS)
2088     + /* Yes. Is it OK? */
2089     + return (reg & CC_FIPS_SYNC_MODULE_OK);
2090     +
2091     + /* No. It's either not in use or will be reported later */
2092     + return true;
2093     }
2094    
2095     /*
2096     diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
2097     index 5c4c0a253129..d78f8d5c89c3 100644
2098     --- a/drivers/crypto/qat/qat_common/adf_common_drv.h
2099     +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
2100     @@ -95,7 +95,7 @@ struct service_hndl {
2101    
2102     static inline int get_current_node(void)
2103     {
2104     - return topology_physical_package_id(smp_processor_id());
2105     + return topology_physical_package_id(raw_smp_processor_id());
2106     }
2107    
2108     int adf_service_register(struct service_hndl *service);
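Note: get_current_node() can run in preemptible context, where smp_processor_id() triggers a CONFIG_DEBUG_PREEMPT warning because the thread may migrate as soon as the call returns. raw_smp_processor_id() accepts that the answer is advisory, which is fine for a NUMA allocation hint. A userspace analogue of an advisory CPU id:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* Like raw_smp_processor_id(): a hint, stale as soon as it returns. */
	printf("running on cpu %d (advisory)\n", sched_getcpu());
	return 0;
}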
2109     diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
2110     index 35c38aad8b4f..cd15c96dd27f 100644
2111     --- a/drivers/devfreq/tegra-devfreq.c
2112     +++ b/drivers/devfreq/tegra-devfreq.c
2113     @@ -474,11 +474,11 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
2114     {
2115     struct tegra_devfreq *tegra = dev_get_drvdata(dev);
2116     struct dev_pm_opp *opp;
2117     - unsigned long rate = *freq * KHZ;
2118     + unsigned long rate;
2119    
2120     - opp = devfreq_recommended_opp(dev, &rate, flags);
2121     + opp = devfreq_recommended_opp(dev, freq, flags);
2122     if (IS_ERR(opp)) {
2123     - dev_err(dev, "Failed to find opp for %lu KHz\n", *freq);
2124     + dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
2125     return PTR_ERR(opp);
2126     }
2127     rate = dev_pm_opp_get_freq(opp);
2128     @@ -487,8 +487,6 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
2129     clk_set_min_rate(tegra->emc_clock, rate);
2130     clk_set_rate(tegra->emc_clock, 0);
2131    
2132     - *freq = rate;
2133     -
2134     return 0;
2135     }
2136    
2137     @@ -498,7 +496,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
2138     struct tegra_devfreq *tegra = dev_get_drvdata(dev);
2139     struct tegra_devfreq_device *actmon_dev;
2140    
2141     - stat->current_frequency = tegra->cur_freq;
2142     + stat->current_frequency = tegra->cur_freq * KHZ;
2143    
2144     /* To be used by the tegra governor */
2145     stat->private_data = tegra;
2146     @@ -553,7 +551,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
2147     target_freq = max(target_freq, dev->target_freq);
2148     }
2149    
2150     - *freq = target_freq;
2151     + *freq = target_freq * KHZ;
2152    
2153     return 0;
2154     }
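Note: the tegra-devfreq hunks fix a unit mismatch: the devfreq core and the OPP table speak Hz while the driver's internal bookkeeping is in kHz, so *freq is now passed through untranslated and only driver-internal values are scaled by KHZ at the boundary. The conversion in isolation:

#include <stdio.h>

#define KHZ 1000

int main(void)
{
	unsigned long cur_freq_khz = 204000;	/* driver-internal unit */

	/* what get_dev_status must report to the devfreq core: */
	printf("current_frequency = %lu Hz\n", cur_freq_khz * KHZ);
	return 0;
}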
2155     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
2156     index 7850084a05e3..60655834d649 100644
2157     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
2158     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
2159     @@ -143,7 +143,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
2160     /* ring tests don't use a job */
2161     if (job) {
2162     vm = job->vm;
2163     - fence_ctx = job->base.s_fence->scheduled.context;
2164     + fence_ctx = job->base.s_fence ?
2165     + job->base.s_fence->scheduled.context : 0;
2166     } else {
2167     vm = NULL;
2168     fence_ctx = 0;
2169     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
2170     index 0cf7e8606fd3..00beba533582 100644
2171     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
2172     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
2173     @@ -662,6 +662,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
2174     if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
2175     sh_num = 0xffffffff;
2176    
2177     + if (info->read_mmr_reg.count > 128)
2178     + return -EINVAL;
2179     +
2180     regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
2181     if (!regs)
2182     return -ENOMEM;
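Note: the added bound in amdgpu_info_ioctl() caps a user-controlled register count before it sizes an allocation and a read loop. kmalloc_array() already guards the multiplication against overflow, so the cap's job is to bound how much memory and MMIO work one ioctl can demand. The shape of the check, with 128 being the limit chosen in the hunk:

if (info->read_mmr_reg.count > 128)
	return -EINVAL;		/* refuse before sizing the allocation */

regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
if (!regs)
	return -ENOMEM;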
2183     diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
2184     index 9aaf2deff6e9..8bf9f541e7fe 100644
2185     --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
2186     +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
2187     @@ -532,7 +532,7 @@ static int navi10_get_metrics_table(struct smu_context *smu,
2188     struct smu_table_context *smu_table= &smu->smu_table;
2189     int ret = 0;
2190    
2191     - if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
2192     + if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
2193     ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
2194     (void *)smu_table->metrics_table, false);
2195     if (ret) {
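Note: besides lengthening the metrics-cache window to 100 ms, msecs_to_jiffies(100) avoids an integer-division trap in the old HZ / 1000 expression: on kernels configured with HZ below 1000 it evaluates to 0 jiffies, so the cache never held anything. A quick demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int hz = 250;	/* a common CONFIG_HZ choice */

	printf("HZ/1000 = %u jiffies\n", hz / 1000);			/* 0 */
	printf("100 ms  = %u jiffies\n", 100 * hz / 1000);		/* 25 */
	return 0;
}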
2196     diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
2197     index 50af399d7f6f..380be66d4c6e 100644
2198     --- a/drivers/gpu/drm/arm/malidp_hw.c
2199     +++ b/drivers/gpu/drm/arm/malidp_hw.c
2200     @@ -385,6 +385,7 @@ int malidp_format_get_bpp(u32 fmt)
2201     switch (fmt) {
2202     case DRM_FORMAT_VUY101010:
2203     bpp = 30;
2204     + break;
2205     case DRM_FORMAT_YUV420_10BIT:
2206     bpp = 15;
2207     break;
2208     @@ -1309,7 +1310,7 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
2209     break;
2210     case MW_RESTART:
2211     drm_writeback_signal_completion(&malidp->mw_connector, 0);
2212     - /* fall through to a new start */
2213     + /* fall through - to a new start */
2214     case MW_START:
2215     /* writeback started, need to emulate one-shot mode */
2216     hw->disable_memwrite(hwdev);
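Note: two control-flow fixes in malidp_hw.c. The first adds the break that stops DRM_FORMAT_VUY101010 falling through and having its bpp of 30 overwritten with 15; the second rewrites an intentional fall-through comment into the form GCC's -Wimplicit-fallthrough pattern matching recognizes. The missing-break failure mode, in miniature:

#include <stdio.h>

static int get_bpp(int fmt, int with_break)
{
	int bpp = 0;

	switch (fmt) {
	case 30:
		bpp = 30;
		if (with_break)
			break;	/* the fix: stop here */
		/* fall through */
	case 15:
		bpp = 15;
		break;
	}
	return bpp;
}

int main(void)
{
	printf("fixed: %d, buggy: %d\n", get_bpp(30, 1), get_bpp(30, 0));
	return 0;
}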
2217     diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
2218     index abe38bdf85ae..15be4667f26f 100644
2219     --- a/drivers/gpu/drm/drm_atomic_uapi.c
2220     +++ b/drivers/gpu/drm/drm_atomic_uapi.c
2221     @@ -1301,8 +1301,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
2222     if (arg->reserved)
2223     return -EINVAL;
2224    
2225     - if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
2226     - !dev->mode_config.async_page_flip)
2227     + if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC)
2228     return -EINVAL;
2229    
2230     /* can't test and expect an event at the same time. */
2231     diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
2232     index bd810454d239..d9de5cf8c09f 100644
2233     --- a/drivers/gpu/drm/drm_ioctl.c
2234     +++ b/drivers/gpu/drm/drm_ioctl.c
2235     @@ -336,7 +336,12 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
2236     case DRM_CLIENT_CAP_ATOMIC:
2237     if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
2238     return -EOPNOTSUPP;
2239     - if (req->value > 1)
2240     + /* The modesetting DDX has a totally broken idea of atomic. */
2241     + if (current->comm[0] == 'X' && req->value == 1) {
2242     + pr_info("broken atomic modeset userspace detected, disabling atomic\n");
2243     + return -EOPNOTSUPP;
2244     + }
2245     + if (req->value > 2)
2246     return -EINVAL;
2247     file_priv->atomic = req->value;
2248     file_priv->universal_planes = req->value;
2249     diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
2250     index 592b92782fab..1e59a78e74bf 100644
2251     --- a/drivers/gpu/drm/i915/display/intel_display.c
2252     +++ b/drivers/gpu/drm/i915/display/intel_display.c
2253     @@ -7132,7 +7132,7 @@ retry:
2254     pipe_config->fdi_lanes = lane;
2255    
2256     intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
2257     - link_bw, &pipe_config->fdi_m_n, false);
2258     + link_bw, &pipe_config->fdi_m_n, false, false);
2259    
2260     ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
2261     if (ret == -EDEADLK)
2262     @@ -7379,11 +7379,15 @@ void
2263     intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
2264     int pixel_clock, int link_clock,
2265     struct intel_link_m_n *m_n,
2266     - bool constant_n)
2267     + bool constant_n, bool fec_enable)
2268     {
2269     - m_n->tu = 64;
2270     + u32 data_clock = bits_per_pixel * pixel_clock;
2271     +
2272     + if (fec_enable)
2273     + data_clock = intel_dp_mode_to_fec_clock(data_clock);
2274    
2275     - compute_m_n(bits_per_pixel * pixel_clock,
2276     + m_n->tu = 64;
2277     + compute_m_n(data_clock,
2278     link_clock * nlanes * 8,
2279     &m_n->gmch_m, &m_n->gmch_n,
2280     constant_n);
2281     diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
2282     index ee6b8194a459..868914c6d9b5 100644
2283     --- a/drivers/gpu/drm/i915/display/intel_display.h
2284     +++ b/drivers/gpu/drm/i915/display/intel_display.h
2285     @@ -351,7 +351,7 @@ struct intel_link_m_n {
2286     void intel_link_compute_m_n(u16 bpp, int nlanes,
2287     int pixel_clock, int link_clock,
2288     struct intel_link_m_n *m_n,
2289     - bool constant_n);
2290     + bool constant_n, bool fec_enable);
2291     bool is_ccs_modifier(u64 modifier);
2292     void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
2293     u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2294     diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
2295     index d0fc34826771..87f4a381dec2 100644
2296     --- a/drivers/gpu/drm/i915/display/intel_dp.c
2297     +++ b/drivers/gpu/drm/i915/display/intel_dp.c
2298     @@ -76,8 +76,8 @@
2299     #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
2300     #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
2301    
2302     -/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
2303     -#define DP_DSC_FEC_OVERHEAD_FACTOR 976
2304     +/* DP DSC FEC Overhead factor = 1/(0.972261) */
2305     +#define DP_DSC_FEC_OVERHEAD_FACTOR 972261
2306    
2307     /* Compliance test status bits */
2308     #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
2309     @@ -526,6 +526,97 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
2310     return 0;
2311     }
2312    
2313     +u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
2314     +{
2315     + return div_u64(mul_u32_u32(mode_clock, 1000000U),
2316     + DP_DSC_FEC_OVERHEAD_FACTOR);
2317     +}
2318     +
2319     +static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
2320     + u32 mode_clock, u32 mode_hdisplay)
2321     +{
2322     + u32 bits_per_pixel, max_bpp_small_joiner_ram;
2323     + int i;
2324     +
2325     + /*
2326     + * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
2327     + * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
2328     + * for SST -> TimeSlotsPerMTP is 1,
2329     + * for MST -> TimeSlotsPerMTP has to be calculated
2330     + */
2331     + bits_per_pixel = (link_clock * lane_count * 8) /
2332     + intel_dp_mode_to_fec_clock(mode_clock);
2333     + DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
2334     +
2335     + /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
2336     + max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay;
2337     + DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
2338     +
2339     + /*
2340     + * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
2341     + * check, output bpp from small joiner RAM check)
2342     + */
2343     + bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
2344     +
2345     + /* Error out if the max bpp is less than smallest allowed valid bpp */
2346     + if (bits_per_pixel < valid_dsc_bpp[0]) {
2347     + DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
2348     + bits_per_pixel, valid_dsc_bpp[0]);
2349     + return 0;
2350     + }
2351     +
2352     + /* Find the nearest match in the array of known BPPs from VESA */
2353     + for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
2354     + if (bits_per_pixel < valid_dsc_bpp[i + 1])
2355     + break;
2356     + }
2357     + bits_per_pixel = valid_dsc_bpp[i];
2358     +
2359     + /*
2360     + * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
2361     + * fractional part is 0
2362     + */
2363     + return bits_per_pixel << 4;
2364     +}
2365     +
2366     +static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
2367     + int mode_clock, int mode_hdisplay)
2368     +{
2369     + u8 min_slice_count, i;
2370     + int max_slice_width;
2371     +
2372     + if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
2373     + min_slice_count = DIV_ROUND_UP(mode_clock,
2374     + DP_DSC_MAX_ENC_THROUGHPUT_0);
2375     + else
2376     + min_slice_count = DIV_ROUND_UP(mode_clock,
2377     + DP_DSC_MAX_ENC_THROUGHPUT_1);
2378     +
2379     + max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
2380     + if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
2381     + DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
2382     + max_slice_width);
2383     + return 0;
2384     + }
2385     + /* Also take into account max slice width */
2386     + min_slice_count = min_t(u8, min_slice_count,
2387     + DIV_ROUND_UP(mode_hdisplay,
2388     + max_slice_width));
2389     +
2390     + /* Find the closest match to the valid slice count values */
2391     + for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
2392     + if (valid_dsc_slicecount[i] >
2393     + drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
2394     + false))
2395     + break;
2396     + if (min_slice_count <= valid_dsc_slicecount[i])
2397     + return valid_dsc_slicecount[i];
2398     + }
2399     +
2400     + DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
2401     + return 0;
2402     +}
2403     +
2404     static enum drm_mode_status
2405     intel_dp_mode_valid(struct drm_connector *connector,
2406     struct drm_display_mode *mode)
2407     @@ -2248,7 +2339,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
2408     adjusted_mode->crtc_clock,
2409     pipe_config->port_clock,
2410     &pipe_config->dp_m_n,
2411     - constant_n);
2412     + constant_n, pipe_config->fec_enable);
2413    
2414     if (intel_connector->panel.downclock_mode != NULL &&
2415     dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
2416     @@ -2258,7 +2349,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
2417     intel_connector->panel.downclock_mode->clock,
2418     pipe_config->port_clock,
2419     &pipe_config->dp_m2_n2,
2420     - constant_n);
2421     + constant_n, pipe_config->fec_enable);
2422     }
2423    
2424     if (!HAS_DDI(dev_priv))
2425     @@ -4345,91 +4436,6 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2426     DP_DPRX_ESI_LEN;
2427     }
2428    
2429     -u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
2430     - int mode_clock, int mode_hdisplay)
2431     -{
2432     - u16 bits_per_pixel, max_bpp_small_joiner_ram;
2433     - int i;
2434     -
2435     - /*
2436     - * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
2437     - * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
2438     - * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
2439     - * for MST -> TimeSlotsPerMTP has to be calculated
2440     - */
2441     - bits_per_pixel = (link_clock * lane_count * 8 *
2442     - DP_DSC_FEC_OVERHEAD_FACTOR) /
2443     - mode_clock;
2444     -
2445     - /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
2446     - max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
2447     - mode_hdisplay;
2448     -
2449     - /*
2450     - * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW
2451     - * check, output bpp from small joiner RAM check)
2452     - */
2453     - bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
2454     -
2455     - /* Error out if the max bpp is less than smallest allowed valid bpp */
2456     - if (bits_per_pixel < valid_dsc_bpp[0]) {
2457     - DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
2458     - return 0;
2459     - }
2460     -
2461     - /* Find the nearest match in the array of known BPPs from VESA */
2462     - for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
2463     - if (bits_per_pixel < valid_dsc_bpp[i + 1])
2464     - break;
2465     - }
2466     - bits_per_pixel = valid_dsc_bpp[i];
2467     -
2468     - /*
2469     - * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
2470     - * fractional part is 0
2471     - */
2472     - return bits_per_pixel << 4;
2473     -}
2474     -
2475     -u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
2476     - int mode_clock,
2477     - int mode_hdisplay)
2478     -{
2479     - u8 min_slice_count, i;
2480     - int max_slice_width;
2481     -
2482     - if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
2483     - min_slice_count = DIV_ROUND_UP(mode_clock,
2484     - DP_DSC_MAX_ENC_THROUGHPUT_0);
2485     - else
2486     - min_slice_count = DIV_ROUND_UP(mode_clock,
2487     - DP_DSC_MAX_ENC_THROUGHPUT_1);
2488     -
2489     - max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
2490     - if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
2491     - DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
2492     - max_slice_width);
2493     - return 0;
2494     - }
2495     - /* Also take into account max slice width */
2496     - min_slice_count = min_t(u8, min_slice_count,
2497     - DIV_ROUND_UP(mode_hdisplay,
2498     - max_slice_width));
2499     -
2500     - /* Find the closest match to the valid slice count values */
2501     - for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
2502     - if (valid_dsc_slicecount[i] >
2503     - drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
2504     - false))
2505     - break;
2506     - if (min_slice_count <= valid_dsc_slicecount[i])
2507     - return valid_dsc_slicecount[i];
2508     - }
2509     -
2510     - DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
2511     - return 0;
2512     -}
2513     -
2514     static void
2515     intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
2516     const struct intel_crtc_state *crtc_state)
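Note: the net effect of the intel_dp changes is that the DSC bandwidth math stops baking in a fixed 97.6%-usable factor and instead inflates the mode clock by the real FEC overhead, 1/0.972261 (about 2.85%), applying it only when FEC is actually enabled, hence the fec_enable parameter threaded through intel_link_compute_m_n(). The conversion in isolation, mirroring intel_dp_mode_to_fec_clock():

#include <stdio.h>
#include <stdint.h>

static uint32_t fec_clock(uint32_t mode_clock)
{
	/* divide by 0.972261 using the same fixed-point form as the hunk */
	return (uint64_t)mode_clock * 1000000u / 972261u;
}

int main(void)
{
	/* a 600 MHz pixel stream needs ~617 MHz of link throughput w/ FEC */
	printf("%u -> %u\n", 600000u, fec_clock(600000u));
	return 0;
}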
2517     diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
2518     index da70b1a41c83..c00e05894e35 100644
2519     --- a/drivers/gpu/drm/i915/display/intel_dp.h
2520     +++ b/drivers/gpu/drm/i915/display/intel_dp.h
2521     @@ -102,10 +102,6 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
2522     bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
2523     bool
2524     intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status);
2525     -u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
2526     - int mode_clock, int mode_hdisplay);
2527     -u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
2528     - int mode_hdisplay);
2529    
2530     bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
2531     bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
2532     @@ -120,4 +116,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
2533     return ~((1 << lane_count) - 1) & 0xf;
2534     }
2535    
2536     +u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
2537     +
2538     #endif /* __INTEL_DP_H__ */
2539     diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
2540     index 8aa6a31e8ad0..c42d487f4dff 100644
2541     --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
2542     +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
2543     @@ -81,7 +81,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
2544     adjusted_mode->crtc_clock,
2545     crtc_state->port_clock,
2546     &crtc_state->dp_m_n,
2547     - constant_n);
2548     + constant_n, crtc_state->fec_enable);
2549     crtc_state->dp_m_n.tu = slots;
2550    
2551     return 0;
2552     diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
2553     index 75baff657e43..10875b8a39a3 100644
2554     --- a/drivers/gpu/drm/i915/gvt/scheduler.c
2555     +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
2556     @@ -1424,9 +1424,6 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
2557     #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
2558     ((a)->lrca == (b)->lrca))
2559    
2560     -#define get_last_workload(q) \
2561     - (list_empty(q) ? NULL : container_of(q->prev, \
2562     - struct intel_vgpu_workload, list))
2563     /**
2564     * intel_vgpu_create_workload - create a vGPU workload
2565     * @vgpu: a vGPU
2566     @@ -1446,7 +1443,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
2567     {
2568     struct intel_vgpu_submission *s = &vgpu->submission;
2569     struct list_head *q = workload_q_head(vgpu, ring_id);
2570     - struct intel_vgpu_workload *last_workload = get_last_workload(q);
2571     + struct intel_vgpu_workload *last_workload = NULL;
2572     struct intel_vgpu_workload *workload = NULL;
2573     struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
2574     u64 ring_context_gpa;
2575     @@ -1472,15 +1469,20 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
2576     head &= RB_HEAD_OFF_MASK;
2577     tail &= RB_TAIL_OFF_MASK;
2578    
2579     - if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
2580     - gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
2581     - gvt_dbg_el("ctx head %x real head %lx\n", head,
2582     - last_workload->rb_tail);
2583     - /*
2584     - * cannot use guest context head pointer here,
2585     - * as it might not be updated at this time
2586     - */
2587     - head = last_workload->rb_tail;
2588     + list_for_each_entry_reverse(last_workload, q, list) {
2589     +
2590     + if (same_context(&last_workload->ctx_desc, desc)) {
2591     + gvt_dbg_el("ring id %d cur workload == last\n",
2592     + ring_id);
2593     + gvt_dbg_el("ctx head %x real head %lx\n", head,
2594     + last_workload->rb_tail);
2595     + /*
2596     + * cannot use guest context head pointer here,
2597     + * as it might not be updated at this time
2598     + */
2599     + head = last_workload->rb_tail;
2600     + break;
2601     + }
2602     }
2603    
2604     gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
2605     diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
2606     index fe7a6ec2c199..94b91a952699 100644
2607     --- a/drivers/gpu/drm/i915/i915_drv.h
2608     +++ b/drivers/gpu/drm/i915/i915_drv.h
2609     @@ -1073,6 +1073,7 @@ struct i915_frontbuffer_tracking {
2610     };
2611    
2612     struct i915_virtual_gpu {
2613     + struct mutex lock; /* serialises sending of g2v_notify command pkts */
2614     bool active;
2615     u32 caps;
2616     };
2617     diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
2618     index 7015a97b1097..6b702da7bba7 100644
2619     --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
2620     +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
2621     @@ -1248,14 +1248,15 @@ free_scratch_page:
2622     return ret;
2623     }
2624    
2625     -static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
2626     +static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
2627     {
2628     - struct i915_address_space *vm = &ppgtt->vm;
2629     - struct drm_i915_private *dev_priv = vm->i915;
2630     + struct drm_i915_private *dev_priv = ppgtt->vm.i915;
2631     enum vgt_g2v_type msg;
2632     int i;
2633    
2634     - if (i915_vm_is_4lvl(vm)) {
2635     + mutex_lock(&dev_priv->vgpu.lock);
2636     +
2637     + if (i915_vm_is_4lvl(&ppgtt->vm)) {
2638     const u64 daddr = px_dma(ppgtt->pd);
2639    
2640     I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
2641     @@ -1275,9 +1276,10 @@ static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
2642     VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
2643     }
2644    
2645     + /* g2v_notify atomically (via hv trap) consumes the message packet. */
2646     I915_WRITE(vgtif_reg(g2v_notify), msg);
2647    
2648     - return 0;
2649     + mutex_unlock(&dev_priv->vgpu.lock);
2650     }
2651    
2652     static void gen8_free_scratch(struct i915_address_space *vm)
2653     diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
2654     index 724627afdedc..8b03c67f8e5b 100644
2655     --- a/drivers/gpu/drm/i915/i915_vgpu.c
2656     +++ b/drivers/gpu/drm/i915/i915_vgpu.c
2657     @@ -79,6 +79,7 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
2658     dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps));
2659    
2660     dev_priv->vgpu.active = true;
2661     + mutex_init(&dev_priv->vgpu.lock);
2662     DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
2663     }
2664    
2665     diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
2666     index aa35d18ab43c..02acb4338721 100644
2667     --- a/drivers/gpu/drm/msm/dsi/dsi_host.c
2668     +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
2669     @@ -421,15 +421,15 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
2670     }
2671    
2672     msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
2673     - if (!msm_host->byte_clk_src) {
2674     - ret = -ENODEV;
2675     + if (IS_ERR(msm_host->byte_clk_src)) {
2676     + ret = PTR_ERR(msm_host->byte_clk_src);
2677     pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
2678     goto exit;
2679     }
2680    
2681     msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
2682     - if (!msm_host->pixel_clk_src) {
2683     - ret = -ENODEV;
2684     + if (IS_ERR(msm_host->pixel_clk_src)) {
2685     + ret = PTR_ERR(msm_host->pixel_clk_src);
2686     pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
2687     goto exit;
2688     }
2689     diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
2690     index 5c36c75232e6..895a34a1a1ea 100644
2691     --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
2692     +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
2693     @@ -1603,7 +1603,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
2694     nv_encoder->aux = aux;
2695     }
2696    
2697     - if ((data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
2698     + if (nv_connector->type != DCB_CONNECTOR_eDP &&
2699     + (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
2700     ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
2701     ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
2702     nv_connector->base.base.id,
2703     diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
2704     index 5711b7a720e6..25b6a79dc385 100644
2705     --- a/drivers/gpu/drm/omapdrm/dss/dss.c
2706     +++ b/drivers/gpu/drm/omapdrm/dss/dss.c
2707     @@ -1090,7 +1090,7 @@ static const struct dss_features omap34xx_dss_feats = {
2708    
2709     static const struct dss_features omap3630_dss_feats = {
2710     .model = DSS_MODEL_OMAP3,
2711     - .fck_div_max = 32,
2712     + .fck_div_max = 31,
2713     .fck_freq_max = 173000000,
2714     .dss_fck_multiplier = 1,
2715     .parent_clk_name = "dpll4_ck",
2716     diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
2717     index 15d7bebe1729..5cc0fbb04ab1 100644
2718     --- a/drivers/gpu/drm/radeon/radeon_drv.c
2719     +++ b/drivers/gpu/drm/radeon/radeon_drv.c
2720     @@ -325,8 +325,39 @@ bool radeon_device_is_virtual(void);
2721     static int radeon_pci_probe(struct pci_dev *pdev,
2722     const struct pci_device_id *ent)
2723     {
2724     + unsigned long flags = 0;
2725     int ret;
2726    
2727     + if (!ent)
2728     + return -ENODEV; /* Avoid NULL-ptr deref in drm_get_pci_dev */
2729     +
2730     + flags = ent->driver_data;
2731     +
2732     + if (!radeon_si_support) {
2733     + switch (flags & RADEON_FAMILY_MASK) {
2734     + case CHIP_TAHITI:
2735     + case CHIP_PITCAIRN:
2736     + case CHIP_VERDE:
2737     + case CHIP_OLAND:
2738     + case CHIP_HAINAN:
2739     + dev_info(&pdev->dev,
2740     + "SI support disabled by module param\n");
2741     + return -ENODEV;
2742     + }
2743     + }
2744     + if (!radeon_cik_support) {
2745     + switch (flags & RADEON_FAMILY_MASK) {
2746     + case CHIP_KAVERI:
2747     + case CHIP_BONAIRE:
2748     + case CHIP_HAWAII:
2749     + case CHIP_KABINI:
2750     + case CHIP_MULLINS:
2751     + dev_info(&pdev->dev,
2752     + "CIK support disabled by module param\n");
2753     + return -ENODEV;
2754     + }
2755     + }
2756     +
2757     if (vga_switcheroo_client_probe_defer(pdev))
2758     return -EPROBE_DEFER;
2759    
2760     diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
2761     index 07f7ace42c4b..e85c554eeaa9 100644
2762     --- a/drivers/gpu/drm/radeon/radeon_kms.c
2763     +++ b/drivers/gpu/drm/radeon/radeon_kms.c
2764     @@ -100,31 +100,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
2765     struct radeon_device *rdev;
2766     int r, acpi_status;
2767    
2768     - if (!radeon_si_support) {
2769     - switch (flags & RADEON_FAMILY_MASK) {
2770     - case CHIP_TAHITI:
2771     - case CHIP_PITCAIRN:
2772     - case CHIP_VERDE:
2773     - case CHIP_OLAND:
2774     - case CHIP_HAINAN:
2775     - dev_info(dev->dev,
2776     - "SI support disabled by module param\n");
2777     - return -ENODEV;
2778     - }
2779     - }
2780     - if (!radeon_cik_support) {
2781     - switch (flags & RADEON_FAMILY_MASK) {
2782     - case CHIP_KAVERI:
2783     - case CHIP_BONAIRE:
2784     - case CHIP_HAWAII:
2785     - case CHIP_KABINI:
2786     - case CHIP_MULLINS:
2787     - dev_info(dev->dev,
2788     - "CIK support disabled by module param\n");
2789     - return -ENODEV;
2790     - }
2791     - }
2792     -
2793     rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
2794     if (rdev == NULL) {
2795     return -ENOMEM;
2796     diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
2797     index 7bcac8896fc1..67928ff19c71 100644
2798     --- a/drivers/hwtracing/coresight/coresight-etm4x.c
2799     +++ b/drivers/hwtracing/coresight/coresight-etm4x.c
2800     @@ -188,6 +188,13 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
2801     dev_err(etm_dev,
2802     "timeout while waiting for Idle Trace Status\n");
2803    
2804     + /*
2805     + * As recommended by section 4.3.7 ("Synchronization when using the
2806     + * memory-mapped interface") of ARM IHI 0064D
2807     + */
2808     + dsb(sy);
2809     + isb();
2810     +
2811     done:
2812     CS_LOCK(drvdata->base);
2813    
2814     @@ -453,8 +460,12 @@ static void etm4_disable_hw(void *info)
2815     /* EN, bit[0] Trace unit enable bit */
2816     control &= ~0x1;
2817    
2818     - /* make sure everything completes before disabling */
2819     - mb();
2820     + /*
2821     + * Make sure everything completes before disabling, as recommended
2822     + * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
2823     + * SSTATUS") of ARM IHI 0064D
2824     + */
2825     + dsb(sy);
2826     isb();
2827     writel_relaxed(control, drvdata->base + TRCPRGCTLR);
2828    
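
The explicit dsb(sy)/isb() pair follows the ETMv4 manual's synchronization sequence: drain all outstanding memory accesses, then resynchronize the instruction stream, before the programming register is touched. As a loose single-threaded analogy in portable C (a real DSB is a completion barrier; a C11 fence only gives ordering, and the register names here are stand-ins, not the ETMv4 layout):

#include <stdatomic.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static volatile uint32_t trcprgctlr = 0x1; /* stand-in trace enable reg */
static volatile uint32_t trace_data;       /* stand-in trace activity */

static void trace_disable(void)
{
	uint32_t control = trcprgctlr;

	trace_data = 0xdead;  /* pending access that must complete first */
	control &= ~0x1;      /* EN, bit[0] */

	/* ~ dsb(sy); isb(): nothing may drift past this point. */
	atomic_thread_fence(memory_order_seq_cst);

	trcprgctlr = control;
}

int main(void)
{
	trace_disable();
	printf("control=0x%" PRIx32 "\n", trcprgctlr);
	return 0;
}
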
2829     diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
2830     index a89bfce5388e..17abf60c94ae 100644
2831     --- a/drivers/i2c/busses/i2c-qcom-geni.c
2832     +++ b/drivers/i2c/busses/i2c-qcom-geni.c
2833     @@ -355,11 +355,13 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
2834     {
2835     dma_addr_t rx_dma;
2836     unsigned long time_left;
2837     - void *dma_buf;
2838     + void *dma_buf = NULL;
2839     struct geni_se *se = &gi2c->se;
2840     size_t len = msg->len;
2841    
2842     - dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
2843     + if (!of_machine_is_compatible("lenovo,yoga-c630"))
2844     + dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
2845     +
2846     if (dma_buf)
2847     geni_se_select_mode(se, GENI_SE_DMA);
2848     else
2849     @@ -394,11 +396,13 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
2850     {
2851     dma_addr_t tx_dma;
2852     unsigned long time_left;
2853     - void *dma_buf;
2854     + void *dma_buf = NULL;
2855     struct geni_se *se = &gi2c->se;
2856     size_t len = msg->len;
2857    
2858     - dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
2859     + if (!of_machine_is_compatible("lenovo,yoga-c630"))
2860     + dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
2861     +
2862     if (dma_buf)
2863     geni_se_select_mode(se, GENI_SE_DMA);
2864     else
2865     diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
2866     index e1259429ded2..3b1d7ae6f75e 100644
2867     --- a/drivers/iommu/amd_iommu.c
2868     +++ b/drivers/iommu/amd_iommu.c
2869     @@ -1490,6 +1490,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
2870     pte_level = PM_PTE_LEVEL(__pte);
2871    
2872     if (!IOMMU_PTE_PRESENT(__pte) ||
2873     + pte_level == PAGE_MODE_NONE ||
2874     pte_level == PAGE_MODE_7_LEVEL) {
2875     page = (u64 *)get_zeroed_page(gfp);
2876     if (!page)
2877     @@ -1500,7 +1501,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
2878     /* pte could have been changed somewhere. */
2879     if (cmpxchg64(pte, __pte, __npte) != __pte)
2880     free_page((unsigned long)page);
2881     - else if (pte_level == PAGE_MODE_7_LEVEL)
2882     + else if (IOMMU_PTE_PRESENT(__pte))
2883     domain->updated = true;
2884    
2885     continue;
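
alloc_pte() installs a freshly zeroed table with cmpxchg64() so that two walkers racing to the same slot resolve cleanly: the loser frees its page and uses the winner's. A minimal userspace sketch of that lock-free install pattern, using C11 atomics (the names are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>

static _Atomic(uint64_t *) slot; /* one page-table slot */

/* Install a new lower-level table unless someone beat us to it. */
static uint64_t *install_table(void)
{
	uint64_t *expected = NULL;
	uint64_t *page = calloc(512, sizeof(*page)); /* zeroed table */

	if (!page)
		return NULL;

	if (!atomic_compare_exchange_strong(&slot, &expected, page)) {
		free(page); /* lost the race: use the winner's table */
		return expected;
	}
	return page;
}

int main(void)
{
	uint64_t *t1 = install_table();
	uint64_t *t2 = install_table(); /* second call reuses t1 */

	printf("same table: %s\n", t1 == t2 ? "yes" : "no");
	free(t1);
	return 0;
}
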
2886     diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
2887     index 4dd43b1adf2c..74de5e8c45c8 100644
2888     --- a/drivers/mmc/host/sdhci-of-esdhc.c
2889     +++ b/drivers/mmc/host/sdhci-of-esdhc.c
2890     @@ -495,7 +495,12 @@ static int esdhc_of_enable_dma(struct sdhci_host *host)
2891     dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
2892    
2893     value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
2894     - value |= ESDHC_DMA_SNOOP;
2895     +
2896     + if (of_dma_is_coherent(dev->of_node))
2897     + value |= ESDHC_DMA_SNOOP;
2898     + else
2899     + value &= ~ESDHC_DMA_SNOOP;
2900     +
2901     sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
2902     return 0;
2903     }
2904     diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
2905     index 02d8f524bb9e..7bc950520fd9 100644
2906     --- a/drivers/mmc/host/sdhci-tegra.c
2907     +++ b/drivers/mmc/host/sdhci-tegra.c
2908     @@ -4,6 +4,7 @@
2909     */
2910    
2911     #include <linux/delay.h>
2912     +#include <linux/dma-mapping.h>
2913     #include <linux/err.h>
2914     #include <linux/module.h>
2915     #include <linux/init.h>
2916     @@ -104,6 +105,7 @@
2917    
2918     struct sdhci_tegra_soc_data {
2919     const struct sdhci_pltfm_data *pdata;
2920     + u64 dma_mask;
2921     u32 nvquirks;
2922     u8 min_tap_delay;
2923     u8 max_tap_delay;
2924     @@ -1233,11 +1235,25 @@ static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
2925     .update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
2926     };
2927    
2928     +static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
2929     +{
2930     + struct sdhci_pltfm_host *platform = sdhci_priv(host);
2931     + struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
2932     + const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
2933     + struct device *dev = mmc_dev(host->mmc);
2934     +
2935     + if (soc->dma_mask)
2936     + return dma_set_mask_and_coherent(dev, soc->dma_mask);
2937     +
2938     + return 0;
2939     +}
2940     +
2941     static const struct sdhci_ops tegra_sdhci_ops = {
2942     .get_ro = tegra_sdhci_get_ro,
2943     .read_w = tegra_sdhci_readw,
2944     .write_l = tegra_sdhci_writel,
2945     .set_clock = tegra_sdhci_set_clock,
2946     + .set_dma_mask = tegra_sdhci_set_dma_mask,
2947     .set_bus_width = sdhci_set_bus_width,
2948     .reset = tegra_sdhci_reset,
2949     .platform_execute_tuning = tegra_sdhci_execute_tuning,
2950     @@ -1257,6 +1273,7 @@ static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
2951    
2952     static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
2953     .pdata = &sdhci_tegra20_pdata,
2954     + .dma_mask = DMA_BIT_MASK(32),
2955     .nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
2956     NVQUIRK_ENABLE_BLOCK_GAP_DET,
2957     };
2958     @@ -1283,6 +1300,7 @@ static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
2959    
2960     static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
2961     .pdata = &sdhci_tegra30_pdata,
2962     + .dma_mask = DMA_BIT_MASK(32),
2963     .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
2964     NVQUIRK_ENABLE_SDR50 |
2965     NVQUIRK_ENABLE_SDR104 |
2966     @@ -1295,6 +1313,7 @@ static const struct sdhci_ops tegra114_sdhci_ops = {
2967     .write_w = tegra_sdhci_writew,
2968     .write_l = tegra_sdhci_writel,
2969     .set_clock = tegra_sdhci_set_clock,
2970     + .set_dma_mask = tegra_sdhci_set_dma_mask,
2971     .set_bus_width = sdhci_set_bus_width,
2972     .reset = tegra_sdhci_reset,
2973     .platform_execute_tuning = tegra_sdhci_execute_tuning,
2974     @@ -1316,6 +1335,7 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
2975    
2976     static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
2977     .pdata = &sdhci_tegra114_pdata,
2978     + .dma_mask = DMA_BIT_MASK(32),
2979     };
2980    
2981     static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
2982     @@ -1325,22 +1345,13 @@ static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
2983     SDHCI_QUIRK_NO_HISPD_BIT |
2984     SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
2985     SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
2986     - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
2987     - /*
2988     - * The TRM states that the SD/MMC controller found on
2989     - * Tegra124 can address 34 bits (the maximum supported by
2990     - * the Tegra memory controller), but tests show that DMA
2991     - * to or from above 4 GiB doesn't work. This is possibly
2992     - * caused by missing programming, though it's not obvious
2993     - * what sequence is required. Mark 64-bit DMA broken for
2994     - * now to fix this for existing users (e.g. Nyan boards).
2995     - */
2996     - SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
2997     + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
2998     .ops = &tegra114_sdhci_ops,
2999     };
3000    
3001     static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
3002     .pdata = &sdhci_tegra124_pdata,
3003     + .dma_mask = DMA_BIT_MASK(34),
3004     };
3005    
3006     static const struct sdhci_ops tegra210_sdhci_ops = {
3007     @@ -1349,6 +1360,7 @@ static const struct sdhci_ops tegra210_sdhci_ops = {
3008     .write_w = tegra210_sdhci_writew,
3009     .write_l = tegra_sdhci_writel,
3010     .set_clock = tegra_sdhci_set_clock,
3011     + .set_dma_mask = tegra_sdhci_set_dma_mask,
3012     .set_bus_width = sdhci_set_bus_width,
3013     .reset = tegra_sdhci_reset,
3014     .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
3015     @@ -1369,6 +1381,7 @@ static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
3016    
3017     static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
3018     .pdata = &sdhci_tegra210_pdata,
3019     + .dma_mask = DMA_BIT_MASK(34),
3020     .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
3021     NVQUIRK_HAS_PADCALIB |
3022     NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
3023     @@ -1383,6 +1396,7 @@ static const struct sdhci_ops tegra186_sdhci_ops = {
3024     .read_w = tegra_sdhci_readw,
3025     .write_l = tegra_sdhci_writel,
3026     .set_clock = tegra_sdhci_set_clock,
3027     + .set_dma_mask = tegra_sdhci_set_dma_mask,
3028     .set_bus_width = sdhci_set_bus_width,
3029     .reset = tegra_sdhci_reset,
3030     .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
3031     @@ -1398,20 +1412,13 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
3032     SDHCI_QUIRK_NO_HISPD_BIT |
3033     SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
3034     SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
3035     - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
3036     - /* SDHCI controllers on Tegra186 support 40-bit addressing.
3037     - * IOVA addresses are 48-bit wide on Tegra186.
3038     - * With 64-bit dma mask used for SDHCI, accesses can
3039     - * be broken. Disable 64-bit dma, which would fall back
3040     - * to 32-bit dma mask. Ideally 40-bit dma mask would work,
3041     - * But it is not supported as of now.
3042     - */
3043     - SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
3044     + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
3045     .ops = &tegra186_sdhci_ops,
3046     };
3047    
3048     static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
3049     .pdata = &sdhci_tegra186_pdata,
3050     + .dma_mask = DMA_BIT_MASK(40),
3051     .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
3052     NVQUIRK_HAS_PADCALIB |
3053     NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
3054     @@ -1424,6 +1431,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
3055    
3056     static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
3057     .pdata = &sdhci_tegra186_pdata,
3058     + .dma_mask = DMA_BIT_MASK(39),
3059     .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
3060     NVQUIRK_HAS_PADCALIB |
3061     NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
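
The per-SoC dma_mask values above are built with DMA_BIT_MASK(n), which expands to a mask with the low n bits set (all 64 for n == 64). A small standalone check of the masks used here:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Same definition as include/linux/dma-mapping.h */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* Tegra20/30/114: 32-bit; Tegra124/210: 34-bit; Tegra186: 40-bit */
	printf("32-bit: 0x%016" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(32));
	printf("34-bit: 0x%016" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(34));
	printf("40-bit: 0x%016" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(40));
	return 0;
}
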
3062     diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
3063     index c66e66fbaeb4..e41ccb836538 100644
3064     --- a/drivers/mmc/host/sdhci.c
3065     +++ b/drivers/mmc/host/sdhci.c
3066     @@ -2857,6 +2857,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
3067     static void sdhci_adma_show_error(struct sdhci_host *host)
3068     {
3069     void *desc = host->adma_table;
3070     + dma_addr_t dma = host->adma_addr;
3071    
3072     sdhci_dumpregs(host);
3073    
3074     @@ -2864,18 +2865,21 @@ static void sdhci_adma_show_error(struct sdhci_host *host)
3075     struct sdhci_adma2_64_desc *dma_desc = desc;
3076    
3077     if (host->flags & SDHCI_USE_64_BIT_DMA)
3078     - DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3079     - desc, le32_to_cpu(dma_desc->addr_hi),
3080     + SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3081     + (unsigned long long)dma,
3082     + le32_to_cpu(dma_desc->addr_hi),
3083     le32_to_cpu(dma_desc->addr_lo),
3084     le16_to_cpu(dma_desc->len),
3085     le16_to_cpu(dma_desc->cmd));
3086     else
3087     - DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3088     - desc, le32_to_cpu(dma_desc->addr_lo),
3089     + SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3090     + (unsigned long long)dma,
3091     + le32_to_cpu(dma_desc->addr_lo),
3092     le16_to_cpu(dma_desc->len),
3093     le16_to_cpu(dma_desc->cmd));
3094    
3095     desc += host->desc_sz;
3096     + dma += host->desc_sz;
3097    
3098     if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
3099     break;
3100     @@ -2951,7 +2955,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3101     != MMC_BUS_TEST_R)
3102     host->data->error = -EILSEQ;
3103     else if (intmask & SDHCI_INT_ADMA_ERROR) {
3104     - pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
3105     + pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
3106     + intmask);
3107     sdhci_adma_show_error(host);
3108     host->data->error = -EIO;
3109     if (host->ops->adma_workaround)
3110     @@ -3758,18 +3763,14 @@ int sdhci_setup_host(struct sdhci_host *host)
3111     host->flags &= ~SDHCI_USE_ADMA;
3112     }
3113    
3114     - /*
3115     - * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3116     - * and *must* do 64-bit DMA. A driver has the opportunity to change
3117     - * that during the first call to ->enable_dma(). Similarly
3118     - * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3119     - * implement.
3120     - */
3121     if (sdhci_can_64bit_dma(host))
3122     host->flags |= SDHCI_USE_64_BIT_DMA;
3123    
3124     if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3125     - ret = sdhci_set_dma_mask(host);
3126     + if (host->ops->set_dma_mask)
3127     + ret = host->ops->set_dma_mask(host);
3128     + else
3129     + ret = sdhci_set_dma_mask(host);
3130    
3131     if (!ret && host->ops->enable_dma)
3132     ret = host->ops->enable_dma(host);
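
The core now prefers a host-specific ->set_dma_mask() and falls back to the generic helper when the op is absent. The shape of that "optional op with a default" pattern, reduced to standalone C (all names hypothetical):

#include <stdio.h>

struct host;

struct host_ops {
	int (*set_dma_mask)(struct host *h); /* optional override */
};

struct host {
	const struct host_ops *ops;
	unsigned long long mask;
};

static int default_set_dma_mask(struct host *h)
{
	h->mask = ~0ULL; /* generic 64-bit default */
	return 0;
}

static int host_setup_dma(struct host *h)
{
	if (h->ops && h->ops->set_dma_mask)
		return h->ops->set_dma_mask(h);
	return default_set_dma_mask(h);
}

static int tegra_set_dma_mask(struct host *h)
{
	h->mask = (1ULL << 34) - 1; /* SoC-specific limit */
	return 0;
}

static const struct host_ops tegra_ops = { .set_dma_mask = tegra_set_dma_mask };

int main(void)
{
	struct host plain = { .ops = NULL };
	struct host tegra = { .ops = &tegra_ops };

	host_setup_dma(&plain);
	host_setup_dma(&tegra);
	printf("plain=%llx tegra=%llx\n", plain.mask, tegra.mask);
	return 0;
}
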
3133     diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
3134     index 902f855efe8f..8285498c0d8a 100644
3135     --- a/drivers/mmc/host/sdhci.h
3136     +++ b/drivers/mmc/host/sdhci.h
3137     @@ -622,6 +622,7 @@ struct sdhci_ops {
3138    
3139     u32 (*irq)(struct sdhci_host *host, u32 intmask);
3140    
3141     + int (*set_dma_mask)(struct sdhci_host *host);
3142     int (*enable_dma)(struct sdhci_host *host);
3143     unsigned int (*get_max_clock)(struct sdhci_host *host);
3144     unsigned int (*get_min_clock)(struct sdhci_host *host);
3145     diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
3146     index 12358f06d194..5d6f8977df3f 100644
3147     --- a/drivers/net/can/spi/mcp251x.c
3148     +++ b/drivers/net/can/spi/mcp251x.c
3149     @@ -612,7 +612,7 @@ static int mcp251x_setup(struct net_device *net, struct spi_device *spi)
3150     static int mcp251x_hw_reset(struct spi_device *spi)
3151     {
3152     struct mcp251x_priv *priv = spi_get_drvdata(spi);
3153     - u8 reg;
3154     + unsigned long timeout;
3155     int ret;
3156    
3157     /* Wait for oscillator startup timer after power up */
3158     @@ -626,10 +626,19 @@ static int mcp251x_hw_reset(struct spi_device *spi)
3159     /* Wait for oscillator startup timer after reset */
3160     mdelay(MCP251X_OST_DELAY_MS);
3161    
3162     - reg = mcp251x_read_reg(spi, CANSTAT);
3163     - if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
3164     - return -ENODEV;
3165     -
3166     + /* Wait for reset to finish */
3167     + timeout = jiffies + HZ;
3168     + while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
3169     + CANCTRL_REQOP_CONF) {
3170     + usleep_range(MCP251X_OST_DELAY_MS * 1000,
3171     + MCP251X_OST_DELAY_MS * 1000 * 2);
3172     +
3173     + if (time_after(jiffies, timeout)) {
3174     + dev_err(&spi->dev,
3175     + "MCP251x didn't enter conf mode after reset\n");
3176     + return -EBUSY;
3177     + }
3178     + }
3179     return 0;
3180     }
3181    
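
The reset path now polls CANSTAT until the controller reports configuration mode, bounded by a one-second deadline, instead of trusting a single read. The same poll-with-deadline shape in portable C (the hardware read is a stub, and the REQOP mask/value are illustrative):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int reads;

/* Stub for mcp251x_read_reg(spi, CANSTAT): ready after a few polls. */
static int read_opmode(void)
{
	return ++reads >= 3 ? 0x80 /* CONF */ : 0x00;
}

static int wait_for_conf_mode(void)
{
	struct timespec now, deadline;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += 1; /* ~ jiffies + HZ */

	while ((read_opmode() & 0xe0) != 0x80) {
		usleep(5000); /* ~ usleep_range(OST_DELAY, 2 * OST_DELAY) */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > deadline.tv_sec ||
		    (now.tv_sec == deadline.tv_sec &&
		     now.tv_nsec >= deadline.tv_nsec))
			return -1; /* ~ -EBUSY */
	}
	return 0;
}

int main(void)
{
	printf("reset %s after %d polls\n",
	       wait_for_conf_mode() ? "timed out" : "completed", reads);
	return 0;
}
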
3182     diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
3183     index 72ec250b9540..823f544add0a 100644
3184     --- a/drivers/net/dsa/microchip/ksz_common.h
3185     +++ b/drivers/net/dsa/microchip/ksz_common.h
3186     @@ -130,7 +130,7 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
3187     { \
3188     .name = #width, \
3189     .val_bits = (width), \
3190     - .reg_stride = (width) / 8, \
3191     + .reg_stride = 1, \
3192     .reg_bits = (regbits) + (regalign), \
3193     .pad_bits = (regpad), \
3194     .max_register = BIT(regbits) - 1, \
3195     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
3196     index 202e9a246019..7c13656a8338 100644
3197     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
3198     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
3199     @@ -21,6 +21,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
3200     struct netlink_ext_ack *extack)
3201     {
3202     const struct flow_action_entry *act;
3203     + int mirror_act_count = 0;
3204     int err, i;
3205    
3206     if (!flow_action_has_entries(flow_action))
3207     @@ -95,6 +96,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
3208     case FLOW_ACTION_MIRRED: {
3209     struct net_device *out_dev = act->dev;
3210    
3211     + if (mirror_act_count++) {
3212     + NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
3213     + return -EOPNOTSUPP;
3214     + }
3215     +
3216     err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
3217     block, out_dev,
3218     extack);
3219     diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
3220     index 23ebddfb9532..9f8a1f69c0c4 100644
3221     --- a/drivers/net/ethernet/netronome/nfp/abm/cls.c
3222     +++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
3223     @@ -176,8 +176,10 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
3224     u8 mask, val;
3225     int err;
3226    
3227     - if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
3228     + if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) {
3229     + err = -EOPNOTSUPP;
3230     goto err_delete;
3231     + }
3232    
3233     tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
3234    
3235     @@ -198,14 +200,18 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
3236     if ((iter->val & cmask) == (val & cmask) &&
3237     iter->band != knode->res->classid) {
3238     NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
3239     + err = -EOPNOTSUPP;
3240     goto err_delete;
3241     }
3242     }
3243    
3244     if (!match) {
3245     match = kzalloc(sizeof(*match), GFP_KERNEL);
3246     - if (!match)
3247     - return -ENOMEM;
3248     + if (!match) {
3249     + err = -ENOMEM;
3250     + goto err_delete;
3251     + }
3252     +
3253     list_add(&match->list, &alink->dscp_map);
3254     }
3255     match->handle = knode->handle;
3256     @@ -221,7 +227,7 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
3257    
3258     err_delete:
3259     nfp_abm_u32_knode_delete(alink, knode);
3260     - return -EOPNOTSUPP;
3261     + return err;
3262     }
3263    
3264     static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
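
All failure exits in nfp_abm_u32_knode_replace() now funnel through one cleanup label and return the specific errno that was set, instead of a hard-coded -EOPNOTSUPP (which also leaked on the kzalloc failure path). The canonical shape of that goto idiom in plain C (names hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int knode_replace(int supported, size_t alloc_size)
{
	void *match = NULL;
	int err;

	if (!supported) {
		err = -EOPNOTSUPP;   /* was silently hard-coded before */
		goto err_delete;
	}

	match = malloc(alloc_size);
	if (!match) {
		err = -ENOMEM;       /* previously returned without cleanup */
		goto err_delete;
	}

	free(match);
	return 0;

err_delete:
	/* single cleanup point: undo the partially installed filter */
	return err;
}

int main(void)
{
	printf("%d %d\n", knode_replace(0, 16), knode_replace(1, 16));
	return 0;
}
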
3265     diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
3266     index ceddb424f887..0dd0ba915ab9 100644
3267     --- a/drivers/net/ieee802154/atusb.c
3268     +++ b/drivers/net/ieee802154/atusb.c
3269     @@ -1137,10 +1137,11 @@ static void atusb_disconnect(struct usb_interface *interface)
3270    
3271     ieee802154_unregister_hw(atusb->hw);
3272    
3273     + usb_put_dev(atusb->usb_dev);
3274     +
3275     ieee802154_free_hw(atusb->hw);
3276    
3277     usb_set_intfdata(interface, NULL);
3278     - usb_put_dev(atusb->usb_dev);
3279    
3280     pr_debug("%s done\n", __func__);
3281     }
3282     diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
3283     index d028331558ea..e9b7c2dfc730 100644
3284     --- a/drivers/ntb/test/ntb_perf.c
3285     +++ b/drivers/ntb/test/ntb_perf.c
3286     @@ -1378,7 +1378,7 @@ static int perf_setup_peer_mw(struct perf_peer *peer)
3287     int ret;
3288    
3289     /* Get outbound MW parameters and map it */
3290     - ret = ntb_peer_mw_get_addr(perf->ntb, peer->gidx, &phys_addr,
3291     + ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr,
3292     &peer->outbuf_size);
3293     if (ret)
3294     return ret;
3295     diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
3296     index a8d56887ec88..3e9f45aec8d1 100644
3297     --- a/drivers/nvdimm/btt.c
3298     +++ b/drivers/nvdimm/btt.c
3299     @@ -392,9 +392,9 @@ static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
3300     arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
3301     if (++(arena->freelist[lane].seq) == 4)
3302     arena->freelist[lane].seq = 1;
3303     - if (ent_e_flag(ent->old_map))
3304     + if (ent_e_flag(le32_to_cpu(ent->old_map)))
3305     arena->freelist[lane].has_err = 1;
3306     - arena->freelist[lane].block = le32_to_cpu(ent_lba(ent->old_map));
3307     + arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));
3308    
3309     return ret;
3310     }
3311     @@ -560,8 +560,8 @@ static int btt_freelist_init(struct arena_info *arena)
3312     * FIXME: if error clearing fails during init, we want to make
3313     * the BTT read-only
3314     */
3315     - if (ent_e_flag(log_new.old_map) &&
3316     - !ent_normal(log_new.old_map)) {
3317     + if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
3318     + !ent_normal(le32_to_cpu(log_new.old_map))) {
3319     arena->freelist[i].has_err = 1;
3320     ret = arena_clear_freelist_error(arena, i);
3321     if (ret)
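
The BTT fix swaps the order of operations: the on-media value must be converted from little-endian first, and only then have its flag and LBA fields extracted; otherwise the mask is applied to byte-swapped data on big-endian hosts. A standalone demonstration using glibc's le32toh() (the bit layout below is illustrative, not the exact BTT map format):

#include <endian.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define ENT_E_FLAG(x)  ((x) & (1u << 30))
#define ENT_LBA(x)     ((x) & 0x3fffffffu)

int main(void)
{
	/* A raw little-endian map entry as it sits on media. */
	uint32_t raw = htole32((1u << 30) | 0x1234);

	/* Bug shape: extract first, convert after. */
	uint32_t bad = le32toh(ENT_LBA(raw));
	/* The fix: convert first, then extract. */
	uint32_t good = ENT_LBA(le32toh(raw));

	/* On little-endian hosts both agree, which is how the bug hid;
	 * on big-endian hosts they diverge. */
	printf("bad=0x%" PRIx32 " good=0x%" PRIx32 " flag=%s\n",
	       bad, good, ENT_E_FLAG(le32toh(raw)) ? "set" : "clear");
	return 0;
}
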
3322     diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
3323     index 798c5c4aea9c..bb3f20ebc276 100644
3324     --- a/drivers/nvdimm/bus.c
3325     +++ b/drivers/nvdimm/bus.c
3326     @@ -182,7 +182,7 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
3327     sector_t sector;
3328    
3329     /* make sure device is a region */
3330     - if (!is_nd_pmem(dev))
3331     + if (!is_memory(dev))
3332     return 0;
3333    
3334     nd_region = to_nd_region(dev);
3335     diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
3336     index a16e52251a30..102c9d5141ee 100644
3337     --- a/drivers/nvdimm/namespace_devs.c
3338     +++ b/drivers/nvdimm/namespace_devs.c
3339     @@ -1987,7 +1987,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
3340     nd_mapping = &nd_region->mapping[i];
3341     label_ent = list_first_entry_or_null(&nd_mapping->labels,
3342     typeof(*label_ent), list);
3343     - label0 = label_ent ? label_ent->label : 0;
3344     + label0 = label_ent ? label_ent->label : NULL;
3345    
3346     if (!label0) {
3347     WARN_ON(1);
3348     @@ -2322,8 +2322,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
3349     continue;
3350    
3351     /* skip labels that describe extents outside of the region */
3352     - if (nd_label->dpa < nd_mapping->start || nd_label->dpa > map_end)
3353     - continue;
3354     + if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start ||
3355     + __le64_to_cpu(nd_label->dpa) > map_end)
3356     + continue;
3357    
3358     i = add_namespace_resource(nd_region, nd_label, devs, count);
3359     if (i < 0)
3360     diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
3361     index cb98b8fe786e..b0f7832bae72 100644
3362     --- a/drivers/nvdimm/pfn_devs.c
3363     +++ b/drivers/nvdimm/pfn_devs.c
3364     @@ -618,9 +618,11 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
3365     struct nd_namespace_common *ndns = nd_pfn->ndns;
3366     struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
3367     resource_size_t base = nsio->res.start + start_pad;
3368     + resource_size_t end = nsio->res.end - end_trunc;
3369     struct vmem_altmap __altmap = {
3370     .base_pfn = init_altmap_base(base),
3371     .reserve = init_altmap_reserve(base),
3372     + .end_pfn = PHYS_PFN(end),
3373     };
3374    
3375     memcpy(res, &nsio->res, sizeof(*res));
3376     diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
3377     index 37bf8719a2a4..0f6978e72e7c 100644
3378     --- a/drivers/nvdimm/region.c
3379     +++ b/drivers/nvdimm/region.c
3380     @@ -34,7 +34,7 @@ static int nd_region_probe(struct device *dev)
3381     if (rc)
3382     return rc;
3383    
3384     - if (is_nd_pmem(&nd_region->dev)) {
3385     + if (is_memory(&nd_region->dev)) {
3386     struct resource ndr_res;
3387    
3388     if (devm_init_badblocks(dev, &nd_region->bb))
3389     @@ -123,7 +123,7 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
3390     struct nd_region *nd_region = to_nd_region(dev);
3391     struct resource res;
3392    
3393     - if (is_nd_pmem(&nd_region->dev)) {
3394     + if (is_memory(&nd_region->dev)) {
3395     res.start = nd_region->ndr_start;
3396     res.end = nd_region->ndr_start +
3397     nd_region->ndr_size - 1;
3398     diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
3399     index af30cbe7a8ea..47b48800fb75 100644
3400     --- a/drivers/nvdimm/region_devs.c
3401     +++ b/drivers/nvdimm/region_devs.c
3402     @@ -632,11 +632,11 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
3403     if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
3404     return 0;
3405    
3406     - if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
3407     + if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
3408     return 0;
3409    
3410     if (a == &dev_attr_resource.attr) {
3411     - if (is_nd_pmem(dev))
3412     + if (is_memory(dev))
3413     return 0400;
3414     else
3415     return 0;
3416     diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
3417     index a570f2263a42..5b7ea93edb93 100644
3418     --- a/drivers/nvdimm/security.c
3419     +++ b/drivers/nvdimm/security.c
3420     @@ -177,6 +177,10 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
3421     || nvdimm->sec.state < 0)
3422     return -EIO;
3423    
3424     + /* No need to go further if security is disabled */
3425     + if (nvdimm->sec.state == NVDIMM_SECURITY_DISABLED)
3426     + return 0;
3427     +
3428     if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
3429     dev_dbg(dev, "Security operation in progress.\n");
3430     return -EBUSY;
3431     diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
3432     index 40b625458afa..2b53976cd9f9 100644
3433     --- a/drivers/pci/controller/pci-hyperv.c
3434     +++ b/drivers/pci/controller/pci-hyperv.c
3435     @@ -2701,8 +2701,8 @@ static int hv_pci_remove(struct hv_device *hdev)
3436     /* Remove the bus from PCI's point of view. */
3437     pci_lock_rescan_remove();
3438     pci_stop_root_bus(hbus->pci_bus);
3439     - pci_remove_root_bus(hbus->pci_bus);
3440     hv_pci_remove_slots(hbus);
3441     + pci_remove_root_bus(hbus->pci_bus);
3442     pci_unlock_rescan_remove();
3443     hbus->state = hv_pcibus_removed;
3444     }
3445     diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
3446     index 4575e0c6dc4b..a35d3f3996d7 100644
3447     --- a/drivers/pci/controller/vmd.c
3448     +++ b/drivers/pci/controller/vmd.c
3449     @@ -31,6 +31,9 @@
3450     #define PCI_REG_VMLOCK 0x70
3451     #define MB2_SHADOW_EN(vmlock) (vmlock & 0x2)
3452    
3453     +#define MB2_SHADOW_OFFSET 0x2000
3454     +#define MB2_SHADOW_SIZE 16
3455     +
3456     enum vmd_features {
3457     /*
3458     * Device may contain registers which hint the physical location of the
3459     @@ -94,6 +97,7 @@ struct vmd_dev {
3460     struct resource resources[3];
3461     struct irq_domain *irq_domain;
3462     struct pci_bus *bus;
3463     + u8 busn_start;
3464    
3465     struct dma_map_ops dma_ops;
3466     struct dma_domain dma_domain;
3467     @@ -440,7 +444,8 @@ static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
3468     unsigned int devfn, int reg, int len)
3469     {
3470     char __iomem *addr = vmd->cfgbar +
3471     - (bus->number << 20) + (devfn << 12) + reg;
3472     + ((bus->number - vmd->busn_start) << 20) +
3473     + (devfn << 12) + reg;
3474    
3475     if ((addr - vmd->cfgbar) + len >=
3476     resource_size(&vmd->dev->resource[VMD_CFGBAR]))
3477     @@ -563,7 +568,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
3478     unsigned long flags;
3479     LIST_HEAD(resources);
3480     resource_size_t offset[2] = {0};
3481     - resource_size_t membar2_offset = 0x2000, busn_start = 0;
3482     + resource_size_t membar2_offset = 0x2000;
3483     struct pci_bus *child;
3484    
3485     /*
3486     @@ -576,7 +581,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
3487     u32 vmlock;
3488     int ret;
3489    
3490     - membar2_offset = 0x2018;
3491     + membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
3492     ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
3493     if (ret || vmlock == ~0)
3494     return -ENODEV;
3495     @@ -588,9 +593,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
3496     if (!membar2)
3497     return -ENOMEM;
3498     offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
3499     - readq(membar2 + 0x2008);
3500     + readq(membar2 + MB2_SHADOW_OFFSET);
3501     offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
3502     - readq(membar2 + 0x2010);
3503     + readq(membar2 + MB2_SHADOW_OFFSET + 8);
3504     pci_iounmap(vmd->dev, membar2);
3505     }
3506     }
3507     @@ -606,14 +611,14 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
3508     pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
3509     if (BUS_RESTRICT_CAP(vmcap) &&
3510     (BUS_RESTRICT_CFG(vmconfig) == 0x1))
3511     - busn_start = 128;
3512     + vmd->busn_start = 128;
3513     }
3514    
3515     res = &vmd->dev->resource[VMD_CFGBAR];
3516     vmd->resources[0] = (struct resource) {
3517     .name = "VMD CFGBAR",
3518     - .start = busn_start,
3519     - .end = busn_start + (resource_size(res) >> 20) - 1,
3520     + .start = vmd->busn_start,
3521     + .end = vmd->busn_start + (resource_size(res) >> 20) - 1,
3522     .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
3523     };
3524    
3525     @@ -681,8 +686,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
3526     pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
3527     pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
3528    
3529     - vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
3530     - sd, &resources);
3531     + vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
3532     + &vmd_ops, sd, &resources);
3533     if (!vmd->bus) {
3534     pci_free_resource_list(&resources);
3535     irq_domain_remove(vmd->irq_domain);
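
vmd_cfg_addr() computes a standard ECAM offset, now relative to the domain's own starting bus number, so a VMD domain whose buses begin at 128 indexes CFGBAR from zero rather than overrunning it. The address arithmetic in isolation:

#include <stdio.h>
#include <stdint.h>

/* ECAM: 1 MiB per bus, 4 KiB per function (devfn), byte offset reg. */
static uint64_t cfg_offset(uint8_t bus, uint8_t busn_start,
			   uint8_t devfn, uint16_t reg)
{
	return ((uint64_t)(bus - busn_start) << 20) |
	       ((uint64_t)devfn << 12) | reg;
}

int main(void)
{
	/* Bus 128 with busn_start 128 maps to the start of CFGBAR. */
	printf("0x%llx\n",
	       (unsigned long long)cfg_offset(128, 128, 0x08, 0x40));
	return 0;
}
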
3536     diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
3537     index 1f17da3dfeac..b97d9e10c9cc 100644
3538     --- a/drivers/pci/pci.c
3539     +++ b/drivers/pci/pci.c
3540     @@ -1443,7 +1443,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
3541     pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3542     bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3543     res = pdev->resource + bar_idx;
3544     - size = order_base_2((resource_size(res) >> 20) | 1) - 1;
3545     + size = ilog2(resource_size(res)) - 20;
3546     ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3547     ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3548     pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
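
The resizable-BAR control field encodes the BAR size as log2 of the size in MiB (0 for 1 MiB, 1 for 2 MiB, and so on); the old order_base_2 form underflowed to -1 for a 1 MiB BAR, while ilog2(size) - 20 is exact for the power-of-two sizes the spec allows. A quick standalone check (__builtin_clzll assumes GCC/Clang):

#include <stdio.h>
#include <stdint.h>

/* ilog2 for a nonzero 64-bit value. */
static unsigned int ilog2_u64(uint64_t v)
{
	return 63 - __builtin_clzll(v);
}

int main(void)
{
	/* PCI_REBAR_CTRL size field: log2 of the BAR size in MiB. */
	uint64_t sizes[] = { 1ULL << 20, 1ULL << 21, 1ULL << 28 };

	for (unsigned int i = 0; i < 3; i++)
		printf("%llu MiB -> size field %u\n",
		       (unsigned long long)(sizes[i] >> 20),
		       ilog2_u64(sizes[i]) - 20);
	return 0;
}
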
3549     diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
3550     index 048d205d7074..f8d74e9f7931 100644
3551     --- a/drivers/power/supply/sbs-battery.c
3552     +++ b/drivers/power/supply/sbs-battery.c
3553     @@ -314,17 +314,22 @@ static int sbs_get_battery_presence_and_health(
3554     {
3555     int ret;
3556    
3557     - if (psp == POWER_SUPPLY_PROP_PRESENT) {
3558     - /* Dummy command; if it succeeds, battery is present. */
3559     - ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
3560     - if (ret < 0)
3561     - val->intval = 0; /* battery disconnected */
3562     - else
3563     - val->intval = 1; /* battery present */
3564     - } else { /* POWER_SUPPLY_PROP_HEALTH */
3565     + /* Dummy command; if it succeeds, battery is present. */
3566     + ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
3567     +
3568     + if (ret < 0) { /* battery not present */
3569     + if (psp == POWER_SUPPLY_PROP_PRESENT) {
3570     + val->intval = 0;
3571     + return 0;
3572     + }
3573     + return ret;
3574     + }
3575     +
3576     + if (psp == POWER_SUPPLY_PROP_PRESENT)
3577     + val->intval = 1; /* battery present */
3578     + else /* POWER_SUPPLY_PROP_HEALTH */
3579     /* SBS spec doesn't have a general health command. */
3580     val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
3581     - }
3582    
3583     return 0;
3584     }
3585     @@ -620,12 +625,14 @@ static int sbs_get_property(struct power_supply *psy,
3586     switch (psp) {
3587     case POWER_SUPPLY_PROP_PRESENT:
3588     case POWER_SUPPLY_PROP_HEALTH:
3589     - if (client->flags & SBS_FLAGS_TI_BQ20Z75)
3590     + if (chip->flags & SBS_FLAGS_TI_BQ20Z75)
3591     ret = sbs_get_ti_battery_presence_and_health(client,
3592     psp, val);
3593     else
3594     ret = sbs_get_battery_presence_and_health(client, psp,
3595     val);
3596     +
3597     + /* this can only be true if no GPIO is used */
3598     if (psp == POWER_SUPPLY_PROP_PRESENT)
3599     return 0;
3600     break;
3601     diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
3602     index 2211a642066d..97a9afa191ee 100644
3603     --- a/drivers/pwm/pwm-stm32-lp.c
3604     +++ b/drivers/pwm/pwm-stm32-lp.c
3605     @@ -59,6 +59,12 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
3606     /* Calculate the period and prescaler value */
3607     div = (unsigned long long)clk_get_rate(priv->clk) * state->period;
3608     do_div(div, NSEC_PER_SEC);
3609     + if (!div) {
3610     + /* Clock is too slow to achieve requested period. */
3611     + dev_dbg(priv->chip.dev, "Can't reach %u ns\n", state->period);
3612     + return -EINVAL;
3613     + }
3614     +
3615     prd = div;
3616     while (div > STM32_LPTIM_MAX_ARR) {
3617     presc++;
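
The new guard rejects periods the clock cannot represent: div is the requested period expressed in clock ticks, and a result of zero means the period is shorter than one tick of the LPTIM clock. The computation on its own, with illustrative rates:

#include <stdio.h>
#include <stdint.h>

/* Returns the period in clock ticks, or 0 if the clock is too slow. */
static uint64_t period_to_ticks(uint64_t clk_rate_hz, uint64_t period_ns)
{
	return clk_rate_hz * period_ns / 1000000000ULL;
}

int main(void)
{
	/* A 32 kHz clock cannot resolve a 10 us period: 0 ticks. */
	printf("ticks=%llu\n",
	       (unsigned long long)period_to_ticks(32768, 10000));
	/* A 1 ms period is fine: ~32 ticks. */
	printf("ticks=%llu\n",
	       (unsigned long long)period_to_ticks(32768, 1000000));
	return 0;
}
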
3618     diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
3619     index fc53e1e221f0..c94184d080f8 100644
3620     --- a/drivers/s390/block/dasd_eckd.c
3621     +++ b/drivers/s390/block/dasd_eckd.c
3622     @@ -1553,8 +1553,8 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
3623     if (rc == 0) {
3624     memcpy(&private->vsq, vsq, sizeof(*vsq));
3625     } else {
3626     - dev_warn(&device->cdev->dev,
3627     - "Reading the volume storage information failed with rc=%d\n", rc);
3628     + DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
3629     + "Reading the volume storage information failed with rc=%d", rc);
3630     }
3631    
3632     if (useglobal)
3633     @@ -1737,8 +1737,8 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
3634     if (rc == 0) {
3635     dasd_eckd_cpy_ext_pool_data(device, lcq);
3636     } else {
3637     - dev_warn(&device->cdev->dev,
3638     - "Reading the logical configuration failed with rc=%d\n", rc);
3639     + DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
3640     + "Reading the logical configuration failed with rc=%d", rc);
3641     }
3642    
3643     dasd_sfree_request(cqr, cqr->memdev);
3644     @@ -2020,14 +2020,10 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
3645     dasd_eckd_read_features(device);
3646    
3647     /* Read Volume Information */
3648     - rc = dasd_eckd_read_vol_info(device);
3649     - if (rc)
3650     - goto out_err3;
3651     + dasd_eckd_read_vol_info(device);
3652    
3653     /* Read Extent Pool Information */
3654     - rc = dasd_eckd_read_ext_pool_info(device);
3655     - if (rc)
3656     - goto out_err3;
3657     + dasd_eckd_read_ext_pool_info(device);
3658    
3659     /* Read Device Characteristics */
3660     rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
3661     @@ -2059,9 +2055,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
3662     if (readonly)
3663     set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
3664    
3665     - if (dasd_eckd_is_ese(device))
3666     - dasd_set_feature(device->cdev, DASD_FEATURE_DISCARD, 1);
3667     -
3668     dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
3669     "with %d cylinders, %d heads, %d sectors%s\n",
3670     private->rdc_data.dev_type,
3671     @@ -3695,14 +3688,6 @@ static int dasd_eckd_release_space(struct dasd_device *device,
3672     return -EINVAL;
3673     }
3674    
3675     -static struct dasd_ccw_req *
3676     -dasd_eckd_build_cp_discard(struct dasd_device *device, struct dasd_block *block,
3677     - struct request *req, sector_t first_trk,
3678     - sector_t last_trk)
3679     -{
3680     - return dasd_eckd_dso_ras(device, block, req, first_trk, last_trk, 1);
3681     -}
3682     -
3683     static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
3684     struct dasd_device *startdev,
3685     struct dasd_block *block,
3686     @@ -4447,10 +4432,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
3687     cmdwtd = private->features.feature[12] & 0x40;
3688     use_prefix = private->features.feature[8] & 0x01;
3689    
3690     - if (req_op(req) == REQ_OP_DISCARD)
3691     - return dasd_eckd_build_cp_discard(startdev, block, req,
3692     - first_trk, last_trk);
3693     -
3694     cqr = NULL;
3695     if (cdlspecial || dasd_page_cache) {
3696     /* do nothing, just fall through to the cmd mode single case */
3697     @@ -4729,14 +4710,12 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
3698     struct dasd_block *block,
3699     struct request *req)
3700     {
3701     - struct dasd_device *startdev = NULL;
3702     struct dasd_eckd_private *private;
3703     - struct dasd_ccw_req *cqr;
3704     + struct dasd_device *startdev;
3705     unsigned long flags;
3706     + struct dasd_ccw_req *cqr;
3707    
3708     - /* Discard requests can only be processed on base devices */
3709     - if (req_op(req) != REQ_OP_DISCARD)
3710     - startdev = dasd_alias_get_start_dev(base);
3711     + startdev = dasd_alias_get_start_dev(base);
3712     if (!startdev)
3713     startdev = base;
3714     private = startdev->private;
3715     @@ -5663,14 +5642,10 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
3716     dasd_eckd_read_features(device);
3717    
3718     /* Read Volume Information */
3719     - rc = dasd_eckd_read_vol_info(device);
3720     - if (rc)
3721     - goto out_err2;
3722     + dasd_eckd_read_vol_info(device);
3723    
3724     /* Read Extent Pool Information */
3725     - rc = dasd_eckd_read_ext_pool_info(device);
3726     - if (rc)
3727     - goto out_err2;
3728     + dasd_eckd_read_ext_pool_info(device);
3729    
3730     /* Read Device Characteristics */
3731     rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
3732     @@ -6521,20 +6496,8 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
3733     unsigned int logical_block_size = block->bp_block;
3734     struct request_queue *q = block->request_queue;
3735     struct dasd_device *device = block->base;
3736     - struct dasd_eckd_private *private;
3737     - unsigned int max_discard_sectors;
3738     - unsigned int max_bytes;
3739     - unsigned int ext_bytes; /* Extent Size in Bytes */
3740     - int recs_per_trk;
3741     - int trks_per_cyl;
3742     - int ext_limit;
3743     - int ext_size; /* Extent Size in Cylinders */
3744     int max;
3745    
3746     - private = device->private;
3747     - trks_per_cyl = private->rdc_data.trk_per_cyl;
3748     - recs_per_trk = recs_per_track(&private->rdc_data, 0, logical_block_size);
3749     -
3750     if (device->features & DASD_FEATURE_USERAW) {
3751     /*
3752     * the max_blocks value for raw_track access is 256
3753     @@ -6555,28 +6518,6 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
3754     /* With page sized segments each segment can be translated into one idaw/tidaw */
3755     blk_queue_max_segment_size(q, PAGE_SIZE);
3756     blk_queue_segment_boundary(q, PAGE_SIZE - 1);
3757     -
3758     - if (dasd_eckd_is_ese(device)) {
3759     - /*
3760     - * Depending on the extent size, up to UINT_MAX bytes can be
3761     - * accepted. However, neither DASD_ECKD_RAS_EXTS_MAX nor the
3762     - * device limits should be exceeded.
3763     - */
3764     - ext_size = dasd_eckd_ext_size(device);
3765     - ext_limit = min(private->real_cyl / ext_size, DASD_ECKD_RAS_EXTS_MAX);
3766     - ext_bytes = ext_size * trks_per_cyl * recs_per_trk *
3767     - logical_block_size;
3768     - max_bytes = UINT_MAX - (UINT_MAX % ext_bytes);
3769     - if (max_bytes / ext_bytes > ext_limit)
3770     - max_bytes = ext_bytes * ext_limit;
3771     -
3772     - max_discard_sectors = max_bytes / 512;
3773     -
3774     - blk_queue_max_discard_sectors(q, max_discard_sectors);
3775     - blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
3776     - q->limits.discard_granularity = ext_bytes;
3777     - q->limits.discard_alignment = ext_bytes;
3778     - }
3779     }
3780    
3781     static struct ccw_driver dasd_eckd_driver = {
3782     diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
3783     index e71992a3c55f..cc5e84b80c69 100644
3784     --- a/drivers/s390/char/sclp_early.c
3785     +++ b/drivers/s390/char/sclp_early.c
3786     @@ -40,7 +40,7 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
3787     sclp.has_gisaf = !!(sccb->fac118 & 0x08);
3788     sclp.has_hvs = !!(sccb->fac119 & 0x80);
3789     sclp.has_kss = !!(sccb->fac98 & 0x01);
3790     - sclp.has_sipl = !!(sccb->cbl & 0x02);
3791     + sclp.has_sipl = !!(sccb->cbl & 0x4000);
3792     if (sccb->fac85 & 0x02)
3793     S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
3794     if (sccb->fac91 & 0x40)
3795     diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
3796     index c522e9313c50..ae66875a934d 100644
3797     --- a/drivers/s390/cio/ccwgroup.c
3798     +++ b/drivers/s390/cio/ccwgroup.c
3799     @@ -372,7 +372,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
3800     goto error;
3801     }
3802     /* Check for trailing stuff. */
3803     - if (i == num_devices && strlen(buf) > 0) {
3804     + if (i == num_devices && buf && strlen(buf) > 0) {
3805     rc = -EINVAL;
3806     goto error;
3807     }
3808     diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
3809     index 22c55816100b..1fbfb0a93f5f 100644
3810     --- a/drivers/s390/cio/css.c
3811     +++ b/drivers/s390/cio/css.c
3812     @@ -1388,6 +1388,8 @@ device_initcall(cio_settle_init);
3813    
3814     int sch_is_pseudo_sch(struct subchannel *sch)
3815     {
3816     + if (!sch->dev.parent)
3817     + return 0;
3818     return sch == to_css(sch->dev.parent)->pseudo_subchannel;
3819     }
3820    
3821     diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
3822     index dbf6a151886c..b11cecd0a21d 100644
3823     --- a/drivers/staging/erofs/dir.c
3824     +++ b/drivers/staging/erofs/dir.c
3825     @@ -99,8 +99,15 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
3826     unsigned int nameoff, maxsize;
3827    
3828     dentry_page = read_mapping_page(mapping, i, NULL);
3829     - if (IS_ERR(dentry_page))
3830     - continue;
3831     + if (dentry_page == ERR_PTR(-ENOMEM)) {
3832     + err = -ENOMEM;
3833     + break;
3834     + } else if (IS_ERR(dentry_page)) {
3835     + errln("failed to readdir of logical block %u of nid %llu",
3836     + i, EROFS_V(dir)->nid);
3837     + err = PTR_ERR(dentry_page);
3838     + break;
3839     + }
3840    
3841     de = (struct erofs_dirent *)kmap(dentry_page);
3842    
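
read_mapping_page() returns an ERR_PTR-encoded pointer, and the fix distinguishes -ENOMEM (memory pressure, fail the whole readdir) from other errors (log and bail) instead of silently skipping the block. The ERR_PTR encoding itself is just a small negative errno cast into the top of the address space; a userspace rendition mirroring include/linux/err.h:

#include <errno.h>
#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
	void *page = ERR_PTR(-ENOMEM); /* pretend the page read failed */

	if (IS_ERR(page)) {
		if (PTR_ERR(page) == -ENOMEM)
			printf("out of memory: fail the readdir\n");
		else
			printf("I/O error %ld: log and bail\n", PTR_ERR(page));
	}
	return 0;
}
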
3843     diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
3844     index f0dab81ff816..155cee68fed5 100644
3845     --- a/drivers/staging/erofs/unzip_vle.c
3846     +++ b/drivers/staging/erofs/unzip_vle.c
3847     @@ -393,7 +393,11 @@ z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
3848     /* if multiref is disabled, `primary' is always true */
3849     primary = true;
3850    
3851     - DBG_BUGON(work->pageofs != f->pageofs);
3852     + if (work->pageofs != f->pageofs) {
3853     + DBG_BUGON(1);
3854     + erofs_workgroup_put(egrp);
3855     + return ERR_PTR(-EIO);
3856     + }
3857    
3858     /*
3859     * lock must be taken first to avoid grp->next == NIL between
3860     @@ -939,6 +943,7 @@ repeat:
3861     for (i = 0; i < nr_pages; ++i)
3862     pages[i] = NULL;
3863    
3864     + err = 0;
3865     z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
3866     work->pagevec, 0);
3867    
3868     @@ -960,8 +965,17 @@ repeat:
3869     pagenr = z_erofs_onlinepage_index(page);
3870    
3871     DBG_BUGON(pagenr >= nr_pages);
3872     - DBG_BUGON(pages[pagenr]);
3873    
3874     + /*
3875     + * currently EROFS doesn't support multiref (dedup),
3876     + * so error out here on a multiref page.
3877     + */
3878     + if (pages[pagenr]) {
3879     + DBG_BUGON(1);
3880     + SetPageError(pages[pagenr]);
3881     + z_erofs_onlinepage_endio(pages[pagenr]);
3882     + err = -EIO;
3883     + }
3884     pages[pagenr] = page;
3885     }
3886     sparsemem_pages = i;
3887     @@ -971,7 +985,6 @@ repeat:
3888     overlapped = false;
3889     compressed_pages = grp->compressed_pages;
3890    
3891     - err = 0;
3892     for (i = 0; i < clusterpages; ++i) {
3893     unsigned int pagenr;
3894    
3895     @@ -995,7 +1008,12 @@ repeat:
3896     pagenr = z_erofs_onlinepage_index(page);
3897    
3898     DBG_BUGON(pagenr >= nr_pages);
3899     - DBG_BUGON(pages[pagenr]);
3900     + if (pages[pagenr]) {
3901     + DBG_BUGON(1);
3902     + SetPageError(pages[pagenr]);
3903     + z_erofs_onlinepage_endio(pages[pagenr]);
3904     + err = -EIO;
3905     + }
3906     ++sparsemem_pages;
3907     pages[pagenr] = page;
3908    
3909     @@ -1498,19 +1516,18 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
3910     err = z_erofs_do_read_page(&f, page, &pagepool);
3911     (void)z_erofs_vle_work_iter_end(&f.builder);
3912    
3913     - if (err) {
3914     + /* if some compressed clusters are ready, submit them anyway */
3915     + z_erofs_submit_and_unzip(&f, &pagepool, true);
3916     +
3917     + if (err)
3918     errln("%s, failed to read, err [%d]", __func__, err);
3919     - goto out;
3920     - }
3921    
3922     - z_erofs_submit_and_unzip(&f, &pagepool, true);
3923     -out:
3924     if (f.map.mpage)
3925     put_page(f.map.mpage);
3926    
3927     /* clean up the remaining free pages */
3928     put_pages_list(&pagepool);
3929     - return 0;
3930     + return err;
3931     }
3932    
3933     static int z_erofs_vle_normalaccess_readpages(struct file *filp,
3934     diff --git a/drivers/staging/erofs/zmap.c b/drivers/staging/erofs/zmap.c
3935     index c2359321ca13..30e6d02d30de 100644
3936     --- a/drivers/staging/erofs/zmap.c
3937     +++ b/drivers/staging/erofs/zmap.c
3938     @@ -350,6 +350,12 @@ static int vle_extent_lookback(struct z_erofs_maprecorder *m,
3939    
3940     switch (m->type) {
3941     case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
3942     + if (!m->delta[0]) {
3943     + errln("invalid lookback distance 0 at nid %llu",
3944     + vi->nid);
3945     + DBG_BUGON(1);
3946     + return -EIO;
3947     + }
3948     return vle_extent_lookback(m, m->delta[0]);
3949     case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
3950     map->m_flags &= ~EROFS_MAP_ZIPPED;
3951     diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
3952     index 8d9b721dadb6..e46a4e3f25c4 100644
3953     --- a/drivers/thermal/qcom/tsens-8960.c
3954     +++ b/drivers/thermal/qcom/tsens-8960.c
3955     @@ -229,6 +229,8 @@ static int calibrate_8960(struct tsens_priv *priv)
3956     for (i = 0; i < num_read; i++, s++)
3957     s->offset = data[i];
3958    
3959     + kfree(data);
3960     +
3961     return 0;
3962     }
3963    
3964     diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
3965     index 6f26fadf4c27..055647bcee67 100644
3966     --- a/drivers/thermal/qcom/tsens-v0_1.c
3967     +++ b/drivers/thermal/qcom/tsens-v0_1.c
3968     @@ -145,8 +145,10 @@ static int calibrate_8916(struct tsens_priv *priv)
3969     return PTR_ERR(qfprom_cdata);
3970    
3971     qfprom_csel = (u32 *)qfprom_read(priv->dev, "calib_sel");
3972     - if (IS_ERR(qfprom_csel))
3973     + if (IS_ERR(qfprom_csel)) {
3974     + kfree(qfprom_cdata);
3975     return PTR_ERR(qfprom_csel);
3976     + }
3977    
3978     mode = (qfprom_csel[0] & MSM8916_CAL_SEL_MASK) >> MSM8916_CAL_SEL_SHIFT;
3979     dev_dbg(priv->dev, "calibration mode is %d\n", mode);
3980     @@ -181,6 +183,8 @@ static int calibrate_8916(struct tsens_priv *priv)
3981     }
3982    
3983     compute_intercept_slope(priv, p1, p2, mode);
3984     + kfree(qfprom_cdata);
3985     + kfree(qfprom_csel);
3986    
3987     return 0;
3988     }
3989     @@ -198,8 +202,10 @@ static int calibrate_8974(struct tsens_priv *priv)
3990     return PTR_ERR(calib);
3991    
3992     bkp = (u32 *)qfprom_read(priv->dev, "calib_backup");
3993     - if (IS_ERR(bkp))
3994     + if (IS_ERR(bkp)) {
3995     + kfree(calib);
3996     return PTR_ERR(bkp);
3997     + }
3998    
3999     calib_redun_sel = bkp[1] & BKP_REDUN_SEL;
4000     calib_redun_sel >>= BKP_REDUN_SHIFT;
4001     @@ -313,6 +319,8 @@ static int calibrate_8974(struct tsens_priv *priv)
4002     }
4003    
4004     compute_intercept_slope(priv, p1, p2, mode);
4005     + kfree(calib);
4006     + kfree(bkp);
4007    
4008     return 0;
4009     }
4010     diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
4011     index 10b595d4f619..870f502f2cb6 100644
4012     --- a/drivers/thermal/qcom/tsens-v1.c
4013     +++ b/drivers/thermal/qcom/tsens-v1.c
4014     @@ -138,6 +138,7 @@ static int calibrate_v1(struct tsens_priv *priv)
4015     }
4016    
4017     compute_intercept_slope(priv, p1, p2, mode);
4018     + kfree(qfprom_cdata);
4019    
4020     return 0;
4021     }
4022     diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
4023     index 2fd94997245b..b89083b61c38 100644
4024     --- a/drivers/thermal/qcom/tsens.h
4025     +++ b/drivers/thermal/qcom/tsens.h
4026     @@ -17,6 +17,7 @@
4027    
4028     #include <linux/thermal.h>
4029     #include <linux/regmap.h>
4030     +#include <linux/slab.h>
4031    
4032     struct tsens_priv;
4033    
4034     diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
4035     index 6bab66e84eb5..ebe15f2cf7fc 100644
4036     --- a/drivers/thermal/thermal_core.c
4037     +++ b/drivers/thermal/thermal_core.c
4038     @@ -304,7 +304,7 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
4039     &tz->poll_queue,
4040     msecs_to_jiffies(delay));
4041     else
4042     - cancel_delayed_work(&tz->poll_queue);
4043     + cancel_delayed_work_sync(&tz->poll_queue);
4044     }
4045    
4046     static void monitor_thermal_zone(struct thermal_zone_device *tz)
4047     diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
4048     index 40c69a533b24..dd5d8ee37928 100644
4049     --- a/drivers/thermal/thermal_hwmon.c
4050     +++ b/drivers/thermal/thermal_hwmon.c
4051     @@ -87,13 +87,17 @@ static struct thermal_hwmon_device *
4052     thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
4053     {
4054     struct thermal_hwmon_device *hwmon;
4055     + char type[THERMAL_NAME_LENGTH];
4056    
4057     mutex_lock(&thermal_hwmon_list_lock);
4058     - list_for_each_entry(hwmon, &thermal_hwmon_list, node)
4059     - if (!strcmp(hwmon->type, tz->type)) {
4060     + list_for_each_entry(hwmon, &thermal_hwmon_list, node) {
4061     + strcpy(type, tz->type);
4062     + strreplace(type, '-', '_');
4063     + if (!strcmp(hwmon->type, type)) {
4064     mutex_unlock(&thermal_hwmon_list_lock);
4065     return hwmon;
4066     }
4067     + }
4068     mutex_unlock(&thermal_hwmon_list_lock);
4069    
4070     return NULL;
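
hwmon device names may not contain '-', so the thermal core registers them with '-' replaced by '_'; the lookup must normalize the zone type the same way before comparing, or zones like "soc-thermal" never match their hwmon entry. strreplace() is a one-liner; a standalone equivalent with the kernel's behavior (in-place, returns a pointer to the terminator):

#include <stdio.h>
#include <string.h>

static char *strreplace(char *s, char old, char new)
{
	for (; *s; ++s)
		if (*s == old)
			*s = new;
	return s;
}

int main(void)
{
	char type[32];

	strcpy(type, "soc-thermal");
	strreplace(type, '-', '_');
	printf("%s\n", type); /* "soc_thermal" matches the hwmon name */
	return 0;
}
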
4071     diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
4072     index cc71861e033a..5b64bc2e8788 100644
4073     --- a/drivers/watchdog/aspeed_wdt.c
4074     +++ b/drivers/watchdog/aspeed_wdt.c
4075     @@ -34,6 +34,7 @@ static const struct aspeed_wdt_config ast2500_config = {
4076     static const struct of_device_id aspeed_wdt_of_table[] = {
4077     { .compatible = "aspeed,ast2400-wdt", .data = &ast2400_config },
4078     { .compatible = "aspeed,ast2500-wdt", .data = &ast2500_config },
4079     + { .compatible = "aspeed,ast2600-wdt", .data = &ast2500_config },
4080     { },
4081     };
4082     MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
4083     @@ -259,7 +260,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
4084     set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
4085     }
4086    
4087     - if (of_device_is_compatible(np, "aspeed,ast2500-wdt")) {
4088     + if ((of_device_is_compatible(np, "aspeed,ast2500-wdt")) ||
4089     + (of_device_is_compatible(np, "aspeed,ast2600-wdt"))) {
4090     u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
4091    
4092     reg &= config->ext_pulse_width_mask;
4093     diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
4094     index 32af3974e6bb..8d019a961ccc 100644
4095     --- a/drivers/watchdog/imx2_wdt.c
4096     +++ b/drivers/watchdog/imx2_wdt.c
4097     @@ -55,7 +55,7 @@
4098    
4099     #define IMX2_WDT_WMCR 0x08 /* Misc Register */
4100    
4101     -#define IMX2_WDT_MAX_TIME 128
4102     +#define IMX2_WDT_MAX_TIME 128U
4103     #define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */
4104    
4105     #define WDOG_SEC_TO_COUNT(s) ((s * 2 - 1) << 8)
4106     @@ -180,7 +180,7 @@ static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
4107     {
4108     unsigned int actual;
4109    
4110     - actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000);
4111     + actual = min(new_timeout, IMX2_WDT_MAX_TIME);
4112     __imx2_wdt_set_timeout(wdog, actual);
4113     wdog->timeout = new_timeout;
4114     return 0;
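
The old clamp compared a value in seconds against max_hw_heartbeat_ms * 1000 (milliseconds scaled the wrong way), so it never limited anything; the fix clamps directly against the 128-second hardware maximum, and the U suffix keeps the comparison's types consistent for the kernel's type-checked min(). The unit mismatch, made visible in standalone C:

#include <stdio.h>

#define IMX2_WDT_MAX_TIME 128U /* seconds */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int new_timeout = 300;            /* seconds, too large */
	unsigned int max_hw_heartbeat_ms = 128000; /* 128 s in ms */

	/* Buggy clamp: 300 < 128000 * 1000, so nothing is limited. */
	printf("buggy: %u s\n", min_u(new_timeout,
				      max_hw_heartbeat_ms * 1000));
	/* Fixed clamp against the real hardware limit in seconds. */
	printf("fixed: %u s\n", min_u(new_timeout, IMX2_WDT_MAX_TIME));
	return 0;
}
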
4115     diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
4116     index 4e11de6cde81..91cba70b69df 100644
4117     --- a/drivers/xen/balloon.c
4118     +++ b/drivers/xen/balloon.c
4119     @@ -688,6 +688,7 @@ static void __init balloon_add_region(unsigned long start_pfn,
4120     /* totalram_pages and totalhigh_pages do not
4121     include the boot-time balloon extension, so
4122     don't subtract from it. */
4123     + __SetPageOffline(page);
4124     __balloon_append(page);
4125     }
4126    
4127     diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
4128     index 3eeb9bea7630..224df03ce42e 100644
4129     --- a/drivers/xen/pci.c
4130     +++ b/drivers/xen/pci.c
4131     @@ -17,6 +17,8 @@
4132     #include "../pci/pci.h"
4133     #ifdef CONFIG_PCI_MMCONFIG
4134     #include <asm/pci_x86.h>
4135     +
4136     +static int xen_mcfg_late(void);
4137     #endif
4138    
4139     static bool __read_mostly pci_seg_supported = true;
4140     @@ -28,7 +30,18 @@ static int xen_add_device(struct device *dev)
4141     #ifdef CONFIG_PCI_IOV
4142     struct pci_dev *physfn = pci_dev->physfn;
4143     #endif
4144     -
4145     +#ifdef CONFIG_PCI_MMCONFIG
4146     + static bool pci_mcfg_reserved = false;
4147     + /*
4148     + * Reserve MCFG areas in Xen on first invocation due to this being
4149     + * potentially called from inside acpi_init immediately after the
4150     + * MCFG table has finally been parsed.
4151     + */
4152     + if (!pci_mcfg_reserved) {
4153     + xen_mcfg_late();
4154     + pci_mcfg_reserved = true;
4155     + }
4156     +#endif
4157     if (pci_seg_supported) {
4158     struct {
4159     struct physdev_pci_device_add add;
4160     @@ -201,7 +214,7 @@ static int __init register_xen_pci_notifier(void)
4161     arch_initcall(register_xen_pci_notifier);
4162    
4163     #ifdef CONFIG_PCI_MMCONFIG
4164     -static int __init xen_mcfg_late(void)
4165     +static int xen_mcfg_late(void)
4166     {
4167     struct pci_mmcfg_region *cfg;
4168     int rc;
4169     @@ -240,8 +253,4 @@ static int __init xen_mcfg_late(void)
4170     }
4171     return 0;
4172     }
4173     -/*
4174     - * Needs to be done after acpi_init which are subsys_initcall.
4175     - */
4176     -subsys_initcall_sync(xen_mcfg_late);
4177     #endif
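
The xen/pci.c hunks above replace a fragile initcall ordering (subsys_initcall_sync ran xen_mcfg_late() after acpi_init) with a lazy run-once on the first device add, guarded by a static flag. A user-space sketch of the run-once-on-first-use idea; this path is serialized in the kernel, so a plain bool suffices (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static void late_setup(void)
{
	puts("late setup ran");
}

static void add_device(const char *name)
{
	static bool setup_done;

	/* Defer setup to first use instead of relying on init ordering. */
	if (!setup_done) {
		late_setup();
		setup_done = true;
	}
	printf("added %s\n", name);
}

int main(void)
{
	add_device("dev0");	/* triggers late_setup() once */
	add_device("dev1");	/* setup already done */
	return 0;
}
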
4178     diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
4179     index 08adc590f631..597af455a522 100644
4180     --- a/drivers/xen/xenbus/xenbus_dev_frontend.c
4181     +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
4182     @@ -55,6 +55,7 @@
4183     #include <linux/string.h>
4184     #include <linux/slab.h>
4185     #include <linux/miscdevice.h>
4186     +#include <linux/workqueue.h>
4187    
4188     #include <xen/xenbus.h>
4189     #include <xen/xen.h>
4190     @@ -116,6 +117,8 @@ struct xenbus_file_priv {
4191     wait_queue_head_t read_waitq;
4192    
4193     struct kref kref;
4194     +
4195     + struct work_struct wq;
4196     };
4197    
4198     /* Read out any raw xenbus messages queued up. */
4199     @@ -300,14 +303,14 @@ static void watch_fired(struct xenbus_watch *watch,
4200     mutex_unlock(&adap->dev_data->reply_mutex);
4201     }
4202    
4203     -static void xenbus_file_free(struct kref *kref)
4204     +static void xenbus_worker(struct work_struct *wq)
4205     {
4206     struct xenbus_file_priv *u;
4207     struct xenbus_transaction_holder *trans, *tmp;
4208     struct watch_adapter *watch, *tmp_watch;
4209     struct read_buffer *rb, *tmp_rb;
4210    
4211     - u = container_of(kref, struct xenbus_file_priv, kref);
4212     + u = container_of(wq, struct xenbus_file_priv, wq);
4213    
4214     /*
4215     * No need for locking here because there are no other users,
4216     @@ -333,6 +336,18 @@ static void xenbus_file_free(struct kref *kref)
4217     kfree(u);
4218     }
4219    
4220     +static void xenbus_file_free(struct kref *kref)
4221     +{
4222     + struct xenbus_file_priv *u;
4223     +
4224     + /*
4225     + * We might be called from within xenbus_thread().
4226     + * Use a workqueue to avoid a deadlock.
4227     + */
4228     + u = container_of(kref, struct xenbus_file_priv, kref);
4229     + schedule_work(&u->wq);
4230     +}
4231     +
4232     static struct xenbus_transaction_holder *xenbus_get_transaction(
4233     struct xenbus_file_priv *u, uint32_t tx_id)
4234     {
4235     @@ -650,6 +665,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
4236     INIT_LIST_HEAD(&u->watches);
4237     INIT_LIST_HEAD(&u->read_buffers);
4238     init_waitqueue_head(&u->read_waitq);
4239     + INIT_WORK(&u->wq, xenbus_worker);
4240    
4241     mutex_init(&u->reply_mutex);
4242     mutex_init(&u->msgbuffer_mutex);
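
The xenbus change above is the classic deferred-release pattern: per the new comment, the kref release callback can fire from xenbus_thread() itself, where doing the teardown inline would deadlock, so the release function only schedules a work item and the worker does the actual cleanup. A single-threaded user-space sketch of the shape, with toy stand-ins for kref and work_struct:

#include <stdio.h>
#include <stdlib.h>

struct conn {
	int refcount;
	void (*work_fn)(struct conn *);
};

static struct conn *pending_work;	/* one-slot stand-in for a workqueue */

static void conn_destroy(struct conn *c)
{
	puts("tearing down in worker context");
	free(c);
}

/* Release callback: may run in a context that must not tear down
 * inline, so just schedule the real work. */
static void conn_release(struct conn *c)
{
	c->work_fn = conn_destroy;
	pending_work = c;
}

static void conn_put(struct conn *c)
{
	if (--c->refcount == 0)
		conn_release(c);
}

static void run_worker(void)
{
	if (pending_work) {
		pending_work->work_fn(pending_work);
		pending_work = NULL;
	}
}

int main(void)
{
	struct conn *c = malloc(sizeof(*c));

	c->refcount = 1;
	conn_put(c);	/* schedules teardown instead of freeing here */
	run_worker();	/* a safe context performs the free later */
	return 0;
}
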
4243     diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
4244     index 4cc966a31cb3..fe7f0bd2048e 100644
4245     --- a/fs/9p/vfs_file.c
4246     +++ b/fs/9p/vfs_file.c
4247     @@ -513,6 +513,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
4248     v9inode = V9FS_I(inode);
4249     mutex_lock(&v9inode->v_mutex);
4250     if (!v9inode->writeback_fid &&
4251     + (vma->vm_flags & VM_SHARED) &&
4252     (vma->vm_flags & VM_WRITE)) {
4253     /*
4254     * clone a fid and add it to writeback_fid
4255     @@ -614,6 +615,8 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
4256     (vma->vm_end - vma->vm_start - 1),
4257     };
4258    
4259     + if (!(vma->vm_flags & VM_SHARED))
4260     + return;
4261    
4262     p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
4263    
4264     diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
4265     index 1e3ba4949399..814a918998ec 100644
4266     --- a/fs/btrfs/tests/btrfs-tests.c
4267     +++ b/fs/btrfs/tests/btrfs-tests.c
4268     @@ -51,7 +51,13 @@ static struct file_system_type test_type = {
4269    
4270     struct inode *btrfs_new_test_inode(void)
4271     {
4272     - return new_inode(test_mnt->mnt_sb);
4273     + struct inode *inode;
4274     +
4275     + inode = new_inode(test_mnt->mnt_sb);
4276     + if (inode)
4277     + inode_init_owner(inode, NULL, S_IFREG);
4278     +
4279     + return inode;
4280     }
4281    
4282     static int btrfs_init_test_fs(void)
4283     diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
4284     index ce0f5658720a..8fd530112810 100644
4285     --- a/fs/ceph/caps.c
4286     +++ b/fs/ceph/caps.c
4287     @@ -645,6 +645,7 @@ void ceph_add_cap(struct inode *inode,
4288     struct ceph_cap *cap;
4289     int mds = session->s_mds;
4290     int actual_wanted;
4291     + u32 gen;
4292    
4293     dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
4294     session->s_mds, cap_id, ceph_cap_string(issued), seq);
4295     @@ -656,6 +657,10 @@ void ceph_add_cap(struct inode *inode,
4296     if (fmode >= 0)
4297     wanted |= ceph_caps_for_mode(fmode);
4298    
4299     + spin_lock(&session->s_gen_ttl_lock);
4300     + gen = session->s_cap_gen;
4301     + spin_unlock(&session->s_gen_ttl_lock);
4302     +
4303     cap = __get_cap_for_mds(ci, mds);
4304     if (!cap) {
4305     cap = *new_cap;
4306     @@ -681,7 +686,7 @@ void ceph_add_cap(struct inode *inode,
4307     list_move_tail(&cap->session_caps, &session->s_caps);
4308     spin_unlock(&session->s_cap_lock);
4309    
4310     - if (cap->cap_gen < session->s_cap_gen)
4311     + if (cap->cap_gen < gen)
4312     cap->issued = cap->implemented = CEPH_CAP_PIN;
4313    
4314     /*
4315     @@ -775,7 +780,7 @@ void ceph_add_cap(struct inode *inode,
4316     cap->seq = seq;
4317     cap->issue_seq = seq;
4318     cap->mseq = mseq;
4319     - cap->cap_gen = session->s_cap_gen;
4320     + cap->cap_gen = gen;
4321    
4322     if (fmode >= 0)
4323     __ceph_get_fmode(ci, fmode);
4324     diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
4325     index 18500edefc56..3b537e7038c7 100644
4326     --- a/fs/ceph/inode.c
4327     +++ b/fs/ceph/inode.c
4328     @@ -801,7 +801,12 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
4329    
4330     /* update inode */
4331     inode->i_rdev = le32_to_cpu(info->rdev);
4332     - inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
4333     + /* directories have fl_stripe_unit set to zero */
4334     + if (le32_to_cpu(info->layout.fl_stripe_unit))
4335     + inode->i_blkbits =
4336     + fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
4337     + else
4338     + inode->i_blkbits = CEPH_BLOCK_SHIFT;
4339    
4340     __ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
4341    
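
In the fs/ceph/inode.c hunk above, fls(x) - 1 computes floor(log2(x)) for x > 0, which is how the stripe unit becomes i_blkbits; fls(0) is 0, so directories (stripe unit zero) used to end up with i_blkbits == -1, hence the new fallback. A sketch with a portable fls() stand-in; the value 22 merely stands in for whatever CEPH_BLOCK_SHIFT resolves to:

#include <stdio.h>

#define DEFAULT_BLOCK_SHIFT 22	/* stands in for CEPH_BLOCK_SHIFT */

/* Portable stand-in for the kernel's fls(): 1-based index of the
 * highest set bit; fls(0) == 0. */
static int fls_local(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static int blkbits_for(unsigned int stripe_unit)
{
	if (stripe_unit)
		return fls_local(stripe_unit) - 1; /* floor(log2(x)) */
	return DEFAULT_BLOCK_SHIFT;	/* directories: stripe unit is 0 */
}

int main(void)
{
	printf("%d\n", blkbits_for(65536));	/* 16 */
	printf("%d\n", blkbits_for(0));		/* 22, not -1 */
	return 0;
}
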
4342     diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
4343     index 920e9f048bd8..b11af7d8e8e9 100644
4344     --- a/fs/ceph/mds_client.c
4345     +++ b/fs/ceph/mds_client.c
4346     @@ -4044,7 +4044,9 @@ static void delayed_work(struct work_struct *work)
4347     pr_info("mds%d hung\n", s->s_mds);
4348     }
4349     }
4350     - if (s->s_state < CEPH_MDS_SESSION_OPEN) {
4351     + if (s->s_state == CEPH_MDS_SESSION_NEW ||
4352     + s->s_state == CEPH_MDS_SESSION_RESTARTING ||
4353     + s->s_state == CEPH_MDS_SESSION_REJECTED) {
4354     /* this mds is failed or recovering, just wait */
4355     ceph_put_mds_session(s);
4356     continue;
4357     diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
4358     index bab7a0db81dd..f3b720884650 100644
4359     --- a/fs/fuse/cuse.c
4360     +++ b/fs/fuse/cuse.c
4361     @@ -519,6 +519,7 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
4362     rc = cuse_send_init(cc);
4363     if (rc) {
4364     fuse_dev_free(fud);
4365     + fuse_conn_put(&cc->fc);
4366     return rc;
4367     }
4368     file->private_data = fud;
4369     diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
4370     index 987877860c01..f3104db3de83 100644
4371     --- a/fs/fuse/inode.c
4372     +++ b/fs/fuse/inode.c
4373     @@ -823,9 +823,12 @@ static const struct super_operations fuse_super_operations = {
4374    
4375     static void sanitize_global_limit(unsigned *limit)
4376     {
4377     + /*
4378     + * The default maximum number of async requests is calculated to consume
4379     + * 1/2^13 of the total memory, assuming 392 bytes per request.
4380     + */
4381     if (*limit == 0)
4382     - *limit = ((totalram_pages() << PAGE_SHIFT) >> 13) /
4383     - sizeof(struct fuse_req);
4384     + *limit = ((totalram_pages() << PAGE_SHIFT) >> 13) / 392;
4385    
4386     if (*limit >= 1 << 16)
4387     *limit = (1 << 16) - 1;
4388     diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
4389     index 46a8d636d151..ab07db0f07cd 100644
4390     --- a/fs/nfs/nfs4xdr.c
4391     +++ b/fs/nfs/nfs4xdr.c
4392     @@ -1174,7 +1174,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
4393     } else
4394     *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
4395     }
4396     - if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
4397     + if (label && (bmval[2] & FATTR4_WORD2_SECURITY_LABEL)) {
4398     *p++ = cpu_to_be32(label->lfs);
4399     *p++ = cpu_to_be32(label->pi);
4400     *p++ = cpu_to_be32(label->len);
4401     diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
4402     index 4525d5acae38..0418b198edd3 100644
4403     --- a/fs/nfs/pnfs.c
4404     +++ b/fs/nfs/pnfs.c
4405     @@ -1449,10 +1449,15 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
4406     const nfs4_stateid *res_stateid = NULL;
4407     struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
4408    
4409     - if (ret == 0) {
4410     - arg_stateid = &args->stateid;
4411     + switch (ret) {
4412     + case -NFS4ERR_NOMATCHING_LAYOUT:
4413     + break;
4414     + case 0:
4415     if (res->lrs_present)
4416     res_stateid = &res->stateid;
4417     + /* Fallthrough */
4418     + default:
4419     + arg_stateid = &args->stateid;
4420     }
4421     pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
4422     res_stateid);
4423     diff --git a/fs/statfs.c b/fs/statfs.c
4424     index eea7af6f2f22..2616424012ea 100644
4425     --- a/fs/statfs.c
4426     +++ b/fs/statfs.c
4427     @@ -318,19 +318,10 @@ COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *,
4428     static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
4429     {
4430     struct compat_statfs64 buf;
4431     - if (sizeof(ubuf->f_bsize) == 4) {
4432     - if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen |
4433     - kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL)
4434     - return -EOVERFLOW;
4435     - /* f_files and f_ffree may be -1; it's okay
4436     - * to stuff that into 32 bits */
4437     - if (kbuf->f_files != 0xffffffffffffffffULL
4438     - && (kbuf->f_files & 0xffffffff00000000ULL))
4439     - return -EOVERFLOW;
4440     - if (kbuf->f_ffree != 0xffffffffffffffffULL
4441     - && (kbuf->f_ffree & 0xffffffff00000000ULL))
4442     - return -EOVERFLOW;
4443     - }
4444     +
4445     + if ((kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
4446     + return -EOVERFLOW;
4447     +
4448     memset(&buf, 0, sizeof(struct compat_statfs64));
4449     buf.f_type = kbuf->f_type;
4450     buf.f_bsize = kbuf->f_bsize;
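
The rewritten check above ORs f_bsize and f_frsize so a single mask test catches any value that would not fit in the 32-bit compat fields, replacing the earlier per-field checks. A tiny sketch of that combined high-bits test:

#include <stdint.h>
#include <stdio.h>

/* Nonzero if either value would truncate when stored in 32 bits. */
static int would_overflow_u32(uint64_t bsize, uint64_t frsize)
{
	/* Any high bit set in either input survives the OR. */
	return ((bsize | frsize) & 0xffffffff00000000ULL) != 0;
}

int main(void)
{
	printf("%d\n", would_overflow_u32(4096, 4096));		/* 0 */
	printf("%d\n", would_overflow_u32(1ULL << 33, 4096));	/* 1 */
	return 0;
}
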
4451     diff --git a/include/linux/memremap.h b/include/linux/memremap.h
4452     index f8a5b2a19945..c70996fe48c8 100644
4453     --- a/include/linux/memremap.h
4454     +++ b/include/linux/memremap.h
4455     @@ -17,6 +17,7 @@ struct device;
4456     */
4457     struct vmem_altmap {
4458     const unsigned long base_pfn;
4459     + const unsigned long end_pfn;
4460     const unsigned long reserve;
4461     unsigned long free;
4462     unsigned long align;
4463     diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
4464     index 4a7944078cc3..8557ec664213 100644
4465     --- a/include/linux/sched/mm.h
4466     +++ b/include/linux/sched/mm.h
4467     @@ -362,6 +362,8 @@ enum {
4468    
4469     static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
4470     {
4471     + if (current->mm != mm)
4472     + return;
4473     if (likely(!(atomic_read(&mm->membarrier_state) &
4474     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
4475     return;
4476     diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
4477     index c00a0b8ade08..6c6694160130 100644
4478     --- a/include/sound/soc-dapm.h
4479     +++ b/include/sound/soc-dapm.h
4480     @@ -353,6 +353,8 @@ struct device;
4481     #define SND_SOC_DAPM_WILL_PMD 0x80 /* called at start of sequence */
4482     #define SND_SOC_DAPM_PRE_POST_PMD \
4483     (SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD)
4484     +#define SND_SOC_DAPM_PRE_POST_PMU \
4485     + (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU)
4486    
4487     /* convenience event type detection */
4488     #define SND_SOC_DAPM_EVENT_ON(e) \
4489     diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
4490     index aa7f3aeac740..79095434c1be 100644
4491     --- a/include/trace/events/writeback.h
4492     +++ b/include/trace/events/writeback.h
4493     @@ -66,8 +66,9 @@ DECLARE_EVENT_CLASS(writeback_page_template,
4494     ),
4495    
4496     TP_fast_assign(
4497     - strncpy(__entry->name,
4498     - mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
4499     + strscpy_pad(__entry->name,
4500     + mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)",
4501     + 32);
4502     __entry->ino = mapping ? mapping->host->i_ino : 0;
4503     __entry->index = page->index;
4504     ),
4505     @@ -110,8 +111,8 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
4506     struct backing_dev_info *bdi = inode_to_bdi(inode);
4507    
4508     /* may be called for files on pseudo FSes w/ unregistered bdi */
4509     - strncpy(__entry->name,
4510     - bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
4511     + strscpy_pad(__entry->name,
4512     + bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
4513     __entry->ino = inode->i_ino;
4514     __entry->state = inode->i_state;
4515     __entry->flags = flags;
4516     @@ -190,8 +191,8 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
4517     ),
4518    
4519     TP_fast_assign(
4520     - strncpy(__entry->name,
4521     - dev_name(inode_to_bdi(inode)->dev), 32);
4522     + strscpy_pad(__entry->name,
4523     + dev_name(inode_to_bdi(inode)->dev), 32);
4524     __entry->ino = inode->i_ino;
4525     __entry->sync_mode = wbc->sync_mode;
4526     __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
4527     @@ -234,8 +235,9 @@ DECLARE_EVENT_CLASS(writeback_work_class,
4528     __field(unsigned int, cgroup_ino)
4529     ),
4530     TP_fast_assign(
4531     - strncpy(__entry->name,
4532     - wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
4533     + strscpy_pad(__entry->name,
4534     + wb->bdi->dev ? dev_name(wb->bdi->dev) :
4535     + "(unknown)", 32);
4536     __entry->nr_pages = work->nr_pages;
4537     __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
4538     __entry->sync_mode = work->sync_mode;
4539     @@ -288,7 +290,7 @@ DECLARE_EVENT_CLASS(writeback_class,
4540     __field(unsigned int, cgroup_ino)
4541     ),
4542     TP_fast_assign(
4543     - strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
4544     + strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
4545     __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
4546     ),
4547     TP_printk("bdi %s: cgroup_ino=%u",
4548     @@ -310,7 +312,7 @@ TRACE_EVENT(writeback_bdi_register,
4549     __array(char, name, 32)
4550     ),
4551     TP_fast_assign(
4552     - strncpy(__entry->name, dev_name(bdi->dev), 32);
4553     + strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
4554     ),
4555     TP_printk("bdi %s",
4556     __entry->name
4557     @@ -335,7 +337,7 @@ DECLARE_EVENT_CLASS(wbc_class,
4558     ),
4559    
4560     TP_fast_assign(
4561     - strncpy(__entry->name, dev_name(bdi->dev), 32);
4562     + strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
4563     __entry->nr_to_write = wbc->nr_to_write;
4564     __entry->pages_skipped = wbc->pages_skipped;
4565     __entry->sync_mode = wbc->sync_mode;
4566     @@ -386,7 +388,7 @@ TRACE_EVENT(writeback_queue_io,
4567     ),
4568     TP_fast_assign(
4569     unsigned long *older_than_this = work->older_than_this;
4570     - strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
4571     + strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
4572     __entry->older = older_than_this ? *older_than_this : 0;
4573     __entry->age = older_than_this ?
4574     (jiffies - *older_than_this) * 1000 / HZ : -1;
4575     @@ -472,7 +474,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
4576     ),
4577    
4578     TP_fast_assign(
4579     - strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
4580     + strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
4581     __entry->write_bw = KBps(wb->write_bandwidth);
4582     __entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
4583     __entry->dirty_rate = KBps(dirty_rate);
4584     @@ -537,7 +539,7 @@ TRACE_EVENT(balance_dirty_pages,
4585    
4586     TP_fast_assign(
4587     unsigned long freerun = (thresh + bg_thresh) / 2;
4588     - strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
4589     + strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
4590    
4591     __entry->limit = global_wb_domain.dirty_limit;
4592     __entry->setpoint = (global_wb_domain.dirty_limit +
4593     @@ -597,8 +599,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
4594     ),
4595    
4596     TP_fast_assign(
4597     - strncpy(__entry->name,
4598     - dev_name(inode_to_bdi(inode)->dev), 32);
4599     + strscpy_pad(__entry->name,
4600     + dev_name(inode_to_bdi(inode)->dev), 32);
4601     __entry->ino = inode->i_ino;
4602     __entry->state = inode->i_state;
4603     __entry->dirtied_when = inode->dirtied_when;
4604     @@ -671,8 +673,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
4605     ),
4606    
4607     TP_fast_assign(
4608     - strncpy(__entry->name,
4609     - dev_name(inode_to_bdi(inode)->dev), 32);
4610     + strscpy_pad(__entry->name,
4611     + dev_name(inode_to_bdi(inode)->dev), 32);
4612     __entry->ino = inode->i_ino;
4613     __entry->state = inode->i_state;
4614     __entry->dirtied_when = inode->dirtied_when;
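
The writeback tracepoint conversions above swap strncpy()/strlcpy() for strscpy_pad(): strncpy() does not NUL-terminate when the source fills the buffer, strlcpy() terminates but leaves the tail uninitialized, and these fixed 32-byte arrays are copied into the trace buffer as-is, so both termination and zero padding matter. A user-space sketch contrasting strncpy() with an emulated strscpy_pad() (the real one is kernel-only):

#include <stdio.h>
#include <string.h>

/* Minimal stand-in for the kernel's strscpy_pad(): always terminates
 * and zero-fills the rest of the destination. */
static void scpy_pad(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size - 1);

	memcpy(dst, src, len);
	memset(dst + len, 0, size - len);	/* NUL terminator + padding */
}

int main(void)
{
	char a[8], b[8];

	strncpy(a, "12345678", sizeof(a));	/* fills a; no terminator */
	scpy_pad(b, "12345678", sizeof(b));	/* "1234567" + NUL */
	printf("%.8s / %s\n", a, b);
	return 0;
}
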
4615     diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
4616     index b3105ac1381a..851ff1feadd5 100644
4617     --- a/include/uapi/linux/sched.h
4618     +++ b/include/uapi/linux/sched.h
4619     @@ -33,6 +33,7 @@
4620     #define CLONE_NEWNET 0x40000000 /* New network namespace */
4621     #define CLONE_IO 0x80000000 /* Clone io context */
4622    
4623     +#ifndef __ASSEMBLY__
4624     /*
4625     * Arguments for the clone3 syscall
4626     */
4627     @@ -46,6 +47,7 @@ struct clone_args {
4628     __aligned_u64 stack_size;
4629     __aligned_u64 tls;
4630     };
4631     +#endif
4632    
4633     /*
4634     * Scheduling policies
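
Wrapping struct clone_args in #ifndef __ASSEMBLY__ above follows the usual uapi-header convention: assembly files can include the header for the CLONE_* constants, but an assembler would reject C constructs such as struct definitions. A header sketch of the guard pattern, with illustrative names:

/* example_uapi.h -- sketch of the guard pattern */
#define EXAMPLE_FLAG_A 0x1	/* visible to both C and assembly */
#define EXAMPLE_FLAG_B 0x2

#ifndef __ASSEMBLY__
/* Only the C compiler ever sees this part. */
struct example_args {
	unsigned long flags;
};
#endif
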
4635     diff --git a/kernel/elfcore.c b/kernel/elfcore.c
4636     index fc482c8e0bd8..57fb4dcff434 100644
4637     --- a/kernel/elfcore.c
4638     +++ b/kernel/elfcore.c
4639     @@ -3,6 +3,7 @@
4640     #include <linux/fs.h>
4641     #include <linux/mm.h>
4642     #include <linux/binfmts.h>
4643     +#include <linux/elfcore.h>
4644    
4645     Elf_Half __weak elf_core_extra_phdrs(void)
4646     {
4647     diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
4648     index 89bab079e7a4..e84d21aa0722 100644
4649     --- a/kernel/locking/qspinlock_paravirt.h
4650     +++ b/kernel/locking/qspinlock_paravirt.h
4651     @@ -269,7 +269,7 @@ pv_wait_early(struct pv_node *prev, int loop)
4652     if ((loop & PV_PREV_CHECK_MASK) != 0)
4653     return false;
4654    
4655     - return READ_ONCE(prev->state) != vcpu_running || vcpu_is_preempted(prev->cpu);
4656     + return READ_ONCE(prev->state) != vcpu_running;
4657     }
4658    
4659     /*
4660     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
4661     index d38f007afea7..fffe790d98bb 100644
4662     --- a/kernel/sched/core.c
4663     +++ b/kernel/sched/core.c
4664     @@ -1537,7 +1537,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
4665     if (cpumask_equal(p->cpus_ptr, new_mask))
4666     goto out;
4667    
4668     - if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
4669     + dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
4670     + if (dest_cpu >= nr_cpu_ids) {
4671     ret = -EINVAL;
4672     goto out;
4673     }
4674     @@ -1558,7 +1559,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
4675     if (cpumask_test_cpu(task_cpu(p), new_mask))
4676     goto out;
4677    
4678     - dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
4679     if (task_running(rq, p) || p->state == TASK_WAKING) {
4680     struct migration_arg arg = { p, dest_cpu };
4681     /* Need help from migration thread: drop lock and wait. */
4682     diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
4683     index aa8d75804108..5110d91b1b0e 100644
4684     --- a/kernel/sched/membarrier.c
4685     +++ b/kernel/sched/membarrier.c
4686     @@ -226,7 +226,7 @@ static int membarrier_register_private_expedited(int flags)
4687     * groups, which use the same mm. (CLONE_VM but not
4688     * CLONE_THREAD).
4689     */
4690     - if (atomic_read(&mm->membarrier_state) & state)
4691     + if ((atomic_read(&mm->membarrier_state) & state) == state)
4692     return 0;
4693     atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED, &mm->membarrier_state);
4694     if (flags & MEMBARRIER_FLAG_SYNC_CORE)
4695     diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
4696     index 5be6154e2fd2..99fbfb8d9117 100644
4697     --- a/kernel/time/tick-broadcast-hrtimer.c
4698     +++ b/kernel/time/tick-broadcast-hrtimer.c
4699     @@ -42,34 +42,39 @@ static int bc_shutdown(struct clock_event_device *evt)
4700     */
4701     static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
4702     {
4703     - int bc_moved;
4704     /*
4705     - * We try to cancel the timer first. If the callback is on
4706     - * flight on some other cpu then we let it handle it. If we
4707     - * were able to cancel the timer nothing can rearm it as we
4708     - * own broadcast_lock.
4709     + * This is called either from enter/exit idle code or from the
4710     + * broadcast handler. In all cases tick_broadcast_lock is held.
4711     *
4712     - * However we can also be called from the event handler of
4713     - * ce_broadcast_hrtimer itself when it expires. We cannot
4714     - * restart the timer because we are in the callback, but we
4715     - * can set the expiry time and let the callback return
4716     - * HRTIMER_RESTART.
4717     + * hrtimer_cancel() cannot be called here, either from the
4718     + * broadcast handler or from the enter/exit idle code. The idle
4719     + * code can run into the problem described in bc_shutdown() and the
4720     + * broadcast handler cannot wait for itself to complete for obvious
4721     + * reasons.
4722     *
4723     - * Since we are in the idle loop at this point and because
4724     - * hrtimer_{start/cancel} functions call into tracing,
4725     - * calls to these functions must be bound within RCU_NONIDLE.
4726     + * Each caller tries to arm the hrtimer on its own CPU, but if the
4727     + * hrtimer callbback function is currently running, then
4728     + * hrtimer callback function is currently running, then
4729     + * which it is assigned at the moment.
4730     + *
4731     + * As this can be called from idle code, the hrtimer_start()
4732     + * invocation has to be wrapped with RCU_NONIDLE() as
4733     + * hrtimer_start() can call into tracing.
4734     */
4735     - RCU_NONIDLE({
4736     - bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
4737     - if (bc_moved)
4738     - hrtimer_start(&bctimer, expires,
4739     - HRTIMER_MODE_ABS_PINNED);});
4740     - if (bc_moved) {
4741     - /* Bind the "device" to the cpu */
4742     - bc->bound_on = smp_processor_id();
4743     - } else if (bc->bound_on == smp_processor_id()) {
4744     - hrtimer_set_expires(&bctimer, expires);
4745     - }
4746     + RCU_NONIDLE( {
4747     + hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
4748     + /*
4749     + * The core tick broadcast mode expects bc->bound_on to be set
4750     + * correctly to prevent a CPU which has the broadcast hrtimer
4751     + * armed from going deep idle.
4752     + *
4753     + * As tick_broadcast_lock is held, nothing can change the cpu
4754     + * base which was just established in hrtimer_start() above. So
4755     + * the below access is safe even without holding the hrtimer
4756     + * base lock.
4757     + */
4758     + bc->bound_on = bctimer.base->cpu_base->cpu;
4759     + } );
4760     return 0;
4761     }
4762    
4763     @@ -95,10 +100,6 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
4764     {
4765     ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
4766    
4767     - if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
4768     - if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
4769     - return HRTIMER_RESTART;
4770     -
4771     return HRTIMER_NORESTART;
4772     }
4773    
4774     diff --git a/kernel/time/timer.c b/kernel/time/timer.c
4775     index 343c7ba33b1c..7d63b7347066 100644
4776     --- a/kernel/time/timer.c
4777     +++ b/kernel/time/timer.c
4778     @@ -1593,24 +1593,26 @@ void timer_clear_idle(void)
4779     static int collect_expired_timers(struct timer_base *base,
4780     struct hlist_head *heads)
4781     {
4782     + unsigned long now = READ_ONCE(jiffies);
4783     +
4784     /*
4785     * NOHZ optimization. After a long idle sleep we need to forward the
4786     * base to current jiffies. Avoid a loop by searching the bitfield for
4787     * the next expiring timer.
4788     */
4789     - if ((long)(jiffies - base->clk) > 2) {
4790     + if ((long)(now - base->clk) > 2) {
4791     unsigned long next = __next_timer_interrupt(base);
4792    
4793     /*
4794     * If the next timer is ahead of time forward to current
4795     * jiffies, otherwise forward to the next expiry time:
4796     */
4797     - if (time_after(next, jiffies)) {
4798     + if (time_after(next, now)) {
4799     /*
4800     * The call site will increment base->clk and then
4801     * terminate the expiry loop immediately.
4802     */
4803     - base->clk = jiffies;
4804     + base->clk = now;
4805     return 0;
4806     }
4807     base->clk = next;
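
The timer.c hunk above snapshots jiffies once with READ_ONCE() and uses that snapshot for both the forward test and the time_after() comparison; jiffies advances from interrupt context, so two separate reads within the same function can disagree with each other. A sketch of the read-once pattern against a moving counter:

#include <stdio.h>

/* Stand-in for the interrupt-updated jiffies counter. */
static volatile unsigned long fake_jiffies;

static void interrupt_fires(void)
{
	fake_jiffies++;
}

static void collect(void)
{
	/* Read the moving counter exactly once; all later decisions in
	 * this function then agree with each other even if the real
	 * counter advances meanwhile. */
	unsigned long now = fake_jiffies;	/* kernel: READ_ONCE(jiffies) */

	interrupt_fires();	/* the counter moves under us */

	printf("decide on now=%lu while counter is already %lu\n",
	       now, (unsigned long)fake_jiffies);
}

int main(void)
{
	collect();
	return 0;
}
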
4808     diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
4809     index ca1255d14576..3e38a010003c 100644
4810     --- a/kernel/trace/bpf_trace.c
4811     +++ b/kernel/trace/bpf_trace.c
4812     @@ -500,14 +500,17 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
4813     .arg5_type = ARG_CONST_SIZE_OR_ZERO,
4814     };
4815    
4816     -static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
4817     -static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);
4818     +static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
4819     +struct bpf_nested_pt_regs {
4820     + struct pt_regs regs[3];
4821     +};
4822     +static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
4823     +static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
4824    
4825     u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
4826     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
4827     {
4828     - struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
4829     - struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
4830     + int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
4831     struct perf_raw_frag frag = {
4832     .copy = ctx_copy,
4833     .size = ctx_size,
4834     @@ -522,12 +525,25 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
4835     .data = meta,
4836     },
4837     };
4838     + struct perf_sample_data *sd;
4839     + struct pt_regs *regs;
4840     + u64 ret;
4841     +
4842     + if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
4843     + ret = -EBUSY;
4844     + goto out;
4845     + }
4846     + sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
4847     + regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
4848    
4849     perf_fetch_caller_regs(regs);
4850     perf_sample_data_init(sd, 0, 0);
4851     sd->raw = &raw;
4852    
4853     - return __bpf_perf_event_output(regs, map, flags, sd);
4854     + ret = __bpf_perf_event_output(regs, map, flags, sd);
4855     +out:
4856     + this_cpu_dec(bpf_event_output_nest_level);
4857     + return ret;
4858     }
4859    
4860     BPF_CALL_0(bpf_get_current_task)
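
The bpf_trace.c fix above replaces a single per-CPU scratch pt_regs/sample buffer with a small per-CPU array indexed by a nesting counter, so a nested invocation (say, an NMI-driven program firing while another program is mid-call) gets its own slot instead of clobbering the outer one, and anything deeper than the array bails out with -EBUSY. A single-threaded sketch of the counter/array mechanics; the kernel uses this_cpu_inc_return()/this_cpu_dec() on per-CPU variables:

#include <stdio.h>

#define MAX_NEST 3

static int nest_level;			/* per-CPU in the kernel */
static char scratch[MAX_NEST][32];	/* one buffer per nesting depth */

static long event_output(const char *data, int simulate_nested)
{
	int level = ++nest_level;	/* this_cpu_inc_return() */
	long ret = 0;

	if (level > MAX_NEST) {		/* nested too deeply: refuse */
		ret = -16;		/* -EBUSY */
		goto out;
	}
	snprintf(scratch[level - 1], sizeof(scratch[0]), "%s@%d", data, level);
	if (simulate_nested)		/* e.g. an NMI firing mid-call */
		event_output("nested", simulate_nested - 1);
	/* Our slot is intact even though a nested call ran above. */
	printf("%s\n", scratch[level - 1]);
out:
	nest_level--;			/* this_cpu_dec() */
	return ret;
}

int main(void)
{
	event_output("outer", 3);	/* the 4th level returns -EBUSY */
	return 0;
}
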
4861     diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
4862     index ca6b0dff60c5..dd310d3b5843 100644
4863     --- a/kernel/trace/trace_events_hist.c
4864     +++ b/kernel/trace/trace_events_hist.c
4865     @@ -2785,6 +2785,8 @@ static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
4866     return NULL;
4867     }
4868    
4869     + alias->var_ref_idx = var_ref->var_ref_idx;
4870     +
4871     return alias;
4872     }
4873    
4874     diff --git a/mm/usercopy.c b/mm/usercopy.c
4875     index 98e924864554..660717a1ea5c 100644
4876     --- a/mm/usercopy.c
4877     +++ b/mm/usercopy.c
4878     @@ -11,6 +11,7 @@
4879     #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
4880    
4881     #include <linux/mm.h>
4882     +#include <linux/highmem.h>
4883     #include <linux/slab.h>
4884     #include <linux/sched.h>
4885     #include <linux/sched/task.h>
4886     @@ -227,7 +228,12 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
4887     if (!virt_addr_valid(ptr))
4888     return;
4889    
4890     - page = virt_to_head_page(ptr);
4891     + /*
4892     + * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
4893     + * highmem page or fallback to virt_to_page(). The following
4894     + * is effectively a highmem-aware virt_to_head_page().
4895     + */
4896     + page = compound_head(kmap_to_page((void *)ptr));
4897    
4898     if (PageSlab(page)) {
4899     /* Check slab allocator for flags and size. */
4900     diff --git a/net/9p/client.c b/net/9p/client.c
4901     index 9622f3e469f6..1d48afc7033c 100644
4902     --- a/net/9p/client.c
4903     +++ b/net/9p/client.c
4904     @@ -281,6 +281,7 @@ p9_tag_alloc(struct p9_client *c, int8_t type, unsigned int max_size)
4905    
4906     p9pdu_reset(&req->tc);
4907     p9pdu_reset(&req->rc);
4908     + req->t_err = 0;
4909     req->status = REQ_STATUS_ALLOC;
4910     init_waitqueue_head(&req->wq);
4911     INIT_LIST_HEAD(&req->req_list);
4912     diff --git a/net/mac80211/util.c b/net/mac80211/util.c
4913     index ad1e58184c4e..21212faec6d0 100644
4914     --- a/net/mac80211/util.c
4915     +++ b/net/mac80211/util.c
4916     @@ -247,7 +247,8 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
4917     struct sta_info *sta;
4918     int i;
4919    
4920     - spin_lock_bh(&fq->lock);
4921     + local_bh_disable();
4922     + spin_lock(&fq->lock);
4923    
4924     if (sdata->vif.type == NL80211_IFTYPE_AP)
4925     ps = &sdata->bss->ps;
4926     @@ -273,9 +274,9 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
4927     &txqi->flags))
4928     continue;
4929    
4930     - spin_unlock_bh(&fq->lock);
4931     + spin_unlock(&fq->lock);
4932     drv_wake_tx_queue(local, txqi);
4933     - spin_lock_bh(&fq->lock);
4934     + spin_lock(&fq->lock);
4935     }
4936     }
4937    
4938     @@ -288,12 +289,14 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
4939     (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac)
4940     goto out;
4941    
4942     - spin_unlock_bh(&fq->lock);
4943     + spin_unlock(&fq->lock);
4944    
4945     drv_wake_tx_queue(local, txqi);
4946     + local_bh_enable();
4947     return;
4948     out:
4949     - spin_unlock_bh(&fq->lock);
4950     + spin_unlock(&fq->lock);
4951     + local_bh_enable();
4952     }
4953    
4954     static void
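
In the mac80211 hunk above, the old code used spin_unlock_bh() around drv_wake_tx_queue(), which re-enabled bottom halves at exactly the point where the driver callback runs; the change takes local_bh_disable() once for the whole section and uses plain spin_lock()/spin_unlock() inside, so BHs stay off even while fq->lock is dropped for the callback. A schematic user-space sketch, with counters standing in for the kernel primitives:

#include <stdio.h>

static int bh_disabled;	/* stand-in for the local BH-disable count */
static int lock_held;	/* stand-in for the fq->lock spinlock */

static void wake_queue(void)
{
	/* Called without the lock, but still with BHs disabled. */
	printf("wake: lock=%d bh_disabled=%d\n", lock_held, bh_disabled);
}

int main(void)
{
	int i;

	bh_disabled++;		/* local_bh_disable(), once */
	lock_held = 1;		/* spin_lock(&fq->lock) */
	for (i = 0; i < 2; i++) {
		lock_held = 0;	/* drop only the lock... */
		wake_queue();	/* ...BHs stay disabled here */
		lock_held = 1;	/* spin_lock(&fq->lock) */
	}
	lock_held = 0;		/* spin_unlock(&fq->lock) */
	bh_disabled--;		/* local_bh_enable() */
	return 0;
}
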
4955     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
4956     index d47469f824a1..3b81323fa017 100644
4957     --- a/net/netfilter/nf_tables_api.c
4958     +++ b/net/netfilter/nf_tables_api.c
4959     @@ -3562,8 +3562,11 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
4960     NFT_SET_OBJECT))
4961     return -EINVAL;
4962     /* Only one of these operations is supported */
4963     - if ((flags & (NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT)) ==
4964     - (NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT))
4965     + if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
4966     + (NFT_SET_MAP | NFT_SET_OBJECT))
4967     + return -EOPNOTSUPP;
4968     + if ((flags & (NFT_SET_EVAL | NFT_SET_OBJECT)) ==
4969     + (NFT_SET_EVAL | NFT_SET_OBJECT))
4970     return -EOPNOTSUPP;
4971     }
4972    
4973     diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
4974     index c0560bf3c31b..660bad688e2b 100644
4975     --- a/net/netfilter/nft_lookup.c
4976     +++ b/net/netfilter/nft_lookup.c
4977     @@ -73,9 +73,6 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
4978     if (IS_ERR(set))
4979     return PTR_ERR(set);
4980    
4981     - if (set->flags & NFT_SET_EVAL)
4982     - return -EOPNOTSUPP;
4983     -
4984     priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]);
4985     err = nft_validate_register_load(priv->sreg, set->klen);
4986     if (err < 0)
4987     diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
4988     index 7a75f34ad393..f7f78566be46 100644
4989     --- a/net/sunrpc/clnt.c
4990     +++ b/net/sunrpc/clnt.c
4991     @@ -1837,7 +1837,7 @@ call_allocate(struct rpc_task *task)
4992     return;
4993     }
4994    
4995     - rpc_exit(task, -ERESTARTSYS);
4996     + rpc_call_rpcerror(task, -ERESTARTSYS);
4997     }
4998    
4999     static int
5000     @@ -2482,6 +2482,7 @@ call_decode(struct rpc_task *task)
5001     struct rpc_clnt *clnt = task->tk_client;
5002     struct rpc_rqst *req = task->tk_rqstp;
5003     struct xdr_stream xdr;
5004     + int err;
5005    
5006     dprint_status(task);
5007    
5008     @@ -2504,6 +2505,15 @@ call_decode(struct rpc_task *task)
5009     * before it changed req->rq_reply_bytes_recvd.
5010     */
5011     smp_rmb();
5012     +
5013     + /*
5014     + * Did we ever call xprt_complete_rqst()? If not, we should assume
5015     + * the message is incomplete.
5016     + */
5017     + err = -EAGAIN;
5018     + if (!req->rq_reply_bytes_recvd)
5019     + goto out;
5020     +
5021     req->rq_rcv_buf.len = req->rq_private_buf.len;
5022    
5023     /* Check that the softirq receive buffer is valid */
5024     @@ -2512,7 +2522,9 @@ call_decode(struct rpc_task *task)
5025    
5026     xdr_init_decode(&xdr, &req->rq_rcv_buf,
5027     req->rq_rcv_buf.head[0].iov_base, req);
5028     - switch (rpc_decode_header(task, &xdr)) {
5029     + err = rpc_decode_header(task, &xdr);
5030     +out:
5031     + switch (err) {
5032     case 0:
5033     task->tk_action = rpc_exit_task;
5034     task->tk_status = rpcauth_unwrap_resp(task, &xdr);
5035     @@ -2561,7 +2573,7 @@ rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr)
5036     return 0;
5037     out_fail:
5038     trace_rpc_bad_callhdr(task);
5039     - rpc_exit(task, error);
5040     + rpc_call_rpcerror(task, error);
5041     return error;
5042     }
5043    
5044     @@ -2628,7 +2640,7 @@ out_garbage:
5045     return -EAGAIN;
5046     }
5047     out_err:
5048     - rpc_exit(task, error);
5049     + rpc_call_rpcerror(task, error);
5050     return error;
5051    
5052     out_unparsable:
5053     diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
5054     index 1f275aba786f..53934fe73a9d 100644
5055     --- a/net/sunrpc/sched.c
5056     +++ b/net/sunrpc/sched.c
5057     @@ -930,8 +930,10 @@ static void __rpc_execute(struct rpc_task *task)
5058     /*
5059     * Signalled tasks should exit rather than sleep.
5060     */
5061     - if (RPC_SIGNALLED(task))
5062     + if (RPC_SIGNALLED(task)) {
5063     + task->tk_rpc_status = -ERESTARTSYS;
5064     rpc_exit(task, -ERESTARTSYS);
5065     + }
5066    
5067     /*
5068     * The queue->lock protects against races with
5069     @@ -967,6 +969,7 @@ static void __rpc_execute(struct rpc_task *task)
5070     */
5071     dprintk("RPC: %5u got signal\n", task->tk_pid);
5072     set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
5073     + task->tk_rpc_status = -ERESTARTSYS;
5074     rpc_exit(task, -ERESTARTSYS);
5075     }
5076     dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
5077     diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
5078     index 2ec349ed4770..f4763e8a6761 100644
5079     --- a/net/sunrpc/xprtrdma/transport.c
5080     +++ b/net/sunrpc/xprtrdma/transport.c
5081     @@ -571,6 +571,7 @@ xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
5082     return;
5083    
5084     out_sleep:
5085     + set_bit(XPRT_CONGESTED, &xprt->state);
5086     rpc_sleep_on(&xprt->backlog, task, NULL);
5087     task->tk_status = -EAGAIN;
5088     }
5089     @@ -589,7 +590,8 @@ xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
5090    
5091     memset(rqst, 0, sizeof(*rqst));
5092     rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
5093     - rpc_wake_up_next(&xprt->backlog);
5094     + if (unlikely(!rpc_wake_up_next(&xprt->backlog)))
5095     + clear_bit(XPRT_CONGESTED, &xprt->state);
5096     }
5097    
5098     static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
5099     diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
5100     index 805b1f35e1ca..2bd9b4de0e32 100644
5101     --- a/net/sunrpc/xprtrdma/verbs.c
5102     +++ b/net/sunrpc/xprtrdma/verbs.c
5103     @@ -605,10 +605,10 @@ void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt)
5104     * Unlike a normal reconnection, a fresh PD and a new set
5105     * of MRs and buffers is needed.
5106     */
5107     -static int
5108     -rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
5109     - struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
5110     +static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
5111     + struct ib_qp_init_attr *qp_init_attr)
5112     {
5113     + struct rpcrdma_ia *ia = &r_xprt->rx_ia;
5114     int rc, err;
5115    
5116     trace_xprtrdma_reinsert(r_xprt);
5117     @@ -625,7 +625,7 @@ rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
5118     }
5119    
5120     rc = -ENETUNREACH;
5121     - err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
5122     + err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr);
5123     if (err) {
5124     pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
5125     goto out3;
5126     @@ -642,16 +642,16 @@ out1:
5127     return rc;
5128     }
5129    
5130     -static int
5131     -rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
5132     - struct rpcrdma_ia *ia)
5133     +static int rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt,
5134     + struct ib_qp_init_attr *qp_init_attr)
5135     {
5136     + struct rpcrdma_ia *ia = &r_xprt->rx_ia;
5137     struct rdma_cm_id *id, *old;
5138     int err, rc;
5139    
5140     trace_xprtrdma_reconnect(r_xprt);
5141    
5142     - rpcrdma_ep_disconnect(ep, ia);
5143     + rpcrdma_ep_disconnect(&r_xprt->rx_ep, ia);
5144    
5145     rc = -EHOSTUNREACH;
5146     id = rpcrdma_create_id(r_xprt, ia);
5147     @@ -673,7 +673,7 @@ rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
5148     goto out_destroy;
5149     }
5150    
5151     - err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
5152     + err = rdma_create_qp(id, ia->ri_pd, qp_init_attr);
5153     if (err)
5154     goto out_destroy;
5155    
5156     @@ -698,25 +698,27 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
5157     struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
5158     rx_ia);
5159     struct rpc_xprt *xprt = &r_xprt->rx_xprt;
5160     + struct ib_qp_init_attr qp_init_attr;
5161     int rc;
5162    
5163     retry:
5164     + memcpy(&qp_init_attr, &ep->rep_attr, sizeof(qp_init_attr));
5165     switch (ep->rep_connected) {
5166     case 0:
5167     dprintk("RPC: %s: connecting...\n", __func__);
5168     - rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
5169     + rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &qp_init_attr);
5170     if (rc) {
5171     rc = -ENETUNREACH;
5172     goto out_noupdate;
5173     }
5174     break;
5175     case -ENODEV:
5176     - rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
5177     + rc = rpcrdma_ep_recreate_xprt(r_xprt, &qp_init_attr);
5178     if (rc)
5179     goto out_noupdate;
5180     break;
5181     default:
5182     - rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
5183     + rc = rpcrdma_ep_reconnect(r_xprt, &qp_init_attr);
5184     if (rc)
5185     goto out;
5186     }
5187     diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5188     index c9cfc796eccf..f03459ddc840 100644
5189     --- a/net/wireless/nl80211.c
5190     +++ b/net/wireless/nl80211.c
5191     @@ -201,6 +201,38 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
5192     return __cfg80211_rdev_from_attrs(netns, info->attrs);
5193     }
5194    
5195     +static int validate_beacon_head(const struct nlattr *attr,
5196     + struct netlink_ext_ack *extack)
5197     +{
5198     + const u8 *data = nla_data(attr);
5199     + unsigned int len = nla_len(attr);
5200     + const struct element *elem;
5201     + const struct ieee80211_mgmt *mgmt = (void *)data;
5202     + unsigned int fixedlen = offsetof(struct ieee80211_mgmt,
5203     + u.beacon.variable);
5204     +
5205     + if (len < fixedlen)
5206     + goto err;
5207     +
5208     + if (ieee80211_hdrlen(mgmt->frame_control) !=
5209     + offsetof(struct ieee80211_mgmt, u.beacon))
5210     + goto err;
5211     +
5212     + data += fixedlen;
5213     + len -= fixedlen;
5214     +
5215     + for_each_element(elem, data, len) {
5216     + /* nothing */
5217     + }
5218     +
5219     + if (for_each_element_completed(elem, data, len))
5220     + return 0;
5221     +
5222     +err:
5223     + NL_SET_ERR_MSG_ATTR(extack, attr, "malformed beacon head");
5224     + return -EINVAL;
5225     +}
5226     +
5227     static int validate_ie_attr(const struct nlattr *attr,
5228     struct netlink_ext_ack *extack)
5229     {
5230     @@ -322,8 +354,9 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
5231    
5232     [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
5233     [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 },
5234     - [NL80211_ATTR_BEACON_HEAD] = { .type = NLA_BINARY,
5235     - .len = IEEE80211_MAX_DATA_LEN },
5236     + [NL80211_ATTR_BEACON_HEAD] =
5237     + NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_beacon_head,
5238     + IEEE80211_MAX_DATA_LEN),
5239     [NL80211_ATTR_BEACON_TAIL] =
5240     NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
5241     IEEE80211_MAX_DATA_LEN),
5242     @@ -2564,6 +2597,8 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
5243    
5244     control_freq = nla_get_u32(attrs[NL80211_ATTR_WIPHY_FREQ]);
5245    
5246     + memset(chandef, 0, sizeof(*chandef));
5247     +
5248     chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
5249     chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
5250     chandef->center_freq1 = control_freq;
5251     @@ -3092,7 +3127,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
5252    
5253     if (rdev->ops->get_channel) {
5254     int ret;
5255     - struct cfg80211_chan_def chandef;
5256     + struct cfg80211_chan_def chandef = {};
5257    
5258     ret = rdev_get_channel(rdev, wdev, &chandef);
5259     if (ret == 0) {
5260     diff --git a/net/wireless/reg.c b/net/wireless/reg.c
5261     index 327479ce69f5..36eba5804efe 100644
5262     --- a/net/wireless/reg.c
5263     +++ b/net/wireless/reg.c
5264     @@ -2108,7 +2108,7 @@ static void reg_call_notifier(struct wiphy *wiphy,
5265    
5266     static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
5267     {
5268     - struct cfg80211_chan_def chandef;
5269     + struct cfg80211_chan_def chandef = {};
5270     struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
5271     enum nl80211_iftype iftype;
5272    
5273     diff --git a/net/wireless/scan.c b/net/wireless/scan.c
5274     index d66e6d4b7555..27d76c4c5cea 100644
5275     --- a/net/wireless/scan.c
5276     +++ b/net/wireless/scan.c
5277     @@ -1711,7 +1711,12 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
5278     return;
5279     new_ie_len -= trans_ssid[1];
5280     mbssid = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen);
5281     - if (!mbssid)
5282     + /*
5283     + * It's not valid to have the MBSSID element before SSID
5284     + * ignore if that happens - the code below assumes it is
5285     + * after (while copying things inbetween).
5286     + */
5287     + if (!mbssid || mbssid < trans_ssid)
5288     return;
5289     new_ie_len -= mbssid[1];
5290     rcu_read_lock();
5291     diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
5292     index 46e4d69db845..b1f94730bde2 100644
5293     --- a/net/wireless/wext-compat.c
5294     +++ b/net/wireless/wext-compat.c
5295     @@ -797,7 +797,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
5296     {
5297     struct wireless_dev *wdev = dev->ieee80211_ptr;
5298     struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
5299     - struct cfg80211_chan_def chandef;
5300     + struct cfg80211_chan_def chandef = {};
5301     int ret;
5302    
5303     switch (wdev->iftype) {
5304     diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
5305     index d4c7b8e1b083..73044fc6a952 100644
5306     --- a/security/integrity/ima/ima_crypto.c
5307     +++ b/security/integrity/ima/ima_crypto.c
5308     @@ -268,8 +268,16 @@ static int ima_calc_file_hash_atfm(struct file *file,
5309     rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
5310     rc = integrity_kernel_read(file, offset, rbuf[active],
5311     rbuf_len);
5312     - if (rc != rbuf_len)
5313     + if (rc != rbuf_len) {
5314     + if (rc >= 0)
5315     + rc = -EINVAL;
5316     + /*
5317     + * Forward current rc, do not overwrite with return value
5318     + * from ahash_wait()
5319     + */
5320     + ahash_wait(ahash_rc, &wait);
5321     goto out3;
5322     + }
5323    
5324     if (rbuf[1] && offset) {
5325     /* Using two buffers, and it is not the first
5326     diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
5327     index 7cbaedffa1ef..8e5e48f6a24b 100644
5328     --- a/sound/soc/codecs/sgtl5000.c
5329     +++ b/sound/soc/codecs/sgtl5000.c
5330     @@ -31,6 +31,13 @@
5331     #define SGTL5000_DAP_REG_OFFSET 0x0100
5332     #define SGTL5000_MAX_REG_OFFSET 0x013A
5333    
5334     +/* Delay for the VAG ramp up */
5335     +#define SGTL5000_VAG_POWERUP_DELAY 500 /* ms */
5336     +/* Delay for the VAG ramp down */
5337     +#define SGTL5000_VAG_POWERDOWN_DELAY 500 /* ms */
5338     +
5339     +#define SGTL5000_OUTPUTS_MUTE (SGTL5000_HP_MUTE | SGTL5000_LINE_OUT_MUTE)
5340     +
5341     /* default value of sgtl5000 registers */
5342     static const struct reg_default sgtl5000_reg_defaults[] = {
5343     { SGTL5000_CHIP_DIG_POWER, 0x0000 },
5344     @@ -123,6 +130,13 @@ enum {
5345     I2S_SCLK_STRENGTH_HIGH,
5346     };
5347    
5348     +enum {
5349     + HP_POWER_EVENT,
5350     + DAC_POWER_EVENT,
5351     + ADC_POWER_EVENT,
5352     + LAST_POWER_EVENT = ADC_POWER_EVENT
5353     +};
5354     +
5355     /* sgtl5000 private structure in codec */
5356     struct sgtl5000_priv {
5357     int sysclk; /* sysclk rate */
5358     @@ -137,8 +151,109 @@ struct sgtl5000_priv {
5359     u8 micbias_voltage;
5360     u8 lrclk_strength;
5361     u8 sclk_strength;
5362     + u16 mute_state[LAST_POWER_EVENT + 1];
5363     };
5364    
5365     +static inline int hp_sel_input(struct snd_soc_component *component)
5366     +{
5367     + return (snd_soc_component_read32(component, SGTL5000_CHIP_ANA_CTRL) &
5368     + SGTL5000_HP_SEL_MASK) >> SGTL5000_HP_SEL_SHIFT;
5369     +}
5370     +
5371     +static inline u16 mute_output(struct snd_soc_component *component,
5372     + u16 mute_mask)
5373     +{
5374     + u16 mute_reg = snd_soc_component_read32(component,
5375     + SGTL5000_CHIP_ANA_CTRL);
5376     +
5377     + snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
5378     + mute_mask, mute_mask);
5379     + return mute_reg;
5380     +}
5381     +
5382     +static inline void restore_output(struct snd_soc_component *component,
5383     + u16 mute_mask, u16 mute_reg)
5384     +{
5385     + snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
5386     + mute_mask, mute_reg);
5387     +}
5388     +
5389     +static void vag_power_on(struct snd_soc_component *component, u32 source)
5390     +{
5391     + if (snd_soc_component_read32(component, SGTL5000_CHIP_ANA_POWER) &
5392     + SGTL5000_VAG_POWERUP)
5393     + return;
5394     +
5395     + snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
5396     + SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
5397     +
5398     + /* When VAG powering on to get local loop from Line-In, the sleep
5399     + * is required to avoid loud pop.
5400     + */
5401     + if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN &&
5402     + source == HP_POWER_EVENT)
5403     + msleep(SGTL5000_VAG_POWERUP_DELAY);
5404     +}
5405     +
5406     +static int vag_power_consumers(struct snd_soc_component *component,
5407     + u16 ana_pwr_reg, u32 source)
5408     +{
5409     + int consumers = 0;
5410     +
5411     + /* count dac/adc consumers unconditional */
5412     + if (ana_pwr_reg & SGTL5000_DAC_POWERUP)
5413     + consumers++;
5414     + if (ana_pwr_reg & SGTL5000_ADC_POWERUP)
5415     + consumers++;
5416     +
5417     + /*
5418     + * If the event comes from HP and Line-In is selected,
5419     + * current action is 'DAC to be powered down'.
5420     + * As HP_POWERUP is not set when HP muxed to line-in,
5421     + * we need to keep VAG power ON.
5422     + */
5423     + if (source == HP_POWER_EVENT) {
5424     + if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN)
5425     + consumers++;
5426     + } else {
5427     + if (ana_pwr_reg & SGTL5000_HP_POWERUP)
5428     + consumers++;
5429     + }
5430     +
5431     + return consumers;
5432     +}
5433     +
5434     +static void vag_power_off(struct snd_soc_component *component, u32 source)
5435     +{
5436     + u16 ana_pwr = snd_soc_component_read32(component,
5437     + SGTL5000_CHIP_ANA_POWER);
5438     +
5439     + if (!(ana_pwr & SGTL5000_VAG_POWERUP))
5440     + return;
5441     +
5442     + /*
5443     + * This function calls when any of VAG power consumers is disappearing.
5444     + * Thus, if there is more than one consumer at the moment, as minimum
5445     + * one consumer will definitely stay after the end of the current
5446     + * event.
5447     + * Don't clear VAG_POWERUP if 2 or more consumers of VAG present:
5448     + * - LINE_IN (for HP events) / HP (for DAC/ADC events)
5449     + * - DAC
5450     + * - ADC
5451     + * (the current consumer is disappearing right now)
5452     + */
5453     + if (vag_power_consumers(component, ana_pwr, source) >= 2)
5454     + return;
5455     +
5456     + snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
5457     + SGTL5000_VAG_POWERUP, 0);
5458     + /* In power down case, we need wait 400-1000 ms
5459     + * when VAG fully ramped down.
5460     + * As longer we wait, as smaller pop we've got.
5461     + */
5462     + msleep(SGTL5000_VAG_POWERDOWN_DELAY);
5463     +}
5464     +
5465     /*
5466     * mic_bias power on/off share the same register bits with
5467     * output impedance of mic bias, when power on mic bias, we
5468     @@ -170,36 +285,46 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
5469     return 0;
5470     }
5471    
5472     -/*
5473     - * As manual described, ADC/DAC only works when VAG powerup,
5474     - * So enabled VAG before ADC/DAC up.
5475     - * In power down case, we need wait 400ms when vag fully ramped down.
5476     - */
5477     -static int power_vag_event(struct snd_soc_dapm_widget *w,
5478     - struct snd_kcontrol *kcontrol, int event)
5479     +static int vag_and_mute_control(struct snd_soc_component *component,
5480     + int event, int event_source)
5481     {
5482     - struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
5483     - const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
5484     + static const u16 mute_mask[] = {
5485     + /*
5486     + * Mask for HP_POWER_EVENT.
5487     + * Muxing Headphones have to be wrapped with mute/unmute
5488     + * headphones only.
5489     + */
5490     + SGTL5000_HP_MUTE,
5491     + /*
5492     + * Masks for DAC_POWER_EVENT/ADC_POWER_EVENT.
5493     + * Muxing DAC or ADC block have to wrapped with mute/unmute
5494     + * both headphones and line-out.
5495     + */
5496     + SGTL5000_OUTPUTS_MUTE,
5497     + SGTL5000_OUTPUTS_MUTE
5498     + };
5499     +
5500     + struct sgtl5000_priv *sgtl5000 =
5501     + snd_soc_component_get_drvdata(component);
5502    
5503     switch (event) {
5504     + case SND_SOC_DAPM_PRE_PMU:
5505     + sgtl5000->mute_state[event_source] =
5506     + mute_output(component, mute_mask[event_source]);
5507     + break;
5508     case SND_SOC_DAPM_POST_PMU:
5509     - snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
5510     - SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
5511     - msleep(400);
5512     + vag_power_on(component, event_source);
5513     + restore_output(component, mute_mask[event_source],
5514     + sgtl5000->mute_state[event_source]);
5515     break;
5516     -
5517     case SND_SOC_DAPM_PRE_PMD:
5518     - /*
5519     - * Don't clear VAG_POWERUP, when both DAC and ADC are
5520     - * operational to prevent inadvertently starving the
5521     - * other one of them.
5522     - */
5523     - if ((snd_soc_component_read32(component, SGTL5000_CHIP_ANA_POWER) &
5524     - mask) != mask) {
5525     - snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
5526     - SGTL5000_VAG_POWERUP, 0);
5527     - msleep(400);
5528     - }
5529     + sgtl5000->mute_state[event_source] =
5530     + mute_output(component, mute_mask[event_source]);
5531     + vag_power_off(component, event_source);
5532     + break;
5533     + case SND_SOC_DAPM_POST_PMD:
5534     + restore_output(component, mute_mask[event_source],
5535     + sgtl5000->mute_state[event_source]);
5536     break;
5537     default:
5538     break;
5539     @@ -208,6 +333,41 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
5540     return 0;
5541     }
5542    
5543     +/*
5544     + * Mute the headphone when powering it up/down.
5545     + * Control VAG power on HP power path.
5546     + */
5547     +static int headphone_pga_event(struct snd_soc_dapm_widget *w,
5548     + struct snd_kcontrol *kcontrol, int event)
5549     +{
5550     + struct snd_soc_component *component =
5551     + snd_soc_dapm_to_component(w->dapm);
5552     +
5553     + return vag_and_mute_control(component, event, HP_POWER_EVENT);
5554     +}
5555     +
5556     +/* As the manual describes, powering the ADC/DAC up/down requires
5557     + * muting the outputs to avoid pops.
5558     + * Control VAG power on ADC/DAC power path.
5559     + */
5560     +static int adc_updown_depop(struct snd_soc_dapm_widget *w,
5561     + struct snd_kcontrol *kcontrol, int event)
5562     +{
5563     + struct snd_soc_component *component =
5564     + snd_soc_dapm_to_component(w->dapm);
5565     +
5566     + return vag_and_mute_control(component, event, ADC_POWER_EVENT);
5567     +}
5568     +
5569     +static int dac_updown_depop(struct snd_soc_dapm_widget *w,
5570     + struct snd_kcontrol *kcontrol, int event)
5571     +{
5572     + struct snd_soc_component *component =
5573     + snd_soc_dapm_to_component(w->dapm);
5574     +
5575     + return vag_and_mute_control(component, event, DAC_POWER_EVENT);
5576     +}
5577     +
5578     /* input sources for ADC */
5579     static const char *adc_mux_text[] = {
5580     "MIC_IN", "LINE_IN"
5581     @@ -280,7 +440,10 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
5582     mic_bias_event,
5583     SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
5584    
5585     - SND_SOC_DAPM_PGA("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0),
5586     + SND_SOC_DAPM_PGA_E("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0,
5587     + headphone_pga_event,
5588     + SND_SOC_DAPM_PRE_POST_PMU |
5589     + SND_SOC_DAPM_PRE_POST_PMD),
5590     SND_SOC_DAPM_PGA("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0),
5591    
5592     SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux),
5593     @@ -301,11 +464,12 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
5594     0, SGTL5000_CHIP_DIG_POWER,
5595     1, 0),
5596    
5597     - SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
5598     - SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0),
5599     -
5600     - SND_SOC_DAPM_PRE("VAG_POWER_PRE", power_vag_event),
5601     - SND_SOC_DAPM_POST("VAG_POWER_POST", power_vag_event),
5602     + SND_SOC_DAPM_ADC_E("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0,
5603     + adc_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
5604     + SND_SOC_DAPM_PRE_POST_PMD),
5605     + SND_SOC_DAPM_DAC_E("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0,
5606     + dac_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
5607     + SND_SOC_DAPM_PRE_POST_PMD),
5608     };
5609    
5610     /* routes for sgtl5000 */
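
The sgtl5000 hunks above replace the single VAG pre/post handler with per-path PGA/ADC/DAC event handlers that wrap every power transition in a mute/unmute pair, saving and restoring the previous mute state. A minimal userspace model of that sequencing follows; all names are hypothetical stand-ins, not the driver's API:

#include <stdio.h>
#include <stdbool.h>

static bool muted;

/* Mute and report whether the output was already muted. */
static bool mute_output(void)
{
	bool was_muted = muted;

	muted = true;
	return was_muted;
}

/* Restore the mute state captured before the transition. */
static void restore_output(bool was_muted)
{
	muted = was_muted;
}

/* PRE_PMU/PRE_PMD mute first; POST_PMU/POST_PMD restore after. */
static void power_transition(const char *what)
{
	bool saved = mute_output();

	printf("%s with muted=%d\n", what, muted);
	restore_output(saved);
}

int main(void)
{
	power_transition("power up DAC");
	power_transition("power down DAC");
	return 0;
}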
5611     diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
5612     index 7065bb5b2752..e1357dbb16c2 100644
5613     --- a/tools/lib/bpf/btf_dump.c
5614     +++ b/tools/lib/bpf/btf_dump.c
5615     @@ -1213,6 +1213,7 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
5616     return;
5617     }
5618    
5619     + next_id = decls->ids[decls->cnt - 1];
5620     next_t = btf__type_by_id(d->btf, next_id);
5621     multidim = btf_kind_of(next_t) == BTF_KIND_ARRAY;
5622     /* we need space if we have named non-pointer */
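
The btf_dump fix re-reads the id at the top of the declaration stack before the type lookup; previously next_id could be used without being refreshed for this iteration. A plain-C model of the peek-before-use pattern (names hypothetical):

#include <stdio.h>

struct id_stack {
	int ids[8];
	int cnt;
};

/* Re-read the top of the stack right before dereferencing it;
 * caching the value across stack mutations is the bug fixed above. */
static int peek_top(const struct id_stack *s)
{
	return s->ids[s->cnt - 1];
}

int main(void)
{
	struct id_stack s = { .ids = { 1, 2, 3 }, .cnt = 3 };

	printf("next id: %d\n", peek_top(&s));
	return 0;
}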
5623     diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
5624     index 86ce17a1f7fb..a39cdd0d890d 100644
5625     --- a/tools/lib/traceevent/Makefile
5626     +++ b/tools/lib/traceevent/Makefile
5627     @@ -266,8 +266,8 @@ endef
5628    
5629     define do_generate_dynamic_list_file
5630     symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \
5631     - xargs echo "U W w" | tr ' ' '\n' | sort -u | xargs echo`;\
5632     - if [ "$$symbol_type" = "U W w" ];then \
5633     + xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
5634     + if [ "$$symbol_type" = "U W" ];then \
5635     (echo '{'; \
5636     $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
5637     echo '};'; \
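
The Makefile change folds nm's lowercase 'w' (weak) into 'W' before the comparison, so a binary whose weak symbols are reported lowercase still passes the "only undefined or weak symbols" check. The same normalization expressed in C, purely as illustration:

#include <stdio.h>

/* nm may print 'w' or 'W' for weak symbols depending on binding
 * details; for the "is everything undefined or weak?" test the
 * case is irrelevant, so fold both to 'W'. */
static char fold_symbol_type(char t)
{
	return t == 'w' ? 'W' : t;
}

int main(void)
{
	const char types[] = { 'U', 'w', 'W' };

	for (unsigned int i = 0; i < sizeof(types); i++)
		printf("%c -> %c\n", types[i], fold_symbol_type(types[i]));
	return 0;
}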
5638     diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
5639     index b36b536a9fcb..13fd9fdf91e0 100644
5640     --- a/tools/lib/traceevent/event-parse.c
5641     +++ b/tools/lib/traceevent/event-parse.c
5642     @@ -269,10 +269,10 @@ static int add_new_comm(struct tep_handle *tep,
5643     errno = ENOMEM;
5644     return -1;
5645     }
5646     + tep->cmdlines = cmdlines;
5647    
5648     cmdlines[tep->cmdline_count].comm = strdup(comm);
5649     if (!cmdlines[tep->cmdline_count].comm) {
5650     - free(cmdlines);
5651     errno = ENOMEM;
5652     return -1;
5653     }
5654     @@ -283,7 +283,6 @@ static int add_new_comm(struct tep_handle *tep,
5655     tep->cmdline_count++;
5656    
5657     qsort(cmdlines, tep->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
5658     - tep->cmdlines = cmdlines;
5659    
5660     return 0;
5661     }
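
The event-parse fix publishes the realloc()ed buffer into tep->cmdlines immediately, so the strdup() failure path no longer free()s memory the handle still points at (a use-after-free on the next call). The general shape, in standalone C with hypothetical names:

#include <stdlib.h>
#include <string.h>

struct item {
	char *comm;
};

struct handle {
	struct item *items;
	int count;
};

/* Store the grown buffer in the handle *before* any step that can
 * fail; on failure the (still valid) buffer is kept, never freed
 * out from under the handle. */
static int add_item(struct handle *h, const char *comm)
{
	struct item *items;

	items = realloc(h->items, (h->count + 1) * sizeof(*items));
	if (!items)
		return -1;
	h->items = items;

	items[h->count].comm = strdup(comm);
	if (!items[h->count].comm)
		return -1;	/* do NOT free items here */

	h->count++;
	return 0;
}

int main(void)
{
	struct handle h = { NULL, 0 };

	return add_item(&h, "bash") ? 1 : 0;
}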
5662     diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
5663     index 89ac5a1f1550..3da374911852 100644
5664     --- a/tools/perf/Makefile.config
5665     +++ b/tools/perf/Makefile.config
5666     @@ -908,7 +908,7 @@ ifndef NO_JVMTI
5667     JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
5668     else
5669     ifneq (,$(wildcard /usr/sbin/alternatives))
5670     - JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
5671     + JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed -e 's%/jre/bin/java.%%g' -e 's%/bin/java.%%g')
5672     endif
5673     endif
5674     ifndef JDIR
5675     diff --git a/tools/perf/arch/x86/util/unwind-libunwind.c b/tools/perf/arch/x86/util/unwind-libunwind.c
5676     index 05920e3edf7a..47357973b55b 100644
5677     --- a/tools/perf/arch/x86/util/unwind-libunwind.c
5678     +++ b/tools/perf/arch/x86/util/unwind-libunwind.c
5679     @@ -1,11 +1,11 @@
5680     // SPDX-License-Identifier: GPL-2.0
5681    
5682     #include <errno.h>
5683     +#include "../../util/debug.h"
5684     #ifndef REMOTE_UNWIND_LIBUNWIND
5685     #include <libunwind.h>
5686     #include "perf_regs.h"
5687     #include "../../util/unwind.h"
5688     -#include "../../util/debug.h"
5689     #endif
5690    
5691     #ifdef HAVE_ARCH_X86_64_SUPPORT
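
Hoisting the debug.h include above the #ifndef makes its declarations visible to both the local and the REMOTE_UNWIND_LIBUNWIND builds. The rule the hunk applies, sketched with a stand-in header:

/* Anything every configuration needs must sit outside the
 * configuration-specific block; <stdio.h> stands in for
 * "../../util/debug.h" in this userspace sketch. */
#include <stdio.h>
#ifndef REMOTE_UNWIND_LIBUNWIND
/* headers only the local-unwind build needs stay guarded here */
#endif

int main(void)
{
	printf("debug helpers visible in every build\n");
	return 0;
}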
5692     diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
5693     index 352cf39d7c2f..8ec06bf3372c 100644
5694     --- a/tools/perf/builtin-stat.c
5695     +++ b/tools/perf/builtin-stat.c
5696     @@ -1961,8 +1961,11 @@ int cmd_stat(int argc, const char **argv)
5697     fprintf(output, "[ perf stat: executing run #%d ... ]\n",
5698     run_idx + 1);
5699    
5700     + if (run_idx != 0)
5701     + perf_evlist__reset_prev_raw_counts(evsel_list);
5702     +
5703     status = run_perf_stat(argc, argv, run_idx);
5704     - if (forever && status != -1) {
5705     + if (forever && status != -1 && !interval) {
5706     print_counters(NULL, argc, argv);
5707     perf_stat__reset_stats();
5708     }
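
Together with the perf_evlist__reset_prev_raw_counts() helper added to util/stat.c below, this makes perf stat -r N zero the saved aggregate counters before every run after the first, and it skips the per-run summary when interval printing already emitted the numbers. A toy model of resetting state between repeated runs (names hypothetical):

#include <stdio.h>

struct counts {
	long val, ena, run;
};

static struct counts prev;	/* stands in for prev_raw_counts */

static void reset_prev_counts(void)
{
	prev.val = prev.ena = prev.run = 0;
}

static void one_run(int idx)
{
	prev.val += 100;	/* pretend this run accumulated counts */
	printf("run %d: val=%ld\n", idx, prev.val);
}

int main(void)
{
	for (int run_idx = 0; run_idx < 3; run_idx++) {
		if (run_idx != 0)
			reset_prev_counts();	/* no stale deltas */
		one_run(run_idx);
	}
	return 0;
}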
5709     diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
5710     index bf7cf1249553..e95a2a26c40a 100644
5711     --- a/tools/perf/util/header.c
5712     +++ b/tools/perf/util/header.c
5713     @@ -1061,7 +1061,7 @@ static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 lev
5714    
5715     scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
5716     if (sysfs__read_str(file, &cache->map, &len)) {
5717     - zfree(&cache->map);
5718     + zfree(&cache->size);
5719     zfree(&cache->type);
5720     return -1;
5721     }
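
The header.c fix frees what was actually allocated before the failing read: cache->type and cache->size were populated earlier, while cache->map is the read that just failed, so freeing map leaked size. An error path has to mirror the allocations that preceded it; a standalone sketch:

#include <stdlib.h>
#include <string.h>

struct cache_level {
	char *type, *size, *map;
};

static int read_cache_level(struct cache_level *c)
{
	c->type = strdup("Unified");
	if (!c->type)
		return -1;

	c->size = strdup("512K");
	if (!c->size) {
		free(c->type);
		return -1;
	}

	c->map = NULL;			/* pretend this read failed */
	if (!c->map) {
		free(c->size);		/* the bug freed c->map instead,
					 * leaking c->size */
		free(c->type);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct cache_level c;

	return read_cache_level(&c) ? 1 : 0;
}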
5722     diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
5723     index 8394d48f8b32..3355c445abed 100644
5724     --- a/tools/perf/util/probe-event.c
5725     +++ b/tools/perf/util/probe-event.c
5726     @@ -2329,6 +2329,7 @@ void clear_probe_trace_event(struct probe_trace_event *tev)
5727     }
5728     }
5729     zfree(&tev->args);
5730     + tev->nargs = 0;
5731     }
5732    
5733     struct kprobe_blacklist_node {
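
clear_probe_trace_event() now zeroes tev->nargs together with freeing tev->args, so nothing can later iterate a stale count over a NULL array. Keeping a (pointer, count) pair consistent is the general rule; a minimal illustration:

#include <stdlib.h>

struct args_vec {
	char **args;
	int nargs;
};

/* Free the array and zero the count in one place; a stale nargs
 * combined with a NULL args is a latent NULL dereference. */
static void clear_args(struct args_vec *v)
{
	free(v->args);
	v->args = NULL;
	v->nargs = 0;
}

int main(void)
{
	struct args_vec v = { calloc(4, sizeof(char *)), 4 };

	clear_args(&v);
	return v.nargs;		/* 0: structure is safe to reuse */
}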
5734     diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
5735     index db8a6cf336be..6ce66c272747 100644
5736     --- a/tools/perf/util/stat.c
5737     +++ b/tools/perf/util/stat.c
5738     @@ -155,6 +155,15 @@ static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
5739     evsel->prev_raw_counts = NULL;
5740     }
5741    
5742     +static void perf_evsel__reset_prev_raw_counts(struct perf_evsel *evsel)
5743     +{
5744     + if (evsel->prev_raw_counts) {
5745     + evsel->prev_raw_counts->aggr.val = 0;
5746     + evsel->prev_raw_counts->aggr.ena = 0;
5747     + evsel->prev_raw_counts->aggr.run = 0;
5748     + }
5749     +}
5750     +
5751     static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
5752     {
5753     int ncpus = perf_evsel__nr_cpus(evsel);
5754     @@ -205,6 +214,14 @@ void perf_evlist__reset_stats(struct perf_evlist *evlist)
5755     }
5756     }
5757    
5758     +void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist)
5759     +{
5760     + struct perf_evsel *evsel;
5761     +
5762     + evlist__for_each_entry(evlist, evsel)
5763     + perf_evsel__reset_prev_raw_counts(evsel);
5764     +}
5765     +
5766     static void zero_per_pkg(struct perf_evsel *counter)
5767     {
5768     if (counter->per_pkg_mask)
5769     diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
5770     index 7032dd1eeac2..9cd0d9cff374 100644
5771     --- a/tools/perf/util/stat.h
5772     +++ b/tools/perf/util/stat.h
5773     @@ -194,6 +194,7 @@ void perf_stat__collect_metric_expr(struct perf_evlist *);
5774     int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
5775     void perf_evlist__free_stats(struct perf_evlist *evlist);
5776     void perf_evlist__reset_stats(struct perf_evlist *evlist);
5777     +void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist);
5778    
5779     int perf_stat_process_counter(struct perf_stat_config *config,
5780     struct perf_evsel *counter);
5781     diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
5782     index 448d686da8b1..0bf5640f1f07 100644
5783     --- a/tools/testing/nvdimm/test/nfit_test.h
5784     +++ b/tools/testing/nvdimm/test/nfit_test.h
5785     @@ -4,6 +4,7 @@
5786     */
5787     #ifndef __NFIT_TEST_H__
5788     #define __NFIT_TEST_H__
5789     +#include <linux/acpi.h>
5790     #include <linux/list.h>
5791     #include <linux/uuid.h>
5792     #include <linux/ioport.h>
5793     @@ -202,9 +203,6 @@ struct nd_intel_lss {
5794     __u32 status;
5795     } __packed;
5796    
5797     -union acpi_object;
5798     -typedef void *acpi_handle;
5799     -
5800     typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
5801     typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
5802     const guid_t *guid, u64 rev, u64 func,
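
Including <linux/acpi.h> replaces the header's private forward declaration and its local 'typedef void *acpi_handle;' with the kernel's own definitions, so the test stub cannot drift out of sync with the real types. A userspace sketch of why private stand-ins for library types are fragile:

/* Fragile: if the owning header is ever included as well, a
 * duplicate or conflicting typedef breaks the build:
 *	typedef void *acpi_handle;
 * Robust: include the owning header (<stddef.h> stands in for
 * <linux/acpi.h> here) and define only types that are clearly ours. */
#include <stddef.h>

typedef void *demo_handle_t;	/* hypothetical local type */

int main(void)
{
	demo_handle_t h = NULL;

	return h == NULL ? 0 : 1;
}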
5803     diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h
5804     index 8a399bdfd920..067eb625d01c 100644
5805     --- a/tools/testing/selftests/bpf/progs/strobemeta.h
5806     +++ b/tools/testing/selftests/bpf/progs/strobemeta.h
5807     @@ -413,7 +413,10 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
5808     #else
5809     #pragma unroll
5810     #endif
5811     - for (int i = 0; i < STROBE_MAX_MAP_ENTRIES && i < map.cnt; ++i) {
5812     + for (int i = 0; i < STROBE_MAX_MAP_ENTRIES; ++i) {
5813     + if (i >= map.cnt)
5814     + break;
5815     +
5816     descr->key_lens[i] = 0;
5817     len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN,
5818     map.entries[i].key);
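
Splitting the combined loop condition into a constant bound plus an explicit break keeps the trip count a compile-time constant, which is what #pragma unroll and the BPF verifier's bounds tracking require. The same transformation in ordinary C:

#include <stdio.h>

#define MAX_ENTRIES 16

int main(void)
{
	int cnt = 5;	/* runtime value, like map.cnt above */

	/* The loop bound is constant, so it can be fully unrolled;
	 * the runtime limit becomes an early break instead of part
	 * of the loop condition. */
	for (int i = 0; i < MAX_ENTRIES; i++) {
		if (i >= cnt)
			break;
		printf("entry %d\n", i);
	}
	return 0;
}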
5819     diff --git a/tools/testing/selftests/pidfd/Makefile b/tools/testing/selftests/pidfd/Makefile
5820     index 720b2d884b3c..e86141796444 100644
5821     --- a/tools/testing/selftests/pidfd/Makefile
5822     +++ b/tools/testing/selftests/pidfd/Makefile
5823     @@ -1,5 +1,5 @@
5824     # SPDX-License-Identifier: GPL-2.0-only
5825     -CFLAGS += -g -I../../../../usr/include/ -lpthread
5826     +CFLAGS += -g -I../../../../usr/include/ -pthread
5827    
5828     TEST_GEN_PROGS := pidfd_test pidfd_open_test
5829    
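
-pthread is the compiler-driver flag: it sets the preprocessor feature macros at compile time and links libpthread, whereas -lpthread in CFLAGS merely names the library, in a position some linkers ignore. A minimal program that needs the flag, with a suggested build line in the comment:

#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
	(void)arg;
	puts("hello from a thread");
	return NULL;
}

/* Build with:  cc -g -pthread demo.c   (not CFLAGS += -lpthread) */
int main(void)
{
	pthread_t t;

	if (pthread_create(&t, NULL, worker, NULL))
		return 1;
	return pthread_join(t, NULL);
}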
5830     diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
5831     index 6ef7f16c4cf5..7f8b5c8982e3 100644
5832     --- a/tools/testing/selftests/seccomp/seccomp_bpf.c
5833     +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
5834     @@ -199,6 +199,11 @@ struct seccomp_notif_sizes {
5835     };
5836     #endif
5837    
5838     +#ifndef PTRACE_EVENTMSG_SYSCALL_ENTRY
5839     +#define PTRACE_EVENTMSG_SYSCALL_ENTRY 1
5840     +#define PTRACE_EVENTMSG_SYSCALL_EXIT 2
5841     +#endif
5842     +
5843     #ifndef seccomp
5844     int seccomp(unsigned int op, unsigned int flags, void *args)
5845     {
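
The selftest guards its own copies of the PTRACE_EVENTMSG_SYSCALL_* constants so it still builds against pre-5.3 UAPI headers, while the #ifndef lets the real <linux/ptrace.h> definitions win when present. The values are copied from the hunk above; the pattern in isolation:

#include <stdio.h>

/* Fallback for older headers; the #ifndef keeps the kernel's
 * definitions authoritative whenever they exist. */
#ifndef PTRACE_EVENTMSG_SYSCALL_ENTRY
#define PTRACE_EVENTMSG_SYSCALL_ENTRY 1
#define PTRACE_EVENTMSG_SYSCALL_EXIT  2
#endif

int main(void)
{
	printf("entry=%d exit=%d\n",
	       PTRACE_EVENTMSG_SYSCALL_ENTRY,
	       PTRACE_EVENTMSG_SYSCALL_EXIT);
	return 0;
}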
5846     diff --git a/tools/testing/selftests/tpm2/Makefile b/tools/testing/selftests/tpm2/Makefile
5847     index 9dd848427a7b..bf401f725eef 100644
5848     --- a/tools/testing/selftests/tpm2/Makefile
5849     +++ b/tools/testing/selftests/tpm2/Makefile
5850     @@ -2,3 +2,4 @@
5851     include ../lib.mk
5852    
5853     TEST_PROGS := test_smoke.sh test_space.sh
5854     +TEST_FILES := tpm2.py tpm2_tests.py