Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0206-4.9.107-all-fixes.patch



Revision 3183
Wed Aug 8 14:17:35 2018 UTC by niro
File size: 95403 byte(s)
-linux-4.9.107
1 niro 3183 diff --git a/Makefile b/Makefile
2     index 48d87e3a36c1..ac30e448e0a5 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 106
9     +SUBLEVEL = 107
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
14     index 7457ce082b5f..d32a0160c89f 100644
15     --- a/arch/arm64/include/asm/atomic_lse.h
16     +++ b/arch/arm64/include/asm/atomic_lse.h
17     @@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v)
18     /* LSE atomics */
19     " mvn %w[i], %w[i]\n"
20     " stclr %w[i], %[v]")
21     - : [i] "+r" (w0), [v] "+Q" (v->counter)
22     + : [i] "+&r" (w0), [v] "+Q" (v->counter)
23     : "r" (x1)
24     : __LL_SC_CLOBBERS);
25     }
26     @@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \
27     /* LSE atomics */ \
28     " mvn %w[i], %w[i]\n" \
29     " ldclr" #mb " %w[i], %w[i], %[v]") \
30     - : [i] "+r" (w0), [v] "+Q" (v->counter) \
31     + : [i] "+&r" (w0), [v] "+Q" (v->counter) \
32     : "r" (x1) \
33     : __LL_SC_CLOBBERS, ##cl); \
34     \
35     @@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v)
36     /* LSE atomics */
37     " neg %w[i], %w[i]\n"
38     " stadd %w[i], %[v]")
39     - : [i] "+r" (w0), [v] "+Q" (v->counter)
40     + : [i] "+&r" (w0), [v] "+Q" (v->counter)
41     : "r" (x1)
42     : __LL_SC_CLOBBERS);
43     }
44     @@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
45     " neg %w[i], %w[i]\n" \
46     " ldadd" #mb " %w[i], w30, %[v]\n" \
47     " add %w[i], %w[i], w30") \
48     - : [i] "+r" (w0), [v] "+Q" (v->counter) \
49     + : [i] "+&r" (w0), [v] "+Q" (v->counter) \
50     : "r" (x1) \
51     : __LL_SC_CLOBBERS , ##cl); \
52     \
53     @@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
54     /* LSE atomics */ \
55     " neg %w[i], %w[i]\n" \
56     " ldadd" #mb " %w[i], %w[i], %[v]") \
57     - : [i] "+r" (w0), [v] "+Q" (v->counter) \
58     + : [i] "+&r" (w0), [v] "+Q" (v->counter) \
59     : "r" (x1) \
60     : __LL_SC_CLOBBERS, ##cl); \
61     \
62     @@ -314,7 +314,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
63     /* LSE atomics */
64     " mvn %[i], %[i]\n"
65     " stclr %[i], %[v]")
66     - : [i] "+r" (x0), [v] "+Q" (v->counter)
67     + : [i] "+&r" (x0), [v] "+Q" (v->counter)
68     : "r" (x1)
69     : __LL_SC_CLOBBERS);
70     }
71     @@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
72     /* LSE atomics */ \
73     " mvn %[i], %[i]\n" \
74     " ldclr" #mb " %[i], %[i], %[v]") \
75     - : [i] "+r" (x0), [v] "+Q" (v->counter) \
76     + : [i] "+&r" (x0), [v] "+Q" (v->counter) \
77     : "r" (x1) \
78     : __LL_SC_CLOBBERS, ##cl); \
79     \
80     @@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
81     /* LSE atomics */
82     " neg %[i], %[i]\n"
83     " stadd %[i], %[v]")
84     - : [i] "+r" (x0), [v] "+Q" (v->counter)
85     + : [i] "+&r" (x0), [v] "+Q" (v->counter)
86     : "r" (x1)
87     : __LL_SC_CLOBBERS);
88     }
89     @@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
90     " neg %[i], %[i]\n" \
91     " ldadd" #mb " %[i], x30, %[v]\n" \
92     " add %[i], %[i], x30") \
93     - : [i] "+r" (x0), [v] "+Q" (v->counter) \
94     + : [i] "+&r" (x0), [v] "+Q" (v->counter) \
95     : "r" (x1) \
96     : __LL_SC_CLOBBERS, ##cl); \
97     \
98     @@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
99     /* LSE atomics */ \
100     " neg %[i], %[i]\n" \
101     " ldadd" #mb " %[i], %[i], %[v]") \
102     - : [i] "+r" (x0), [v] "+Q" (v->counter) \
103     + : [i] "+&r" (x0), [v] "+Q" (v->counter) \
104     : "r" (x1) \
105     : __LL_SC_CLOBBERS, ##cl); \
106     \
107     @@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
108     " eor %[old1], %[old1], %[oldval1]\n" \
109     " eor %[old2], %[old2], %[oldval2]\n" \
110     " orr %[old1], %[old1], %[old2]") \
111     - : [old1] "+r" (x0), [old2] "+r" (x1), \
112     + : [old1] "+&r" (x0), [old2] "+&r" (x1), \
113     [v] "+Q" (*(unsigned long *)ptr) \
114     : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
115     [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
116     diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
117     index 0bc0b1de90c4..4ea85ebdf4df 100644
118     --- a/arch/arm64/include/asm/cpufeature.h
119     +++ b/arch/arm64/include/asm/cpufeature.h
120     @@ -9,8 +9,6 @@
121     #ifndef __ASM_CPUFEATURE_H
122     #define __ASM_CPUFEATURE_H
123    
124     -#include <linux/jump_label.h>
125     -
126     #include <asm/cpucaps.h>
127     #include <asm/hwcap.h>
128     #include <asm/sysreg.h>
129     @@ -27,6 +25,8 @@
130    
131     #ifndef __ASSEMBLY__
132    
133     +#include <linux/bug.h>
134     +#include <linux/jump_label.h>
135     #include <linux/kernel.h>
136    
137     /* CPU feature register tracking */
138     @@ -96,6 +96,7 @@ struct arm64_cpu_capabilities {
139    
140     extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
141     extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
142     +extern struct static_key_false arm64_const_caps_ready;
143    
144     bool this_cpu_has_cap(unsigned int cap);
145    
146     @@ -104,14 +105,27 @@ static inline bool cpu_have_feature(unsigned int num)
147     return elf_hwcap & (1UL << num);
148     }
149    
150     +/* System capability check for constant caps */
151     +static inline bool __cpus_have_const_cap(int num)
152     +{
153     + if (num >= ARM64_NCAPS)
154     + return false;
155     + return static_branch_unlikely(&cpu_hwcap_keys[num]);
156     +}
157     +
158     static inline bool cpus_have_cap(unsigned int num)
159     {
160     if (num >= ARM64_NCAPS)
161     return false;
162     - if (__builtin_constant_p(num))
163     - return static_branch_unlikely(&cpu_hwcap_keys[num]);
164     + return test_bit(num, cpu_hwcaps);
165     +}
166     +
167     +static inline bool cpus_have_const_cap(int num)
168     +{
169     + if (static_branch_likely(&arm64_const_caps_ready))
170     + return __cpus_have_const_cap(num);
171     else
172     - return test_bit(num, cpu_hwcaps);
173     + return cpus_have_cap(num);
174     }
175    
176     static inline void cpus_set_cap(unsigned int num)
177     @@ -121,7 +135,6 @@ static inline void cpus_set_cap(unsigned int num)
178     num, ARM64_NCAPS);
179     } else {
180     __set_bit(num, cpu_hwcaps);
181     - static_branch_enable(&cpu_hwcap_keys[num]);
182     }
183     }
184    
185     @@ -200,7 +213,7 @@ static inline bool cpu_supports_mixed_endian_el0(void)
186    
187     static inline bool system_supports_32bit_el0(void)
188     {
189     - return cpus_have_cap(ARM64_HAS_32BIT_EL0);
190     + return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
191     }
192    
193     static inline bool system_supports_mixed_endian_el0(void)
194     diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
195     index 0a33ea304e63..2abb4493f4f6 100644
196     --- a/arch/arm64/include/asm/kvm_host.h
197     +++ b/arch/arm64/include/asm/kvm_host.h
198     @@ -24,6 +24,7 @@
199    
200     #include <linux/types.h>
201     #include <linux/kvm_types.h>
202     +#include <asm/cpufeature.h>
203     #include <asm/kvm.h>
204     #include <asm/kvm_asm.h>
205     #include <asm/kvm_mmio.h>
206     @@ -358,9 +359,12 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
207     unsigned long vector_ptr)
208     {
209     /*
210     - * Call initialization code, and switch to the full blown
211     - * HYP code.
212     + * Call initialization code, and switch to the full blown HYP code.
213     + * If the cpucaps haven't been finalized yet, something has gone very
214     + * wrong, and hyp will crash and burn when it uses any
215     + * cpus_have_const_cap() wrapper.
216     */
217     + BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
218     __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
219     }
220    
221     @@ -398,7 +402,7 @@ static inline void __cpu_init_stage2(void)
222    
223     static inline bool kvm_arm_harden_branch_predictor(void)
224     {
225     - return cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
226     + return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
227     }
228    
229     #endif /* __ARM64_KVM_HOST_H__ */
230     diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
231     index eac73a640ea7..824c83db9b47 100644
232     --- a/arch/arm64/include/asm/kvm_mmu.h
233     +++ b/arch/arm64/include/asm/kvm_mmu.h
234     @@ -341,7 +341,7 @@ static inline void *kvm_get_hyp_vector(void)
235     vect = __bp_harden_hyp_vecs_start +
236     data->hyp_vectors_slot * SZ_2K;
237    
238     - if (!cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
239     + if (!cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
240     vect = lm_alias(vect);
241     }
242    
243     diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
244     index d51158a61892..6ac34c75f4e1 100644
245     --- a/arch/arm64/include/asm/mmu.h
246     +++ b/arch/arm64/include/asm/mmu.h
247     @@ -37,7 +37,7 @@ typedef struct {
248     static inline bool arm64_kernel_unmapped_at_el0(void)
249     {
250     return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
251     - cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0);
252     + cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
253     }
254    
255     typedef void (*bp_hardening_cb_t)(void);
256     diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
257     index a0ee01202503..7959d2c92010 100644
258     --- a/arch/arm64/kernel/cpufeature.c
259     +++ b/arch/arm64/kernel/cpufeature.c
260     @@ -47,6 +47,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
261     #endif
262    
263     DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
264     +EXPORT_SYMBOL(cpu_hwcaps);
265    
266     DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
267     EXPORT_SYMBOL(cpu_hwcap_keys);
268     @@ -762,7 +763,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
269     * ThunderX leads to apparent I-cache corruption of kernel text, which
270     * ends as well as you might imagine. Don't even try.
271     */
272     - if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
273     + if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
274     str = "ARM64_WORKAROUND_CAVIUM_27456";
275     __kpti_forced = -1;
276     }
277     @@ -1051,8 +1052,16 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
278     */
279     void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
280     {
281     - for (; caps->matches; caps++)
282     - if (caps->enable && cpus_have_cap(caps->capability))
283     + for (; caps->matches; caps++) {
284     + unsigned int num = caps->capability;
285     +
286     + if (!cpus_have_cap(num))
287     + continue;
288     +
289     + /* Ensure cpus_have_const_cap(num) works */
290     + static_branch_enable(&cpu_hwcap_keys[num]);
291     +
292     + if (caps->enable) {
293     /*
294     * Use stop_machine() as it schedules the work allowing
295     * us to modify PSTATE, instead of on_each_cpu() which
296     @@ -1060,6 +1069,8 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
297     * we return.
298     */
299     stop_machine(caps->enable, (void *)caps, cpu_online_mask);
300     + }
301     + }
302     }
303    
304     /*
305     @@ -1163,6 +1174,14 @@ static void __init setup_feature_capabilities(void)
306     enable_cpu_capabilities(arm64_features);
307     }
308    
309     +DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
310     +EXPORT_SYMBOL(arm64_const_caps_ready);
311     +
312     +static void __init mark_const_caps_ready(void)
313     +{
314     + static_branch_enable(&arm64_const_caps_ready);
315     +}
316     +
317     extern const struct arm64_cpu_capabilities arm64_errata[];
318    
319     bool this_cpu_has_cap(unsigned int cap)
320     @@ -1179,6 +1198,7 @@ void __init setup_cpu_features(void)
321     /* Set the CPU feature capabilies */
322     setup_feature_capabilities();
323     enable_errata_workarounds();
324     + mark_const_caps_ready();
325     setup_elf_hwcaps(arm64_elf_hwcaps);
326    
327     if (system_supports_32bit_el0())
328     @@ -1203,5 +1223,5 @@ void __init setup_cpu_features(void)
329     static bool __maybe_unused
330     cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
331     {
332     - return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
333     + return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
334     }
335     diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
336     index 0972ce58316d..e917d119490c 100644
337     --- a/arch/arm64/kernel/process.c
338     +++ b/arch/arm64/kernel/process.c
339     @@ -291,7 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
340     memset(childregs, 0, sizeof(struct pt_regs));
341     childregs->pstate = PSR_MODE_EL1h;
342     if (IS_ENABLED(CONFIG_ARM64_UAO) &&
343     - cpus_have_cap(ARM64_HAS_UAO))
344     + cpus_have_const_cap(ARM64_HAS_UAO))
345     childregs->pstate |= PSR_UAO_BIT;
346     p->thread.cpu_context.x19 = stack_start;
347     p->thread.cpu_context.x20 = stk_sz;
348     diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
349     index 6e716a5f1173..ebb575c4231b 100644
350     --- a/arch/mips/kernel/process.c
351     +++ b/arch/mips/kernel/process.c
352     @@ -699,6 +699,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
353     if (value & ~known_bits)
354     return -EOPNOTSUPP;
355    
356     + /* Setting FRE without FR is not supported. */
357     + if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
358     + return -EOPNOTSUPP;
359     +
360     /* Avoid inadvertently triggering emulation */
361     if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
362     !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
363     diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
364     index 8f7bf74d1c0b..4f64913b4b4c 100644
365     --- a/arch/mips/kernel/ptrace.c
366     +++ b/arch/mips/kernel/ptrace.c
367     @@ -838,7 +838,7 @@ long arch_ptrace(struct task_struct *child, long request,
368     break;
369     }
370     #endif
371     - tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
372     + tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
373     break;
374     case PC:
375     tmp = regs->cp0_epc;
376     diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
377     index bc9afbabbe14..b1e945738138 100644
378     --- a/arch/mips/kernel/ptrace32.c
379     +++ b/arch/mips/kernel/ptrace32.c
380     @@ -107,7 +107,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
381     addr & 1);
382     break;
383     }
384     - tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
385     + tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
386     break;
387     case PC:
388     tmp = regs->cp0_epc;
389     diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
390     index 903e76a9f158..e2200100828d 100644
391     --- a/arch/powerpc/include/asm/exception-64s.h
392     +++ b/arch/powerpc/include/asm/exception-64s.h
393     @@ -51,6 +51,27 @@
394     #define EX_PPR 88 /* SMT thread status register (priority) */
395     #define EX_CTR 96
396    
397     +#define STF_ENTRY_BARRIER_SLOT \
398     + STF_ENTRY_BARRIER_FIXUP_SECTION; \
399     + nop; \
400     + nop; \
401     + nop
402     +
403     +#define STF_EXIT_BARRIER_SLOT \
404     + STF_EXIT_BARRIER_FIXUP_SECTION; \
405     + nop; \
406     + nop; \
407     + nop; \
408     + nop; \
409     + nop; \
410     + nop
411     +
412     +/*
413     + * r10 must be free to use, r13 must be paca
414     + */
415     +#define INTERRUPT_TO_KERNEL \
416     + STF_ENTRY_BARRIER_SLOT
417     +
418     /*
419     * Macros for annotating the expected destination of (h)rfid
420     *
421     @@ -67,16 +88,19 @@
422     rfid
423    
424     #define RFI_TO_USER \
425     + STF_EXIT_BARRIER_SLOT; \
426     RFI_FLUSH_SLOT; \
427     rfid; \
428     b rfi_flush_fallback
429    
430     #define RFI_TO_USER_OR_KERNEL \
431     + STF_EXIT_BARRIER_SLOT; \
432     RFI_FLUSH_SLOT; \
433     rfid; \
434     b rfi_flush_fallback
435    
436     #define RFI_TO_GUEST \
437     + STF_EXIT_BARRIER_SLOT; \
438     RFI_FLUSH_SLOT; \
439     rfid; \
440     b rfi_flush_fallback
441     @@ -85,21 +109,25 @@
442     hrfid
443    
444     #define HRFI_TO_USER \
445     + STF_EXIT_BARRIER_SLOT; \
446     RFI_FLUSH_SLOT; \
447     hrfid; \
448     b hrfi_flush_fallback
449    
450     #define HRFI_TO_USER_OR_KERNEL \
451     + STF_EXIT_BARRIER_SLOT; \
452     RFI_FLUSH_SLOT; \
453     hrfid; \
454     b hrfi_flush_fallback
455    
456     #define HRFI_TO_GUEST \
457     + STF_EXIT_BARRIER_SLOT; \
458     RFI_FLUSH_SLOT; \
459     hrfid; \
460     b hrfi_flush_fallback
461    
462     #define HRFI_TO_UNKNOWN \
463     + STF_EXIT_BARRIER_SLOT; \
464     RFI_FLUSH_SLOT; \
465     hrfid; \
466     b hrfi_flush_fallback
467     @@ -225,6 +253,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
468     #define __EXCEPTION_PROLOG_1(area, extra, vec) \
469     OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
470     OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
471     + INTERRUPT_TO_KERNEL; \
472     SAVE_CTR(r10, area); \
473     mfcr r9; \
474     extra(vec); \
475     diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
476     index 7b332342071c..0bf8202feca6 100644
477     --- a/arch/powerpc/include/asm/feature-fixups.h
478     +++ b/arch/powerpc/include/asm/feature-fixups.h
479     @@ -189,6 +189,22 @@ void apply_feature_fixups(void);
480     void setup_feature_keys(void);
481     #endif
482    
483     +#define STF_ENTRY_BARRIER_FIXUP_SECTION \
484     +953: \
485     + .pushsection __stf_entry_barrier_fixup,"a"; \
486     + .align 2; \
487     +954: \
488     + FTR_ENTRY_OFFSET 953b-954b; \
489     + .popsection;
490     +
491     +#define STF_EXIT_BARRIER_FIXUP_SECTION \
492     +955: \
493     + .pushsection __stf_exit_barrier_fixup,"a"; \
494     + .align 2; \
495     +956: \
496     + FTR_ENTRY_OFFSET 955b-956b; \
497     + .popsection;
498     +
499     #define RFI_FLUSH_FIXUP_SECTION \
500     951: \
501     .pushsection __rfi_flush_fixup,"a"; \
502     @@ -200,6 +216,9 @@ void setup_feature_keys(void);
503    
504     #ifndef __ASSEMBLY__
505    
506     +extern long stf_barrier_fallback;
507     +extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
508     +extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
509     extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
510    
511     #endif
512     diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
513     index dc0996b9d75d..9d978102bf0d 100644
514     --- a/arch/powerpc/include/asm/hvcall.h
515     +++ b/arch/powerpc/include/asm/hvcall.h
516     @@ -313,6 +313,9 @@
517     #define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2
518     #define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3
519     #define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4
520     +#define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5
521     +#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6
522     +#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7
523    
524     #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
525     #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
526     diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
527     new file mode 100644
528     index 000000000000..44989b22383c
529     --- /dev/null
530     +++ b/arch/powerpc/include/asm/security_features.h
531     @@ -0,0 +1,85 @@
532     +/* SPDX-License-Identifier: GPL-2.0+ */
533     +/*
534     + * Security related feature bit definitions.
535     + *
536     + * Copyright 2018, Michael Ellerman, IBM Corporation.
537     + */
538     +
539     +#ifndef _ASM_POWERPC_SECURITY_FEATURES_H
540     +#define _ASM_POWERPC_SECURITY_FEATURES_H
541     +
542     +
543     +extern unsigned long powerpc_security_features;
544     +extern bool rfi_flush;
545     +
546     +/* These are bit flags */
547     +enum stf_barrier_type {
548     + STF_BARRIER_NONE = 0x1,
549     + STF_BARRIER_FALLBACK = 0x2,
550     + STF_BARRIER_EIEIO = 0x4,
551     + STF_BARRIER_SYNC_ORI = 0x8,
552     +};
553     +
554     +void setup_stf_barrier(void);
555     +void do_stf_barrier_fixups(enum stf_barrier_type types);
556     +
557     +static inline void security_ftr_set(unsigned long feature)
558     +{
559     + powerpc_security_features |= feature;
560     +}
561     +
562     +static inline void security_ftr_clear(unsigned long feature)
563     +{
564     + powerpc_security_features &= ~feature;
565     +}
566     +
567     +static inline bool security_ftr_enabled(unsigned long feature)
568     +{
569     + return !!(powerpc_security_features & feature);
570     +}
571     +
572     +
573     +// Features indicating support for Spectre/Meltdown mitigations
574     +
575     +// The L1-D cache can be flushed with ori r30,r30,0
576     +#define SEC_FTR_L1D_FLUSH_ORI30 0x0000000000000001ull
577     +
578     +// The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2)
579     +#define SEC_FTR_L1D_FLUSH_TRIG2 0x0000000000000002ull
580     +
581     +// ori r31,r31,0 acts as a speculation barrier
582     +#define SEC_FTR_SPEC_BAR_ORI31 0x0000000000000004ull
583     +
584     +// Speculation past bctr is disabled
585     +#define SEC_FTR_BCCTRL_SERIALISED 0x0000000000000008ull
586     +
587     +// Entries in L1-D are private to a SMT thread
588     +#define SEC_FTR_L1D_THREAD_PRIV 0x0000000000000010ull
589     +
590     +// Indirect branch prediction cache disabled
591     +#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull
592     +
593     +
594     +// Features indicating need for Spectre/Meltdown mitigations
595     +
596     +// The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest)
597     +#define SEC_FTR_L1D_FLUSH_HV 0x0000000000000040ull
598     +
599     +// The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace)
600     +#define SEC_FTR_L1D_FLUSH_PR 0x0000000000000080ull
601     +
602     +// A speculation barrier should be used for bounds checks (Spectre variant 1)
603     +#define SEC_FTR_BNDS_CHK_SPEC_BAR 0x0000000000000100ull
604     +
605     +// Firmware configuration indicates user favours security over performance
606     +#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
607     +
608     +
609     +// Features enabled by default
610     +#define SEC_FTR_DEFAULT \
611     + (SEC_FTR_L1D_FLUSH_HV | \
612     + SEC_FTR_L1D_FLUSH_PR | \
613     + SEC_FTR_BNDS_CHK_SPEC_BAR | \
614     + SEC_FTR_FAVOUR_SECURITY)
615     +
616     +#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
617     diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
618     index 6825a67cc3db..3f160cd20107 100644
619     --- a/arch/powerpc/include/asm/setup.h
620     +++ b/arch/powerpc/include/asm/setup.h
621     @@ -48,7 +48,7 @@ enum l1d_flush_type {
622     L1D_FLUSH_MTTRIG = 0x8,
623     };
624    
625     -void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
626     +void setup_rfi_flush(enum l1d_flush_type, bool enable);
627     void do_rfi_flush_fixups(enum l1d_flush_type types);
628    
629     #endif /* !__ASSEMBLY__ */
630     diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
631     index adb52d101133..13885786282b 100644
632     --- a/arch/powerpc/kernel/Makefile
633     +++ b/arch/powerpc/kernel/Makefile
634     @@ -44,7 +44,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
635     obj-$(CONFIG_VDSO32) += vdso32/
636     obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
637     obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
638     -obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
639     +obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o
640     obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
641     obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
642     obj-$(CONFIG_PPC64) += vdso64/
643     diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
644     index 9e05c8828ee2..ff45d007d195 100644
645     --- a/arch/powerpc/kernel/cpu_setup_power.S
646     +++ b/arch/powerpc/kernel/cpu_setup_power.S
647     @@ -28,6 +28,7 @@ _GLOBAL(__setup_cpu_power7)
648     beqlr
649     li r0,0
650     mtspr SPRN_LPID,r0
651     + mtspr SPRN_PCR,r0
652     mfspr r3,SPRN_LPCR
653     bl __init_LPCR
654     bl __init_tlb_power7
655     @@ -41,6 +42,7 @@ _GLOBAL(__restore_cpu_power7)
656     beqlr
657     li r0,0
658     mtspr SPRN_LPID,r0
659     + mtspr SPRN_PCR,r0
660     mfspr r3,SPRN_LPCR
661     bl __init_LPCR
662     bl __init_tlb_power7
663     @@ -57,6 +59,7 @@ _GLOBAL(__setup_cpu_power8)
664     beqlr
665     li r0,0
666     mtspr SPRN_LPID,r0
667     + mtspr SPRN_PCR,r0
668     mfspr r3,SPRN_LPCR
669     ori r3, r3, LPCR_PECEDH
670     bl __init_LPCR
671     @@ -78,6 +81,7 @@ _GLOBAL(__restore_cpu_power8)
672     beqlr
673     li r0,0
674     mtspr SPRN_LPID,r0
675     + mtspr SPRN_PCR,r0
676     mfspr r3,SPRN_LPCR
677     ori r3, r3, LPCR_PECEDH
678     bl __init_LPCR
679     @@ -98,6 +102,7 @@ _GLOBAL(__setup_cpu_power9)
680     li r0,0
681     mtspr SPRN_LPID,r0
682     mtspr SPRN_PID,r0
683     + mtspr SPRN_PCR,r0
684     mfspr r3,SPRN_LPCR
685     LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
686     or r3, r3, r4
687     @@ -121,6 +126,7 @@ _GLOBAL(__restore_cpu_power9)
688     li r0,0
689     mtspr SPRN_LPID,r0
690     mtspr SPRN_PID,r0
691     + mtspr SPRN_PCR,r0
692     mfspr r3,SPRN_LPCR
693     LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
694     or r3, r3, r4
695     diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
696     index 94b5dfb087e9..d50cc9b38b80 100644
697     --- a/arch/powerpc/kernel/exceptions-64s.S
698     +++ b/arch/powerpc/kernel/exceptions-64s.S
699     @@ -846,7 +846,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
700     #endif
701    
702    
703     -EXC_REAL_MASKABLE(decrementer, 0x900, 0x980)
704     +EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x980)
705     EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x4980, 0x900)
706     TRAMP_KVM(PACA_EXGEN, 0x900)
707     EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
708     @@ -884,6 +884,7 @@ BEGIN_FTR_SECTION \
709     END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
710     mr r9,r13 ; \
711     GET_PACA(r13) ; \
712     + INTERRUPT_TO_KERNEL ; \
713     mfspr r11,SPRN_SRR0 ; \
714     0:
715    
716     @@ -1353,6 +1354,19 @@ masked_##_H##interrupt: \
717     ##_H##RFI_TO_KERNEL; \
718     b .
719    
720     +TRAMP_REAL_BEGIN(stf_barrier_fallback)
721     + std r9,PACA_EXRFI+EX_R9(r13)
722     + std r10,PACA_EXRFI+EX_R10(r13)
723     + sync
724     + ld r9,PACA_EXRFI+EX_R9(r13)
725     + ld r10,PACA_EXRFI+EX_R10(r13)
726     + ori 31,31,0
727     + .rept 14
728     + b 1f
729     +1:
730     + .endr
731     + blr
732     +
733     /*
734     * Real mode exceptions actually use this too, but alternate
735     * instruction code patches (which end up in the common .text area)
736     diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
737     new file mode 100644
738     index 000000000000..2277df84ef6e
739     --- /dev/null
740     +++ b/arch/powerpc/kernel/security.c
741     @@ -0,0 +1,237 @@
742     +// SPDX-License-Identifier: GPL-2.0+
743     +//
744     +// Security related flags and so on.
745     +//
746     +// Copyright 2018, Michael Ellerman, IBM Corporation.
747     +
748     +#include <linux/kernel.h>
749     +#include <linux/debugfs.h>
750     +#include <linux/device.h>
751     +#include <linux/seq_buf.h>
752     +
753     +#include <asm/security_features.h>
754     +
755     +
756     +unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
757     +
758     +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
759     +{
760     + bool thread_priv;
761     +
762     + thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
763     +
764     + if (rfi_flush || thread_priv) {
765     + struct seq_buf s;
766     + seq_buf_init(&s, buf, PAGE_SIZE - 1);
767     +
768     + seq_buf_printf(&s, "Mitigation: ");
769     +
770     + if (rfi_flush)
771     + seq_buf_printf(&s, "RFI Flush");
772     +
773     + if (rfi_flush && thread_priv)
774     + seq_buf_printf(&s, ", ");
775     +
776     + if (thread_priv)
777     + seq_buf_printf(&s, "L1D private per thread");
778     +
779     + seq_buf_printf(&s, "\n");
780     +
781     + return s.len;
782     + }
783     +
784     + if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
785     + !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
786     + return sprintf(buf, "Not affected\n");
787     +
788     + return sprintf(buf, "Vulnerable\n");
789     +}
790     +
791     +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
792     +{
793     + if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
794     + return sprintf(buf, "Not affected\n");
795     +
796     + return sprintf(buf, "Vulnerable\n");
797     +}
798     +
799     +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
800     +{
801     + bool bcs, ccd, ori;
802     + struct seq_buf s;
803     +
804     + seq_buf_init(&s, buf, PAGE_SIZE - 1);
805     +
806     + bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
807     + ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
808     + ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
809     +
810     + if (bcs || ccd) {
811     + seq_buf_printf(&s, "Mitigation: ");
812     +
813     + if (bcs)
814     + seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
815     +
816     + if (bcs && ccd)
817     + seq_buf_printf(&s, ", ");
818     +
819     + if (ccd)
820     + seq_buf_printf(&s, "Indirect branch cache disabled");
821     + } else
822     + seq_buf_printf(&s, "Vulnerable");
823     +
824     + if (ori)
825     + seq_buf_printf(&s, ", ori31 speculation barrier enabled");
826     +
827     + seq_buf_printf(&s, "\n");
828     +
829     + return s.len;
830     +}
831     +
832     +/*
833     + * Store-forwarding barrier support.
834     + */
835     +
836     +static enum stf_barrier_type stf_enabled_flush_types;
837     +static bool no_stf_barrier;
838     +bool stf_barrier;
839     +
840     +static int __init handle_no_stf_barrier(char *p)
841     +{
842     + pr_info("stf-barrier: disabled on command line.");
843     + no_stf_barrier = true;
844     + return 0;
845     +}
846     +
847     +early_param("no_stf_barrier", handle_no_stf_barrier);
848     +
849     +/* This is the generic flag used by other architectures */
850     +static int __init handle_ssbd(char *p)
851     +{
852     + if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) {
853     + /* Until firmware tells us, we have the barrier with auto */
854     + return 0;
855     + } else if (strncmp(p, "off", 3) == 0) {
856     + handle_no_stf_barrier(NULL);
857     + return 0;
858     + } else
859     + return 1;
860     +
861     + return 0;
862     +}
863     +early_param("spec_store_bypass_disable", handle_ssbd);
864     +
865     +/* This is the generic flag used by other architectures */
866     +static int __init handle_no_ssbd(char *p)
867     +{
868     + handle_no_stf_barrier(NULL);
869     + return 0;
870     +}
871     +early_param("nospec_store_bypass_disable", handle_no_ssbd);
872     +
873     +static void stf_barrier_enable(bool enable)
874     +{
875     + if (enable)
876     + do_stf_barrier_fixups(stf_enabled_flush_types);
877     + else
878     + do_stf_barrier_fixups(STF_BARRIER_NONE);
879     +
880     + stf_barrier = enable;
881     +}
882     +
883     +void setup_stf_barrier(void)
884     +{
885     + enum stf_barrier_type type;
886     + bool enable, hv;
887     +
888     + hv = cpu_has_feature(CPU_FTR_HVMODE);
889     +
890     + /* Default to fallback in case fw-features are not available */
891     + if (cpu_has_feature(CPU_FTR_ARCH_300))
892     + type = STF_BARRIER_EIEIO;
893     + else if (cpu_has_feature(CPU_FTR_ARCH_207S))
894     + type = STF_BARRIER_SYNC_ORI;
895     + else if (cpu_has_feature(CPU_FTR_ARCH_206))
896     + type = STF_BARRIER_FALLBACK;
897     + else
898     + type = STF_BARRIER_NONE;
899     +
900     + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
901     + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
902     + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
903     +
904     + if (type == STF_BARRIER_FALLBACK) {
905     + pr_info("stf-barrier: fallback barrier available\n");
906     + } else if (type == STF_BARRIER_SYNC_ORI) {
907     + pr_info("stf-barrier: hwsync barrier available\n");
908     + } else if (type == STF_BARRIER_EIEIO) {
909     + pr_info("stf-barrier: eieio barrier available\n");
910     + }
911     +
912     + stf_enabled_flush_types = type;
913     +
914     + if (!no_stf_barrier)
915     + stf_barrier_enable(enable);
916     +}
917     +
918     +ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
919     +{
920     + if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
921     + const char *type;
922     + switch (stf_enabled_flush_types) {
923     + case STF_BARRIER_EIEIO:
924     + type = "eieio";
925     + break;
926     + case STF_BARRIER_SYNC_ORI:
927     + type = "hwsync";
928     + break;
929     + case STF_BARRIER_FALLBACK:
930     + type = "fallback";
931     + break;
932     + default:
933     + type = "unknown";
934     + }
935     + return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
936     + }
937     +
938     + if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
939     + !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
940     + return sprintf(buf, "Not affected\n");
941     +
942     + return sprintf(buf, "Vulnerable\n");
943     +}
944     +
945     +#ifdef CONFIG_DEBUG_FS
946     +static int stf_barrier_set(void *data, u64 val)
947     +{
948     + bool enable;
949     +
950     + if (val == 1)
951     + enable = true;
952     + else if (val == 0)
953     + enable = false;
954     + else
955     + return -EINVAL;
956     +
957     + /* Only do anything if we're changing state */
958     + if (enable != stf_barrier)
959     + stf_barrier_enable(enable);
960     +
961     + return 0;
962     +}
963     +
964     +static int stf_barrier_get(void *data, u64 *val)
965     +{
966     + *val = stf_barrier ? 1 : 0;
967     + return 0;
968     +}
969     +
970     +DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");
971     +
972     +static __init int stf_barrier_debugfs_init(void)
973     +{
974     + debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
975     + return 0;
976     +}
977     +device_initcall(stf_barrier_debugfs_init);
978     +#endif /* CONFIG_DEBUG_FS */
979     diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
980     index 5243501d95ef..fdba10695208 100644
981     --- a/arch/powerpc/kernel/setup_64.c
982     +++ b/arch/powerpc/kernel/setup_64.c
983     @@ -679,6 +679,7 @@ static int __init disable_hardlockup_detector(void)
984     return 0;
985     }
986     early_initcall(disable_hardlockup_detector);
987     +#endif /* CONFIG_HARDLOCKUP_DETECTOR */
988    
989     #ifdef CONFIG_PPC_BOOK3S_64
990     static enum l1d_flush_type enabled_flush_types;
991     @@ -716,9 +717,6 @@ static void do_nothing(void *unused)
992    
993     void rfi_flush_enable(bool enable)
994     {
995     - if (rfi_flush == enable)
996     - return;
997     -
998     if (enable) {
999     do_rfi_flush_fixups(enabled_flush_types);
1000     on_each_cpu(do_nothing, NULL, 1);
1001     @@ -728,11 +726,15 @@ void rfi_flush_enable(bool enable)
1002     rfi_flush = enable;
1003     }
1004    
1005     -static void init_fallback_flush(void)
1006     +static void __ref init_fallback_flush(void)
1007     {
1008     u64 l1d_size, limit;
1009     int cpu;
1010    
1011     + /* Only allocate the fallback flush area once (at boot time). */
1012     + if (l1d_flush_fallback_area)
1013     + return;
1014     +
1015     l1d_size = ppc64_caches.dsize;
1016     limit = min(safe_stack_limit(), ppc64_rma_size);
1017    
1018     @@ -750,18 +752,18 @@ static void init_fallback_flush(void)
1019     }
1020     }
1021    
1022     -void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
1023     +void setup_rfi_flush(enum l1d_flush_type types, bool enable)
1024     {
1025     if (types & L1D_FLUSH_FALLBACK) {
1026     - pr_info("rfi-flush: Using fallback displacement flush\n");
1027     + pr_info("rfi-flush: fallback displacement flush available\n");
1028     init_fallback_flush();
1029     }
1030    
1031     if (types & L1D_FLUSH_ORI)
1032     - pr_info("rfi-flush: Using ori type flush\n");
1033     + pr_info("rfi-flush: ori type flush available\n");
1034    
1035     if (types & L1D_FLUSH_MTTRIG)
1036     - pr_info("rfi-flush: Using mttrig type flush\n");
1037     + pr_info("rfi-flush: mttrig type flush available\n");
1038    
1039     enabled_flush_types = types;
1040    
1041     @@ -772,13 +774,19 @@ void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
1042     #ifdef CONFIG_DEBUG_FS
1043     static int rfi_flush_set(void *data, u64 val)
1044     {
1045     + bool enable;
1046     +
1047     if (val == 1)
1048     - rfi_flush_enable(true);
1049     + enable = true;
1050     else if (val == 0)
1051     - rfi_flush_enable(false);
1052     + enable = false;
1053     else
1054     return -EINVAL;
1055    
1056     + /* Only do anything if we're changing state */
1057     + if (enable != rfi_flush)
1058     + rfi_flush_enable(enable);
1059     +
1060     return 0;
1061     }
1062    
1063     @@ -797,13 +805,4 @@ static __init int rfi_flush_debugfs_init(void)
1064     }
1065     device_initcall(rfi_flush_debugfs_init);
1066     #endif
1067     -
1068     -ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
1069     -{
1070     - if (rfi_flush)
1071     - return sprintf(buf, "Mitigation: RFI Flush\n");
1072     -
1073     - return sprintf(buf, "Vulnerable\n");
1074     -}
1075     #endif /* CONFIG_PPC_BOOK3S_64 */
1076     -#endif
1077     diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
1078     index b61fb7902018..c16fddbb6ab8 100644
1079     --- a/arch/powerpc/kernel/vmlinux.lds.S
1080     +++ b/arch/powerpc/kernel/vmlinux.lds.S
1081     @@ -133,6 +133,20 @@ SECTIONS
1082     RODATA
1083    
1084     #ifdef CONFIG_PPC64
1085     + . = ALIGN(8);
1086     + __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
1087     + __start___stf_entry_barrier_fixup = .;
1088     + *(__stf_entry_barrier_fixup)
1089     + __stop___stf_entry_barrier_fixup = .;
1090     + }
1091     +
1092     + . = ALIGN(8);
1093     + __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
1094     + __start___stf_exit_barrier_fixup = .;
1095     + *(__stf_exit_barrier_fixup)
1096     + __stop___stf_exit_barrier_fixup = .;
1097     + }
1098     +
1099     . = ALIGN(8);
1100     __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
1101     __start___rfi_flush_fixup = .;
1102     diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
1103     index 46c8338a61bc..cf1398e3c2e0 100644
1104     --- a/arch/powerpc/lib/feature-fixups.c
1105     +++ b/arch/powerpc/lib/feature-fixups.c
1106     @@ -22,6 +22,7 @@
1107     #include <asm/page.h>
1108     #include <asm/sections.h>
1109     #include <asm/setup.h>
1110     +#include <asm/security_features.h>
1111     #include <asm/firmware.h>
1112     #include <asm/setup.h>
1113    
1114     @@ -117,6 +118,120 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
1115     }
1116    
1117     #ifdef CONFIG_PPC_BOOK3S_64
1118     +void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
1119     +{
1120     + unsigned int instrs[3], *dest;
1121     + long *start, *end;
1122     + int i;
1123     +
1124     + start = PTRRELOC(&__start___stf_entry_barrier_fixup),
1125     + end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
1126     +
1127     + instrs[0] = 0x60000000; /* nop */
1128     + instrs[1] = 0x60000000; /* nop */
1129     + instrs[2] = 0x60000000; /* nop */
1130     +
1131     + i = 0;
1132     + if (types & STF_BARRIER_FALLBACK) {
1133     + instrs[i++] = 0x7d4802a6; /* mflr r10 */
1134     + instrs[i++] = 0x60000000; /* branch patched below */
1135     + instrs[i++] = 0x7d4803a6; /* mtlr r10 */
1136     + } else if (types & STF_BARRIER_EIEIO) {
1137     + instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
1138     + } else if (types & STF_BARRIER_SYNC_ORI) {
1139     + instrs[i++] = 0x7c0004ac; /* hwsync */
1140     + instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
1141     + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
1142     + }
1143     +
1144     + for (i = 0; start < end; start++, i++) {
1145     + dest = (void *)start + *start;
1146     +
1147     + pr_devel("patching dest %lx\n", (unsigned long)dest);
1148     +
1149     + patch_instruction(dest, instrs[0]);
1150     +
1151     + if (types & STF_BARRIER_FALLBACK)
1152     + patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
1153     + BRANCH_SET_LINK);
1154     + else
1155     + patch_instruction(dest + 1, instrs[1]);
1156     +
1157     + patch_instruction(dest + 2, instrs[2]);
1158     + }
1159     +
1160     + printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
1161     + (types == STF_BARRIER_NONE) ? "no" :
1162     + (types == STF_BARRIER_FALLBACK) ? "fallback" :
1163     + (types == STF_BARRIER_EIEIO) ? "eieio" :
1164     + (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
1165     + : "unknown");
1166     +}
1167     +
1168     +void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
1169     +{
1170     + unsigned int instrs[6], *dest;
1171     + long *start, *end;
1172     + int i;
1173     +
1174     + start = PTRRELOC(&__start___stf_exit_barrier_fixup),
1175     + end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
1176     +
1177     + instrs[0] = 0x60000000; /* nop */
1178     + instrs[1] = 0x60000000; /* nop */
1179     + instrs[2] = 0x60000000; /* nop */
1180     + instrs[3] = 0x60000000; /* nop */
1181     + instrs[4] = 0x60000000; /* nop */
1182     + instrs[5] = 0x60000000; /* nop */
1183     +
1184     + i = 0;
1185     + if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
1186     + if (cpu_has_feature(CPU_FTR_HVMODE)) {
1187     + instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
1188     + instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
1189     + } else {
1190     + instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
1191     + instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
1192     + }
1193     + instrs[i++] = 0x7c0004ac; /* hwsync */
1194     + instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
1195     + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
1196     + if (cpu_has_feature(CPU_FTR_HVMODE)) {
1197     + instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
1198     + } else {
1199     + instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
1200     + }
1201     + } else if (types & STF_BARRIER_EIEIO) {
1202     + instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
1203     + }
1204     +
1205     + for (i = 0; start < end; start++, i++) {
1206     + dest = (void *)start + *start;
1207     +
1208     + pr_devel("patching dest %lx\n", (unsigned long)dest);
1209     +
1210     + patch_instruction(dest, instrs[0]);
1211     + patch_instruction(dest + 1, instrs[1]);
1212     + patch_instruction(dest + 2, instrs[2]);
1213     + patch_instruction(dest + 3, instrs[3]);
1214     + patch_instruction(dest + 4, instrs[4]);
1215     + patch_instruction(dest + 5, instrs[5]);
1216     + }
1217     + printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
1218     + (types == STF_BARRIER_NONE) ? "no" :
1219     + (types == STF_BARRIER_FALLBACK) ? "fallback" :
1220     + (types == STF_BARRIER_EIEIO) ? "eieio" :
1221     + (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
1222     + : "unknown");
1223     +}
1224     +
1225     +
1226     +void do_stf_barrier_fixups(enum stf_barrier_type types)
1227     +{
1228     + do_stf_entry_barrier_fixups(types);
1229     + do_stf_exit_barrier_fixups(types);
1230     +}
1231     +
1232     void do_rfi_flush_fixups(enum l1d_flush_type types)
1233     {
1234     unsigned int instrs[3], *dest;
1235     @@ -153,7 +268,14 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
1236     patch_instruction(dest + 2, instrs[2]);
1237     }
1238    
1239     - printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
1240     + printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
1241     + (types == L1D_FLUSH_NONE) ? "no" :
1242     + (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
1243     + (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
1244     + ? "ori+mttrig type"
1245     + : "ori type" :
1246     + (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
1247     + : "unknown");
1248     }
1249     #endif /* CONFIG_PPC_BOOK3S_64 */
1250    
1251     diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
1252     index 6f8b4c19373a..17203abf38e8 100644
1253     --- a/arch/powerpc/platforms/powernv/setup.c
1254     +++ b/arch/powerpc/platforms/powernv/setup.c
1255     @@ -37,53 +37,92 @@
1256     #include <asm/smp.h>
1257     #include <asm/tm.h>
1258     #include <asm/setup.h>
1259     +#include <asm/security_features.h>
1260    
1261     #include "powernv.h"
1262    
1263     +
1264     +static bool fw_feature_is(const char *state, const char *name,
1265     + struct device_node *fw_features)
1266     +{
1267     + struct device_node *np;
1268     + bool rc = false;
1269     +
1270     + np = of_get_child_by_name(fw_features, name);
1271     + if (np) {
1272     + rc = of_property_read_bool(np, state);
1273     + of_node_put(np);
1274     + }
1275     +
1276     + return rc;
1277     +}
1278     +
1279     +static void init_fw_feat_flags(struct device_node *np)
1280     +{
1281     + if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
1282     + security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
1283     +
1284     + if (fw_feature_is("enabled", "fw-bcctrl-serialized", np))
1285     + security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
1286     +
1287     + if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np))
1288     + security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
1289     +
1290     + if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np))
1291     + security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
1292     +
1293     + if (fw_feature_is("enabled", "fw-l1d-thread-split", np))
1294     + security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
1295     +
1296     + if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
1297     + security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
1298     +
1299     + /*
1300     + * The features below are enabled by default, so we instead look to see
1301     + * if firmware has *disabled* them, and clear them if so.
1302     + */
1303     + if (fw_feature_is("disabled", "speculation-policy-favor-security", np))
1304     + security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
1305     +
1306     + if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np))
1307     + security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
1308     +
1309     + if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np))
1310     + security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
1311     +
1312     + if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np))
1313     + security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
1314     +}
1315     +
1316     static void pnv_setup_rfi_flush(void)
1317     {
1318     struct device_node *np, *fw_features;
1319     enum l1d_flush_type type;
1320     - int enable;
1321     + bool enable;
1322    
1323     /* Default to fallback in case fw-features are not available */
1324     type = L1D_FLUSH_FALLBACK;
1325     - enable = 1;
1326    
1327     np = of_find_node_by_name(NULL, "ibm,opal");
1328     fw_features = of_get_child_by_name(np, "fw-features");
1329     of_node_put(np);
1330    
1331     if (fw_features) {
1332     - np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
1333     - if (np && of_property_read_bool(np, "enabled"))
1334     - type = L1D_FLUSH_MTTRIG;
1335     + init_fw_feat_flags(fw_features);
1336     + of_node_put(fw_features);
1337    
1338     - of_node_put(np);
1339     + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
1340     + type = L1D_FLUSH_MTTRIG;
1341    
1342     - np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
1343     - if (np && of_property_read_bool(np, "enabled"))
1344     + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
1345     type = L1D_FLUSH_ORI;
1346     -
1347     - of_node_put(np);
1348     -
1349     - /* Enable unless firmware says NOT to */
1350     - enable = 2;
1351     - np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
1352     - if (np && of_property_read_bool(np, "disabled"))
1353     - enable--;
1354     -
1355     - of_node_put(np);
1356     -
1357     - np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
1358     - if (np && of_property_read_bool(np, "disabled"))
1359     - enable--;
1360     -
1361     - of_node_put(np);
1362     - of_node_put(fw_features);
1363     }
1364    
1365     - setup_rfi_flush(type, enable > 0);
1366     + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
1367     + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \
1368     + security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
1369     +
1370     + setup_rfi_flush(type, enable);
1371     }
1372    
1373     static void __init pnv_setup_arch(void)
1374     @@ -91,6 +130,7 @@ static void __init pnv_setup_arch(void)
1375     set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
1376    
1377     pnv_setup_rfi_flush();
1378     + setup_stf_barrier();
1379    
1380     /* Initialize SMP */
1381     pnv_smp_init();
1382     diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
1383     index 6a5e7467445c..3784a7abfcc8 100644
1384     --- a/arch/powerpc/platforms/pseries/mobility.c
1385     +++ b/arch/powerpc/platforms/pseries/mobility.c
1386     @@ -314,6 +314,9 @@ void post_mobility_fixup(void)
1387     printk(KERN_ERR "Post-mobility device tree update "
1388     "failed: %d\n", rc);
1389    
1390     + /* Possibly switch to a new RFI flush type */
1391     + pseries_setup_rfi_flush();
1392     +
1393     return;
1394     }
1395    
1396     diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
1397     index b1be7b713fe6..62ff57cf6c24 100644
1398     --- a/arch/powerpc/platforms/pseries/pseries.h
1399     +++ b/arch/powerpc/platforms/pseries/pseries.h
1400     @@ -79,4 +79,6 @@ extern struct pci_controller_ops pseries_pci_controller_ops;
1401    
1402     unsigned long pseries_memory_block_size(void);
1403    
1404     +void pseries_setup_rfi_flush(void);
1405     +
1406     #endif /* _PSERIES_PSERIES_H */
1407     diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
1408     index 1845fc611912..91ade7755823 100644
1409     --- a/arch/powerpc/platforms/pseries/setup.c
1410     +++ b/arch/powerpc/platforms/pseries/setup.c
1411     @@ -66,6 +66,7 @@
1412     #include <asm/reg.h>
1413     #include <asm/plpar_wrappers.h>
1414     #include <asm/kexec.h>
1415     +#include <asm/security_features.h>
1416    
1417     #include "pseries.h"
1418    
1419     @@ -450,35 +451,78 @@ static void __init find_and_init_phbs(void)
1420     of_pci_check_probe_only();
1421     }
1422    
1423     -static void pseries_setup_rfi_flush(void)
1424     +static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
1425     +{
1426     + /*
1427     + * The features below are disabled by default, so we instead look to see
1428     + * if firmware has *enabled* them, and set them if so.
1429     + */
1430     + if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31)
1431     + security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
1432     +
1433     + if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED)
1434     + security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
1435     +
1436     + if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30)
1437     + security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
1438     +
1439     + if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
1440     + security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
1441     +
1442     + if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV)
1443     + security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
1444     +
1445     + if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
1446     + security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
1447     +
1448     + /*
1449     + * The features below are enabled by default, so we instead look to see
1450     + * if firmware has *disabled* them, and clear them if so.
1451     + */
1452     + if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
1453     + security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
1454     +
1455     + if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
1456     + security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
1457     +
1458     + if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
1459     + security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
1460     +}
1461     +
1462     +void pseries_setup_rfi_flush(void)
1463     {
1464     struct h_cpu_char_result result;
1465     enum l1d_flush_type types;
1466     bool enable;
1467     long rc;
1468    
1469     - /* Enable by default */
1470     - enable = true;
1471     + /*
1472     + * Set features to the defaults assumed by init_cpu_char_feature_flags()
1473     + * so it can set/clear again any features that might have changed after
1474     + * migration, and in case the hypercall fails and it is not even called.
1475     + */
1476     + powerpc_security_features = SEC_FTR_DEFAULT;
1477    
1478     rc = plpar_get_cpu_characteristics(&result);
1479     - if (rc == H_SUCCESS) {
1480     - types = L1D_FLUSH_NONE;
1481     + if (rc == H_SUCCESS)
1482     + init_cpu_char_feature_flags(&result);
1483    
1484     - if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
1485     - types |= L1D_FLUSH_MTTRIG;
1486     - if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
1487     - types |= L1D_FLUSH_ORI;
1488     + /*
1489     + * We're the guest so this doesn't apply to us, clear it to simplify
1490     + * handling of it elsewhere.
1491     + */
1492     + security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
1493    
1494     - /* Use fallback if nothing set in hcall */
1495     - if (types == L1D_FLUSH_NONE)
1496     - types = L1D_FLUSH_FALLBACK;
1497     + types = L1D_FLUSH_FALLBACK;
1498    
1499     - if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
1500     - enable = false;
1501     - } else {
1502     - /* Default to fallback if case hcall is not available */
1503     - types = L1D_FLUSH_FALLBACK;
1504     - }
1505     + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
1506     + types |= L1D_FLUSH_MTTRIG;
1507     +
1508     + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
1509     + types |= L1D_FLUSH_ORI;
1510     +
1511     + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
1512     + security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
1513    
1514     setup_rfi_flush(types, enable);
1515     }
1516     @@ -501,6 +545,7 @@ static void __init pSeries_setup_arch(void)
1517     fwnmi_init();
1518    
1519     pseries_setup_rfi_flush();
1520     + setup_stf_barrier();
1521    
1522     /* By default, only probe PCI (can be overridden by rtas_pci) */
1523     pci_add_flags(PCI_PROBE_ONLY);
1524     diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
1525     index f87a55d77094..9b3f2e212b37 100644
1526     --- a/arch/sparc/kernel/ds.c
1527     +++ b/arch/sparc/kernel/ds.c
1528     @@ -908,7 +908,7 @@ static int register_services(struct ds_info *dp)
1529     pbuf.req.handle = cp->handle;
1530     pbuf.req.major = 1;
1531     pbuf.req.minor = 0;
1532     - strcpy(pbuf.req.svc_id, cp->service_id);
1533     + strcpy(pbuf.id_buf, cp->service_id);
1534    
1535     err = __ds_send(lp, &pbuf, msg_len);
1536     if (err > 0)
1537     diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
1538     index d6b6c97fe3c7..703127aaf4a5 100644
1539     --- a/arch/sparc/lib/multi3.S
1540     +++ b/arch/sparc/lib/multi3.S
1541     @@ -5,26 +5,26 @@
1542     .align 4
1543     ENTRY(__multi3) /* %o0 = u, %o1 = v */
1544     mov %o1, %g1
1545     - srl %o3, 0, %g4
1546     - mulx %g4, %g1, %o1
1547     + srl %o3, 0, %o4
1548     + mulx %o4, %g1, %o1
1549     srlx %g1, 0x20, %g3
1550     - mulx %g3, %g4, %g5
1551     - sllx %g5, 0x20, %o5
1552     - srl %g1, 0, %g4
1553     + mulx %g3, %o4, %g7
1554     + sllx %g7, 0x20, %o5
1555     + srl %g1, 0, %o4
1556     sub %o1, %o5, %o5
1557     srlx %o5, 0x20, %o5
1558     - addcc %g5, %o5, %g5
1559     + addcc %g7, %o5, %g7
1560     srlx %o3, 0x20, %o5
1561     - mulx %g4, %o5, %g4
1562     + mulx %o4, %o5, %o4
1563     mulx %g3, %o5, %o5
1564     sethi %hi(0x80000000), %g3
1565     - addcc %g5, %g4, %g5
1566     - srlx %g5, 0x20, %g5
1567     + addcc %g7, %o4, %g7
1568     + srlx %g7, 0x20, %g7
1569     add %g3, %g3, %g3
1570     movcc %xcc, %g0, %g3
1571     - addcc %o5, %g5, %o5
1572     - sllx %g4, 0x20, %g4
1573     - add %o1, %g4, %o1
1574     + addcc %o5, %g7, %o5
1575     + sllx %o4, 0x20, %o4
1576     + add %o1, %o4, %o1
1577     add %o5, %g3, %g2
1578     mulx %g1, %o2, %g1
1579     add %g1, %g2, %g1
1580     diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
1581     index e3a3f5a64884..2986a13b9786 100644
1582     --- a/arch/x86/xen/enlighten.c
1583     +++ b/arch/x86/xen/enlighten.c
1584     @@ -472,6 +472,12 @@ static void __init xen_init_cpuid_mask(void)
1585     cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
1586     }
1587    
1588     +static void __init xen_init_capabilities(void)
1589     +{
1590     + if (xen_pv_domain())
1591     + setup_force_cpu_cap(X86_FEATURE_XENPV);
1592     +}
1593     +
1594     static void xen_set_debugreg(int reg, unsigned long val)
1595     {
1596     HYPERVISOR_set_debugreg(reg, val);
1597     @@ -1634,6 +1640,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
1598    
1599     xen_init_irq_ops();
1600     xen_init_cpuid_mask();
1601     + xen_init_capabilities();
1602    
1603     #ifdef CONFIG_X86_LOCAL_APIC
1604     /*
1605     @@ -1978,12 +1985,6 @@ bool xen_hvm_need_lapic(void)
1606     }
1607     EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
1608    
1609     -static void xen_set_cpu_features(struct cpuinfo_x86 *c)
1610     -{
1611     - if (xen_pv_domain())
1612     - set_cpu_cap(c, X86_FEATURE_XENPV);
1613     -}
1614     -
1615     static void xen_pin_vcpu(int cpu)
1616     {
1617     static bool disable_pinning;
1618     @@ -2030,7 +2031,6 @@ const struct hypervisor_x86 x86_hyper_xen = {
1619     .init_platform = xen_hvm_guest_init,
1620     #endif
1621     .x2apic_available = xen_x2apic_para_available,
1622     - .set_cpu_features = xen_set_cpu_features,
1623     .pin_vcpu = xen_pin_vcpu,
1624     };
1625     EXPORT_SYMBOL(x86_hyper_xen);
1626     diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
1627     index 6b54e02da10c..e48140e76043 100644
1628     --- a/drivers/dma-buf/dma-buf.c
1629     +++ b/drivers/dma-buf/dma-buf.c
1630     @@ -551,7 +551,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
1631     struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
1632     enum dma_data_direction direction)
1633     {
1634     - struct sg_table *sg_table = ERR_PTR(-EINVAL);
1635     + struct sg_table *sg_table;
1636    
1637     might_sleep();
1638    
1639     diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
1640     index 3e6fe82c6d64..4d49fa0911c6 100644
1641     --- a/drivers/gpu/drm/drm_dp_helper.c
1642     +++ b/drivers/gpu/drm/drm_dp_helper.c
1643     @@ -1065,6 +1065,7 @@ int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE])
1644     static const u16 psr_setup_time_us[] = {
1645     PSR_SETUP_TIME(330),
1646     PSR_SETUP_TIME(275),
1647     + PSR_SETUP_TIME(220),
1648     PSR_SETUP_TIME(165),
1649     PSR_SETUP_TIME(110),
1650     PSR_SETUP_TIME(55),
1651     diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
1652     index 3517c0ed984a..479d64184da5 100644
1653     --- a/drivers/gpu/drm/i915/intel_lvds.c
1654     +++ b/drivers/gpu/drm/i915/intel_lvds.c
1655     @@ -864,6 +864,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
1656     DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
1657     },
1658     },
1659     + {
1660     + .callback = intel_no_lvds_dmi_callback,
1661     + .ident = "Radiant P845",
1662     + .matches = {
1663     + DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"),
1664     + DMI_MATCH(DMI_PRODUCT_NAME, "P845"),
1665     + },
1666     + },
1667    
1668     { } /* terminating entry */
1669     };
1670     diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
1671     index 877a0ed76abf..c38645106783 100644
1672     --- a/drivers/hwtracing/stm/core.c
1673     +++ b/drivers/hwtracing/stm/core.c
1674     @@ -27,6 +27,7 @@
1675     #include <linux/stm.h>
1676     #include <linux/fs.h>
1677     #include <linux/mm.h>
1678     +#include <linux/vmalloc.h>
1679     #include "stm.h"
1680    
1681     #include <uapi/linux/stm.h>
1682     @@ -682,7 +683,7 @@ static void stm_device_release(struct device *dev)
1683     {
1684     struct stm_device *stm = to_stm_device(dev);
1685    
1686     - kfree(stm);
1687     + vfree(stm);
1688     }
1689    
1690     int stm_register_device(struct device *parent, struct stm_data *stm_data,
1691     @@ -699,7 +700,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
1692     return -EINVAL;
1693    
1694     nmasters = stm_data->sw_end - stm_data->sw_start + 1;
1695     - stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
1696     + stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
1697     if (!stm)
1698     return -ENOMEM;
1699    
1700     @@ -752,7 +753,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
1701     /* matches device_initialize() above */
1702     put_device(&stm->dev);
1703     err_free:
1704     - kfree(stm);
1705     + vfree(stm);
1706    
1707     return err;
1708     }
1709     diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
1710     index c5b999f0c519..7ef9b13262a8 100644
1711     --- a/drivers/iio/buffer/kfifo_buf.c
1712     +++ b/drivers/iio/buffer/kfifo_buf.c
1713     @@ -24,6 +24,13 @@ static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
1714     if ((length == 0) || (bytes_per_datum == 0))
1715     return -EINVAL;
1716    
1717     + /*
1718     + * Make sure we don't overflow an unsigned int after kfifo rounds up to
1719     + * the next power of 2.
1720     + */
1721     + if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
1722     + return -EINVAL;
1723     +
1724     return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
1725     bytes_per_datum, GFP_KERNEL);
1726     }
1727     diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
1728     index ae04826e82fc..a32dd851e712 100644
1729     --- a/drivers/infiniband/core/cache.c
1730     +++ b/drivers/infiniband/core/cache.c
1731     @@ -437,7 +437,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
1732     return -EINVAL;
1733    
1734     if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
1735     - return -EAGAIN;
1736     + return -EINVAL;
1737    
1738     memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
1739     if (attr) {
1740     diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
1741     index e23b2495d52e..05b8695a6369 100644
1742     --- a/drivers/input/mouse/elan_i2c_smbus.c
1743     +++ b/drivers/input/mouse/elan_i2c_smbus.c
1744     @@ -130,7 +130,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client,
1745     bool max_baseline, u8 *value)
1746     {
1747     int error;
1748     - u8 val[3];
1749     + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1750    
1751     error = i2c_smbus_read_block_data(client,
1752     max_baseline ?
1753     @@ -149,7 +149,7 @@ static int elan_smbus_get_version(struct i2c_client *client,
1754     bool iap, u8 *version)
1755     {
1756     int error;
1757     - u8 val[3];
1758     + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1759    
1760     error = i2c_smbus_read_block_data(client,
1761     iap ? ETP_SMBUS_IAP_VERSION_CMD :
1762     @@ -169,7 +169,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
1763     u8 *ic_type, u8 *version)
1764     {
1765     int error;
1766     - u8 val[3];
1767     + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1768    
1769     error = i2c_smbus_read_block_data(client,
1770     ETP_SMBUS_SM_VERSION_CMD, val);
1771     @@ -186,7 +186,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
1772     static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
1773     {
1774     int error;
1775     - u8 val[3];
1776     + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1777    
1778     error = i2c_smbus_read_block_data(client,
1779     ETP_SMBUS_UNIQUEID_CMD, val);
1780     @@ -203,7 +203,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client,
1781     bool iap, u16 *csum)
1782     {
1783     int error;
1784     - u8 val[3];
1785     + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1786    
1787     error = i2c_smbus_read_block_data(client,
1788     iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
1789     @@ -224,7 +224,7 @@ static int elan_smbus_get_max(struct i2c_client *client,
1790     {
1791     int ret;
1792     int error;
1793     - u8 val[3];
1794     + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1795    
1796     ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
1797     if (ret != 3) {
1798     @@ -244,7 +244,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client,
1799     {
1800     int ret;
1801     int error;
1802     - u8 val[3];
1803     + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1804    
1805     ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val);
1806     if (ret != 3) {
1807     @@ -265,7 +265,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client,
1808     {
1809     int ret;
1810     int error;
1811     - u8 val[3];
1812     + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1813    
1814     ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val);
1815     if (ret != 3) {
1816     @@ -292,7 +292,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client,
1817     {
1818     int error;
1819     u16 constant;
1820     - u8 val[3];
1821     + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1822    
1823     error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
1824     if (error < 0) {
1825     @@ -343,7 +343,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
1826     int len;
1827     int error;
1828     enum tp_mode mode;
1829     - u8 val[3];
1830     + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1831     u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
1832     u16 password;
1833    
1834     @@ -417,7 +417,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
1835     struct device *dev = &client->dev;
1836     int error;
1837     u16 result;
1838     - u8 val[3];
1839     + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1840    
1841     /*
1842     * Due to the limitation of smbus protocol limiting
1843     diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
1844     index 0b1d5bdd0862..f7b8681aed3f 100644
1845     --- a/drivers/irqchip/irq-gic-v3.c
1846     +++ b/drivers/irqchip/irq-gic-v3.c
1847     @@ -120,11 +120,10 @@ static void gic_redist_wait_for_rwp(void)
1848     }
1849    
1850     #ifdef CONFIG_ARM64
1851     -static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);
1852    
1853     static u64 __maybe_unused gic_read_iar(void)
1854     {
1855     - if (static_branch_unlikely(&is_cavium_thunderx))
1856     + if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
1857     return gic_read_iar_cavium_thunderx();
1858     else
1859     return gic_read_iar_common();
1860     @@ -908,14 +907,6 @@ static const struct irq_domain_ops partition_domain_ops = {
1861     .select = gic_irq_domain_select,
1862     };
1863    
1864     -static void gicv3_enable_quirks(void)
1865     -{
1866     -#ifdef CONFIG_ARM64
1867     - if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
1868     - static_branch_enable(&is_cavium_thunderx);
1869     -#endif
1870     -}
1871     -
1872     static int __init gic_init_bases(void __iomem *dist_base,
1873     struct redist_region *rdist_regs,
1874     u32 nr_redist_regions,
1875     @@ -938,8 +929,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
1876     gic_data.nr_redist_regions = nr_redist_regions;
1877     gic_data.redist_stride = redist_stride;
1878    
1879     - gicv3_enable_quirks();
1880     -
1881     /*
1882     * Find out how many interrupts are supported.
1883     * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
1884     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1885     index bcbb80ff86a7..1a92cd719e19 100644
1886     --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1887     +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1888     @@ -142,16 +142,17 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
1889     struct mlx4_en_rx_alloc *frags,
1890     int i)
1891     {
1892     - const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
1893     - u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
1894     -
1895     -
1896     - if (next_frag_end > frags[i].page_size)
1897     - dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
1898     - frag_info->dma_dir);
1899     + if (frags[i].page) {
1900     + const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
1901     + u32 next_frag_end = frags[i].page_offset +
1902     + 2 * frag_info->frag_stride;
1903    
1904     - if (frags[i].page)
1905     + if (next_frag_end > frags[i].page_size) {
1906     + dma_unmap_page(priv->ddev, frags[i].dma,
1907     + frags[i].page_size, frag_info->dma_dir);
1908     + }
1909     put_page(frags[i].page);
1910     + }
1911     }
1912    
1913     static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
1914     @@ -586,21 +587,28 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
1915     int length)
1916     {
1917     struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
1918     - struct mlx4_en_frag_info *frag_info;
1919     int nr;
1920     dma_addr_t dma;
1921    
1922     /* Collect used fragments while replacing them in the HW descriptors */
1923     for (nr = 0; nr < priv->num_frags; nr++) {
1924     - frag_info = &priv->frag_info[nr];
1925     + struct mlx4_en_frag_info *frag_info = &priv->frag_info[nr];
1926     + u32 next_frag_end = frags[nr].page_offset +
1927     + 2 * frag_info->frag_stride;
1928     +
1929     if (length <= frag_info->frag_prefix_size)
1930     break;
1931     if (unlikely(!frags[nr].page))
1932     goto fail;
1933    
1934     dma = be64_to_cpu(rx_desc->data[nr].addr);
1935     - dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
1936     - DMA_FROM_DEVICE);
1937     + if (next_frag_end > frags[nr].page_size)
1938     + dma_unmap_page(priv->ddev, frags[nr].dma,
1939     + frags[nr].page_size, frag_info->dma_dir);
1940     + else
1941     + dma_sync_single_for_cpu(priv->ddev, dma,
1942     + frag_info->frag_size,
1943     + DMA_FROM_DEVICE);
1944    
1945     /* Save page reference in skb */
1946     __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
1947     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
1948     index ec2ea56f7933..fdbd35954d15 100644
1949     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
1950     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
1951     @@ -304,9 +304,6 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
1952     writeVal = 0x00000000;
1953     if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
1954     writeVal = writeVal - 0x06060606;
1955     - else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
1956     - TXHIGHPWRLEVEL_BT2)
1957     - writeVal = writeVal;
1958     *(p_outwriteval + rf) = writeVal;
1959     }
1960     }
1961     diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
1962     index 056845bdf67b..bedce3453dd3 100644
1963     --- a/drivers/pinctrl/qcom/pinctrl-msm.c
1964     +++ b/drivers/pinctrl/qcom/pinctrl-msm.c
1965     @@ -790,7 +790,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
1966     return -EINVAL;
1967    
1968     chip = &pctrl->chip;
1969     - chip->base = -1;
1970     + chip->base = 0;
1971     chip->ngpio = ngpio;
1972     chip->label = dev_name(pctrl->dev);
1973     chip->parent = pctrl->dev;
1974     diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
1975     index f9a245465fd0..6a25bfd4541e 100644
1976     --- a/drivers/platform/chrome/cros_ec_lpc.c
1977     +++ b/drivers/platform/chrome/cros_ec_lpc.c
1978     @@ -49,7 +49,6 @@ static int ec_response_timed_out(void)
1979     static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
1980     struct cros_ec_command *msg)
1981     {
1982     - struct ec_host_request *request;
1983     struct ec_host_response response;
1984     u8 sum = 0;
1985     int i;
1986     @@ -62,8 +61,6 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
1987     for (i = 0; i < ret; i++)
1988     outb(ec->dout[i], EC_LPC_ADDR_HOST_PACKET + i);
1989    
1990     - request = (struct ec_host_request *)ec->dout;
1991     -
1992     /* Here we go */
1993     outb(EC_COMMAND_PROTOCOL_3, EC_LPC_ADDR_HOST_CMD);
1994    
1995     diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
1996     index e3cd3ece4412..c3d1891d2d3f 100644
1997     --- a/drivers/scsi/scsi_transport_srp.c
1998     +++ b/drivers/scsi/scsi_transport_srp.c
1999     @@ -52,6 +52,8 @@ struct srp_internal {
2000     struct transport_container rport_attr_cont;
2001     };
2002    
2003     +static int scsi_is_srp_rport(const struct device *dev);
2004     +
2005     #define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
2006    
2007     #define dev_to_rport(d) container_of(d, struct srp_rport, dev)
2008     @@ -61,9 +63,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
2009     return dev_to_shost(r->dev.parent);
2010     }
2011    
2012     +static int find_child_rport(struct device *dev, void *data)
2013     +{
2014     + struct device **child = data;
2015     +
2016     + if (scsi_is_srp_rport(dev)) {
2017     + WARN_ON_ONCE(*child);
2018     + *child = dev;
2019     + }
2020     + return 0;
2021     +}
2022     +
2023     static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
2024     {
2025     - return transport_class_to_srp_rport(&shost->shost_gendev);
2026     + struct device *child = NULL;
2027     +
2028     + WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
2029     + find_child_rport) < 0);
2030     + return child ? dev_to_rport(child) : NULL;
2031     }
2032    
2033     /**
2034     @@ -637,7 +654,8 @@ static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
2035     struct srp_rport *rport = shost_to_rport(shost);
2036    
2037     pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
2038     - return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 &&
2039     + return rport && rport->fast_io_fail_tmo < 0 &&
2040     + rport->dev_loss_tmo < 0 &&
2041     i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
2042     BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
2043     }
2044     diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
2045     index 845b874e2977..5bb2316f60bf 100644
2046     --- a/drivers/scsi/ufs/ufs.h
2047     +++ b/drivers/scsi/ufs/ufs.h
2048     @@ -145,7 +145,7 @@ enum attr_idn {
2049     /* Descriptor idn for Query requests */
2050     enum desc_idn {
2051     QUERY_DESC_IDN_DEVICE = 0x0,
2052     - QUERY_DESC_IDN_CONFIGURAION = 0x1,
2053     + QUERY_DESC_IDN_CONFIGURATION = 0x1,
2054     QUERY_DESC_IDN_UNIT = 0x2,
2055     QUERY_DESC_IDN_RFU_0 = 0x3,
2056     QUERY_DESC_IDN_INTERCONNECT = 0x4,
2057     @@ -161,19 +161,13 @@ enum desc_header_offset {
2058     QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
2059     };
2060    
2061     -enum ufs_desc_max_size {
2062     - QUERY_DESC_DEVICE_MAX_SIZE = 0x1F,
2063     - QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
2064     - QUERY_DESC_UNIT_MAX_SIZE = 0x23,
2065     - QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
2066     - /*
2067     - * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
2068     - * of descriptor header.
2069     - */
2070     - QUERY_DESC_STRING_MAX_SIZE = 0xFE,
2071     - QUERY_DESC_GEOMETRY_MAX_SIZE = 0x44,
2072     - QUERY_DESC_POWER_MAX_SIZE = 0x62,
2073     - QUERY_DESC_RFU_MAX_SIZE = 0x00,
2074     +enum ufs_desc_def_size {
2075     + QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
2076     + QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
2077     + QUERY_DESC_UNIT_DEF_SIZE = 0x23,
2078     + QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
2079     + QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
2080     + QUERY_DESC_POWER_DEF_SIZE = 0x62,
2081     };
2082    
2083     /* Unit descriptor parameters offsets in bytes*/
2084     @@ -522,4 +516,16 @@ struct ufs_dev_info {
2085     bool is_lu_power_on_wp;
2086     };
2087    
2088     +#define MAX_MODEL_LEN 16
2089     +/**
2090     + * ufs_dev_desc - ufs device details from the device descriptor
2091     + *
2092     + * @wmanufacturerid: card details
2093     + * @model: card model
2094     + */
2095     +struct ufs_dev_desc {
2096     + u16 wmanufacturerid;
2097     + char model[MAX_MODEL_LEN + 1];
2098     +};
2099     +
2100     #endif /* End of Header */
2101     diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
2102     index 08b799d4efcc..71f73d1d1ad1 100644
2103     --- a/drivers/scsi/ufs/ufs_quirks.h
2104     +++ b/drivers/scsi/ufs/ufs_quirks.h
2105     @@ -21,41 +21,28 @@
2106     #define UFS_ANY_VENDOR 0xFFFF
2107     #define UFS_ANY_MODEL "ANY_MODEL"
2108    
2109     -#define MAX_MODEL_LEN 16
2110     -
2111     #define UFS_VENDOR_TOSHIBA 0x198
2112     #define UFS_VENDOR_SAMSUNG 0x1CE
2113     #define UFS_VENDOR_SKHYNIX 0x1AD
2114    
2115     -/**
2116     - * ufs_device_info - ufs device details
2117     - * @wmanufacturerid: card details
2118     - * @model: card model
2119     - */
2120     -struct ufs_device_info {
2121     - u16 wmanufacturerid;
2122     - char model[MAX_MODEL_LEN + 1];
2123     -};
2124     -
2125     /**
2126     * ufs_dev_fix - ufs device quirk info
2127     * @card: ufs card details
2128     * @quirk: device quirk
2129     */
2130     struct ufs_dev_fix {
2131     - struct ufs_device_info card;
2132     + struct ufs_dev_desc card;
2133     unsigned int quirk;
2134     };
2135    
2136     #define END_FIX { { 0 }, 0 }
2137    
2138     /* add specific device quirk */
2139     -#define UFS_FIX(_vendor, _model, _quirk) \
2140     - { \
2141     - .card.wmanufacturerid = (_vendor),\
2142     - .card.model = (_model), \
2143     - .quirk = (_quirk), \
2144     - }
2145     +#define UFS_FIX(_vendor, _model, _quirk) { \
2146     + .card.wmanufacturerid = (_vendor),\
2147     + .card.model = (_model), \
2148     + .quirk = (_quirk), \
2149     +}
2150    
2151     /*
2152     * If UFS device is having issue in processing LCC (Line Control
2153     @@ -144,7 +131,4 @@ struct ufs_dev_fix {
2154     */
2155     #define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
2156    
2157     -struct ufs_hba;
2158     -void ufs_advertise_fixup_device(struct ufs_hba *hba);
2159     -
2160     #endif /* UFS_QUIRKS_H_ */
2161     diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
2162     index 98a7111dd53f..86a3110c6d75 100644
2163     --- a/drivers/scsi/ufs/ufshcd.c
2164     +++ b/drivers/scsi/ufs/ufshcd.c
2165     @@ -98,19 +98,6 @@
2166     _ret; \
2167     })
2168    
2169     -static u32 ufs_query_desc_max_size[] = {
2170     - QUERY_DESC_DEVICE_MAX_SIZE,
2171     - QUERY_DESC_CONFIGURAION_MAX_SIZE,
2172     - QUERY_DESC_UNIT_MAX_SIZE,
2173     - QUERY_DESC_RFU_MAX_SIZE,
2174     - QUERY_DESC_INTERCONNECT_MAX_SIZE,
2175     - QUERY_DESC_STRING_MAX_SIZE,
2176     - QUERY_DESC_RFU_MAX_SIZE,
2177     - QUERY_DESC_GEOMETRY_MAX_SIZE,
2178     - QUERY_DESC_POWER_MAX_SIZE,
2179     - QUERY_DESC_RFU_MAX_SIZE,
2180     -};
2181     -
2182     enum {
2183     UFSHCD_MAX_CHANNEL = 0,
2184     UFSHCD_MAX_ID = 1,
2185     @@ -1961,7 +1948,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
2186     goto out;
2187     }
2188    
2189     - if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2190     + if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2191     dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2192     __func__, *buf_len);
2193     err = -EINVAL;
2194     @@ -2040,6 +2027,92 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2195     }
2196     EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
2197    
2198     +/**
2199     + * ufshcd_read_desc_length - read the specified descriptor length from header
2200     + * @hba: Pointer to adapter instance
2201     + * @desc_id: descriptor idn value
2202     + * @desc_index: descriptor index
2203     + * @desc_length: pointer to variable to read the length of descriptor
2204     + *
2205     + * Return 0 in case of success, non-zero otherwise
2206     + */
2207     +static int ufshcd_read_desc_length(struct ufs_hba *hba,
2208     + enum desc_idn desc_id,
2209     + int desc_index,
2210     + int *desc_length)
2211     +{
2212     + int ret;
2213     + u8 header[QUERY_DESC_HDR_SIZE];
2214     + int header_len = QUERY_DESC_HDR_SIZE;
2215     +
2216     + if (desc_id >= QUERY_DESC_IDN_MAX)
2217     + return -EINVAL;
2218     +
2219     + ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2220     + desc_id, desc_index, 0, header,
2221     + &header_len);
2222     +
2223     + if (ret) {
2224     + dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
2225     + __func__, desc_id);
2226     + return ret;
2227     + } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
2228     + dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
2229     + __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
2230     + desc_id);
2231     + ret = -EINVAL;
2232     + }
2233     +
2234     + *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
2235     + return ret;
2236     +
2237     +}
2238     +
2239     +/**
2240     + * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
2241     + * @hba: Pointer to adapter instance
2242     + * @desc_id: descriptor idn value
2243     + * @desc_len: mapped desc length (out)
2244     + *
2245     + * Return 0 in case of success, non-zero otherwise
2246     + */
2247     +int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
2248     + enum desc_idn desc_id, int *desc_len)
2249     +{
2250     + switch (desc_id) {
2251     + case QUERY_DESC_IDN_DEVICE:
2252     + *desc_len = hba->desc_size.dev_desc;
2253     + break;
2254     + case QUERY_DESC_IDN_POWER:
2255     + *desc_len = hba->desc_size.pwr_desc;
2256     + break;
2257     + case QUERY_DESC_IDN_GEOMETRY:
2258     + *desc_len = hba->desc_size.geom_desc;
2259     + break;
2260     + case QUERY_DESC_IDN_CONFIGURATION:
2261     + *desc_len = hba->desc_size.conf_desc;
2262     + break;
2263     + case QUERY_DESC_IDN_UNIT:
2264     + *desc_len = hba->desc_size.unit_desc;
2265     + break;
2266     + case QUERY_DESC_IDN_INTERCONNECT:
2267     + *desc_len = hba->desc_size.interc_desc;
2268     + break;
2269     + case QUERY_DESC_IDN_STRING:
2270     + *desc_len = QUERY_DESC_MAX_SIZE;
2271     + break;
2272     + case QUERY_DESC_IDN_RFU_0:
2273     + case QUERY_DESC_IDN_RFU_1:
2274     + *desc_len = 0;
2275     + break;
2276     + default:
2277     + *desc_len = 0;
2278     + return -EINVAL;
2279     + }
2280     + return 0;
2281     +}
2282     +EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
2283     +
2284     /**
2285     * ufshcd_read_desc_param - read the specified descriptor parameter
2286     * @hba: Pointer to adapter instance
2287     @@ -2054,50 +2127,64 @@ EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
2288     static int ufshcd_read_desc_param(struct ufs_hba *hba,
2289     enum desc_idn desc_id,
2290     int desc_index,
2291     - u32 param_offset,
2292     + u8 param_offset,
2293     u8 *param_read_buf,
2294     - u32 param_size)
2295     + u8 param_size)
2296     {
2297     int ret;
2298     u8 *desc_buf;
2299     - u32 buff_len;
2300     + int buff_len;
2301     bool is_kmalloc = true;
2302    
2303     - /* safety checks */
2304     - if (desc_id >= QUERY_DESC_IDN_MAX)
2305     + /* Safety check */
2306     + if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
2307     return -EINVAL;
2308    
2309     - buff_len = ufs_query_desc_max_size[desc_id];
2310     - if ((param_offset + param_size) > buff_len)
2311     - return -EINVAL;
2312     + /* Get the max length of descriptor from structure filled up at probe
2313     + * time.
2314     + */
2315     + ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
2316    
2317     - if (!param_offset && (param_size == buff_len)) {
2318     - /* memory space already available to hold full descriptor */
2319     - desc_buf = param_read_buf;
2320     - is_kmalloc = false;
2321     - } else {
2322     - /* allocate memory to hold full descriptor */
2323     + /* Sanity checks */
2324     + if (ret || !buff_len) {
2325     + dev_err(hba->dev, "%s: Failed to get full descriptor length",
2326     + __func__);
2327     + return ret;
2328     + }
2329     +
2330     + /* Check whether we need temp memory */
2331     + if (param_offset != 0 || param_size < buff_len) {
2332     desc_buf = kmalloc(buff_len, GFP_KERNEL);
2333     if (!desc_buf)
2334     return -ENOMEM;
2335     + } else {
2336     + desc_buf = param_read_buf;
2337     + is_kmalloc = false;
2338     }
2339    
2340     + /* Request for full descriptor */
2341     ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2342     - desc_id, desc_index, 0, desc_buf,
2343     - &buff_len);
2344     -
2345     - if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
2346     - (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
2347     - ufs_query_desc_max_size[desc_id])
2348     - || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
2349     - dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
2350     - __func__, desc_id, param_offset, buff_len, ret);
2351     - if (!ret)
2352     - ret = -EINVAL;
2353     + desc_id, desc_index, 0,
2354     + desc_buf, &buff_len);
2355     +
2356     + if (ret) {
2357     + dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
2358     + __func__, desc_id, desc_index, param_offset, ret);
2359     + goto out;
2360     + }
2361    
2362     + /* Sanity check */
2363     + if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
2364     + dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
2365     + __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
2366     + ret = -EINVAL;
2367     goto out;
2368     }
2369    
2370     + /* Check whether we will not copy more data than available */
2371     + if (is_kmalloc && param_size > buff_len)
2372     + param_size = buff_len;
2373     +
2374     if (is_kmalloc)
2375     memcpy(param_read_buf, &desc_buf[param_offset], param_size);
2376     out:
2377     @@ -4789,8 +4876,8 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
2378     static void ufshcd_init_icc_levels(struct ufs_hba *hba)
2379     {
2380     int ret;
2381     - int buff_len = QUERY_DESC_POWER_MAX_SIZE;
2382     - u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
2383     + int buff_len = hba->desc_size.pwr_desc;
2384     + u8 desc_buf[hba->desc_size.pwr_desc];
2385    
2386     ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
2387     if (ret) {
2388     @@ -4883,16 +4970,15 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
2389     return ret;
2390     }
2391    
2392     -static int ufs_get_device_info(struct ufs_hba *hba,
2393     - struct ufs_device_info *card_data)
2394     +static int ufs_get_device_desc(struct ufs_hba *hba,
2395     + struct ufs_dev_desc *dev_desc)
2396     {
2397     int err;
2398     u8 model_index;
2399     - u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
2400     - u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
2401     + u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
2402     + u8 desc_buf[hba->desc_size.dev_desc];
2403    
2404     - err = ufshcd_read_device_desc(hba, desc_buf,
2405     - QUERY_DESC_DEVICE_MAX_SIZE);
2406     + err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
2407     if (err) {
2408     dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
2409     __func__, err);
2410     @@ -4903,50 +4989,40 @@ static int ufs_get_device_info(struct ufs_hba *hba,
2411     * getting vendor (manufacturerID) and Bank Index in big endian
2412     * format
2413     */
2414     - card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
2415     + dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
2416     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
2417    
2418     model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
2419    
2420     err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
2421     - QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
2422     + QUERY_DESC_MAX_SIZE, ASCII_STD);
2423     if (err) {
2424     dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
2425     __func__, err);
2426     goto out;
2427     }
2428    
2429     - str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
2430     - strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
2431     + str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
2432     + strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
2433     min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
2434     MAX_MODEL_LEN));
2435    
2436     /* Null terminate the model string */
2437     - card_data->model[MAX_MODEL_LEN] = '\0';
2438     + dev_desc->model[MAX_MODEL_LEN] = '\0';
2439    
2440     out:
2441     return err;
2442     }
2443    
2444     -void ufs_advertise_fixup_device(struct ufs_hba *hba)
2445     +static void ufs_fixup_device_setup(struct ufs_hba *hba,
2446     + struct ufs_dev_desc *dev_desc)
2447     {
2448     - int err;
2449     struct ufs_dev_fix *f;
2450     - struct ufs_device_info card_data;
2451     -
2452     - card_data.wmanufacturerid = 0;
2453     -
2454     - err = ufs_get_device_info(hba, &card_data);
2455     - if (err) {
2456     - dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
2457     - __func__, err);
2458     - return;
2459     - }
2460    
2461     for (f = ufs_fixups; f->quirk; f++) {
2462     - if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
2463     - (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
2464     - (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
2465     + if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
2466     + f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
2467     + (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
2468     !strcmp(f->card.model, UFS_ANY_MODEL)))
2469     hba->dev_quirks |= f->quirk;
2470     }
2471     @@ -5116,6 +5192,51 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
2472     ufshcd_vops_apply_dev_quirks(hba);
2473     }
2474    
2475     +static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
2476     +{
2477     + int err;
2478     +
2479     + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
2480     + &hba->desc_size.dev_desc);
2481     + if (err)
2482     + hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
2483     +
2484     + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
2485     + &hba->desc_size.pwr_desc);
2486     + if (err)
2487     + hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
2488     +
2489     + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
2490     + &hba->desc_size.interc_desc);
2491     + if (err)
2492     + hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
2493     +
2494     + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
2495     + &hba->desc_size.conf_desc);
2496     + if (err)
2497     + hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
2498     +
2499     + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
2500     + &hba->desc_size.unit_desc);
2501     + if (err)
2502     + hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
2503     +
2504     + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
2505     + &hba->desc_size.geom_desc);
2506     + if (err)
2507     + hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
2508     +}
2509     +
2510     +static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
2511     +{
2512     + hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
2513     + hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
2514     + hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
2515     + hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
2516     + hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
2517     + hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
2518     +}
2519     +
2520     /**
2521     * ufshcd_probe_hba - probe hba to detect device and initialize
2522     * @hba: per-adapter instance
2523     @@ -5124,6 +5245,7 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
2524     */
2525     static int ufshcd_probe_hba(struct ufs_hba *hba)
2526     {
2527     + struct ufs_dev_desc card = {0};
2528     int ret;
2529    
2530     ret = ufshcd_link_startup(hba);
2531     @@ -5147,7 +5269,17 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
2532     if (ret)
2533     goto out;
2534    
2535     - ufs_advertise_fixup_device(hba);
2536     + /* Init check for device descriptor sizes */
2537     + ufshcd_init_desc_sizes(hba);
2538     +
2539     + ret = ufs_get_device_desc(hba, &card);
2540     + if (ret) {
2541     + dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
2542     + __func__, ret);
2543     + goto out;
2544     + }
2545     +
2546     + ufs_fixup_device_setup(hba, &card);
2547     ufshcd_tune_unipro_params(hba);
2548    
2549     ret = ufshcd_set_vccq_rail_unused(hba,
2550     @@ -5173,6 +5305,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
2551    
2552     /* set the state as operational after switching to desired gear */
2553     hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
2554     +
2555     /*
2556     * If we are in error handling context or in power management callbacks
2557     * context, no need to scan the host
2558     @@ -6549,6 +6682,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
2559     hba->mmio_base = mmio_base;
2560     hba->irq = irq;
2561    
2562     + /* Set descriptor lengths to specification defaults */
2563     + ufshcd_def_desc_sizes(hba);
2564     +
2565     err = ufshcd_hba_init(hba);
2566     if (err)
2567     goto out_error;
2568     diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
2569     index f2170d5058a8..6dbd2e176333 100644
2570     --- a/drivers/scsi/ufs/ufshcd.h
2571     +++ b/drivers/scsi/ufs/ufshcd.h
2572     @@ -205,6 +205,15 @@ struct ufs_dev_cmd {
2573     struct ufs_query query;
2574     };
2575    
2576     +struct ufs_desc_size {
2577     + int dev_desc;
2578     + int pwr_desc;
2579     + int geom_desc;
2580     + int interc_desc;
2581     + int unit_desc;
2582     + int conf_desc;
2583     +};
2584     +
2585     /**
2586     * struct ufs_clk_info - UFS clock related info
2587     * @list: list headed by hba->clk_list_head
2588     @@ -388,6 +397,7 @@ struct ufs_init_prefetch {
2589     * @clk_list_head: UFS host controller clocks list node head
2590     * @pwr_info: holds current power mode
2591     * @max_pwr_info: keeps the device max valid pwm
2592     + * @desc_size: descriptor sizes reported by device
2593     * @urgent_bkops_lvl: keeps track of urgent bkops level for device
2594     * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
2595     * device is known or not.
2596     @@ -563,6 +573,8 @@ struct ufs_hba {
2597    
2598     enum bkops_status urgent_bkops_lvl;
2599     bool is_urgent_bkops_lvl_checked;
2600     +
2601     + struct ufs_desc_size desc_size;
2602     };
2603    
2604     /* Returns true if clocks can be gated. Otherwise false */
2605     @@ -736,6 +748,10 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2606     enum flag_idn idn, bool *flag_res);
2607     int ufshcd_hold(struct ufs_hba *hba, bool async);
2608     void ufshcd_release(struct ufs_hba *hba);
2609     +
2610     +int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
2611     + int *desc_length);
2612     +
2613     u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
2614    
2615     /* Wrapper functions for safely calling variant operations */
2616     diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
2617     index b42d7f1c9089..6b1863293fe1 100644
2618     --- a/drivers/tty/serial/amba-pl011.c
2619     +++ b/drivers/tty/serial/amba-pl011.c
2620     @@ -2320,12 +2320,67 @@ static int __init pl011_console_setup(struct console *co, char *options)
2621     return uart_set_options(&uap->port, co, baud, parity, bits, flow);
2622     }
2623    
2624     +/**
2625     + * pl011_console_match - non-standard console matching
2626     + * @co: registering console
2627     + * @name: name from console command line
2628     + * @idx: index from console command line
2629     + * @options: ptr to option string from console command line
2630     + *
2631     + * Only attempts to match console command lines of the form:
2632     + * console=pl011,mmio|mmio32,<addr>[,<options>]
2633     + * console=pl011,0x<addr>[,<options>]
2634     + * This form is used to register an initial earlycon boot console and
2635     + * replace it with the amba_console at pl011 driver init.
2636     + *
2637     + * Performs console setup for a match (as required by interface)
2638     + * If no <options> are specified, then assume the h/w is already setup.
2639     + *
2640     + * Returns 0 if console matches; otherwise non-zero to use default matching
2641     + */
2642     +static int __init pl011_console_match(struct console *co, char *name, int idx,
2643     + char *options)
2644     +{
2645     + unsigned char iotype;
2646     + resource_size_t addr;
2647     + int i;
2648     +
2649     + if (strcmp(name, "pl011") != 0)
2650     + return -ENODEV;
2651     +
2652     + if (uart_parse_earlycon(options, &iotype, &addr, &options))
2653     + return -ENODEV;
2654     +
2655     + if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
2656     + return -ENODEV;
2657     +
2658     + /* try to match the port specified on the command line */
2659     + for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
2660     + struct uart_port *port;
2661     +
2662     + if (!amba_ports[i])
2663     + continue;
2664     +
2665     + port = &amba_ports[i]->port;
2666     +
2667     + if (port->mapbase != addr)
2668     + continue;
2669     +
2670     + co->index = i;
2671     + port->cons = co;
2672     + return pl011_console_setup(co, options);
2673     + }
2674     +
2675     + return -ENODEV;
2676     +}
2677     +
2678     static struct uart_driver amba_reg;
2679     static struct console amba_console = {
2680     .name = "ttyAMA",
2681     .write = pl011_console_write,
2682     .device = uart_console_device,
2683     .setup = pl011_console_setup,
2684     + .match = pl011_console_match,
2685     .flags = CON_PRINTBUFFER,
2686     .index = -1,
2687     .data = &amba_reg,
2688     diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2689     index d98531823998..46b4dea7a0ec 100644
2690     --- a/drivers/usb/serial/cp210x.c
2691     +++ b/drivers/usb/serial/cp210x.c
2692     @@ -33,7 +33,7 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *);
2693     static void cp210x_close(struct usb_serial_port *);
2694     static void cp210x_get_termios(struct tty_struct *, struct usb_serial_port *);
2695     static void cp210x_get_termios_port(struct usb_serial_port *port,
2696     - unsigned int *cflagp, unsigned int *baudp);
2697     + tcflag_t *cflagp, unsigned int *baudp);
2698     static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *,
2699     struct ktermios *);
2700     static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
2701     @@ -728,7 +728,7 @@ static void cp210x_get_termios(struct tty_struct *tty,
2702     &tty->termios.c_cflag, &baud);
2703     tty_encode_baud_rate(tty, baud, baud);
2704     } else {
2705     - unsigned int cflag;
2706     + tcflag_t cflag;
2707     cflag = 0;
2708     cp210x_get_termios_port(port, &cflag, &baud);
2709     }
2710     @@ -739,10 +739,10 @@ static void cp210x_get_termios(struct tty_struct *tty,
2711     * This is the heart of cp210x_get_termios which always uses a &usb_serial_port.
2712     */
2713     static void cp210x_get_termios_port(struct usb_serial_port *port,
2714     - unsigned int *cflagp, unsigned int *baudp)
2715     + tcflag_t *cflagp, unsigned int *baudp)
2716     {
2717     struct device *dev = &port->dev;
2718     - unsigned int cflag;
2719     + tcflag_t cflag;
2720     struct cp210x_flow_ctl flow_ctl;
2721     u32 baud;
2722     u16 bits;
2723     diff --git a/fs/aio.c b/fs/aio.c
2724     index 42d8c09311d1..b1170a7affe2 100644
2725     --- a/fs/aio.c
2726     +++ b/fs/aio.c
2727     @@ -636,9 +636,8 @@ static void free_ioctx_users(struct percpu_ref *ref)
2728     while (!list_empty(&ctx->active_reqs)) {
2729     req = list_first_entry(&ctx->active_reqs,
2730     struct aio_kiocb, ki_list);
2731     -
2732     - list_del_init(&req->ki_list);
2733     kiocb_cancel(req);
2734     + list_del_init(&req->ki_list);
2735     }
2736    
2737     spin_unlock_irq(&ctx->ctx_lock);
2738     diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
2739     index c3702cda010a..e567551402a6 100644
2740     --- a/fs/xfs/libxfs/xfs_alloc.c
2741     +++ b/fs/xfs/libxfs/xfs_alloc.c
2742     @@ -2034,6 +2034,93 @@ xfs_alloc_space_available(
2743     return true;
2744     }
2745    
2746     +/*
2747     + * Check the agfl fields of the agf for inconsistency or corruption. The purpose
2748     + * is to detect an agfl header padding mismatch between current and early v5
2749     + * kernels. This problem manifests as a 1-slot size difference between the
2750     + * on-disk flcount and the active [first, last] range of a wrapped agfl. This
2751     + * may also catch variants of agfl count corruption unrelated to padding. Either
2752     + * way, we'll reset the agfl and warn the user.
2753     + *
2754     + * Return true if a reset is required before the agfl can be used, false
2755     + * otherwise.
2756     + */
2757     +static bool
2758     +xfs_agfl_needs_reset(
2759     + struct xfs_mount *mp,
2760     + struct xfs_agf *agf)
2761     +{
2762     + uint32_t f = be32_to_cpu(agf->agf_flfirst);
2763     + uint32_t l = be32_to_cpu(agf->agf_fllast);
2764     + uint32_t c = be32_to_cpu(agf->agf_flcount);
2765     + int agfl_size = XFS_AGFL_SIZE(mp);
2766     + int active;
2767     +
2768     + /* no agfl header on v4 supers */
2769     + if (!xfs_sb_version_hascrc(&mp->m_sb))
2770     + return false;
2771     +
2772     + /*
2773     + * The agf read verifier catches severe corruption of these fields.
2774     + * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2775     + * the verifier allows it.
2776     + */
2777     + if (f >= agfl_size || l >= agfl_size)
2778     + return true;
2779     + if (c > agfl_size)
2780     + return true;
2781     +
2782     + /*
2783     + * Check consistency between the on-disk count and the active range. An
2784     + * agfl padding mismatch manifests as an inconsistent flcount.
2785     + */
2786     + if (c && l >= f)
2787     + active = l - f + 1;
2788     + else if (c)
2789     + active = agfl_size - f + l + 1;
2790     + else
2791     + active = 0;
2792     +
2793     + return active != c;
2794     +}
2795     +
2796     +/*
2797     + * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2798     + * agfl content cannot be trusted. Warn the user that a repair is required to
2799     + * recover leaked blocks.
2800     + *
2801     + * The purpose of this mechanism is to handle filesystems affected by the agfl
2802     + * header padding mismatch problem. A reset keeps the filesystem online with a
2803     + * relatively minor free space accounting inconsistency rather than suffer the
2804     + * inevitable crash from use of an invalid agfl block.
2805     + */
2806     +static void
2807     +xfs_agfl_reset(
2808     + struct xfs_trans *tp,
2809     + struct xfs_buf *agbp,
2810     + struct xfs_perag *pag)
2811     +{
2812     + struct xfs_mount *mp = tp->t_mountp;
2813     + struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
2814     +
2815     + ASSERT(pag->pagf_agflreset);
2816     + trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2817     +
2818     + xfs_warn(mp,
2819     + "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2820     + "Please unmount and run xfs_repair.",
2821     + pag->pag_agno, pag->pagf_flcount);
2822     +
2823     + agf->agf_flfirst = 0;
2824     + agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
2825     + agf->agf_flcount = 0;
2826     + xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2827     + XFS_AGF_FLCOUNT);
2828     +
2829     + pag->pagf_flcount = 0;
2830     + pag->pagf_agflreset = false;
2831     +}
2832     +
2833     /*
2834     * Decide whether to use this allocation group for this allocation.
2835     * If so, fix up the btree freelist's size.
2836     @@ -2095,6 +2182,10 @@ xfs_alloc_fix_freelist(
2837     }
2838     }
2839    
2840     + /* reset a padding mismatched agfl before final free space check */
2841     + if (pag->pagf_agflreset)
2842     + xfs_agfl_reset(tp, agbp, pag);
2843     +
2844     /* If there isn't enough total space or single-extent, reject it. */
2845     need = xfs_alloc_min_freelist(mp, pag);
2846     if (!xfs_alloc_space_available(args, need, flags))
2847     @@ -2251,6 +2342,7 @@ xfs_alloc_get_freelist(
2848     agf->agf_flfirst = 0;
2849    
2850     pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2851     + ASSERT(!pag->pagf_agflreset);
2852     be32_add_cpu(&agf->agf_flcount, -1);
2853     xfs_trans_agflist_delta(tp, -1);
2854     pag->pagf_flcount--;
2855     @@ -2362,6 +2454,7 @@ xfs_alloc_put_freelist(
2856     agf->agf_fllast = 0;
2857    
2858     pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2859     + ASSERT(!pag->pagf_agflreset);
2860     be32_add_cpu(&agf->agf_flcount, 1);
2861     xfs_trans_agflist_delta(tp, 1);
2862     pag->pagf_flcount++;
2863     @@ -2568,6 +2661,7 @@ xfs_alloc_read_agf(
2864     pag->pagb_count = 0;
2865     pag->pagb_tree = RB_ROOT;
2866     pag->pagf_init = 1;
2867     + pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
2868     }
2869     #ifdef DEBUG
2870     else if (!XFS_FORCED_SHUTDOWN(mp)) {
2871     diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
2872     index 5415f9031ef8..7cb099e1c84c 100644
2873     --- a/fs/xfs/xfs_mount.h
2874     +++ b/fs/xfs/xfs_mount.h
2875     @@ -368,6 +368,7 @@ typedef struct xfs_perag {
2876     char pagi_inodeok; /* The agi is ok for inodes */
2877     __uint8_t pagf_levels[XFS_BTNUM_AGF];
2878     /* # of levels in bno & cnt btree */
2879     + bool pagf_agflreset; /* agfl requires reset before use */
2880     __uint32_t pagf_flcount; /* count of blocks in freelist */
2881     xfs_extlen_t pagf_freeblks; /* total free blocks */
2882     xfs_extlen_t pagf_longest; /* longest free space */
2883     diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
2884     index bdf69e1c7410..42a7c0da898f 100644
2885     --- a/fs/xfs/xfs_trace.h
2886     +++ b/fs/xfs/xfs_trace.h
2887     @@ -1516,7 +1516,7 @@ TRACE_EVENT(xfs_trans_commit_lsn,
2888     __entry->lsn)
2889     );
2890    
2891     -TRACE_EVENT(xfs_agf,
2892     +DECLARE_EVENT_CLASS(xfs_agf_class,
2893     TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
2894     unsigned long caller_ip),
2895     TP_ARGS(mp, agf, flags, caller_ip),
2896     @@ -1572,6 +1572,13 @@ TRACE_EVENT(xfs_agf,
2897     __entry->longest,
2898     (void *)__entry->caller_ip)
2899     );
2900     +#define DEFINE_AGF_EVENT(name) \
2901     +DEFINE_EVENT(xfs_agf_class, name, \
2902     + TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, \
2903     + unsigned long caller_ip), \
2904     + TP_ARGS(mp, agf, flags, caller_ip))
2905     +DEFINE_AGF_EVENT(xfs_agf);
2906     +DEFINE_AGF_EVENT(xfs_agfl_reset);
2907    
2908     TRACE_EVENT(xfs_free_extent,
2909     TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
2910     diff --git a/include/linux/tcp.h b/include/linux/tcp.h
2911     index f50b717ce644..d0c3615f9050 100644
2912     --- a/include/linux/tcp.h
2913     +++ b/include/linux/tcp.h
2914     @@ -337,7 +337,7 @@ struct tcp_sock {
2915    
2916     /* Receiver queue space */
2917     struct {
2918     - int space;
2919     + u32 space;
2920     u32 seq;
2921     u32 time;
2922     } rcvq_space;
2923     diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
2924     index 7290eacc8cee..b902f106f69a 100644
2925     --- a/include/uapi/linux/nl80211.h
2926     +++ b/include/uapi/linux/nl80211.h
2927     @@ -2379,7 +2379,7 @@ enum nl80211_attrs {
2928     #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
2929     #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
2930    
2931     -#define NL80211_WIPHY_NAME_MAXLEN 128
2932     +#define NL80211_WIPHY_NAME_MAXLEN 64
2933    
2934     #define NL80211_MAX_SUPP_RATES 32
2935     #define NL80211_MAX_SUPP_HT_RATES 77
2936     diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
2937     index 6721a1e89f39..88f398af57fa 100644
2938     --- a/kernel/trace/trace_events_trigger.c
2939     +++ b/kernel/trace/trace_events_trigger.c
2940     @@ -481,9 +481,10 @@ clear_event_triggers(struct trace_array *tr)
2941     struct trace_event_file *file;
2942    
2943     list_for_each_entry(file, &tr->events, list) {
2944     - struct event_trigger_data *data;
2945     - list_for_each_entry_rcu(data, &file->triggers, list) {
2946     + struct event_trigger_data *data, *n;
2947     + list_for_each_entry_safe(data, n, &file->triggers, list) {
2948     trace_event_trigger_enable_disable(file, 0);
2949     + list_del_rcu(&data->list);
2950     if (data->ops->free)
2951     data->ops->free(data->ops, data);
2952     }
2953     diff --git a/mm/vmscan.c b/mm/vmscan.c
2954     index 2d4b6478237b..f03ca5ab86b1 100644
2955     --- a/mm/vmscan.c
2956     +++ b/mm/vmscan.c
2957     @@ -1393,7 +1393,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
2958     return ret;
2959    
2960     mapping = page_mapping(page);
2961     - migrate_dirty = mapping && mapping->a_ops->migratepage;
2962     + migrate_dirty = !mapping || mapping->a_ops->migratepage;
2963     unlock_page(page);
2964     if (!migrate_dirty)
2965     return ret;
2966     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2967     index 52b0a84be765..94a55b83e48c 100644
2968     --- a/net/ipv4/tcp_input.c
2969     +++ b/net/ipv4/tcp_input.c
2970     @@ -581,8 +581,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
2971     void tcp_rcv_space_adjust(struct sock *sk)
2972     {
2973     struct tcp_sock *tp = tcp_sk(sk);
2974     + u32 copied;
2975     int time;
2976     - int copied;
2977    
2978     time = tcp_time_stamp - tp->rcvq_space.time;
2979     if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
2980     @@ -604,12 +604,13 @@ void tcp_rcv_space_adjust(struct sock *sk)
2981    
2982     if (sysctl_tcp_moderate_rcvbuf &&
2983     !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
2984     - int rcvwin, rcvmem, rcvbuf;
2985     + int rcvmem, rcvbuf;
2986     + u64 rcvwin;
2987    
2988     /* minimal window to cope with packet losses, assuming
2989     * steady state. Add some cushion because of small variations.
2990     */
2991     - rcvwin = (copied << 1) + 16 * tp->advmss;
2992     + rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
2993    
2994     /* If rate increased by 25%,
2995     * assume slow start, rcvwin = 3 * copied
2996     @@ -629,7 +630,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
2997     while (tcp_win_from_space(rcvmem) < tp->advmss)
2998     rcvmem += 128;
2999    
3000     - rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
3001     + do_div(rcvwin, tp->advmss);
3002     + rcvbuf = min_t(u64, rcvwin * rcvmem, sysctl_tcp_rmem[2]);
3003     if (rcvbuf > sk->sk_rcvbuf) {
3004     sk->sk_rcvbuf = rcvbuf;
3005    
3006     diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
3007     index 7bf8b005a178..1e6f23f77f15 100644
3008     --- a/security/integrity/ima/ima_appraise.c
3009     +++ b/security/integrity/ima/ima_appraise.c
3010     @@ -389,14 +389,10 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
3011     result = ima_protect_xattr(dentry, xattr_name, xattr_value,
3012     xattr_value_len);
3013     if (result == 1) {
3014     - bool digsig;
3015     -
3016     if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
3017     return -EINVAL;
3018     - digsig = (xvalue->type == EVM_IMA_XATTR_DIGSIG);
3019     - if (!digsig && (ima_appraise & IMA_APPRAISE_ENFORCE))
3020     - return -EPERM;
3021     - ima_reset_appraise_flags(d_backing_inode(dentry), digsig);
3022     + ima_reset_appraise_flags(d_backing_inode(dentry),
3023     + (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
3024     result = 0;
3025     }
3026     return result;
3027     diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
3028     index d656b7c98394..bfc4ffa1fa1a 100644
3029     --- a/security/selinux/ss/services.c
3030     +++ b/security/selinux/ss/services.c
3031     @@ -1435,7 +1435,7 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
3032     scontext_len, &context, def_sid);
3033     if (rc == -EINVAL && force) {
3034     context.str = str;
3035     - context.len = scontext_len;
3036     + context.len = strlen(str) + 1;
3037     str = NULL;
3038     } else if (rc)
3039     goto out_unlock;
3040     diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
3041     index a086c35f91bb..79a9fdf94d38 100644
3042     --- a/sound/soc/intel/common/sst-firmware.c
3043     +++ b/sound/soc/intel/common/sst-firmware.c
3044     @@ -274,7 +274,6 @@ int sst_dma_new(struct sst_dsp *sst)
3045     struct sst_pdata *sst_pdata = sst->pdata;
3046     struct sst_dma *dma;
3047     struct resource mem;
3048     - const char *dma_dev_name;
3049     int ret = 0;
3050    
3051     if (sst->pdata->resindex_dma_base == -1)
3052     @@ -285,7 +284,6 @@ int sst_dma_new(struct sst_dsp *sst)
3053     * is attached to the ADSP IP. */
3054     switch (sst->pdata->dma_engine) {
3055     case SST_DMA_TYPE_DW:
3056     - dma_dev_name = "dw_dmac";
3057     break;
3058     default:
3059     dev_err(sst->dev, "error: invalid DMA engine %d\n",