Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0123-4.19.24-all-fixes.patch



Revision 3402
Fri Aug 2 11:47:35 2019 UTC by niro
File size: 107116 bytes
-linux-4.19.24
1 diff --git a/Documentation/devicetree/bindings/eeprom/at24.txt b/Documentation/devicetree/bindings/eeprom/at24.txt
2 index aededdbc262b..f9a7c984274c 100644
3 --- a/Documentation/devicetree/bindings/eeprom/at24.txt
4 +++ b/Documentation/devicetree/bindings/eeprom/at24.txt
5 @@ -27,6 +27,7 @@ Required properties:
6 "atmel,24c256",
7 "atmel,24c512",
8 "atmel,24c1024",
9 + "atmel,24c2048",
10
11 If <manufacturer> is not "atmel", then a fallback must be used
12 with the same <model> and "atmel" as manufacturer.
13 diff --git a/Makefile b/Makefile
14 index 3dcf3f2363c1..370ad0d34076 100644
15 --- a/Makefile
16 +++ b/Makefile
17 @@ -1,7 +1,7 @@
18 # SPDX-License-Identifier: GPL-2.0
19 VERSION = 4
20 PATCHLEVEL = 19
21 -SUBLEVEL = 23
22 +SUBLEVEL = 24
23 EXTRAVERSION =
24 NAME = "People's Front"
25
26 diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
27 index 4d17cacd1462..432402c8e47f 100644
28 --- a/arch/alpha/include/asm/irq.h
29 +++ b/arch/alpha/include/asm/irq.h
30 @@ -56,15 +56,15 @@
31
32 #elif defined(CONFIG_ALPHA_DP264) || \
33 defined(CONFIG_ALPHA_LYNX) || \
34 - defined(CONFIG_ALPHA_SHARK) || \
35 - defined(CONFIG_ALPHA_EIGER)
36 + defined(CONFIG_ALPHA_SHARK)
37 # define NR_IRQS 64
38
39 #elif defined(CONFIG_ALPHA_TITAN)
40 #define NR_IRQS 80
41
42 #elif defined(CONFIG_ALPHA_RAWHIDE) || \
43 - defined(CONFIG_ALPHA_TAKARA)
44 + defined(CONFIG_ALPHA_TAKARA) || \
45 + defined(CONFIG_ALPHA_EIGER)
46 # define NR_IRQS 128
47
48 #elif defined(CONFIG_ALPHA_WILDFIRE)
49 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
50 index d73dc473fbb9..188fc9256baf 100644
51 --- a/arch/alpha/mm/fault.c
52 +++ b/arch/alpha/mm/fault.c
53 @@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
54 /* Macro for exception fixup code to access integer registers. */
55 #define dpf_reg(r) \
56 (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
57 - (r) <= 18 ? (r)+8 : (r)-10])
58 + (r) <= 18 ? (r)+10 : (r)-10])
59
60 asmlinkage void
61 do_page_fault(unsigned long address, unsigned long mmcsr,
62 diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
63 index f9b757905845..016616cc036c 100644
64 --- a/arch/arm/boot/dts/da850-evm.dts
65 +++ b/arch/arm/boot/dts/da850-evm.dts
66 @@ -94,6 +94,28 @@
67 regulator-boot-on;
68 };
69
70 + baseboard_3v3: fixedregulator-3v3 {
71 + /* TPS73701DCQ */
72 + compatible = "regulator-fixed";
73 + regulator-name = "baseboard_3v3";
74 + regulator-min-microvolt = <3300000>;
75 + regulator-max-microvolt = <3300000>;
76 + vin-supply = <&vbat>;
77 + regulator-always-on;
78 + regulator-boot-on;
79 + };
80 +
81 + baseboard_1v8: fixedregulator-1v8 {
82 + /* TPS73701DCQ */
83 + compatible = "regulator-fixed";
84 + regulator-name = "baseboard_1v8";
85 + regulator-min-microvolt = <1800000>;
86 + regulator-max-microvolt = <1800000>;
87 + vin-supply = <&vbat>;
88 + regulator-always-on;
89 + regulator-boot-on;
90 + };
91 +
92 backlight_lcd: backlight-regulator {
93 compatible = "regulator-fixed";
94 regulator-name = "lcd_backlight_pwr";
95 @@ -105,7 +127,7 @@
96
97 sound {
98 compatible = "simple-audio-card";
99 - simple-audio-card,name = "DA850/OMAP-L138 EVM";
100 + simple-audio-card,name = "DA850-OMAPL138 EVM";
101 simple-audio-card,widgets =
102 "Line", "Line In",
103 "Line", "Line Out";
104 @@ -210,10 +232,9 @@
105
106 /* Regulators */
107 IOVDD-supply = <&vdcdc2_reg>;
108 - /* Derived from VBAT: Baseboard 3.3V / 1.8V */
109 - AVDD-supply = <&vbat>;
110 - DRVDD-supply = <&vbat>;
111 - DVDD-supply = <&vbat>;
112 + AVDD-supply = <&baseboard_3v3>;
113 + DRVDD-supply = <&baseboard_3v3>;
114 + DVDD-supply = <&baseboard_1v8>;
115 };
116 tca6416: gpio@20 {
117 compatible = "ti,tca6416";
118 diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
119 index 0177e3ed20fe..3a2fa6e035a3 100644
120 --- a/arch/arm/boot/dts/da850-lcdk.dts
121 +++ b/arch/arm/boot/dts/da850-lcdk.dts
122 @@ -39,9 +39,39 @@
123 };
124 };
125
126 + vcc_5vd: fixedregulator-vcc_5vd {
127 + compatible = "regulator-fixed";
128 + regulator-name = "vcc_5vd";
129 + regulator-min-microvolt = <5000000>;
130 + regulator-max-microvolt = <5000000>;
131 + regulator-boot-on;
132 + };
133 +
134 + vcc_3v3d: fixedregulator-vcc_3v3d {
135 + /* TPS650250 - VDCDC1 */
136 + compatible = "regulator-fixed";
137 + regulator-name = "vcc_3v3d";
138 + regulator-min-microvolt = <3300000>;
139 + regulator-max-microvolt = <3300000>;
140 + vin-supply = <&vcc_5vd>;
141 + regulator-always-on;
142 + regulator-boot-on;
143 + };
144 +
145 + vcc_1v8d: fixedregulator-vcc_1v8d {
146 + /* TPS650250 - VDCDC2 */
147 + compatible = "regulator-fixed";
148 + regulator-name = "vcc_1v8d";
149 + regulator-min-microvolt = <1800000>;
150 + regulator-max-microvolt = <1800000>;
151 + vin-supply = <&vcc_5vd>;
152 + regulator-always-on;
153 + regulator-boot-on;
154 + };
155 +
156 sound {
157 compatible = "simple-audio-card";
158 - simple-audio-card,name = "DA850/OMAP-L138 LCDK";
159 + simple-audio-card,name = "DA850-OMAPL138 LCDK";
160 simple-audio-card,widgets =
161 "Line", "Line In",
162 "Line", "Line Out";
163 @@ -221,6 +251,12 @@
164 compatible = "ti,tlv320aic3106";
165 reg = <0x18>;
166 status = "okay";
167 +
168 + /* Regulators */
169 + IOVDD-supply = <&vcc_3v3d>;
170 + AVDD-supply = <&vcc_3v3d>;
171 + DRVDD-supply = <&vcc_3v3d>;
172 + DVDD-supply = <&vcc_1v8d>;
173 };
174 };
175
176 diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
177 index cbaf06f2f78e..eb917462b219 100644
178 --- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
179 +++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
180 @@ -36,8 +36,8 @@
181 compatible = "gpio-fan";
182 pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
183 pinctrl-names = "default";
184 - gpios = <&gpio1 14 GPIO_ACTIVE_LOW
185 - &gpio1 13 GPIO_ACTIVE_LOW>;
186 + gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
187 + &gpio1 13 GPIO_ACTIVE_HIGH>;
188 gpio-fan,speed-map = <0 0
189 3000 1
190 6000 2>;
191 diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
192 index ab6f640b282b..8b8db9d8e912 100644
193 --- a/arch/arm/boot/dts/omap5-board-common.dtsi
194 +++ b/arch/arm/boot/dts/omap5-board-common.dtsi
195 @@ -317,7 +317,8 @@
196
197 palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
198 pinctrl-single,pins = <
199 - OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
200 + /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
201 + OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
202 >;
203 };
204
205 @@ -385,7 +386,8 @@
206
207 palmas: palmas@48 {
208 compatible = "ti,palmas";
209 - interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
210 + /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
211 + interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
212 reg = <0x48>;
213 interrupt-controller;
214 #interrupt-cells = <2>;
215 @@ -651,7 +653,8 @@
216 pinctrl-names = "default";
217 pinctrl-0 = <&twl6040_pins>;
218
219 - interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
220 + /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
221 + interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>;
222
223 /* audpwron gpio defined in the board specific dts */
224
225 diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
226 index 5e21fb430a65..e78d3718f145 100644
227 --- a/arch/arm/boot/dts/omap5-cm-t54.dts
228 +++ b/arch/arm/boot/dts/omap5-cm-t54.dts
229 @@ -181,6 +181,13 @@
230 OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */
231 >;
232 };
233 +
234 + palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
235 + pinctrl-single,pins = <
236 + /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
237 + OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
238 + >;
239 + };
240 };
241
242 &omap5_pmx_core {
243 @@ -414,8 +421,11 @@
244
245 palmas: palmas@48 {
246 compatible = "ti,palmas";
247 - interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
248 reg = <0x48>;
249 + pinctrl-0 = <&palmas_sys_nirq_pins>;
250 + pinctrl-names = "default";
251 + /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
252 + interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
253 interrupt-controller;
254 #interrupt-cells = <2>;
255 ti,system-power-controller;
256 diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
257 index b17ee03d280b..88286dd483ff 100644
258 --- a/arch/arm/include/asm/assembler.h
259 +++ b/arch/arm/include/asm/assembler.h
260 @@ -467,6 +467,17 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
261 #endif
262 .endm
263
264 + .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
265 +#ifdef CONFIG_CPU_SPECTRE
266 + sub \tmp, \limit, #1
267 + subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
268 + addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
269 + subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) }
270 + movlo \addr, #0 @ if (tmp < 0) addr = NULL
271 + csdb
272 +#endif
273 + .endm
274 +
275 .macro uaccess_disable, tmp, isb=1
276 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
277 /*
278 diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
279 index 0d289240b6ca..775cac3c02bb 100644
280 --- a/arch/arm/include/asm/cputype.h
281 +++ b/arch/arm/include/asm/cputype.h
282 @@ -111,6 +111,7 @@
283 #include <linux/kernel.h>
284
285 extern unsigned int processor_id;
286 +struct proc_info_list *lookup_processor(u32 midr);
287
288 #ifdef CONFIG_CPU_CP15
289 #define read_cpuid(reg) \
290 diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
291 index e25f4392e1b2..e1b6f280ab08 100644
292 --- a/arch/arm/include/asm/proc-fns.h
293 +++ b/arch/arm/include/asm/proc-fns.h
294 @@ -23,7 +23,7 @@ struct mm_struct;
295 /*
296 * Don't change this structure - ASM code relies on it.
297 */
298 -extern struct processor {
299 +struct processor {
300 /* MISC
301 * get data abort address/flags
302 */
303 @@ -79,9 +79,13 @@ extern struct processor {
304 unsigned int suspend_size;
305 void (*do_suspend)(void *);
306 void (*do_resume)(void *);
307 -} processor;
308 +};
309
310 #ifndef MULTI_CPU
311 +static inline void init_proc_vtable(const struct processor *p)
312 +{
313 +}
314 +
315 extern void cpu_proc_init(void);
316 extern void cpu_proc_fin(void);
317 extern int cpu_do_idle(void);
318 @@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
319 extern void cpu_do_suspend(void *);
320 extern void cpu_do_resume(void *);
321 #else
322 -#define cpu_proc_init processor._proc_init
323 -#define cpu_proc_fin processor._proc_fin
324 -#define cpu_reset processor.reset
325 -#define cpu_do_idle processor._do_idle
326 -#define cpu_dcache_clean_area processor.dcache_clean_area
327 -#define cpu_set_pte_ext processor.set_pte_ext
328 -#define cpu_do_switch_mm processor.switch_mm
329
330 -/* These three are private to arch/arm/kernel/suspend.c */
331 -#define cpu_do_suspend processor.do_suspend
332 -#define cpu_do_resume processor.do_resume
333 +extern struct processor processor;
334 +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
335 +#include <linux/smp.h>
336 +/*
337 + * This can't be a per-cpu variable because we need to access it before
338 + * per-cpu has been initialised. We have a couple of functions that are
339 + * called in a pre-emptible context, and so can't use smp_processor_id()
340 + * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
341 + * function pointers for these are identical across all CPUs.
342 + */
343 +extern struct processor *cpu_vtable[];
344 +#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f
345 +#define PROC_TABLE(f) cpu_vtable[0]->f
346 +static inline void init_proc_vtable(const struct processor *p)
347 +{
348 + unsigned int cpu = smp_processor_id();
349 + *cpu_vtable[cpu] = *p;
350 + WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
351 + cpu_vtable[0]->dcache_clean_area);
352 + WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
353 + cpu_vtable[0]->set_pte_ext);
354 +}
355 +#else
356 +#define PROC_VTABLE(f) processor.f
357 +#define PROC_TABLE(f) processor.f
358 +static inline void init_proc_vtable(const struct processor *p)
359 +{
360 + processor = *p;
361 +}
362 +#endif
363 +
364 +#define cpu_proc_init PROC_VTABLE(_proc_init)
365 +#define cpu_check_bugs PROC_VTABLE(check_bugs)
366 +#define cpu_proc_fin PROC_VTABLE(_proc_fin)
367 +#define cpu_reset PROC_VTABLE(reset)
368 +#define cpu_do_idle PROC_VTABLE(_do_idle)
369 +#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area)
370 +#define cpu_set_pte_ext PROC_TABLE(set_pte_ext)
371 +#define cpu_do_switch_mm PROC_VTABLE(switch_mm)
372 +
373 +/* These two are private to arch/arm/kernel/suspend.c */
374 +#define cpu_do_suspend PROC_VTABLE(do_suspend)
375 +#define cpu_do_resume PROC_VTABLE(do_resume)
376 #endif
377
378 extern void cpu_resume(void);
379 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
380 index 9b37b6ab27fe..8f55dc520a3e 100644
381 --- a/arch/arm/include/asm/thread_info.h
382 +++ b/arch/arm/include/asm/thread_info.h
383 @@ -121,8 +121,8 @@ extern void vfp_flush_hwstate(struct thread_info *);
384 struct user_vfp;
385 struct user_vfp_exc;
386
387 -extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
388 - struct user_vfp_exc __user *);
389 +extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
390 + struct user_vfp_exc *);
391 extern int vfp_restore_user_hwstate(struct user_vfp *,
392 struct user_vfp_exc *);
393 #endif
394 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
395 index 5451e1f05a19..c136eef8f690 100644
396 --- a/arch/arm/include/asm/uaccess.h
397 +++ b/arch/arm/include/asm/uaccess.h
398 @@ -69,6 +69,14 @@ extern int __put_user_bad(void);
399 static inline void set_fs(mm_segment_t fs)
400 {
401 current_thread_info()->addr_limit = fs;
402 +
403 + /*
404 + * Prevent a mispredicted conditional call to set_fs from forwarding
405 + * the wrong address limit to access_ok under speculation.
406 + */
407 + dsb(nsh);
408 + isb();
409 +
410 modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
411 }
412
413 @@ -91,6 +99,32 @@ static inline void set_fs(mm_segment_t fs)
414 #define __inttype(x) \
415 __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
416
417 +/*
418 + * Sanitise a uaccess pointer such that it becomes NULL if addr+size
419 + * is above the current addr_limit.
420 + */
421 +#define uaccess_mask_range_ptr(ptr, size) \
422 + ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
423 +static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
424 + size_t size)
425 +{
426 + void __user *safe_ptr = (void __user *)ptr;
427 + unsigned long tmp;
428 +
429 + asm volatile(
430 + " sub %1, %3, #1\n"
431 + " subs %1, %1, %0\n"
432 + " addhs %1, %1, #1\n"
433 + " subhss %1, %1, %2\n"
434 + " movlo %0, #0\n"
435 + : "+r" (safe_ptr), "=&r" (tmp)
436 + : "r" (size), "r" (current_thread_info()->addr_limit)
437 + : "cc");
438 +
439 + csdb();
440 + return safe_ptr;
441 +}
442 +
443 /*
444 * Single-value transfer routines. They automatically use the right
445 * size if we just have the right pointer type. Note that the functions
446 @@ -362,6 +396,14 @@ do { \
447 __pu_err; \
448 })
449
450 +#ifdef CONFIG_CPU_SPECTRE
451 +/*
452 + * When mitigating Spectre variant 1.1, all accessors need to include
453 + * verification of the address space.
454 + */
455 +#define __put_user(x, ptr) put_user(x, ptr)
456 +
457 +#else
458 #define __put_user(x, ptr) \
459 ({ \
460 long __pu_err = 0; \
461 @@ -369,12 +411,6 @@ do { \
462 __pu_err; \
463 })
464
465 -#define __put_user_error(x, ptr, err) \
466 -({ \
467 - __put_user_switch((x), (ptr), (err), __put_user_nocheck); \
468 - (void) 0; \
469 -})
470 -
471 #define __put_user_nocheck(x, __pu_ptr, __err, __size) \
472 do { \
473 unsigned long __pu_addr = (unsigned long)__pu_ptr; \
474 @@ -454,6 +490,7 @@ do { \
475 : "r" (x), "i" (-EFAULT) \
476 : "cc")
477
478 +#endif /* !CONFIG_CPU_SPECTRE */
479
480 #ifdef CONFIG_MMU
481 extern unsigned long __must_check
482 diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
483 index 7be511310191..d41d3598e5e5 100644
484 --- a/arch/arm/kernel/bugs.c
485 +++ b/arch/arm/kernel/bugs.c
486 @@ -6,8 +6,8 @@
487 void check_other_bugs(void)
488 {
489 #ifdef MULTI_CPU
490 - if (processor.check_bugs)
491 - processor.check_bugs();
492 + if (cpu_check_bugs)
493 + cpu_check_bugs();
494 #endif
495 }
496
497 diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
498 index 6e0375e7db05..997b02302c31 100644
499 --- a/arch/arm/kernel/head-common.S
500 +++ b/arch/arm/kernel/head-common.S
501 @@ -145,6 +145,9 @@ __mmap_switched_data:
502 #endif
503 .size __mmap_switched_data, . - __mmap_switched_data
504
505 + __FINIT
506 + .text
507 +
508 /*
509 * This provides a C-API version of __lookup_processor_type
510 */
511 @@ -156,9 +159,6 @@ ENTRY(lookup_processor_type)
512 ldmfd sp!, {r4 - r6, r9, pc}
513 ENDPROC(lookup_processor_type)
514
515 - __FINIT
516 - .text
517 -
518 /*
519 * Read processor ID register (CP#15, CR0), and look up in the linker-built
520 * supported processor list. Note that we can't use the absolute addresses
521 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
522 index 4c249cb261f3..7bbaa293a38c 100644
523 --- a/arch/arm/kernel/setup.c
524 +++ b/arch/arm/kernel/setup.c
525 @@ -115,6 +115,11 @@ EXPORT_SYMBOL(elf_hwcap2);
526
527 #ifdef MULTI_CPU
528 struct processor processor __ro_after_init;
529 +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
530 +struct processor *cpu_vtable[NR_CPUS] = {
531 + [0] = &processor,
532 +};
533 +#endif
534 #endif
535 #ifdef MULTI_TLB
536 struct cpu_tlb_fns cpu_tlb __ro_after_init;
537 @@ -667,28 +672,33 @@ static void __init smp_build_mpidr_hash(void)
538 }
539 #endif
540
541 -static void __init setup_processor(void)
542 +/*
543 + * locate processor in the list of supported processor types. The linker
544 + * builds this table for us from the entries in arch/arm/mm/proc-*.S
545 + */
546 +struct proc_info_list *lookup_processor(u32 midr)
547 {
548 - struct proc_info_list *list;
549 + struct proc_info_list *list = lookup_processor_type(midr);
550
551 - /*
552 - * locate processor in the list of supported processor
553 - * types. The linker builds this table for us from the
554 - * entries in arch/arm/mm/proc-*.S
555 - */
556 - list = lookup_processor_type(read_cpuid_id());
557 if (!list) {
558 - pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
559 - read_cpuid_id());
560 - while (1);
561 + pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
562 + smp_processor_id(), midr);
563 + while (1)
564 + /* can't use cpu_relax() here as it may require MMU setup */;
565 }
566
567 + return list;
568 +}
569 +
570 +static void __init setup_processor(void)
571 +{
572 + unsigned int midr = read_cpuid_id();
573 + struct proc_info_list *list = lookup_processor(midr);
574 +
575 cpu_name = list->cpu_name;
576 __cpu_architecture = __get_cpu_architecture();
577
578 -#ifdef MULTI_CPU
579 - processor = *list->proc;
580 -#endif
581 + init_proc_vtable(list->proc);
582 #ifdef MULTI_TLB
583 cpu_tlb = *list->tlb;
584 #endif
585 @@ -700,7 +710,7 @@ static void __init setup_processor(void)
586 #endif
587
588 pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
589 - cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
590 + list->cpu_name, midr, midr & 15,
591 proc_arch[cpu_architecture()], get_cr());
592
593 snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
594 diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
595 index b8f766cf3a90..b908382b69ff 100644
596 --- a/arch/arm/kernel/signal.c
597 +++ b/arch/arm/kernel/signal.c
598 @@ -77,8 +77,6 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
599 kframe->magic = IWMMXT_MAGIC;
600 kframe->size = IWMMXT_STORAGE_SIZE;
601 iwmmxt_task_copy(current_thread_info(), &kframe->storage);
602 -
603 - err = __copy_to_user(frame, kframe, sizeof(*frame));
604 } else {
605 /*
606 * For bug-compatibility with older kernels, some space
607 @@ -86,10 +84,14 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
608 * Set the magic and size appropriately so that properly
609 * written userspace can skip it reliably:
610 */
611 - __put_user_error(DUMMY_MAGIC, &frame->magic, err);
612 - __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
613 + *kframe = (struct iwmmxt_sigframe) {
614 + .magic = DUMMY_MAGIC,
615 + .size = IWMMXT_STORAGE_SIZE,
616 + };
617 }
618
619 + err = __copy_to_user(frame, kframe, sizeof(*kframe));
620 +
621 return err;
622 }
623
624 @@ -135,17 +137,18 @@ static int restore_iwmmxt_context(char __user **auxp)
625
626 static int preserve_vfp_context(struct vfp_sigframe __user *frame)
627 {
628 - const unsigned long magic = VFP_MAGIC;
629 - const unsigned long size = VFP_STORAGE_SIZE;
630 + struct vfp_sigframe kframe;
631 int err = 0;
632
633 - __put_user_error(magic, &frame->magic, err);
634 - __put_user_error(size, &frame->size, err);
635 + memset(&kframe, 0, sizeof(kframe));
636 + kframe.magic = VFP_MAGIC;
637 + kframe.size = VFP_STORAGE_SIZE;
638
639 + err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
640 if (err)
641 - return -EFAULT;
642 + return err;
643
644 - return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
645 + return __copy_to_user(frame, &kframe, sizeof(kframe));
646 }
647
648 static int restore_vfp_context(char __user **auxp)
649 @@ -288,30 +291,35 @@ static int
650 setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
651 {
652 struct aux_sigframe __user *aux;
653 + struct sigcontext context;
654 int err = 0;
655
656 - __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
657 - __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
658 - __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
659 - __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
660 - __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
661 - __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
662 - __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
663 - __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
664 - __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
665 - __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
666 - __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
667 - __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
668 - __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
669 - __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
670 - __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
671 - __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
672 - __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
673 -
674 - __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
675 - __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
676 - __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
677 - __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
678 + context = (struct sigcontext) {
679 + .arm_r0 = regs->ARM_r0,
680 + .arm_r1 = regs->ARM_r1,
681 + .arm_r2 = regs->ARM_r2,
682 + .arm_r3 = regs->ARM_r3,
683 + .arm_r4 = regs->ARM_r4,
684 + .arm_r5 = regs->ARM_r5,
685 + .arm_r6 = regs->ARM_r6,
686 + .arm_r7 = regs->ARM_r7,
687 + .arm_r8 = regs->ARM_r8,
688 + .arm_r9 = regs->ARM_r9,
689 + .arm_r10 = regs->ARM_r10,
690 + .arm_fp = regs->ARM_fp,
691 + .arm_ip = regs->ARM_ip,
692 + .arm_sp = regs->ARM_sp,
693 + .arm_lr = regs->ARM_lr,
694 + .arm_pc = regs->ARM_pc,
695 + .arm_cpsr = regs->ARM_cpsr,
696 +
697 + .trap_no = current->thread.trap_no,
698 + .error_code = current->thread.error_code,
699 + .fault_address = current->thread.address,
700 + .oldmask = set->sig[0],
701 + };
702 +
703 + err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
704
705 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
706
707 @@ -328,7 +336,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
708 if (err == 0)
709 err |= preserve_vfp_context(&aux->vfp);
710 #endif
711 - __put_user_error(0, &aux->end_magic, err);
712 + err |= __put_user(0, &aux->end_magic);
713
714 return err;
715 }
716 @@ -491,7 +499,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
717 /*
718 * Set uc.uc_flags to a value which sc.trap_no would never have.
719 */
720 - __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
721 + err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
722
723 err |= setup_sigframe(frame, regs, set);
724 if (err == 0)
725 @@ -511,8 +519,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
726
727 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
728
729 - __put_user_error(0, &frame->sig.uc.uc_flags, err);
730 - __put_user_error(NULL, &frame->sig.uc.uc_link, err);
731 + err |= __put_user(0, &frame->sig.uc.uc_flags);
732 + err |= __put_user(NULL, &frame->sig.uc.uc_link);
733
734 err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
735 err |= setup_sigframe(&frame->sig, regs, set);
736 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
737 index f574a5e0d589..3bf82232b1be 100644
738 --- a/arch/arm/kernel/smp.c
739 +++ b/arch/arm/kernel/smp.c
740 @@ -42,6 +42,7 @@
741 #include <asm/mmu_context.h>
742 #include <asm/pgtable.h>
743 #include <asm/pgalloc.h>
744 +#include <asm/procinfo.h>
745 #include <asm/processor.h>
746 #include <asm/sections.h>
747 #include <asm/tlbflush.h>
748 @@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
749 #endif
750 }
751
752 +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
753 +static int secondary_biglittle_prepare(unsigned int cpu)
754 +{
755 + if (!cpu_vtable[cpu])
756 + cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
757 +
758 + return cpu_vtable[cpu] ? 0 : -ENOMEM;
759 +}
760 +
761 +static void secondary_biglittle_init(void)
762 +{
763 + init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
764 +}
765 +#else
766 +static int secondary_biglittle_prepare(unsigned int cpu)
767 +{
768 + return 0;
769 +}
770 +
771 +static void secondary_biglittle_init(void)
772 +{
773 +}
774 +#endif
775 +
776 int __cpu_up(unsigned int cpu, struct task_struct *idle)
777 {
778 int ret;
779 @@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
780 if (!smp_ops.smp_boot_secondary)
781 return -ENOSYS;
782
783 + ret = secondary_biglittle_prepare(cpu);
784 + if (ret)
785 + return ret;
786 +
787 /*
788 * We need to tell the secondary core where to find
789 * its stack and the page tables.
790 @@ -359,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
791 struct mm_struct *mm = &init_mm;
792 unsigned int cpu;
793
794 + secondary_biglittle_init();
795 +
796 /*
797 * The identity mapping is uncached (strongly ordered), so
798 * switch away from it before attempting any exclusive accesses.
799 diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
800 index f0dd4b6ebb63..40da0872170f 100644
801 --- a/arch/arm/kernel/sys_oabi-compat.c
802 +++ b/arch/arm/kernel/sys_oabi-compat.c
803 @@ -277,6 +277,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
804 int maxevents, int timeout)
805 {
806 struct epoll_event *kbuf;
807 + struct oabi_epoll_event e;
808 mm_segment_t fs;
809 long ret, err, i;
810
811 @@ -295,8 +296,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
812 set_fs(fs);
813 err = 0;
814 for (i = 0; i < ret; i++) {
815 - __put_user_error(kbuf[i].events, &events->events, err);
816 - __put_user_error(kbuf[i].data, &events->data, err);
817 + e.events = kbuf[i].events;
818 + e.data = kbuf[i].data;
819 + err = __copy_to_user(events, &e, sizeof(e));
820 + if (err)
821 + break;
822 events++;
823 }
824 kfree(kbuf);
825 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
826 index a826df3d3814..6709a8d33963 100644
827 --- a/arch/arm/lib/copy_from_user.S
828 +++ b/arch/arm/lib/copy_from_user.S
829 @@ -93,11 +93,7 @@ ENTRY(arm_copy_from_user)
830 #ifdef CONFIG_CPU_SPECTRE
831 get_thread_info r3
832 ldr r3, [r3, #TI_ADDR_LIMIT]
833 - adds ip, r1, r2 @ ip=addr+size
834 - sub r3, r3, #1 @ addr_limit - 1
835 - cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
836 - movcs r1, #0 @ addr = NULL
837 - csdb
838 + uaccess_mask_range_ptr r1, r2, r3, ip
839 #endif
840
841 #include "copy_template.S"
842 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
843 index caf5019d8161..970abe521197 100644
844 --- a/arch/arm/lib/copy_to_user.S
845 +++ b/arch/arm/lib/copy_to_user.S
846 @@ -94,6 +94,11 @@
847
848 ENTRY(__copy_to_user_std)
849 WEAK(arm_copy_to_user)
850 +#ifdef CONFIG_CPU_SPECTRE
851 + get_thread_info r3
852 + ldr r3, [r3, #TI_ADDR_LIMIT]
853 + uaccess_mask_range_ptr r0, r2, r3, ip
854 +#endif
855
856 #include "copy_template.S"
857
858 @@ -108,4 +113,3 @@ ENDPROC(__copy_to_user_std)
859 rsb r0, r0, r2
860 copy_abort_end
861 .popsection
862 -
863 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
864 index 9b4ed1728616..73dc7360cbdd 100644
865 --- a/arch/arm/lib/uaccess_with_memcpy.c
866 +++ b/arch/arm/lib/uaccess_with_memcpy.c
867 @@ -152,7 +152,8 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
868 n = __copy_to_user_std(to, from, n);
869 uaccess_restore(ua_flags);
870 } else {
871 - n = __copy_to_user_memcpy(to, from, n);
872 + n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
873 + from, n);
874 }
875 return n;
876 }
877 diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
878 index a109f6482413..0f916c245a2e 100644
879 --- a/arch/arm/mach-integrator/impd1.c
880 +++ b/arch/arm/mach-integrator/impd1.c
881 @@ -393,7 +393,11 @@ static int __ref impd1_probe(struct lm_device *dev)
882 sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
883 GFP_KERNEL);
884 chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
885 - mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
886 + mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
887 + "lm%x:00700", dev->id);
888 + if (!lookup || !chipname || !mmciname)
889 + return -ENOMEM;
890 +
891 lookup->dev_id = mmciname;
892 /*
893 * Offsets on GPIO block 1:
894 diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
895 index fc5fb776a710..17558be4bf0a 100644
896 --- a/arch/arm/mach-omap2/omap-wakeupgen.c
897 +++ b/arch/arm/mach-omap2/omap-wakeupgen.c
898 @@ -50,6 +50,9 @@
899 #define OMAP4_NR_BANKS 4
900 #define OMAP4_NR_IRQS 128
901
902 +#define SYS_NIRQ1_EXT_SYS_IRQ_1 7
903 +#define SYS_NIRQ2_EXT_SYS_IRQ_2 119
904 +
905 static void __iomem *wakeupgen_base;
906 static void __iomem *sar_base;
907 static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
908 @@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d)
909 irq_chip_unmask_parent(d);
910 }
911
912 +/*
913 + * The sys_nirq pins bypass peripheral modules and are wired directly
914 + * to MPUSS wakeupgen. They get automatically inverted for GIC.
915 + */
916 +static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
917 +{
918 + bool inverted = false;
919 +
920 + switch (type) {
921 + case IRQ_TYPE_LEVEL_LOW:
922 + type &= ~IRQ_TYPE_LEVEL_MASK;
923 + type |= IRQ_TYPE_LEVEL_HIGH;
924 + inverted = true;
925 + break;
926 + case IRQ_TYPE_EDGE_FALLING:
927 + type &= ~IRQ_TYPE_EDGE_BOTH;
928 + type |= IRQ_TYPE_EDGE_RISING;
929 + inverted = true;
930 + break;
931 + default:
932 + break;
933 + }
934 +
935 + if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
936 + d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
937 + pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
938 + d->hwirq);
939 +
940 + return irq_chip_set_type_parent(d, type);
941 +}
942 +
943 #ifdef CONFIG_HOTPLUG_CPU
944 static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
945
946 @@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = {
947 .irq_mask = wakeupgen_mask,
948 .irq_unmask = wakeupgen_unmask,
949 .irq_retrigger = irq_chip_retrigger_hierarchy,
950 - .irq_set_type = irq_chip_set_type_parent,
951 + .irq_set_type = wakeupgen_irq_set_type,
952 .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
953 #ifdef CONFIG_SMP
954 .irq_set_affinity = irq_chip_set_affinity_parent,
955 diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
956 index 81d0efb055c6..5461d589a1e2 100644
957 --- a/arch/arm/mm/proc-macros.S
958 +++ b/arch/arm/mm/proc-macros.S
959 @@ -274,6 +274,13 @@
960 .endm
961
962 .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
963 +/*
964 + * If we are building for big.Little with branch predictor hardening,
965 + * we need the processor function tables to remain available after boot.
966 + */
967 +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
968 + .section ".rodata"
969 +#endif
970 .type \name\()_processor_functions, #object
971 .align 2
972 ENTRY(\name\()_processor_functions)
973 @@ -309,6 +316,9 @@ ENTRY(\name\()_processor_functions)
974 .endif
975
976 .size \name\()_processor_functions, . - \name\()_processor_functions
977 +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
978 + .previous
979 +#endif
980 .endm
981
982 .macro define_cache_functions name:req
983 diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
984 index 5544b82a2e7a..9a07916af8dd 100644
985 --- a/arch/arm/mm/proc-v7-bugs.c
986 +++ b/arch/arm/mm/proc-v7-bugs.c
987 @@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
988 case ARM_CPU_PART_CORTEX_A17:
989 case ARM_CPU_PART_CORTEX_A73:
990 case ARM_CPU_PART_CORTEX_A75:
991 - if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
992 - goto bl_error;
993 per_cpu(harden_branch_predictor_fn, cpu) =
994 harden_branch_predictor_bpiall;
995 spectre_v2_method = "BPIALL";
996 @@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
997
998 case ARM_CPU_PART_CORTEX_A15:
999 case ARM_CPU_PART_BRAHMA_B15:
1000 - if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
1001 - goto bl_error;
1002 per_cpu(harden_branch_predictor_fn, cpu) =
1003 harden_branch_predictor_iciallu;
1004 spectre_v2_method = "ICIALLU";
1005 @@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
1006 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
1007 if ((int)res.a0 != 0)
1008 break;
1009 - if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
1010 - goto bl_error;
1011 per_cpu(harden_branch_predictor_fn, cpu) =
1012 call_hvc_arch_workaround_1;
1013 - processor.switch_mm = cpu_v7_hvc_switch_mm;
1014 + cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
1015 spectre_v2_method = "hypervisor";
1016 break;
1017
1018 @@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
1019 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
1020 if ((int)res.a0 != 0)
1021 break;
1022 - if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
1023 - goto bl_error;
1024 per_cpu(harden_branch_predictor_fn, cpu) =
1025 call_smc_arch_workaround_1;
1026 - processor.switch_mm = cpu_v7_smc_switch_mm;
1027 + cpu_do_switch_mm = cpu_v7_smc_switch_mm;
1028 spectre_v2_method = "firmware";
1029 break;
1030
1031 @@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
1032 if (spectre_v2_method)
1033 pr_info("CPU%u: Spectre v2: using %s workaround\n",
1034 smp_processor_id(), spectre_v2_method);
1035 - return;
1036 -
1037 -bl_error:
1038 - pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
1039 - cpu);
1040 }
1041 #else
1042 static void cpu_v7_spectre_init(void)
1043 diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
1044 index dc7e6b50ef67..66c5e693428a 100644
1045 --- a/arch/arm/vfp/vfpmodule.c
1046 +++ b/arch/arm/vfp/vfpmodule.c
1047 @@ -553,12 +553,11 @@ void vfp_flush_hwstate(struct thread_info *thread)
1048 * Save the current VFP state into the provided structures and prepare
1049 * for entry into a new function (signal handler).
1050 */
1051 -int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
1052 - struct user_vfp_exc __user *ufp_exc)
1053 +int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
1054 + struct user_vfp_exc *ufp_exc)
1055 {
1056 struct thread_info *thread = current_thread_info();
1057 struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
1058 - int err = 0;
1059
1060 /* Ensure that the saved hwstate is up-to-date. */
1061 vfp_sync_hwstate(thread);
1062 @@ -567,22 +566,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
1063 * Copy the floating point registers. There can be unused
1064 * registers see asm/hwcap.h for details.
1065 */
1066 - err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
1067 - sizeof(hwstate->fpregs));
1068 + memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
1069 +
1070 /*
1071 * Copy the status and control register.
1072 */
1073 - __put_user_error(hwstate->fpscr, &ufp->fpscr, err);
1074 + ufp->fpscr = hwstate->fpscr;
1075
1076 /*
1077 * Copy the exception registers.
1078 */
1079 - __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
1080 - __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
1081 - __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
1082 -
1083 - if (err)
1084 - return -EFAULT;
1085 + ufp_exc->fpexc = hwstate->fpexc;
1086 + ufp_exc->fpinst = hwstate->fpinst;
1087 + ufp_exc->fpinst2 = hwstate->fpinst2;
1088
1089 /* Ensure that VFP is disabled. */
1090 vfp_flush_hwstate(thread);
1091 diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
1092 index 2fa2942be221..470755cb7558 100644
1093 --- a/arch/riscv/include/asm/pgtable-bits.h
1094 +++ b/arch/riscv/include/asm/pgtable-bits.h
1095 @@ -35,6 +35,12 @@
1096 #define _PAGE_SPECIAL _PAGE_SOFT
1097 #define _PAGE_TABLE _PAGE_PRESENT
1098
1099 +/*
1100 + * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
1101 + * distinguish them from swapped out pages
1102 + */
1103 +#define _PAGE_PROT_NONE _PAGE_READ
1104 +
1105 #define _PAGE_PFN_SHIFT 10
1106
1107 /* Set of bits to preserve across pte_modify() */
1108 diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
1109 index 16301966d65b..a8179a8c1491 100644
1110 --- a/arch/riscv/include/asm/pgtable.h
1111 +++ b/arch/riscv/include/asm/pgtable.h
1112 @@ -44,7 +44,7 @@
1113 /* Page protection bits */
1114 #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
1115
1116 -#define PAGE_NONE __pgprot(0)
1117 +#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
1118 #define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
1119 #define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
1120 #define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
1121 @@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
1122
1123 static inline int pmd_present(pmd_t pmd)
1124 {
1125 - return (pmd_val(pmd) & _PAGE_PRESENT);
1126 + return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
1127 }
1128
1129 static inline int pmd_none(pmd_t pmd)
1130 @@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
1131
1132 static inline int pte_present(pte_t pte)
1133 {
1134 - return (pte_val(pte) & _PAGE_PRESENT);
1135 + return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
1136 }
1137
1138 static inline int pte_none(pte_t pte)
1139 @@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
1140 *
1141 * Format of swap PTE:
1142 * bit 0: _PAGE_PRESENT (zero)
1143 - * bit 1: reserved for future use (zero)
1144 + * bit 1: _PAGE_PROT_NONE (zero)
1145 * bits 2 to 6: swap type
1146 * bits 7 to XLEN-1: swap offset
1147 */
1148 diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
1149 index 9f82a7e34c64..9db7d0076375 100644
1150 --- a/arch/riscv/kernel/ptrace.c
1151 +++ b/arch/riscv/kernel/ptrace.c
1152 @@ -120,6 +120,6 @@ void do_syscall_trace_exit(struct pt_regs *regs)
1153
1154 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
1155 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
1156 - trace_sys_exit(regs, regs->regs[0]);
1157 + trace_sys_exit(regs, regs_return_value(regs));
1158 #endif
1159 }
1160 diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
1161 index c8d08da5b308..c04a8813cff9 100644
1162 --- a/arch/x86/events/core.c
1163 +++ b/arch/x86/events/core.c
1164 @@ -2253,6 +2253,19 @@ void perf_check_microcode(void)
1165 x86_pmu.check_microcode();
1166 }
1167
1168 +static int x86_pmu_check_period(struct perf_event *event, u64 value)
1169 +{
1170 + if (x86_pmu.check_period && x86_pmu.check_period(event, value))
1171 + return -EINVAL;
1172 +
1173 + if (value && x86_pmu.limit_period) {
1174 + if (x86_pmu.limit_period(event, value) > value)
1175 + return -EINVAL;
1176 + }
1177 +
1178 + return 0;
1179 +}
1180 +
1181 static struct pmu pmu = {
1182 .pmu_enable = x86_pmu_enable,
1183 .pmu_disable = x86_pmu_disable,
1184 @@ -2277,6 +2290,7 @@ static struct pmu pmu = {
1185 .event_idx = x86_pmu_event_idx,
1186 .sched_task = x86_pmu_sched_task,
1187 .task_ctx_size = sizeof(struct x86_perf_task_context),
1188 + .check_period = x86_pmu_check_period,
1189 };
1190
1191 void arch_perf_update_userpage(struct perf_event *event,
1192 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
1193 index d0b186264941..fbd7551a8d44 100644
1194 --- a/arch/x86/events/intel/core.c
1195 +++ b/arch/x86/events/intel/core.c
1196 @@ -3465,6 +3465,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
1197 intel_pmu_lbr_sched_task(ctx, sched_in);
1198 }
1199
1200 +static int intel_pmu_check_period(struct perf_event *event, u64 value)
1201 +{
1202 + return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
1203 +}
1204 +
1205 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
1206
1207 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
1208 @@ -3545,6 +3550,8 @@ static __initconst const struct x86_pmu core_pmu = {
1209 .cpu_starting = intel_pmu_cpu_starting,
1210 .cpu_dying = intel_pmu_cpu_dying,
1211 .cpu_dead = intel_pmu_cpu_dead,
1212 +
1213 + .check_period = intel_pmu_check_period,
1214 };
1215
1216 static struct attribute *intel_pmu_attrs[];
1217 @@ -3589,6 +3596,8 @@ static __initconst const struct x86_pmu intel_pmu = {
1218
1219 .guest_get_msrs = intel_guest_get_msrs,
1220 .sched_task = intel_pmu_sched_task,
1221 +
1222 + .check_period = intel_pmu_check_period,
1223 };
1224
1225 static __init void intel_clovertown_quirk(void)
1226 diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
1227 index c5ad9cc61f4b..0ee3a441ad79 100644
1228 --- a/arch/x86/events/perf_event.h
1229 +++ b/arch/x86/events/perf_event.h
1230 @@ -644,6 +644,11 @@ struct x86_pmu {
1231 * Intel host/guest support (KVM)
1232 */
1233 struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
1234 +
1235 + /*
1236 + * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
1237 + */
1238 + int (*check_period) (struct perf_event *event, u64 period);
1239 };
1240
1241 struct x86_perf_task_context {
1242 @@ -855,7 +860,7 @@ static inline int amd_pmu_init(void)
1243
1244 #ifdef CONFIG_CPU_SUP_INTEL
1245
1246 -static inline bool intel_pmu_has_bts(struct perf_event *event)
1247 +static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
1248 {
1249 struct hw_perf_event *hwc = &event->hw;
1250 unsigned int hw_event, bts_event;
1251 @@ -866,7 +871,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
1252 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
1253 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
1254
1255 - return hw_event == bts_event && hwc->sample_period == 1;
1256 + return hw_event == bts_event && period == 1;
1257 +}
1258 +
1259 +static inline bool intel_pmu_has_bts(struct perf_event *event)
1260 +{
1261 + struct hw_perf_event *hwc = &event->hw;
1262 +
1263 + return intel_pmu_has_bts_period(event, hwc->sample_period);
1264 }
1265
1266 int intel_pmu_save_and_restart(struct perf_event *event);
1267 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
1268 index 8e02b30cf08e..3ebd77770f98 100644
1269 --- a/arch/x86/ia32/ia32_aout.c
1270 +++ b/arch/x86/ia32/ia32_aout.c
1271 @@ -51,7 +51,7 @@ static unsigned long get_dr(int n)
1272 /*
1273 * fill in the user structure for a core dump..
1274 */
1275 -static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
1276 +static void fill_dump(struct pt_regs *regs, struct user32 *dump)
1277 {
1278 u32 fs, gs;
1279 memset(dump, 0, sizeof(*dump));
1280 @@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm)
1281 fs = get_fs();
1282 set_fs(KERNEL_DS);
1283 has_dumped = 1;
1284 +
1285 + fill_dump(cprm->regs, &dump);
1286 +
1287 strncpy(dump.u_comm, current->comm, sizeof(current->comm));
1288 dump.u_ar0 = offsetof(struct user32, regs);
1289 dump.signal = cprm->siginfo->si_signo;
1290 - dump_thread32(cprm->regs, &dump);
1291
1292 /*
1293 * If the size of the dump file exceeds the rlimit, then see
1294 diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
1295 index e652a7cc6186..3f697a9e3f59 100644
1296 --- a/arch/x86/include/asm/uv/bios.h
1297 +++ b/arch/x86/include/asm/uv/bios.h
1298 @@ -48,7 +48,8 @@ enum {
1299 BIOS_STATUS_SUCCESS = 0,
1300 BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
1301 BIOS_STATUS_EINVAL = -EINVAL,
1302 - BIOS_STATUS_UNAVAIL = -EBUSY
1303 + BIOS_STATUS_UNAVAIL = -EBUSY,
1304 + BIOS_STATUS_ABORT = -EINTR,
1305 };
1306
1307 /* Address map parameters */
1308 @@ -167,4 +168,9 @@ extern long system_serial_number;
1309
1310 extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
1311
1312 +/*
1313 + * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
1314 + */
1315 +extern struct semaphore __efi_uv_runtime_lock;
1316 +
1317 #endif /* _ASM_X86_UV_BIOS_H */
1318 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1319 index 02ac8fa0cd6d..ee8f8d70b98a 100644
1320 --- a/arch/x86/kvm/svm.c
1321 +++ b/arch/x86/kvm/svm.c
1322 @@ -6256,6 +6256,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
1323 int asid, ret;
1324
1325 ret = -EBUSY;
1326 + if (unlikely(sev->active))
1327 + return ret;
1328 +
1329 asid = sev_asid_new();
1330 if (asid < 0)
1331 return ret;
1332 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1333 index 0b2e13dd517b..f6da5c37d2e8 100644
1334 --- a/arch/x86/kvm/vmx.c
1335 +++ b/arch/x86/kvm/vmx.c
1336 @@ -2757,7 +2757,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1337 if (!entry_only)
1338 j = find_msr(&m->host, msr);
1339
1340 - if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
1341 + if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
1342 + (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
1343 printk_once(KERN_WARNING "Not enough msr switch entries. "
1344 "Can't add msr %x\n", msr);
1345 return;
1346 @@ -3601,9 +3602,11 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
1347 * secondary cpu-based controls. Do not include those that
1348 * depend on CPUID bits, they are added later by vmx_cpuid_update.
1349 */
1350 - rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
1351 - msrs->secondary_ctls_low,
1352 - msrs->secondary_ctls_high);
1353 + if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
1354 + rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
1355 + msrs->secondary_ctls_low,
1356 + msrs->secondary_ctls_high);
1357 +
1358 msrs->secondary_ctls_low = 0;
1359 msrs->secondary_ctls_high &=
1360 SECONDARY_EXEC_DESC |
1361 diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
1362 index 4a6a5a26c582..eb33432f2f24 100644
1363 --- a/arch/x86/platform/uv/bios_uv.c
1364 +++ b/arch/x86/platform/uv/bios_uv.c
1365 @@ -29,7 +29,8 @@
1366
1367 struct uv_systab *uv_systab;
1368
1369 -s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
1370 +static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
1371 + u64 a4, u64 a5)
1372 {
1373 struct uv_systab *tab = uv_systab;
1374 s64 ret;
1375 @@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
1376
1377 return ret;
1378 }
1379 +
1380 +s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
1381 +{
1382 + s64 ret;
1383 +
1384 + if (down_interruptible(&__efi_uv_runtime_lock))
1385 + return BIOS_STATUS_ABORT;
1386 +
1387 + ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
1388 + up(&__efi_uv_runtime_lock);
1389 +
1390 + return ret;
1391 +}
1392 EXPORT_SYMBOL_GPL(uv_bios_call);
1393
1394 s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
1395 @@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
1396 unsigned long bios_flags;
1397 s64 ret;
1398
1399 + if (down_interruptible(&__efi_uv_runtime_lock))
1400 + return BIOS_STATUS_ABORT;
1401 +
1402 local_irq_save(bios_flags);
1403 - ret = uv_bios_call(which, a1, a2, a3, a4, a5);
1404 + ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
1405 local_irq_restore(bios_flags);
1406
1407 + up(&__efi_uv_runtime_lock);
1408 +
1409 return ret;
1410 }
1411
1412 diff --git a/block/blk-flush.c b/block/blk-flush.c
1413 index ce41f666de3e..76487948a27f 100644
1414 --- a/block/blk-flush.c
1415 +++ b/block/blk-flush.c
1416 @@ -424,7 +424,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
1417 blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
1418 spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
1419
1420 - blk_mq_run_hw_queue(hctx, true);
1421 + blk_mq_sched_restart(hctx);
1422 }
1423
1424 /**
1425 diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
1426 index 85167603b9c9..0da58f0bf7e5 100644
1427 --- a/drivers/acpi/numa.c
1428 +++ b/drivers/acpi/numa.c
1429 @@ -147,9 +147,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
1430 {
1431 struct acpi_srat_mem_affinity *p =
1432 (struct acpi_srat_mem_affinity *)header;
1433 - pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
1434 - (unsigned long)p->base_address,
1435 - (unsigned long)p->length,
1436 + pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
1437 + (unsigned long long)p->base_address,
1438 + (unsigned long long)p->length,
1439 p->proximity_domain,
1440 (p->flags & ACPI_SRAT_MEM_ENABLED) ?
1441 "enabled" : "disabled",
1442 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
1443 index f53fb41efb7b..b100260b6ed2 100644
1444 --- a/drivers/cpufreq/cpufreq.c
1445 +++ b/drivers/cpufreq/cpufreq.c
1446 @@ -1530,17 +1530,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1447 {
1448 unsigned int ret_freq = 0;
1449
1450 - if (!cpufreq_driver->get)
1451 + if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
1452 return ret_freq;
1453
1454 ret_freq = cpufreq_driver->get(policy->cpu);
1455
1456 /*
1457 - * Updating inactive policies is invalid, so avoid doing that. Also
1458 - * if fast frequency switching is used with the given policy, the check
1459 + * If fast frequency switching is used with the given policy, the check
1460 * against policy->cur is pointless, so skip it in that case too.
1461 */
1462 - if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
1463 + if (policy->fast_switch_enabled)
1464 return ret_freq;
1465
1466 if (ret_freq && policy->cur &&
1467 @@ -1569,10 +1568,7 @@ unsigned int cpufreq_get(unsigned int cpu)
1468
1469 if (policy) {
1470 down_read(&policy->rwsem);
1471 -
1472 - if (!policy_is_inactive(policy))
1473 - ret_freq = __cpufreq_get(policy);
1474 -
1475 + ret_freq = __cpufreq_get(policy);
1476 up_read(&policy->rwsem);
1477
1478 cpufreq_cpu_put(policy);
1479 diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
1480 index aa66cbf23512..b0aeffd4e269 100644
1481 --- a/drivers/firmware/efi/runtime-wrappers.c
1482 +++ b/drivers/firmware/efi/runtime-wrappers.c
1483 @@ -172,6 +172,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
1484 */
1485 static DEFINE_SEMAPHORE(efi_runtime_lock);
1486
1487 +/*
1488 + * Expose the EFI runtime lock to the UV platform
1489 + */
1490 +#ifdef CONFIG_X86_UV
1491 +extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
1492 +#endif
1493 +
1494 /*
1495 * Calls the appropriate efi_runtime_service() with the appropriate
1496 * arguments.
1497 diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
1498 index 995cf0b9e0b1..2d1dfa1e0745 100644
1499 --- a/drivers/gpio/gpio-mxc.c
1500 +++ b/drivers/gpio/gpio-mxc.c
1501 @@ -17,6 +17,7 @@
1502 #include <linux/irqchip/chained_irq.h>
1503 #include <linux/platform_device.h>
1504 #include <linux/slab.h>
1505 +#include <linux/syscore_ops.h>
1506 #include <linux/gpio/driver.h>
1507 #include <linux/of.h>
1508 #include <linux/of_device.h>
1509 @@ -550,33 +551,38 @@ static void mxc_gpio_restore_regs(struct mxc_gpio_port *port)
1510 writel(port->gpio_saved_reg.dr, port->base + GPIO_DR);
1511 }
1512
1513 -static int __maybe_unused mxc_gpio_noirq_suspend(struct device *dev)
1514 +static int mxc_gpio_syscore_suspend(void)
1515 {
1516 - struct platform_device *pdev = to_platform_device(dev);
1517 - struct mxc_gpio_port *port = platform_get_drvdata(pdev);
1518 + struct mxc_gpio_port *port;
1519
1520 - mxc_gpio_save_regs(port);
1521 - clk_disable_unprepare(port->clk);
1522 + /* walk through all ports */
1523 + list_for_each_entry(port, &mxc_gpio_ports, node) {
1524 + mxc_gpio_save_regs(port);
1525 + clk_disable_unprepare(port->clk);
1526 + }
1527
1528 return 0;
1529 }
1530
1531 -static int __maybe_unused mxc_gpio_noirq_resume(struct device *dev)
1532 +static void mxc_gpio_syscore_resume(void)
1533 {
1534 - struct platform_device *pdev = to_platform_device(dev);
1535 - struct mxc_gpio_port *port = platform_get_drvdata(pdev);
1536 + struct mxc_gpio_port *port;
1537 int ret;
1538
1539 - ret = clk_prepare_enable(port->clk);
1540 - if (ret)
1541 - return ret;
1542 - mxc_gpio_restore_regs(port);
1543 -
1544 - return 0;
1545 + /* walk through all ports */
1546 + list_for_each_entry(port, &mxc_gpio_ports, node) {
1547 + ret = clk_prepare_enable(port->clk);
1548 + if (ret) {
1549 + pr_err("mxc: failed to enable gpio clock %d\n", ret);
1550 + return;
1551 + }
1552 + mxc_gpio_restore_regs(port);
1553 + }
1554 }
1555
1556 -static const struct dev_pm_ops mxc_gpio_dev_pm_ops = {
1557 - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mxc_gpio_noirq_suspend, mxc_gpio_noirq_resume)
1558 +static struct syscore_ops mxc_gpio_syscore_ops = {
1559 + .suspend = mxc_gpio_syscore_suspend,
1560 + .resume = mxc_gpio_syscore_resume,
1561 };
1562
1563 static struct platform_driver mxc_gpio_driver = {
1564 @@ -584,7 +590,6 @@ static struct platform_driver mxc_gpio_driver = {
1565 .name = "gpio-mxc",
1566 .of_match_table = mxc_gpio_dt_ids,
1567 .suppress_bind_attrs = true,
1568 - .pm = &mxc_gpio_dev_pm_ops,
1569 },
1570 .probe = mxc_gpio_probe,
1571 .id_table = mxc_gpio_devtype,
1572 @@ -592,6 +597,8 @@ static struct platform_driver mxc_gpio_driver = {
1573
1574 static int __init gpio_mxc_init(void)
1575 {
1576 + register_syscore_ops(&mxc_gpio_syscore_ops);
1577 +
1578 return platform_driver_register(&mxc_gpio_driver);
1579 }
1580 subsys_initcall(gpio_mxc_init);
1581 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1582 index 39bf2ce548c6..7f6af421d3e9 100644
1583 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1584 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1585 @@ -1653,8 +1653,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1586
1587 amdgpu_amdkfd_device_init(adev);
1588
1589 - if (amdgpu_sriov_vf(adev))
1590 + if (amdgpu_sriov_vf(adev)) {
1591 + amdgpu_virt_init_data_exchange(adev);
1592 amdgpu_virt_release_full_gpu(adev, true);
1593 + }
1594
1595 return 0;
1596 }
1597 @@ -2555,9 +2557,6 @@ fence_driver_init:
1598 goto failed;
1599 }
1600
1601 - if (amdgpu_sriov_vf(adev))
1602 - amdgpu_virt_init_data_exchange(adev);
1603 -
1604 amdgpu_fbdev_init(adev);
1605
1606 r = amdgpu_pm_sysfs_init(adev);
1607 @@ -3269,6 +3268,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
1608 r = amdgpu_ib_ring_tests(adev);
1609
1610 error:
1611 + amdgpu_virt_init_data_exchange(adev);
1612 amdgpu_virt_release_full_gpu(adev, true);
1613 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
1614 atomic_inc(&adev->vram_lost_counter);
1615 diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
1616 index 078f70faedcb..d06332be59d3 100644
1617 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
1618 +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
1619 @@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
1620 return r;
1621 }
1622 /* Retrieve checksum from mailbox2 */
1623 - if (req == IDH_REQ_GPU_INIT_ACCESS) {
1624 + if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
1625 adev->virt.fw_reserve.checksum_key =
1626 RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
1627 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
1628 diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
1629 index 7c3b634d8d5f..de5a689e1925 100644
1630 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
1631 +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
1632 @@ -71,7 +71,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
1633 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
1634 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
1635 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
1636 - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
1637 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
1638 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
1639 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
1640 @@ -89,6 +88,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
1641 static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
1642 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
1643 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
1644 + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
1645 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
1646 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
1647 };
1648 @@ -96,6 +96,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
1649 static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
1650 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
1651 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
1652 + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
1653 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
1654 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
1655 };
1656 diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
1657 index 8e28e738cb52..391547358756 100644
1658 --- a/drivers/gpu/drm/bridge/tc358767.c
1659 +++ b/drivers/gpu/drm/bridge/tc358767.c
1660 @@ -98,6 +98,8 @@
1661 #define DP0_STARTVAL 0x064c
1662 #define DP0_ACTIVEVAL 0x0650
1663 #define DP0_SYNCVAL 0x0654
1664 +#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
1665 +#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
1666 #define DP0_MISC 0x0658
1667 #define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
1668 #define BPC_6 (0 << 5)
1669 @@ -142,6 +144,8 @@
1670 #define DP0_LTLOOPCTRL 0x06d8
1671 #define DP0_SNKLTCTRL 0x06e4
1672
1673 +#define DP1_SRCCTRL 0x07a0
1674 +
1675 /* PHY */
1676 #define DP_PHY_CTRL 0x0800
1677 #define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
1678 @@ -150,6 +154,7 @@
1679 #define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
1680 #define PHY_RDY BIT(16) /* PHY Main Channels Ready */
1681 #define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
1682 +#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
1683 #define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
1684 #define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
1685
1686 @@ -540,6 +545,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
1687 unsigned long rate;
1688 u32 value;
1689 int ret;
1690 + u32 dp_phy_ctrl;
1691
1692 rate = clk_get_rate(tc->refclk);
1693 switch (rate) {
1694 @@ -564,7 +570,10 @@ static int tc_aux_link_setup(struct tc_data *tc)
1695 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
1696 tc_write(SYS_PLLPARAM, value);
1697
1698 - tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
1699 + dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
1700 + if (tc->link.base.num_lanes == 2)
1701 + dp_phy_ctrl |= PHY_2LANE;
1702 + tc_write(DP_PHY_CTRL, dp_phy_ctrl);
1703
1704 /*
1705 * Initially PLLs are in bypass. Force PLL parameter update,
1706 @@ -719,7 +728,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
1707
1708 tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
1709
1710 - tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
1711 + tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
1712 + ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
1713 + ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
1714
1715 tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
1716 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
1717 @@ -829,12 +840,11 @@ static int tc_main_link_setup(struct tc_data *tc)
1718 if (!tc->mode)
1719 return -EINVAL;
1720
1721 - /* from excel file - DP0_SrcCtrl */
1722 - tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
1723 - DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
1724 - DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
1725 - /* from excel file - DP1_SrcCtrl */
1726 - tc_write(0x07a0, 0x00003083);
1727 + tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
1728 + /* SSCG and BW27 on DP1 must be set to the same as on DP0 */
1729 + tc_write(DP1_SRCCTRL,
1730 + (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
1731 + ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
1732
1733 rate = clk_get_rate(tc->refclk);
1734 switch (rate) {
1735 @@ -855,8 +865,11 @@ static int tc_main_link_setup(struct tc_data *tc)
1736 }
1737 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
1738 tc_write(SYS_PLLPARAM, value);
1739 +
1740 /* Setup Main Link */
1741 - dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
1742 + dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
1743 + if (tc->link.base.num_lanes == 2)
1744 + dp_phy_ctrl |= PHY_2LANE;
1745 tc_write(DP_PHY_CTRL, dp_phy_ctrl);
1746 msleep(100);
1747
1748 @@ -1105,10 +1118,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
1749 static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
1750 struct drm_display_mode *mode)
1751 {
1752 + struct tc_data *tc = connector_to_tc(connector);
1753 + u32 req, avail;
1754 + u32 bits_per_pixel = 24;
1755 +
1756 /* DPI interface clock limitation: upto 154 MHz */
1757 if (mode->clock > 154000)
1758 return MODE_CLOCK_HIGH;
1759
1760 + req = mode->clock * bits_per_pixel / 8;
1761 + avail = tc->link.base.num_lanes * tc->link.base.rate;
1762 +
1763 + if (req > avail)
1764 + return MODE_BAD;
1765 +
1766 return MODE_OK;
1767 }
1768
1769 @@ -1195,6 +1218,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
1770
1771 drm_display_info_set_bus_formats(&tc->connector.display_info,
1772 &bus_format, 1);
1773 + tc->connector.display_info.bus_flags =
1774 + DRM_BUS_FLAG_DE_HIGH |
1775 + DRM_BUS_FLAG_PIXDATA_NEGEDGE |
1776 + DRM_BUS_FLAG_SYNC_NEGEDGE;
1777 drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
1778
1779 return 0;
1780 diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
1781 index fe6bfaf8b53f..086f2adc541b 100644
1782 --- a/drivers/gpu/drm/drm_lease.c
1783 +++ b/drivers/gpu/drm/drm_lease.c
1784 @@ -521,7 +521,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
1785
1786 object_count = cl->object_count;
1787
1788 - object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32));
1789 + object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
1790 + array_size(object_count, sizeof(__u32)));
1791 if (IS_ERR(object_ids))
1792 return PTR_ERR(object_ids);
1793
1794 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1795 index 47cc932e23a7..280c851714e6 100644
1796 --- a/drivers/gpu/drm/i915/i915_gem.c
1797 +++ b/drivers/gpu/drm/i915/i915_gem.c
1798 @@ -1821,6 +1821,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1799 return 0;
1800 }
1801
1802 +static inline bool
1803 +__vma_matches(struct vm_area_struct *vma, struct file *filp,
1804 + unsigned long addr, unsigned long size)
1805 +{
1806 + if (vma->vm_file != filp)
1807 + return false;
1808 +
1809 + return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
1810 +}
1811 +
1812 /**
1813 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1814 * it is mapped to.
1815 @@ -1879,7 +1889,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1816 return -EINTR;
1817 }
1818 vma = find_vma(mm, addr);
1819 - if (vma)
1820 + if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
1821 vma->vm_page_prot =
1822 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1823 else
1824 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
1825 index 8fc61e96754f..50d56498de77 100644
1826 --- a/drivers/gpu/drm/i915/intel_drv.h
1827 +++ b/drivers/gpu/drm/i915/intel_drv.h
1828 @@ -209,6 +209,16 @@ struct intel_fbdev {
1829 unsigned long vma_flags;
1830 async_cookie_t cookie;
1831 int preferred_bpp;
1832 +
1833 + /* Whether or not fbdev hpd processing is temporarily suspended */
1834 + bool hpd_suspended : 1;
1835 + /* Set when a hotplug was received while HPD processing was
1836 + * suspended
1837 + */
1838 + bool hpd_waiting : 1;
1839 +
1840 + /* Protects hpd_suspended */
1841 + struct mutex hpd_lock;
1842 };
1843
1844 struct intel_encoder {
1845 diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
1846 index fb2f9fce34cd..2d6506c08bf7 100644
1847 --- a/drivers/gpu/drm/i915/intel_fbdev.c
1848 +++ b/drivers/gpu/drm/i915/intel_fbdev.c
1849 @@ -677,6 +677,7 @@ int intel_fbdev_init(struct drm_device *dev)
1850 if (ifbdev == NULL)
1851 return -ENOMEM;
1852
1853 + mutex_init(&ifbdev->hpd_lock);
1854 drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
1855
1856 if (!intel_fbdev_init_bios(dev, ifbdev))
1857 @@ -750,6 +751,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
1858 intel_fbdev_destroy(ifbdev);
1859 }
1860
1861 +/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
1862 + * processing, fbdev will perform a full connector reprobe if a hotplug event
1863 + * was received while HPD was suspended.
1864 + */
1865 +static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
1866 +{
1867 + bool send_hpd = false;
1868 +
1869 + mutex_lock(&ifbdev->hpd_lock);
1870 + ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
1871 + send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
1872 + ifbdev->hpd_waiting = false;
1873 + mutex_unlock(&ifbdev->hpd_lock);
1874 +
1875 + if (send_hpd) {
1876 + DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
1877 + drm_fb_helper_hotplug_event(&ifbdev->helper);
1878 + }
1879 +}
1880 +
1881 void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
1882 {
1883 struct drm_i915_private *dev_priv = to_i915(dev);
1884 @@ -771,6 +792,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
1885 */
1886 if (state != FBINFO_STATE_RUNNING)
1887 flush_work(&dev_priv->fbdev_suspend_work);
1888 +
1889 console_lock();
1890 } else {
1891 /*
1892 @@ -798,17 +820,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
1893
1894 drm_fb_helper_set_suspend(&ifbdev->helper, state);
1895 console_unlock();
1896 +
1897 + intel_fbdev_hpd_set_suspend(ifbdev, state);
1898 }
1899
1900 void intel_fbdev_output_poll_changed(struct drm_device *dev)
1901 {
1902 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
1903 + bool send_hpd;
1904
1905 if (!ifbdev)
1906 return;
1907
1908 intel_fbdev_sync(ifbdev);
1909 - if (ifbdev->vma || ifbdev->helper.deferred_setup)
1910 +
1911 + mutex_lock(&ifbdev->hpd_lock);
1912 + send_hpd = !ifbdev->hpd_suspended;
1913 + ifbdev->hpd_waiting = true;
1914 + mutex_unlock(&ifbdev->hpd_lock);
1915 +
1916 + if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
1917 drm_fb_helper_hotplug_event(&ifbdev->helper);
1918 }
1919
1920 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
1921 index 816ccaedfc73..8675613e142b 100644
1922 --- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
1923 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
1924 @@ -22,6 +22,7 @@
1925 #include <engine/falcon.h>
1926
1927 #include <core/gpuobj.h>
1928 +#include <subdev/mc.h>
1929 #include <subdev/timer.h>
1930 #include <engine/fifo.h>
1931
1932 @@ -107,8 +108,10 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
1933 }
1934 }
1935
1936 - nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
1937 - nvkm_wr32(device, base + 0x014, 0xffffffff);
1938 + if (nvkm_mc_enabled(device, engine->subdev.index)) {
1939 + nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
1940 + nvkm_wr32(device, base + 0x014, 0xffffffff);
1941 + }
1942 return 0;
1943 }
1944
1945 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
1946 index 3695cde669f8..07914e36939e 100644
1947 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
1948 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
1949 @@ -132,11 +132,12 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
1950 duty = nvkm_therm_update_linear(therm);
1951 break;
1952 case NVBIOS_THERM_FAN_OTHER:
1953 - if (therm->cstate)
1954 + if (therm->cstate) {
1955 duty = therm->cstate;
1956 - else
1957 + poll = false;
1958 + } else {
1959 duty = nvkm_therm_update_linear_fallback(therm);
1960 - poll = false;
1961 + }
1962 break;
1963 }
1964 immd = false;
1965 diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
1966 index 875fca662ac0..1ea2dd35bca9 100644
1967 --- a/drivers/gpu/drm/vkms/vkms_crtc.c
1968 +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
1969 @@ -1,10 +1,4 @@
1970 -// SPDX-License-Identifier: GPL-2.0
1971 -/*
1972 - * This program is free software; you can redistribute it and/or modify
1973 - * it under the terms of the GNU General Public License as published by
1974 - * the Free Software Foundation; either version 2 of the License, or
1975 - * (at your option) any later version.
1976 - */
1977 +// SPDX-License-Identifier: GPL-2.0+
1978
1979 #include "vkms_drv.h"
1980 #include <drm/drm_atomic_helper.h>
1981 diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
1982 index 6e728b825259..b1201c18d3eb 100644
1983 --- a/drivers/gpu/drm/vkms/vkms_drv.c
1984 +++ b/drivers/gpu/drm/vkms/vkms_drv.c
1985 @@ -1,9 +1,4 @@
1986 -/*
1987 - * This program is free software; you can redistribute it and/or modify
1988 - * it under the terms of the GNU General Public License as published by
1989 - * the Free Software Foundation; either version 2 of the License, or
1990 - * (at your option) any later version.
1991 - */
1992 +// SPDX-License-Identifier: GPL-2.0+
1993
1994 #include <linux/module.h>
1995 #include <drm/drm_gem.h>
1996 diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
1997 index 07be29f2dc44..e018752d57bb 100644
1998 --- a/drivers/gpu/drm/vkms/vkms_drv.h
1999 +++ b/drivers/gpu/drm/vkms/vkms_drv.h
2000 @@ -1,3 +1,5 @@
2001 +/* SPDX-License-Identifier: GPL-2.0+ */
2002 +
2003 #ifndef _VKMS_DRV_H_
2004 #define _VKMS_DRV_H_
2005
2006 diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
2007 index c7e38368602b..ca4a74e04977 100644
2008 --- a/drivers/gpu/drm/vkms/vkms_gem.c
2009 +++ b/drivers/gpu/drm/vkms/vkms_gem.c
2010 @@ -1,10 +1,4 @@
2011 -// SPDX-License-Identifier: GPL-2.0
2012 -/*
2013 - * This program is free software; you can redistribute it and/or modify
2014 - * it under the terms of the GNU General Public License as published by
2015 - * the Free Software Foundation; either version 2 of the License, or
2016 - * (at your option) any later version.
2017 - */
2018 +// SPDX-License-Identifier: GPL-2.0+
2019
2020 #include <linux/shmem_fs.h>
2021
2022 diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
2023 index 901012cb1af1..5697148e0b73 100644
2024 --- a/drivers/gpu/drm/vkms/vkms_output.c
2025 +++ b/drivers/gpu/drm/vkms/vkms_output.c
2026 @@ -1,10 +1,4 @@
2027 -// SPDX-License-Identifier: GPL-2.0
2028 -/*
2029 - * This program is free software; you can redistribute it and/or modify
2030 - * it under the terms of the GNU General Public License as published by
2031 - * the Free Software Foundation; either version 2 of the License, or
2032 - * (at your option) any later version.
2033 - */
2034 +// SPDX-License-Identifier: GPL-2.0+
2035
2036 #include "vkms_drv.h"
2037 #include <drm/drm_crtc_helper.h>
2038 diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
2039 index 9f75b1e2c1c4..ce043b721e0c 100644
2040 --- a/drivers/gpu/drm/vkms/vkms_plane.c
2041 +++ b/drivers/gpu/drm/vkms/vkms_plane.c
2042 @@ -1,10 +1,4 @@
2043 -// SPDX-License-Identifier: GPL-2.0
2044 -/*
2045 - * This program is free software; you can redistribute it and/or modify
2046 - * it under the terms of the GNU General Public License as published by
2047 - * the Free Software Foundation; either version 2 of the License, or
2048 - * (at your option) any later version.
2049 - */
2050 +// SPDX-License-Identifier: GPL-2.0+
2051
2052 #include "vkms_drv.h"
2053 #include <drm/drm_plane_helper.h>
2054 diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
2055 index 1efcfdf9f8a8..dd9dd4e40827 100644
2056 --- a/drivers/input/misc/bma150.c
2057 +++ b/drivers/input/misc/bma150.c
2058 @@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
2059 idev->close = bma150_irq_close;
2060 input_set_drvdata(idev, bma150);
2061
2062 + bma150->input = idev;
2063 +
2064 error = input_register_device(idev);
2065 if (error) {
2066 input_free_device(idev);
2067 return error;
2068 }
2069
2070 - bma150->input = idev;
2071 return 0;
2072 }
2073
2074 @@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
2075
2076 bma150_init_input_device(bma150, ipoll_dev->input);
2077
2078 + bma150->input_polled = ipoll_dev;
2079 + bma150->input = ipoll_dev->input;
2080 +
2081 error = input_register_polled_device(ipoll_dev);
2082 if (error) {
2083 input_free_polled_device(ipoll_dev);
2084 return error;
2085 }
2086
2087 - bma150->input_polled = ipoll_dev;
2088 - bma150->input = ipoll_dev->input;
2089 -
2090 return 0;
2091 }
2092
2093 diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
2094 index f322a1768fbb..225ae6980182 100644
2095 --- a/drivers/input/mouse/elan_i2c_core.c
2096 +++ b/drivers/input/mouse/elan_i2c_core.c
2097 @@ -1336,7 +1336,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
2098 static const struct acpi_device_id elan_acpi_id[] = {
2099 { "ELAN0000", 0 },
2100 { "ELAN0100", 0 },
2101 - { "ELAN0501", 0 },
2102 { "ELAN0600", 0 },
2103 { "ELAN0602", 0 },
2104 { "ELAN0605", 0 },
2105 @@ -1346,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
2106 { "ELAN060C", 0 },
2107 { "ELAN0611", 0 },
2108 { "ELAN0612", 0 },
2109 + { "ELAN0617", 0 },
2110 { "ELAN0618", 0 },
2111 { "ELAN061C", 0 },
2112 { "ELAN061D", 0 },
2113 diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
2114 index 9fe075c137dc..a7f8b1614559 100644
2115 --- a/drivers/input/mouse/elantech.c
2116 +++ b/drivers/input/mouse/elantech.c
2117 @@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
2118 * Asus UX31 0x361f00 20, 15, 0e clickpad
2119 * Asus UX32VD 0x361f02 00, 15, 0e clickpad
2120 * Avatar AVIU-145A2 0x361f00 ? clickpad
2121 + * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**)
2122 + * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**)
2123 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
2124 * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
2125 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
2126 @@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
2127 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
2128 },
2129 },
2130 + {
2131 + /* Fujitsu H780 also has a middle button */
2132 + .matches = {
2133 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
2134 + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
2135 + },
2136 + },
2137 #endif
2138 { }
2139 };
2140 diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
2141 index 5921ecc670c1..f3dcc7640319 100644
2142 --- a/drivers/md/dm-crypt.c
2143 +++ b/drivers/md/dm-crypt.c
2144 @@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
2145 if (IS_ERR(bip))
2146 return PTR_ERR(bip);
2147
2148 - tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);
2149 + tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
2150
2151 bip->bip_iter.bi_size = tag_len;
2152 bip->bip_iter.bi_sector = io->cc->start + io->sector;
2153 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
2154 index c30a7850b2da..cd4220ee7004 100644
2155 --- a/drivers/md/dm-thin.c
2156 +++ b/drivers/md/dm-thin.c
2157 @@ -257,6 +257,7 @@ struct pool {
2158
2159 spinlock_t lock;
2160 struct bio_list deferred_flush_bios;
2161 + struct bio_list deferred_flush_completions;
2162 struct list_head prepared_mappings;
2163 struct list_head prepared_discards;
2164 struct list_head prepared_discards_pt2;
2165 @@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
2166 mempool_free(m, &m->tc->pool->mapping_pool);
2167 }
2168
2169 +static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
2170 +{
2171 + struct pool *pool = tc->pool;
2172 + unsigned long flags;
2173 +
2174 + /*
2175 + * If the bio has the REQ_FUA flag set we must commit the metadata
2176 + * before signaling its completion.
2177 + */
2178 + if (!bio_triggers_commit(tc, bio)) {
2179 + bio_endio(bio);
2180 + return;
2181 + }
2182 +
2183 + /*
2184 + * Complete bio with an error if earlier I/O caused changes to the
2185 + * metadata that can't be committed, e.g, due to I/O errors on the
2186 + * metadata device.
2187 + */
2188 + if (dm_thin_aborted_changes(tc->td)) {
2189 + bio_io_error(bio);
2190 + return;
2191 + }
2192 +
2193 + /*
2194 + * Batch together any bios that trigger commits and then issue a
2195 + * single commit for them in process_deferred_bios().
2196 + */
2197 + spin_lock_irqsave(&pool->lock, flags);
2198 + bio_list_add(&pool->deferred_flush_completions, bio);
2199 + spin_unlock_irqrestore(&pool->lock, flags);
2200 +}
2201 +
2202 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
2203 {
2204 struct thin_c *tc = m->tc;
2205 @@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
2206 */
2207 if (bio) {
2208 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
2209 - bio_endio(bio);
2210 + complete_overwrite_bio(tc, bio);
2211 } else {
2212 inc_all_io_entry(tc->pool, m->cell->holder);
2213 remap_and_issue(tc, m->cell->holder, m->data_block);
2214 @@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool)
2215 {
2216 unsigned long flags;
2217 struct bio *bio;
2218 - struct bio_list bios;
2219 + struct bio_list bios, bio_completions;
2220 struct thin_c *tc;
2221
2222 tc = get_first_thin(pool);
2223 @@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool)
2224 }
2225
2226 /*
2227 - * If there are any deferred flush bios, we must commit
2228 - * the metadata before issuing them.
2229 + * If there are any deferred flush bios, we must commit the metadata
2230 + * before issuing them or signaling their completion.
2231 */
2232 bio_list_init(&bios);
2233 + bio_list_init(&bio_completions);
2234 +
2235 spin_lock_irqsave(&pool->lock, flags);
2236 bio_list_merge(&bios, &pool->deferred_flush_bios);
2237 bio_list_init(&pool->deferred_flush_bios);
2238 +
2239 + bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
2240 + bio_list_init(&pool->deferred_flush_completions);
2241 spin_unlock_irqrestore(&pool->lock, flags);
2242
2243 - if (bio_list_empty(&bios) &&
2244 + if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
2245 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
2246 return;
2247
2248 if (commit(pool)) {
2249 + bio_list_merge(&bios, &bio_completions);
2250 +
2251 while ((bio = bio_list_pop(&bios)))
2252 bio_io_error(bio);
2253 return;
2254 }
2255 pool->last_commit_jiffies = jiffies;
2256
2257 + while ((bio = bio_list_pop(&bio_completions)))
2258 + bio_endio(bio);
2259 +
2260 while ((bio = bio_list_pop(&bios)))
2261 generic_make_request(bio);
2262 }
2263 @@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2264 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
2265 spin_lock_init(&pool->lock);
2266 bio_list_init(&pool->deferred_flush_bios);
2267 + bio_list_init(&pool->deferred_flush_completions);
2268 INIT_LIST_HEAD(&pool->prepared_mappings);
2269 INIT_LIST_HEAD(&pool->prepared_discards);
2270 INIT_LIST_HEAD(&pool->prepared_discards_pt2);
2271 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2272 index 1d54109071cc..fa47249fa3e4 100644
2273 --- a/drivers/md/raid1.c
2274 +++ b/drivers/md/raid1.c
2275 @@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio)
2276 reschedule_retry(r1_bio);
2277 }
2278
2279 +static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
2280 +{
2281 + sector_t sync_blocks = 0;
2282 + sector_t s = r1_bio->sector;
2283 + long sectors_to_go = r1_bio->sectors;
2284 +
2285 + /* make sure these bits don't get cleared. */
2286 + do {
2287 + md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
2288 + s += sync_blocks;
2289 + sectors_to_go -= sync_blocks;
2290 + } while (sectors_to_go > 0);
2291 +}
2292 +
2293 static void end_sync_write(struct bio *bio)
2294 {
2295 int uptodate = !bio->bi_status;
2296 @@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio)
2297 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
2298
2299 if (!uptodate) {
2300 - sector_t sync_blocks = 0;
2301 - sector_t s = r1_bio->sector;
2302 - long sectors_to_go = r1_bio->sectors;
2303 - /* make sure these bits doesn't get cleared. */
2304 - do {
2305 - md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
2306 - s += sync_blocks;
2307 - sectors_to_go -= sync_blocks;
2308 - } while (sectors_to_go > 0);
2309 + abort_sync_write(mddev, r1_bio);
2310 set_bit(WriteErrorSeen, &rdev->flags);
2311 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2312 set_bit(MD_RECOVERY_NEEDED, &
2313 @@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2314 (i == r1_bio->read_disk ||
2315 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2316 continue;
2317 - if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
2318 + if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2319 + abort_sync_write(mddev, r1_bio);
2320 continue;
2321 + }
2322
2323 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2324 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2325 diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
2326 index 68a1ac929917..d382b13c27dd 100644
2327 --- a/drivers/misc/eeprom/Kconfig
2328 +++ b/drivers/misc/eeprom/Kconfig
2329 @@ -13,7 +13,7 @@ config EEPROM_AT24
2330 ones like at24c64, 24lc02 or fm24c04:
2331
2332 24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
2333 - 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
2334 + 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024, 24c2048
2335
2336 Unless you like data loss puzzles, always be sure that any chip
2337 you configure as a 24c32 (32 kbit) or larger is NOT really a
2338 diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
2339 index 7e50e1d6f58c..94836fcbe721 100644
2340 --- a/drivers/misc/eeprom/at24.c
2341 +++ b/drivers/misc/eeprom/at24.c
2342 @@ -173,6 +173,7 @@ AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16);
2343 AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16);
2344 AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16);
2345 AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16);
2346 +AT24_CHIP_DATA(at24_data_24c2048, 2097152 / 8, AT24_FLAG_ADDR16);
2347 /* identical to 24c08 ? */
2348 AT24_CHIP_DATA(at24_data_INT3499, 8192 / 8, 0);
2349
2350 @@ -199,6 +200,7 @@ static const struct i2c_device_id at24_ids[] = {
2351 { "24c256", (kernel_ulong_t)&at24_data_24c256 },
2352 { "24c512", (kernel_ulong_t)&at24_data_24c512 },
2353 { "24c1024", (kernel_ulong_t)&at24_data_24c1024 },
2354 + { "24c2048", (kernel_ulong_t)&at24_data_24c2048 },
2355 { "at24", 0 },
2356 { /* END OF LIST */ }
2357 };
2358 @@ -227,6 +229,7 @@ static const struct of_device_id at24_of_match[] = {
2359 { .compatible = "atmel,24c256", .data = &at24_data_24c256 },
2360 { .compatible = "atmel,24c512", .data = &at24_data_24c512 },
2361 { .compatible = "atmel,24c1024", .data = &at24_data_24c1024 },
2362 + { .compatible = "atmel,24c2048", .data = &at24_data_24c2048 },
2363 { /* END OF LIST */ },
2364 };
2365 MODULE_DEVICE_TABLE(of, at24_of_match);
2366 diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
2367 index f6755b86eba2..eee004fb3c3e 100644
2368 --- a/drivers/mmc/core/block.c
2369 +++ b/drivers/mmc/core/block.c
2370 @@ -2114,7 +2114,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
2371 if (waiting)
2372 wake_up(&mq->wait);
2373 else
2374 - kblockd_schedule_work(&mq->complete_work);
2375 + queue_work(mq->card->complete_wq, &mq->complete_work);
2376
2377 return;
2378 }
2379 @@ -2928,6 +2928,13 @@ static int mmc_blk_probe(struct mmc_card *card)
2380
2381 mmc_fixup_device(card, mmc_blk_fixups);
2382
2383 + card->complete_wq = alloc_workqueue("mmc_complete",
2384 + WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2385 + if (unlikely(!card->complete_wq)) {
2386 + pr_err("Failed to create mmc completion workqueue");
2387 + return -ENOMEM;
2388 + }
2389 +
2390 md = mmc_blk_alloc(card);
2391 if (IS_ERR(md))
2392 return PTR_ERR(md);
2393 @@ -2991,6 +2998,7 @@ static void mmc_blk_remove(struct mmc_card *card)
2394 pm_runtime_put_noidle(&card->dev);
2395 mmc_blk_remove_req(md);
2396 dev_set_drvdata(&card->dev, NULL);
2397 + destroy_workqueue(card->complete_wq);
2398 }
2399
2400 static int _mmc_blk_suspend(struct mmc_card *card)
2401 diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
2402 index 568349e1fbc2..c4584184525f 100644
2403 --- a/drivers/mmc/host/sunxi-mmc.c
2404 +++ b/drivers/mmc/host/sunxi-mmc.c
2405 @@ -1394,6 +1394,21 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
2406 if (ret)
2407 goto error_free_dma;
2408
2409 + /*
2410 + * If we don't support delay chains in the SoC, we can't use any
2411 + * of the higher speed modes. Mask them out in case the device
2412 + * tree specifies the properties for them, which gets added to
2413 + * the caps by mmc_of_parse() above.
2414 + */
2415 + if (!(host->cfg->clk_delays || host->use_new_timings)) {
2416 + mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR |
2417 + MMC_CAP_1_2V_DDR | MMC_CAP_UHS);
2418 + mmc->caps2 &= ~MMC_CAP2_HS200;
2419 + }
2420 +
2421 + /* TODO: This driver doesn't support HS400 mode yet */
2422 + mmc->caps2 &= ~MMC_CAP2_HS400;
2423 +
2424 ret = sunxi_mmc_init_host(host);
2425 if (ret)
2426 goto error_free_dma;
2427 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2428 index e5bddae16ed4..e0d2b7473901 100644
2429 --- a/drivers/nvme/host/core.c
2430 +++ b/drivers/nvme/host/core.c
2431 @@ -2095,7 +2095,7 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
2432
2433 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
2434 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2435 - "nqn.2014.08.org.nvmexpress:%4x%4x",
2436 + "nqn.2014.08.org.nvmexpress:%04x%04x",
2437 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2438 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2439 off += sizeof(id->sn);
2440 diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
2441 index c27af277e14e..815509dbed84 100644
2442 --- a/drivers/nvme/host/multipath.c
2443 +++ b/drivers/nvme/host/multipath.c
2444 @@ -556,6 +556,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2445 return 0;
2446 out_free_ana_log_buf:
2447 kfree(ctrl->ana_log_buf);
2448 + ctrl->ana_log_buf = NULL;
2449 out:
2450 return error;
2451 }
2452 @@ -563,5 +564,6 @@ out:
2453 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
2454 {
2455 kfree(ctrl->ana_log_buf);
2456 + ctrl->ana_log_buf = NULL;
2457 }
2458
2459 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2460 index d668682f91df..f46313f441ec 100644
2461 --- a/drivers/nvme/host/pci.c
2462 +++ b/drivers/nvme/host/pci.c
2463 @@ -908,9 +908,11 @@ static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
2464
2465 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
2466 {
2467 - if (++nvmeq->cq_head == nvmeq->q_depth) {
2468 + if (nvmeq->cq_head == nvmeq->q_depth - 1) {
2469 nvmeq->cq_head = 0;
2470 nvmeq->cq_phase = !nvmeq->cq_phase;
2471 + } else {
2472 + nvmeq->cq_head++;
2473 }
2474 }
2475
2476 @@ -1727,8 +1729,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
2477 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
2478 size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
2479
2480 - dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
2481 - le64_to_cpu(desc->addr));
2482 + dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
2483 + le64_to_cpu(desc->addr),
2484 + DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
2485 }
2486
2487 kfree(dev->host_mem_desc_bufs);
2488 @@ -1794,8 +1797,9 @@ out_free_bufs:
2489 while (--i >= 0) {
2490 size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
2491
2492 - dma_free_coherent(dev->dev, size, bufs[i],
2493 - le64_to_cpu(descs[i].addr));
2494 + dma_free_attrs(dev->dev, size, bufs[i],
2495 + le64_to_cpu(descs[i].addr),
2496 + DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
2497 }
2498
2499 kfree(bufs);
2500 diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
2501 index f039266b275d..a57b969b8973 100644
2502 --- a/drivers/s390/crypto/ap_bus.c
2503 +++ b/drivers/s390/crypto/ap_bus.c
2504 @@ -249,7 +249,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr)
2505 static inline int ap_test_config_card_id(unsigned int id)
2506 {
2507 if (!ap_configuration) /* QCI not supported */
2508 - return 1;
2509 + /* only ids 0...3F may be probed */
2510 + return id < 0x40 ? 1 : 0;
2511 return ap_test_config(ap_configuration->apm, id);
2512 }
2513
2514 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2515 index 0a27917263aa..58b78702c6c9 100644
2516 --- a/drivers/scsi/sd.c
2517 +++ b/drivers/scsi/sd.c
2518 @@ -2970,9 +2970,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2519 if (rot == 1) {
2520 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2521 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2522 - } else {
2523 - blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
2524 - blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
2525 }
2526
2527 if (sdkp->device->type == TYPE_ZBC) {
2528 @@ -3109,6 +3106,15 @@ static int sd_revalidate_disk(struct gendisk *disk)
2529 if (sdkp->media_present) {
2530 sd_read_capacity(sdkp, buffer);
2531
2532 + /*
2533 + * set the default to rotational. All non-rotational devices
2534 + * support the block characteristics VPD page, which will
2535 + * cause this to be updated correctly and any device which
2536 + * doesn't support it should be treated as rotational.
2537 + */
2538 + blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
2539 + blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
2540 +
2541 if (scsi_device_supports_vpd(sdp)) {
2542 sd_read_block_provisioning(sdkp);
2543 sd_read_block_limits(sdkp);
2544 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2545 index 7b637fc27990..23db881daab5 100644
2546 --- a/fs/cifs/file.c
2547 +++ b/fs/cifs/file.c
2548 @@ -1128,6 +1128,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
2549 return -EINVAL;
2550 }
2551
2552 + BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2553 + PAGE_SIZE);
2554 + max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2555 + PAGE_SIZE);
2556 max_num = (max_buf - sizeof(struct smb_hdr)) /
2557 sizeof(LOCKING_ANDX_RANGE);
2558 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2559 @@ -1466,6 +1470,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2560 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2561 return -EINVAL;
2562
2563 + BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2564 + PAGE_SIZE);
2565 + max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2566 + PAGE_SIZE);
2567 max_num = (max_buf - sizeof(struct smb_hdr)) /
2568 sizeof(LOCKING_ANDX_RANGE);
2569 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2570 diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
2571 index 2fc3d31967ee..b204e84b87fb 100644
2572 --- a/fs/cifs/smb2file.c
2573 +++ b/fs/cifs/smb2file.c
2574 @@ -128,6 +128,8 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2575 if (max_buf < sizeof(struct smb2_lock_element))
2576 return -EINVAL;
2577
2578 + BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
2579 + max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
2580 max_num = max_buf / sizeof(struct smb2_lock_element);
2581 buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
2582 if (!buf)
2583 @@ -264,6 +266,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
2584 return -EINVAL;
2585 }
2586
2587 + BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
2588 + max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
2589 max_num = max_buf / sizeof(struct smb2_lock_element);
2590 buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
2591 if (!buf) {
2592 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2593 index 8a01e89ff827..1e5a1171212f 100644
2594 --- a/fs/cifs/smb2pdu.c
2595 +++ b/fs/cifs/smb2pdu.c
2596 @@ -2814,9 +2814,10 @@ smb2_echo_callback(struct mid_q_entry *mid)
2597 {
2598 struct TCP_Server_Info *server = mid->callback_data;
2599 struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
2600 - unsigned int credits_received = 1;
2601 + unsigned int credits_received = 0;
2602
2603 - if (mid->mid_state == MID_RESPONSE_RECEIVED)
2604 + if (mid->mid_state == MID_RESPONSE_RECEIVED
2605 + || mid->mid_state == MID_RESPONSE_MALFORMED)
2606 credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
2607
2608 DeleteMidQEntry(mid);
2609 @@ -3073,7 +3074,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
2610 struct TCP_Server_Info *server = tcon->ses->server;
2611 struct smb2_sync_hdr *shdr =
2612 (struct smb2_sync_hdr *)rdata->iov[0].iov_base;
2613 - unsigned int credits_received = 1;
2614 + unsigned int credits_received = 0;
2615 struct smb_rqst rqst = { .rq_iov = rdata->iov,
2616 .rq_nvec = 2,
2617 .rq_pages = rdata->pages,
2618 @@ -3112,6 +3113,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
2619 task_io_account_read(rdata->got_bytes);
2620 cifs_stats_bytes_read(tcon, rdata->got_bytes);
2621 break;
2622 + case MID_RESPONSE_MALFORMED:
2623 + credits_received = le16_to_cpu(shdr->CreditRequest);
2624 + /* fall through */
2625 default:
2626 if (rdata->result != -ENODATA)
2627 rdata->result = -EIO;
2628 @@ -3305,7 +3309,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
2629 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
2630 unsigned int written;
2631 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
2632 - unsigned int credits_received = 1;
2633 + unsigned int credits_received = 0;
2634
2635 switch (mid->mid_state) {
2636 case MID_RESPONSE_RECEIVED:
2637 @@ -3333,6 +3337,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
2638 case MID_RETRY_NEEDED:
2639 wdata->result = -EAGAIN;
2640 break;
2641 + case MID_RESPONSE_MALFORMED:
2642 + credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
2643 + /* fall through */
2644 default:
2645 wdata->result = -EIO;
2646 break;
2647 diff --git a/fs/inode.c b/fs/inode.c
2648 index 65ae154df760..42f6d25f32a5 100644
2649 --- a/fs/inode.c
2650 +++ b/fs/inode.c
2651 @@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
2652 return LRU_REMOVED;
2653 }
2654
2655 - /*
2656 - * Recently referenced inodes and inodes with many attached pages
2657 - * get one more pass.
2658 - */
2659 - if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
2660 + /* recently referenced inodes get one more pass */
2661 + if (inode->i_state & I_REFERENCED) {
2662 inode->i_state &= ~I_REFERENCED;
2663 spin_unlock(&inode->i_lock);
2664 return LRU_ROTATE;
2665 diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
2666 index 899174c7a8ae..39b835d7c445 100644
2667 --- a/fs/nfsd/nfsctl.c
2668 +++ b/fs/nfsd/nfsctl.c
2669 @@ -1239,8 +1239,8 @@ static __net_init int nfsd_init_net(struct net *net)
2670 retval = nfsd_idmap_init(net);
2671 if (retval)
2672 goto out_idmap_error;
2673 - nn->nfsd4_lease = 45; /* default lease time */
2674 - nn->nfsd4_grace = 45;
2675 + nn->nfsd4_lease = 90; /* default lease time */
2676 + nn->nfsd4_grace = 90;
2677 nn->somebody_reclaimed = false;
2678 nn->clverifier_counter = prandom_u32();
2679 nn->clientid_counter = prandom_u32();
2680 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
2681 index a027473561c6..d76fe166f6ce 100644
2682 --- a/fs/proc/task_mmu.c
2683 +++ b/fs/proc/task_mmu.c
2684 @@ -423,7 +423,7 @@ struct mem_size_stats {
2685 };
2686
2687 static void smaps_account(struct mem_size_stats *mss, struct page *page,
2688 - bool compound, bool young, bool dirty)
2689 + bool compound, bool young, bool dirty, bool locked)
2690 {
2691 int i, nr = compound ? 1 << compound_order(page) : 1;
2692 unsigned long size = nr * PAGE_SIZE;
2693 @@ -450,24 +450,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
2694 else
2695 mss->private_clean += size;
2696 mss->pss += (u64)size << PSS_SHIFT;
2697 + if (locked)
2698 + mss->pss_locked += (u64)size << PSS_SHIFT;
2699 return;
2700 }
2701
2702 for (i = 0; i < nr; i++, page++) {
2703 int mapcount = page_mapcount(page);
2704 + unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
2705
2706 if (mapcount >= 2) {
2707 if (dirty || PageDirty(page))
2708 mss->shared_dirty += PAGE_SIZE;
2709 else
2710 mss->shared_clean += PAGE_SIZE;
2711 - mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
2712 + mss->pss += pss / mapcount;
2713 + if (locked)
2714 + mss->pss_locked += pss / mapcount;
2715 } else {
2716 if (dirty || PageDirty(page))
2717 mss->private_dirty += PAGE_SIZE;
2718 else
2719 mss->private_clean += PAGE_SIZE;
2720 - mss->pss += PAGE_SIZE << PSS_SHIFT;
2721 + mss->pss += pss;
2722 + if (locked)
2723 + mss->pss_locked += pss;
2724 }
2725 }
2726 }
2727 @@ -490,6 +497,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
2728 {
2729 struct mem_size_stats *mss = walk->private;
2730 struct vm_area_struct *vma = walk->vma;
2731 + bool locked = !!(vma->vm_flags & VM_LOCKED);
2732 struct page *page = NULL;
2733
2734 if (pte_present(*pte)) {
2735 @@ -532,7 +540,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
2736 if (!page)
2737 return;
2738
2739 - smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
2740 + smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
2741 }
2742
2743 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2744 @@ -541,6 +549,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
2745 {
2746 struct mem_size_stats *mss = walk->private;
2747 struct vm_area_struct *vma = walk->vma;
2748 + bool locked = !!(vma->vm_flags & VM_LOCKED);
2749 struct page *page;
2750
2751 /* FOLL_DUMP will return -EFAULT on huge zero page */
2752 @@ -555,7 +564,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
2753 /* pass */;
2754 else
2755 VM_BUG_ON_PAGE(1, page);
2756 - smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
2757 + smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
2758 }
2759 #else
2760 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
2761 @@ -737,11 +746,8 @@ static void smap_gather_stats(struct vm_area_struct *vma,
2762 }
2763 }
2764 #endif
2765 -
2766 /* mmap_sem is held in m_start */
2767 walk_page_vma(vma, &smaps_walk);
2768 - if (vma->vm_flags & VM_LOCKED)
2769 - mss->pss_locked += mss->pss;
2770 }
2771
2772 #define SEQ_PUT_DEC(str, val) \
2773 diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
2774 index de7377815b6b..8ef330027b13 100644
2775 --- a/include/linux/mmc/card.h
2776 +++ b/include/linux/mmc/card.h
2777 @@ -308,6 +308,7 @@ struct mmc_card {
2778 unsigned int nr_parts;
2779
2780 unsigned int bouncesz; /* Bounce buffer size */
2781 + struct workqueue_struct *complete_wq; /* Private workqueue */
2782 };
2783
2784 static inline bool mmc_large_sector(struct mmc_card *card)
2785 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
2786 index 53c500f0ca79..c2876e740514 100644
2787 --- a/include/linux/perf_event.h
2788 +++ b/include/linux/perf_event.h
2789 @@ -447,6 +447,11 @@ struct pmu {
2790 * Filter events for PMU-specific reasons.
2791 */
2792 int (*filter_match) (struct perf_event *event); /* optional */
2793 +
2794 + /*
2795 + * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
2796 + */
2797 + int (*check_period) (struct perf_event *event, u64 value); /* optional */
2798 };
2799
2800 enum perf_addr_filter_action_t {
2801 diff --git a/kernel/events/core.c b/kernel/events/core.c
2802 index 5a97f34bc14c..4fb9d5054618 100644
2803 --- a/kernel/events/core.c
2804 +++ b/kernel/events/core.c
2805 @@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
2806 }
2807 }
2808
2809 +static int perf_event_check_period(struct perf_event *event, u64 value)
2810 +{
2811 + return event->pmu->check_period(event, value);
2812 +}
2813 +
2814 static int perf_event_period(struct perf_event *event, u64 __user *arg)
2815 {
2816 u64 value;
2817 @@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
2818 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
2819 return -EINVAL;
2820
2821 + if (perf_event_check_period(event, value))
2822 + return -EINVAL;
2823 +
2824 event_function_call(event, __perf_event_period, &value);
2825
2826 return 0;
2827 @@ -9362,6 +9370,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
2828 return 0;
2829 }
2830
2831 +static int perf_event_nop_int(struct perf_event *event, u64 value)
2832 +{
2833 + return 0;
2834 +}
2835 +
2836 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
2837
2838 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
2839 @@ -9662,6 +9675,9 @@ got_cpu_context:
2840 pmu->pmu_disable = perf_pmu_nop_void;
2841 }
2842
2843 + if (!pmu->check_period)
2844 + pmu->check_period = perf_event_nop_int;
2845 +
2846 if (!pmu->event_idx)
2847 pmu->event_idx = perf_event_idx_default;
2848
2849 diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
2850 index 51386d9105fa..5631af940316 100644
2851 --- a/kernel/events/ring_buffer.c
2852 +++ b/kernel/events/ring_buffer.c
2853 @@ -724,7 +724,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
2854 size = sizeof(struct ring_buffer);
2855 size += nr_pages * sizeof(void *);
2856
2857 - if (order_base_2(size) >= MAX_ORDER)
2858 + if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
2859 goto fail;
2860
2861 rb = kzalloc(size, GFP_KERNEL);
2862 diff --git a/kernel/signal.c b/kernel/signal.c
2863 index c187def3dba6..9102d60fc5c6 100644
2864 --- a/kernel/signal.c
2865 +++ b/kernel/signal.c
2866 @@ -2433,9 +2433,12 @@ relock:
2867 }
2868
2869 /* Has this task already been marked for death? */
2870 - ksig->info.si_signo = signr = SIGKILL;
2871 - if (signal_group_exit(signal))
2872 + if (signal_group_exit(signal)) {
2873 + ksig->info.si_signo = signr = SIGKILL;
2874 + sigdelset(&current->pending.signal, SIGKILL);
2875 + recalc_sigpending();
2876 goto fatal;
2877 + }
2878
2879 for (;;) {
2880 struct k_sigaction *ka;
2881 diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
2882 index a6aebbc848fe..0da379b90249 100644
2883 --- a/kernel/trace/trace_uprobe.c
2884 +++ b/kernel/trace/trace_uprobe.c
2885 @@ -141,7 +141,14 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
2886
2887 ret = strncpy_from_user(dst, src, maxlen);
2888 if (ret == maxlen)
2889 - dst[--ret] = '\0';
2890 + dst[ret - 1] = '\0';
2891 + else if (ret >= 0)
2892 + /*
2893 + * Include the terminating null byte. In this case it
2894 + * was copied by strncpy_from_user but not accounted
2895 + * for in ret.
2896 + */
2897 + ret++;
2898
2899 if (ret < 0) { /* Failed to fetch string */
2900 ((u8 *)get_rloc_data(dest))[0] = '\0';
2901 diff --git a/mm/vmscan.c b/mm/vmscan.c
2902 index 961401c46334..3830066018c1 100644
2903 --- a/mm/vmscan.c
2904 +++ b/mm/vmscan.c
2905 @@ -477,16 +477,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
2906 delta *= 4;
2907 do_div(delta, shrinker->seeks);
2908
2909 - /*
2910 - * Make sure we apply some minimal pressure on default priority
2911 - * even on small cgroups. Stale objects are not only consuming memory
2912 - * by themselves, but can also hold a reference to a dying cgroup,
2913 - * preventing it from being reclaimed. A dying cgroup with all
2914 - * corresponding structures like per-cpu stats and kmem caches
2915 - * can be really big, so it may lead to a significant waste of memory.
2916 - */
2917 - delta = max_t(unsigned long long, delta, min(freeable, batch_size));
2918 -
2919 total_scan += delta;
2920 if (total_scan < 0) {
2921 pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
2922 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2923 index 31a84a5a1338..fead0acb29f7 100644
2924 --- a/sound/pci/hda/patch_conexant.c
2925 +++ b/sound/pci/hda/patch_conexant.c
2926 @@ -924,6 +924,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
2927 SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
2928 SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
2929 SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
2930 + SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
2931 SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
2932 SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
2933 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
2934 diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
2935 index d00734d31e04..e5b6769b9797 100644
2936 --- a/sound/soc/codecs/hdmi-codec.c
2937 +++ b/sound/soc/codecs/hdmi-codec.c
2938 @@ -795,6 +795,8 @@ static int hdmi_codec_probe(struct platform_device *pdev)
2939 if (hcd->spdif)
2940 hcp->daidrv[i] = hdmi_spdif_dai;
2941
2942 + dev_set_drvdata(dev, hcp);
2943 +
2944 ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv,
2945 dai_count);
2946 if (ret) {
2947 @@ -802,8 +804,6 @@ static int hdmi_codec_probe(struct platform_device *pdev)
2948 __func__, ret);
2949 return ret;
2950 }
2951 -
2952 - dev_set_drvdata(dev, hcp);
2953 return 0;
2954 }
2955
2956 diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
2957 index 382847154227..db114f3977e0 100644
2958 --- a/sound/usb/pcm.c
2959 +++ b/sound/usb/pcm.c
2960 @@ -314,6 +314,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
2961 return 0;
2962 }
2963
2964 +/* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk
2965 + * applies. Returns 1 if a quirk was found.
2966 + */
2967 static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
2968 struct usb_device *dev,
2969 struct usb_interface_descriptor *altsd,
2970 @@ -384,7 +387,7 @@ add_sync_ep:
2971
2972 subs->data_endpoint->sync_master = subs->sync_endpoint;
2973
2974 - return 0;
2975 + return 1;
2976 }
2977
2978 static int set_sync_endpoint(struct snd_usb_substream *subs,
2979 @@ -423,6 +426,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
2980 if (err < 0)
2981 return err;
2982
2983 + /* endpoint set by quirk */
2984 + if (err > 0)
2985 + return 0;
2986 +
2987 if (altsd->bNumEndpoints < 2)
2988 return 0;
2989
2990 diff --git a/tools/arch/riscv/include/uapi/asm/bitsperlong.h b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
2991 new file mode 100644
2992 index 000000000000..0b3cb52fd29d
2993 --- /dev/null
2994 +++ b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
2995 @@ -0,0 +1,25 @@
2996 +/*
2997 + * Copyright (C) 2012 ARM Ltd.
2998 + * Copyright (C) 2015 Regents of the University of California
2999 + *
3000 + * This program is free software; you can redistribute it and/or modify
3001 + * it under the terms of the GNU General Public License version 2 as
3002 + * published by the Free Software Foundation.
3003 + *
3004 + * This program is distributed in the hope that it will be useful,
3005 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3006 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3007 + * GNU General Public License for more details.
3008 + *
3009 + * You should have received a copy of the GNU General Public License
3010 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
3011 + */
3012 +
3013 +#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
3014 +#define _UAPI_ASM_RISCV_BITSPERLONG_H
3015 +
3016 +#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
3017 +
3018 +#include <asm-generic/bitsperlong.h>
3019 +
3020 +#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
3021 diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
3022 index 8dd6aefdafa4..57aaeaf8e192 100644
3023 --- a/tools/include/uapi/asm/bitsperlong.h
3024 +++ b/tools/include/uapi/asm/bitsperlong.h
3025 @@ -13,6 +13,10 @@
3026 #include "../../arch/mips/include/uapi/asm/bitsperlong.h"
3027 #elif defined(__ia64__)
3028 #include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
3029 +#elif defined(__riscv)
3030 +#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
3031 +#elif defined(__alpha__)
3032 +#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
3033 #else
3034 #include <asm-generic/bitsperlong.h>
3035 #endif
3036 diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
3037 index 1c16e56cd93e..7cb99b433888 100644
3038 --- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
3039 +++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
3040 @@ -13,7 +13,8 @@ add_probe_vfs_getname() {
3041 local verbose=$1
3042 if [ $had_vfs_getname -eq 1 ] ; then
3043 line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
3044 - perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string"
3045 + perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
3046 + perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
3047 fi
3048 }
3049
3050 diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
3051 index 32ef7bdca1cf..dc2212e12184 100644
3052 --- a/tools/perf/util/callchain.c
3053 +++ b/tools/perf/util/callchain.c
3054 @@ -766,6 +766,7 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
3055 cnode->cycles_count += node->branch_flags.cycles;
3056 cnode->iter_count += node->nr_loop_iter;
3057 cnode->iter_cycles += node->iter_cycles;
3058 + cnode->from_count++;
3059 }
3060 }
3061
3062 @@ -1345,10 +1346,10 @@ static int branch_to_str(char *bf, int bfsize,
3063 static int branch_from_str(char *bf, int bfsize,
3064 u64 branch_count,
3065 u64 cycles_count, u64 iter_count,
3066 - u64 iter_cycles)
3067 + u64 iter_cycles, u64 from_count)
3068 {
3069 int printed = 0, i = 0;
3070 - u64 cycles;
3071 + u64 cycles, v = 0;
3072
3073 cycles = cycles_count / branch_count;
3074 if (cycles) {
3075 @@ -1357,14 +1358,16 @@ static int branch_from_str(char *bf, int bfsize,
3076 bf + printed, bfsize - printed);
3077 }
3078
3079 - if (iter_count) {
3080 - printed += count_pri64_printf(i++, "iter",
3081 - iter_count,
3082 - bf + printed, bfsize - printed);
3083 + if (iter_count && from_count) {
3084 + v = iter_count / from_count;
3085 + if (v) {
3086 + printed += count_pri64_printf(i++, "iter",
3087 + v, bf + printed, bfsize - printed);
3088
3089 - printed += count_pri64_printf(i++, "avg_cycles",
3090 - iter_cycles / iter_count,
3091 - bf + printed, bfsize - printed);
3092 + printed += count_pri64_printf(i++, "avg_cycles",
3093 + iter_cycles / iter_count,
3094 + bf + printed, bfsize - printed);
3095 + }
3096 }
3097
3098 if (i)
3099 @@ -1377,6 +1380,7 @@ static int counts_str_build(char *bf, int bfsize,
3100 u64 branch_count, u64 predicted_count,
3101 u64 abort_count, u64 cycles_count,
3102 u64 iter_count, u64 iter_cycles,
3103 + u64 from_count,
3104 struct branch_type_stat *brtype_stat)
3105 {
3106 int printed;
3107 @@ -1389,7 +1393,8 @@ static int counts_str_build(char *bf, int bfsize,
3108 predicted_count, abort_count, brtype_stat);
3109 } else {
3110 printed = branch_from_str(bf, bfsize, branch_count,
3111 - cycles_count, iter_count, iter_cycles);
3112 + cycles_count, iter_count, iter_cycles,
3113 + from_count);
3114 }
3115
3116 if (!printed)
3117 @@ -1402,13 +1407,14 @@ static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
3118 u64 branch_count, u64 predicted_count,
3119 u64 abort_count, u64 cycles_count,
3120 u64 iter_count, u64 iter_cycles,
3121 + u64 from_count,
3122 struct branch_type_stat *brtype_stat)
3123 {
3124 char str[256];
3125
3126 counts_str_build(str, sizeof(str), branch_count,
3127 predicted_count, abort_count, cycles_count,
3128 - iter_count, iter_cycles, brtype_stat);
3129 + iter_count, iter_cycles, from_count, brtype_stat);
3130
3131 if (fp)
3132 return fprintf(fp, "%s", str);
3133 @@ -1422,6 +1428,7 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
3134 u64 branch_count, predicted_count;
3135 u64 abort_count, cycles_count;
3136 u64 iter_count, iter_cycles;
3137 + u64 from_count;
3138
3139 branch_count = clist->branch_count;
3140 predicted_count = clist->predicted_count;
3141 @@ -1429,11 +1436,12 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
3142 cycles_count = clist->cycles_count;
3143 iter_count = clist->iter_count;
3144 iter_cycles = clist->iter_cycles;
3145 + from_count = clist->from_count;
3146
3147 return callchain_counts_printf(fp, bf, bfsize, branch_count,
3148 predicted_count, abort_count,
3149 cycles_count, iter_count, iter_cycles,
3150 - &clist->brtype_stat);
3151 + from_count, &clist->brtype_stat);
3152 }
3153
3154 static void free_callchain_node(struct callchain_node *node)
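
The callchain.c change above switches the reported "iter" figure from a raw iteration total to the average number of iterations per branch (iter_count divided by the new from_count), and only prints it when both counters are non-zero so the division is safe and zero averages are suppressed. A minimal sketch of that guard pattern (illustrative only; the function and variable names are hypothetical, not perf's):

	/* avg_iter_demo.c - illustrative sketch, not part of the patch. */
	#include <stdio.h>
	#include <stdint.h>

	/* Print an average only when it can be computed safely and is non-zero. */
	static void print_avg_iterations(uint64_t iter_count, uint64_t from_count)
	{
		uint64_t avg;

		if (!iter_count || !from_count)
			return;		/* nothing to report, avoid divide-by-zero */

		avg = iter_count / from_count;
		if (avg)
			printf("iter %llu\n", (unsigned long long)avg);
	}

	int main(void)
	{
		print_avg_iterations(120, 10);	/* prints "iter 12" */
		print_avg_iterations(0, 10);	/* prints nothing */
		return 0;
	}
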
3155 diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
3156 index 154560b1eb65..99d38ac019b8 100644
3157 --- a/tools/perf/util/callchain.h
3158 +++ b/tools/perf/util/callchain.h
3159 @@ -118,6 +118,7 @@ struct callchain_list {
3160 bool has_children;
3161 };
3162 u64 branch_count;
3163 + u64 from_count;
3164 u64 predicted_count;
3165 u64 abort_count;
3166 u64 cycles_count;
3167 diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
3168 index d7403d1207d7..b1508ce3e412 100644
3169 --- a/tools/perf/util/machine.c
3170 +++ b/tools/perf/util/machine.c
3171 @@ -1988,7 +1988,7 @@ static void save_iterations(struct iterations *iter,
3172 {
3173 int i;
3174
3175 - iter->nr_loop_iter = nr;
3176 + iter->nr_loop_iter++;
3177 iter->cycles = 0;
3178
3179 for (i = 0; i < nr; i++)