Contents of /trunk/kernel-alx/patches-4.9/0258-4.9.159-all-fixes.patch
Parent Directory | Revision Log
Revision 3311
(show annotations)
(download)
Tue Mar 12 10:43:16 2019 UTC (5 years, 6 months ago) by niro
File size: 80793 byte(s)
-linux-4.9.159
1 | diff --git a/Documentation/devicetree/bindings/eeprom/eeprom.txt b/Documentation/devicetree/bindings/eeprom/eeprom.txt |
2 | index 735bc94444bb..4dcce8ee5cee 100644 |
3 | --- a/Documentation/devicetree/bindings/eeprom/eeprom.txt |
4 | +++ b/Documentation/devicetree/bindings/eeprom/eeprom.txt |
5 | @@ -6,7 +6,8 @@ Required properties: |
6 | |
7 | "atmel,24c00", "atmel,24c01", "atmel,24c02", "atmel,24c04", |
8 | "atmel,24c08", "atmel,24c16", "atmel,24c32", "atmel,24c64", |
9 | - "atmel,24c128", "atmel,24c256", "atmel,24c512", "atmel,24c1024" |
10 | + "atmel,24c128", "atmel,24c256", "atmel,24c512", "atmel,24c1024", |
11 | + "atmel,24c2048" |
12 | |
13 | "catalyst,24c32" |
14 | |
15 | @@ -17,7 +18,7 @@ Required properties: |
16 | If there is no specific driver for <manufacturer>, a generic |
17 | driver based on <type> is selected. Possible types are: |
18 | "24c00", "24c01", "24c02", "24c04", "24c08", "24c16", "24c32", "24c64", |
19 | - "24c128", "24c256", "24c512", "24c1024", "spd" |
20 | + "24c128", "24c256", "24c512", "24c1024", "24c2048", "spd" |
21 | |
22 | - reg : the I2C address of the EEPROM |
23 | |
24 | diff --git a/Makefile b/Makefile |
25 | index 2b8434aaeece..a452ead13b1e 100644 |
26 | --- a/Makefile |
27 | +++ b/Makefile |
28 | @@ -1,6 +1,6 @@ |
29 | VERSION = 4 |
30 | PATCHLEVEL = 9 |
31 | -SUBLEVEL = 158 |
32 | +SUBLEVEL = 159 |
33 | EXTRAVERSION = |
34 | NAME = Roaring Lionus |
35 | |
36 | diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h |
37 | index 06377400dc09..469642801a68 100644 |
38 | --- a/arch/alpha/include/asm/irq.h |
39 | +++ b/arch/alpha/include/asm/irq.h |
40 | @@ -55,15 +55,15 @@ |
41 | |
42 | #elif defined(CONFIG_ALPHA_DP264) || \ |
43 | defined(CONFIG_ALPHA_LYNX) || \ |
44 | - defined(CONFIG_ALPHA_SHARK) || \ |
45 | - defined(CONFIG_ALPHA_EIGER) |
46 | + defined(CONFIG_ALPHA_SHARK) |
47 | # define NR_IRQS 64 |
48 | |
49 | #elif defined(CONFIG_ALPHA_TITAN) |
50 | #define NR_IRQS 80 |
51 | |
52 | #elif defined(CONFIG_ALPHA_RAWHIDE) || \ |
53 | - defined(CONFIG_ALPHA_TAKARA) |
54 | + defined(CONFIG_ALPHA_TAKARA) || \ |
55 | + defined(CONFIG_ALPHA_EIGER) |
56 | # define NR_IRQS 128 |
57 | |
58 | #elif defined(CONFIG_ALPHA_WILDFIRE) |
59 | diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c |
60 | index 83e9eee57a55..f70663127aad 100644 |
61 | --- a/arch/alpha/mm/fault.c |
62 | +++ b/arch/alpha/mm/fault.c |
63 | @@ -77,7 +77,7 @@ __load_new_mm_context(struct mm_struct *next_mm) |
64 | /* Macro for exception fixup code to access integer registers. */ |
65 | #define dpf_reg(r) \ |
66 | (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ |
67 | - (r) <= 18 ? (r)+8 : (r)-10]) |
68 | + (r) <= 18 ? (r)+10 : (r)-10]) |
69 | |
70 | asmlinkage void |
71 | do_page_fault(unsigned long address, unsigned long mmcsr, |
72 | diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts |
73 | index 78492a0bbbab..3c58ec707ea9 100644 |
74 | --- a/arch/arm/boot/dts/da850-evm.dts |
75 | +++ b/arch/arm/boot/dts/da850-evm.dts |
76 | @@ -156,7 +156,7 @@ |
77 | |
78 | sound { |
79 | compatible = "simple-audio-card"; |
80 | - simple-audio-card,name = "DA850/OMAP-L138 EVM"; |
81 | + simple-audio-card,name = "DA850-OMAPL138 EVM"; |
82 | simple-audio-card,widgets = |
83 | "Line", "Line In", |
84 | "Line", "Line Out"; |
85 | diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts |
86 | index 7b8ab21fed6c..920e64cdb673 100644 |
87 | --- a/arch/arm/boot/dts/da850-lcdk.dts |
88 | +++ b/arch/arm/boot/dts/da850-lcdk.dts |
89 | @@ -26,7 +26,7 @@ |
90 | |
91 | sound { |
92 | compatible = "simple-audio-card"; |
93 | - simple-audio-card,name = "DA850/OMAP-L138 LCDK"; |
94 | + simple-audio-card,name = "DA850-OMAPL138 LCDK"; |
95 | simple-audio-card,widgets = |
96 | "Line", "Line In", |
97 | "Line", "Line Out"; |
98 | diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi |
99 | index d8fca9db46d0..dddbc0d03da5 100644 |
100 | --- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi |
101 | +++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi |
102 | @@ -35,8 +35,8 @@ |
103 | compatible = "gpio-fan"; |
104 | pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>; |
105 | pinctrl-names = "default"; |
106 | - gpios = <&gpio1 14 GPIO_ACTIVE_LOW |
107 | - &gpio1 13 GPIO_ACTIVE_LOW>; |
108 | + gpios = <&gpio1 14 GPIO_ACTIVE_HIGH |
109 | + &gpio1 13 GPIO_ACTIVE_HIGH>; |
110 | gpio-fan,speed-map = <0 0 |
111 | 3000 1 |
112 | 6000 2>; |
113 | diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h |
114 | index e616f61f859d..7d727506096f 100644 |
115 | --- a/arch/arm/include/asm/assembler.h |
116 | +++ b/arch/arm/include/asm/assembler.h |
117 | @@ -465,6 +465,17 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) |
118 | #endif |
119 | .endm |
120 | |
121 | + .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req |
122 | +#ifdef CONFIG_CPU_SPECTRE |
123 | + sub \tmp, \limit, #1 |
124 | + subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr |
125 | + addhs \tmp, \tmp, #1 @ if (tmp >= 0) { |
126 | + subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) } |
127 | + movlo \addr, #0 @ if (tmp < 0) addr = NULL |
128 | + csdb |
129 | +#endif |
130 | + .endm |
131 | + |
132 | .macro uaccess_disable, tmp, isb=1 |
133 | #ifdef CONFIG_CPU_SW_DOMAIN_PAN |
134 | /* |
135 | diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h |
136 | index c55db1e22f0c..b9356dbfded0 100644 |
137 | --- a/arch/arm/include/asm/cputype.h |
138 | +++ b/arch/arm/include/asm/cputype.h |
139 | @@ -106,6 +106,7 @@ |
140 | #define ARM_CPU_PART_SCORPION 0x510002d0 |
141 | |
142 | extern unsigned int processor_id; |
143 | +struct proc_info_list *lookup_processor(u32 midr); |
144 | |
145 | #ifdef CONFIG_CPU_CP15 |
146 | #define read_cpuid(reg) \ |
147 | diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h |
148 | index f379f5f849a9..1bfcc3bcfc6d 100644 |
149 | --- a/arch/arm/include/asm/proc-fns.h |
150 | +++ b/arch/arm/include/asm/proc-fns.h |
151 | @@ -23,7 +23,7 @@ struct mm_struct; |
152 | /* |
153 | * Don't change this structure - ASM code relies on it. |
154 | */ |
155 | -extern struct processor { |
156 | +struct processor { |
157 | /* MISC |
158 | * get data abort address/flags |
159 | */ |
160 | @@ -79,9 +79,13 @@ extern struct processor { |
161 | unsigned int suspend_size; |
162 | void (*do_suspend)(void *); |
163 | void (*do_resume)(void *); |
164 | -} processor; |
165 | +}; |
166 | |
167 | #ifndef MULTI_CPU |
168 | +static inline void init_proc_vtable(const struct processor *p) |
169 | +{ |
170 | +} |
171 | + |
172 | extern void cpu_proc_init(void); |
173 | extern void cpu_proc_fin(void); |
174 | extern int cpu_do_idle(void); |
175 | @@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); |
176 | extern void cpu_do_suspend(void *); |
177 | extern void cpu_do_resume(void *); |
178 | #else |
179 | -#define cpu_proc_init processor._proc_init |
180 | -#define cpu_proc_fin processor._proc_fin |
181 | -#define cpu_reset processor.reset |
182 | -#define cpu_do_idle processor._do_idle |
183 | -#define cpu_dcache_clean_area processor.dcache_clean_area |
184 | -#define cpu_set_pte_ext processor.set_pte_ext |
185 | -#define cpu_do_switch_mm processor.switch_mm |
186 | |
187 | -/* These three are private to arch/arm/kernel/suspend.c */ |
188 | -#define cpu_do_suspend processor.do_suspend |
189 | -#define cpu_do_resume processor.do_resume |
190 | +extern struct processor processor; |
191 | +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) |
192 | +#include <linux/smp.h> |
193 | +/* |
194 | + * This can't be a per-cpu variable because we need to access it before |
195 | + * per-cpu has been initialised. We have a couple of functions that are |
196 | + * called in a pre-emptible context, and so can't use smp_processor_id() |
197 | + * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the |
198 | + * function pointers for these are identical across all CPUs. |
199 | + */ |
200 | +extern struct processor *cpu_vtable[]; |
201 | +#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f |
202 | +#define PROC_TABLE(f) cpu_vtable[0]->f |
203 | +static inline void init_proc_vtable(const struct processor *p) |
204 | +{ |
205 | + unsigned int cpu = smp_processor_id(); |
206 | + *cpu_vtable[cpu] = *p; |
207 | + WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area != |
208 | + cpu_vtable[0]->dcache_clean_area); |
209 | + WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext != |
210 | + cpu_vtable[0]->set_pte_ext); |
211 | +} |
212 | +#else |
213 | +#define PROC_VTABLE(f) processor.f |
214 | +#define PROC_TABLE(f) processor.f |
215 | +static inline void init_proc_vtable(const struct processor *p) |
216 | +{ |
217 | + processor = *p; |
218 | +} |
219 | +#endif |
220 | + |
221 | +#define cpu_proc_init PROC_VTABLE(_proc_init) |
222 | +#define cpu_check_bugs PROC_VTABLE(check_bugs) |
223 | +#define cpu_proc_fin PROC_VTABLE(_proc_fin) |
224 | +#define cpu_reset PROC_VTABLE(reset) |
225 | +#define cpu_do_idle PROC_VTABLE(_do_idle) |
226 | +#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area) |
227 | +#define cpu_set_pte_ext PROC_TABLE(set_pte_ext) |
228 | +#define cpu_do_switch_mm PROC_VTABLE(switch_mm) |
229 | + |
230 | +/* These two are private to arch/arm/kernel/suspend.c */ |
231 | +#define cpu_do_suspend PROC_VTABLE(do_suspend) |
232 | +#define cpu_do_resume PROC_VTABLE(do_resume) |
233 | #endif |
234 | |
235 | extern void cpu_resume(void); |
236 | diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h |
237 | index 57d2ad9c75ca..df8420672c7e 100644 |
238 | --- a/arch/arm/include/asm/thread_info.h |
239 | +++ b/arch/arm/include/asm/thread_info.h |
240 | @@ -124,8 +124,8 @@ extern void vfp_flush_hwstate(struct thread_info *); |
241 | struct user_vfp; |
242 | struct user_vfp_exc; |
243 | |
244 | -extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *, |
245 | - struct user_vfp_exc __user *); |
246 | +extern int vfp_preserve_user_clear_hwstate(struct user_vfp *, |
247 | + struct user_vfp_exc *); |
248 | extern int vfp_restore_user_hwstate(struct user_vfp *, |
249 | struct user_vfp_exc *); |
250 | #endif |
251 | diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h |
252 | index 7b17460127fd..0f6c6b873bc5 100644 |
253 | --- a/arch/arm/include/asm/uaccess.h |
254 | +++ b/arch/arm/include/asm/uaccess.h |
255 | @@ -99,6 +99,14 @@ extern int __put_user_bad(void); |
256 | static inline void set_fs(mm_segment_t fs) |
257 | { |
258 | current_thread_info()->addr_limit = fs; |
259 | + |
260 | + /* |
261 | + * Prevent a mispredicted conditional call to set_fs from forwarding |
262 | + * the wrong address limit to access_ok under speculation. |
263 | + */ |
264 | + dsb(nsh); |
265 | + isb(); |
266 | + |
267 | modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); |
268 | } |
269 | |
270 | @@ -121,6 +129,32 @@ static inline void set_fs(mm_segment_t fs) |
271 | #define __inttype(x) \ |
272 | __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) |
273 | |
274 | +/* |
275 | + * Sanitise a uaccess pointer such that it becomes NULL if addr+size |
276 | + * is above the current addr_limit. |
277 | + */ |
278 | +#define uaccess_mask_range_ptr(ptr, size) \ |
279 | + ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size)) |
280 | +static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr, |
281 | + size_t size) |
282 | +{ |
283 | + void __user *safe_ptr = (void __user *)ptr; |
284 | + unsigned long tmp; |
285 | + |
286 | + asm volatile( |
287 | + " sub %1, %3, #1\n" |
288 | + " subs %1, %1, %0\n" |
289 | + " addhs %1, %1, #1\n" |
290 | + " subhss %1, %1, %2\n" |
291 | + " movlo %0, #0\n" |
292 | + : "+r" (safe_ptr), "=&r" (tmp) |
293 | + : "r" (size), "r" (current_thread_info()->addr_limit) |
294 | + : "cc"); |
295 | + |
296 | + csdb(); |
297 | + return safe_ptr; |
298 | +} |
299 | + |
300 | /* |
301 | * Single-value transfer routines. They automatically use the right |
302 | * size if we just have the right pointer type. Note that the functions |
303 | @@ -392,6 +426,14 @@ do { \ |
304 | __pu_err; \ |
305 | }) |
306 | |
307 | +#ifdef CONFIG_CPU_SPECTRE |
308 | +/* |
309 | + * When mitigating Spectre variant 1.1, all accessors need to include |
310 | + * verification of the address space. |
311 | + */ |
312 | +#define __put_user(x, ptr) put_user(x, ptr) |
313 | + |
314 | +#else |
315 | #define __put_user(x, ptr) \ |
316 | ({ \ |
317 | long __pu_err = 0; \ |
318 | @@ -399,12 +441,6 @@ do { \ |
319 | __pu_err; \ |
320 | }) |
321 | |
322 | -#define __put_user_error(x, ptr, err) \ |
323 | -({ \ |
324 | - __put_user_switch((x), (ptr), (err), __put_user_nocheck); \ |
325 | - (void) 0; \ |
326 | -}) |
327 | - |
328 | #define __put_user_nocheck(x, __pu_ptr, __err, __size) \ |
329 | do { \ |
330 | unsigned long __pu_addr = (unsigned long)__pu_ptr; \ |
331 | @@ -484,6 +520,7 @@ do { \ |
332 | : "r" (x), "i" (-EFAULT) \ |
333 | : "cc") |
334 | |
335 | +#endif /* !CONFIG_CPU_SPECTRE */ |
336 | |
337 | #ifdef CONFIG_MMU |
338 | extern unsigned long __must_check |
339 | diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c |
340 | index 7be511310191..d41d3598e5e5 100644 |
341 | --- a/arch/arm/kernel/bugs.c |
342 | +++ b/arch/arm/kernel/bugs.c |
343 | @@ -6,8 +6,8 @@ |
344 | void check_other_bugs(void) |
345 | { |
346 | #ifdef MULTI_CPU |
347 | - if (processor.check_bugs) |
348 | - processor.check_bugs(); |
349 | + if (cpu_check_bugs) |
350 | + cpu_check_bugs(); |
351 | #endif |
352 | } |
353 | |
354 | diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S |
355 | index 8733012d231f..7e662bdd5cb3 100644 |
356 | --- a/arch/arm/kernel/head-common.S |
357 | +++ b/arch/arm/kernel/head-common.S |
358 | @@ -122,6 +122,9 @@ __mmap_switched_data: |
359 | .long init_thread_union + THREAD_START_SP @ sp |
360 | .size __mmap_switched_data, . - __mmap_switched_data |
361 | |
362 | + __FINIT |
363 | + .text |
364 | + |
365 | /* |
366 | * This provides a C-API version of __lookup_processor_type |
367 | */ |
368 | @@ -133,9 +136,6 @@ ENTRY(lookup_processor_type) |
369 | ldmfd sp!, {r4 - r6, r9, pc} |
370 | ENDPROC(lookup_processor_type) |
371 | |
372 | - __FINIT |
373 | - .text |
374 | - |
375 | /* |
376 | * Read processor ID register (CP#15, CR0), and look up in the linker-built |
377 | * supported processor list. Note that we can't use the absolute addresses |
378 | diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c |
379 | index f4e54503afa9..4764742db7b0 100644 |
380 | --- a/arch/arm/kernel/setup.c |
381 | +++ b/arch/arm/kernel/setup.c |
382 | @@ -115,6 +115,11 @@ EXPORT_SYMBOL(elf_hwcap2); |
383 | |
384 | #ifdef MULTI_CPU |
385 | struct processor processor __ro_after_init; |
386 | +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) |
387 | +struct processor *cpu_vtable[NR_CPUS] = { |
388 | + [0] = &processor, |
389 | +}; |
390 | +#endif |
391 | #endif |
392 | #ifdef MULTI_TLB |
393 | struct cpu_tlb_fns cpu_tlb __ro_after_init; |
394 | @@ -667,28 +672,33 @@ static void __init smp_build_mpidr_hash(void) |
395 | } |
396 | #endif |
397 | |
398 | -static void __init setup_processor(void) |
399 | +/* |
400 | + * locate processor in the list of supported processor types. The linker |
401 | + * builds this table for us from the entries in arch/arm/mm/proc-*.S |
402 | + */ |
403 | +struct proc_info_list *lookup_processor(u32 midr) |
404 | { |
405 | - struct proc_info_list *list; |
406 | + struct proc_info_list *list = lookup_processor_type(midr); |
407 | |
408 | - /* |
409 | - * locate processor in the list of supported processor |
410 | - * types. The linker builds this table for us from the |
411 | - * entries in arch/arm/mm/proc-*.S |
412 | - */ |
413 | - list = lookup_processor_type(read_cpuid_id()); |
414 | if (!list) { |
415 | - pr_err("CPU configuration botched (ID %08x), unable to continue.\n", |
416 | - read_cpuid_id()); |
417 | - while (1); |
418 | + pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n", |
419 | + smp_processor_id(), midr); |
420 | + while (1) |
421 | + /* can't use cpu_relax() here as it may require MMU setup */; |
422 | } |
423 | |
424 | + return list; |
425 | +} |
426 | + |
427 | +static void __init setup_processor(void) |
428 | +{ |
429 | + unsigned int midr = read_cpuid_id(); |
430 | + struct proc_info_list *list = lookup_processor(midr); |
431 | + |
432 | cpu_name = list->cpu_name; |
433 | __cpu_architecture = __get_cpu_architecture(); |
434 | |
435 | -#ifdef MULTI_CPU |
436 | - processor = *list->proc; |
437 | -#endif |
438 | + init_proc_vtable(list->proc); |
439 | #ifdef MULTI_TLB |
440 | cpu_tlb = *list->tlb; |
441 | #endif |
442 | @@ -700,7 +710,7 @@ static void __init setup_processor(void) |
443 | #endif |
444 | |
445 | pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", |
446 | - cpu_name, read_cpuid_id(), read_cpuid_id() & 15, |
447 | + list->cpu_name, midr, midr & 15, |
448 | proc_arch[cpu_architecture()], get_cr()); |
449 | |
450 | snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c", |
451 | diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c |
452 | index 6bee5c9b1133..0a066f03b5ec 100644 |
453 | --- a/arch/arm/kernel/signal.c |
454 | +++ b/arch/arm/kernel/signal.c |
455 | @@ -94,17 +94,18 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame) |
456 | |
457 | static int preserve_vfp_context(struct vfp_sigframe __user *frame) |
458 | { |
459 | - const unsigned long magic = VFP_MAGIC; |
460 | - const unsigned long size = VFP_STORAGE_SIZE; |
461 | + struct vfp_sigframe kframe; |
462 | int err = 0; |
463 | |
464 | - __put_user_error(magic, &frame->magic, err); |
465 | - __put_user_error(size, &frame->size, err); |
466 | + memset(&kframe, 0, sizeof(kframe)); |
467 | + kframe.magic = VFP_MAGIC; |
468 | + kframe.size = VFP_STORAGE_SIZE; |
469 | |
470 | + err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc); |
471 | if (err) |
472 | - return -EFAULT; |
473 | + return err; |
474 | |
475 | - return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc); |
476 | + return __copy_to_user(frame, &kframe, sizeof(kframe)); |
477 | } |
478 | |
479 | static int restore_vfp_context(struct vfp_sigframe __user *auxp) |
480 | @@ -256,30 +257,35 @@ static int |
481 | setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) |
482 | { |
483 | struct aux_sigframe __user *aux; |
484 | + struct sigcontext context; |
485 | int err = 0; |
486 | |
487 | - __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err); |
488 | - __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err); |
489 | - __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err); |
490 | - __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err); |
491 | - __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err); |
492 | - __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err); |
493 | - __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err); |
494 | - __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err); |
495 | - __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err); |
496 | - __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err); |
497 | - __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err); |
498 | - __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err); |
499 | - __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err); |
500 | - __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err); |
501 | - __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err); |
502 | - __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err); |
503 | - __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err); |
504 | - |
505 | - __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err); |
506 | - __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err); |
507 | - __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err); |
508 | - __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err); |
509 | + context = (struct sigcontext) { |
510 | + .arm_r0 = regs->ARM_r0, |
511 | + .arm_r1 = regs->ARM_r1, |
512 | + .arm_r2 = regs->ARM_r2, |
513 | + .arm_r3 = regs->ARM_r3, |
514 | + .arm_r4 = regs->ARM_r4, |
515 | + .arm_r5 = regs->ARM_r5, |
516 | + .arm_r6 = regs->ARM_r6, |
517 | + .arm_r7 = regs->ARM_r7, |
518 | + .arm_r8 = regs->ARM_r8, |
519 | + .arm_r9 = regs->ARM_r9, |
520 | + .arm_r10 = regs->ARM_r10, |
521 | + .arm_fp = regs->ARM_fp, |
522 | + .arm_ip = regs->ARM_ip, |
523 | + .arm_sp = regs->ARM_sp, |
524 | + .arm_lr = regs->ARM_lr, |
525 | + .arm_pc = regs->ARM_pc, |
526 | + .arm_cpsr = regs->ARM_cpsr, |
527 | + |
528 | + .trap_no = current->thread.trap_no, |
529 | + .error_code = current->thread.error_code, |
530 | + .fault_address = current->thread.address, |
531 | + .oldmask = set->sig[0], |
532 | + }; |
533 | + |
534 | + err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context)); |
535 | |
536 | err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); |
537 | |
538 | @@ -296,7 +302,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) |
539 | if (err == 0) |
540 | err |= preserve_vfp_context(&aux->vfp); |
541 | #endif |
542 | - __put_user_error(0, &aux->end_magic, err); |
543 | + err |= __put_user(0, &aux->end_magic); |
544 | |
545 | return err; |
546 | } |
547 | @@ -428,7 +434,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) |
548 | /* |
549 | * Set uc.uc_flags to a value which sc.trap_no would never have. |
550 | */ |
551 | - __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err); |
552 | + err = __put_user(0x5ac3c35a, &frame->uc.uc_flags); |
553 | |
554 | err |= setup_sigframe(frame, regs, set); |
555 | if (err == 0) |
556 | @@ -448,8 +454,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) |
557 | |
558 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
559 | |
560 | - __put_user_error(0, &frame->sig.uc.uc_flags, err); |
561 | - __put_user_error(NULL, &frame->sig.uc.uc_link, err); |
562 | + err |= __put_user(0, &frame->sig.uc.uc_flags); |
563 | + err |= __put_user(NULL, &frame->sig.uc.uc_link); |
564 | |
565 | err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp); |
566 | err |= setup_sigframe(&frame->sig, regs, set); |
567 | diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c |
568 | index 4b129aac7233..8faf869e9fb2 100644 |
569 | --- a/arch/arm/kernel/smp.c |
570 | +++ b/arch/arm/kernel/smp.c |
571 | @@ -27,6 +27,7 @@ |
572 | #include <linux/completion.h> |
573 | #include <linux/cpufreq.h> |
574 | #include <linux/irq_work.h> |
575 | +#include <linux/slab.h> |
576 | |
577 | #include <linux/atomic.h> |
578 | #include <asm/bugs.h> |
579 | @@ -40,6 +41,7 @@ |
580 | #include <asm/mmu_context.h> |
581 | #include <asm/pgtable.h> |
582 | #include <asm/pgalloc.h> |
583 | +#include <asm/procinfo.h> |
584 | #include <asm/processor.h> |
585 | #include <asm/sections.h> |
586 | #include <asm/tlbflush.h> |
587 | @@ -100,6 +102,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd) |
588 | #endif |
589 | } |
590 | |
591 | +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) |
592 | +static int secondary_biglittle_prepare(unsigned int cpu) |
593 | +{ |
594 | + if (!cpu_vtable[cpu]) |
595 | + cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL); |
596 | + |
597 | + return cpu_vtable[cpu] ? 0 : -ENOMEM; |
598 | +} |
599 | + |
600 | +static void secondary_biglittle_init(void) |
601 | +{ |
602 | + init_proc_vtable(lookup_processor(read_cpuid_id())->proc); |
603 | +} |
604 | +#else |
605 | +static int secondary_biglittle_prepare(unsigned int cpu) |
606 | +{ |
607 | + return 0; |
608 | +} |
609 | + |
610 | +static void secondary_biglittle_init(void) |
611 | +{ |
612 | +} |
613 | +#endif |
614 | + |
615 | int __cpu_up(unsigned int cpu, struct task_struct *idle) |
616 | { |
617 | int ret; |
618 | @@ -107,6 +133,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) |
619 | if (!smp_ops.smp_boot_secondary) |
620 | return -ENOSYS; |
621 | |
622 | + ret = secondary_biglittle_prepare(cpu); |
623 | + if (ret) |
624 | + return ret; |
625 | + |
626 | /* |
627 | * We need to tell the secondary core where to find |
628 | * its stack and the page tables. |
629 | @@ -358,6 +388,8 @@ asmlinkage void secondary_start_kernel(void) |
630 | struct mm_struct *mm = &init_mm; |
631 | unsigned int cpu; |
632 | |
633 | + secondary_biglittle_init(); |
634 | + |
635 | /* |
636 | * The identity mapping is uncached (strongly ordered), so |
637 | * switch away from it before attempting any exclusive accesses. |
638 | diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c |
639 | index 640748e27035..d844c5c9364b 100644 |
640 | --- a/arch/arm/kernel/sys_oabi-compat.c |
641 | +++ b/arch/arm/kernel/sys_oabi-compat.c |
642 | @@ -276,6 +276,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, |
643 | int maxevents, int timeout) |
644 | { |
645 | struct epoll_event *kbuf; |
646 | + struct oabi_epoll_event e; |
647 | mm_segment_t fs; |
648 | long ret, err, i; |
649 | |
650 | @@ -294,8 +295,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, |
651 | set_fs(fs); |
652 | err = 0; |
653 | for (i = 0; i < ret; i++) { |
654 | - __put_user_error(kbuf[i].events, &events->events, err); |
655 | - __put_user_error(kbuf[i].data, &events->data, err); |
656 | + e.events = kbuf[i].events; |
657 | + e.data = kbuf[i].data; |
658 | + err = __copy_to_user(events, &e, sizeof(e)); |
659 | + if (err) |
660 | + break; |
661 | events++; |
662 | } |
663 | kfree(kbuf); |
664 | diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S |
665 | index a826df3d3814..6709a8d33963 100644 |
666 | --- a/arch/arm/lib/copy_from_user.S |
667 | +++ b/arch/arm/lib/copy_from_user.S |
668 | @@ -93,11 +93,7 @@ ENTRY(arm_copy_from_user) |
669 | #ifdef CONFIG_CPU_SPECTRE |
670 | get_thread_info r3 |
671 | ldr r3, [r3, #TI_ADDR_LIMIT] |
672 | - adds ip, r1, r2 @ ip=addr+size |
673 | - sub r3, r3, #1 @ addr_limit - 1 |
674 | - cmpcc ip, r3 @ if (addr+size > addr_limit - 1) |
675 | - movcs r1, #0 @ addr = NULL |
676 | - csdb |
677 | + uaccess_mask_range_ptr r1, r2, r3, ip |
678 | #endif |
679 | |
680 | #include "copy_template.S" |
681 | diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S |
682 | index caf5019d8161..970abe521197 100644 |
683 | --- a/arch/arm/lib/copy_to_user.S |
684 | +++ b/arch/arm/lib/copy_to_user.S |
685 | @@ -94,6 +94,11 @@ |
686 | |
687 | ENTRY(__copy_to_user_std) |
688 | WEAK(arm_copy_to_user) |
689 | +#ifdef CONFIG_CPU_SPECTRE |
690 | + get_thread_info r3 |
691 | + ldr r3, [r3, #TI_ADDR_LIMIT] |
692 | + uaccess_mask_range_ptr r0, r2, r3, ip |
693 | +#endif |
694 | |
695 | #include "copy_template.S" |
696 | |
697 | @@ -108,4 +113,3 @@ ENDPROC(__copy_to_user_std) |
698 | rsb r0, r0, r2 |
699 | copy_abort_end |
700 | .popsection |
701 | - |
702 | diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c |
703 | index 6bd1089b07e0..f598d792bace 100644 |
704 | --- a/arch/arm/lib/uaccess_with_memcpy.c |
705 | +++ b/arch/arm/lib/uaccess_with_memcpy.c |
706 | @@ -152,7 +152,8 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n) |
707 | n = __copy_to_user_std(to, from, n); |
708 | uaccess_restore(ua_flags); |
709 | } else { |
710 | - n = __copy_to_user_memcpy(to, from, n); |
711 | + n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n), |
712 | + from, n); |
713 | } |
714 | return n; |
715 | } |
716 | diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c |
717 | index ed9a01484030..a52fe871adbc 100644 |
718 | --- a/arch/arm/mach-integrator/impd1.c |
719 | +++ b/arch/arm/mach-integrator/impd1.c |
720 | @@ -394,7 +394,11 @@ static int __ref impd1_probe(struct lm_device *dev) |
721 | sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup), |
722 | GFP_KERNEL); |
723 | chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL); |
724 | - mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id); |
725 | + mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL, |
726 | + "lm%x:00700", dev->id); |
727 | + if (!lookup || !chipname || !mmciname) |
728 | + return -ENOMEM; |
729 | + |
730 | lookup->dev_id = mmciname; |
731 | /* |
732 | * Offsets on GPIO block 1: |
733 | diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S |
734 | index 7d9176c4a21d..f8bb65032b79 100644 |
735 | --- a/arch/arm/mm/proc-macros.S |
736 | +++ b/arch/arm/mm/proc-macros.S |
737 | @@ -275,6 +275,13 @@ |
738 | .endm |
739 | |
740 | .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0 |
741 | +/* |
742 | + * If we are building for big.Little with branch predictor hardening, |
743 | + * we need the processor function tables to remain available after boot. |
744 | + */ |
745 | +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) |
746 | + .section ".rodata" |
747 | +#endif |
748 | .type \name\()_processor_functions, #object |
749 | .align 2 |
750 | ENTRY(\name\()_processor_functions) |
751 | @@ -310,6 +317,9 @@ ENTRY(\name\()_processor_functions) |
752 | .endif |
753 | |
754 | .size \name\()_processor_functions, . - \name\()_processor_functions |
755 | +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) |
756 | + .previous |
757 | +#endif |
758 | .endm |
759 | |
760 | .macro define_cache_functions name:req |
761 | diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c |
762 | index 5544b82a2e7a..9a07916af8dd 100644 |
763 | --- a/arch/arm/mm/proc-v7-bugs.c |
764 | +++ b/arch/arm/mm/proc-v7-bugs.c |
765 | @@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void) |
766 | case ARM_CPU_PART_CORTEX_A17: |
767 | case ARM_CPU_PART_CORTEX_A73: |
768 | case ARM_CPU_PART_CORTEX_A75: |
769 | - if (processor.switch_mm != cpu_v7_bpiall_switch_mm) |
770 | - goto bl_error; |
771 | per_cpu(harden_branch_predictor_fn, cpu) = |
772 | harden_branch_predictor_bpiall; |
773 | spectre_v2_method = "BPIALL"; |
774 | @@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void) |
775 | |
776 | case ARM_CPU_PART_CORTEX_A15: |
777 | case ARM_CPU_PART_BRAHMA_B15: |
778 | - if (processor.switch_mm != cpu_v7_iciallu_switch_mm) |
779 | - goto bl_error; |
780 | per_cpu(harden_branch_predictor_fn, cpu) = |
781 | harden_branch_predictor_iciallu; |
782 | spectre_v2_method = "ICIALLU"; |
783 | @@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void) |
784 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); |
785 | if ((int)res.a0 != 0) |
786 | break; |
787 | - if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu) |
788 | - goto bl_error; |
789 | per_cpu(harden_branch_predictor_fn, cpu) = |
790 | call_hvc_arch_workaround_1; |
791 | - processor.switch_mm = cpu_v7_hvc_switch_mm; |
792 | + cpu_do_switch_mm = cpu_v7_hvc_switch_mm; |
793 | spectre_v2_method = "hypervisor"; |
794 | break; |
795 | |
796 | @@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void) |
797 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); |
798 | if ((int)res.a0 != 0) |
799 | break; |
800 | - if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu) |
801 | - goto bl_error; |
802 | per_cpu(harden_branch_predictor_fn, cpu) = |
803 | call_smc_arch_workaround_1; |
804 | - processor.switch_mm = cpu_v7_smc_switch_mm; |
805 | + cpu_do_switch_mm = cpu_v7_smc_switch_mm; |
806 | spectre_v2_method = "firmware"; |
807 | break; |
808 | |
809 | @@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void) |
810 | if (spectre_v2_method) |
811 | pr_info("CPU%u: Spectre v2: using %s workaround\n", |
812 | smp_processor_id(), spectre_v2_method); |
813 | - return; |
814 | - |
815 | -bl_error: |
816 | - pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n", |
817 | - cpu); |
818 | } |
819 | #else |
820 | static void cpu_v7_spectre_init(void) |
821 | diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c |
822 | index 8e5e97989fda..00dd8cf36632 100644 |
823 | --- a/arch/arm/vfp/vfpmodule.c |
824 | +++ b/arch/arm/vfp/vfpmodule.c |
825 | @@ -554,12 +554,11 @@ void vfp_flush_hwstate(struct thread_info *thread) |
826 | * Save the current VFP state into the provided structures and prepare |
827 | * for entry into a new function (signal handler). |
828 | */ |
829 | -int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp, |
830 | - struct user_vfp_exc __user *ufp_exc) |
831 | +int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp, |
832 | + struct user_vfp_exc *ufp_exc) |
833 | { |
834 | struct thread_info *thread = current_thread_info(); |
835 | struct vfp_hard_struct *hwstate = &thread->vfpstate.hard; |
836 | - int err = 0; |
837 | |
838 | /* Ensure that the saved hwstate is up-to-date. */ |
839 | vfp_sync_hwstate(thread); |
840 | @@ -568,22 +567,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp, |
841 | * Copy the floating point registers. There can be unused |
842 | * registers see asm/hwcap.h for details. |
843 | */ |
844 | - err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs, |
845 | - sizeof(hwstate->fpregs)); |
846 | + memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs)); |
847 | + |
848 | /* |
849 | * Copy the status and control register. |
850 | */ |
851 | - __put_user_error(hwstate->fpscr, &ufp->fpscr, err); |
852 | + ufp->fpscr = hwstate->fpscr; |
853 | |
854 | /* |
855 | * Copy the exception registers. |
856 | */ |
857 | - __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err); |
858 | - __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err); |
859 | - __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err); |
860 | - |
861 | - if (err) |
862 | - return -EFAULT; |
863 | + ufp_exc->fpexc = hwstate->fpexc; |
864 | + ufp_exc->fpinst = hwstate->fpinst; |
865 | + ufp_exc->fpinst2 = hwstate->fpinst2; |
866 | |
867 | /* Ensure that VFP is disabled. */ |
868 | vfp_flush_hwstate(thread); |
869 | diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c |
870 | index cadf99923600..ab04751a12b6 100644 |
871 | --- a/arch/x86/events/core.c |
872 | +++ b/arch/x86/events/core.c |
873 | @@ -2196,6 +2196,19 @@ void perf_check_microcode(void) |
874 | } |
875 | EXPORT_SYMBOL_GPL(perf_check_microcode); |
876 | |
877 | +static int x86_pmu_check_period(struct perf_event *event, u64 value) |
878 | +{ |
879 | + if (x86_pmu.check_period && x86_pmu.check_period(event, value)) |
880 | + return -EINVAL; |
881 | + |
882 | + if (value && x86_pmu.limit_period) { |
883 | + if (x86_pmu.limit_period(event, value) > value) |
884 | + return -EINVAL; |
885 | + } |
886 | + |
887 | + return 0; |
888 | +} |
889 | + |
890 | static struct pmu pmu = { |
891 | .pmu_enable = x86_pmu_enable, |
892 | .pmu_disable = x86_pmu_disable, |
893 | @@ -2220,6 +2233,7 @@ static struct pmu pmu = { |
894 | .event_idx = x86_pmu_event_idx, |
895 | .sched_task = x86_pmu_sched_task, |
896 | .task_ctx_size = sizeof(struct x86_perf_task_context), |
897 | + .check_period = x86_pmu_check_period, |
898 | }; |
899 | |
900 | void arch_perf_update_userpage(struct perf_event *event, |
901 | diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c |
902 | index f600ab601e00..f0639c8ebcb6 100644 |
903 | --- a/arch/x86/events/intel/core.c |
904 | +++ b/arch/x86/events/intel/core.c |
905 | @@ -3262,6 +3262,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx, |
906 | intel_pmu_lbr_sched_task(ctx, sched_in); |
907 | } |
908 | |
909 | +static int intel_pmu_check_period(struct perf_event *event, u64 value) |
910 | +{ |
911 | + return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0; |
912 | +} |
913 | + |
914 | PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); |
915 | |
916 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); |
917 | @@ -3328,6 +3333,8 @@ static __initconst const struct x86_pmu core_pmu = { |
918 | .cpu_starting = intel_pmu_cpu_starting, |
919 | .cpu_dying = intel_pmu_cpu_dying, |
920 | .cpu_dead = intel_pmu_cpu_dead, |
921 | + |
922 | + .check_period = intel_pmu_check_period, |
923 | }; |
924 | |
925 | static __initconst const struct x86_pmu intel_pmu = { |
926 | @@ -3367,6 +3374,8 @@ static __initconst const struct x86_pmu intel_pmu = { |
927 | |
928 | .guest_get_msrs = intel_guest_get_msrs, |
929 | .sched_task = intel_pmu_sched_task, |
930 | + |
931 | + .check_period = intel_pmu_check_period, |
932 | }; |
933 | |
934 | static __init void intel_clovertown_quirk(void) |
935 | diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h |
936 | index 7ace39c51ff7..5c21680b0a69 100644 |
937 | --- a/arch/x86/events/perf_event.h |
938 | +++ b/arch/x86/events/perf_event.h |
939 | @@ -626,6 +626,11 @@ struct x86_pmu { |
940 | * Intel host/guest support (KVM) |
941 | */ |
942 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); |
943 | + |
944 | + /* |
945 | + * Check period value for PERF_EVENT_IOC_PERIOD ioctl. |
946 | + */ |
947 | + int (*check_period) (struct perf_event *event, u64 period); |
948 | }; |
949 | |
950 | struct x86_perf_task_context { |
951 | @@ -833,7 +838,7 @@ static inline int amd_pmu_init(void) |
952 | |
953 | #ifdef CONFIG_CPU_SUP_INTEL |
954 | |
955 | -static inline bool intel_pmu_has_bts(struct perf_event *event) |
956 | +static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) |
957 | { |
958 | struct hw_perf_event *hwc = &event->hw; |
959 | unsigned int hw_event, bts_event; |
960 | @@ -844,7 +849,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event) |
961 | hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; |
962 | bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); |
963 | |
964 | - return hw_event == bts_event && hwc->sample_period == 1; |
965 | + return hw_event == bts_event && period == 1; |
966 | +} |
967 | + |
968 | +static inline bool intel_pmu_has_bts(struct perf_event *event) |
969 | +{ |
970 | + struct hw_perf_event *hwc = &event->hw; |
971 | + |
972 | + return intel_pmu_has_bts_period(event, hwc->sample_period); |
973 | } |
974 | |
975 | int intel_pmu_save_and_restart(struct perf_event *event); |
976 | diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c |
977 | index cb26f18d43af..555c002167ad 100644 |
978 | --- a/arch/x86/ia32/ia32_aout.c |
979 | +++ b/arch/x86/ia32/ia32_aout.c |
980 | @@ -50,7 +50,7 @@ static unsigned long get_dr(int n) |
981 | /* |
982 | * fill in the user structure for a core dump.. |
983 | */ |
984 | -static void dump_thread32(struct pt_regs *regs, struct user32 *dump) |
985 | +static void fill_dump(struct pt_regs *regs, struct user32 *dump) |
986 | { |
987 | u32 fs, gs; |
988 | memset(dump, 0, sizeof(*dump)); |
989 | @@ -156,10 +156,12 @@ static int aout_core_dump(struct coredump_params *cprm) |
990 | fs = get_fs(); |
991 | set_fs(KERNEL_DS); |
992 | has_dumped = 1; |
993 | + |
994 | + fill_dump(cprm->regs, &dump); |
995 | + |
996 | strncpy(dump.u_comm, current->comm, sizeof(current->comm)); |
997 | dump.u_ar0 = offsetof(struct user32, regs); |
998 | dump.signal = cprm->siginfo->si_signo; |
999 | - dump_thread32(cprm->regs, &dump); |
1000 | |
1001 | /* |
1002 | * If the size of the dump file exceeds the rlimit, then see |
1003 | diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h |
1004 | index e652a7cc6186..3f697a9e3f59 100644 |
1005 | --- a/arch/x86/include/asm/uv/bios.h |
1006 | +++ b/arch/x86/include/asm/uv/bios.h |
1007 | @@ -48,7 +48,8 @@ enum { |
1008 | BIOS_STATUS_SUCCESS = 0, |
1009 | BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, |
1010 | BIOS_STATUS_EINVAL = -EINVAL, |
1011 | - BIOS_STATUS_UNAVAIL = -EBUSY |
1012 | + BIOS_STATUS_UNAVAIL = -EBUSY, |
1013 | + BIOS_STATUS_ABORT = -EINTR, |
1014 | }; |
1015 | |
1016 | /* Address map parameters */ |
1017 | @@ -167,4 +168,9 @@ extern long system_serial_number; |
1018 | |
1019 | extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ |
1020 | |
1021 | +/* |
1022 | + * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details |
1023 | + */ |
1024 | +extern struct semaphore __efi_uv_runtime_lock; |
1025 | + |
1026 | #endif /* _ASM_X86_UV_BIOS_H */ |
1027 | diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
1028 | index 91db841101ca..1870fa7387b7 100644 |
1029 | --- a/arch/x86/kvm/vmx.c |
1030 | +++ b/arch/x86/kvm/vmx.c |
1031 | @@ -2178,7 +2178,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, |
1032 | if (!entry_only) |
1033 | j = find_msr(&m->host, msr); |
1034 | |
1035 | - if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { |
1036 | + if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) || |
1037 | + (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) { |
1038 | printk_once(KERN_WARNING "Not enough msr switch entries. " |
1039 | "Can't add msr %x\n", msr); |
1040 | return; |
1041 | diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c |
1042 | index 4a6a5a26c582..eb33432f2f24 100644 |
1043 | --- a/arch/x86/platform/uv/bios_uv.c |
1044 | +++ b/arch/x86/platform/uv/bios_uv.c |
1045 | @@ -29,7 +29,8 @@ |
1046 | |
1047 | struct uv_systab *uv_systab; |
1048 | |
1049 | -s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) |
1050 | +static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, |
1051 | + u64 a4, u64 a5) |
1052 | { |
1053 | struct uv_systab *tab = uv_systab; |
1054 | s64 ret; |
1055 | @@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) |
1056 | |
1057 | return ret; |
1058 | } |
1059 | + |
1060 | +s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) |
1061 | +{ |
1062 | + s64 ret; |
1063 | + |
1064 | + if (down_interruptible(&__efi_uv_runtime_lock)) |
1065 | + return BIOS_STATUS_ABORT; |
1066 | + |
1067 | + ret = __uv_bios_call(which, a1, a2, a3, a4, a5); |
1068 | + up(&__efi_uv_runtime_lock); |
1069 | + |
1070 | + return ret; |
1071 | +} |
1072 | EXPORT_SYMBOL_GPL(uv_bios_call); |
1073 | |
1074 | s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, |
1075 | @@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, |
1076 | unsigned long bios_flags; |
1077 | s64 ret; |
1078 | |
1079 | + if (down_interruptible(&__efi_uv_runtime_lock)) |
1080 | + return BIOS_STATUS_ABORT; |
1081 | + |
1082 | local_irq_save(bios_flags); |
1083 | - ret = uv_bios_call(which, a1, a2, a3, a4, a5); |
1084 | + ret = __uv_bios_call(which, a1, a2, a3, a4, a5); |
1085 | local_irq_restore(bios_flags); |
1086 | |
1087 | + up(&__efi_uv_runtime_lock); |
1088 | + |
1089 | return ret; |
1090 | } |
1091 | |
1092 | diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c |
1093 | index 17b518cb787c..0ea065c6725a 100644 |
1094 | --- a/drivers/acpi/numa.c |
1095 | +++ b/drivers/acpi/numa.c |
1096 | @@ -147,9 +147,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header) |
1097 | { |
1098 | struct acpi_srat_mem_affinity *p = |
1099 | (struct acpi_srat_mem_affinity *)header; |
1100 | - pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n", |
1101 | - (unsigned long)p->base_address, |
1102 | - (unsigned long)p->length, |
1103 | + pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n", |
1104 | + (unsigned long long)p->base_address, |
1105 | + (unsigned long long)p->length, |
1106 | p->proximity_domain, |
1107 | (p->flags & ACPI_SRAT_MEM_ENABLED) ? |
1108 | "enabled" : "disabled", |
1109 | diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c |
1110 | index d6d91e8afa9e..61fe4bbc6dc0 100644 |
1111 | --- a/drivers/cpufreq/cpufreq.c |
1112 | +++ b/drivers/cpufreq/cpufreq.c |
1113 | @@ -1496,17 +1496,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy) |
1114 | { |
1115 | unsigned int ret_freq = 0; |
1116 | |
1117 | - if (!cpufreq_driver->get) |
1118 | + if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get) |
1119 | return ret_freq; |
1120 | |
1121 | ret_freq = cpufreq_driver->get(policy->cpu); |
1122 | |
1123 | /* |
1124 | - * Updating inactive policies is invalid, so avoid doing that. Also |
1125 | - * if fast frequency switching is used with the given policy, the check |
1126 | + * If fast frequency switching is used with the given policy, the check |
1127 | * against policy->cur is pointless, so skip it in that case too. |
1128 | */ |
1129 | - if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled) |
1130 | + if (policy->fast_switch_enabled) |
1131 | return ret_freq; |
1132 | |
1133 | if (ret_freq && policy->cur && |
1134 | diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c |
1135 | index ae54870b2788..dd7f63354ca0 100644 |
1136 | --- a/drivers/firmware/efi/runtime-wrappers.c |
1137 | +++ b/drivers/firmware/efi/runtime-wrappers.c |
1138 | @@ -49,6 +49,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call) |
1139 | local_irq_restore(flags); |
1140 | } |
1141 | |
1142 | +/* |
1143 | + * Expose the EFI runtime lock to the UV platform |
1144 | + */ |
1145 | +#ifdef CONFIG_X86_UV |
1146 | +extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock); |
1147 | +#endif |
1148 | + |
1149 | /* |
1150 | * According to section 7.1 of the UEFI spec, Runtime Services are not fully |
1151 | * reentrant, and there are particular combinations of calls that need to be |
1152 | diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c |
1153 | index f64f35cdc2ff..fa3f2f039a74 100644 |
1154 | --- a/drivers/gpu/drm/bridge/tc358767.c |
1155 | +++ b/drivers/gpu/drm/bridge/tc358767.c |
1156 | @@ -96,6 +96,8 @@ |
1157 | #define DP0_STARTVAL 0x064c |
1158 | #define DP0_ACTIVEVAL 0x0650 |
1159 | #define DP0_SYNCVAL 0x0654 |
1160 | +#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15) |
1161 | +#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31) |
1162 | #define DP0_MISC 0x0658 |
1163 | #define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ |
1164 | #define BPC_6 (0 << 5) |
1165 | @@ -140,6 +142,8 @@ |
1166 | #define DP0_LTLOOPCTRL 0x06d8 |
1167 | #define DP0_SNKLTCTRL 0x06e4 |
1168 | |
1169 | +#define DP1_SRCCTRL 0x07a0 |
1170 | + |
1171 | /* PHY */ |
1172 | #define DP_PHY_CTRL 0x0800 |
1173 | #define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */ |
1174 | @@ -148,6 +152,7 @@ |
1175 | #define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */ |
1176 | #define PHY_RDY BIT(16) /* PHY Main Channels Ready */ |
1177 | #define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */ |
1178 | +#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */ |
1179 | #define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */ |
1180 | #define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */ |
1181 | |
1182 | @@ -538,6 +543,7 @@ static int tc_aux_link_setup(struct tc_data *tc) |
1183 | unsigned long rate; |
1184 | u32 value; |
1185 | int ret; |
1186 | + u32 dp_phy_ctrl; |
1187 | |
1188 | rate = clk_get_rate(tc->refclk); |
1189 | switch (rate) { |
1190 | @@ -562,7 +568,10 @@ static int tc_aux_link_setup(struct tc_data *tc) |
1191 | value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; |
1192 | tc_write(SYS_PLLPARAM, value); |
1193 | |
1194 | - tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN); |
1195 | + dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN; |
1196 | + if (tc->link.base.num_lanes == 2) |
1197 | + dp_phy_ctrl |= PHY_2LANE; |
1198 | + tc_write(DP_PHY_CTRL, dp_phy_ctrl); |
1199 | |
1200 | /* |
1201 | * Initially PLLs are in bypass. Force PLL parameter update, |
1202 | @@ -717,7 +726,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) |
1203 | |
1204 | tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay)); |
1205 | |
1206 | - tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0)); |
1207 | + tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) | |
1208 | + ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) | |
1209 | + ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0)); |
1210 | |
1211 | tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | |
1212 | DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); |
1213 | @@ -827,12 +838,11 @@ static int tc_main_link_setup(struct tc_data *tc) |
1214 | if (!tc->mode) |
1215 | return -EINVAL; |
1216 | |
1217 | - /* from excel file - DP0_SrcCtrl */ |
1218 | - tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B | |
1219 | - DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 | |
1220 | - DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT); |
1221 | - /* from excel file - DP1_SrcCtrl */ |
1222 | - tc_write(0x07a0, 0x00003083); |
1223 | + tc_write(DP0_SRCCTRL, tc_srcctrl(tc)); |
1224 | + /* SSCG and BW27 on DP1 must be set to the same as on DP0 */ |
1225 | + tc_write(DP1_SRCCTRL, |
1226 | + (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) | |
1227 | + ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0)); |
1228 | |
1229 | rate = clk_get_rate(tc->refclk); |
1230 | switch (rate) { |
1231 | @@ -853,8 +863,11 @@ static int tc_main_link_setup(struct tc_data *tc) |
1232 | } |
1233 | value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; |
1234 | tc_write(SYS_PLLPARAM, value); |
1235 | + |
1236 | /* Setup Main Link */ |
1237 | - dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN; |
1238 | + dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN; |
1239 | + if (tc->link.base.num_lanes == 2) |
1240 | + dp_phy_ctrl |= PHY_2LANE; |
1241 | tc_write(DP_PHY_CTRL, dp_phy_ctrl); |
1242 | msleep(100); |
1243 | |
1244 | @@ -1109,10 +1122,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge, |
1245 | static int tc_connector_mode_valid(struct drm_connector *connector, |
1246 | struct drm_display_mode *mode) |
1247 | { |
1248 | + struct tc_data *tc = connector_to_tc(connector); |
1249 | + u32 req, avail; |
1250 | + u32 bits_per_pixel = 24; |
1251 | + |
1252 | /* DPI interface clock limitation: upto 154 MHz */ |
1253 | if (mode->clock > 154000) |
1254 | return MODE_CLOCK_HIGH; |
1255 | |
1256 | + req = mode->clock * bits_per_pixel / 8; |
1257 | + avail = tc->link.base.num_lanes * tc->link.base.rate; |
1258 | + |
1259 | + if (req > avail) |
1260 | + return MODE_BAD; |
1261 | + |
1262 | return MODE_OK; |
1263 | } |
1264 | |
1265 | diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c |
1266 | index 7b2030925825..6509031098d5 100644 |
1267 | --- a/drivers/gpu/drm/i915/i915_gem.c |
1268 | +++ b/drivers/gpu/drm/i915/i915_gem.c |
1269 | @@ -1593,6 +1593,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
1270 | return err; |
1271 | } |
1272 | |
1273 | +static inline bool |
1274 | +__vma_matches(struct vm_area_struct *vma, struct file *filp, |
1275 | + unsigned long addr, unsigned long size) |
1276 | +{ |
1277 | + if (vma->vm_file != filp) |
1278 | + return false; |
1279 | + |
1280 | + return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size; |
1281 | +} |
1282 | + |
1283 | /** |
1284 | * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address |
1285 | * it is mapped to. |
1286 | @@ -1651,7 +1661,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, |
1287 | return -EINTR; |
1288 | } |
1289 | vma = find_vma(mm, addr); |
1290 | - if (vma) |
1291 | + if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) |
1292 | vma->vm_page_prot = |
1293 | pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); |
1294 | else |
1295 | diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c |
1296 | index b0d445390ee4..d43bc7bd3387 100644 |
1297 | --- a/drivers/input/misc/bma150.c |
1298 | +++ b/drivers/input/misc/bma150.c |
1299 | @@ -482,13 +482,14 @@ static int bma150_register_input_device(struct bma150_data *bma150) |
1300 | idev->close = bma150_irq_close; |
1301 | input_set_drvdata(idev, bma150); |
1302 | |
1303 | + bma150->input = idev; |
1304 | + |
1305 | error = input_register_device(idev); |
1306 | if (error) { |
1307 | input_free_device(idev); |
1308 | return error; |
1309 | } |
1310 | |
1311 | - bma150->input = idev; |
1312 | return 0; |
1313 | } |
1314 | |
1315 | @@ -511,15 +512,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150) |
1316 | |
1317 | bma150_init_input_device(bma150, ipoll_dev->input); |
1318 | |
1319 | + bma150->input_polled = ipoll_dev; |
1320 | + bma150->input = ipoll_dev->input; |
1321 | + |
1322 | error = input_register_polled_device(ipoll_dev); |
1323 | if (error) { |
1324 | input_free_polled_device(ipoll_dev); |
1325 | return error; |
1326 | } |
1327 | |
1328 | - bma150->input_polled = ipoll_dev; |
1329 | - bma150->input = ipoll_dev->input; |
1330 | - |
1331 | return 0; |
1332 | } |
1333 | |
1334 | diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c |
1335 | index 30adc5745cba..25ce9047b682 100644 |
1336 | --- a/drivers/input/mouse/elan_i2c_core.c |
1337 | +++ b/drivers/input/mouse/elan_i2c_core.c |
1338 | @@ -1240,7 +1240,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id); |
1339 | static const struct acpi_device_id elan_acpi_id[] = { |
1340 | { "ELAN0000", 0 }, |
1341 | { "ELAN0100", 0 }, |
1342 | - { "ELAN0501", 0 }, |
1343 | { "ELAN0600", 0 }, |
1344 | { "ELAN0602", 0 }, |
1345 | { "ELAN0605", 0 }, |
1346 | @@ -1251,6 +1250,7 @@ static const struct acpi_device_id elan_acpi_id[] = { |
1347 | { "ELAN060C", 0 }, |
1348 | { "ELAN0611", 0 }, |
1349 | { "ELAN0612", 0 }, |
1350 | + { "ELAN0617", 0 }, |
1351 | { "ELAN0618", 0 }, |
1352 | { "ELAN061C", 0 }, |
1353 | { "ELAN061D", 0 }, |
1354 | diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c |
1355 | index c120afd9c46a..38edf8f5bf8a 100644 |
1356 | --- a/drivers/input/mouse/elantech.c |
1357 | +++ b/drivers/input/mouse/elantech.c |
1358 | @@ -1117,6 +1117,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, |
1359 | * Asus UX31 0x361f00 20, 15, 0e clickpad |
1360 | * Asus UX32VD 0x361f02 00, 15, 0e clickpad |
1361 | * Avatar AVIU-145A2 0x361f00 ? clickpad |
1362 | + * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**) |
1363 | + * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**) |
1364 | * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons |
1365 | * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons |
1366 | * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons |
1367 | @@ -1169,6 +1171,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { |
1368 | DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), |
1369 | }, |
1370 | }, |
1371 | + { |
1372 | + /* Fujitsu H780 also has a middle button */ |
1373 | + .matches = { |
1374 | + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), |
1375 | + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"), |
1376 | + }, |
1377 | + }, |
1378 | #endif |
1379 | { } |
1380 | }; |
1381 | diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c |
1382 | index 914c8a6bf93c..345f4d81ba07 100644 |
1383 | --- a/drivers/md/dm-thin.c |
1384 | +++ b/drivers/md/dm-thin.c |
1385 | @@ -257,6 +257,7 @@ struct pool { |
1386 | |
1387 | spinlock_t lock; |
1388 | struct bio_list deferred_flush_bios; |
1389 | + struct bio_list deferred_flush_completions; |
1390 | struct list_head prepared_mappings; |
1391 | struct list_head prepared_discards; |
1392 | struct list_head prepared_discards_pt2; |
1393 | @@ -925,6 +926,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) |
1394 | mempool_free(m, m->tc->pool->mapping_pool); |
1395 | } |
1396 | |
1397 | +static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio) |
1398 | +{ |
1399 | + struct pool *pool = tc->pool; |
1400 | + unsigned long flags; |
1401 | + |
1402 | + /* |
1403 | + * If the bio has the REQ_FUA flag set we must commit the metadata |
1404 | + * before signaling its completion. |
1405 | + */ |
1406 | + if (!bio_triggers_commit(tc, bio)) { |
1407 | + bio_endio(bio); |
1408 | + return; |
1409 | + } |
1410 | + |
1411 | + /* |
1412 | + * Complete bio with an error if earlier I/O caused changes to the |
1413 | + * metadata that can't be committed, e.g, due to I/O errors on the |
1414 | + * metadata device. |
1415 | + */ |
1416 | + if (dm_thin_aborted_changes(tc->td)) { |
1417 | + bio_io_error(bio); |
1418 | + return; |
1419 | + } |
1420 | + |
1421 | + /* |
1422 | + * Batch together any bios that trigger commits and then issue a |
1423 | + * single commit for them in process_deferred_bios(). |
1424 | + */ |
1425 | + spin_lock_irqsave(&pool->lock, flags); |
1426 | + bio_list_add(&pool->deferred_flush_completions, bio); |
1427 | + spin_unlock_irqrestore(&pool->lock, flags); |
1428 | +} |
1429 | + |
1430 | static void process_prepared_mapping(struct dm_thin_new_mapping *m) |
1431 | { |
1432 | struct thin_c *tc = m->tc; |
1433 | @@ -957,7 +991,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) |
1434 | */ |
1435 | if (bio) { |
1436 | inc_remap_and_issue_cell(tc, m->cell, m->data_block); |
1437 | - bio_endio(bio); |
1438 | + complete_overwrite_bio(tc, bio); |
1439 | } else { |
1440 | inc_all_io_entry(tc->pool, m->cell->holder); |
1441 | remap_and_issue(tc, m->cell->holder, m->data_block); |
1442 | @@ -2303,7 +2337,7 @@ static void process_deferred_bios(struct pool *pool) |
1443 | { |
1444 | unsigned long flags; |
1445 | struct bio *bio; |
1446 | - struct bio_list bios; |
1447 | + struct bio_list bios, bio_completions; |
1448 | struct thin_c *tc; |
1449 | |
1450 | tc = get_first_thin(pool); |
1451 | @@ -2314,26 +2348,36 @@ static void process_deferred_bios(struct pool *pool) |
1452 | } |
1453 | |
1454 | /* |
1455 | - * If there are any deferred flush bios, we must commit |
1456 | - * the metadata before issuing them. |
1457 | + * If there are any deferred flush bios, we must commit the metadata |
1458 | + * before issuing them or signaling their completion. |
1459 | */ |
1460 | bio_list_init(&bios); |
1461 | + bio_list_init(&bio_completions); |
1462 | + |
1463 | spin_lock_irqsave(&pool->lock, flags); |
1464 | bio_list_merge(&bios, &pool->deferred_flush_bios); |
1465 | bio_list_init(&pool->deferred_flush_bios); |
1466 | + |
1467 | + bio_list_merge(&bio_completions, &pool->deferred_flush_completions); |
1468 | + bio_list_init(&pool->deferred_flush_completions); |
1469 | spin_unlock_irqrestore(&pool->lock, flags); |
1470 | |
1471 | - if (bio_list_empty(&bios) && |
1472 | + if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) && |
1473 | !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) |
1474 | return; |
1475 | |
1476 | if (commit(pool)) { |
1477 | + bio_list_merge(&bios, &bio_completions); |
1478 | + |
1479 | while ((bio = bio_list_pop(&bios))) |
1480 | bio_io_error(bio); |
1481 | return; |
1482 | } |
1483 | pool->last_commit_jiffies = jiffies; |
1484 | |
1485 | + while ((bio = bio_list_pop(&bio_completions))) |
1486 | + bio_endio(bio); |
1487 | + |
1488 | while ((bio = bio_list_pop(&bios))) |
1489 | generic_make_request(bio); |
1490 | } |
1491 | @@ -2968,6 +3012,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, |
1492 | INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); |
1493 | spin_lock_init(&pool->lock); |
1494 | bio_list_init(&pool->deferred_flush_bios); |
1495 | + bio_list_init(&pool->deferred_flush_completions); |
1496 | INIT_LIST_HEAD(&pool->prepared_mappings); |
1497 | INIT_LIST_HEAD(&pool->prepared_discards); |
1498 | INIT_LIST_HEAD(&pool->prepared_discards_pt2); |
1499 | diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig |
1500 | index c4e41c26649e..fac10c0e852c 100644 |
1501 | --- a/drivers/misc/eeprom/Kconfig |
1502 | +++ b/drivers/misc/eeprom/Kconfig |
1503 | @@ -12,7 +12,7 @@ config EEPROM_AT24 |
1504 | ones like at24c64, 24lc02 or fm24c04: |
1505 | |
1506 | 24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08, |
1507 | - 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024 |
1508 | + 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024, 24c2048 |
1509 | |
1510 | Unless you like data loss puzzles, always be sure that any chip |
1511 | you configure as a 24c32 (32 kbit) or larger is NOT really a |
1512 | diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c |
1513 | index d8a485f1798b..a37b9b6a315a 100644 |
1514 | --- a/drivers/misc/eeprom/at24.c |
1515 | +++ b/drivers/misc/eeprom/at24.c |
1516 | @@ -170,6 +170,7 @@ static const struct i2c_device_id at24_ids[] = { |
1517 | { "24c256", AT24_DEVICE_MAGIC(262144 / 8, AT24_FLAG_ADDR16) }, |
1518 | { "24c512", AT24_DEVICE_MAGIC(524288 / 8, AT24_FLAG_ADDR16) }, |
1519 | { "24c1024", AT24_DEVICE_MAGIC(1048576 / 8, AT24_FLAG_ADDR16) }, |
1520 | + { "24c2048", AT24_DEVICE_MAGIC(2097152 / 8, AT24_FLAG_ADDR16) }, |
1521 | { "at24", 0 }, |
1522 | { /* END OF LIST */ } |
1523 | }; |
1524 | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c |
1525 | index 4bc2c806eb61..eeeb4c5740bf 100644 |
1526 | --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c |
1527 | +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c |
1528 | @@ -12979,6 +12979,24 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb, |
1529 | struct net_device *dev, |
1530 | netdev_features_t features) |
1531 | { |
1532 | + /* |
1533 | + * A skb with gso_size + header length > 9700 will cause a |
1534 | + * firmware panic. Drop GSO support. |
1535 | + * |
1536 | + * Eventually the upper layer should not pass these packets down. |
1537 | + * |
1538 | + * For speed, if the gso_size is <= 9000, assume there will |
1539 | + * not be 700 bytes of headers and pass it through. Only do a |
1540 | + * full (slow) validation if the gso_size is > 9000. |
1541 | + * |
1542 | + * (Due to the way SKB_BY_FRAGS works this will also do a full |
1543 | + * validation in that case.) |
1544 | + */ |
1545 | + if (unlikely(skb_is_gso(skb) && |
1546 | + (skb_shinfo(skb)->gso_size > 9000) && |
1547 | + !skb_gso_validate_mac_len(skb, 9700))) |
1548 | + features &= ~NETIF_F_GSO_MASK; |
1549 | + |
1550 | features = vlan_features_check(skb, features); |
1551 | return vxlan_features_check(skb, features); |
1552 | } |
1553 | diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c |
1554 | index 8a40202c0a17..c4f1c363e24b 100644 |
1555 | --- a/drivers/net/usb/ch9200.c |
1556 | +++ b/drivers/net/usb/ch9200.c |
1557 | @@ -254,14 +254,9 @@ static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb, |
1558 | tx_overhead = 0x40; |
1559 | |
1560 | len = skb->len; |
1561 | - if (skb_headroom(skb) < tx_overhead) { |
1562 | - struct sk_buff *skb2; |
1563 | - |
1564 | - skb2 = skb_copy_expand(skb, tx_overhead, 0, flags); |
1565 | + if (skb_cow_head(skb, tx_overhead)) { |
1566 | dev_kfree_skb_any(skb); |
1567 | - skb = skb2; |
1568 | - if (!skb) |
1569 | - return NULL; |
1570 | + return NULL; |
1571 | } |
1572 | |
1573 | __skb_push(skb, tx_overhead); |
1574 | diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c |
1575 | index 66b34ddbe216..72d9e7954b0a 100644 |
1576 | --- a/drivers/net/usb/kaweth.c |
1577 | +++ b/drivers/net/usb/kaweth.c |
1578 | @@ -803,18 +803,12 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb, |
1579 | } |
1580 | |
1581 | /* We now decide whether we can put our special header into the sk_buff */ |
1582 | - if (skb_cloned(skb) || skb_headroom(skb) < 2) { |
1583 | - /* no such luck - we make our own */ |
1584 | - struct sk_buff *copied_skb; |
1585 | - copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC); |
1586 | - dev_kfree_skb_irq(skb); |
1587 | - skb = copied_skb; |
1588 | - if (!copied_skb) { |
1589 | - kaweth->stats.tx_errors++; |
1590 | - netif_start_queue(net); |
1591 | - spin_unlock_irq(&kaweth->device_lock); |
1592 | - return NETDEV_TX_OK; |
1593 | - } |
1594 | + if (skb_cow_head(skb, 2)) { |
1595 | + kaweth->stats.tx_errors++; |
1596 | + netif_start_queue(net); |
1597 | + spin_unlock_irq(&kaweth->device_lock); |
1598 | + dev_kfree_skb_any(skb); |
1599 | + return NETDEV_TX_OK; |
1600 | } |
1601 | |
1602 | private_header = (__le16 *)__skb_push(skb, 2); |
1603 | diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c |
1604 | index e29f4c0767eb..e719ecd69d01 100644 |
1605 | --- a/drivers/net/usb/smsc95xx.c |
1606 | +++ b/drivers/net/usb/smsc95xx.c |
1607 | @@ -2011,13 +2011,13 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev, |
1608 | /* We do not advertise SG, so skbs should be already linearized */ |
1609 | BUG_ON(skb_shinfo(skb)->nr_frags); |
1610 | |
1611 | - if (skb_headroom(skb) < overhead) { |
1612 | - struct sk_buff *skb2 = skb_copy_expand(skb, |
1613 | - overhead, 0, flags); |
1614 | + /* Make writable and expand header space by overhead if required */ |
1615 | + if (skb_cow_head(skb, overhead)) { |
1616 | + /* Must deallocate here as returning NULL to indicate error |
1617 | + * means the skb won't be deallocated in the caller. |
1618 | + */ |
1619 | dev_kfree_skb_any(skb); |
1620 | - skb = skb2; |
1621 | - if (!skb) |
1622 | - return NULL; |
1623 | + return NULL; |
1624 | } |
1625 | |
1626 | if (csum) { |
1627 | diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c |
1628 | index bedce3453dd3..5aa221487a9c 100644 |
1629 | --- a/drivers/pinctrl/qcom/pinctrl-msm.c |
1630 | +++ b/drivers/pinctrl/qcom/pinctrl-msm.c |
1631 | @@ -803,11 +803,24 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) |
1632 | return ret; |
1633 | } |
1634 | |
1635 | - ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), 0, 0, chip->ngpio); |
1636 | - if (ret) { |
1637 | - dev_err(pctrl->dev, "Failed to add pin range\n"); |
1638 | - gpiochip_remove(&pctrl->chip); |
1639 | - return ret; |
1640 | + /* |
1641 | + * For DeviceTree-supported systems, the gpio core checks the |
1642 | + * pinctrl's device node for the "gpio-ranges" property. |
1643 | + * If it is present, it takes care of adding the pin ranges |
1644 | + * for the driver. In this case the driver can skip ahead. |
1645 | + * |
1646 | + * In order to remain compatible with older, existing DeviceTree |
1647 | + * files which don't set the "gpio-ranges" property or systems that |
1648 | + * utilize ACPI the driver has to call gpiochip_add_pin_range(). |
1649 | + */ |
1650 | + if (!of_property_read_bool(pctrl->dev->of_node, "gpio-ranges")) { |
1651 | + ret = gpiochip_add_pin_range(&pctrl->chip, |
1652 | + dev_name(pctrl->dev), 0, 0, chip->ngpio); |
1653 | + if (ret) { |
1654 | + dev_err(pctrl->dev, "Failed to add pin range\n"); |
1655 | + gpiochip_remove(&pctrl->chip); |
1656 | + return ret; |
1657 | + } |
1658 | } |
1659 | |
1660 | ret = gpiochip_irqchip_add(chip, |
1661 | diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c |
1662 | index 85442edf3c49..913ebb6d0d29 100644 |
1663 | --- a/drivers/scsi/aic94xx/aic94xx_init.c |
1664 | +++ b/drivers/scsi/aic94xx/aic94xx_init.c |
1665 | @@ -281,7 +281,7 @@ static ssize_t asd_show_dev_rev(struct device *dev, |
1666 | return snprintf(buf, PAGE_SIZE, "%s\n", |
1667 | asd_dev_rev[asd_ha->revision_id]); |
1668 | } |
1669 | -static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL); |
1670 | +static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL); |
1671 | |
1672 | static ssize_t asd_show_dev_bios_build(struct device *dev, |
1673 | struct device_attribute *attr,char *buf) |
1674 | @@ -478,7 +478,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) |
1675 | { |
1676 | int err; |
1677 | |
1678 | - err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); |
1679 | + err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision); |
1680 | if (err) |
1681 | return err; |
1682 | |
1683 | @@ -500,13 +500,13 @@ err_update_bios: |
1684 | err_biosb: |
1685 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); |
1686 | err_rev: |
1687 | - device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); |
1688 | + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); |
1689 | return err; |
1690 | } |
1691 | |
1692 | static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha) |
1693 | { |
1694 | - device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); |
1695 | + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); |
1696 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); |
1697 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); |
1698 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios); |
1699 | diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c |
1700 | index 984d6aae7529..0e5435330c07 100644 |
1701 | --- a/drivers/usb/dwc2/hcd.c |
1702 | +++ b/drivers/usb/dwc2/hcd.c |
1703 | @@ -5202,7 +5202,6 @@ error3: |
1704 | error2: |
1705 | usb_put_hcd(hcd); |
1706 | error1: |
1707 | - kfree(hsotg->core_params); |
1708 | |
1709 | #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS |
1710 | kfree(hsotg->last_frame_num_array); |
1711 | diff --git a/fs/cifs/file.c b/fs/cifs/file.c |
1712 | index a3046b6523c8..8ec296308729 100644 |
1713 | --- a/fs/cifs/file.c |
1714 | +++ b/fs/cifs/file.c |
1715 | @@ -1126,6 +1126,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) |
1716 | return -EINVAL; |
1717 | } |
1718 | |
1719 | + BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) > |
1720 | + PAGE_SIZE); |
1721 | + max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), |
1722 | + PAGE_SIZE); |
1723 | max_num = (max_buf - sizeof(struct smb_hdr)) / |
1724 | sizeof(LOCKING_ANDX_RANGE); |
1725 | buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); |
1726 | @@ -1462,6 +1466,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, |
1727 | if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) |
1728 | return -EINVAL; |
1729 | |
1730 | + BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) > |
1731 | + PAGE_SIZE); |
1732 | + max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), |
1733 | + PAGE_SIZE); |
1734 | max_num = (max_buf - sizeof(struct smb_hdr)) / |
1735 | sizeof(LOCKING_ANDX_RANGE); |
1736 | buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); |
1737 | diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c |
1738 | index b7885dc0d9bb..dee5250701de 100644 |
1739 | --- a/fs/cifs/smb2file.c |
1740 | +++ b/fs/cifs/smb2file.c |
1741 | @@ -129,6 +129,8 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, |
1742 | if (max_buf < sizeof(struct smb2_lock_element)) |
1743 | return -EINVAL; |
1744 | |
1745 | + BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE); |
1746 | + max_buf = min_t(unsigned int, max_buf, PAGE_SIZE); |
1747 | max_num = max_buf / sizeof(struct smb2_lock_element); |
1748 | buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL); |
1749 | if (!buf) |
1750 | @@ -265,6 +267,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile) |
1751 | return -EINVAL; |
1752 | } |
1753 | |
1754 | + BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE); |
1755 | + max_buf = min_t(unsigned int, max_buf, PAGE_SIZE); |
1756 | max_num = max_buf / sizeof(struct smb2_lock_element); |
1757 | buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL); |
1758 | if (!buf) { |
1759 | diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h |
1760 | index 78ed8105e64d..ae8ecf821019 100644 |
1761 | --- a/include/linux/perf_event.h |
1762 | +++ b/include/linux/perf_event.h |
1763 | @@ -455,6 +455,11 @@ struct pmu { |
1764 | * Filter events for PMU-specific reasons. |
1765 | */ |
1766 | int (*filter_match) (struct perf_event *event); /* optional */ |
1767 | + |
1768 | + /* |
1769 | + * Check period value for PERF_EVENT_IOC_PERIOD ioctl. |
1770 | + */ |
1771 | + int (*check_period) (struct perf_event *event, u64 value); /* optional */ |
1772 | }; |
1773 | |
1774 | /** |
1775 | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h |
1776 | index ed329a39d621..f8761774a94f 100644 |
1777 | --- a/include/linux/skbuff.h |
1778 | +++ b/include/linux/skbuff.h |
1779 | @@ -3102,6 +3102,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); |
1780 | void skb_scrub_packet(struct sk_buff *skb, bool xnet); |
1781 | unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); |
1782 | bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu); |
1783 | +bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); |
1784 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); |
1785 | struct sk_buff *skb_vlan_untag(struct sk_buff *skb); |
1786 | int skb_ensure_writable(struct sk_buff *skb, int write_len); |
1787 | @@ -3880,6 +3881,21 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) |
1788 | return hdr_len + skb_gso_transport_seglen(skb); |
1789 | } |
1790 | |
1791 | +/** |
1792 | + * skb_gso_mac_seglen - Return length of individual segments of a gso packet |
1793 | + * |
1794 | + * @skb: GSO skb |
1795 | + * |
1796 | + * skb_gso_mac_seglen is used to determine the real size of the |
1797 | + * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 |
1798 | + * headers (TCP/UDP). |
1799 | + */ |
1800 | +static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) |
1801 | +{ |
1802 | + unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); |
1803 | + return hdr_len + skb_gso_transport_seglen(skb); |
1804 | +} |
1805 | + |
1806 | /* Local Checksum Offload. |
1807 | * Compute outer checksum based on the assumption that the |
1808 | * inner checksum will be offloaded later. |
1809 | diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h |
1810 | index b02af0bf5777..66f6b84df287 100644 |
1811 | --- a/include/net/netfilter/nf_tables.h |
1812 | +++ b/include/net/netfilter/nf_tables.h |
1813 | @@ -87,6 +87,35 @@ struct nft_regs { |
1814 | }; |
1815 | }; |
1816 | |
1817 | +/* Store/load an u16 or u8 integer to/from the u32 data register. |
1818 | + * |
1819 | + * Note, when using concatenations, register allocation happens at 32-bit |
1820 | + * level. So for store instruction, pad the rest part with zero to avoid |
1821 | + * garbage values. |
1822 | + */ |
1823 | + |
1824 | +static inline void nft_reg_store16(u32 *dreg, u16 val) |
1825 | +{ |
1826 | + *dreg = 0; |
1827 | + *(u16 *)dreg = val; |
1828 | +} |
1829 | + |
1830 | +static inline void nft_reg_store8(u32 *dreg, u8 val) |
1831 | +{ |
1832 | + *dreg = 0; |
1833 | + *(u8 *)dreg = val; |
1834 | +} |
1835 | + |
1836 | +static inline u16 nft_reg_load16(u32 *sreg) |
1837 | +{ |
1838 | + return *(u16 *)sreg; |
1839 | +} |
1840 | + |
1841 | +static inline u8 nft_reg_load8(u32 *sreg) |
1842 | +{ |
1843 | + return *(u8 *)sreg; |
1844 | +} |
1845 | + |
1846 | static inline void nft_data_copy(u32 *dst, const struct nft_data *src, |
1847 | unsigned int len) |
1848 | { |
1849 | diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h |
1850 | index 659b1634de61..3d3de5e9f9cc 100644 |
1851 | --- a/include/uapi/linux/if_ether.h |
1852 | +++ b/include/uapi/linux/if_ether.h |
1853 | @@ -139,11 +139,18 @@ |
1854 | * This is an Ethernet frame header. |
1855 | */ |
1856 | |
1857 | +/* allow libcs like musl to deactivate this, glibc does not implement this. */ |
1858 | +#ifndef __UAPI_DEF_ETHHDR |
1859 | +#define __UAPI_DEF_ETHHDR 1 |
1860 | +#endif |
1861 | + |
1862 | +#if __UAPI_DEF_ETHHDR |
1863 | struct ethhdr { |
1864 | unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ |
1865 | unsigned char h_source[ETH_ALEN]; /* source ether addr */ |
1866 | __be16 h_proto; /* packet type ID field */ |
1867 | } __attribute__((packed)); |
1868 | +#endif |
1869 | |
1870 | |
1871 | #endif /* _UAPI_LINUX_IF_ETHER_H */ |
1872 | diff --git a/kernel/events/core.c b/kernel/events/core.c |
1873 | index 1af0bbf20984..17339506f9f8 100644 |
1874 | --- a/kernel/events/core.c |
1875 | +++ b/kernel/events/core.c |
1876 | @@ -4600,6 +4600,11 @@ static void __perf_event_period(struct perf_event *event, |
1877 | } |
1878 | } |
1879 | |
1880 | +static int perf_event_check_period(struct perf_event *event, u64 value) |
1881 | +{ |
1882 | + return event->pmu->check_period(event, value); |
1883 | +} |
1884 | + |
1885 | static int perf_event_period(struct perf_event *event, u64 __user *arg) |
1886 | { |
1887 | u64 value; |
1888 | @@ -4616,6 +4621,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) |
1889 | if (event->attr.freq && value > sysctl_perf_event_sample_rate) |
1890 | return -EINVAL; |
1891 | |
1892 | + if (perf_event_check_period(event, value)) |
1893 | + return -EINVAL; |
1894 | + |
1895 | event_function_call(event, __perf_event_period, &value); |
1896 | |
1897 | return 0; |
1898 | @@ -8622,6 +8630,11 @@ static int perf_pmu_nop_int(struct pmu *pmu) |
1899 | return 0; |
1900 | } |
1901 | |
1902 | +static int perf_event_nop_int(struct perf_event *event, u64 value) |
1903 | +{ |
1904 | + return 0; |
1905 | +} |
1906 | + |
1907 | static DEFINE_PER_CPU(unsigned int, nop_txn_flags); |
1908 | |
1909 | static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) |
1910 | @@ -8944,6 +8957,9 @@ got_cpu_context: |
1911 | pmu->pmu_disable = perf_pmu_nop_void; |
1912 | } |
1913 | |
1914 | + if (!pmu->check_period) |
1915 | + pmu->check_period = perf_event_nop_int; |
1916 | + |
1917 | if (!pmu->event_idx) |
1918 | pmu->event_idx = perf_event_idx_default; |
1919 | |
1920 | diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c |
1921 | index f4b5811ebe23..99becab2c1ce 100644 |
1922 | --- a/kernel/events/ring_buffer.c |
1923 | +++ b/kernel/events/ring_buffer.c |
1924 | @@ -700,7 +700,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags) |
1925 | size = sizeof(struct ring_buffer); |
1926 | size += nr_pages * sizeof(void *); |
1927 | |
1928 | - if (order_base_2(size) >= MAX_ORDER) |
1929 | + if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER) |
1930 | goto fail; |
1931 | |
1932 | rb = kzalloc(size, GFP_KERNEL); |
1933 | diff --git a/kernel/signal.c b/kernel/signal.c |
1934 | index 798b8f495ae2..c091dcc9f19b 100644 |
1935 | --- a/kernel/signal.c |
1936 | +++ b/kernel/signal.c |
1937 | @@ -2241,9 +2241,12 @@ relock: |
1938 | } |
1939 | |
1940 | /* Has this task already been marked for death? */ |
1941 | - ksig->info.si_signo = signr = SIGKILL; |
1942 | - if (signal_group_exit(signal)) |
1943 | + if (signal_group_exit(signal)) { |
1944 | + ksig->info.si_signo = signr = SIGKILL; |
1945 | + sigdelset(¤t->pending.signal, SIGKILL); |
1946 | + recalc_sigpending(); |
1947 | goto fatal; |
1948 | + } |
1949 | |
1950 | for (;;) { |
1951 | struct k_sigaction *ka; |
1952 | diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c |
1953 | index f0ab801a6437..c6eee3d9ed00 100644 |
1954 | --- a/kernel/trace/trace_uprobe.c |
1955 | +++ b/kernel/trace/trace_uprobe.c |
1956 | @@ -150,7 +150,14 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, |
1957 | |
1958 | ret = strncpy_from_user(dst, src, maxlen); |
1959 | if (ret == maxlen) |
1960 | - dst[--ret] = '\0'; |
1961 | + dst[ret - 1] = '\0'; |
1962 | + else if (ret >= 0) |
1963 | + /* |
1964 | + * Include the terminating null byte. In this case it |
1965 | + * was copied by strncpy_from_user but not accounted |
1966 | + * for in ret. |
1967 | + */ |
1968 | + ret++; |
1969 | |
1970 | if (ret < 0) { /* Failed to fetch string */ |
1971 | ((u8 *)get_rloc_data(dest))[0] = '\0'; |
1972 | diff --git a/mm/memory.c b/mm/memory.c |
1973 | index 35d8217bb046..47248dc0b9e1 100644 |
1974 | --- a/mm/memory.c |
1975 | +++ b/mm/memory.c |
1976 | @@ -3329,15 +3329,24 @@ static int do_fault(struct fault_env *fe) |
1977 | { |
1978 | struct vm_area_struct *vma = fe->vma; |
1979 | pgoff_t pgoff = linear_page_index(vma, fe->address); |
1980 | + int ret; |
1981 | |
1982 | /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ |
1983 | if (!vma->vm_ops->fault) |
1984 | - return VM_FAULT_SIGBUS; |
1985 | - if (!(fe->flags & FAULT_FLAG_WRITE)) |
1986 | - return do_read_fault(fe, pgoff); |
1987 | - if (!(vma->vm_flags & VM_SHARED)) |
1988 | - return do_cow_fault(fe, pgoff); |
1989 | - return do_shared_fault(fe, pgoff); |
1990 | + ret = VM_FAULT_SIGBUS; |
1991 | + else if (!(fe->flags & FAULT_FLAG_WRITE)) |
1992 | + ret = do_read_fault(fe, pgoff); |
1993 | + else if (!(vma->vm_flags & VM_SHARED)) |
1994 | + ret = do_cow_fault(fe, pgoff); |
1995 | + else |
1996 | + ret = do_shared_fault(fe, pgoff); |
1997 | + |
1998 | + /* preallocated pagetable is unused: free it */ |
1999 | + if (fe->prealloc_pte) { |
2000 | + pte_free(vma->vm_mm, fe->prealloc_pte); |
2001 | + fe->prealloc_pte = 0; |
2002 | + } |
2003 | + return ret; |
2004 | } |
2005 | |
2006 | static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, |
2007 | diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
2008 | index dca1fed0d7da..11501165f0df 100644 |
2009 | --- a/net/core/skbuff.c |
2010 | +++ b/net/core/skbuff.c |
2011 | @@ -4469,37 +4469,74 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) |
2012 | EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); |
2013 | |
2014 | /** |
2015 | - * skb_gso_validate_mtu - Return in case such skb fits a given MTU |
2016 | + * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS |
2017 | * |
2018 | - * @skb: GSO skb |
2019 | - * @mtu: MTU to validate against |
2020 | + * There are a couple of instances where we have a GSO skb, and we |
2021 | + * want to determine what size it would be after it is segmented. |
2022 | * |
2023 | - * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU |
2024 | - * once split. |
2025 | + * We might want to check: |
2026 | + * - L3+L4+payload size (e.g. IP forwarding) |
2027 | + * - L2+L3+L4+payload size (e.g. sanity check before passing to driver) |
2028 | + * |
2029 | + * This is a helper to do that correctly considering GSO_BY_FRAGS. |
2030 | + * |
2031 | + * @seg_len: The segmented length (from skb_gso_*_seglen). In the |
2032 | + * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS]. |
2033 | + * |
2034 | + * @max_len: The maximum permissible length. |
2035 | + * |
2036 | + * Returns true if the segmented length <= max length. |
2037 | */ |
2038 | -bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu) |
2039 | -{ |
2040 | +static inline bool skb_gso_size_check(const struct sk_buff *skb, |
2041 | + unsigned int seg_len, |
2042 | + unsigned int max_len) { |
2043 | const struct skb_shared_info *shinfo = skb_shinfo(skb); |
2044 | const struct sk_buff *iter; |
2045 | - unsigned int hlen; |
2046 | - |
2047 | - hlen = skb_gso_network_seglen(skb); |
2048 | |
2049 | if (shinfo->gso_size != GSO_BY_FRAGS) |
2050 | - return hlen <= mtu; |
2051 | + return seg_len <= max_len; |
2052 | |
2053 | /* Undo this so we can re-use header sizes */ |
2054 | - hlen -= GSO_BY_FRAGS; |
2055 | + seg_len -= GSO_BY_FRAGS; |
2056 | |
2057 | skb_walk_frags(skb, iter) { |
2058 | - if (hlen + skb_headlen(iter) > mtu) |
2059 | + if (seg_len + skb_headlen(iter) > max_len) |
2060 | return false; |
2061 | } |
2062 | |
2063 | return true; |
2064 | } |
2065 | + |
2066 | +/** |
2067 | + * skb_gso_validate_mtu - Return in case such skb fits a given MTU |
2068 | + * |
2069 | + * @skb: GSO skb |
2070 | + * @mtu: MTU to validate against |
2071 | + * |
2072 | + * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU |
2073 | + * once split. |
2074 | + */ |
2075 | +bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu) |
2076 | +{ |
2077 | + return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); |
2078 | +} |
2079 | EXPORT_SYMBOL_GPL(skb_gso_validate_mtu); |
2080 | |
2081 | +/** |
2082 | + * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? |
2083 | + * |
2084 | + * @skb: GSO skb |
2085 | + * @len: length to validate against |
2086 | + * |
2087 | + * skb_gso_validate_mac_len validates if a given skb will fit a wanted |
2088 | + * length once split, including L2, L3 and L4 headers and the payload. |
2089 | + */ |
2090 | +bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) |
2091 | +{ |
2092 | + return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); |
2093 | +} |
2094 | +EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); |
2095 | + |
2096 | static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) |
2097 | { |
2098 | int mac_len; |
2099 | diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c |
2100 | index 51ced81b616c..dc3628a396ec 100644 |
2101 | --- a/net/ipv4/netfilter/nft_masq_ipv4.c |
2102 | +++ b/net/ipv4/netfilter/nft_masq_ipv4.c |
2103 | @@ -26,10 +26,10 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr, |
2104 | memset(&range, 0, sizeof(range)); |
2105 | range.flags = priv->flags; |
2106 | if (priv->sreg_proto_min) { |
2107 | - range.min_proto.all = |
2108 | - *(__be16 *)®s->data[priv->sreg_proto_min]; |
2109 | - range.max_proto.all = |
2110 | - *(__be16 *)®s->data[priv->sreg_proto_max]; |
2111 | + range.min_proto.all = (__force __be16)nft_reg_load16( |
2112 | + ®s->data[priv->sreg_proto_min]); |
2113 | + range.max_proto.all = (__force __be16)nft_reg_load16( |
2114 | + ®s->data[priv->sreg_proto_max]); |
2115 | } |
2116 | regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, pkt->hook, |
2117 | &range, pkt->out); |
2118 | diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c |
2119 | index c09d4381427e..f760524e1353 100644 |
2120 | --- a/net/ipv4/netfilter/nft_redir_ipv4.c |
2121 | +++ b/net/ipv4/netfilter/nft_redir_ipv4.c |
2122 | @@ -26,10 +26,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr, |
2123 | |
2124 | memset(&mr, 0, sizeof(mr)); |
2125 | if (priv->sreg_proto_min) { |
2126 | - mr.range[0].min.all = |
2127 | - *(__be16 *)®s->data[priv->sreg_proto_min]; |
2128 | - mr.range[0].max.all = |
2129 | - *(__be16 *)®s->data[priv->sreg_proto_max]; |
2130 | + mr.range[0].min.all = (__force __be16)nft_reg_load16( |
2131 | + ®s->data[priv->sreg_proto_min]); |
2132 | + mr.range[0].max.all = (__force __be16)nft_reg_load16( |
2133 | + ®s->data[priv->sreg_proto_max]); |
2134 | mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; |
2135 | } |
2136 | |
2137 | diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c |
2138 | index 9597ffb74077..b74a420050c4 100644 |
2139 | --- a/net/ipv6/netfilter/nft_masq_ipv6.c |
2140 | +++ b/net/ipv6/netfilter/nft_masq_ipv6.c |
2141 | @@ -27,10 +27,10 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr, |
2142 | memset(&range, 0, sizeof(range)); |
2143 | range.flags = priv->flags; |
2144 | if (priv->sreg_proto_min) { |
2145 | - range.min_proto.all = |
2146 | - *(__be16 *)®s->data[priv->sreg_proto_min]; |
2147 | - range.max_proto.all = |
2148 | - *(__be16 *)®s->data[priv->sreg_proto_max]; |
2149 | + range.min_proto.all = (__force __be16)nft_reg_load16( |
2150 | + ®s->data[priv->sreg_proto_min]); |
2151 | + range.max_proto.all = (__force __be16)nft_reg_load16( |
2152 | + ®s->data[priv->sreg_proto_max]); |
2153 | } |
2154 | regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out); |
2155 | } |
2156 | diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c |
2157 | index aca44e89a881..7ef58e493fca 100644 |
2158 | --- a/net/ipv6/netfilter/nft_redir_ipv6.c |
2159 | +++ b/net/ipv6/netfilter/nft_redir_ipv6.c |
2160 | @@ -26,10 +26,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr, |
2161 | |
2162 | memset(&range, 0, sizeof(range)); |
2163 | if (priv->sreg_proto_min) { |
2164 | - range.min_proto.all = |
2165 | - *(__be16 *)®s->data[priv->sreg_proto_min], |
2166 | - range.max_proto.all = |
2167 | - *(__be16 *)®s->data[priv->sreg_proto_max], |
2168 | + range.min_proto.all = (__force __be16)nft_reg_load16( |
2169 | + ®s->data[priv->sreg_proto_min]); |
2170 | + range.max_proto.all = (__force __be16)nft_reg_load16( |
2171 | + ®s->data[priv->sreg_proto_max]); |
2172 | range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; |
2173 | } |
2174 | |
2175 | diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c |
2176 | index d7b0d171172a..2b9fda71fa8b 100644 |
2177 | --- a/net/netfilter/nft_ct.c |
2178 | +++ b/net/netfilter/nft_ct.c |
2179 | @@ -77,7 +77,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr, |
2180 | |
2181 | switch (priv->key) { |
2182 | case NFT_CT_DIRECTION: |
2183 | - *dest = CTINFO2DIR(ctinfo); |
2184 | + nft_reg_store8(dest, CTINFO2DIR(ctinfo)); |
2185 | return; |
2186 | case NFT_CT_STATUS: |
2187 | *dest = ct->status; |
2188 | @@ -129,10 +129,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr, |
2189 | return; |
2190 | } |
2191 | case NFT_CT_L3PROTOCOL: |
2192 | - *dest = nf_ct_l3num(ct); |
2193 | + nft_reg_store8(dest, nf_ct_l3num(ct)); |
2194 | return; |
2195 | case NFT_CT_PROTOCOL: |
2196 | - *dest = nf_ct_protonum(ct); |
2197 | + nft_reg_store8(dest, nf_ct_protonum(ct)); |
2198 | return; |
2199 | default: |
2200 | break; |
2201 | @@ -149,10 +149,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr, |
2202 | nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16); |
2203 | return; |
2204 | case NFT_CT_PROTO_SRC: |
2205 | - *dest = (__force __u16)tuple->src.u.all; |
2206 | + nft_reg_store16(dest, (__force u16)tuple->src.u.all); |
2207 | return; |
2208 | case NFT_CT_PROTO_DST: |
2209 | - *dest = (__force __u16)tuple->dst.u.all; |
2210 | + nft_reg_store16(dest, (__force u16)tuple->dst.u.all); |
2211 | return; |
2212 | default: |
2213 | break; |
2214 | diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c |
2215 | index 7c3395513ff0..cec8dc0e5e6f 100644 |
2216 | --- a/net/netfilter/nft_meta.c |
2217 | +++ b/net/netfilter/nft_meta.c |
2218 | @@ -45,16 +45,15 @@ void nft_meta_get_eval(const struct nft_expr *expr, |
2219 | *dest = skb->len; |
2220 | break; |
2221 | case NFT_META_PROTOCOL: |
2222 | - *dest = 0; |
2223 | - *(__be16 *)dest = skb->protocol; |
2224 | + nft_reg_store16(dest, (__force u16)skb->protocol); |
2225 | break; |
2226 | case NFT_META_NFPROTO: |
2227 | - *dest = pkt->pf; |
2228 | + nft_reg_store8(dest, pkt->pf); |
2229 | break; |
2230 | case NFT_META_L4PROTO: |
2231 | if (!pkt->tprot_set) |
2232 | goto err; |
2233 | - *dest = pkt->tprot; |
2234 | + nft_reg_store8(dest, pkt->tprot); |
2235 | break; |
2236 | case NFT_META_PRIORITY: |
2237 | *dest = skb->priority; |
2238 | @@ -85,14 +84,12 @@ void nft_meta_get_eval(const struct nft_expr *expr, |
2239 | case NFT_META_IIFTYPE: |
2240 | if (in == NULL) |
2241 | goto err; |
2242 | - *dest = 0; |
2243 | - *(u16 *)dest = in->type; |
2244 | + nft_reg_store16(dest, in->type); |
2245 | break; |
2246 | case NFT_META_OIFTYPE: |
2247 | if (out == NULL) |
2248 | goto err; |
2249 | - *dest = 0; |
2250 | - *(u16 *)dest = out->type; |
2251 | + nft_reg_store16(dest, out->type); |
2252 | break; |
2253 | case NFT_META_SKUID: |
2254 | sk = skb_to_full_sk(skb); |
2255 | @@ -142,22 +139,22 @@ void nft_meta_get_eval(const struct nft_expr *expr, |
2256 | #endif |
2257 | case NFT_META_PKTTYPE: |
2258 | if (skb->pkt_type != PACKET_LOOPBACK) { |
2259 | - *dest = skb->pkt_type; |
2260 | + nft_reg_store8(dest, skb->pkt_type); |
2261 | break; |
2262 | } |
2263 | |
2264 | switch (pkt->pf) { |
2265 | case NFPROTO_IPV4: |
2266 | if (ipv4_is_multicast(ip_hdr(skb)->daddr)) |
2267 | - *dest = PACKET_MULTICAST; |
2268 | + nft_reg_store8(dest, PACKET_MULTICAST); |
2269 | else |
2270 | - *dest = PACKET_BROADCAST; |
2271 | + nft_reg_store8(dest, PACKET_BROADCAST); |
2272 | break; |
2273 | case NFPROTO_IPV6: |
2274 | if (ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF) |
2275 | - *dest = PACKET_MULTICAST; |
2276 | + nft_reg_store8(dest, PACKET_MULTICAST); |
2277 | else |
2278 | - *dest = PACKET_BROADCAST; |
2279 | + nft_reg_store8(dest, PACKET_BROADCAST); |
2280 | break; |
2281 | case NFPROTO_NETDEV: |
2282 | switch (skb->protocol) { |
2283 | @@ -171,14 +168,14 @@ void nft_meta_get_eval(const struct nft_expr *expr, |
2284 | goto err; |
2285 | |
2286 | if (ipv4_is_multicast(iph->daddr)) |
2287 | - *dest = PACKET_MULTICAST; |
2288 | + nft_reg_store8(dest, PACKET_MULTICAST); |
2289 | else |
2290 | - *dest = PACKET_BROADCAST; |
2291 | + nft_reg_store8(dest, PACKET_BROADCAST); |
2292 | |
2293 | break; |
2294 | } |
2295 | case htons(ETH_P_IPV6): |
2296 | - *dest = PACKET_MULTICAST; |
2297 | + nft_reg_store8(dest, PACKET_MULTICAST); |
2298 | break; |
2299 | default: |
2300 | WARN_ON_ONCE(1); |
2301 | @@ -233,7 +230,9 @@ void nft_meta_set_eval(const struct nft_expr *expr, |
2302 | { |
2303 | const struct nft_meta *meta = nft_expr_priv(expr); |
2304 | struct sk_buff *skb = pkt->skb; |
2305 | - u32 value = regs->data[meta->sreg]; |
2306 | + u32 *sreg = ®s->data[meta->sreg]; |
2307 | + u32 value = *sreg; |
2308 | + u8 pkt_type; |
2309 | |
2310 | switch (meta->key) { |
2311 | case NFT_META_MARK: |
2312 | @@ -243,9 +242,12 @@ void nft_meta_set_eval(const struct nft_expr *expr, |
2313 | skb->priority = value; |
2314 | break; |
2315 | case NFT_META_PKTTYPE: |
2316 | - if (skb->pkt_type != value && |
2317 | - skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type)) |
2318 | - skb->pkt_type = value; |
2319 | + pkt_type = nft_reg_load8(sreg); |
2320 | + |
2321 | + if (skb->pkt_type != pkt_type && |
2322 | + skb_pkt_type_ok(pkt_type) && |
2323 | + skb_pkt_type_ok(skb->pkt_type)) |
2324 | + skb->pkt_type = pkt_type; |
2325 | break; |
2326 | case NFT_META_NFTRACE: |
2327 | skb->nf_trace = !!value; |
2328 | diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c |
2329 | index ee2d71753746..4c48e9bb21e2 100644 |
2330 | --- a/net/netfilter/nft_nat.c |
2331 | +++ b/net/netfilter/nft_nat.c |
2332 | @@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr, |
2333 | } |
2334 | |
2335 | if (priv->sreg_proto_min) { |
2336 | - range.min_proto.all = |
2337 | - *(__be16 *)®s->data[priv->sreg_proto_min]; |
2338 | - range.max_proto.all = |
2339 | - *(__be16 *)®s->data[priv->sreg_proto_max]; |
2340 | + range.min_proto.all = (__force __be16)nft_reg_load16( |
2341 | + ®s->data[priv->sreg_proto_min]); |
2342 | + range.max_proto.all = (__force __be16)nft_reg_load16( |
2343 | + ®s->data[priv->sreg_proto_max]); |
2344 | range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; |
2345 | } |
2346 | |
2347 | diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c |
2348 | index b3f7980b0f27..d646aa770ac8 100644 |
2349 | --- a/net/sched/sch_tbf.c |
2350 | +++ b/net/sched/sch_tbf.c |
2351 | @@ -142,16 +142,6 @@ static u64 psched_ns_t2l(const struct psched_ratecfg *r, |
2352 | return len; |
2353 | } |
2354 | |
2355 | -/* |
2356 | - * Return length of individual segments of a gso packet, |
2357 | - * including all headers (MAC, IP, TCP/UDP) |
2358 | - */ |
2359 | -static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) |
2360 | -{ |
2361 | - unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); |
2362 | - return hdr_len + skb_gso_transport_seglen(skb); |
2363 | -} |
2364 | - |
2365 | /* GSO packet is too big, segment it so that tbf can transmit |
2366 | * each segment in time |
2367 | */ |
2368 | diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c |
2369 | index ba9cd75e4c98..447b3a8a83c3 100644 |
2370 | --- a/sound/pci/hda/patch_conexant.c |
2371 | +++ b/sound/pci/hda/patch_conexant.c |
2372 | @@ -854,6 +854,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { |
2373 | SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK), |
2374 | SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK), |
2375 | SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK), |
2376 | + SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK), |
2377 | SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK), |
2378 | SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK), |
2379 | SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), |
2380 | diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c |
2381 | index e6ac7b9b4648..497bad9f2789 100644 |
2382 | --- a/sound/usb/pcm.c |
2383 | +++ b/sound/usb/pcm.c |
2384 | @@ -313,6 +313,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum, |
2385 | return 0; |
2386 | } |
2387 | |
2388 | +/* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk |
2389 | + * applies. Returns 1 if a quirk was found. |
2390 | + */ |
2391 | static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, |
2392 | struct usb_device *dev, |
2393 | struct usb_interface_descriptor *altsd, |
2394 | @@ -391,7 +394,7 @@ add_sync_ep: |
2395 | |
2396 | subs->data_endpoint->sync_master = subs->sync_endpoint; |
2397 | |
2398 | - return 0; |
2399 | + return 1; |
2400 | } |
2401 | |
2402 | static int set_sync_endpoint(struct snd_usb_substream *subs, |
2403 | @@ -430,6 +433,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs, |
2404 | if (err < 0) |
2405 | return err; |
2406 | |
2407 | + /* endpoint set by quirk */ |
2408 | + if (err > 0) |
2409 | + return 0; |
2410 | + |
2411 | if (altsd->bNumEndpoints < 2) |
2412 | return 0; |
2413 | |
2414 | diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c |
2415 | index 046a4850e3df..ff32ca1d81ff 100644 |
2416 | --- a/tools/perf/util/unwind-libdw.c |
2417 | +++ b/tools/perf/util/unwind-libdw.c |
2418 | @@ -231,7 +231,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg, |
2419 | |
2420 | err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui); |
2421 | |
2422 | - if (err && !ui->max_stack) |
2423 | + if (err && ui->max_stack != max_stack) |
2424 | err = 0; |
2425 | |
2426 | /* |