Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.8/0111-4.8.12-all-fixes.patch

Revision 2851
Fri Jan 6 09:45:13 2017 UTC by niro
File size: 51721 bytes
-linux-4.8.12
diff --git a/Makefile b/Makefile
index 2b1bcbacebcd..7b0c92f53169 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 8
-SUBLEVEL = 11
+SUBLEVEL = 12
EXTRAVERSION =
NAME = Psychotic Stoned Sheep

diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index af12c2db9bb8..81c11a62b1fa 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -33,7 +33,9 @@ config PARISC
select HAVE_ARCH_HASH
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
- select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT)
+ select GENERIC_SCHED_CLOCK
+ select HAVE_UNSTABLE_SCHED_CLOCK if SMP
+ select GENERIC_CLOCKEVENTS
select ARCH_NO_COHERENT_DMA_MMAP
select CPU_NO_EFFICIENT_FFS

diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 67001277256c..c2259d4a3c33 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -369,6 +369,7 @@ void __init parisc_setup_cache_timing(void)
{
unsigned long rangetime, alltime;
unsigned long size, start;
+ unsigned long threshold;

alltime = mfctl(16);
flush_data_cache();
@@ -382,17 +383,12 @@ void __init parisc_setup_cache_timing(void)
printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
alltime, size, rangetime);

- /* Racy, but if we see an intermediate value, it's ok too... */
- parisc_cache_flush_threshold = size * alltime / rangetime;
-
- parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
- if (!parisc_cache_flush_threshold)
- parisc_cache_flush_threshold = FLUSH_THRESHOLD;
-
- if (parisc_cache_flush_threshold > cache_info.dc_size)
- parisc_cache_flush_threshold = cache_info.dc_size;
-
- printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
+ threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
+ if (threshold > cache_info.dc_size)
+ threshold = cache_info.dc_size;
+ if (threshold)
+ parisc_cache_flush_threshold = threshold;
+ printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
parisc_cache_flush_threshold/1024);

/* calculate TLB flush threshold */
@@ -401,7 +397,7 @@ void __init parisc_setup_cache_timing(void)
flush_tlb_all();
alltime = mfctl(16) - alltime;

- size = PAGE_SIZE;
+ size = 0;
start = (unsigned long) _text;
rangetime = mfctl(16);
while (start < (unsigned long) _end) {
@@ -414,13 +410,10 @@ void __init parisc_setup_cache_timing(void)
printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
alltime, size, rangetime);

- parisc_tlb_flush_threshold = size * alltime / rangetime;
- parisc_tlb_flush_threshold *= num_online_cpus();
- parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
- if (!parisc_tlb_flush_threshold)
- parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
-
- printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
+ threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
+ if (threshold)
+ parisc_tlb_flush_threshold = threshold;
+ printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
parisc_tlb_flush_threshold/1024);
}

diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index b743a80eaba0..675521919229 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -96,7 +96,7 @@ fitmanyloop: /* Loop if LOOP >= 2 */

fitmanymiddle: /* Loop if LOOP >= 2 */
addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
- pitlbe 0(%sr1, %r28)
+ pitlbe %r0(%sr1, %r28)
pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */
addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */
copy %arg3, %r31 /* Re-init inner loop count */
@@ -139,7 +139,7 @@ fdtmanyloop: /* Loop if LOOP >= 2 */

fdtmanymiddle: /* Loop if LOOP >= 2 */
addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
- pdtlbe 0(%sr1, %r28)
+ pdtlbe %r0(%sr1, %r28)
pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */
addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */
copy %arg3, %r31 /* Re-init inner loop count */
@@ -620,12 +620,12 @@ ENTRY(copy_user_page_asm)
/* Purge any old translations */

#ifdef CONFIG_PA20
- pdtlb,l 0(%r28)
- pdtlb,l 0(%r29)
+ pdtlb,l %r0(%r28)
+ pdtlb,l %r0(%r29)
#else
tlb_lock %r20,%r21,%r22
- pdtlb 0(%r28)
- pdtlb 0(%r29)
+ pdtlb %r0(%r28)
+ pdtlb %r0(%r29)
tlb_unlock %r20,%r21,%r22
#endif

@@ -768,10 +768,10 @@ ENTRY(clear_user_page_asm)
/* Purge any old translation */

#ifdef CONFIG_PA20
- pdtlb,l 0(%r28)
+ pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
- pdtlb 0(%r28)
+ pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
#endif

@@ -852,10 +852,10 @@ ENTRY(flush_dcache_page_asm)
/* Purge any old translation */

#ifdef CONFIG_PA20
- pdtlb,l 0(%r28)
+ pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
- pdtlb 0(%r28)
+ pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
#endif

@@ -892,10 +892,10 @@ ENTRY(flush_dcache_page_asm)
sync

#ifdef CONFIG_PA20
- pdtlb,l 0(%r25)
+ pdtlb,l %r0(%r25)
#else
tlb_lock %r20,%r21,%r22
- pdtlb 0(%r25)
+ pdtlb %r0(%r25)
tlb_unlock %r20,%r21,%r22
#endif

@@ -925,13 +925,18 @@ ENTRY(flush_icache_page_asm)
depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
#endif

- /* Purge any old translation */
+ /* Purge any old translation. Note that the FIC instruction
+ * may use either the instruction or data TLB. Given that we
+ * have a flat address space, it's not clear which TLB will be
+ * used. So, we purge both entries. */

#ifdef CONFIG_PA20
+ pdtlb,l %r0(%r28)
pitlb,l %r0(%sr4,%r28)
#else
tlb_lock %r20,%r21,%r22
- pitlb (%sr4,%r28)
+ pdtlb %r0(%r28)
+ pitlb %r0(%sr4,%r28)
tlb_unlock %r20,%r21,%r22
#endif

@@ -970,10 +975,12 @@ ENTRY(flush_icache_page_asm)
sync

#ifdef CONFIG_PA20
+ pdtlb,l %r0(%r28)
pitlb,l %r0(%sr4,%r25)
#else
tlb_lock %r20,%r21,%r22
- pitlb (%sr4,%r25)
+ pdtlb %r0(%r28)
+ pitlb %r0(%sr4,%r25)
tlb_unlock %r20,%r21,%r22
#endif

diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 02d9ed0f3949..494ff6e8c88a 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -95,8 +95,8 @@ static inline int map_pte_uncached(pte_t * pte,

if (!pte_none(*pte))
printk(KERN_ERR "map_pte_uncached: page already exists\n");
- set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
purge_tlb_start(flags);
+ set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
pdtlb_kernel(orig_vaddr);
purge_tlb_end(flags);
vaddr += PAGE_SIZE;
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 81d6f6391944..2e66a887788e 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -334,6 +334,10 @@ static int __init parisc_init(void)
/* tell PDC we're Linux. Nevermind failure. */
pdc_stable_write(0x40, &osid, sizeof(osid));

+ /* start with known state */
+ flush_cache_all_local();
+ flush_tlb_all_local(NULL);
+
processor_init();
#ifdef CONFIG_SMP
pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n",
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 9b63b876a13a..325f30d82b64 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/sched.h>
+#include <linux/sched_clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
@@ -39,18 +40,6 @@

static unsigned long clocktick __read_mostly; /* timer cycles per tick */

-#ifndef CONFIG_64BIT
-/*
- * The processor-internal cycle counter (Control Register 16) is used as time
- * source for the sched_clock() function. This register is 64bit wide on a
- * 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always
- * requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits
- * with a per-cpu variable which we increase every time the counter
- * wraps-around (which happens every ~4 secounds).
- */
-static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
-#endif
-
/*
* We keep time on PA-RISC Linux by using the Interval Timer which is
* a pair of registers; one is read-only and one is write-only; both
@@ -121,12 +110,6 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
*/
mtctl(next_tick, 16);

-#if !defined(CONFIG_64BIT)
- /* check for overflow on a 32bit kernel (every ~4 seconds). */
- if (unlikely(next_tick < now))
- this_cpu_inc(cr16_high_32_bits);
-#endif
-
/* Skip one clocktick on purpose if we missed next_tick.
* The new CR16 must be "later" than current CR16 otherwise
* itimer would not fire until CR16 wrapped - e.g 4 seconds
@@ -208,7 +191,7 @@ EXPORT_SYMBOL(profile_pc);

/* clock source code */

-static cycle_t read_cr16(struct clocksource *cs)
+static cycle_t notrace read_cr16(struct clocksource *cs)
{
return get_cycles();
}
@@ -287,26 +270,9 @@ void read_persistent_clock(struct timespec *ts)
}


-/*
- * sched_clock() framework
- */
-
-static u32 cyc2ns_mul __read_mostly;
-static u32 cyc2ns_shift __read_mostly;
-
-u64 sched_clock(void)
+static u64 notrace read_cr16_sched_clock(void)
{
- u64 now;
-
- /* Get current cycle counter (Control Register 16). */
-#ifdef CONFIG_64BIT
- now = mfctl(16);
-#else
- now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
-#endif
-
- /* return the value in ns (cycles_2_ns) */
- return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
+ return get_cycles();
}


@@ -316,17 +282,16 @@ u64 sched_clock(void)

void __init time_init(void)
{
- unsigned long current_cr16_khz;
+ unsigned long cr16_hz;

- current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */
clocktick = (100 * PAGE0->mem_10msec) / HZ;
-
- /* calculate mult/shift values for cr16 */
- clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
- NSEC_PER_MSEC, 0);
-
start_cpu_itimer(); /* get CPU 0 started */

+ cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */
+
/* register at clocksource framework */
- clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
+ clocksource_register_hz(&clocksource_cr16, cr16_hz);
+
+ /* register as sched_clock source */
+ sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
}
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index d80161b633f4..60522d22a428 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -217,8 +217,12 @@ void start(void)
console_ops.close();

kentry = (kernel_entry_t) vmlinux.addr;
- if (ft_addr)
- kentry(ft_addr, 0, NULL);
+ if (ft_addr) {
+ if(platform_ops.kentry)
+ platform_ops.kentry(ft_addr, vmlinux.addr);
+ else
+ kentry(ft_addr, 0, NULL);
+ }
else
kentry((unsigned long)initrd.addr, initrd.size,
loader_info.promptr);
diff --git a/arch/powerpc/boot/opal-calls.S b/arch/powerpc/boot/opal-calls.S
index ff2f1b97bc53..2a99fc9a3ccf 100644
--- a/arch/powerpc/boot/opal-calls.S
+++ b/arch/powerpc/boot/opal-calls.S
@@ -12,6 +12,19 @@

.text

+ .globl opal_kentry
+opal_kentry:
+ /* r3 is the fdt ptr */
+ mtctr r4
+ li r4, 0
+ li r5, 0
+ li r6, 0
+ li r7, 0
+ ld r11,opal@got(r2)
+ ld r8,0(r11)
+ ld r9,8(r11)
+ bctr
+
#define OPAL_CALL(name, token) \
.globl name; \
name: \
diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c
index 1f37e1c1d6d8..d7b4fd47eb44 100644
--- a/arch/powerpc/boot/opal.c
+++ b/arch/powerpc/boot/opal.c
@@ -23,14 +23,25 @@ struct opal {

static u32 opal_con_id;

+/* see opal-wrappers.S */
int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer);
int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer);
int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length);
int64_t opal_console_flush(uint64_t term_number);
int64_t opal_poll_events(uint64_t *outstanding_event_mask);

+void opal_kentry(unsigned long fdt_addr, void *vmlinux_addr);
+
static int opal_con_open(void)
{
+ /*
+ * When OPAL loads the boot kernel it stashes the OPAL base and entry
+ * address in r8 and r9 so the kernel can use the OPAL console
+ * before unflattening the devicetree. While executing the wrapper will
+ * probably trash r8 and r9 so this kentry hook restores them before
+ * entering the decompressed kernel.
+ */
+ platform_ops.kentry = opal_kentry;
return 0;
}

diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index e19b64ef977a..deeae6f6ba9c 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -30,6 +30,7 @@ struct platform_ops {
void * (*realloc)(void *ptr, unsigned long size);
void (*exit)(void);
void * (*vmlinux_alloc)(unsigned long size);
+ void (*kentry)(unsigned long fdt_addr, void *vmlinux_addr);
};
extern struct platform_ops platform_ops;

diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e2fb408f8398..fd10b582fb2d 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -29,6 +29,12 @@
*/

/*
+ * Kernel read only support.
+ * We added the ppp value 0b110 in ISA 2.04.
+ */
+#define MMU_FTR_KERNEL_RO ASM_CONST(0x00004000)
+
+/*
* We need to clear top 16bits of va (from the remaining 64 bits )in
* tlbie* instructions
*/
@@ -103,10 +109,10 @@
#define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2
#define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA
#define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
-#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
-#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
-#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
-#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
+#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
+#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
+#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 978dada662ae..52cbf043e960 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -355,6 +355,7 @@
#define LPCR_PECE0 ASM_CONST(0x0000000000004000) /* ext. exceptions can cause exit */
#define LPCR_PECE1 ASM_CONST(0x0000000000002000) /* decrementer can cause exit */
#define LPCR_PECE2 ASM_CONST(0x0000000000001000) /* machine check etc can cause exit */
+#define LPCR_PECE_HVEE ASM_CONST(0x0000400000000000) /* P9 Wakeup on HV interrupts */
#define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */
#define LPCR_MER_SH 11
#define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 52ff3f025437..37c027ca83b2 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -98,8 +98,8 @@ _GLOBAL(__setup_cpu_power9)
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
- ori r3, r3, LPCR_PECEDH
- ori r3, r3, LPCR_HVICE
+ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
+ or r3, r3, r4
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power9
@@ -118,8 +118,8 @@ _GLOBAL(__restore_cpu_power9)
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
- ori r3, r3, LPCR_PECEDH
- ori r3, r3, LPCR_HVICE
+ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
+ or r3, r3, r4
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power9
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 28923b2e2df1..8dff9ce6fbc1 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -190,8 +190,12 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
/*
* Kernel read only mapped with ppp bits 0b110
*/
- if (!(pteflags & _PAGE_WRITE))
- rflags |= (HPTE_R_PP0 | 0x2);
+ if (!(pteflags & _PAGE_WRITE)) {
+ if (mmu_has_feature(MMU_FTR_KERNEL_RO))
+ rflags |= (HPTE_R_PP0 | 0x2);
+ else
+ rflags |= 0x3;
+ }
} else {
if (pteflags & _PAGE_RWX)
rflags |= 0x2;
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 178989e6d3e3..ea960d660917 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
*/
unsigned long long sched_clock(void)
{
- return clocksource_cyc2ns(get_cycles(),
- sched_clock_mult, SCHED_CLOCK_SHIFT);
+ return mult_frac(get_cycles(),
+ sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
}

int setup_profiling_timer(unsigned int multiplier)
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 9b983a474253..8fc714b4f18a 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1070,20 +1070,20 @@ static void setup_pebs_sample_data(struct perf_event *event,
}

/*
- * We use the interrupt regs as a base because the PEBS record
- * does not contain a full regs set, specifically it seems to
- * lack segment descriptors, which get used by things like
- * user_mode().
+ * We use the interrupt regs as a base because the PEBS record does not
+ * contain a full regs set, specifically it seems to lack segment
+ * descriptors, which get used by things like user_mode().
*
- * In the simple case fix up only the IP and BP,SP regs, for
- * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
- * A possible PERF_SAMPLE_REGS will have to transfer all regs.
+ * In the simple case fix up only the IP for PERF_SAMPLE_IP.
+ *
+ * We must however always use BP,SP from iregs for the unwinder to stay
+ * sane; the record BP,SP can point into thin air when the record is
+ * from a previous PMI context or an (I)RET happend between the record
+ * and PMI.
*/
*regs = *iregs;
regs->flags = pebs->flags;
set_linear_ip(regs, pebs->ip);
- regs->bp = pebs->bp;
- regs->sp = pebs->sp;

if (sample_type & PERF_SAMPLE_REGS_INTR) {
regs->ax = pebs->ax;
@@ -1092,10 +1092,21 @@ static void setup_pebs_sample_data(struct perf_event *event,
regs->dx = pebs->dx;
regs->si = pebs->si;
regs->di = pebs->di;
- regs->bp = pebs->bp;
- regs->sp = pebs->sp;

- regs->flags = pebs->flags;
+ /*
+ * Per the above; only set BP,SP if we don't need callchains.
+ *
+ * XXX: does this make sense?
+ */
+ if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ regs->bp = pebs->bp;
+ regs->sp = pebs->sp;
+ }
+
+ /*
+ * Preserve PERF_EFLAGS_VM from set_linear_ip().
+ */
+ regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
#ifndef CONFIG_X86_32
regs->r8 = pebs->r8;
regs->r9 = pebs->r9;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 8c4a47706296..181c238d4df9 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -113,7 +113,7 @@ struct debug_store {
* Per register state.
*/
struct er_account {
- raw_spinlock_t lock; /* per-core: protect structure */
+ raw_spinlock_t lock; /* per-core: protect structure */
u64 config; /* extra MSR config */
u64 reg; /* extra MSR number */
atomic_t ref; /* reference count */
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 3fc03a09a93b..c289e2f4a6e5 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -517,14 +517,14 @@ void fpu__clear(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

- if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
- /* FPU state will be reallocated lazily at the first use. */
- fpu__drop(fpu);
- } else {
- if (!fpu->fpstate_active) {
- fpu__activate_curr(fpu);
- user_fpu_begin();
- }
+ fpu__drop(fpu);
+
+ /*
+ * Make sure fpstate is cleared and initialized.
+ */
+ if (static_cpu_has(X86_FEATURE_FPU)) {
+ fpu__activate_curr(fpu);
+ user_fpu_begin();
copy_init_fpstate_to_fpregs();
}
}
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index cbd7b92585bb..a3ce9d260d68 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2105,16 +2105,10 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
- unsigned short sel, old_sel;
- struct desc_struct old_desc, new_desc;
- const struct x86_emulate_ops *ops = ctxt->ops;
+ unsigned short sel;
+ struct desc_struct new_desc;
u8 cpl = ctxt->ops->cpl(ctxt);

- /* Assignment of RIP may only fail in 64-bit mode */
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
- VCPU_SREG_CS);
-
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
@@ -2124,12 +2118,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
return rc;

rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
- if (rc != X86EMUL_CONTINUE) {
- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
- /* assigning eip failed; restore the old cs */
- ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
- return rc;
- }
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+
return rc;
}

@@ -2189,14 +2181,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip, cs;
- u16 old_cs;
int cpl = ctxt->ops->cpl(ctxt);
- struct desc_struct old_desc, new_desc;
- const struct x86_emulate_ops *ops = ctxt->ops;
-
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
- VCPU_SREG_CS);
+ struct desc_struct new_desc;

rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
@@ -2213,10 +2199,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, eip, &new_desc);
- if (rc != X86EMUL_CONTINUE) {
- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
- ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
- }
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+
return rc;
}

diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 1a22de70f7f7..6e219e5c07d2 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -94,7 +94,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
ioapic->rtc_status.pending_eoi = 0;
- bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS);
+ bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index 7d2692a49657..1cc6e54436db 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -42,13 +42,13 @@ struct kvm_vcpu;

struct dest_map {
/* vcpu bitmap where IRQ has been sent */
- DECLARE_BITMAP(map, KVM_MAX_VCPUS);
+ DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);

/*
* Vector sent to a given vcpu, only valid when
* the vcpu's bit in map is set
*/
- u8 vectors[KVM_MAX_VCPUS];
+ u8 vectors[KVM_MAX_VCPU_ID];
};


diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 25810b144b58..e7a112ac51a8 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -41,6 +41,15 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
bool line_status)
{
struct kvm_pic *pic = pic_irqchip(kvm);
+
+ /*
+ * XXX: rejecting pic routes when pic isn't in use would be better,
+ * but the default routing table is installed while kvm->arch.vpic is
+ * NULL and KVM_CREATE_IRQCHIP can race with KVM_IRQ_LINE.
+ */
+ if (!pic)
+ return -1;
+
return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
}

@@ -49,6 +58,10 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
bool line_status)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+
+ if (!ioapic)
+ return -1;
+
return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
line_status);
}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index b62c85229711..d2255e4f9589 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -138,7 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
*mask = dest_id & 0xff;
return true;
case KVM_APIC_MODE_XAPIC_CLUSTER:
- *cluster = map->xapic_cluster_map[dest_id >> 4];
+ *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
*mask = dest_id & 0xf;
return true;
default:
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 832b98f822be..a3a983fd4248 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -135,7 +135,12 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
if (early_recursion_flag > 2)
goto halt_loop;

- if (regs->cs != __KERNEL_CS)
+ /*
+ * Old CPUs leave the high bits of CS on the stack
+ * undefined. I'm not sure which CPUs do this, but at least
+ * the 486 DX works this way.
+ */
+ if ((regs->cs & 0xFFFF) != __KERNEL_CS)
goto fail;

/*
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 865f46ea724f..c80765b211cf 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -133,7 +133,6 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
return cert;

error_decode:
- kfree(cert->pub->key);
kfree(ctx);
error_no_ctx:
x509_free_certificate(cert);
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 29f600f2c447..ff64313770bd 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -323,8 +323,8 @@ static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
if (!dax_dev->alive)
return -ENXIO;

- /* prevent private / writable mappings from being established */
- if ((vma->vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE) {
+ /* prevent private mappings from being established */
+ if ((vma->vm_flags & VM_SHARED) != VM_SHARED) {
dev_info(dev, "%s: %s: fail, attempted private mapping\n",
current->comm, func);
return -EINVAL;
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 73ae849f5170..76dd42dd7088 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -77,7 +77,9 @@ static int dax_pmem_probe(struct device *dev)
nsio = to_nd_namespace_io(&ndns->dev);

/* parse the 'pfn' info block via ->rw_bytes */
- devm_nsio_enable(dev, nsio);
+ rc = devm_nsio_enable(dev, nsio);
+ if (rc)
+ return rc;
altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap);
if (IS_ERR(altmap))
return PTR_ERR(altmap);
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 58470f5ced04..8c53748a769d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -338,7 +338,9 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
struct pci_dev *pdev = to_pci_dev(data);
struct dmar_pci_notify_info *info;

- /* Only care about add/remove events for physical functions */
+ /* Only care about add/remove events for physical functions.
+ * For VFs we actually do the lookup based on the corresponding
+ * PF in device_to_iommu() anyway. */
if (pdev->is_virtfn)
return NOTIFY_DONE;
if (action != BUS_NOTIFY_ADD_DEVICE &&
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 1257b0b80296..7fb538708cec 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -892,7 +892,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
return NULL;

if (dev_is_pci(dev)) {
+ struct pci_dev *pf_pdev;
+
pdev = to_pci_dev(dev);
+ /* VFs aren't listed in scope tables; we need to look up
+ * the PF instead to find the IOMMU. */
+ pf_pdev = pci_physfn(pdev);
+ dev = &pf_pdev->dev;
segment = pci_domain_nr(pdev->bus);
} else if (has_acpi_companion(dev))
dev = &ACPI_COMPANION(dev)->dev;
@@ -905,6 +911,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, tmp) {
if (tmp == dev) {
+ /* For a VF use its original BDF# not that of the PF
+ * which we used for the IOMMU lookup. Strictly speaking
+ * we could do this for all PCI devices; we only need to
+ * get the BDF# from the scope table for ACPI matches. */
+ if (pdev->is_virtfn)
+ goto got_pdev;
+
*bus = drhd->devices[i].bus;
*devfn = drhd->devices[i].devfn;
goto out;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 8ebb3530afa7..cb72e0011310 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -39,10 +39,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
struct page *pages;
int order;

- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
- if (order < 0)
- order = 0;
-
+ /* Start at 2 because it's defined as 2^(1+PSS) */
+ iommu->pasid_max = 2 << ecap_pss(iommu->ecap);
+
+ /* Eventually I'm promised we will get a multi-level PASID table
+ * and it won't have to be physically contiguous. Until then,
+ * limit the size because 8MiB contiguous allocations can be hard
+ * to come by. The limit of 0x20000, which is 1MiB for each of
+ * the PASID and PASID-state tables, is somewhat arbitrary. */
+ if (iommu->pasid_max > 0x20000)
+ iommu->pasid_max = 0x20000;
+
+ order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!pages) {
pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
@@ -53,6 +61,8 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);

if (ecap_dis(iommu->ecap)) {
+ /* Just making it explicit... */
+ BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (pages)
iommu->pasid_state_table = page_address(pages);
@@ -68,11 +78,7 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)

int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
{
- int order;
-
- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
- if (order < 0)
- order = 0;
+ int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);

if (iommu->pasid_table) {
free_pages((unsigned long)iommu->pasid_table, order);
@@ -371,8 +377,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
}
svm->iommu = iommu;

- if (pasid_max > 2 << ecap_pss(iommu->ecap))
- pasid_max = 2 << ecap_pss(iommu->ecap);
+ if (pasid_max > iommu->pasid_max)
+ pasid_max = iommu->pasid_max;

/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
ret = idr_alloc(&iommu->pasid_idr, svm,
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 317ef63ee789..8d96a22647b3 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -281,6 +281,14 @@ static void free_firmware(struct xc2028_data *priv)
int i;
tuner_dbg("%s called\n", __func__);

+ /* free allocated f/w string */
+ if (priv->fname != firmware_name)
+ kfree(priv->fname);
+ priv->fname = NULL;
+
+ priv->state = XC2028_NO_FIRMWARE;
+ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
+
if (!priv->firm)
return;

@@ -291,9 +299,6 @@ static void free_firmware(struct xc2028_data *priv)

priv->firm = NULL;
priv->firm_size = 0;
- priv->state = XC2028_NO_FIRMWARE;
-
- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
}

static int load_all_firmwares(struct dvb_frontend *fe,
@@ -884,9 +889,8 @@ read_not_reliable:
return 0;

fail:
- priv->state = XC2028_NO_FIRMWARE;
+ free_firmware(priv);

- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
if (retry_count < 8) {
msleep(50);
retry_count++;
@@ -1332,11 +1336,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
mutex_lock(&xc2028_list_mutex);

/* only perform final cleanup if this is the last instance */
- if (hybrid_tuner_report_instance_count(priv) == 1) {
+ if (hybrid_tuner_report_instance_count(priv) == 1)
free_firmware(priv);
- kfree(priv->ctrl.fname);
- priv->ctrl.fname = NULL;
- }

if (priv)
hybrid_tuner_release_state(priv);
@@ -1399,19 +1400,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)

/*
* Copy the config data.
- * For the firmware name, keep a local copy of the string,
- * in order to avoid troubles during device release.
*/
- kfree(priv->ctrl.fname);
- priv->ctrl.fname = NULL;
memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
- if (p->fname) {
- priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
- if (priv->ctrl.fname == NULL) {
- rc = -ENOMEM;
- goto unlock;
- }
- }

/*
* If firmware name changed, frees firmware. As free_firmware will
@@ -1426,10 +1416,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)

if (priv->state == XC2028_NO_FIRMWARE) {
if (!firmware_name[0])
- priv->fname = priv->ctrl.fname;
+ priv->fname = kstrdup(p->fname, GFP_KERNEL);
else
priv->fname = firmware_name;

+ if (!priv->fname) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
rc = request_firmware_nowait(THIS_MODULE, 1,
priv->fname,
priv->i2c_props.adap->dev.parent,
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 239be2fde242..2267601f0ac1 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -66,6 +66,20 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
return ret;
}
}
+ /*
+ * The DAT[3:0] line signal levels and the CMD line signal level are
+ * not compatible with standard SDHC register. The line signal levels
+ * DAT[7:0] are at bits 31:24 and the command line signal level is at
+ * bit 23. All other bits are the same as in the standard SDHC
+ * register.
+ */
+ if (spec_reg == SDHCI_PRESENT_STATE) {
+ ret = value & 0x000fffff;
+ ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
+ ret |= (value << 1) & SDHCI_CMD_LVL;
+ return ret;
+ }
+
ret = value;
return ret;
}
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0411c9f36461..1b3bd1c7f4f6 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -73,6 +73,7 @@
#define SDHCI_DATA_LVL_MASK 0x00F00000
#define SDHCI_DATA_LVL_SHIFT 20
#define SDHCI_DATA_0_LVL_MASK 0x00100000
+#define SDHCI_CMD_LVL 0x01000000

#define SDHCI_HOST_CONTROL 0x28
#define SDHCI_CTRL_LED 0x01
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 46c0f5ecd99d..58e60298a360 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3894,6 +3894,11 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
}
}

+static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
+{
+ return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
+}
+
/**
* _scsih_flush_running_cmds - completing outstanding commands.
* @ioc: per adapter object
@@ -3915,6 +3920,9 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
if (!scmd)
continue;
count++;
+ if (ata_12_16_cmd(scmd))
+ scsi_internal_device_unblock(scmd->device,
+ SDEV_RUNNING);
mpt3sas_base_free_smid(ioc, smid);
scsi_dma_unmap(scmd);
if (ioc->pci_error_recovery)
@@ -4019,8 +4027,6 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
SAM_STAT_CHECK_CONDITION;
}

-
-
/**
* scsih_qcmd - main scsi request entry point
* @scmd: pointer to scsi command object
@@ -4047,6 +4053,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (ioc->logging_level & MPT_DEBUG_SCSI)
scsi_print_command(scmd);

+ /*
+ * Lock the device for any subsequent command until command is
+ * done.
+ */
+ if (ata_12_16_cmd(scmd))
+ scsi_internal_device_block(scmd->device);
+
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
@@ -4622,6 +4635,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
if (scmd == NULL)
return 1;

+ if (ata_12_16_cmd(scmd))
+ scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
+
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

if (mpi_reply == NULL) {
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 7a223074df3d..afada655f861 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -669,9 +669,16 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
.set_cur_state = powerclamp_set_cur_state,
};

+static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = {
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
+
static int __init powerclamp_probe(void)
{
- if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+
+ if (!x86_match_cpu(intel_powerclamp_ids)) {
pr_err("CPU does not support MWAIT");
return -ENODEV;
}
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 69426e644d17..3dbb4a21ab44 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -914,6 +914,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
if (!ci)
return -ENOMEM;

+ spin_lock_init(&ci->lock);
ci->dev = dev;
ci->platdata = dev_get_platdata(dev);
ci->imx28_write_fix = !!(ci->platdata->flags &
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index b93356834bb5..bced28fa1cbd 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1895,8 +1895,6 @@ static int udc_start(struct ci_hdrc *ci)
struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
int retval = 0;

- spin_lock_init(&ci->lock);
-
ci->gadget.ops = &usb_gadget_ops;
ci->gadget.speed = USB_SPEED_UNKNOWN;
ci->gadget.max_speed = USB_SPEED_HIGH;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f61477bed3a8..243ac5ebe46a 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -131,6 +131,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+ { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 0ff7f38d7800..6e9fc8bcc285 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1012,6 +1012,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
+ { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ } /* Terminating entry */
};

diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 21011c0a4c64..48ee04c94a75 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -596,6 +596,12 @@
#define STK541_PID 0x2109 /* Zigbee Controller */

/*
+ * Texas Instruments
+ */
+#define TI_VID 0x0451
+#define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */
+
+/*
* Blackfin gnICE JTAG
* http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
*/
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index ffd086733421..1a59f335b063 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -954,10 +954,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)

/* COMMAND STAGE */
/* let's send the command via the control pipe */
+ /*
+ * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack.
+ * Stack may be vmallocated. So no DMA for us. Make a copy.
+ */
+ memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
US_CBI_ADSC,
USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
- us->ifnum, srb->cmnd, srb->cmd_len);
+ us->ifnum, us->iobuf, srb->cmd_len);

/* check the return code for the command */
usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 52a28311e2a4..48efe62e1302 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -261,7 +261,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
}

ret = -EPROTONOSUPPORT;
- if (minorversion == 0)
+ if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0)
ret = nfs4_callback_up_net(serv, net);
else if (xprt->ops->bc_up)
ret = xprt->ops->bc_up(serv, net);
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 2d9b650047a5..d49e26c6cdc7 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -429,6 +429,7 @@ struct intel_iommu {
struct page_req_dsc *prq;
unsigned char prq_name[16]; /* Name for PRQ interrupt */
struct idr pasid_idr;
+ u32 pasid_max;
#endif
struct q_inval *qi; /* Queued invalidation info */
u32 *iommu_state; /* Store iommu states between suspend and resume.*/
diff --git a/kernel/events/core.c b/kernel/events/core.c
index fc9bb2225291..f8c5f5ec666e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7908,6 +7908,7 @@ restart:
* if <size> is not specified, the range is treated as a single address.
*/
enum {
+ IF_ACT_NONE = -1,
IF_ACT_FILTER,
IF_ACT_START,
IF_ACT_STOP,
@@ -7931,6 +7932,7 @@ static const match_table_t if_tokens = {
{ IF_SRC_KERNEL, "%u/%u" },
{ IF_SRC_FILEADDR, "%u@%s" },
{ IF_SRC_KERNELADDR, "%u" },
+ { IF_ACT_NONE, NULL },
};

/*
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 5464c8744ea9..e24388a863a7 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
if (!esize) {
/* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
* depending on if MOD equals 1. */
- rp[0] = 1;
res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
+ if (res->nlimbs) {
+ if (mpi_resize(res, 1) < 0)
+ goto enomem;
+ rp = res->d;
+ rp[0] = 1;
+ }
res->sign = 0;
goto leave;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a2214c64ed3c..7401e996009a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3161,6 +3161,16 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_fla
if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
return false;

+#ifdef CONFIG_COMPACTION
+ /*
+ * This is a gross workaround to compensate a lack of reliable compaction
+ * operation. We cannot simply go OOM with the current state of the compaction
+ * code because this can lead to pre mature OOM declaration.
+ */
+ if (order <= PAGE_ALLOC_COSTLY_ORDER)
+ return true;
+#endif
+
/*
* There are setups with compaction disabled which would prefer to loop
* inside the allocator rather than hit the oom killer prematurely.
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 8af9d25ff988..436a7537e6a9 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -77,7 +77,7 @@
(CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
(CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))

-#define CAN_BCM_VERSION "20160617"
+#define CAN_BCM_VERSION "20161123"

MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
@@ -109,8 +109,9 @@ struct bcm_op {
u32 count;
u32 nframes;
u32 currframe;
- struct canfd_frame *frames;
- struct canfd_frame *last_frames;
+ /* void pointers to arrays of struct can[fd]_frame */
+ void *frames;
+ void *last_frames;
struct canfd_frame sframe;
struct canfd_frame last_sframe;
struct sock *sk;
@@ -681,7 +682,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)

if (op->flags & RX_FILTER_ID) {
/* the easiest case */
- bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
+ bcm_rx_update_and_send(op, op->last_frames, rxframe);
goto rx_starttimer;
}

@@ -1068,7 +1069,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,

if (msg_head->nframes) {
/* update CAN frames content */
- err = memcpy_from_msg((u8 *)op->frames, msg,
+ err = memcpy_from_msg(op->frames, msg,
msg_head->nframes * op->cfsiz);
if (err < 0)
return err;
@@ -1118,7 +1119,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
}

if (msg_head->nframes) {
- err = memcpy_from_msg((u8 *)op->frames, msg,
+ err = memcpy_from_msg(op->frames, msg,
msg_head->nframes * op->cfsiz);
if (err < 0) {
if (op->frames != &op->sframe)
@@ -1163,6 +1164,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
/* check flags */

if (op->flags & RX_RTR_FRAME) {
+ struct canfd_frame *frame0 = op->frames;

/* no timers in RTR-mode */
hrtimer_cancel(&op->thrtimer);
@@ -1174,8 +1176,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
* prevent a full-load-loopback-test ... ;-]
*/
if ((op->flags & TX_CP_CAN_ID) ||
- (op->frames[0].can_id == op->can_id))
- op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;
+ (frame0->can_id == op->can_id))
+ frame0->can_id = op->can_id & ~CAN_RTR_FLAG;

} else {
if (op->flags & SETTIMER) {
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 5550a86f7264..396aac7e6e79 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -945,4 +945,4 @@ static int __init init_default_flow_dissectors(void)
return 0;
}

-late_initcall_sync(init_default_flow_dissectors);
+core_initcall(init_default_flow_dissectors);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index eee91443924d..66f2a1145d7c 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -71,6 +71,7 @@ struct cfg80211_registered_device {
struct list_head bss_list;
struct rb_root bss_tree;
u32 bss_generation;
+ u32 bss_entries;
struct cfg80211_scan_request *scan_req; /* protected by RTNL */
struct sk_buff *scan_msg;
struct cfg80211_sched_scan_request __rcu *sched_scan_req;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 0358e12be54b..438143a3827d 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -57,6 +57,19 @@
* also linked into the probe response struct.
*/

+/*
+ * Limit the number of BSS entries stored in mac80211. Each one is
+ * a bit over 4k at most, so this limits to roughly 4-5M of memory.
+ * If somebody wants to really attack this though, they'd likely
+ * use small beacons, and only one type of frame, limiting each of
+ * the entries to a much smaller size (in order to generate more
+ * entries in total, so overhead is bigger.)
+ */
+static int bss_entries_limit = 1000;
+module_param(bss_entries_limit, int, 0644);
+MODULE_PARM_DESC(bss_entries_limit,
+ "limit to number of scan BSS entries (per wiphy, default 1000)");
+
#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)

static void bss_free(struct cfg80211_internal_bss *bss)
@@ -137,6 +150,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,

list_del_init(&bss->list);
rb_erase(&bss->rbn, &rdev->bss_tree);
+ rdev->bss_entries--;
+ WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list),
+ "rdev bss entries[%d]/list[empty:%d] corruption\n",
+ rdev->bss_entries, list_empty(&rdev->bss_list));
bss_ref_put(rdev, bss);
return true;
}
@@ -163,6 +180,40 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
rdev->bss_generation++;
}

+static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev)
+{
+ struct cfg80211_internal_bss *bss, *oldest = NULL;
+ bool ret;
+
+ lockdep_assert_held(&rdev->bss_lock);
+
+ list_for_each_entry(bss, &rdev->bss_list, list) {
+ if (atomic_read(&bss->hold))
+ continue;
+
+ if (!list_empty(&bss->hidden_list) &&
+ !bss->pub.hidden_beacon_bss)
+ continue;
+
+ if (oldest && time_before(oldest->ts, bss->ts))
+ continue;
+ oldest = bss;
+ }
+
+ if (WARN_ON(!oldest))
+ return false;
+
+ /*
+ * The callers make sure to increase rdev->bss_generation if anything
+ * gets removed (and a new entry added), so there's no need to also do
+ * it here.
+ */
+
+ ret = __cfg80211_unlink_bss(rdev, oldest);
+ WARN_ON(!ret);
+ return ret;
+}
+
void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
bool send_message)
{
@@ -693,6 +744,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
const u8 *ie;
int i, ssidlen;
u8 fold = 0;
+ u32 n_entries = 0;

ies = rcu_access_pointer(new->pub.beacon_ies);
if (WARN_ON(!ies))
@@ -716,6 +768,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
/* This is the bad part ... */

list_for_each_entry(bss, &rdev->bss_list, list) {
+ /*
+ * we're iterating all the entries anyway, so take the
+ * opportunity to validate the list length accounting
+ */
+ n_entries++;
+
if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
continue;
if (bss->pub.channel != new->pub.channel)
@@ -744,6 +802,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
new->pub.beacon_ies);
}

+ WARN_ONCE(n_entries != rdev->bss_entries,
+ "rdev bss entries[%d]/list[len:%d] corruption\n",
+ rdev->bss_entries, n_entries);
+
return true;
}

@@ -898,7 +960,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
}
}

+ if (rdev->bss_entries >= bss_entries_limit &&
+ !cfg80211_bss_expire_oldest(rdev)) {
+ kfree(new);
+ goto drop;
+ }
+
list_add_tail(&new->list, &rdev->bss_list);
+ rdev->bss_entries++;
rb_insert_bss(rdev, new);
found = new;
}
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index fc3036b34e51..a4d90aa1045a 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -621,8 +621,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
/* released below */
cred = get_current_cred();
cxt = cred_cxt(cred);
- profile = aa_cred_profile(cred);
- previous_profile = cxt->previous;
+ profile = aa_get_newest_profile(aa_cred_profile(cred));
+ previous_profile = aa_get_newest_profile(cxt->previous);

if (unconfined(profile)) {
info = "unconfined";
@@ -718,6 +718,8 @@ audit:
out:
aa_put_profile(hat);
kfree(name);
+ aa_put_profile(profile);
+ aa_put_profile(previous_profile);
put_cred(cred);

return error;