Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.14/0100-3.14.1-all-fixes.patch



Revision 2437
Thu Jun 5 08:04:56 2014 UTC by niro
File size: 37902 byte(s)
-linux-3.14.5
1 niro 2437 diff --git a/Makefile b/Makefile
2     index e5ac8a62e6e5..7d0b6992d9ed 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 3
7     PATCHLEVEL = 14
8     -SUBLEVEL = 0
9     +SUBLEVEL = 1
10     EXTRAVERSION =
11     NAME = Shuffling Zombie Juror
12    
13     diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
14     index ea16d782af58..4f31b2eb5cdf 100644
15     --- a/arch/arc/boot/dts/nsimosci.dts
16     +++ b/arch/arc/boot/dts/nsimosci.dts
17     @@ -11,13 +11,16 @@
18    
19     / {
20     compatible = "snps,nsimosci";
21     - clock-frequency = <80000000>; /* 80 MHZ */
22     + clock-frequency = <20000000>; /* 20 MHZ */
23     #address-cells = <1>;
24     #size-cells = <1>;
25     interrupt-parent = <&intc>;
26    
27     chosen {
28     - bootargs = "console=tty0 consoleblank=0";
29     + /* this is for console on PGU */
30     + /* bootargs = "console=tty0 consoleblank=0"; */
31     + /* this is for console on serial */
32     + bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=ttyS0,115200n8 consoleblank=0 debug";
33     };
34    
35     aliases {
36     @@ -44,15 +47,14 @@
37     };
38    
39     uart0: serial@c0000000 {
40     - compatible = "snps,dw-apb-uart";
41     + compatible = "ns8250";
42     reg = <0xc0000000 0x2000>;
43     interrupts = <11>;
44     - #clock-frequency = <80000000>;
45     clock-frequency = <3686400>;
46     baud = <115200>;
47     reg-shift = <2>;
48     reg-io-width = <4>;
49     - status = "okay";
50     + no-loopback-test = <1>;
51     };
52    
53     pgu0: pgu@c9000000 {
54     diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
55     index 451af30914f6..c01ba35a4eff 100644
56     --- a/arch/arc/configs/nsimosci_defconfig
57     +++ b/arch/arc/configs/nsimosci_defconfig
58     @@ -54,6 +54,7 @@ CONFIG_SERIO_ARC_PS2=y
59     CONFIG_SERIAL_8250=y
60     CONFIG_SERIAL_8250_CONSOLE=y
61     CONFIG_SERIAL_8250_DW=y
62     +CONFIG_SERIAL_OF_PLATFORM=y
63     CONFIG_SERIAL_ARC=y
64     CONFIG_SERIAL_ARC_CONSOLE=y
65     # CONFIG_HW_RANDOM is not set
66     diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
67     index dbdd2231c75d..b2e322939256 100644
68     --- a/arch/m68k/Kconfig
69     +++ b/arch/m68k/Kconfig
70     @@ -17,6 +17,7 @@ config M68K
71     select FPU if MMU
72     select ARCH_WANT_IPC_PARSE_VERSION
73     select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
74     + select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
75     select HAVE_MOD_ARCH_SPECIFIC
76     select MODULES_USE_ELF_REL
77     select MODULES_USE_ELF_RELA
78     diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
79     index 65a07750f4f9..bb74b21f007a 100644
80     --- a/arch/s390/Kconfig
81     +++ b/arch/s390/Kconfig
82     @@ -117,6 +117,7 @@ config S390
83     select HAVE_FUNCTION_GRAPH_TRACER
84     select HAVE_FUNCTION_TRACER
85     select HAVE_FUNCTION_TRACE_MCOUNT_TEST
86     + select HAVE_FUTEX_CMPXCHG if FUTEX
87     select HAVE_KERNEL_BZIP2
88     select HAVE_KERNEL_GZIP
89     select HAVE_KERNEL_LZ4
90     diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
91     index 586f41aac361..185fad49d86f 100644
92     --- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
93     +++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
94     @@ -24,10 +24,6 @@
95     .align 16
96     .Lbswap_mask:
97     .octa 0x000102030405060708090a0b0c0d0e0f
98     -.Lpoly:
99     - .octa 0xc2000000000000000000000000000001
100     -.Ltwo_one:
101     - .octa 0x00000001000000000000000000000001
102    
103     #define DATA %xmm0
104     #define SHASH %xmm1
105     @@ -134,28 +130,3 @@ ENTRY(clmul_ghash_update)
106     .Lupdate_just_ret:
107     ret
108     ENDPROC(clmul_ghash_update)
109     -
110     -/*
111     - * void clmul_ghash_setkey(be128 *shash, const u8 *key);
112     - *
113     - * Calculate hash_key << 1 mod poly
114     - */
115     -ENTRY(clmul_ghash_setkey)
116     - movaps .Lbswap_mask, BSWAP
117     - movups (%rsi), %xmm0
118     - PSHUFB_XMM BSWAP %xmm0
119     - movaps %xmm0, %xmm1
120     - psllq $1, %xmm0
121     - psrlq $63, %xmm1
122     - movaps %xmm1, %xmm2
123     - pslldq $8, %xmm1
124     - psrldq $8, %xmm2
125     - por %xmm1, %xmm0
126     - # reduction
127     - pshufd $0b00100100, %xmm2, %xmm1
128     - pcmpeqd .Ltwo_one, %xmm1
129     - pand .Lpoly, %xmm1
130     - pxor %xmm1, %xmm0
131     - movups %xmm0, (%rdi)
132     - ret
133     -ENDPROC(clmul_ghash_setkey)
134     diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
135     index 6759dd1135be..d785cf2c529c 100644
136     --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
137     +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
138     @@ -30,8 +30,6 @@ void clmul_ghash_mul(char *dst, const be128 *shash);
139     void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
140     const be128 *shash);
141    
142     -void clmul_ghash_setkey(be128 *shash, const u8 *key);
143     -
144     struct ghash_async_ctx {
145     struct cryptd_ahash *cryptd_tfm;
146     };
147     @@ -58,13 +56,23 @@ static int ghash_setkey(struct crypto_shash *tfm,
148     const u8 *key, unsigned int keylen)
149     {
150     struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
151     + be128 *x = (be128 *)key;
152     + u64 a, b;
153    
154     if (keylen != GHASH_BLOCK_SIZE) {
155     crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
156     return -EINVAL;
157     }
158    
159     - clmul_ghash_setkey(&ctx->shash, key);
160     + /* perform multiplication by 'x' in GF(2^128) */
161     + a = be64_to_cpu(x->a);
162     + b = be64_to_cpu(x->b);
163     +
164     + ctx->shash.a = (__be64)((b << 1) | (a >> 63));
165     + ctx->shash.b = (__be64)((a << 1) | (b >> 63));
166     +
167     + if (a >> 63)
168     + ctx->shash.b ^= cpu_to_be64(0xc2);
169    
170     return 0;
171     }
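
The setkey() path above moves from assembly to C, reportedly because the old asm touched SSE registers without a kernel_fpu_begin()/kernel_fpu_end() pair. A minimal userspace model of the replacement arithmetic, using a hypothetical be128_model type and ignoring the byte-order conversions the kernel performs:

    #include <stdint.h>
    #include <stdio.h>

    struct be128_model { uint64_t a, b; }; /* a = high half, b = low half */

    /* Multiply the 128-bit key by 'x' in GF(2^128), folding in the GHASH
     * reduction constant 0xc2 when the shifted-out bit is set, mirroring
     * the patched ghash_setkey() above. */
    static struct be128_model mul_by_x(uint64_t a, uint64_t b)
    {
        struct be128_model h;

        h.a = (b << 1) | (a >> 63);
        h.b = (a << 1) | (b >> 63);
        if (a >> 63)
            h.b ^= 0xc2;
        return h;
    }

    int main(void)
    {
        struct be128_model h = mul_by_x(0x8000000000000000ULL, 1);

        printf("%016llx %016llx\n",
               (unsigned long long)h.a, (unsigned long long)h.b);
        return 0;
    }
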
172     diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
173     index acd86c850414..f949715e3957 100644
174     --- a/arch/x86/include/asm/efi.h
175     +++ b/arch/x86/include/asm/efi.h
176     @@ -130,7 +130,8 @@ extern void efi_memory_uc(u64 addr, unsigned long size);
177     extern void __init efi_map_region(efi_memory_desc_t *md);
178     extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
179     extern void efi_sync_low_kernel_mappings(void);
180     -extern void efi_setup_page_tables(void);
181     +extern int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
182     +extern void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
183     extern void __init old_map_region(efi_memory_desc_t *md);
184     extern void __init runtime_code_page_mkexec(void);
185     extern void __init efi_runtime_mkexec(void);
186     diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
187     index 1aa9ccd43223..94e40f1efdfd 100644
188     --- a/arch/x86/include/asm/pgtable_types.h
189     +++ b/arch/x86/include/asm/pgtable_types.h
190     @@ -385,6 +385,8 @@ extern pte_t *lookup_address(unsigned long address, unsigned int *level);
191     extern phys_addr_t slow_virt_to_phys(void *__address);
192     extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
193     unsigned numpages, unsigned long page_flags);
194     +void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
195     + unsigned numpages);
196     #endif /* !__ASSEMBLY__ */
197    
198     #endif /* _ASM_X86_PGTABLE_DEFS_H */
199     diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
200     index b3b19f46c016..a3488689e301 100644
201     --- a/arch/x86/mm/pageattr.c
202     +++ b/arch/x86/mm/pageattr.c
203     @@ -692,6 +692,18 @@ static bool try_to_free_pmd_page(pmd_t *pmd)
204     return true;
205     }
206    
207     +static bool try_to_free_pud_page(pud_t *pud)
208     +{
209     + int i;
210     +
211     + for (i = 0; i < PTRS_PER_PUD; i++)
212     + if (!pud_none(pud[i]))
213     + return false;
214     +
215     + free_page((unsigned long)pud);
216     + return true;
217     +}
218     +
219     static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
220     {
221     pte_t *pte = pte_offset_kernel(pmd, start);
222     @@ -805,6 +817,16 @@ static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
223     */
224     }
225    
226     +static void unmap_pgd_range(pgd_t *root, unsigned long addr, unsigned long end)
227     +{
228     + pgd_t *pgd_entry = root + pgd_index(addr);
229     +
230     + unmap_pud_range(pgd_entry, addr, end);
231     +
232     + if (try_to_free_pud_page((pud_t *)pgd_page_vaddr(*pgd_entry)))
233     + pgd_clear(pgd_entry);
234     +}
235     +
236     static int alloc_pte_page(pmd_t *pmd)
237     {
238     pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
239     @@ -999,9 +1021,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
240     static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
241     {
242     pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
243     - bool allocd_pgd = false;
244     - pgd_t *pgd_entry;
245     pud_t *pud = NULL; /* shut up gcc */
246     + pgd_t *pgd_entry;
247     int ret;
248    
249     pgd_entry = cpa->pgd + pgd_index(addr);
250     @@ -1015,7 +1036,6 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
251     return -1;
252    
253     set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
254     - allocd_pgd = true;
255     }
256    
257     pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
258     @@ -1023,19 +1043,11 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
259    
260     ret = populate_pud(cpa, addr, pgd_entry, pgprot);
261     if (ret < 0) {
262     - unmap_pud_range(pgd_entry, addr,
263     + unmap_pgd_range(cpa->pgd, addr,
264     addr + (cpa->numpages << PAGE_SHIFT));
265     -
266     - if (allocd_pgd) {
267     - /*
268     - * If I allocated this PUD page, I can just as well
269     - * free it in this error path.
270     - */
271     - pgd_clear(pgd_entry);
272     - free_page((unsigned long)pud);
273     - }
274     return ret;
275     }
276     +
277     cpa->numpages = ret;
278     return 0;
279     }
280     @@ -1861,6 +1873,12 @@ out:
281     return retval;
282     }
283    
284     +void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
285     + unsigned numpages)
286     +{
287     + unmap_pgd_range(root, address, address + (numpages << PAGE_SHIFT));
288     +}
289     +
290     /*
291     * The testcases use internal knowledge of the implementation that shouldn't
292     * be exposed to the rest of the kernel. Include these directly here.
293     diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
294     index b97acecf3fd9..abb81b0ad83f 100644
295     --- a/arch/x86/platform/efi/efi.c
296     +++ b/arch/x86/platform/efi/efi.c
297     @@ -939,14 +939,36 @@ static void __init efi_map_regions_fixed(void)
298    
299     }
300    
301     +static void *realloc_pages(void *old_memmap, int old_shift)
302     +{
303     + void *ret;
304     +
305     + ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
306     + if (!ret)
307     + goto out;
308     +
309     + /*
310     + * A first-time allocation doesn't have anything to copy.
311     + */
312     + if (!old_memmap)
313     + return ret;
314     +
315     + memcpy(ret, old_memmap, PAGE_SIZE << old_shift);
316     +
317     +out:
318     + free_pages((unsigned long)old_memmap, old_shift);
319     + return ret;
320     +}
321     +
322     /*
323     - * Map efi memory ranges for runtime serivce and update new_memmap with virtual
324     - * addresses.
325     + * Map the efi memory ranges of the runtime services and update new_mmap with
326     + * virtual addresses.
327     */
328     -static void * __init efi_map_regions(int *count)
329     +static void * __init efi_map_regions(int *count, int *pg_shift)
330     {
331     + void *p, *new_memmap = NULL;
332     + unsigned long left = 0;
333     efi_memory_desc_t *md;
334     - void *p, *tmp, *new_memmap = NULL;
335    
336     for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
337     md = p;
338     @@ -961,20 +983,23 @@ static void * __init efi_map_regions(int *count)
339     efi_map_region(md);
340     get_systab_virt_addr(md);
341    
342     - tmp = krealloc(new_memmap, (*count + 1) * memmap.desc_size,
343     - GFP_KERNEL);
344     - if (!tmp)
345     - goto out;
346     - new_memmap = tmp;
347     + if (left < memmap.desc_size) {
348     + new_memmap = realloc_pages(new_memmap, *pg_shift);
349     + if (!new_memmap)
350     + return NULL;
351     +
352     + left += PAGE_SIZE << *pg_shift;
353     + (*pg_shift)++;
354     + }
355     +
356     memcpy(new_memmap + (*count * memmap.desc_size), md,
357     memmap.desc_size);
358     +
359     + left -= memmap.desc_size;
360     (*count)++;
361     }
362    
363     return new_memmap;
364     -out:
365     - kfree(new_memmap);
366     - return NULL;
367     }
368    
369     /*
370     @@ -1000,9 +1025,9 @@ out:
371     */
372     void __init efi_enter_virtual_mode(void)
373     {
374     - efi_status_t status;
375     + int err, count = 0, pg_shift = 0;
376     void *new_memmap = NULL;
377     - int err, count = 0;
378     + efi_status_t status;
379    
380     efi.systab = NULL;
381    
382     @@ -1019,20 +1044,24 @@ void __init efi_enter_virtual_mode(void)
383     efi_map_regions_fixed();
384     } else {
385     efi_merge_regions();
386     - new_memmap = efi_map_regions(&count);
387     + new_memmap = efi_map_regions(&count, &pg_shift);
388     if (!new_memmap) {
389     pr_err("Error reallocating memory, EFI runtime non-functional!\n");
390     return;
391     }
392     - }
393    
394     - err = save_runtime_map();
395     - if (err)
396     - pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n");
397     + err = save_runtime_map();
398     + if (err)
399     + pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n");
400     + }
401    
402     BUG_ON(!efi.systab);
403    
404     - efi_setup_page_tables();
405     + if (!efi_setup) {
406     + if (efi_setup_page_tables(__pa(new_memmap), 1 << pg_shift))
407     + return;
408     + }
409     +
410     efi_sync_low_kernel_mappings();
411    
412     if (!efi_setup) {
413     @@ -1072,7 +1101,35 @@ void __init efi_enter_virtual_mode(void)
414    
415     efi_runtime_mkexec();
416    
417     - kfree(new_memmap);
418     +
419     + /*
420     + * We mapped the descriptor array into the EFI pagetable above but we're
421     + * not unmapping it here. Here's why:
422     + *
423     + * We're copying select PGDs from the kernel page table to the EFI page
424     + * table and when we do so and make changes to those PGDs like unmapping
425     + * stuff from them, those changes appear in the kernel page table and we
426     + * go boom.
427     + *
428     + * From setup_real_mode():
429     + *
430     + * ...
431     + * trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
432     + *
433     + * In this particular case, our allocation is in PGD 0 of the EFI page
434     + * table but we've copied that PGD from PGD[272] of the EFI page table:
435     + *
436     + * pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272
437     + *
438     + * where the direct memory mapping in kernel space is.
439     + *
440     + * new_memmap's VA comes from that direct mapping and thus clearing it,
441     + * it would get cleared in the kernel page table too.
442     + *
443     + * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
444     + */
445     + if (!efi_setup)
446     + free_pages((unsigned long)new_memmap, pg_shift);
447    
448     /* clean DUMMY object */
449     efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
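
The realloc_pages()/efi_map_regions() rework above exists because the new memmap must now occupy whole pages so that efi_setup_page_tables() can ident-map it; krealloc() gave no such guarantee. Each doubling adds PAGE_SIZE << *pg_shift bytes to the caller's 'left' budget, so descriptors are appended with memcpy() instead of a per-descriptor krealloc(). A rough userspace model of the growth step, with malloc() standing in for __get_free_pages():

    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL

    /* Grow from (1 << old_shift) pages to (1 << (old_shift + 1)) pages,
     * copying the old contents across; the old buffer is released on both
     * the success and failure paths, as in realloc_pages() above. */
    static void *realloc_pages_model(void *old, int old_shift)
    {
        void *ret = malloc(PAGE_SIZE << (old_shift + 1));

        if (ret && old)
            memcpy(ret, old, PAGE_SIZE << old_shift);
        free(old); /* free(NULL) is a no-op, like free_pages(0, ...) */
        return ret;
    }
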
450     diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
451     index 0b74cdf7f816..9ee3491e31fb 100644
452     --- a/arch/x86/platform/efi/efi_32.c
453     +++ b/arch/x86/platform/efi/efi_32.c
454     @@ -40,7 +40,12 @@
455     static unsigned long efi_rt_eflags;
456    
457     void efi_sync_low_kernel_mappings(void) {}
458     -void efi_setup_page_tables(void) {}
459     +void __init efi_dump_pagetable(void) {}
460     +int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
461     +{
462     + return 0;
463     +}
464     +void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages) {}
465    
466     void __init efi_map_region(efi_memory_desc_t *md)
467     {
468     diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
469     index 0c2a234fef1e..666b74a09092 100644
470     --- a/arch/x86/platform/efi/efi_64.c
471     +++ b/arch/x86/platform/efi/efi_64.c
472     @@ -137,12 +137,38 @@ void efi_sync_low_kernel_mappings(void)
473     sizeof(pgd_t) * num_pgds);
474     }
475    
476     -void efi_setup_page_tables(void)
477     +int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
478     {
479     + pgd_t *pgd;
480     +
481     + if (efi_enabled(EFI_OLD_MEMMAP))
482     + return 0;
483     +
484     efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
485     + pgd = __va(efi_scratch.efi_pgt);
486    
487     - if (!efi_enabled(EFI_OLD_MEMMAP))
488     - efi_scratch.use_pgd = true;
489     + /*
490     + * It can happen that the physical address of new_memmap lands in memory
491     + * which is not mapped in the EFI page table. Therefore we need to go
492     + * and ident-map those pages containing the map before calling
493     + * phys_efi_set_virtual_address_map().
494     + */
495     + if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
496     + pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
497     + return 1;
498     + }
499     +
500     + efi_scratch.use_pgd = true;
501     +
502     +
503     + return 0;
504     +}
505     +
506     +void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
507     +{
508     + pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
509     +
510     + kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages);
511     }
512    
513     static void __init __map_region(efi_memory_desc_t *md, u64 va)
514     diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
515     index 02125e6a9109..5a4da94aefb0 100644
516     --- a/drivers/isdn/isdnloop/isdnloop.c
517     +++ b/drivers/isdn/isdnloop/isdnloop.c
518     @@ -518,9 +518,9 @@ static isdnloop_stat isdnloop_cmd_table[] =
519     static void
520     isdnloop_fake_err(isdnloop_card *card)
521     {
522     - char buf[60];
523     + char buf[64];
524    
525     - sprintf(buf, "E%s", card->omsg);
526     + snprintf(buf, sizeof(buf), "E%s", card->omsg);
527     isdnloop_fake(card, buf, -1);
528     isdnloop_fake(card, "NAK", -1);
529     }
530     @@ -903,6 +903,8 @@ isdnloop_parse_cmd(isdnloop_card *card)
531     case 7:
532     /* 0x;EAZ */
533     p += 3;
534     + if (strlen(p) >= sizeof(card->eazlist[0]))
535     + break;
536     strcpy(card->eazlist[ch - 1], p);
537     break;
538     case 8:
539     @@ -1070,6 +1072,12 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
540     return -EBUSY;
541     if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef)))
542     return -EFAULT;
543     +
544     + for (i = 0; i < 3; i++) {
545     + if (!memchr(sdef.num[i], 0, sizeof(sdef.num[i])))
546     + return -EINVAL;
547     + }
548     +
549     spin_lock_irqsave(&card->isdnloop_lock, flags);
550     switch (sdef.ptype) {
551     case ISDN_PTYPE_EURO:
552     @@ -1127,7 +1135,7 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
553     {
554     ulong a;
555     int i;
556     - char cbuf[60];
557     + char cbuf[80];
558     isdn_ctrl cmd;
559     isdnloop_cdef cdef;
560    
561     @@ -1192,7 +1200,6 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
562     break;
563     if ((c->arg & 255) < ISDNLOOP_BCH) {
564     char *p;
565     - char dial[50];
566     char dcode[4];
567    
568     a = c->arg;
569     @@ -1204,10 +1211,10 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
570     } else
571     /* Normal Dial */
572     strcpy(dcode, "CAL");
573     - strcpy(dial, p);
574     - sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
575     - dcode, dial, c->parm.setup.si1,
576     - c->parm.setup.si2, c->parm.setup.eazmsn);
577     + snprintf(cbuf, sizeof(cbuf),
578     + "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
579     + dcode, p, c->parm.setup.si1,
580     + c->parm.setup.si2, c->parm.setup.eazmsn);
581     i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
582     }
583     break;
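
Both isdnloop hunks have the same shape: fixed stack buffers were filled with sprintf() from strings the peer controls, so the patch sizes the buffers up, validates the EAZ/number strings before strcpy(), and switches to snprintf(), which never writes past the given bound and always NUL-terminates. A small demonstration of the behavior being relied on:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char cbuf[80];
        /* An oversized, attacker-style dial string. */
        const char *dial = "00000000001111111111222222222233333333334444444444"
                           "55555555556666666666";

        int wanted = snprintf(cbuf, sizeof(cbuf),
                              "%02d;D%s_R%s,%02d,%02d,%s\n",
                              1, "CAL", dial, 7, 0, "12345");

        /* wanted may exceed 79, but at most 79 chars plus the NUL landed. */
        printf("wanted=%d stored=%zu\n", wanted, strlen(cbuf));
        return 0;
    }
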
584     diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
585     index ce75de9bae9e..4a79edaf3885 100644
586     --- a/drivers/net/ethernet/cadence/at91_ether.c
587     +++ b/drivers/net/ethernet/cadence/at91_ether.c
588     @@ -342,6 +342,9 @@ static int __init at91ether_probe(struct platform_device *pdev)
589     }
590     clk_enable(lp->pclk);
591    
592     + lp->hclk = ERR_PTR(-ENOENT);
593     + lp->tx_clk = ERR_PTR(-ENOENT);
594     +
595     /* Install the interrupt handler */
596     dev->irq = platform_get_irq(pdev, 0);
597     res = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 0, dev->name, dev);
598     diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
599     index 174a92f5fe51..7645a3ce3854 100644
600     --- a/drivers/net/ethernet/sfc/ef10.c
601     +++ b/drivers/net/ethernet/sfc/ef10.c
602     @@ -565,10 +565,17 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
603     * several of each (in fact that's the only option if host
604     * page size is >4K). So we may allocate some extra VIs just
605     * for writing PIO buffers through.
606     + *
607     + * The UC mapping contains (min_vis - 1) complete VIs and the
608     + * first half of the next VI. Then the WC mapping begins with
609     + * the second half of this last VI.
610     */
611     uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
612     ER_DZ_TX_PIOBUF);
613     if (nic_data->n_piobufs) {
614     + /* pio_write_vi_base rounds down to give the number of complete
615     + * VIs inside the UC mapping.
616     + */
617     pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
618     wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
619     nic_data->n_piobufs) *
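
The comments added above describe integer arithmetic that is easy to check by hand. A worked example with assumed numbers (an EFX_VI_PAGE_SIZE of 8192, a 4096-byte ER_DZ_TX_PIOBUF offset, min_vis = 10, two PIO buffers; only the shapes matter, not the exact constants):

    #include <stdio.h>

    #define PAGE_ALIGN(x) (((x) + 4095UL) & ~4095UL)

    int main(void)
    {
        unsigned long vi_page = 8192, piobuf_off = 4096;  /* assumed */
        unsigned long min_vis = 10, n_piobufs = 2;        /* assumed */

        /* UC mapping: (min_vis - 1) whole VIs plus the first half of the
         * next VI, rounded up to a page boundary. */
        unsigned long uc = PAGE_ALIGN((min_vis - 1) * vi_page + piobuf_off);

        /* Rounds down, giving the number of complete VIs in the UC map;
         * the WC mapping then begins with the second half of that VI. */
        unsigned long pio_base = uc / vi_page;
        unsigned long wc = PAGE_ALIGN((pio_base + n_piobufs) * vi_page) - uc;

        printf("uc=%lu pio_write_vi_base=%lu wc=%lu\n", uc, pio_base, wc);
        return 0;
    }
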
620     diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
621     index 83d464347021..f06c790fba5a 100644
622     --- a/drivers/net/ethernet/sfc/efx.c
623     +++ b/drivers/net/ethernet/sfc/efx.c
624     @@ -1603,6 +1603,8 @@ static int efx_probe_nic(struct efx_nic *efx)
625     if (rc)
626     goto fail1;
627    
628     + efx_set_channels(efx);
629     +
630     rc = efx->type->dimension_resources(efx);
631     if (rc)
632     goto fail2;
633     @@ -1613,7 +1615,6 @@ static int efx_probe_nic(struct efx_nic *efx)
634     efx->rx_indir_table[i] =
635     ethtool_rxfh_indir_default(i, efx->rss_spread);
636    
637     - efx_set_channels(efx);
638     netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
639     netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
640    
641     diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
642     index 1236812c7be6..d091e52b00e1 100644
643     --- a/drivers/net/vxlan.c
644     +++ b/drivers/net/vxlan.c
645     @@ -871,6 +871,9 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
646     if (err)
647     return err;
648    
649     + if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
650     + return -EAFNOSUPPORT;
651     +
652     spin_lock_bh(&vxlan->hash_lock);
653     err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
654     port, vni, ifindex, ndm->ndm_flags);
655     @@ -2612,9 +2615,10 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
656     vni = nla_get_u32(data[IFLA_VXLAN_ID]);
657     dst->remote_vni = vni;
658    
659     + /* Unless IPv6 is explicitly requested, assume IPv4 */
660     + dst->remote_ip.sa.sa_family = AF_INET;
661     if (data[IFLA_VXLAN_GROUP]) {
662     dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
663     - dst->remote_ip.sa.sa_family = AF_INET;
664     } else if (data[IFLA_VXLAN_GROUP6]) {
665     if (!IS_ENABLED(CONFIG_IPV6))
666     return -EPFNOSUPPORT;
667     diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
668     index 6abf74e1351f..5bc871513505 100644
669     --- a/drivers/net/wireless/iwlwifi/mvm/rs.c
670     +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
671     @@ -211,9 +211,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
672     .next_columns = {
673     RS_COLUMN_LEGACY_ANT_B,
674     RS_COLUMN_SISO_ANT_A,
675     + RS_COLUMN_SISO_ANT_B,
676     RS_COLUMN_MIMO2,
677     - RS_COLUMN_INVALID,
678     - RS_COLUMN_INVALID,
679     + RS_COLUMN_MIMO2_SGI,
680     },
681     },
682     [RS_COLUMN_LEGACY_ANT_B] = {
683     @@ -221,10 +221,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
684     .ant = ANT_B,
685     .next_columns = {
686     RS_COLUMN_LEGACY_ANT_A,
687     + RS_COLUMN_SISO_ANT_A,
688     RS_COLUMN_SISO_ANT_B,
689     RS_COLUMN_MIMO2,
690     - RS_COLUMN_INVALID,
691     - RS_COLUMN_INVALID,
692     + RS_COLUMN_MIMO2_SGI,
693     },
694     },
695     [RS_COLUMN_SISO_ANT_A] = {
696     @@ -234,8 +234,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
697     RS_COLUMN_SISO_ANT_B,
698     RS_COLUMN_MIMO2,
699     RS_COLUMN_SISO_ANT_A_SGI,
700     - RS_COLUMN_INVALID,
701     - RS_COLUMN_INVALID,
702     + RS_COLUMN_SISO_ANT_B_SGI,
703     + RS_COLUMN_MIMO2_SGI,
704     },
705     .checks = {
706     rs_siso_allow,
707     @@ -248,8 +248,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
708     RS_COLUMN_SISO_ANT_A,
709     RS_COLUMN_MIMO2,
710     RS_COLUMN_SISO_ANT_B_SGI,
711     - RS_COLUMN_INVALID,
712     - RS_COLUMN_INVALID,
713     + RS_COLUMN_SISO_ANT_A_SGI,
714     + RS_COLUMN_MIMO2_SGI,
715     },
716     .checks = {
717     rs_siso_allow,
718     @@ -263,8 +263,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
719     RS_COLUMN_SISO_ANT_B_SGI,
720     RS_COLUMN_MIMO2_SGI,
721     RS_COLUMN_SISO_ANT_A,
722     - RS_COLUMN_INVALID,
723     - RS_COLUMN_INVALID,
724     + RS_COLUMN_SISO_ANT_B,
725     + RS_COLUMN_MIMO2,
726     },
727     .checks = {
728     rs_siso_allow,
729     @@ -279,8 +279,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
730     RS_COLUMN_SISO_ANT_A_SGI,
731     RS_COLUMN_MIMO2_SGI,
732     RS_COLUMN_SISO_ANT_B,
733     - RS_COLUMN_INVALID,
734     - RS_COLUMN_INVALID,
735     + RS_COLUMN_SISO_ANT_A,
736     + RS_COLUMN_MIMO2,
737     },
738     .checks = {
739     rs_siso_allow,
740     @@ -292,10 +292,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
741     .ant = ANT_AB,
742     .next_columns = {
743     RS_COLUMN_SISO_ANT_A,
744     + RS_COLUMN_SISO_ANT_B,
745     + RS_COLUMN_SISO_ANT_A_SGI,
746     + RS_COLUMN_SISO_ANT_B_SGI,
747     RS_COLUMN_MIMO2_SGI,
748     - RS_COLUMN_INVALID,
749     - RS_COLUMN_INVALID,
750     - RS_COLUMN_INVALID,
751     },
752     .checks = {
753     rs_mimo_allow,
754     @@ -307,10 +307,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
755     .sgi = true,
756     .next_columns = {
757     RS_COLUMN_SISO_ANT_A_SGI,
758     + RS_COLUMN_SISO_ANT_B_SGI,
759     + RS_COLUMN_SISO_ANT_A,
760     + RS_COLUMN_SISO_ANT_B,
761     RS_COLUMN_MIMO2,
762     - RS_COLUMN_INVALID,
763     - RS_COLUMN_INVALID,
764     - RS_COLUMN_INVALID,
765     },
766     .checks = {
767     rs_mimo_allow,
768     diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
769     index ae413a2cbee7..4bf5b334664e 100644
770     --- a/drivers/net/xen-netback/common.h
771     +++ b/drivers/net/xen-netback/common.h
772     @@ -113,6 +113,11 @@ struct xenvif {
773     domid_t domid;
774     unsigned int handle;
775    
776     + /* Is this interface disabled? True when backend discovers
777     + * frontend is rogue.
778     + */
779     + bool disabled;
780     +
781     /* Use NAPI for guest TX */
782     struct napi_struct napi;
783     /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
784     diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
785     index 301cc037fda8..2e92d52c0a6d 100644
786     --- a/drivers/net/xen-netback/interface.c
787     +++ b/drivers/net/xen-netback/interface.c
788     @@ -62,6 +62,15 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
789     struct xenvif *vif = container_of(napi, struct xenvif, napi);
790     int work_done;
791    
792     + /* This vif is rogue, we pretend we've there is nothing to do
793     + * for this vif to deschedule it from NAPI. But this interface
794     + * will be turned off in thread context later.
795     + */
796     + if (unlikely(vif->disabled)) {
797     + napi_complete(napi);
798     + return 0;
799     + }
800     +
801     work_done = xenvif_tx_action(vif, budget);
802    
803     if (work_done < budget) {
804     @@ -321,6 +330,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
805     vif->ip_csum = 1;
806     vif->dev = dev;
807    
808     + vif->disabled = false;
809     +
810     vif->credit_bytes = vif->remaining_credit = ~0UL;
811     vif->credit_usec = 0UL;
812     init_timer(&vif->credit_timeout);
813     diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
814     index 438d0c09b7e6..97030c193afd 100644
815     --- a/drivers/net/xen-netback/netback.c
816     +++ b/drivers/net/xen-netback/netback.c
817     @@ -192,8 +192,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
818     * into multiple copies tend to give large frags their
819     * own buffers as before.
820     */
821     - if ((offset + size > MAX_BUFFER_OFFSET) &&
822     - (size <= MAX_BUFFER_OFFSET) && offset && !head)
823     + BUG_ON(size > MAX_BUFFER_OFFSET);
824     + if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
825     return true;
826    
827     return false;
828     @@ -482,6 +482,8 @@ static void xenvif_rx_action(struct xenvif *vif)
829    
830     while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
831     RING_IDX max_slots_needed;
832     + RING_IDX old_req_cons;
833     + RING_IDX ring_slots_used;
834     int i;
835    
836     /* We need a cheap worse case estimate for the number of
837     @@ -493,9 +495,28 @@ static void xenvif_rx_action(struct xenvif *vif)
838     PAGE_SIZE);
839     for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
840     unsigned int size;
841     + unsigned int offset;
842     +
843     size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
844     - max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
845     + offset = skb_shinfo(skb)->frags[i].page_offset;
846     +
847     + /* For a worse-case estimate we need to factor in
848     + * the fragment page offset as this will affect the
849     + * number of times xenvif_gop_frag_copy() will
850     + * call start_new_rx_buffer().
851     + */
852     + max_slots_needed += DIV_ROUND_UP(offset + size,
853     + PAGE_SIZE);
854     }
855     +
856     + /* To avoid the estimate becoming too pessimal for some
857     + * frontends that limit posted rx requests, cap the estimate
858     + * at MAX_SKB_FRAGS.
859     + */
860     + if (max_slots_needed > MAX_SKB_FRAGS)
861     + max_slots_needed = MAX_SKB_FRAGS;
862     +
863     + /* We may need one more slot for GSO metadata */
864     if (skb_is_gso(skb) &&
865     (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
866     skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
867     @@ -511,8 +532,12 @@ static void xenvif_rx_action(struct xenvif *vif)
868     vif->rx_last_skb_slots = 0;
869    
870     sco = (struct skb_cb_overlay *)skb->cb;
871     +
872     + old_req_cons = vif->rx.req_cons;
873     sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
874     - BUG_ON(sco->meta_slots_used > max_slots_needed);
875     + ring_slots_used = vif->rx.req_cons - old_req_cons;
876     +
877     + BUG_ON(ring_slots_used > max_slots_needed);
878    
879     __skb_queue_tail(&rxq, skb);
880     }
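
The estimate change in this hunk is pure arithmetic: a fragment that starts at a non-zero page offset can straddle one more page than its size alone suggests, so the old DIV_ROUND_UP(size, PAGE_SIZE) undercounted and the slot check could trip. A two-line check, assuming 4K pages:

    #include <stdio.h>

    #define PAGE_SIZE 4096U
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int size = 4096, offset = 2048;

        /* Old estimate: a 4K frag needs one slot. */
        unsigned int old = DIV_ROUND_UP(size, PAGE_SIZE);
        /* New estimate: shifted by 2K, the same frag spans two pages. */
        unsigned int new = DIV_ROUND_UP(offset + size, PAGE_SIZE);

        printf("old=%u new=%u\n", old, new);
        return 0;
    }
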
881     @@ -655,7 +680,8 @@ static void xenvif_tx_err(struct xenvif *vif,
882     static void xenvif_fatal_tx_err(struct xenvif *vif)
883     {
884     netdev_err(vif->dev, "fatal error; disabling device\n");
885     - xenvif_carrier_off(vif);
886     + vif->disabled = true;
887     + xenvif_kick_thread(vif);
888     }
889    
890     static int xenvif_count_requests(struct xenvif *vif,
891     @@ -1126,7 +1152,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
892     vif->tx.sring->req_prod, vif->tx.req_cons,
893     XEN_NETIF_TX_RING_SIZE);
894     xenvif_fatal_tx_err(vif);
895     - continue;
896     + break;
897     }
898    
899     work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
900     @@ -1548,7 +1574,18 @@ int xenvif_kthread(void *data)
901     while (!kthread_should_stop()) {
902     wait_event_interruptible(vif->wq,
903     rx_work_todo(vif) ||
904     + vif->disabled ||
905     kthread_should_stop());
906     +
907     + /* This frontend is found to be rogue, disable it in
908     + * kthread context. Currently this is only set when
909     + * netback finds out frontend sends malformed packet,
910     + * but we cannot disable the interface in softirq
911     + * context so we defer it here.
912     + */
913     + if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
914     + xenvif_carrier_off(vif);
915     +
916     if (kthread_should_stop())
917     break;
918    
919     diff --git a/include/linux/futex.h b/include/linux/futex.h
920     index b0d95cac826e..6435f46d6e13 100644
921     --- a/include/linux/futex.h
922     +++ b/include/linux/futex.h
923     @@ -55,7 +55,11 @@ union futex_key {
924     #ifdef CONFIG_FUTEX
925     extern void exit_robust_list(struct task_struct *curr);
926     extern void exit_pi_state_list(struct task_struct *curr);
927     +#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
928     +#define futex_cmpxchg_enabled 1
929     +#else
930     extern int futex_cmpxchg_enabled;
931     +#endif
932     #else
933     static inline void exit_robust_list(struct task_struct *curr)
934     {
935     diff --git a/init/Kconfig b/init/Kconfig
936     index 009a797dd242..d56cb03c1b49 100644
937     --- a/init/Kconfig
938     +++ b/init/Kconfig
939     @@ -1387,6 +1387,13 @@ config FUTEX
940     support for "fast userspace mutexes". The resulting kernel may not
941     run glibc-based applications correctly.
942    
943     +config HAVE_FUTEX_CMPXCHG
944     + bool
945     + help
946     + Architectures should select this if futex_atomic_cmpxchg_inatomic()
947     + is implemented and always working. This removes a couple of runtime
948     + checks.
949     +
950     config EPOLL
951     bool "Enable eventpoll support" if EXPERT
952     default y
953     diff --git a/kernel/futex.c b/kernel/futex.c
954     index 08ec814ad9d2..6801b3751a95 100644
955     --- a/kernel/futex.c
956     +++ b/kernel/futex.c
957     @@ -157,7 +157,9 @@
958     * enqueue.
959     */
960    
961     +#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
962     int __read_mostly futex_cmpxchg_enabled;
963     +#endif
964    
965     /*
966     * Futex flags used to encode options to functions and preserve them across
967     @@ -1450,6 +1452,7 @@ retry:
968     hb2 = hash_futex(&key2);
969    
970     retry_private:
971     + hb_waiters_inc(hb2);
972     double_lock_hb(hb1, hb2);
973    
974     if (likely(cmpval != NULL)) {
975     @@ -1459,6 +1462,7 @@ retry_private:
976    
977     if (unlikely(ret)) {
978     double_unlock_hb(hb1, hb2);
979     + hb_waiters_dec(hb2);
980    
981     ret = get_user(curval, uaddr1);
982     if (ret)
983     @@ -1508,6 +1512,7 @@ retry_private:
984     break;
985     case -EFAULT:
986     double_unlock_hb(hb1, hb2);
987     + hb_waiters_dec(hb2);
988     put_futex_key(&key2);
989     put_futex_key(&key1);
990     ret = fault_in_user_writeable(uaddr2);
991     @@ -1517,6 +1522,7 @@ retry_private:
992     case -EAGAIN:
993     /* The owner was exiting, try again. */
994     double_unlock_hb(hb1, hb2);
995     + hb_waiters_dec(hb2);
996     put_futex_key(&key2);
997     put_futex_key(&key1);
998     cond_resched();
999     @@ -1592,6 +1598,7 @@ retry_private:
1000    
1001     out_unlock:
1002     double_unlock_hb(hb1, hb2);
1003     + hb_waiters_dec(hb2);
1004    
1005     /*
1006     * drop_futex_key_refs() must be called outside the spinlocks. During
1007     @@ -2875,9 +2882,28 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
1008     return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
1009     }
1010    
1011     -static int __init futex_init(void)
1012     +static void __init futex_detect_cmpxchg(void)
1013     {
1014     +#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
1015     u32 curval;
1016     +
1017     + /*
1018     + * This will fail and we want it. Some arch implementations do
1019     + * runtime detection of the futex_atomic_cmpxchg_inatomic()
1020     + * functionality. We want to know that before we call in any
1021     + * of the complex code paths. Also we want to prevent
1022     + * registration of robust lists in that case. NULL is
1023     + * guaranteed to fault and we get -EFAULT on functional
1024     + * implementation, the non-functional ones will return
1025     + * -ENOSYS.
1026     + */
1027     + if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
1028     + futex_cmpxchg_enabled = 1;
1029     +#endif
1030     +}
1031     +
1032     +static int __init futex_init(void)
1033     +{
1034     unsigned int futex_shift;
1035     unsigned long i;
1036    
1037     @@ -2893,18 +2919,8 @@ static int __init futex_init(void)
1038     &futex_shift, NULL,
1039     futex_hashsize, futex_hashsize);
1040     futex_hashsize = 1UL << futex_shift;
1041     - /*
1042     - * This will fail and we want it. Some arch implementations do
1043     - * runtime detection of the futex_atomic_cmpxchg_inatomic()
1044     - * functionality. We want to know that before we call in any
1045     - * of the complex code paths. Also we want to prevent
1046     - * registration of robust lists in that case. NULL is
1047     - * guaranteed to fault and we get -EFAULT on functional
1048     - * implementation, the non-functional ones will return
1049     - * -ENOSYS.
1050     - */
1051     - if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
1052     - futex_cmpxchg_enabled = 1;
1053     +
1054     + futex_detect_cmpxchg();
1055    
1056     for (i = 0; i < futex_hashsize; i++) {
1057     atomic_set(&futex_queues[i].waiters, 0);
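
The probe hoisted into futex_detect_cmpxchg() leans on a deliberate fault: a working futex_atomic_cmpxchg_inatomic() called on a NULL user address returns -EFAULT, while an unimplemented one returns -ENOSYS. On architectures that select HAVE_FUTEX_CMPXCHG (the m68k and s390 hunks earlier in this patch), futex_cmpxchg_enabled becomes the constant 1 and the probe compiles away. A userspace sketch of the pattern, with a hypothetical stand-in for the arch helper:

    #include <stdio.h>

    #define EFAULT 14

    /* Hypothetical model of the arch primitive: pretend it exists, so a
     * NULL address "faults" rather than reporting -ENOSYS. */
    static int cmpxchg_futex_value_locked_model(unsigned int *curval,
                                                unsigned int *uaddr,
                                                unsigned int old,
                                                unsigned int new)
    {
        (void)curval; (void)old; (void)new;
        return uaddr ? 0 : -EFAULT;
    }

    #ifdef CONFIG_HAVE_FUTEX_CMPXCHG
    #define futex_cmpxchg_enabled 1   /* constant: no runtime probe */
    #else
    static int futex_cmpxchg_enabled;

    static void futex_detect_cmpxchg(void)
    {
        unsigned int curval;

        if (cmpxchg_futex_value_locked_model(&curval, NULL, 0, 0) == -EFAULT)
            futex_cmpxchg_enabled = 1;
    }
    #endif

    int main(void)
    {
    #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
        futex_detect_cmpxchg();
    #endif
        printf("futex_cmpxchg_enabled = %d\n", futex_cmpxchg_enabled);
        return 0;
    }
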
1058     diff --git a/lib/nlattr.c b/lib/nlattr.c
1059     index 18eca7809b08..fc6754720ced 100644
1060     --- a/lib/nlattr.c
1061     +++ b/lib/nlattr.c
1062     @@ -303,9 +303,15 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
1063     */
1064     int nla_strcmp(const struct nlattr *nla, const char *str)
1065     {
1066     - int len = strlen(str) + 1;
1067     - int d = nla_len(nla) - len;
1068     + int len = strlen(str);
1069     + char *buf = nla_data(nla);
1070     + int attrlen = nla_len(nla);
1071     + int d;
1072    
1073     + if (attrlen > 0 && buf[attrlen - 1] == '\0')
1074     + attrlen--;
1075     +
1076     + d = attrlen - len;
1077     if (d == 0)
1078     d = memcmp(nla_data(nla), str, len);
1079    
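
The nla_strcmp() change makes the comparison robust for attributes that arrive without a trailing NUL: at most one terminator is trimmed before the lengths are compared, so "abc" and "abc\0" both match "abc" while "abcd" still does not. A userspace model with a hypothetical attribute struct:

    #include <stdio.h>
    #include <string.h>

    struct nla_model { int len; const char *data; }; /* hypothetical */

    static int nla_strcmp_model(const struct nla_model *nla, const char *str)
    {
        int len = strlen(str);
        int attrlen = nla->len;
        int d;

        /* Trim at most one trailing NUL from the attribute payload. */
        if (attrlen > 0 && nla->data[attrlen - 1] == '\0')
            attrlen--;

        d = attrlen - len;
        if (d == 0)
            d = memcmp(nla->data, str, len);
        return d;
    }

    int main(void)
    {
        struct nla_model terminated   = { 4, "abc" }; /* "abc\0" on the wire */
        struct nla_model unterminated = { 3, "abc" }; /* "abc" on the wire */

        printf("%d %d\n", nla_strcmp_model(&terminated, "abc"),
                          nla_strcmp_model(&unterminated, "abc")); /* 0 0 */
        return 0;
    }
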
1080     diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
1081     index f2610e157660..7b326529e6a2 100644
1082     --- a/net/ipv6/icmp.c
1083     +++ b/net/ipv6/icmp.c
1084     @@ -520,7 +520,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
1085     np->tclass, NULL, &fl6, (struct rt6_info *)dst,
1086     MSG_DONTWAIT, np->dontfrag);
1087     if (err) {
1088     - ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
1089     + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
1090     ip6_flush_pending_frames(sk);
1091     } else {
1092     err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
1093     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1094     index 64d6073731d3..3702d179506d 100644
1095     --- a/net/ipv6/ip6_output.c
1096     +++ b/net/ipv6/ip6_output.c
1097     @@ -1566,8 +1566,8 @@ int ip6_push_pending_frames(struct sock *sk)
1098     if (proto == IPPROTO_ICMPV6) {
1099     struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1100    
1101     - ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
1102     - ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
1103     + ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
1104     + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1105     }
1106    
1107     err = ip6_local_out(skb);
1108     diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
1109     index e1e47350784b..08b367c6b9cf 100644
1110     --- a/net/ipv6/mcast.c
1111     +++ b/net/ipv6/mcast.c
1112     @@ -1620,11 +1620,12 @@ static void mld_sendpack(struct sk_buff *skb)
1113     dst_output);
1114     out:
1115     if (!err) {
1116     - ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
1117     - ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
1118     - IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
1119     - } else
1120     - IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
1121     + ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
1122     + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1123     + IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
1124     + } else {
1125     + IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1126     + }
1127    
1128     rcu_read_unlock();
1129     return;
1130     diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
1131     index 587bbdcb22b4..bda74291c3e0 100644
1132     --- a/net/ipv6/ping.c
1133     +++ b/net/ipv6/ping.c
1134     @@ -182,8 +182,8 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1135     MSG_DONTWAIT, np->dontfrag);
1136    
1137     if (err) {
1138     - ICMP6_INC_STATS_BH(sock_net(sk), rt->rt6i_idev,
1139     - ICMP6_MIB_OUTERRORS);
1140     + ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev,
1141     + ICMP6_MIB_OUTERRORS);
1142     ip6_flush_pending_frames(sk);
1143     } else {
1144     err = icmpv6_push_pending_frames(sk, &fl6,
1145     diff --git a/net/rds/iw.c b/net/rds/iw.c
1146     index 7826d46baa70..589935661d66 100644
1147     --- a/net/rds/iw.c
1148     +++ b/net/rds/iw.c
1149     @@ -239,7 +239,8 @@ static int rds_iw_laddr_check(__be32 addr)
1150     ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
1151     /* due to this, we will claim to support IB devices unless we
1152     check node_type. */
1153     - if (ret || cm_id->device->node_type != RDMA_NODE_RNIC)
1154     + if (ret || !cm_id->device ||
1155     + cm_id->device->node_type != RDMA_NODE_RNIC)
1156     ret = -EADDRNOTAVAIL;
1157    
1158     rdsdebug("addr %pI4 ret %d node type %d\n",
1159     diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
1160     index b332e2cc0954..e294b86c8d88 100644
1161     --- a/security/selinux/hooks.c
1162     +++ b/security/selinux/hooks.c
1163     @@ -1418,15 +1418,33 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
1164     isec->sid = sbsec->sid;
1165    
1166     if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) {
1167     - if (opt_dentry) {
1168     - isec->sclass = inode_mode_to_security_class(inode->i_mode);
1169     - rc = selinux_proc_get_sid(opt_dentry,
1170     - isec->sclass,
1171     - &sid);
1172     - if (rc)
1173     - goto out_unlock;
1174     - isec->sid = sid;
1175     - }
1176     + /* We must have a dentry to determine the label on
1177     + * procfs inodes */
1178     + if (opt_dentry)
1179     + /* Called from d_instantiate or
1180     + * d_splice_alias. */
1181     + dentry = dget(opt_dentry);
1182     + else
1183     + /* Called from selinux_complete_init, try to
1184     + * find a dentry. */
1185     + dentry = d_find_alias(inode);
1186     + /*
1187     + * This can be hit on boot when a file is accessed
1188     + * before the policy is loaded. When we load policy we
1189     + * may find inodes that have no dentry on the
1190     + * sbsec->isec_head list. No reason to complain as
1191     + * these will get fixed up the next time we go through
1192     + * inode_doinit() with a dentry, before these inodes
1193     + * could be used again by userspace.
1194     + */
1195     + if (!dentry)
1196     + goto out_unlock;
1197     + isec->sclass = inode_mode_to_security_class(inode->i_mode);
1198     + rc = selinux_proc_get_sid(dentry, isec->sclass, &sid);
1199     + dput(dentry);
1200     + if (rc)
1201     + goto out_unlock;
1202     + isec->sid = sid;
1203     }
1204     break;
1205     }
1206     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
1207     index e354ab1ec20f..a8dec9e9e876 100644
1208     --- a/sound/pci/hda/hda_intel.c
1209     +++ b/sound/pci/hda/hda_intel.c
1210     @@ -297,9 +297,9 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
1211     #define ULI_NUM_CAPTURE 5
1212     #define ULI_NUM_PLAYBACK 6
1213    
1214     -/* ATI HDMI may have up to 8 playbacks and 0 capture */
1215     +/* ATI HDMI has 1 playback and 0 capture */
1216     #define ATIHDMI_NUM_CAPTURE 0
1217     -#define ATIHDMI_NUM_PLAYBACK 8
1218     +#define ATIHDMI_NUM_PLAYBACK 1
1219    
1220     /* TERA has 4 playback and 3 capture */
1221     #define TERA_NUM_CAPTURE 3