Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.15/0106-3.15.7-all-fixes.patch

Revision 2487
Thu Aug 7 08:27:59 2014 UTC by niro
File size: 122778 bytes
-linux-3.15.7
1 niro 2487 diff --git a/Makefile b/Makefile
2     index fefa0237c2d1..833f67f3f80f 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 3
7     PATCHLEVEL = 15
8     -SUBLEVEL = 6
9     +SUBLEVEL = 7
10     EXTRAVERSION =
11     NAME = Shuffling Zombie Juror
12    
13     diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
14     index 2618cc13ba75..76a7739aab1c 100644
15     --- a/arch/arc/include/uapi/asm/ptrace.h
16     +++ b/arch/arc/include/uapi/asm/ptrace.h
17     @@ -11,6 +11,7 @@
18     #ifndef _UAPI__ASM_ARC_PTRACE_H
19     #define _UAPI__ASM_ARC_PTRACE_H
20    
21     +#define PTRACE_GET_THREAD_AREA 25
22    
23     #ifndef __ASSEMBLY__
24     /*
25     diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
26     index 5d76706139dd..13b3ffb27a38 100644
27     --- a/arch/arc/kernel/ptrace.c
28     +++ b/arch/arc/kernel/ptrace.c
29     @@ -146,6 +146,10 @@ long arch_ptrace(struct task_struct *child, long request,
30     pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
31    
32     switch (request) {
33     + case PTRACE_GET_THREAD_AREA:
34     + ret = put_user(task_thread_info(child)->thr_ptr,
35     + (unsigned long __user *)data);
36     + break;
37     default:
38     ret = ptrace_request(child, request, addr, data);
39     break;
40     diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
41     index db3c5414223e..34c7a24714a7 100644
42     --- a/arch/arm/Kconfig
43     +++ b/arch/arm/Kconfig
44     @@ -6,6 +6,7 @@ config ARM
45     select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
46     select ARCH_HAVE_CUSTOM_GPIO_H
47     select ARCH_MIGHT_HAVE_PC_PARPORT
48     + select ARCH_SUPPORTS_ATOMIC_RMW
49     select ARCH_USE_BUILTIN_BSWAP
50     select ARCH_USE_CMPXCHG_LOCKREF
51     select ARCH_WANT_IPC_PARSE_VERSION
52     diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
53     index ea323f09dc78..413d8f0594cb 100644
54     --- a/arch/arm/boot/dts/imx25.dtsi
55     +++ b/arch/arm/boot/dts/imx25.dtsi
56     @@ -14,6 +14,7 @@
57    
58     / {
59     aliases {
60     + ethernet0 = &fec;
61     gpio0 = &gpio1;
62     gpio1 = &gpio2;
63     gpio2 = &gpio3;
64     diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
65     index 137e010eab35..00cf66c1b8f3 100644
66     --- a/arch/arm/boot/dts/imx27.dtsi
67     +++ b/arch/arm/boot/dts/imx27.dtsi
68     @@ -16,6 +16,7 @@
69    
70     / {
71     aliases {
72     + ethernet0 = &fec;
73     gpio0 = &gpio1;
74     gpio1 = &gpio2;
75     gpio2 = &gpio3;
76     diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
77     index 88b218f8f810..e59ccb4d98e3 100644
78     --- a/arch/arm/boot/dts/imx35.dtsi
79     +++ b/arch/arm/boot/dts/imx35.dtsi
80     @@ -13,6 +13,7 @@
81    
82     / {
83     aliases {
84     + ethernet0 = &fec;
85     gpio0 = &gpio1;
86     gpio1 = &gpio2;
87     gpio2 = &gpio3;
88     diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi
89     index 9c89d1ca97c2..6a201cf54366 100644
90     --- a/arch/arm/boot/dts/imx50.dtsi
91     +++ b/arch/arm/boot/dts/imx50.dtsi
92     @@ -17,6 +17,7 @@
93    
94     / {
95     aliases {
96     + ethernet0 = &fec;
97     gpio0 = &gpio1;
98     gpio1 = &gpio2;
99     gpio2 = &gpio3;
100     diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
101     index 150bb4e2f744..51b86700cd88 100644
102     --- a/arch/arm/boot/dts/imx51.dtsi
103     +++ b/arch/arm/boot/dts/imx51.dtsi
104     @@ -19,6 +19,7 @@
105    
106     / {
107     aliases {
108     + ethernet0 = &fec;
109     gpio0 = &gpio1;
110     gpio1 = &gpio2;
111     gpio2 = &gpio3;
112     diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
113     index 6a1bf4ff83d5..eaa627fa82ba 100644
114     --- a/arch/arm/boot/dts/imx53.dtsi
115     +++ b/arch/arm/boot/dts/imx53.dtsi
116     @@ -18,6 +18,7 @@
117    
118     / {
119     aliases {
120     + ethernet0 = &fec;
121     gpio0 = &gpio1;
122     gpio1 = &gpio2;
123     gpio2 = &gpio3;
124     diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
125     index eca0971d4db1..02a6afca7530 100644
126     --- a/arch/arm/boot/dts/imx6qdl.dtsi
127     +++ b/arch/arm/boot/dts/imx6qdl.dtsi
128     @@ -16,6 +16,7 @@
129    
130     / {
131     aliases {
132     + ethernet0 = &fec;
133     can0 = &can1;
134     can1 = &can2;
135     gpio0 = &gpio1;
136     diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
137     index d26b099260a3..2d4e5285f3f3 100644
138     --- a/arch/arm/boot/dts/imx6sl.dtsi
139     +++ b/arch/arm/boot/dts/imx6sl.dtsi
140     @@ -14,6 +14,7 @@
141    
142     / {
143     aliases {
144     + ethernet0 = &fec;
145     gpio0 = &gpio1;
146     gpio1 = &gpio2;
147     gpio2 = &gpio3;
148     diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
149     index e759af5d7098..6376a39b767e 100644
150     --- a/arch/arm64/Kconfig
151     +++ b/arch/arm64/Kconfig
152     @@ -2,6 +2,7 @@ config ARM64
153     def_bool y
154     select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
155     select ARCH_USE_CMPXCHG_LOCKREF
156     + select ARCH_SUPPORTS_ATOMIC_RMW
157     select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
158     select ARCH_WANT_OPTIONAL_GPIOLIB
159     select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
160     diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
161     index c95c4b8c3e74..004851f3d841 100644
162     --- a/arch/powerpc/Kconfig
163     +++ b/arch/powerpc/Kconfig
164     @@ -145,6 +145,7 @@ config PPC
165     select HAVE_IRQ_EXIT_ON_IRQ_STACK
166     select ARCH_USE_CMPXCHG_LOCKREF if PPC64
167     select HAVE_ARCH_AUDITSYSCALL
168     + select ARCH_SUPPORTS_ATOMIC_RMW
169    
170     config GENERIC_CSUM
171     def_bool CPU_LITTLE_ENDIAN
172     diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
173     index 29f2e988c56a..407c87d9879a 100644
174     --- a/arch/sparc/Kconfig
175     +++ b/arch/sparc/Kconfig
176     @@ -78,6 +78,7 @@ config SPARC64
177     select HAVE_C_RECORDMCOUNT
178     select NO_BOOTMEM
179     select HAVE_ARCH_AUDITSYSCALL
180     + select ARCH_SUPPORTS_ATOMIC_RMW
181    
182     config ARCH_DEFCONFIG
183     string
184     diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
185     index 6b8b429c832f..512e45f0c204 100644
186     --- a/arch/x86/Kconfig
187     +++ b/arch/x86/Kconfig
188     @@ -130,6 +130,7 @@ config X86
189     select HAVE_CC_STACKPROTECTOR
190     select GENERIC_CPU_AUTOPROBE
191     select HAVE_ARCH_AUDITSYSCALL
192     + select ARCH_SUPPORTS_ATOMIC_RMW
193    
194     config INSTRUCTION_DECODER
195     def_bool y
196     diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
197     index 84c223479e3c..7a6d43a554d7 100644
198     --- a/arch/x86/boot/header.S
199     +++ b/arch/x86/boot/header.S
200     @@ -91,10 +91,9 @@ bs_die:
201    
202     .section ".bsdata", "a"
203     bugger_off_msg:
204     - .ascii "Direct floppy boot is not supported. "
205     - .ascii "Use a boot loader program instead.\r\n"
206     + .ascii "Use a boot loader.\r\n"
207     .ascii "\n"
208     - .ascii "Remove disk and press any key to reboot ...\r\n"
209     + .ascii "Remove disk and press any key to reboot...\r\n"
210     .byte 0
211    
212     #ifdef CONFIG_EFI_STUB
213     @@ -108,7 +107,7 @@ coff_header:
214     #else
215     .word 0x8664 # x86-64
216     #endif
217     - .word 3 # nr_sections
218     + .word 4 # nr_sections
219     .long 0 # TimeDateStamp
220     .long 0 # PointerToSymbolTable
221     .long 1 # NumberOfSymbols
222     @@ -250,6 +249,25 @@ section_table:
223     .word 0 # NumberOfLineNumbers
224     .long 0x60500020 # Characteristics (section flags)
225    
226     + #
227     + # The offset & size fields are filled in by build.c.
228     + #
229     + .ascii ".bss"
230     + .byte 0
231     + .byte 0
232     + .byte 0
233     + .byte 0
234     + .long 0
235     + .long 0x0
236     + .long 0 # Size of initialized data
237     + # on disk
238     + .long 0x0
239     + .long 0 # PointerToRelocations
240     + .long 0 # PointerToLineNumbers
241     + .word 0 # NumberOfRelocations
242     + .word 0 # NumberOfLineNumbers
243     + .long 0xc8000080 # Characteristics (section flags)
244     +
245     #endif /* CONFIG_EFI_STUB */
246    
247     # Kernel attributes; used by setup. This is part 1 of the
248     diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
249     index 1a2f2121cada..a7661c430cd9 100644
250     --- a/arch/x86/boot/tools/build.c
251     +++ b/arch/x86/boot/tools/build.c
252     @@ -143,7 +143,7 @@ static void usage(void)
253    
254     #ifdef CONFIG_EFI_STUB
255    
256     -static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
257     +static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
258     {
259     unsigned int pe_header;
260     unsigned short num_sections;
261     @@ -164,10 +164,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
262     put_unaligned_le32(size, section + 0x8);
263    
264     /* section header vma field */
265     - put_unaligned_le32(offset, section + 0xc);
266     + put_unaligned_le32(vma, section + 0xc);
267    
268     /* section header 'size of initialised data' field */
269     - put_unaligned_le32(size, section + 0x10);
270     + put_unaligned_le32(datasz, section + 0x10);
271    
272     /* section header 'file offset' field */
273     put_unaligned_le32(offset, section + 0x14);
274     @@ -179,6 +179,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
275     }
276     }
277    
278     +static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
279     +{
280     + update_pecoff_section_header_fields(section_name, offset, size, size, offset);
281     +}
282     +
283     static void update_pecoff_setup_and_reloc(unsigned int size)
284     {
285     u32 setup_offset = 0x200;
286     @@ -203,9 +208,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
287    
288     pe_header = get_unaligned_le32(&buf[0x3c]);
289    
290     - /* Size of image */
291     - put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
292     -
293     /*
294     * Size of code: Subtract the size of the first sector (512 bytes)
295     * which includes the header.
296     @@ -220,6 +222,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
297     update_pecoff_section_header(".text", text_start, text_sz);
298     }
299    
300     +static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
301     +{
302     + unsigned int pe_header;
303     + unsigned int bss_sz = init_sz - file_sz;
304     +
305     + pe_header = get_unaligned_le32(&buf[0x3c]);
306     +
307     + /* Size of uninitialized data */
308     + put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
309     +
310     + /* Size of image */
311     + put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
312     +
313     + update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
314     +}
315     +
316     static int reserve_pecoff_reloc_section(int c)
317     {
318     /* Reserve 0x20 bytes for .reloc section */
319     @@ -259,6 +277,8 @@ static void efi_stub_entry_update(void)
320     static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
321     static inline void update_pecoff_text(unsigned int text_start,
322     unsigned int file_sz) {}
323     +static inline void update_pecoff_bss(unsigned int file_sz,
324     + unsigned int init_sz) {}
325     static inline void efi_stub_defaults(void) {}
326     static inline void efi_stub_entry_update(void) {}
327    
328     @@ -310,7 +330,7 @@ static void parse_zoffset(char *fname)
329    
330     int main(int argc, char ** argv)
331     {
332     - unsigned int i, sz, setup_sectors;
333     + unsigned int i, sz, setup_sectors, init_sz;
334     int c;
335     u32 sys_size;
336     struct stat sb;
337     @@ -376,7 +396,9 @@ int main(int argc, char ** argv)
338     buf[0x1f1] = setup_sectors-1;
339     put_unaligned_le32(sys_size, &buf[0x1f4]);
340    
341     - update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
342     + update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
343     + init_sz = get_unaligned_le32(&buf[0x260]);
344     + update_pecoff_bss(i + (sys_size * 16), init_sz);
345    
346     efi_stub_entry_update();
347    
348     diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
349     index adb02aa62af5..07846d738bdb 100644
350     --- a/arch/x86/kernel/cpu/perf_event_intel.c
351     +++ b/arch/x86/kernel/cpu/perf_event_intel.c
352     @@ -1382,6 +1382,15 @@ again:
353     intel_pmu_lbr_read();
354    
355     /*
356     + * CondChgd bit 63 doesn't mean any overflow status. Ignore
357     + * and clear the bit.
358     + */
359     + if (__test_and_clear_bit(63, (unsigned long *)&status)) {
360     + if (!status)
361     + goto done;
362     + }
363     +
364     + /*
365     * PEBS overflow sets bit 62 in the global status register
366     */
367     if (__test_and_clear_bit(62, (unsigned long *)&status)) {
368     diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
369     index 57e5ce126d5a..ea030319b321 100644
370     --- a/arch/x86/kernel/tsc.c
371     +++ b/arch/x86/kernel/tsc.c
372     @@ -920,9 +920,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
373     tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
374     if (!(freq->flags & CPUFREQ_CONST_LOOPS))
375     mark_tsc_unstable("cpufreq changes");
376     - }
377    
378     - set_cyc2ns_scale(tsc_khz, freq->cpu);
379     + set_cyc2ns_scale(tsc_khz, freq->cpu);
380     + }
381    
382     return 0;
383     }
384     diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
385     index a83b57e57b63..e625969bb921 100644
386     --- a/drivers/bluetooth/ath3k.c
387     +++ b/drivers/bluetooth/ath3k.c
388     @@ -90,7 +90,6 @@ static const struct usb_device_id ath3k_table[] = {
389     { USB_DEVICE(0x0b05, 0x17d0) },
390     { USB_DEVICE(0x0CF3, 0x0036) },
391     { USB_DEVICE(0x0CF3, 0x3004) },
392     - { USB_DEVICE(0x0CF3, 0x3005) },
393     { USB_DEVICE(0x0CF3, 0x3008) },
394     { USB_DEVICE(0x0CF3, 0x311D) },
395     { USB_DEVICE(0x0CF3, 0x311E) },
396     @@ -140,7 +139,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
397     { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
398     { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
399     { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
400     - { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
401     { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
402     { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
403     { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
404     diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
405     index a7dfbf9a3afb..55cee1d67681 100644
406     --- a/drivers/bluetooth/btusb.c
407     +++ b/drivers/bluetooth/btusb.c
408     @@ -160,7 +160,6 @@ static const struct usb_device_id blacklist_table[] = {
409     { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
410     { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
411     { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
412     - { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
413     { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
414     { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
415     { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
416     diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
417     index 04680ead9275..fede8ca7147c 100644
418     --- a/drivers/bluetooth/hci_h5.c
419     +++ b/drivers/bluetooth/hci_h5.c
420     @@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
421     H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
422     BT_ERR("Non-link packet received in non-active state");
423     h5_reset_rx(h5);
424     + return 0;
425     }
426    
427     h5->rx_func = h5_rx_payload;
428     diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
429     index 334601cc81cf..2a451b14b3cc 100644
430     --- a/drivers/char/hw_random/core.c
431     +++ b/drivers/char/hw_random/core.c
432     @@ -55,16 +55,35 @@ static DEFINE_MUTEX(rng_mutex);
433     static int data_avail;
434     static u8 *rng_buffer;
435    
436     +static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
437     + int wait);
438     +
439     static size_t rng_buffer_size(void)
440     {
441     return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
442     }
443    
444     +static void add_early_randomness(struct hwrng *rng)
445     +{
446     + unsigned char bytes[16];
447     + int bytes_read;
448     +
449     + bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
450     + if (bytes_read > 0)
451     + add_device_randomness(bytes, bytes_read);
452     +}
453     +
454     static inline int hwrng_init(struct hwrng *rng)
455     {
456     - if (!rng->init)
457     - return 0;
458     - return rng->init(rng);
459     + if (rng->init) {
460     + int ret;
461     +
462     + ret = rng->init(rng);
463     + if (ret)
464     + return ret;
465     + }
466     + add_early_randomness(rng);
467     + return 0;
468     }
469    
470     static inline void hwrng_cleanup(struct hwrng *rng)
471     @@ -304,8 +323,6 @@ int hwrng_register(struct hwrng *rng)
472     {
473     int err = -EINVAL;
474     struct hwrng *old_rng, *tmp;
475     - unsigned char bytes[16];
476     - int bytes_read;
477    
478     if (rng->name == NULL ||
479     (rng->data_read == NULL && rng->read == NULL))
480     @@ -347,9 +364,17 @@ int hwrng_register(struct hwrng *rng)
481     INIT_LIST_HEAD(&rng->list);
482     list_add_tail(&rng->list, &rng_list);
483    
484     - bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
485     - if (bytes_read > 0)
486     - add_device_randomness(bytes, bytes_read);
487     + if (old_rng && !rng->init) {
488     + /*
489     + * Use a new device's input to add some randomness to
490     + * the system. If this rng device isn't going to be
491     + * used right away, its init function hasn't been
492     + * called yet; so only use the randomness from devices
493     + * that don't need an init callback.
494     + */
495     + add_early_randomness(rng);
496     + }
497     +
498     out_unlock:
499     mutex_unlock(&rng_mutex);
500     out:
501     diff --git a/drivers/char/random.c b/drivers/char/random.c
502     index 2b6e4cd8de8e..18ec40459598 100644
503     --- a/drivers/char/random.c
504     +++ b/drivers/char/random.c
505     @@ -641,7 +641,7 @@ retry:
506     } while (unlikely(entropy_count < pool_size-2 && pnfrac));
507     }
508    
509     - if (entropy_count < 0) {
510     + if (unlikely(entropy_count < 0)) {
511     pr_warn("random: negative entropy/overflow: pool %s count %d\n",
512     r->name, entropy_count);
513     WARN_ON(1);
514     @@ -980,7 +980,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
515     int reserved)
516     {
517     int entropy_count, orig;
518     - size_t ibytes;
519     + size_t ibytes, nfrac;
520    
521     BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
522    
523     @@ -998,7 +998,17 @@ retry:
524     }
525     if (ibytes < min)
526     ibytes = 0;
527     - if ((entropy_count -= ibytes << (ENTROPY_SHIFT + 3)) < 0)
528     +
529     + if (unlikely(entropy_count < 0)) {
530     + pr_warn("random: negative entropy count: pool %s count %d\n",
531     + r->name, entropy_count);
532     + WARN_ON(1);
533     + entropy_count = 0;
534     + }
535     + nfrac = ibytes << (ENTROPY_SHIFT + 3);
536     + if ((size_t) entropy_count > nfrac)
537     + entropy_count -= nfrac;
538     + else
539     entropy_count = 0;
540    
541     if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
542     @@ -1375,6 +1385,7 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
543     "with %d bits of entropy available\n",
544     current->comm, nonblocking_pool.entropy_total);
545    
546     + nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
547     ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
548    
549     trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
550     diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
551     index 558224cf55bf..dcac12dc6803 100644
552     --- a/drivers/cpufreq/cpufreq.c
553     +++ b/drivers/cpufreq/cpufreq.c
554     @@ -1139,10 +1139,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
555     * the creation of a brand new one. So we need to perform this update
556     * by invoking update_policy_cpu().
557     */
558     - if (recover_policy && cpu != policy->cpu)
559     + if (recover_policy && cpu != policy->cpu) {
560     update_policy_cpu(policy, cpu);
561     - else
562     + WARN_ON(kobject_move(&policy->kobj, &dev->kobj));
563     + } else {
564     policy->cpu = cpu;
565     + }
566    
567     cpumask_copy(policy->cpus, cpumask_of(cpu));
568    
569     diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
570     index ed5711f77e2d..4d25a06bb45e 100644
571     --- a/drivers/gpio/gpio-dwapb.c
572     +++ b/drivers/gpio/gpio-dwapb.c
573     @@ -260,9 +260,6 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
574     ct->regs.ack = GPIO_PORTA_EOI;
575     ct->regs.mask = GPIO_INTMASK;
576    
577     - irq_setup_generic_chip(irq_gc, IRQ_MSK(port->bgc.gc.ngpio),
578     - IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0);
579     -
580     irq_set_chained_handler(irq, dwapb_irq_handler);
581     irq_set_handler_data(irq, gpio);
582    
583     diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
584     index 2a00cb828d20..61963d3acce2 100644
585     --- a/drivers/gpu/drm/i915/intel_dp.c
586     +++ b/drivers/gpu/drm/i915/intel_dp.c
587     @@ -833,8 +833,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
588     mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
589     bpp);
590    
591     - for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
592     - for (clock = min_clock; clock <= max_clock; clock++) {
593     + for (clock = min_clock; clock <= max_clock; clock++) {
594     + for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
595     link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
596     link_avail = intel_dp_max_data_rate(link_clock,
597     lane_count);
598     diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
599     index 28f84b4fce32..3485bdccf8b8 100644
600     --- a/drivers/gpu/drm/qxl/qxl_irq.c
601     +++ b/drivers/gpu/drm/qxl/qxl_irq.c
602     @@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
603    
604     pending = xchg(&qdev->ram_header->int_pending, 0);
605    
606     + if (!pending)
607     + return IRQ_NONE;
608     +
609     atomic_inc(&qdev->irq_received);
610    
611     if (pending & QXL_INTERRUPT_DISPLAY) {
612     diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
613     index 2b2908440644..7d68203a3737 100644
614     --- a/drivers/gpu/drm/radeon/atombios_encoders.c
615     +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
616     @@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
617     struct backlight_properties props;
618     struct radeon_backlight_privdata *pdata;
619     struct radeon_encoder_atom_dig *dig;
620     - u8 backlight_level;
621     char bl_name[16];
622    
623     /* Mac laptops with multiple GPUs use the gmux driver for backlight
624     @@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
625    
626     pdata->encoder = radeon_encoder;
627    
628     - backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
629     -
630     dig = radeon_encoder->enc_priv;
631     dig->bl_dev = bd;
632    
633     bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
634     + /* Set a reasonable default here if the level is 0 otherwise
635     + * fbdev will attempt to turn the backlight on after console
636     + * unblanking and it will try and restore 0 which turns the backlight
637     + * off again.
638     + */
639     + if (bd->props.brightness == 0)
640     + bd->props.brightness = RADEON_MAX_BL_LEVEL;
641     bd->props.power = FB_BLANK_UNBLANK;
642     backlight_update_status(bd);
643    
644     diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
645     index 356b733caafe..9445db514de0 100644
646     --- a/drivers/gpu/drm/radeon/radeon_display.c
647     +++ b/drivers/gpu/drm/radeon/radeon_display.c
648     @@ -757,6 +757,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
649     struct radeon_device *rdev = dev->dev_private;
650     int ret = 0;
651    
652     + /* don't leak the edid if we already fetched it in detect() */
653     + if (radeon_connector->edid)
654     + goto got_edid;
655     +
656     /* on hw with routers, select right port */
657     if (radeon_connector->router.ddc_valid)
658     radeon_router_select_ddc_port(radeon_connector);
659     @@ -795,6 +799,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
660     radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
661     }
662     if (radeon_connector->edid) {
663     +got_edid:
664     drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
665     ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
666     drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
667     diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
668     index eaaa3d843b80..23b2ce294c4c 100644
669     --- a/drivers/hv/hv_fcopy.c
670     +++ b/drivers/hv/hv_fcopy.c
671     @@ -246,8 +246,8 @@ void hv_fcopy_onchannelcallback(void *context)
672     /*
673     * Send the information to the user-level daemon.
674     */
675     - fcopy_send_data();
676     schedule_delayed_work(&fcopy_work, 5*HZ);
677     + fcopy_send_data();
678     return;
679     }
680     icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
681     diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
682     index ea852537307e..2b931feb5131 100644
683     --- a/drivers/hv/hv_kvp.c
684     +++ b/drivers/hv/hv_kvp.c
685     @@ -127,6 +127,15 @@ kvp_work_func(struct work_struct *dummy)
686     kvp_respond_to_host(NULL, HV_E_FAIL);
687     }
688    
689     +static void poll_channel(struct vmbus_channel *channel)
690     +{
691     + unsigned long flags;
692     +
693     + spin_lock_irqsave(&channel->inbound_lock, flags);
694     + hv_kvp_onchannelcallback(channel);
695     + spin_unlock_irqrestore(&channel->inbound_lock, flags);
696     +}
697     +
698     static int kvp_handle_handshake(struct hv_kvp_msg *msg)
699     {
700     int ret = 1;
701     @@ -155,7 +164,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
702     kvp_register(dm_reg_value);
703     kvp_transaction.active = false;
704     if (kvp_transaction.kvp_context)
705     - hv_kvp_onchannelcallback(kvp_transaction.kvp_context);
706     + poll_channel(kvp_transaction.kvp_context);
707     }
708     return ret;
709     }
710     @@ -568,6 +577,7 @@ response_done:
711    
712     vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
713     VM_PKT_DATA_INBAND, 0);
714     + poll_channel(channel);
715    
716     }
717    
718     @@ -603,7 +613,7 @@ void hv_kvp_onchannelcallback(void *context)
719     return;
720     }
721    
722     - vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
723     + vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
724     &requestid);
725    
726     if (recvlen > 0) {
727     diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
728     index dd761806f0e8..3b9c9ef0deb8 100644
729     --- a/drivers/hv/hv_util.c
730     +++ b/drivers/hv/hv_util.c
731     @@ -319,7 +319,7 @@ static int util_probe(struct hv_device *dev,
732     (struct hv_util_service *)dev_id->driver_data;
733     int ret;
734    
735     - srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
736     + srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
737     if (!srv->recv_buffer)
738     return -ENOMEM;
739     if (srv->util_init) {
740     diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
741     index 0f4dea5ccf17..9ee3913850d6 100644
742     --- a/drivers/hwmon/adt7470.c
743     +++ b/drivers/hwmon/adt7470.c
744     @@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
745     return -EINVAL;
746    
747     temp = DIV_ROUND_CLOSEST(temp, 1000);
748     - temp = clamp_val(temp, 0, 255);
749     + temp = clamp_val(temp, -128, 127);
750    
751     mutex_lock(&data->lock);
752     data->temp_min[attr->index] = temp;
753     @@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
754     return -EINVAL;
755    
756     temp = DIV_ROUND_CLOSEST(temp, 1000);
757     - temp = clamp_val(temp, 0, 255);
758     + temp = clamp_val(temp, -128, 127);
759    
760     mutex_lock(&data->lock);
761     data->temp_max[attr->index] = temp;
762     @@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
763     return -EINVAL;
764    
765     temp = DIV_ROUND_CLOSEST(temp, 1000);
766     - temp = clamp_val(temp, 0, 255);
767     + temp = clamp_val(temp, -128, 127);
768    
769     mutex_lock(&data->lock);
770     data->pwm_tmin[attr->index] = temp;
771     diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
772     index afd31042b452..d14ab3c45daa 100644
773     --- a/drivers/hwmon/da9052-hwmon.c
774     +++ b/drivers/hwmon/da9052-hwmon.c
775     @@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev,
776     struct device_attribute *devattr,
777     char *buf)
778     {
779     - return sprintf(buf, "da9052-hwmon\n");
780     + return sprintf(buf, "da9052\n");
781     }
782    
783     static ssize_t show_label(struct device *dev,
784     diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
785     index 73b3865f1207..35eb7738d711 100644
786     --- a/drivers/hwmon/da9055-hwmon.c
787     +++ b/drivers/hwmon/da9055-hwmon.c
788     @@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev,
789     struct device_attribute *devattr,
790     char *buf)
791     {
792     - return sprintf(buf, "da9055-hwmon\n");
793     + return sprintf(buf, "da9055\n");
794     }
795    
796     static ssize_t show_label(struct device *dev,
797     diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
798     index ea6e06b9c7d4..11dd986e5fa0 100644
799     --- a/drivers/iio/industrialio-event.c
800     +++ b/drivers/iio/industrialio-event.c
801     @@ -341,6 +341,9 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
802     &indio_dev->event_interface->dev_attr_list);
803     kfree(postfix);
804    
805     + if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
806     + continue;
807     +
808     if (ret)
809     return ret;
810    
811     diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
812     index 8914ea90ddd9..bff1a8ed47d0 100644
813     --- a/drivers/infiniband/hw/cxgb4/device.c
814     +++ b/drivers/infiniband/hw/cxgb4/device.c
815     @@ -654,6 +654,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
816     pr_err(MOD "error allocating status page\n");
817     goto err4;
818     }
819     + rdev->status_page->db_off = 0;
820     return 0;
821     err4:
822     c4iw_rqtpool_destroy(rdev);
823     diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
824     index dc930ed21eca..c69adc1705b4 100644
825     --- a/drivers/infiniband/hw/mlx5/qp.c
826     +++ b/drivers/infiniband/hw/mlx5/qp.c
827     @@ -671,7 +671,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
828     int err;
829    
830     uuari = &dev->mdev.priv.uuari;
831     - if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN)
832     + if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
833     return -EINVAL;
834    
835     if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
836     diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
837     index 57d165e026f4..739ca6756cb9 100644
838     --- a/drivers/irqchip/irq-gic.c
839     +++ b/drivers/irqchip/irq-gic.c
840     @@ -42,6 +42,7 @@
841     #include <linux/irqchip/chained_irq.h>
842     #include <linux/irqchip/arm-gic.h>
843    
844     +#include <asm/cputype.h>
845     #include <asm/irq.h>
846     #include <asm/exception.h>
847     #include <asm/smp_plat.h>
848     @@ -954,7 +955,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
849     }
850    
851     for_each_possible_cpu(cpu) {
852     - unsigned long offset = percpu_offset * cpu_logical_map(cpu);
853     + u32 mpidr = cpu_logical_map(cpu);
854     + u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
855     + unsigned long offset = percpu_offset * core_id;
856     *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
857     *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
858     }
859     @@ -1071,8 +1074,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
860     gic_cnt++;
861     return 0;
862     }
863     +IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
864     IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
865     IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
866     +IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
867     IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
868     IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
869    
870     diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
871     index a5da511e3c9a..158ed32ac21c 100644
872     --- a/drivers/isdn/i4l/isdn_ppp.c
873     +++ b/drivers/isdn/i4l/isdn_ppp.c
874     @@ -442,7 +442,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
875     {
876     struct sock_fprog uprog;
877     struct sock_filter *code = NULL;
878     - int len, err;
879     + int len;
880    
881     if (copy_from_user(&uprog, arg, sizeof(uprog)))
882     return -EFAULT;
883     @@ -458,12 +458,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
884     if (IS_ERR(code))
885     return PTR_ERR(code);
886    
887     - err = sk_chk_filter(code, uprog.len);
888     - if (err) {
889     - kfree(code);
890     - return err;
891     - }
892     -
893     *p = code;
894     return uprog.len;
895     }
896     @@ -644,9 +638,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
897     fprog.len = len;
898     fprog.filter = code;
899    
900     - if (is->pass_filter)
901     + if (is->pass_filter) {
902     sk_unattached_filter_destroy(is->pass_filter);
903     - err = sk_unattached_filter_create(&is->pass_filter, &fprog);
904     + is->pass_filter = NULL;
905     + }
906     + if (fprog.filter != NULL)
907     + err = sk_unattached_filter_create(&is->pass_filter,
908     + &fprog);
909     + else
910     + err = 0;
911     kfree(code);
912    
913     return err;
914     @@ -663,9 +663,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
915     fprog.len = len;
916     fprog.filter = code;
917    
918     - if (is->active_filter)
919     + if (is->active_filter) {
920     sk_unattached_filter_destroy(is->active_filter);
921     - err = sk_unattached_filter_create(&is->active_filter, &fprog);
922     + is->active_filter = NULL;
923     + }
924     + if (fprog.filter != NULL)
925     + err = sk_unattached_filter_create(&is->active_filter,
926     + &fprog);
927     + else
928     + err = 0;
929     kfree(code);
930    
931     return err;
932     diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
933     index 4ead4ba60656..d2899e7eb3aa 100644
934     --- a/drivers/md/dm-cache-metadata.c
935     +++ b/drivers/md/dm-cache-metadata.c
936     @@ -425,6 +425,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
937    
938     disk_super = dm_block_data(sblock);
939    
940     + /* Verify the data block size hasn't changed */
941     + if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
942     + DMERR("changing the data block size (from %u to %llu) is not supported",
943     + le32_to_cpu(disk_super->data_block_size),
944     + (unsigned long long)cmd->data_block_size);
945     + r = -EINVAL;
946     + goto bad;
947     + }
948     +
949     r = __check_incompat_features(disk_super, cmd);
950     if (r < 0)
951     goto bad;
952     diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
953     index b086a945edcb..e9d33ad59df5 100644
954     --- a/drivers/md/dm-thin-metadata.c
955     +++ b/drivers/md/dm-thin-metadata.c
956     @@ -613,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
957    
958     disk_super = dm_block_data(sblock);
959    
960     + /* Verify the data block size hasn't changed */
961     + if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
962     + DMERR("changing the data block size (from %u to %llu) is not supported",
963     + le32_to_cpu(disk_super->data_block_size),
964     + (unsigned long long)pmd->data_block_size);
965     + r = -EINVAL;
966     + goto bad_unlock_sblock;
967     + }
968     +
969     r = __check_incompat_features(disk_super, pmd);
970     if (r < 0)
971     goto bad_unlock_sblock;
972     diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
973     index 2fd1c5e31a0f..339adce7c7a5 100644
974     --- a/drivers/media/usb/gspca/pac7302.c
975     +++ b/drivers/media/usb/gspca/pac7302.c
976     @@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = {
977     {USB_DEVICE(0x093a, 0x2620)},
978     {USB_DEVICE(0x093a, 0x2621)},
979     {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
980     + {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
981     {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
982     {USB_DEVICE(0x093a, 0x2625)},
983     {USB_DEVICE(0x093a, 0x2626)},
984     diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
985     index 1fd4a0f77967..d85dd3693f47 100644
986     --- a/drivers/mtd/devices/elm.c
987     +++ b/drivers/mtd/devices/elm.c
988     @@ -445,6 +445,7 @@ static int elm_context_save(struct elm_info *info)
989     ELM_SYNDROME_FRAGMENT_1 + offset);
990     regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
991     ELM_SYNDROME_FRAGMENT_0 + offset);
992     + break;
993     default:
994     return -EINVAL;
995     }
996     @@ -483,6 +484,7 @@ static int elm_context_restore(struct elm_info *info)
997     regs->elm_syndrome_fragment_1[i]);
998     elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
999     regs->elm_syndrome_fragment_0[i]);
1000     + break;
1001     default:
1002     return -EINVAL;
1003     }
1004     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1005     index d3a67896d435..96fee83c9606 100644
1006     --- a/drivers/net/bonding/bond_main.c
1007     +++ b/drivers/net/bonding/bond_main.c
1008     @@ -4028,7 +4028,7 @@ static int bond_check_params(struct bond_params *params)
1009     }
1010    
1011     if (ad_select) {
1012     - bond_opt_initstr(&newval, lacp_rate);
1013     + bond_opt_initstr(&newval, ad_select);
1014     valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
1015     &newval);
1016     if (!valptr) {
1017     diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
1018     index dcf9196f6316..ea4d4f1a6411 100644
1019     --- a/drivers/net/can/slcan.c
1020     +++ b/drivers/net/can/slcan.c
1021     @@ -52,6 +52,7 @@
1022     #include <linux/delay.h>
1023     #include <linux/init.h>
1024     #include <linux/kernel.h>
1025     +#include <linux/workqueue.h>
1026     #include <linux/can.h>
1027     #include <linux/can/skb.h>
1028    
1029     @@ -85,6 +86,7 @@ struct slcan {
1030     struct tty_struct *tty; /* ptr to TTY structure */
1031     struct net_device *dev; /* easy for intr handling */
1032     spinlock_t lock;
1033     + struct work_struct tx_work; /* Flushes transmit buffer */
1034    
1035     /* These are pointers to the malloc()ed frame buffers. */
1036     unsigned char rbuff[SLC_MTU]; /* receiver buffer */
1037     @@ -309,36 +311,46 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
1038     sl->dev->stats.tx_bytes += cf->can_dlc;
1039     }
1040    
1041     -/*
1042     - * Called by the driver when there's room for more data. If we have
1043     - * more packets to send, we send them here.
1044     - */
1045     -static void slcan_write_wakeup(struct tty_struct *tty)
1046     +/* Write out any remaining transmit buffer. Scheduled when tty is writable */
1047     +static void slcan_transmit(struct work_struct *work)
1048     {
1049     + struct slcan *sl = container_of(work, struct slcan, tx_work);
1050     int actual;
1051     - struct slcan *sl = (struct slcan *) tty->disc_data;
1052    
1053     + spin_lock_bh(&sl->lock);
1054     /* First make sure we're connected. */
1055     - if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
1056     + if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
1057     + spin_unlock_bh(&sl->lock);
1058     return;
1059     + }
1060    
1061     - spin_lock_bh(&sl->lock);
1062     if (sl->xleft <= 0) {
1063     /* Now serial buffer is almost free & we can start
1064     * transmission of another packet */
1065     sl->dev->stats.tx_packets++;
1066     - clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
1067     + clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
1068     spin_unlock_bh(&sl->lock);
1069     netif_wake_queue(sl->dev);
1070     return;
1071     }
1072    
1073     - actual = tty->ops->write(tty, sl->xhead, sl->xleft);
1074     + actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
1075     sl->xleft -= actual;
1076     sl->xhead += actual;
1077     spin_unlock_bh(&sl->lock);
1078     }
1079    
1080     +/*
1081     + * Called by the driver when there's room for more data.
1082     + * Schedule the transmit.
1083     + */
1084     +static void slcan_write_wakeup(struct tty_struct *tty)
1085     +{
1086     + struct slcan *sl = tty->disc_data;
1087     +
1088     + schedule_work(&sl->tx_work);
1089     +}
1090     +
1091     /* Send a can_frame to a TTY queue. */
1092     static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
1093     {
1094     @@ -528,6 +540,7 @@ static struct slcan *slc_alloc(dev_t line)
1095     sl->magic = SLCAN_MAGIC;
1096     sl->dev = dev;
1097     spin_lock_init(&sl->lock);
1098     + INIT_WORK(&sl->tx_work, slcan_transmit);
1099     slcan_devs[i] = dev;
1100    
1101     return sl;
1102     @@ -626,8 +639,12 @@ static void slcan_close(struct tty_struct *tty)
1103     if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
1104     return;
1105    
1106     + spin_lock_bh(&sl->lock);
1107     tty->disc_data = NULL;
1108     sl->tty = NULL;
1109     + spin_unlock_bh(&sl->lock);
1110     +
1111     + flush_work(&sl->tx_work);
1112    
1113     /* Flush network side */
1114     unregister_netdev(sl->dev);
1115     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1116     index 9261d5313b5b..0979967577a1 100644
1117     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1118     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1119     @@ -797,7 +797,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1120    
1121     return;
1122     }
1123     - bnx2x_frag_free(fp, new_data);
1124     + if (new_data)
1125     + bnx2x_frag_free(fp, new_data);
1126     drop:
1127     /* drop the packet and keep the buffer in the bin */
1128     DP(NETIF_MSG_RX_STATUS,
1129     diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
1130     index dc19bc5dec77..2b5ab7c770b5 100644
1131     --- a/drivers/net/ethernet/emulex/benet/be_main.c
1132     +++ b/drivers/net/ethernet/emulex/benet/be_main.c
1133     @@ -2858,7 +2858,7 @@ static int be_open(struct net_device *netdev)
1134     for_all_evt_queues(adapter, eqo, i) {
1135     napi_enable(&eqo->napi);
1136     be_enable_busy_poll(eqo);
1137     - be_eq_notify(adapter, eqo->q.id, true, false, 0);
1138     + be_eq_notify(adapter, eqo->q.id, true, true, 0);
1139     }
1140     adapter->flags |= BE_FLAGS_NAPI_ENABLED;
1141    
1142     diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
1143     index fa36fe12e775..4c8d2d530e26 100644
1144     --- a/drivers/net/ethernet/intel/igb/e1000_82575.c
1145     +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
1146     @@ -1489,6 +1489,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
1147     s32 ret_val;
1148     u16 i, rar_count = mac->rar_entry_count;
1149    
1150     + if ((hw->mac.type >= e1000_i210) &&
1151     + !(igb_get_flash_presence_i210(hw))) {
1152     + ret_val = igb_pll_workaround_i210(hw);
1153     + if (ret_val)
1154     + return ret_val;
1155     + }
1156     +
1157     /* Initialize identification LED */
1158     ret_val = igb_id_led_init(hw);
1159     if (ret_val) {
1160     diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
1161     index b05bf925ac72..25d236143e9d 100644
1162     --- a/drivers/net/ethernet/intel/igb/e1000_defines.h
1163     +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
1164     @@ -49,14 +49,15 @@
1165     #define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
1166    
1167     /* Physical Func Reset Done Indication */
1168     -#define E1000_CTRL_EXT_PFRSTD 0x00004000
1169     -#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
1170     -#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
1171     -#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
1172     -#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
1173     -#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
1174     -#define E1000_CTRL_EXT_EIAME 0x01000000
1175     -#define E1000_CTRL_EXT_IRCA 0x00000001
1176     +#define E1000_CTRL_EXT_PFRSTD 0x00004000
1177     +#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */
1178     +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
1179     +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
1180     +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
1181     +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
1182     +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
1183     +#define E1000_CTRL_EXT_EIAME 0x01000000
1184     +#define E1000_CTRL_EXT_IRCA 0x00000001
1185     /* Interrupt delay cancellation */
1186     /* Driver loaded bit for FW */
1187     #define E1000_CTRL_EXT_DRV_LOAD 0x10000000
1188     @@ -65,6 +66,7 @@
1189     /* packet buffer parity error detection enabled */
1190     /* descriptor FIFO parity error detection enable */
1191     #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
1192     +#define E1000_CTRL_EXT_PHYPDEN 0x00100000
1193     #define E1000_I2CCMD_REG_ADDR_SHIFT 16
1194     #define E1000_I2CCMD_PHY_ADDR_SHIFT 24
1195     #define E1000_I2CCMD_OPCODE_READ 0x08000000
1196     diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
1197     index 10741d170f2d..e990cacbf698 100644
1198     --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
1199     +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
1200     @@ -571,4 +571,7 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
1201     /* These functions must be implemented by drivers */
1202     s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
1203     s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
1204     +
1205     +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
1206     +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
1207     #endif /* _E1000_HW_H_ */
1208     diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
1209     index f67f8a170b90..9e2c43361dfe 100644
1210     --- a/drivers/net/ethernet/intel/igb/e1000_i210.c
1211     +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
1212     @@ -836,3 +836,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
1213     }
1214     return ret_val;
1215     }
1216     +
1217     +/**
1218     + * igb_pll_workaround_i210
1219     + * @hw: pointer to the HW structure
1220     + *
1221     + * Works around an errata in the PLL circuit where it occasionally
1222     + * provides the wrong clock frequency after power up.
1223     + **/
1224     +s32 igb_pll_workaround_i210(struct e1000_hw *hw)
1225     +{
1226     + s32 ret_val;
1227     + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
1228     + u16 nvm_word, phy_word, pci_word, tmp_nvm;
1229     + int i;
1230     +
1231     + /* Get and set needed register values */
1232     + wuc = rd32(E1000_WUC);
1233     + mdicnfg = rd32(E1000_MDICNFG);
1234     + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
1235     + wr32(E1000_MDICNFG, reg_val);
1236     +
1237     + /* Get data from NVM, or set default */
1238     + ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
1239     + &nvm_word);
1240     + if (ret_val)
1241     + nvm_word = E1000_INVM_DEFAULT_AL;
1242     + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
1243     + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
1244     + /* check current state directly from internal PHY */
1245     + igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
1246     + E1000_PHY_PLL_FREQ_REG), &phy_word);
1247     + if ((phy_word & E1000_PHY_PLL_UNCONF)
1248     + != E1000_PHY_PLL_UNCONF) {
1249     + ret_val = 0;
1250     + break;
1251     + } else {
1252     + ret_val = -E1000_ERR_PHY;
1253     + }
1254     + /* directly reset the internal PHY */
1255     + ctrl = rd32(E1000_CTRL);
1256     + wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
1257     +
1258     + ctrl_ext = rd32(E1000_CTRL_EXT);
1259     + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
1260     + wr32(E1000_CTRL_EXT, ctrl_ext);
1261     +
1262     + wr32(E1000_WUC, 0);
1263     + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
1264     + wr32(E1000_EEARBC_I210, reg_val);
1265     +
1266     + igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
1267     + pci_word |= E1000_PCI_PMCSR_D3;
1268     + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
1269     + usleep_range(1000, 2000);
1270     + pci_word &= ~E1000_PCI_PMCSR_D3;
1271     + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
1272     + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
1273     + wr32(E1000_EEARBC_I210, reg_val);
1274     +
1275     + /* restore WUC register */
1276     + wr32(E1000_WUC, wuc);
1277     + }
1278     + /* restore MDICNFG setting */
1279     + wr32(E1000_MDICNFG, mdicnfg);
1280     + return ret_val;
1281     +}
1282     diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
1283     index 907fe99a9813..8205e1976595 100644
1284     --- a/drivers/net/ethernet/intel/igb/e1000_i210.h
1285     +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
1286     @@ -36,6 +36,7 @@ s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
1287     s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
1288     s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
1289     bool igb_get_flash_presence_i210(struct e1000_hw *hw);
1290     +s32 igb_pll_workaround_i210(struct e1000_hw *hw);
1291    
1292     #define E1000_STM_OPCODE 0xDB00
1293     #define E1000_EEPROM_FLASH_SIZE_WORD 0x11
1294     @@ -81,4 +82,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
1295     #define NVM_LED_1_CFG_DEFAULT_I211 0x0184
1296     #define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
1297    
1298     +/* PLL Defines */
1299     +#define E1000_PCI_PMCSR 0x44
1300     +#define E1000_PCI_PMCSR_D3 0x03
1301     +#define E1000_MAX_PLL_TRIES 5
1302     +#define E1000_PHY_PLL_UNCONF 0xFF
1303     +#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
1304     +#define E1000_PHY_PLL_FREQ_REG 0x000E
1305     +#define E1000_INVM_DEFAULT_AL 0x202F
1306     +#define E1000_INVM_AUTOLOAD 0x0A
1307     +#define E1000_INVM_PLL_WO_VAL 0x0010
1308     +
1309     #endif
1310     diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
1311     index bdb246e848e1..8ded9a12d409 100644
1312     --- a/drivers/net/ethernet/intel/igb/e1000_regs.h
1313     +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
1314     @@ -69,6 +69,7 @@
1315     #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
1316     #define E1000_PBS 0x01008 /* Packet Buffer Size */
1317     #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
1318     +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
1319     #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
1320     #define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
1321     #define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
1322     diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
1323     index 16430a8440fa..d731df1da919 100644
1324     --- a/drivers/net/ethernet/intel/igb/igb_main.c
1325     +++ b/drivers/net/ethernet/intel/igb/igb_main.c
1326     @@ -7204,6 +7204,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1327     }
1328     }
1329    
1330     +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
1331     +{
1332     + struct igb_adapter *adapter = hw->back;
1333     +
1334     + pci_read_config_word(adapter->pdev, reg, value);
1335     +}
1336     +
1337     +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
1338     +{
1339     + struct igb_adapter *adapter = hw->back;
1340     +
1341     + pci_write_config_word(adapter->pdev, reg, *value);
1342     +}
1343     +
1344     s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
1345     {
1346     struct igb_adapter *adapter = hw->back;
1347     @@ -7567,6 +7581,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
1348    
1349     if (netif_running(netdev))
1350     igb_close(netdev);
1351     + else
1352     + igb_reset(adapter);
1353    
1354     igb_clear_interrupt_scheme(adapter);
1355    
1356     diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
1357     index 14786c8bf99e..d63c4bf96c20 100644
1358     --- a/drivers/net/ethernet/marvell/mvneta.c
1359     +++ b/drivers/net/ethernet/marvell/mvneta.c
1360     @@ -1189,7 +1189,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1361     command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1362     command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1363    
1364     - if (l3_proto == swab16(ETH_P_IP))
1365     + if (l3_proto == htons(ETH_P_IP))
1366     command |= MVNETA_TXD_IP_CSUM;
1367     else
1368     command |= MVNETA_TX_L3_IP6;
1369     @@ -2365,7 +2365,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
1370    
1371     if (phydev->speed == SPEED_1000)
1372     val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
1373     - else
1374     + else if (phydev->speed == SPEED_100)
1375     val |= MVNETA_GMAC_CONFIG_MII_SPEED;
1376    
1377     mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
1378     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1379     index 7e4b1720c3d1..e03c9aff81ba 100644
1380     --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1381     +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1382     @@ -2303,7 +2303,7 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev,
1383     struct mlx4_en_priv *priv = netdev_priv(dev);
1384     __be16 current_port;
1385    
1386     - if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS))
1387     + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
1388     return;
1389    
1390     if (sa_family == AF_INET6)
1391     diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
1392     index 8fa321f39dfd..52b4c3986c99 100644
1393     --- a/drivers/net/ethernet/mellanox/mlx4/main.c
1394     +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
1395     @@ -2466,7 +2466,8 @@ slave_start:
1396     "with IB port. Single port VFs syntax"
1397     " is only supported when all ports "
1398     "are configured as ethernet\n");
1399     - goto err_close;
1400     + err = -EINVAL;
1401     + goto err_master_mfunc;
1402     }
1403     for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
1404     unsigned j;
1405     diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
1406     index 1c24a8f368bd..fd411d6e19a2 100644
1407     --- a/drivers/net/ethernet/sun/sunvnet.c
1408     +++ b/drivers/net/ethernet/sun/sunvnet.c
1409     @@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
1410     return vp;
1411     }
1412    
1413     +static void vnet_cleanup(void)
1414     +{
1415     + struct vnet *vp;
1416     + struct net_device *dev;
1417     +
1418     + mutex_lock(&vnet_list_mutex);
1419     + while (!list_empty(&vnet_list)) {
1420     + vp = list_first_entry(&vnet_list, struct vnet, list);
1421     + list_del(&vp->list);
1422     + dev = vp->dev;
1423     + /* vio_unregister_driver() should have cleaned up port_list */
1424     + BUG_ON(!list_empty(&vp->port_list));
1425     + unregister_netdev(dev);
1426     + free_netdev(dev);
1427     + }
1428     + mutex_unlock(&vnet_list_mutex);
1429     +}
1430     +
1431     static const char *local_mac_prop = "local-mac-address";
1432    
1433     static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
1434     @@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
1435    
1436     kfree(port);
1437    
1438     - unregister_netdev(vp->dev);
1439     }
1440     return 0;
1441     }
1442     @@ -1268,6 +1285,7 @@ static int __init vnet_init(void)
1443     static void __exit vnet_exit(void)
1444     {
1445     vio_unregister_driver(&vnet_port_driver);
1446     + vnet_cleanup();
1447     }
1448    
1449     module_init(vnet_init);
1450     diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
1451     index c331b7ebc812..80d75ee60da8 100644
1452     --- a/drivers/net/ethernet/ti/cpsw.c
1453     +++ b/drivers/net/ethernet/ti/cpsw.c
1454     @@ -1201,7 +1201,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
1455     for_each_slave(priv, cpsw_slave_open, priv);
1456    
1457     /* Add default VLAN */
1458     - cpsw_add_default_vlan(priv);
1459     + if (!priv->data.dual_emac)
1460     + cpsw_add_default_vlan(priv);
1461     + else
1462     + cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan,
1463     + ALE_ALL_PORTS << priv->host_port,
1464     + ALE_ALL_PORTS << priv->host_port, 0, 0);
1465    
1466     if (!cpsw_common_res_usage_state(priv)) {
1467     /* setup tx dma to fixed prio and zero offset */
1468     diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
1469     index e3923ebb693f..3c41a83a1572 100644
1470     --- a/drivers/net/ppp/ppp_generic.c
1471     +++ b/drivers/net/ppp/ppp_generic.c
1472     @@ -539,7 +539,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
1473     {
1474     struct sock_fprog uprog;
1475     struct sock_filter *code = NULL;
1476     - int len, err;
1477     + int len;
1478    
1479     if (copy_from_user(&uprog, arg, sizeof(uprog)))
1480     return -EFAULT;
1481     @@ -554,12 +554,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
1482     if (IS_ERR(code))
1483     return PTR_ERR(code);
1484    
1485     - err = sk_chk_filter(code, uprog.len);
1486     - if (err) {
1487     - kfree(code);
1488     - return err;
1489     - }
1490     -
1491     *p = code;
1492     return uprog.len;
1493     }
1494     @@ -763,10 +757,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1495     };
1496    
1497     ppp_lock(ppp);
1498     - if (ppp->pass_filter)
1499     + if (ppp->pass_filter) {
1500     sk_unattached_filter_destroy(ppp->pass_filter);
1501     - err = sk_unattached_filter_create(&ppp->pass_filter,
1502     - &fprog);
1503     + ppp->pass_filter = NULL;
1504     + }
1505     + if (fprog.filter != NULL)
1506     + err = sk_unattached_filter_create(&ppp->pass_filter,
1507     + &fprog);
1508     + else
1509     + err = 0;
1510     kfree(code);
1511     ppp_unlock(ppp);
1512     }
1513     @@ -784,10 +783,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1514     };
1515    
1516     ppp_lock(ppp);
1517     - if (ppp->active_filter)
1518     + if (ppp->active_filter) {
1519     sk_unattached_filter_destroy(ppp->active_filter);
1520     - err = sk_unattached_filter_create(&ppp->active_filter,
1521     - &fprog);
1522     + ppp->active_filter = NULL;
1523     + }
1524     + if (fprog.filter != NULL)
1525     + err = sk_unattached_filter_create(&ppp->active_filter,
1526     + &fprog);
1527     + else
1528     + err = 0;
1529     kfree(code);
1530     ppp_unlock(ppp);
1531     }
1532     diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
1533     index 2ea7efd11857..6c9c16d76935 100644
1534     --- a/drivers/net/ppp/pppoe.c
1535     +++ b/drivers/net/ppp/pppoe.c
1536     @@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
1537     po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
1538     dev->hard_header_len);
1539    
1540     - po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
1541     + po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
1542     po->chan.private = sk;
1543     po->chan.ops = &pppoe_chan_ops;
1544    
1545     diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
1546     index ad4a94e9ff57..87526443841f 100644
1547     --- a/drivers/net/slip/slip.c
1548     +++ b/drivers/net/slip/slip.c
1549     @@ -83,6 +83,7 @@
1550     #include <linux/delay.h>
1551     #include <linux/init.h>
1552     #include <linux/slab.h>
1553     +#include <linux/workqueue.h>
1554     #include "slip.h"
1555     #ifdef CONFIG_INET
1556     #include <linux/ip.h>
1557     @@ -416,36 +417,46 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
1558     #endif
1559     }
1560    
1561     -/*
1562     - * Called by the driver when there's room for more data. If we have
1563     - * more packets to send, we send them here.
1564     - */
1565     -static void slip_write_wakeup(struct tty_struct *tty)
1566     +/* Write out any remaining transmit buffer. Scheduled when tty is writable */
1567     +static void slip_transmit(struct work_struct *work)
1568     {
1569     + struct slip *sl = container_of(work, struct slip, tx_work);
1570     int actual;
1571     - struct slip *sl = tty->disc_data;
1572    
1573     + spin_lock_bh(&sl->lock);
1574     /* First make sure we're connected. */
1575     - if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
1576     + if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
1577     + spin_unlock_bh(&sl->lock);
1578     return;
1579     + }
1580    
1581     - spin_lock_bh(&sl->lock);
1582     if (sl->xleft <= 0) {
1583     /* Now serial buffer is almost free & we can start
1584     * transmission of another packet */
1585     sl->dev->stats.tx_packets++;
1586     - clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
1587     + clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
1588     spin_unlock_bh(&sl->lock);
1589     sl_unlock(sl);
1590     return;
1591     }
1592    
1593     - actual = tty->ops->write(tty, sl->xhead, sl->xleft);
1594     + actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
1595     sl->xleft -= actual;
1596     sl->xhead += actual;
1597     spin_unlock_bh(&sl->lock);
1598     }
1599    
1600     +/*
1601     + * Called by the driver when there's room for more data.
1602     + * Schedule the transmit.
1603     + */
1604     +static void slip_write_wakeup(struct tty_struct *tty)
1605     +{
1606     + struct slip *sl = tty->disc_data;
1607     +
1608     + schedule_work(&sl->tx_work);
1609     +}
1610     +
1611     static void sl_tx_timeout(struct net_device *dev)
1612     {
1613     struct slip *sl = netdev_priv(dev);
1614     @@ -749,6 +760,7 @@ static struct slip *sl_alloc(dev_t line)
1615     sl->magic = SLIP_MAGIC;
1616     sl->dev = dev;
1617     spin_lock_init(&sl->lock);
1618     + INIT_WORK(&sl->tx_work, slip_transmit);
1619     sl->mode = SL_MODE_DEFAULT;
1620     #ifdef CONFIG_SLIP_SMART
1621     /* initialize timer_list struct */
1622     @@ -872,8 +884,12 @@ static void slip_close(struct tty_struct *tty)
1623     if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
1624     return;
1625    
1626     + spin_lock_bh(&sl->lock);
1627     tty->disc_data = NULL;
1628     sl->tty = NULL;
1629     + spin_unlock_bh(&sl->lock);
1630     +
1631     + flush_work(&sl->tx_work);
1632    
1633     /* VSV = very important to remove timers */
1634     #ifdef CONFIG_SLIP_SMART
1635     diff --git a/drivers/net/slip/slip.h b/drivers/net/slip/slip.h
1636     index 67673cf1266b..cf32aadf508f 100644
1637     --- a/drivers/net/slip/slip.h
1638     +++ b/drivers/net/slip/slip.h
1639     @@ -53,6 +53,7 @@ struct slip {
1640     struct tty_struct *tty; /* ptr to TTY structure */
1641     struct net_device *dev; /* easy for intr handling */
1642     spinlock_t lock;
1643     + struct work_struct tx_work; /* Flushes transmit buffer */
1644    
1645     #ifdef SL_INCLUDE_CSLIP
1646     struct slcompress *slcomp; /* for header compression */
1647     diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
1648     index 312178d7b698..a01462523bc7 100644
1649     --- a/drivers/net/usb/huawei_cdc_ncm.c
1650     +++ b/drivers/net/usb/huawei_cdc_ncm.c
1651     @@ -84,12 +84,13 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
1652     ctx = drvstate->ctx;
1653    
1654     if (usbnet_dev->status)
1655     - /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256
1656     - * decimal (0x100)"
1657     + /* The wMaxCommand buffer must be big enough to hold
1658     + * any message from the modem. Experience has shown
1659     + * that some replies are more than 256 bytes long
1660     */
1661     subdriver = usb_cdc_wdm_register(ctx->control,
1662     &usbnet_dev->status->desc,
1663     - 256, /* wMaxCommand */
1664     + 1024, /* wMaxCommand */
1665     huawei_cdc_ncm_wdm_manage_power);
1666     if (IS_ERR(subdriver)) {
1667     ret = PTR_ERR(subdriver);
1668     @@ -206,6 +207,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = {
1669     { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
1670     .driver_info = (unsigned long)&huawei_cdc_ncm_info,
1671     },
1672     + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
1673     + .driver_info = (unsigned long)&huawei_cdc_ncm_info,
1674     + },
1675    
1676     /* Terminating entry */
1677     {
1678     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1679     index cf62d7e8329f..22756db53dca 100644
1680     --- a/drivers/net/usb/qmi_wwan.c
1681     +++ b/drivers/net/usb/qmi_wwan.c
1682     @@ -667,6 +667,7 @@ static const struct usb_device_id products[] = {
1683     {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
1684     {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
1685     {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
1686     + {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
1687     {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
1688     {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
1689     {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
1690     @@ -741,6 +742,7 @@ static const struct usb_device_id products[] = {
1691     {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
1692     {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
1693     {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
1694     + {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
1695     {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
1696     {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
1697     {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
1698     @@ -756,6 +758,7 @@ static const struct usb_device_id products[] = {
1699     {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
1700     {QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */
1701     {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
1702     + {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
1703     {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
1704     {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
1705     {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
1706     diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
1707     index 3fbfb0869030..d2c007098a3b 100644
1708     --- a/drivers/net/usb/r8152.c
1709     +++ b/drivers/net/usb/r8152.c
1710     @@ -1361,7 +1361,7 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
1711     struct sk_buff_head seg_list;
1712     struct sk_buff *segs, *nskb;
1713    
1714     - features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
1715     + features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1716     segs = skb_gso_segment(skb, features);
1717     if (IS_ERR(segs) || !segs)
1718     goto drop;
1719     diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
1720     index 503a81e58185..c1e311341b74 100644
1721     --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
1722     +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
1723     @@ -1068,13 +1068,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1724     /* recalculate basic rates */
1725     iwl_calc_basic_rates(priv, ctx);
1726    
1727     - /*
1728     - * force CTS-to-self frames protection if RTS-CTS is not preferred
1729     - * one aggregation protection method
1730     - */
1731     - if (!priv->hw_params.use_rts_for_aggregation)
1732     - ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
1733     -
1734     if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
1735     !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
1736     ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
1737     @@ -1480,11 +1473,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
1738     else
1739     ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
1740    
1741     - if (bss_conf->use_cts_prot)
1742     - ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
1743     - else
1744     - ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
1745     -
1746     memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
1747    
1748     if (vif->type == NL80211_IFTYPE_AP ||
1749     diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
1750     index 9ccec10bba16..c3c8194d85b5 100644
1751     --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
1752     +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
1753     @@ -667,13 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
1754     if (vif->bss_conf.qos)
1755     cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
1756    
1757     - /* Don't use cts to self as the fw doesn't support it currently. */
1758     - if (vif->bss_conf.use_cts_prot) {
1759     + if (vif->bss_conf.use_cts_prot)
1760     cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
1761     - if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
1762     - cmd->protection_flags |=
1763     - cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
1764     - }
1765     +
1766     IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
1767     vif->bss_conf.use_cts_prot,
1768     vif->bss_conf.ht_operation_mode);
1769     diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
1770     index 3d1d57f9f5bc..087cb618521c 100644
1771     --- a/drivers/net/wireless/iwlwifi/pcie/drv.c
1772     +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
1773     @@ -367,6 +367,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
1774     {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
1775     {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
1776     {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
1777     + {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
1778     {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
1779     {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
1780     {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
1781     @@ -380,7 +381,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
1782     {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
1783     {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
1784     {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
1785     - {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)},
1786     + {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
1787     {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
1788     {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
1789     {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
1790     diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
1791     index 9c771b3e9918..6ffbe87aecc0 100644
1792     --- a/drivers/net/wireless/mwifiex/main.c
1793     +++ b/drivers/net/wireless/mwifiex/main.c
1794     @@ -647,6 +647,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1795     }
1796    
1797     tx_info = MWIFIEX_SKB_TXCB(skb);
1798     + memset(tx_info, 0, sizeof(*tx_info));
1799     tx_info->bss_num = priv->bss_num;
1800     tx_info->bss_type = priv->bss_type;
1801     tx_info->pkt_len = skb->len;
1802     diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
1803     index 7367208ee8cd..034645a2978f 100644
1804     --- a/drivers/net/xen-netback/netback.c
1805     +++ b/drivers/net/xen-netback/netback.c
1806     @@ -1007,14 +1007,21 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
1807     {
1808     struct gnttab_map_grant_ref *gop_map = *gopp_map;
1809     u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1810     + /* This always points to the shinfo of the skb being checked, which
1811     + * could be either the first or the one on the frag_list
1812     + */
1813     struct skb_shared_info *shinfo = skb_shinfo(skb);
1814     + /* If this is non-NULL, we are currently checking the frag_list skb, and
1815     + * this points to the shinfo of the first one
1816     + */
1817     + struct skb_shared_info *first_shinfo = NULL;
1818     int nr_frags = shinfo->nr_frags;
1819     + const bool sharedslot = nr_frags &&
1820     + frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
1821     int i, err;
1822     - struct sk_buff *first_skb = NULL;
1823    
1824     /* Check status of header. */
1825     err = (*gopp_copy)->status;
1826     - (*gopp_copy)++;
1827     if (unlikely(err)) {
1828     if (net_ratelimit())
1829     netdev_dbg(vif->dev,
1830     @@ -1022,8 +1029,12 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
1831     (*gopp_copy)->status,
1832     pending_idx,
1833     (*gopp_copy)->source.u.ref);
1834     - xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
1835     + /* The first frag might still have this slot mapped */
1836     + if (!sharedslot)
1837     + xenvif_idx_release(vif, pending_idx,
1838     + XEN_NETIF_RSP_ERROR);
1839     }
1840     + (*gopp_copy)++;
1841    
1842     check_frags:
1843     for (i = 0; i < nr_frags; i++, gop_map++) {
1844     @@ -1039,8 +1050,19 @@ check_frags:
1845     pending_idx,
1846     gop_map->handle);
1847     /* Had a previous error? Invalidate this fragment. */
1848     - if (unlikely(err))
1849     + if (unlikely(err)) {
1850     xenvif_idx_unmap(vif, pending_idx);
1851     + /* If the mapping of the first frag was OK, but
1852     + * the header's copy failed, and they are
1853     + * sharing a slot, send an error
1854     + */
1855     + if (i == 0 && sharedslot)
1856     + xenvif_idx_release(vif, pending_idx,
1857     + XEN_NETIF_RSP_ERROR);
1858     + else
1859     + xenvif_idx_release(vif, pending_idx,
1860     + XEN_NETIF_RSP_OKAY);
1861     + }
1862     continue;
1863     }
1864    
1865     @@ -1052,42 +1074,53 @@ check_frags:
1866     gop_map->status,
1867     pending_idx,
1868     gop_map->ref);
1869     +
1870     xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
1871    
1872     /* Not the first error? Preceding frags already invalidated. */
1873     if (err)
1874     continue;
1875     - /* First error: invalidate preceding fragments. */
1876     +
1877     + /* First error: if the header hasn't shared a slot with the
1878     + * first frag, release it as well.
1879     + */
1880     + if (!sharedslot)
1881     + xenvif_idx_release(vif,
1882     + XENVIF_TX_CB(skb)->pending_idx,
1883     + XEN_NETIF_RSP_OKAY);
1884     +
1885     + /* Invalidate preceding fragments of this skb. */
1886     for (j = 0; j < i; j++) {
1887     pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1888     xenvif_idx_unmap(vif, pending_idx);
1889     + xenvif_idx_release(vif, pending_idx,
1890     + XEN_NETIF_RSP_OKAY);
1891     + }
1892     +
1893     + /* And if we found the error while checking the frag_list, unmap
1894     + * the first skb's frags
1895     + */
1896     + if (first_shinfo) {
1897     + for (j = 0; j < first_shinfo->nr_frags; j++) {
1898     + pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
1899     + xenvif_idx_unmap(vif, pending_idx);
1900     + xenvif_idx_release(vif, pending_idx,
1901     + XEN_NETIF_RSP_OKAY);
1902     + }
1903     }
1904    
1905     /* Remember the error: invalidate all subsequent fragments. */
1906     err = newerr;
1907     }
1908    
1909     - if (skb_has_frag_list(skb)) {
1910     - first_skb = skb;
1911     - skb = shinfo->frag_list;
1912     - shinfo = skb_shinfo(skb);
1913     + if (skb_has_frag_list(skb) && !first_shinfo) {
1914     + first_shinfo = skb_shinfo(skb);
1915     + shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
1916     nr_frags = shinfo->nr_frags;
1917    
1918     goto check_frags;
1919     }
1920    
1921     - /* There was a mapping error in the frag_list skb. We have to unmap
1922     - * the first skb's frags
1923     - */
1924     - if (first_skb && err) {
1925     - int j;
1926     - shinfo = skb_shinfo(first_skb);
1927     - for (j = 0; j < shinfo->nr_frags; j++) {
1928     - pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1929     - xenvif_idx_unmap(vif, pending_idx);
1930     - }
1931     - }
1932     -
1933     *gopp_map = gop_map;
1934     return err;
1935     }
1936     @@ -1495,7 +1528,16 @@ static int xenvif_tx_submit(struct xenvif *vif)
1937    
1938     /* Check the remap error code. */
1939     if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map, &gop_copy))) {
1940     + /* If there was an error, xenvif_tx_check_gop is
1941     + * expected to release all the frags which were mapped,
1942     + * so kfree_skb shouldn't do it again
1943     + */
1944     skb_shinfo(skb)->nr_frags = 0;
1945     + if (skb_has_frag_list(skb)) {
1946     + struct sk_buff *nskb =
1947     + skb_shinfo(skb)->frag_list;
1948     + skb_shinfo(nskb)->nr_frags = 0;
1949     + }
1950     kfree_skb(skb);
1951     continue;
1952     }
1953     @@ -1799,8 +1841,6 @@ void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
1954     tx_unmap_op.status);
1955     BUG();
1956     }
1957     -
1958     - xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
1959     }
1960    
1961     static inline int rx_work_todo(struct xenvif *vif)
1962     diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
1963     index 482c45b777d3..9d13da48ebe1 100644
1964     --- a/drivers/usb/chipidea/udc.c
1965     +++ b/drivers/usb/chipidea/udc.c
1966     @@ -1176,8 +1176,8 @@ static int ep_enable(struct usb_ep *ep,
1967    
1968     if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1969     cap |= QH_IOS;
1970     - if (hwep->num)
1971     - cap |= QH_ZLT;
1972     +
1973     + cap |= QH_ZLT;
1974     cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
1975     /*
1976     * For ISO-TX, we set mult at QH as the largest value, and use
1977     diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1978     index 229a73f64304..00c4b96b4e4f 100644
1979     --- a/drivers/usb/core/hub.c
1980     +++ b/drivers/usb/core/hub.c
1981     @@ -893,6 +893,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
1982     if (!hub_is_superspeed(hub->hdev))
1983     return -EINVAL;
1984    
1985     + ret = hub_port_status(hub, port1, &portstatus, &portchange);
1986     + if (ret < 0)
1987     + return ret;
1988     +
1989     + /*
1990     + * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
1991     + * Controller [1022:7814] will return a spurious result, causing a
1992     + * subsequently hot-plugged USB 3.0 device to be routed to the 2.0 root
1993     + * hub and recognized as a high-speed device if we set the USB 3.0 port
1994     + * link state to Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
1995     + * check the state here to avoid the bug.
1996     + */
1997     + if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
1998     + USB_SS_PORT_LS_RX_DETECT) {
1999     + dev_dbg(&hub->ports[port1 - 1]->dev,
2000     + "Not disabling port; link state is RxDetect\n");
2001     + return ret;
2002     + }
2003     +
2004     ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
2005     if (ret)
2006     return ret;
2007     diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
2008     index b7a506f2bb14..5c660c77f03b 100644
2009     --- a/drivers/xen/balloon.c
2010     +++ b/drivers/xen/balloon.c
2011     @@ -426,20 +426,18 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
2012     * p2m are consistent.
2013     */
2014     if (!xen_feature(XENFEAT_auto_translated_physmap)) {
2015     - unsigned long p;
2016     - struct page *scratch_page = get_balloon_scratch_page();
2017     -
2018     if (!PageHighMem(page)) {
2019     + struct page *scratch_page = get_balloon_scratch_page();
2020     +
2021     ret = HYPERVISOR_update_va_mapping(
2022     (unsigned long)__va(pfn << PAGE_SHIFT),
2023     pfn_pte(page_to_pfn(scratch_page),
2024     PAGE_KERNEL_RO), 0);
2025     BUG_ON(ret);
2026     - }
2027     - p = page_to_pfn(scratch_page);
2028     - __set_phys_to_machine(pfn, pfn_to_mfn(p));
2029    
2030     - put_balloon_scratch_page();
2031     + put_balloon_scratch_page();
2032     + }
2033     + __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
2034     }
2035     #endif
2036    
2037     diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
2038     index 32f9236c959f..8684e0e4d41b 100644
2039     --- a/drivers/xen/manage.c
2040     +++ b/drivers/xen/manage.c
2041     @@ -109,7 +109,6 @@ static int xen_suspend(void *data)
2042    
2043     if (!si->cancelled) {
2044     xen_irq_resume();
2045     - xen_console_resume();
2046     xen_timer_resume();
2047     }
2048    
2049     @@ -166,6 +165,10 @@ static void do_suspend(void)
2050    
2051     err = stop_machine(xen_suspend, &si, cpumask_of(0));
2052    
2053     + /* Resume console as early as possible. */
2054     + if (!si.cancelled)
2055     + xen_console_resume();
2056     +
2057     raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);
2058    
2059     dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
2060     diff --git a/fs/aio.c b/fs/aio.c
2061     index e609e15f36b9..6d68e01dc7ca 100644
2062     --- a/fs/aio.c
2063     +++ b/fs/aio.c
2064     @@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
2065     static void put_reqs_available(struct kioctx *ctx, unsigned nr)
2066     {
2067     struct kioctx_cpu *kcpu;
2068     + unsigned long flags;
2069    
2070     preempt_disable();
2071     kcpu = this_cpu_ptr(ctx->cpu);
2072    
2073     + local_irq_save(flags);
2074     kcpu->reqs_available += nr;
2075     +
2076     while (kcpu->reqs_available >= ctx->req_batch * 2) {
2077     kcpu->reqs_available -= ctx->req_batch;
2078     atomic_add(ctx->req_batch, &ctx->reqs_available);
2079     }
2080    
2081     + local_irq_restore(flags);
2082     preempt_enable();
2083     }
2084    
2085     @@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
2086     {
2087     struct kioctx_cpu *kcpu;
2088     bool ret = false;
2089     + unsigned long flags;
2090    
2091     preempt_disable();
2092     kcpu = this_cpu_ptr(ctx->cpu);
2093    
2094     + local_irq_save(flags);
2095     if (!kcpu->reqs_available) {
2096     int old, avail = atomic_read(&ctx->reqs_available);
2097    
2098     @@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
2099     ret = true;
2100     kcpu->reqs_available--;
2101     out:
2102     + local_irq_restore(flags);
2103     preempt_enable();
2104     return ret;
2105     }
2106     diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
2107     index aac71ce373e4..75fa055012b2 100644
2108     --- a/fs/fuse/dev.c
2109     +++ b/fs/fuse/dev.c
2110     @@ -643,9 +643,8 @@ struct fuse_copy_state {
2111     unsigned long seglen;
2112     unsigned long addr;
2113     struct page *pg;
2114     - void *mapaddr;
2115     - void *buf;
2116     unsigned len;
2117     + unsigned offset;
2118     unsigned move_pages:1;
2119     };
2120    
2121     @@ -666,23 +665,17 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
2122     if (cs->currbuf) {
2123     struct pipe_buffer *buf = cs->currbuf;
2124    
2125     - if (!cs->write) {
2126     - kunmap_atomic(cs->mapaddr);
2127     - } else {
2128     - kunmap_atomic(cs->mapaddr);
2129     + if (cs->write)
2130     buf->len = PAGE_SIZE - cs->len;
2131     - }
2132     cs->currbuf = NULL;
2133     - cs->mapaddr = NULL;
2134     - } else if (cs->mapaddr) {
2135     - kunmap_atomic(cs->mapaddr);
2136     + } else if (cs->pg) {
2137     if (cs->write) {
2138     flush_dcache_page(cs->pg);
2139     set_page_dirty_lock(cs->pg);
2140     }
2141     put_page(cs->pg);
2142     - cs->mapaddr = NULL;
2143     }
2144     + cs->pg = NULL;
2145     }
2146    
2147     /*
2148     @@ -691,7 +684,7 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
2149     */
2150     static int fuse_copy_fill(struct fuse_copy_state *cs)
2151     {
2152     - unsigned long offset;
2153     + struct page *page;
2154     int err;
2155    
2156     unlock_request(cs->fc, cs->req);
2157     @@ -706,14 +699,12 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
2158    
2159     BUG_ON(!cs->nr_segs);
2160     cs->currbuf = buf;
2161     - cs->mapaddr = kmap_atomic(buf->page);
2162     + cs->pg = buf->page;
2163     + cs->offset = buf->offset;
2164     cs->len = buf->len;
2165     - cs->buf = cs->mapaddr + buf->offset;
2166     cs->pipebufs++;
2167     cs->nr_segs--;
2168     } else {
2169     - struct page *page;
2170     -
2171     if (cs->nr_segs == cs->pipe->buffers)
2172     return -EIO;
2173    
2174     @@ -726,8 +717,8 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
2175     buf->len = 0;
2176    
2177     cs->currbuf = buf;
2178     - cs->mapaddr = kmap_atomic(page);
2179     - cs->buf = cs->mapaddr;
2180     + cs->pg = page;
2181     + cs->offset = 0;
2182     cs->len = PAGE_SIZE;
2183     cs->pipebufs++;
2184     cs->nr_segs++;
2185     @@ -740,14 +731,13 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
2186     cs->iov++;
2187     cs->nr_segs--;
2188     }
2189     - err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
2190     + err = get_user_pages_fast(cs->addr, 1, cs->write, &page);
2191     if (err < 0)
2192     return err;
2193     BUG_ON(err != 1);
2194     - offset = cs->addr % PAGE_SIZE;
2195     - cs->mapaddr = kmap_atomic(cs->pg);
2196     - cs->buf = cs->mapaddr + offset;
2197     - cs->len = min(PAGE_SIZE - offset, cs->seglen);
2198     + cs->pg = page;
2199     + cs->offset = cs->addr % PAGE_SIZE;
2200     + cs->len = min(PAGE_SIZE - cs->offset, cs->seglen);
2201     cs->seglen -= cs->len;
2202     cs->addr += cs->len;
2203     }
2204     @@ -760,15 +750,20 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
2205     {
2206     unsigned ncpy = min(*size, cs->len);
2207     if (val) {
2208     + void *pgaddr = kmap_atomic(cs->pg);
2209     + void *buf = pgaddr + cs->offset;
2210     +
2211     if (cs->write)
2212     - memcpy(cs->buf, *val, ncpy);
2213     + memcpy(buf, *val, ncpy);
2214     else
2215     - memcpy(*val, cs->buf, ncpy);
2216     + memcpy(*val, buf, ncpy);
2217     +
2218     + kunmap_atomic(pgaddr);
2219     *val += ncpy;
2220     }
2221     *size -= ncpy;
2222     cs->len -= ncpy;
2223     - cs->buf += ncpy;
2224     + cs->offset += ncpy;
2225     return ncpy;
2226     }
2227    
2228     @@ -874,8 +869,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
2229     out_fallback_unlock:
2230     unlock_page(newpage);
2231     out_fallback:
2232     - cs->mapaddr = kmap_atomic(buf->page);
2233     - cs->buf = cs->mapaddr + buf->offset;
2234     + cs->pg = buf->page;
2235     + cs->offset = buf->offset;
2236    
2237     err = lock_request(cs->fc, cs->req);
2238     if (err)
2239     diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
2240     index 42198359fa1b..202a9721be93 100644
2241     --- a/fs/fuse/dir.c
2242     +++ b/fs/fuse/dir.c
2243     @@ -198,7 +198,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
2244     inode = ACCESS_ONCE(entry->d_inode);
2245     if (inode && is_bad_inode(inode))
2246     goto invalid;
2247     - else if (fuse_dentry_time(entry) < get_jiffies_64()) {
2248     + else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
2249     + (flags & LOOKUP_REVAL)) {
2250     int err;
2251     struct fuse_entry_out outarg;
2252     struct fuse_req *req;
2253     @@ -985,7 +986,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
2254     int err;
2255     bool r;
2256    
2257     - if (fi->i_time < get_jiffies_64()) {
2258     + if (time_before64(fi->i_time, get_jiffies_64())) {
2259     r = true;
2260     err = fuse_do_getattr(inode, stat, file);
2261     } else {
2262     @@ -1171,7 +1172,7 @@ static int fuse_permission(struct inode *inode, int mask)
2263     ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
2264     struct fuse_inode *fi = get_fuse_inode(inode);
2265    
2266     - if (fi->i_time < get_jiffies_64()) {
2267     + if (time_before64(fi->i_time, get_jiffies_64())) {
2268     refreshed = true;
2269    
2270     err = fuse_perm_getattr(inode, mask);
2271     diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
2272     index 754dcf23de8a..e2cd799e4d21 100644
2273     --- a/fs/fuse/inode.c
2274     +++ b/fs/fuse/inode.c
2275     @@ -478,6 +478,17 @@ static const match_table_t tokens = {
2276     {OPT_ERR, NULL}
2277     };
2278    
2279     +static int fuse_match_uint(substring_t *s, unsigned int *res)
2280     +{
2281     + int err = -ENOMEM;
2282     + char *buf = match_strdup(s);
2283     + if (buf) {
2284     + err = kstrtouint(buf, 10, res);
2285     + kfree(buf);
2286     + }
2287     + return err;
2288     +}
2289     +
2290     static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
2291     {
2292     char *p;
2293     @@ -488,6 +499,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
2294     while ((p = strsep(&opt, ",")) != NULL) {
2295     int token;
2296     int value;
2297     + unsigned uv;
2298     substring_t args[MAX_OPT_ARGS];
2299     if (!*p)
2300     continue;
2301     @@ -511,18 +523,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
2302     break;
2303    
2304     case OPT_USER_ID:
2305     - if (match_int(&args[0], &value))
2306     + if (fuse_match_uint(&args[0], &uv))
2307     return 0;
2308     - d->user_id = make_kuid(current_user_ns(), value);
2309     + d->user_id = make_kuid(current_user_ns(), uv);
2310     if (!uid_valid(d->user_id))
2311     return 0;
2312     d->user_id_present = 1;
2313     break;
2314    
2315     case OPT_GROUP_ID:
2316     - if (match_int(&args[0], &value))
2317     + if (fuse_match_uint(&args[0], &uv))
2318     return 0;
2319     - d->group_id = make_kgid(current_user_ns(), value);
2320     + d->group_id = make_kgid(current_user_ns(), uv);
2321     if (!gid_valid(d->group_id))
2322     return 0;
2323     d->group_id_present = 1;
2324     diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
2325     index 9cd5f63715c0..7f30bdc57d13 100644
2326     --- a/fs/quota/dquot.c
2327     +++ b/fs/quota/dquot.c
2328     @@ -702,6 +702,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
2329     struct dquot *dquot;
2330     unsigned long freed = 0;
2331    
2332     + spin_lock(&dq_list_lock);
2333     head = free_dquots.prev;
2334     while (head != &free_dquots && sc->nr_to_scan) {
2335     dquot = list_entry(head, struct dquot, dq_free);
2336     @@ -713,6 +714,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
2337     freed++;
2338     head = free_dquots.prev;
2339     }
2340     + spin_unlock(&dq_list_lock);
2341     return freed;
2342     }
2343    
2344     diff --git a/include/net/sock.h b/include/net/sock.h
2345     index 21569cf456ed..f5a7e22fb09f 100644
2346     --- a/include/net/sock.h
2347     +++ b/include/net/sock.h
2348     @@ -1728,8 +1728,8 @@ sk_dst_get(struct sock *sk)
2349    
2350     rcu_read_lock();
2351     dst = rcu_dereference(sk->sk_dst_cache);
2352     - if (dst)
2353     - dst_hold(dst);
2354     + if (dst && !atomic_inc_not_zero(&dst->__refcnt))
2355     + dst = NULL;
2356     rcu_read_unlock();
2357     return dst;
2358     }
2359     @@ -1766,9 +1766,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
2360     static inline void
2361     sk_dst_set(struct sock *sk, struct dst_entry *dst)
2362     {
2363     - spin_lock(&sk->sk_dst_lock);
2364     - __sk_dst_set(sk, dst);
2365     - spin_unlock(&sk->sk_dst_lock);
2366     + struct dst_entry *old_dst;
2367     +
2368     + sk_tx_queue_clear(sk);
2369     + old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
2370     + dst_release(old_dst);
2371     }
2372    
2373     static inline void
2374     @@ -1780,9 +1782,7 @@ __sk_dst_reset(struct sock *sk)
2375     static inline void
2376     sk_dst_reset(struct sock *sk)
2377     {
2378     - spin_lock(&sk->sk_dst_lock);
2379     - __sk_dst_reset(sk);
2380     - spin_unlock(&sk->sk_dst_lock);
2381     + sk_dst_set(sk, NULL);
2382     }
2383    
2384     struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
2385     diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
2386     index d2b32ac27a39..ecee67a00f5f 100644
2387     --- a/kernel/Kconfig.locks
2388     +++ b/kernel/Kconfig.locks
2389     @@ -220,6 +220,9 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
2390    
2391     endif
2392    
2393     +config ARCH_SUPPORTS_ATOMIC_RMW
2394     + bool
2395     +
2396     config MUTEX_SPIN_ON_OWNER
2397     def_bool y
2398     - depends on SMP && !DEBUG_MUTEXES
2399     + depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
2400     diff --git a/kernel/events/core.c b/kernel/events/core.c
2401     index 440eefc67397..935271c4b4d4 100644
2402     --- a/kernel/events/core.c
2403     +++ b/kernel/events/core.c
2404     @@ -2315,7 +2315,7 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2405     next_parent = rcu_dereference(next_ctx->parent_ctx);
2406    
2407     /* If neither context have a parent context; they cannot be clones. */
2408     - if (!parent && !next_parent)
2409     + if (!parent || !next_parent)
2410     goto unlock;
2411    
2412     if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
2413     diff --git a/kernel/power/process.c b/kernel/power/process.c
2414     index 06ec8869dbf1..14f9a8d4725d 100644
2415     --- a/kernel/power/process.c
2416     +++ b/kernel/power/process.c
2417     @@ -184,6 +184,7 @@ void thaw_processes(void)
2418    
2419     printk("Restarting tasks ... ");
2420    
2421     + __usermodehelper_set_disable_depth(UMH_FREEZING);
2422     thaw_workqueues();
2423    
2424     read_lock(&tasklist_lock);
2425     diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
2426     index 695f9773bb60..627b3c34b821 100644
2427     --- a/kernel/sched/debug.c
2428     +++ b/kernel/sched/debug.c
2429     @@ -608,7 +608,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
2430    
2431     avg_atom = p->se.sum_exec_runtime;
2432     if (nr_switches)
2433     - do_div(avg_atom, nr_switches);
2434     + avg_atom = div64_ul(avg_atom, nr_switches);
2435     else
2436     avg_atom = -1LL;
2437    
2438     diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
2439     index 88c9c65a430d..fe75444ae7ec 100644
2440     --- a/kernel/time/alarmtimer.c
2441     +++ b/kernel/time/alarmtimer.c
2442     @@ -585,9 +585,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
2443     struct itimerspec *new_setting,
2444     struct itimerspec *old_setting)
2445     {
2446     + ktime_t exp;
2447     +
2448     if (!rtcdev)
2449     return -ENOTSUPP;
2450    
2451     + if (flags & ~TIMER_ABSTIME)
2452     + return -EINVAL;
2453     +
2454     if (old_setting)
2455     alarm_timer_get(timr, old_setting);
2456    
2457     @@ -597,8 +602,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
2458    
2459     /* start the timer */
2460     timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
2461     - alarm_start(&timr->it.alarm.alarmtimer,
2462     - timespec_to_ktime(new_setting->it_value));
2463     + exp = timespec_to_ktime(new_setting->it_value);
2464     + /* Convert (if necessary) to absolute time */
2465     + if (flags != TIMER_ABSTIME) {
2466     + ktime_t now;
2467     +
2468     + now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
2469     + exp = ktime_add(now, exp);
2470     + }
2471     +
2472     + alarm_start(&timr->it.alarm.alarmtimer, exp);
2473     return 0;
2474     }
2475    
2476     @@ -730,6 +743,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
2477     if (!alarmtimer_get_rtcdev())
2478     return -ENOTSUPP;
2479    
2480     + if (flags & ~TIMER_ABSTIME)
2481     + return -EINVAL;
2482     +
2483     if (!capable(CAP_WAKE_ALARM))
2484     return -EPERM;
2485    
2486     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
2487     index 4a54a25afa2f..5aeac5338b30 100644
2488     --- a/kernel/trace/ftrace.c
2489     +++ b/kernel/trace/ftrace.c
2490     @@ -325,12 +325,12 @@ static void update_ftrace_function(void)
2491     func = ftrace_ops_list_func;
2492     }
2493    
2494     + update_function_graph_func();
2495     +
2496     /* If there's no change, then do nothing more here */
2497     if (ftrace_trace_function == func)
2498     return;
2499    
2500     - update_function_graph_func();
2501     -
2502     /*
2503     * If we are using the list function, it doesn't care
2504     * about the function_trace_ops.
2505     diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
2506     index 7c56c3d06943..ff7027199a9a 100644
2507     --- a/kernel/trace/ring_buffer.c
2508     +++ b/kernel/trace/ring_buffer.c
2509     @@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
2510     struct ring_buffer_per_cpu *cpu_buffer;
2511     struct rb_irq_work *work;
2512    
2513     - if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
2514     - (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
2515     - return POLLIN | POLLRDNORM;
2516     -
2517     if (cpu == RING_BUFFER_ALL_CPUS)
2518     work = &buffer->irq_work;
2519     else {
2520     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2521     index 39a12265c253..4dcbf7dc3c51 100644
2522     --- a/kernel/trace/trace.c
2523     +++ b/kernel/trace/trace.c
2524     @@ -466,6 +466,12 @@ int __trace_puts(unsigned long ip, const char *str, int size)
2525     struct print_entry *entry;
2526     unsigned long irq_flags;
2527     int alloc;
2528     + int pc;
2529     +
2530     + if (!(trace_flags & TRACE_ITER_PRINTK))
2531     + return 0;
2532     +
2533     + pc = preempt_count();
2534    
2535     if (unlikely(tracing_selftest_running || tracing_disabled))
2536     return 0;
2537     @@ -475,7 +481,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
2538     local_save_flags(irq_flags);
2539     buffer = global_trace.trace_buffer.buffer;
2540     event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
2541     - irq_flags, preempt_count());
2542     + irq_flags, pc);
2543     if (!event)
2544     return 0;
2545    
2546     @@ -492,6 +498,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
2547     entry->buf[size] = '\0';
2548    
2549     __buffer_unlock_commit(buffer, event);
2550     + ftrace_trace_stack(buffer, irq_flags, 4, pc);
2551    
2552     return size;
2553     }
2554     @@ -509,6 +516,12 @@ int __trace_bputs(unsigned long ip, const char *str)
2555     struct bputs_entry *entry;
2556     unsigned long irq_flags;
2557     int size = sizeof(struct bputs_entry);
2558     + int pc;
2559     +
2560     + if (!(trace_flags & TRACE_ITER_PRINTK))
2561     + return 0;
2562     +
2563     + pc = preempt_count();
2564    
2565     if (unlikely(tracing_selftest_running || tracing_disabled))
2566     return 0;
2567     @@ -516,7 +529,7 @@ int __trace_bputs(unsigned long ip, const char *str)
2568     local_save_flags(irq_flags);
2569     buffer = global_trace.trace_buffer.buffer;
2570     event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
2571     - irq_flags, preempt_count());
2572     + irq_flags, pc);
2573     if (!event)
2574     return 0;
2575    
2576     @@ -525,6 +538,7 @@ int __trace_bputs(unsigned long ip, const char *str)
2577     entry->str = str;
2578    
2579     __buffer_unlock_commit(buffer, event);
2580     + ftrace_trace_stack(buffer, irq_flags, 4, pc);
2581    
2582     return 1;
2583     }
2584     diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
2585     index 3ddfd8f62c05..aec1dac0a4e4 100644
2586     --- a/kernel/trace/trace_events.c
2587     +++ b/kernel/trace/trace_events.c
2588     @@ -470,6 +470,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
2589    
2590     list_del(&file->list);
2591     remove_subsystem(file->system);
2592     + free_event_filter(file->filter);
2593     kmem_cache_free(file_cachep, file);
2594     }
2595    
2596     diff --git a/mm/shmem.c b/mm/shmem.c
2597     index a2801ba8ae2d..a731cef61305 100644
2598     --- a/mm/shmem.c
2599     +++ b/mm/shmem.c
2600     @@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
2601     #define SHORT_SYMLINK_LEN 128
2602    
2603     /*
2604     - * shmem_fallocate and shmem_writepage communicate via inode->i_private
2605     - * (with i_mutex making sure that it has only one user at a time):
2606     - * we would prefer not to enlarge the shmem inode just for that.
2607     + * shmem_fallocate communicates with shmem_fault or shmem_writepage via
2608     + * inode->i_private (with i_mutex making sure that it has only one user at
2609     + * a time): we would prefer not to enlarge the shmem inode just for that.
2610     */
2611     struct shmem_falloc {
2612     + wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
2613     pgoff_t start; /* start of range currently being fallocated */
2614     pgoff_t next; /* the next page offset to be fallocated */
2615     pgoff_t nr_falloced; /* how many new pages have been fallocated */
2616     @@ -467,23 +468,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
2617     return;
2618    
2619     index = start;
2620     - for ( ; ; ) {
2621     + while (index < end) {
2622     cond_resched();
2623    
2624     pvec.nr = find_get_entries(mapping, index,
2625     min(end - index, (pgoff_t)PAGEVEC_SIZE),
2626     pvec.pages, indices);
2627     if (!pvec.nr) {
2628     - if (index == start || unfalloc)
2629     + /* If all gone or hole-punch or unfalloc, we're done */
2630     + if (index == start || end != -1)
2631     break;
2632     + /* But if truncating, restart to make sure all gone */
2633     index = start;
2634     continue;
2635     }
2636     - if ((index == start || unfalloc) && indices[0] >= end) {
2637     - pagevec_remove_exceptionals(&pvec);
2638     - pagevec_release(&pvec);
2639     - break;
2640     - }
2641     mem_cgroup_uncharge_start();
2642     for (i = 0; i < pagevec_count(&pvec); i++) {
2643     struct page *page = pvec.pages[i];
2644     @@ -495,8 +493,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
2645     if (radix_tree_exceptional_entry(page)) {
2646     if (unfalloc)
2647     continue;
2648     - nr_swaps_freed += !shmem_free_swap(mapping,
2649     - index, page);
2650     + if (shmem_free_swap(mapping, index, page)) {
2651     + /* Swap was replaced by page: retry */
2652     + index--;
2653     + break;
2654     + }
2655     + nr_swaps_freed++;
2656     continue;
2657     }
2658    
2659     @@ -505,6 +507,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
2660     if (page->mapping == mapping) {
2661     VM_BUG_ON_PAGE(PageWriteback(page), page);
2662     truncate_inode_page(mapping, page);
2663     + } else {
2664     + /* Page was replaced by swap: retry */
2665     + unlock_page(page);
2666     + index--;
2667     + break;
2668     }
2669     }
2670     unlock_page(page);
2671     @@ -759,6 +766,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
2672     spin_lock(&inode->i_lock);
2673     shmem_falloc = inode->i_private;
2674     if (shmem_falloc &&
2675     + !shmem_falloc->waitq &&
2676     index >= shmem_falloc->start &&
2677     index < shmem_falloc->next)
2678     shmem_falloc->nr_unswapped++;
2679     @@ -1233,6 +1241,64 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2680     int error;
2681     int ret = VM_FAULT_LOCKED;
2682    
2683     + /*
2684     + * Trinity finds that probing a hole which tmpfs is punching can
2685     + * prevent the hole-punch from ever completing: which in turn
2686     + * locks writers out with its hold on i_mutex. So refrain from
2687     + * faulting pages into the hole while it's being punched. Although
2688     + * shmem_undo_range() does remove the additions, it may be unable to
2689     + * keep up, as each new page needs its own unmap_mapping_range() call,
2690     + * and the i_mmap tree grows ever slower to scan if new vmas are added.
2691     + *
2692     + * It does not matter if we sometimes reach this check just before the
2693     + * hole-punch begins, so that one fault then races with the punch:
2694     + * we just need to make racing faults a rare case.
2695     + *
2696     + * The implementation below would be much simpler if we just used a
2697     + * standard mutex or completion: but we cannot take i_mutex in fault,
2698     + * and bloating every shmem inode for this unlikely case would be sad.
2699     + */
2700     + if (unlikely(inode->i_private)) {
2701     + struct shmem_falloc *shmem_falloc;
2702     +
2703     + spin_lock(&inode->i_lock);
2704     + shmem_falloc = inode->i_private;
2705     + if (shmem_falloc &&
2706     + shmem_falloc->waitq &&
2707     + vmf->pgoff >= shmem_falloc->start &&
2708     + vmf->pgoff < shmem_falloc->next) {
2709     + wait_queue_head_t *shmem_falloc_waitq;
2710     + DEFINE_WAIT(shmem_fault_wait);
2711     +
2712     + ret = VM_FAULT_NOPAGE;
2713     + if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
2714     + !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
2715     + /* It's polite to up mmap_sem if we can */
2716     + up_read(&vma->vm_mm->mmap_sem);
2717     + ret = VM_FAULT_RETRY;
2718     + }
2719     +
2720     + shmem_falloc_waitq = shmem_falloc->waitq;
2721     + prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2722     + TASK_UNINTERRUPTIBLE);
2723     + spin_unlock(&inode->i_lock);
2724     + schedule();
2725     +
2726     + /*
2727     + * shmem_falloc_waitq points into the shmem_fallocate()
2728     + * stack of the hole-punching task: shmem_falloc_waitq
2729     + * is usually invalid by the time we reach here, but
2730     + * finish_wait() does not dereference it in that case;
2731     + * though i_lock needed lest racing with wake_up_all().
2732     + */
2733     + spin_lock(&inode->i_lock);
2734     + finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2735     + spin_unlock(&inode->i_lock);
2736     + return ret;
2737     + }
2738     + spin_unlock(&inode->i_lock);
2739     + }
2740     +
2741     error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
2742     if (error)
2743     return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
2744     @@ -1737,12 +1803,25 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2745     struct address_space *mapping = file->f_mapping;
2746     loff_t unmap_start = round_up(offset, PAGE_SIZE);
2747     loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2748     + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2749     +
2750     + shmem_falloc.waitq = &shmem_falloc_waitq;
2751     + shmem_falloc.start = unmap_start >> PAGE_SHIFT;
2752     + shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2753     + spin_lock(&inode->i_lock);
2754     + inode->i_private = &shmem_falloc;
2755     + spin_unlock(&inode->i_lock);
2756    
2757     if ((u64)unmap_end > (u64)unmap_start)
2758     unmap_mapping_range(mapping, unmap_start,
2759     1 + unmap_end - unmap_start, 0);
2760     shmem_truncate_range(inode, offset, offset + len - 1);
2761     /* No need to unmap again: hole-punching leaves COWed pages */
2762     +
2763     + spin_lock(&inode->i_lock);
2764     + inode->i_private = NULL;
2765     + wake_up_all(&shmem_falloc_waitq);
2766     + spin_unlock(&inode->i_lock);
2767     error = 0;
2768     goto out;
2769     }
2770     @@ -1760,6 +1839,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2771     goto out;
2772     }
2773    
2774     + shmem_falloc.waitq = NULL;
2775     shmem_falloc.start = start;
2776     shmem_falloc.next = start;
2777     shmem_falloc.nr_falloced = 0;
2778     diff --git a/mm/vmscan.c b/mm/vmscan.c
2779     index a50bde6edbbc..229c017cd090 100644
2780     --- a/mm/vmscan.c
2781     +++ b/mm/vmscan.c
2782     @@ -1554,19 +1554,18 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
2783     * If dirty pages are scanned that are not queued for IO, it
2784     * implies that flushers are not keeping up. In this case, flag
2785     * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
2786     - * pages from reclaim context. It will forcibly stall in the
2787     - * next check.
2788     + * pages from reclaim context.
2789     */
2790     if (nr_unqueued_dirty == nr_taken)
2791     zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
2792    
2793     /*
2794     - * In addition, if kswapd scans pages marked marked for
2795     - * immediate reclaim and under writeback (nr_immediate), it
2796     - * implies that pages are cycling through the LRU faster than
2797     + * If kswapd scans pages marked marked for immediate
2798     + * reclaim and under writeback (nr_immediate), it implies
2799     + * that pages are cycling through the LRU faster than
2800     * they are written so also forcibly stall.
2801     */
2802     - if (nr_unqueued_dirty == nr_taken || nr_immediate)
2803     + if (nr_immediate)
2804     congestion_wait(BLK_RW_ASYNC, HZ/10);
2805     }
2806    
2807     diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
2808     index 3c32bd257b73..80e0d0360b80 100644
2809     --- a/net/8021q/vlan_core.c
2810     +++ b/net/8021q/vlan_core.c
2811     @@ -114,8 +114,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_proto);
2812    
2813     static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
2814     {
2815     - if (skb_cow(skb, skb_headroom(skb)) < 0)
2816     + if (skb_cow(skb, skb_headroom(skb)) < 0) {
2817     + kfree_skb(skb);
2818     return NULL;
2819     + }
2820     +
2821     memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
2822     skb->mac_header += VLAN_HLEN;
2823     return skb;
2824     diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
2825     index 019efb79708f..6806d03a67a1 100644
2826     --- a/net/8021q/vlan_dev.c
2827     +++ b/net/8021q/vlan_dev.c
2828     @@ -627,8 +627,6 @@ static void vlan_dev_uninit(struct net_device *dev)
2829     struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
2830     int i;
2831    
2832     - free_percpu(vlan->vlan_pcpu_stats);
2833     - vlan->vlan_pcpu_stats = NULL;
2834     for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
2835     while ((pm = vlan->egress_priority_map[i]) != NULL) {
2836     vlan->egress_priority_map[i] = pm->next;
2837     @@ -787,6 +785,15 @@ static const struct net_device_ops vlan_netdev_ops = {
2838     .ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
2839     };
2840    
2841     +static void vlan_dev_free(struct net_device *dev)
2842     +{
2843     + struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
2844     +
2845     + free_percpu(vlan->vlan_pcpu_stats);
2846     + vlan->vlan_pcpu_stats = NULL;
2847     + free_netdev(dev);
2848     +}
2849     +
2850     void vlan_setup(struct net_device *dev)
2851     {
2852     ether_setup(dev);
2853     @@ -796,7 +803,7 @@ void vlan_setup(struct net_device *dev)
2854     dev->tx_queue_len = 0;
2855    
2856     dev->netdev_ops = &vlan_netdev_ops;
2857     - dev->destructor = free_netdev;
2858     + dev->destructor = vlan_dev_free;
2859     dev->ethtool_ops = &vlan_ethtool_ops;
2860    
2861     memset(dev->broadcast, 0, ETH_ALEN);
2862     diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
2863     index 786ee2f83d5f..82f6e63e1a4a 100644
2864     --- a/net/appletalk/ddp.c
2865     +++ b/net/appletalk/ddp.c
2866     @@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
2867     goto drop;
2868    
2869     /* Queue packet (standard) */
2870     - skb->sk = sock;
2871     -
2872     if (sock_queue_rcv_skb(sock, skb) < 0)
2873     goto drop;
2874    
2875     @@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
2876     if (!skb)
2877     goto out;
2878    
2879     - skb->sk = sk;
2880     skb_reserve(skb, ddp_dl->header_length);
2881     skb_reserve(skb, dev->hard_header_len);
2882     skb->dev = dev;
2883     diff --git a/net/core/dev.c b/net/core/dev.c
2884     index a30bef1882f5..a7621f3ff505 100644
2885     --- a/net/core/dev.c
2886     +++ b/net/core/dev.c
2887     @@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly; /* Taps */
2888     static struct list_head offload_base __read_mostly;
2889    
2890     static int netif_rx_internal(struct sk_buff *skb);
2891     +static int call_netdevice_notifiers_info(unsigned long val,
2892     + struct net_device *dev,
2893     + struct netdev_notifier_info *info);
2894    
2895     /*
2896     * The @dev_base_head list is protected by @dev_base_lock and the rtnl
2897     @@ -1207,7 +1210,11 @@ EXPORT_SYMBOL(netdev_features_change);
2898     void netdev_state_change(struct net_device *dev)
2899     {
2900     if (dev->flags & IFF_UP) {
2901     - call_netdevice_notifiers(NETDEV_CHANGE, dev);
2902     + struct netdev_notifier_change_info change_info;
2903     +
2904     + change_info.flags_changed = 0;
2905     + call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
2906     + &change_info.info);
2907     rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
2908     }
2909     }
2910     @@ -4057,6 +4064,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2911     skb->vlan_tci = 0;
2912     skb->dev = napi->dev;
2913     skb->skb_iif = 0;
2914     + skb->encapsulation = 0;
2915     + skb_shinfo(skb)->gso_type = 0;
2916     skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
2917    
2918     napi->skb = skb;
2919     diff --git a/net/core/dst.c b/net/core/dst.c
2920     index 80d6286c8b62..a028409ee438 100644
2921     --- a/net/core/dst.c
2922     +++ b/net/core/dst.c
2923     @@ -269,6 +269,15 @@ again:
2924     }
2925     EXPORT_SYMBOL(dst_destroy);
2926    
2927     +static void dst_destroy_rcu(struct rcu_head *head)
2928     +{
2929     + struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
2930     +
2931     + dst = dst_destroy(dst);
2932     + if (dst)
2933     + __dst_free(dst);
2934     +}
2935     +
2936     void dst_release(struct dst_entry *dst)
2937     {
2938     if (dst) {
2939     @@ -276,11 +285,8 @@ void dst_release(struct dst_entry *dst)
2940    
2941     newrefcnt = atomic_dec_return(&dst->__refcnt);
2942     WARN_ON(newrefcnt < 0);
2943     - if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
2944     - dst = dst_destroy(dst);
2945     - if (dst)
2946     - __dst_free(dst);
2947     - }
2948     + if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
2949     + call_rcu(&dst->rcu_head, dst_destroy_rcu);
2950     }
2951     }
2952     EXPORT_SYMBOL(dst_release);
2953     diff --git a/net/core/filter.c b/net/core/filter.c
2954     index 4aec7b93f1a9..5310d5e0884f 100644
2955     --- a/net/core/filter.c
2956     +++ b/net/core/filter.c
2957     @@ -872,7 +872,7 @@ int sk_convert_filter(struct sock_filter *prog, int len,
2958     BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
2959     BUILD_BUG_ON(FP_REG + 1 != MAX_BPF_REG);
2960    
2961     - if (len <= 0 || len >= BPF_MAXINSNS)
2962     + if (len <= 0 || len > BPF_MAXINSNS)
2963     return -EINVAL;
2964    
2965     if (new_prog) {
2966     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2967     index 8383b2bddeb9..9433047b2453 100644
2968     --- a/net/core/skbuff.c
2969     +++ b/net/core/skbuff.c
2970     @@ -2881,12 +2881,13 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
2971     int pos;
2972     int dummy;
2973    
2974     + __skb_push(head_skb, doffset);
2975     proto = skb_network_protocol(head_skb, &dummy);
2976     if (unlikely(!proto))
2977     return ERR_PTR(-EINVAL);
2978    
2979     csum = !!can_checksum_protocol(features, proto);
2980     - __skb_push(head_skb, doffset);
2981     +
2982     headroom = skb_headroom(head_skb);
2983     pos = skb_headlen(head_skb);
2984    
2985     diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
2986     index e7b6d53eef88..f005cc760535 100644
2987     --- a/net/dns_resolver/dns_query.c
2988     +++ b/net/dns_resolver/dns_query.c
2989     @@ -149,7 +149,9 @@ int dns_query(const char *type, const char *name, size_t namelen,
2990     if (!*_result)
2991     goto put;
2992    
2993     - memcpy(*_result, upayload->data, len + 1);
2994     + memcpy(*_result, upayload->data, len);
2995     + (*_result)[len] = '\0';
2996     +
2997     if (_expiry)
2998     *_expiry = rkey->expiry;
2999    
3000     diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
3001     index 6d6dd345bc4d..6af8ab6e1706 100644
3002     --- a/net/ipv4/af_inet.c
3003     +++ b/net/ipv4/af_inet.c
3004     @@ -1434,6 +1434,9 @@ static int inet_gro_complete(struct sk_buff *skb, int nhoff)
3005     int proto = iph->protocol;
3006     int err = -ENOSYS;
3007    
3008     + if (skb->encapsulation)
3009     + skb_set_inner_network_header(skb, nhoff);
3010     +
3011     csum_replace2(&iph->check, iph->tot_len, newlen);
3012     iph->tot_len = newlen;
3013    
3014     diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
3015     index f1d32280cb54..2d24f293f977 100644
3016     --- a/net/ipv4/gre_offload.c
3017     +++ b/net/ipv4/gre_offload.c
3018     @@ -255,6 +255,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
3019     int err = -ENOENT;
3020     __be16 type;
3021    
3022     + skb->encapsulation = 1;
3023     + skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
3024     +
3025     type = greh->protocol;
3026     if (greh->flags & GRE_KEY)
3027     grehlen += GRE_HEADER_SECTION;
3028     diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
3029     index 0134663fdbce..1e4aa8354f93 100644
3030     --- a/net/ipv4/icmp.c
3031     +++ b/net/ipv4/icmp.c
3032     @@ -732,8 +732,6 @@ static void icmp_unreach(struct sk_buff *skb)
3033     /* fall through */
3034     case 0:
3035     info = ntohs(icmph->un.frag.mtu);
3036     - if (!info)
3037     - goto out;
3038     }
3039     break;
3040     case ICMP_SR_FAILED:
3041     diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
3042     index 97e4d1655d26..9db3b877fcaf 100644
3043     --- a/net/ipv4/igmp.c
3044     +++ b/net/ipv4/igmp.c
3045     @@ -1952,6 +1952,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
3046    
3047     rtnl_lock();
3048     in_dev = ip_mc_find_dev(net, imr);
3049     + if (!in_dev) {
3050     + ret = -ENODEV;
3051     + goto out;
3052     + }
3053     ifindex = imr->imr_ifindex;
3054     for (imlp = &inet->mc_list;
3055     (iml = rtnl_dereference(*imlp)) != NULL;
3056     @@ -1969,16 +1973,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
3057    
3058     *imlp = iml->next_rcu;
3059    
3060     - if (in_dev)
3061     - ip_mc_dec_group(in_dev, group);
3062     + ip_mc_dec_group(in_dev, group);
3063     rtnl_unlock();
3064     /* decrease mem now to avoid the memleak warning */
3065     atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
3066     kfree_rcu(iml, rcu);
3067     return 0;
3068     }
3069     - if (!in_dev)
3070     - ret = -ENODEV;
3071     +out:
3072     rtnl_unlock();
3073     return ret;
3074     }
3075     diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
3076     index f4ab72e19af9..96f90b89df32 100644
3077     --- a/net/ipv4/ip_options.c
3078     +++ b/net/ipv4/ip_options.c
3079     @@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
3080     optptr++;
3081     continue;
3082     }
3083     + if (unlikely(l < 2)) {
3084     + pp_ptr = optptr;
3085     + goto error;
3086     + }
3087     optlen = optptr[1];
3088     if (optlen < 2 || optlen > l) {
3089     pp_ptr = optptr;
3090     diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
3091     index 2acc2337d38b..b77b6a55b05e 100644
3092     --- a/net/ipv4/ip_tunnel.c
3093     +++ b/net/ipv4/ip_tunnel.c
3094     @@ -73,12 +73,7 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
3095     {
3096     struct dst_entry *old_dst;
3097    
3098     - if (dst) {
3099     - if (dst->flags & DST_NOCACHE)
3100     - dst = NULL;
3101     - else
3102     - dst_clone(dst);
3103     - }
3104     + dst_clone(dst);
3105     old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
3106     dst_release(old_dst);
3107     }
3108     @@ -108,13 +103,14 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
3109    
3110     rcu_read_lock();
3111     dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
3112     + if (dst && !atomic_inc_not_zero(&dst->__refcnt))
3113     + dst = NULL;
3114     if (dst) {
3115     if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
3116     - rcu_read_unlock();
3117     tunnel_dst_reset(t);
3118     - return NULL;
3119     + dst_release(dst);
3120     + dst = NULL;
3121     }
3122     - dst_hold(dst);
3123     }
3124     rcu_read_unlock();
3125     return (struct rtable *)dst;
3126     @@ -173,6 +169,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
3127    
3128     hlist_for_each_entry_rcu(t, head, hash_node) {
3129     if (remote != t->parms.iph.daddr ||
3130     + t->parms.iph.saddr != 0 ||
3131     !(t->dev->flags & IFF_UP))
3132     continue;
3133    
3134     @@ -189,10 +186,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
3135     head = &itn->tunnels[hash];
3136    
3137     hlist_for_each_entry_rcu(t, head, hash_node) {
3138     - if ((local != t->parms.iph.saddr &&
3139     - (local != t->parms.iph.daddr ||
3140     - !ipv4_is_multicast(local))) ||
3141     - !(t->dev->flags & IFF_UP))
3142     + if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
3143     + (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
3144     + continue;
3145     +
3146     + if (!(t->dev->flags & IFF_UP))
3147     continue;
3148    
3149     if (!ip_tunnel_key_match(&t->parms, flags, key))
3150     @@ -209,6 +207,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
3151    
3152     hlist_for_each_entry_rcu(t, head, hash_node) {
3153     if (t->parms.i_key != key ||
3154     + t->parms.iph.saddr != 0 ||
3155     + t->parms.iph.daddr != 0 ||
3156     !(t->dev->flags & IFF_UP))
3157     continue;
3158    
3159     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3160     index 5e676be3daeb..be9f2b1ac3ab 100644
3161     --- a/net/ipv4/route.c
3162     +++ b/net/ipv4/route.c
3163     @@ -1022,7 +1022,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
3164     const struct iphdr *iph = (const struct iphdr *) skb->data;
3165     struct flowi4 fl4;
3166     struct rtable *rt;
3167     - struct dst_entry *dst;
3168     + struct dst_entry *odst = NULL;
3169     bool new = false;
3170    
3171     bh_lock_sock(sk);
3172     @@ -1030,16 +1030,17 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
3173     if (!ip_sk_accept_pmtu(sk))
3174     goto out;
3175    
3176     - rt = (struct rtable *) __sk_dst_get(sk);
3177     + odst = sk_dst_get(sk);
3178    
3179     - if (sock_owned_by_user(sk) || !rt) {
3180     + if (sock_owned_by_user(sk) || !odst) {
3181     __ipv4_sk_update_pmtu(skb, sk, mtu);
3182     goto out;
3183     }
3184    
3185     __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
3186    
3187     - if (!__sk_dst_check(sk, 0)) {
3188     + rt = (struct rtable *)odst;
3189     + if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
3190     rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
3191     if (IS_ERR(rt))
3192     goto out;
3193     @@ -1049,8 +1050,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
3194    
3195     __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
3196    
3197     - dst = dst_check(&rt->dst, 0);
3198     - if (!dst) {
3199     + if (!dst_check(&rt->dst, 0)) {
3200     if (new)
3201     dst_release(&rt->dst);
3202    
3203     @@ -1062,10 +1062,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
3204     }
3205    
3206     if (new)
3207     - __sk_dst_set(sk, &rt->dst);
3208     + sk_dst_set(sk, &rt->dst);
3209    
3210     out:
3211     bh_unlock_sock(sk);
3212     + dst_release(odst);
3213     }
3214     EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
3215    
3216     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
3217     index 4bd6d52eeffb..599e53e760e3 100644
3218     --- a/net/ipv4/tcp.c
3219     +++ b/net/ipv4/tcp.c
3220     @@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
3221     if (unlikely(tp->repair)) {
3222     if (tp->repair_queue == TCP_RECV_QUEUE) {
3223     copied = tcp_send_rcvq(sk, msg, size);
3224     - goto out;
3225     + goto out_nopush;
3226     }
3227    
3228     err = -EINVAL;
3229     @@ -1282,6 +1282,7 @@ wait_for_memory:
3230     out:
3231     if (copied)
3232     tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
3233     +out_nopush:
3234     release_sock(sk);
3235     return copied + copied_syn;
3236    
3237     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3238     index 3a26b3b23f16..09b85cdda165 100644
3239     --- a/net/ipv4/tcp_input.c
3240     +++ b/net/ipv4/tcp_input.c
3241     @@ -1106,7 +1106,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
3242     }
3243    
3244     /* D-SACK for already forgotten data... Do dumb counting. */
3245     - if (dup_sack && tp->undo_marker && tp->undo_retrans &&
3246     + if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
3247     !after(end_seq_0, prior_snd_una) &&
3248     after(end_seq_0, tp->undo_marker))
3249     tp->undo_retrans--;
3250     @@ -1162,7 +1162,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
3251     unsigned int new_len = (pkt_len / mss) * mss;
3252     if (!in_sack && new_len < pkt_len) {
3253     new_len += mss;
3254     - if (new_len > skb->len)
3255     + if (new_len >= skb->len)
3256     return 0;
3257     }
3258     pkt_len = new_len;
3259     @@ -1187,7 +1187,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
3260    
3261     /* Account D-SACK for retransmitted packet. */
3262     if (dup_sack && (sacked & TCPCB_RETRANS)) {
3263     - if (tp->undo_marker && tp->undo_retrans &&
3264     + if (tp->undo_marker && tp->undo_retrans > 0 &&
3265     after(end_seq, tp->undo_marker))
3266     tp->undo_retrans--;
3267     if (sacked & TCPCB_SACKED_ACKED)
3268     @@ -1893,7 +1893,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
3269     tp->lost_out = 0;
3270    
3271     tp->undo_marker = 0;
3272     - tp->undo_retrans = 0;
3273     + tp->undo_retrans = -1;
3274     }
3275    
3276     void tcp_clear_retrans(struct tcp_sock *tp)
3277     @@ -2664,7 +2664,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
3278    
3279     tp->prior_ssthresh = 0;
3280     tp->undo_marker = tp->snd_una;
3281     - tp->undo_retrans = tp->retrans_out;
3282     + tp->undo_retrans = tp->retrans_out ? : -1;
3283    
3284     if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
3285     if (!ece_ack)
3286     diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
3287     index b92b81718ca4..c25953a386d0 100644
3288     --- a/net/ipv4/tcp_offload.c
3289     +++ b/net/ipv4/tcp_offload.c
3290     @@ -310,7 +310,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
3291    
3292     th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
3293     iph->daddr, 0);
3294     - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
3295     + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
3296    
3297     return tcp_gro_complete(skb);
3298     }
3299     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
3300     index 12d6016bdd9a..589b5ac1339e 100644
3301     --- a/net/ipv4/tcp_output.c
3302     +++ b/net/ipv4/tcp_output.c
3303     @@ -2472,8 +2472,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
3304     if (!tp->retrans_stamp)
3305     tp->retrans_stamp = TCP_SKB_CB(skb)->when;
3306    
3307     - tp->undo_retrans += tcp_skb_pcount(skb);
3308     -
3309     /* snd_nxt is stored to detect loss of retransmitted segment,
3310     * see tcp_input.c tcp_sacktag_write_queue().
3311     */
3312     @@ -2481,6 +2479,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
3313     } else {
3314     NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
3315     }
3316     +
3317     + if (tp->undo_retrans < 0)
3318     + tp->undo_retrans = 0;
3319     + tp->undo_retrans += tcp_skb_pcount(skb);
3320     return err;
3321     }
3322    
3323     diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
3324     index 8517d3cd1aed..01b0ff9a0c2c 100644
3325     --- a/net/ipv6/tcpv6_offload.c
3326     +++ b/net/ipv6/tcpv6_offload.c
3327     @@ -73,7 +73,7 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
3328    
3329     th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
3330     &iph->daddr, 0);
3331     - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
3332     + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
3333    
3334     return tcp_gro_complete(skb);
3335     }
3336     diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
3337     index f22757a29cd0..6c0fe9766094 100644
3338     --- a/net/netlink/af_netlink.c
3339     +++ b/net/netlink/af_netlink.c
3340     @@ -636,7 +636,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
3341     while (nlk->cb_running && netlink_dump_space(nlk)) {
3342     err = netlink_dump(sk);
3343     if (err < 0) {
3344     - sk->sk_err = err;
3345     + sk->sk_err = -err;
3346     sk->sk_error_report(sk);
3347     break;
3348     }
3349     @@ -2453,7 +2453,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
3350     atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
3351     ret = netlink_dump(sk);
3352     if (ret) {
3353     - sk->sk_err = ret;
3354     + sk->sk_err = -ret;
3355     sk->sk_error_report(sk);
3356     }
3357     }
3358     diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
3359     index c82fdc1eab7c..dfa532f00d88 100644
3360     --- a/net/sctp/sysctl.c
3361     +++ b/net/sctp/sysctl.c
3362     @@ -307,41 +307,40 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
3363     loff_t *ppos)
3364     {
3365     struct net *net = current->nsproxy->net_ns;
3366     - char tmp[8];
3367     struct ctl_table tbl;
3368     - int ret;
3369     - int changed = 0;
3370     + bool changed = false;
3371     char *none = "none";
3372     + char tmp[8];
3373     + int ret;
3374    
3375     memset(&tbl, 0, sizeof(struct ctl_table));
3376    
3377     if (write) {
3378     tbl.data = tmp;
3379     - tbl.maxlen = 8;
3380     + tbl.maxlen = sizeof(tmp);
3381     } else {
3382     tbl.data = net->sctp.sctp_hmac_alg ? : none;
3383     tbl.maxlen = strlen(tbl.data);
3384     }
3385     - ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
3386    
3387     - if (write) {
3388     + ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
3389     + if (write && ret == 0) {
3390     #ifdef CONFIG_CRYPTO_MD5
3391     if (!strncmp(tmp, "md5", 3)) {
3392     net->sctp.sctp_hmac_alg = "md5";
3393     - changed = 1;
3394     + changed = true;
3395     }
3396     #endif
3397     #ifdef CONFIG_CRYPTO_SHA1
3398     if (!strncmp(tmp, "sha1", 4)) {
3399     net->sctp.sctp_hmac_alg = "sha1";
3400     - changed = 1;
3401     + changed = true;
3402     }
3403     #endif
3404     if (!strncmp(tmp, "none", 4)) {
3405     net->sctp.sctp_hmac_alg = NULL;
3406     - changed = 1;
3407     + changed = true;
3408     }
3409     -
3410     if (!changed)
3411     ret = -EINVAL;
3412     }
3413     @@ -354,11 +353,10 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
3414     loff_t *ppos)
3415     {
3416     struct net *net = current->nsproxy->net_ns;
3417     - int new_value;
3418     - struct ctl_table tbl;
3419     unsigned int min = *(unsigned int *) ctl->extra1;
3420     unsigned int max = *(unsigned int *) ctl->extra2;
3421     - int ret;
3422     + struct ctl_table tbl;
3423     + int ret, new_value;
3424    
3425     memset(&tbl, 0, sizeof(struct ctl_table));
3426     tbl.maxlen = sizeof(unsigned int);
3427     @@ -367,12 +365,15 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
3428     tbl.data = &new_value;
3429     else
3430     tbl.data = &net->sctp.rto_min;
3431     +
3432     ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
3433     - if (write) {
3434     - if (ret || new_value > max || new_value < min)
3435     + if (write && ret == 0) {
3436     + if (new_value > max || new_value < min)
3437     return -EINVAL;
3438     +
3439     net->sctp.rto_min = new_value;
3440     }
3441     +
3442     return ret;
3443     }
3444    
3445     @@ -381,11 +382,10 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
3446     loff_t *ppos)
3447     {
3448     struct net *net = current->nsproxy->net_ns;
3449     - int new_value;
3450     - struct ctl_table tbl;
3451     unsigned int min = *(unsigned int *) ctl->extra1;
3452     unsigned int max = *(unsigned int *) ctl->extra2;
3453     - int ret;
3454     + struct ctl_table tbl;
3455     + int ret, new_value;
3456    
3457     memset(&tbl, 0, sizeof(struct ctl_table));
3458     tbl.maxlen = sizeof(unsigned int);
3459     @@ -394,12 +394,15 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
3460     tbl.data = &new_value;
3461     else
3462     tbl.data = &net->sctp.rto_max;
3463     +
3464     ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
3465     - if (write) {
3466     - if (ret || new_value > max || new_value < min)
3467     + if (write && ret == 0) {
3468     + if (new_value > max || new_value < min)
3469     return -EINVAL;
3470     +
3471     net->sctp.rto_max = new_value;
3472     }
3473     +
3474     return ret;
3475     }
3476    
3477     @@ -420,8 +423,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
3478     tbl.data = &net->sctp.auth_enable;
3479    
3480     ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
3481     -
3482     - if (write) {
3483     + if (write && ret == 0) {
3484     struct sock *sk = net->sctp.ctl_sock;
3485    
3486     net->sctp.auth_enable = new_value;
3487     diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
3488     index 85c64658bd0b..b6842fdb53d4 100644
3489     --- a/net/sctp/ulpevent.c
3490     +++ b/net/sctp/ulpevent.c
3491     @@ -366,9 +366,10 @@ fail:
3492     * specification [SCTP] and any extensions for a list of possible
3493     * error formats.
3494     */
3495     -struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
3496     - const struct sctp_association *asoc, struct sctp_chunk *chunk,
3497     - __u16 flags, gfp_t gfp)
3498     +struct sctp_ulpevent *
3499     +sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
3500     + struct sctp_chunk *chunk, __u16 flags,
3501     + gfp_t gfp)
3502     {
3503     struct sctp_ulpevent *event;
3504     struct sctp_remote_error *sre;
3505     @@ -387,8 +388,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
3506     /* Copy the skb to a new skb with room for us to prepend
3507     * notification with.
3508     */
3509     - skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
3510     - 0, gfp);
3511     + skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
3512    
3513     /* Pull off the rest of the cause TLV from the chunk. */
3514     skb_pull(chunk->skb, elen);
3515     @@ -399,62 +399,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
3516     event = sctp_skb2event(skb);
3517     sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
3518    
3519     - sre = (struct sctp_remote_error *)
3520     - skb_push(skb, sizeof(struct sctp_remote_error));
3521     + sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
3522    
3523     /* Trim the buffer to the right length. */
3524     - skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
3525     + skb_trim(skb, sizeof(*sre) + elen);
3526    
3527     - /* Socket Extensions for SCTP
3528     - * 5.3.1.3 SCTP_REMOTE_ERROR
3529     - *
3530     - * sre_type:
3531     - * It should be SCTP_REMOTE_ERROR.
3532     - */
3533     + /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
3534     + memset(sre, 0, sizeof(*sre));
3535     sre->sre_type = SCTP_REMOTE_ERROR;
3536     -
3537     - /*
3538     - * Socket Extensions for SCTP
3539     - * 5.3.1.3 SCTP_REMOTE_ERROR
3540     - *
3541     - * sre_flags: 16 bits (unsigned integer)
3542     - * Currently unused.
3543     - */
3544     sre->sre_flags = 0;
3545     -
3546     - /* Socket Extensions for SCTP
3547     - * 5.3.1.3 SCTP_REMOTE_ERROR
3548     - *
3549     - * sre_length: sizeof (__u32)
3550     - *
3551     - * This field is the total length of the notification data,
3552     - * including the notification header.
3553     - */
3554     sre->sre_length = skb->len;
3555     -
3556     - /* Socket Extensions for SCTP
3557     - * 5.3.1.3 SCTP_REMOTE_ERROR
3558     - *
3559     - * sre_error: 16 bits (unsigned integer)
3560     - * This value represents one of the Operational Error causes defined in
3561     - * the SCTP specification, in network byte order.
3562     - */
3563     sre->sre_error = cause;
3564     -
3565     - /* Socket Extensions for SCTP
3566     - * 5.3.1.3 SCTP_REMOTE_ERROR
3567     - *
3568     - * sre_assoc_id: sizeof (sctp_assoc_t)
3569     - *
3570     - * The association id field, holds the identifier for the association.
3571     - * All notifications for a given association have the same association
3572     - * identifier. For TCP style socket, this field is ignored.
3573     - */
3574     sctp_ulpevent_set_owner(event, asoc);
3575     sre->sre_assoc_id = sctp_assoc2id(asoc);
3576    
3577     return event;
3578     -
3579     fail:
3580     return NULL;
3581     }
3582     @@ -899,7 +858,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
3583     return notification->sn_header.sn_type;
3584     }
3585    
3586     -/* Copy out the sndrcvinfo into a msghdr. */
3587     +/* RFC6458, Section 5.3.2. SCTP Header Information Structure
3588     + * (SCTP_SNDRCV, DEPRECATED)
3589     + */
3590     void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
3591     struct msghdr *msghdr)
3592     {
3593     @@ -908,74 +869,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
3594     if (sctp_ulpevent_is_notification(event))
3595     return;
3596    
3597     - /* Sockets API Extensions for SCTP
3598     - * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
3599     - *
3600     - * sinfo_stream: 16 bits (unsigned integer)
3601     - *
3602     - * For recvmsg() the SCTP stack places the message's stream number in
3603     - * this value.
3604     - */
3605     + memset(&sinfo, 0, sizeof(sinfo));
3606     sinfo.sinfo_stream = event->stream;
3607     - /* sinfo_ssn: 16 bits (unsigned integer)
3608     - *
3609     - * For recvmsg() this value contains the stream sequence number that
3610     - * the remote endpoint placed in the DATA chunk. For fragmented
3611     - * messages this is the same number for all deliveries of the message
3612     - * (if more than one recvmsg() is needed to read the message).
3613     - */
3614     sinfo.sinfo_ssn = event->ssn;
3615     - /* sinfo_ppid: 32 bits (unsigned integer)
3616     - *
3617     - * In recvmsg() this value is
3618     - * the same information that was passed by the upper layer in the peer
3619     - * application. Please note that byte order issues are NOT accounted
3620     - * for and this information is passed opaquely by the SCTP stack from
3621     - * one end to the other.
3622     - */
3623     sinfo.sinfo_ppid = event->ppid;
3624     - /* sinfo_flags: 16 bits (unsigned integer)
3625     - *
3626     - * This field may contain any of the following flags and is composed of
3627     - * a bitwise OR of these values.
3628     - *
3629     - * recvmsg() flags:
3630     - *
3631     - * SCTP_UNORDERED - This flag is present when the message was sent
3632     - * non-ordered.
3633     - */
3634     sinfo.sinfo_flags = event->flags;
3635     - /* sinfo_tsn: 32 bit (unsigned integer)
3636     - *
3637     - * For the receiving side, this field holds a TSN that was
3638     - * assigned to one of the SCTP Data Chunks.
3639     - */
3640     sinfo.sinfo_tsn = event->tsn;
3641     - /* sinfo_cumtsn: 32 bit (unsigned integer)
3642     - *
3643     - * This field will hold the current cumulative TSN as
3644     - * known by the underlying SCTP layer. Note this field is
3645     - * ignored when sending and only valid for a receive
3646     - * operation when sinfo_flags are set to SCTP_UNORDERED.
3647     - */
3648     sinfo.sinfo_cumtsn = event->cumtsn;
3649     - /* sinfo_assoc_id: sizeof (sctp_assoc_t)
3650     - *
3651     - * The association handle field, sinfo_assoc_id, holds the identifier
3652     - * for the association announced in the COMMUNICATION_UP notification.
3653     - * All notifications for a given association have the same identifier.
3654     - * Ignored for one-to-one style sockets.
3655     - */
3656     sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
3657     -
3658     - /* context value that is set via SCTP_CONTEXT socket option. */
3659     + /* Context value that is set via SCTP_CONTEXT socket option. */
3660     sinfo.sinfo_context = event->asoc->default_rcv_context;
3661     -
3662     /* These fields are not used while receiving. */
3663     sinfo.sinfo_timetolive = 0;
3664    
3665     put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
3666     - sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
3667     + sizeof(sinfo), &sinfo);
3668     }
3669    
3670     /* Do accounting for bytes received and hold a reference to the association
3671     diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
3672     index 95ab5ef92920..958279c8e7d7 100644
3673     --- a/net/tipc/bcast.c
3674     +++ b/net/tipc/bcast.c
3675     @@ -536,6 +536,7 @@ receive:
3676    
3677     buf = node->bclink.deferred_head;
3678     node->bclink.deferred_head = buf->next;
3679     + buf->next = NULL;
3680     node->bclink.deferred_size--;
3681     goto receive;
3682     }
3683     diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
3684     index 480bbddbd801..6df04d91c93c 100644
3685     --- a/sound/pci/hda/hda_controller.c
3686     +++ b/sound/pci/hda/hda_controller.c
3687     @@ -193,7 +193,8 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
3688     dsp_unlock(azx_dev);
3689     return azx_dev;
3690     }
3691     - if (!res)
3692     + if (!res ||
3693     + (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
3694     res = azx_dev;
3695     }
3696     dsp_unlock(azx_dev);
3697     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
3698     index bc36f9cdd9d2..893beca9d45f 100644
3699     --- a/sound/pci/hda/hda_intel.c
3700     +++ b/sound/pci/hda/hda_intel.c
3701     @@ -227,7 +227,7 @@ enum {
3702     /* quirks for Intel PCH */
3703     #define AZX_DCAPS_INTEL_PCH_NOPM \
3704     (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
3705     - AZX_DCAPS_COUNT_LPIB_DELAY)
3706     + AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_REVERSE_ASSIGN)
3707    
3708     #define AZX_DCAPS_INTEL_PCH \
3709     (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
3710     @@ -590,7 +590,7 @@ static int azx_suspend(struct device *dev)
3711     struct azx *chip = card->private_data;
3712     struct azx_pcm *p;
3713    
3714     - if (chip->disabled)
3715     + if (chip->disabled || chip->init_failed)
3716     return 0;
3717    
3718     snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
3719     @@ -622,7 +622,7 @@ static int azx_resume(struct device *dev)
3720     struct snd_card *card = dev_get_drvdata(dev);
3721     struct azx *chip = card->private_data;
3722    
3723     - if (chip->disabled)
3724     + if (chip->disabled || chip->init_failed)
3725     return 0;
3726    
3727     if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
3728     @@ -659,7 +659,7 @@ static int azx_runtime_suspend(struct device *dev)
3729     struct snd_card *card = dev_get_drvdata(dev);
3730     struct azx *chip = card->private_data;
3731    
3732     - if (chip->disabled)
3733     + if (chip->disabled || chip->init_failed)
3734     return 0;
3735    
3736     if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
3737     @@ -686,7 +686,7 @@ static int azx_runtime_resume(struct device *dev)
3738     struct hda_codec *codec;
3739     int status;
3740    
3741     - if (chip->disabled)
3742     + if (chip->disabled || chip->init_failed)
3743     return 0;
3744    
3745     if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
3746     @@ -723,7 +723,7 @@ static int azx_runtime_idle(struct device *dev)
3747     struct snd_card *card = dev_get_drvdata(dev);
3748     struct azx *chip = card->private_data;
3749    
3750     - if (chip->disabled)
3751     + if (chip->disabled || chip->init_failed)
3752     return 0;
3753    
3754     if (!power_save_controller ||
3755     diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
3756     index 4a7cb01fa912..e9d1a5762a55 100644
3757     --- a/sound/pci/hda/hda_priv.h
3758     +++ b/sound/pci/hda/hda_priv.h
3759     @@ -186,6 +186,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
3760     #define AZX_DCAPS_BUFSIZE (1 << 21) /* no buffer size alignment */
3761     #define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */
3762     #define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */
3763     +#define AZX_DCAPS_REVERSE_ASSIGN (1 << 24) /* Assign devices in reverse order */
3764     #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
3765     #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
3766     #define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */