Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.14/0113-3.14.14-all-fixes.patch

Parent Directory | Revision Log


Revision 2506 - (hide annotations) (download)
Fri Oct 17 07:55:45 2014 UTC (9 years, 7 months ago) by niro
File size: 93171 byte(s)
-patches for 3.14
1 niro 2506 diff --git a/Makefile b/Makefile
2     index 7a2981c972ae..230c7f694ab7 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 3
7     PATCHLEVEL = 14
8     -SUBLEVEL = 13
9     +SUBLEVEL = 14
10     EXTRAVERSION =
11     NAME = Remembering Coco
12    
13     diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
14     index 2618cc13ba75..76a7739aab1c 100644
15     --- a/arch/arc/include/uapi/asm/ptrace.h
16     +++ b/arch/arc/include/uapi/asm/ptrace.h
17     @@ -11,6 +11,7 @@
18     #ifndef _UAPI__ASM_ARC_PTRACE_H
19     #define _UAPI__ASM_ARC_PTRACE_H
20    
21     +#define PTRACE_GET_THREAD_AREA 25
22    
23     #ifndef __ASSEMBLY__
24     /*
25     diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
26     index 5d76706139dd..13b3ffb27a38 100644
27     --- a/arch/arc/kernel/ptrace.c
28     +++ b/arch/arc/kernel/ptrace.c
29     @@ -146,6 +146,10 @@ long arch_ptrace(struct task_struct *child, long request,
30     pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
31    
32     switch (request) {
33     + case PTRACE_GET_THREAD_AREA:
34     + ret = put_user(task_thread_info(child)->thr_ptr,
35     + (unsigned long __user *)data);
36     + break;
37     default:
38     ret = ptrace_request(child, request, addr, data);
39     break;
40     diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
41     index 44298add8a48..4733d327cfb1 100644
42     --- a/arch/arm/Kconfig
43     +++ b/arch/arm/Kconfig
44     @@ -6,6 +6,7 @@ config ARM
45     select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
46     select ARCH_HAVE_CUSTOM_GPIO_H
47     select ARCH_MIGHT_HAVE_PC_PARPORT
48     + select ARCH_SUPPORTS_ATOMIC_RMW
49     select ARCH_USE_BUILTIN_BSWAP
50     select ARCH_USE_CMPXCHG_LOCKREF
51     select ARCH_WANT_IPC_PARSE_VERSION
52     diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
53     index 737ed5da8f71..de1611966d8b 100644
54     --- a/arch/arm/boot/dts/imx25.dtsi
55     +++ b/arch/arm/boot/dts/imx25.dtsi
56     @@ -30,6 +30,7 @@
57     spi2 = &spi3;
58     usb0 = &usbotg;
59     usb1 = &usbhost1;
60     + ethernet0 = &fec;
61     };
62    
63     cpus {
64     diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
65     index 826231eb4446..da2eb7f6a5b2 100644
66     --- a/arch/arm/boot/dts/imx27.dtsi
67     +++ b/arch/arm/boot/dts/imx27.dtsi
68     @@ -30,6 +30,7 @@
69     spi0 = &cspi1;
70     spi1 = &cspi2;
71     spi2 = &cspi3;
72     + ethernet0 = &fec;
73     };
74    
75     aitc: aitc-interrupt-controller@e0000000 {
76     diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
77     index 4bcdd3ad15e5..e1b601595a09 100644
78     --- a/arch/arm/boot/dts/imx51.dtsi
79     +++ b/arch/arm/boot/dts/imx51.dtsi
80     @@ -27,6 +27,7 @@
81     spi0 = &ecspi1;
82     spi1 = &ecspi2;
83     spi2 = &cspi;
84     + ethernet0 = &fec;
85     };
86    
87     tzic: tz-interrupt-controller@e0000000 {
88     diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
89     index dc72353de0b3..50eda500f39a 100644
90     --- a/arch/arm/boot/dts/imx53.dtsi
91     +++ b/arch/arm/boot/dts/imx53.dtsi
92     @@ -33,6 +33,7 @@
93     spi0 = &ecspi1;
94     spi1 = &ecspi2;
95     spi2 = &cspi;
96     + ethernet0 = &fec;
97     };
98    
99     cpus {
100     diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
101     index 27bbcfc7202a..65b788410bd9 100644
102     --- a/arch/arm64/Kconfig
103     +++ b/arch/arm64/Kconfig
104     @@ -2,6 +2,7 @@ config ARM64
105     def_bool y
106     select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
107     select ARCH_USE_CMPXCHG_LOCKREF
108     + select ARCH_SUPPORTS_ATOMIC_RMW
109     select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
110     select ARCH_WANT_OPTIONAL_GPIOLIB
111     select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
112     diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
113     index 2156fa2d25fe..ee3c6608126a 100644
114     --- a/arch/powerpc/Kconfig
115     +++ b/arch/powerpc/Kconfig
116     @@ -141,6 +141,7 @@ config PPC
117     select HAVE_DEBUG_STACKOVERFLOW
118     select HAVE_IRQ_EXIT_ON_IRQ_STACK
119     select ARCH_USE_CMPXCHG_LOCKREF if PPC64
120     + select ARCH_SUPPORTS_ATOMIC_RMW
121    
122     config GENERIC_CSUM
123     def_bool CPU_LITTLE_ENDIAN
124     diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
125     index 7d8b7e94b93b..b398c68b2713 100644
126     --- a/arch/sparc/Kconfig
127     +++ b/arch/sparc/Kconfig
128     @@ -77,6 +77,7 @@ config SPARC64
129     select ARCH_HAVE_NMI_SAFE_CMPXCHG
130     select HAVE_C_RECORDMCOUNT
131     select NO_BOOTMEM
132     + select ARCH_SUPPORTS_ATOMIC_RMW
133    
134     config ARCH_DEFCONFIG
135     string
136     diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
137     index 1981dd9b8a11..7324107acb40 100644
138     --- a/arch/x86/Kconfig
139     +++ b/arch/x86/Kconfig
140     @@ -127,6 +127,7 @@ config X86
141     select HAVE_DEBUG_STACKOVERFLOW
142     select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
143     select HAVE_CC_STACKPROTECTOR
144     + select ARCH_SUPPORTS_ATOMIC_RMW
145    
146     config INSTRUCTION_DECODER
147     def_bool y
148     diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
149     index aa333d966886..1340ebfcb467 100644
150     --- a/arch/x86/kernel/cpu/perf_event_intel.c
151     +++ b/arch/x86/kernel/cpu/perf_event_intel.c
152     @@ -1383,6 +1383,15 @@ again:
153     intel_pmu_lbr_read();
154    
155     /*
156     + * CondChgd bit 63 doesn't mean any overflow status. Ignore
157     + * and clear the bit.
158     + */
159     + if (__test_and_clear_bit(63, (unsigned long *)&status)) {
160     + if (!status)
161     + goto done;
162     + }
163     +
164     + /*
165     * PEBS overflow sets bit 62 in the global status register
166     */
167     if (__test_and_clear_bit(62, (unsigned long *)&status)) {
168     diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
169     index cfbe99f88830..e0d1d7a8354e 100644
170     --- a/arch/x86/kernel/tsc.c
171     +++ b/arch/x86/kernel/tsc.c
172     @@ -921,9 +921,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
173     tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
174     if (!(freq->flags & CPUFREQ_CONST_LOOPS))
175     mark_tsc_unstable("cpufreq changes");
176     - }
177    
178     - set_cyc2ns_scale(tsc_khz, freq->cpu);
179     + set_cyc2ns_scale(tsc_khz, freq->cpu);
180     + }
181    
182     return 0;
183     }
184     diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
185     index f6f497450560..e36a0245f2c1 100644
186     --- a/drivers/bluetooth/hci_h5.c
187     +++ b/drivers/bluetooth/hci_h5.c
188     @@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
189     H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
190     BT_ERR("Non-link packet received in non-active state");
191     h5_reset_rx(h5);
192     + return 0;
193     }
194    
195     h5->rx_func = h5_rx_payload;
196     diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
197     index 28f84b4fce32..3485bdccf8b8 100644
198     --- a/drivers/gpu/drm/qxl/qxl_irq.c
199     +++ b/drivers/gpu/drm/qxl/qxl_irq.c
200     @@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
201    
202     pending = xchg(&qdev->ram_header->int_pending, 0);
203    
204     + if (!pending)
205     + return IRQ_NONE;
206     +
207     atomic_inc(&qdev->irq_received);
208    
209     if (pending & QXL_INTERRUPT_DISPLAY) {
210     diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
211     index ccca8b224d18..e7bfd5502410 100644
212     --- a/drivers/gpu/drm/radeon/atombios_encoders.c
213     +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
214     @@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
215     struct backlight_properties props;
216     struct radeon_backlight_privdata *pdata;
217     struct radeon_encoder_atom_dig *dig;
218     - u8 backlight_level;
219     char bl_name[16];
220    
221     /* Mac laptops with multiple GPUs use the gmux driver for backlight
222     @@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
223    
224     pdata->encoder = radeon_encoder;
225    
226     - backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
227     -
228     dig = radeon_encoder->enc_priv;
229     dig->bl_dev = bd;
230    
231     bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
232     + /* Set a reasonable default here if the level is 0 otherwise
233     + * fbdev will attempt to turn the backlight on after console
234     + * unblanking and it will try and restore 0 which turns the backlight
235     + * off again.
236     + */
237     + if (bd->props.brightness == 0)
238     + bd->props.brightness = RADEON_MAX_BL_LEVEL;
239     bd->props.power = FB_BLANK_UNBLANK;
240     backlight_update_status(bd);
241    
242     diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
243     index df6d0079d0af..11d06c7b5afa 100644
244     --- a/drivers/gpu/drm/radeon/radeon_display.c
245     +++ b/drivers/gpu/drm/radeon/radeon_display.c
246     @@ -755,6 +755,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
247     struct radeon_device *rdev = dev->dev_private;
248     int ret = 0;
249    
250     + /* don't leak the edid if we already fetched it in detect() */
251     + if (radeon_connector->edid)
252     + goto got_edid;
253     +
254     /* on hw with routers, select right port */
255     if (radeon_connector->router.ddc_valid)
256     radeon_router_select_ddc_port(radeon_connector);
257     @@ -794,6 +798,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
258     radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
259     }
260     if (radeon_connector->edid) {
261     +got_edid:
262     drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
263     ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
264     drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
265     diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
266     index 09988b289622..816782a65488 100644
267     --- a/drivers/hv/hv_kvp.c
268     +++ b/drivers/hv/hv_kvp.c
269     @@ -127,6 +127,15 @@ kvp_work_func(struct work_struct *dummy)
270     kvp_respond_to_host(NULL, HV_E_FAIL);
271     }
272    
273     +static void poll_channel(struct vmbus_channel *channel)
274     +{
275     + unsigned long flags;
276     +
277     + spin_lock_irqsave(&channel->inbound_lock, flags);
278     + hv_kvp_onchannelcallback(channel);
279     + spin_unlock_irqrestore(&channel->inbound_lock, flags);
280     +}
281     +
282     static int kvp_handle_handshake(struct hv_kvp_msg *msg)
283     {
284     int ret = 1;
285     @@ -155,7 +164,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
286     kvp_register(dm_reg_value);
287     kvp_transaction.active = false;
288     if (kvp_transaction.kvp_context)
289     - hv_kvp_onchannelcallback(kvp_transaction.kvp_context);
290     + poll_channel(kvp_transaction.kvp_context);
291     }
292     return ret;
293     }
294     @@ -568,6 +577,7 @@ response_done:
295    
296     vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
297     VM_PKT_DATA_INBAND, 0);
298     + poll_channel(channel);
299    
300     }
301    
302     @@ -603,7 +613,7 @@ void hv_kvp_onchannelcallback(void *context)
303     return;
304     }
305    
306     - vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
307     + vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
308     &requestid);
309    
310     if (recvlen > 0) {
311     diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
312     index 62dfd246b948..d016be36cc03 100644
313     --- a/drivers/hv/hv_util.c
314     +++ b/drivers/hv/hv_util.c
315     @@ -312,7 +312,7 @@ static int util_probe(struct hv_device *dev,
316     (struct hv_util_service *)dev_id->driver_data;
317     int ret;
318    
319     - srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
320     + srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
321     if (!srv->recv_buffer)
322     return -ENOMEM;
323     if (srv->util_init) {
324     diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
325     index 0f4dea5ccf17..9ee3913850d6 100644
326     --- a/drivers/hwmon/adt7470.c
327     +++ b/drivers/hwmon/adt7470.c
328     @@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
329     return -EINVAL;
330    
331     temp = DIV_ROUND_CLOSEST(temp, 1000);
332     - temp = clamp_val(temp, 0, 255);
333     + temp = clamp_val(temp, -128, 127);
334    
335     mutex_lock(&data->lock);
336     data->temp_min[attr->index] = temp;
337     @@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
338     return -EINVAL;
339    
340     temp = DIV_ROUND_CLOSEST(temp, 1000);
341     - temp = clamp_val(temp, 0, 255);
342     + temp = clamp_val(temp, -128, 127);
343    
344     mutex_lock(&data->lock);
345     data->temp_max[attr->index] = temp;
346     @@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
347     return -EINVAL;
348    
349     temp = DIV_ROUND_CLOSEST(temp, 1000);
350     - temp = clamp_val(temp, 0, 255);
351     + temp = clamp_val(temp, -128, 127);
352    
353     mutex_lock(&data->lock);
354     data->pwm_tmin[attr->index] = temp;
355     diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
356     index afd31042b452..d14ab3c45daa 100644
357     --- a/drivers/hwmon/da9052-hwmon.c
358     +++ b/drivers/hwmon/da9052-hwmon.c
359     @@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev,
360     struct device_attribute *devattr,
361     char *buf)
362     {
363     - return sprintf(buf, "da9052-hwmon\n");
364     + return sprintf(buf, "da9052\n");
365     }
366    
367     static ssize_t show_label(struct device *dev,
368     diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
369     index 73b3865f1207..35eb7738d711 100644
370     --- a/drivers/hwmon/da9055-hwmon.c
371     +++ b/drivers/hwmon/da9055-hwmon.c
372     @@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev,
373     struct device_attribute *devattr,
374     char *buf)
375     {
376     - return sprintf(buf, "da9055-hwmon\n");
377     + return sprintf(buf, "da9055\n");
378     }
379    
380     static ssize_t show_label(struct device *dev,
381     diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
382     index c9c1419fe6e0..f9360f497ed4 100644
383     --- a/drivers/iio/industrialio-event.c
384     +++ b/drivers/iio/industrialio-event.c
385     @@ -343,6 +343,9 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
386     &indio_dev->event_interface->dev_attr_list);
387     kfree(postfix);
388    
389     + if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
390     + continue;
391     +
392     if (ret)
393     return ret;
394    
395     diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
396     index ac2d41bd71a0..12698ee9e06b 100644
397     --- a/drivers/irqchip/irq-gic.c
398     +++ b/drivers/irqchip/irq-gic.c
399     @@ -42,6 +42,7 @@
400     #include <linux/irqchip/chained_irq.h>
401     #include <linux/irqchip/arm-gic.h>
402    
403     +#include <asm/cputype.h>
404     #include <asm/irq.h>
405     #include <asm/exception.h>
406     #include <asm/smp_plat.h>
407     @@ -903,7 +904,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
408     }
409    
410     for_each_possible_cpu(cpu) {
411     - unsigned long offset = percpu_offset * cpu_logical_map(cpu);
412     + u32 mpidr = cpu_logical_map(cpu);
413     + u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
414     + unsigned long offset = percpu_offset * core_id;
415     *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
416     *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
417     }
418     @@ -1008,8 +1011,10 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
419     gic_cnt++;
420     return 0;
421     }
422     +IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
423     IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
424     IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
425     +IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
426     IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
427     IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
428    
429     diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
430     index 5320332390b7..a87d3fab0271 100644
431     --- a/drivers/md/dm-cache-metadata.c
432     +++ b/drivers/md/dm-cache-metadata.c
433     @@ -425,6 +425,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
434    
435     disk_super = dm_block_data(sblock);
436    
437     + /* Verify the data block size hasn't changed */
438     + if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
439     + DMERR("changing the data block size (from %u to %llu) is not supported",
440     + le32_to_cpu(disk_super->data_block_size),
441     + (unsigned long long)cmd->data_block_size);
442     + r = -EINVAL;
443     + goto bad;
444     + }
445     +
446     r = __check_incompat_features(disk_super, cmd);
447     if (r < 0)
448     goto bad;
449     diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
450     index b086a945edcb..e9d33ad59df5 100644
451     --- a/drivers/md/dm-thin-metadata.c
452     +++ b/drivers/md/dm-thin-metadata.c
453     @@ -613,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
454    
455     disk_super = dm_block_data(sblock);
456    
457     + /* Verify the data block size hasn't changed */
458     + if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
459     + DMERR("changing the data block size (from %u to %llu) is not supported",
460     + le32_to_cpu(disk_super->data_block_size),
461     + (unsigned long long)pmd->data_block_size);
462     + r = -EINVAL;
463     + goto bad_unlock_sblock;
464     + }
465     +
466     r = __check_incompat_features(disk_super, pmd);
467     if (r < 0)
468     goto bad_unlock_sblock;
469     diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
470     index 2fd1c5e31a0f..339adce7c7a5 100644
471     --- a/drivers/media/usb/gspca/pac7302.c
472     +++ b/drivers/media/usb/gspca/pac7302.c
473     @@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = {
474     {USB_DEVICE(0x093a, 0x2620)},
475     {USB_DEVICE(0x093a, 0x2621)},
476     {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
477     + {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
478     {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
479     {USB_DEVICE(0x093a, 0x2625)},
480     {USB_DEVICE(0x093a, 0x2626)},
481     diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
482     index d1dd6a33a050..3059a7a53bff 100644
483     --- a/drivers/mtd/devices/elm.c
484     +++ b/drivers/mtd/devices/elm.c
485     @@ -428,6 +428,7 @@ static int elm_context_save(struct elm_info *info)
486     ELM_SYNDROME_FRAGMENT_1 + offset);
487     regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
488     ELM_SYNDROME_FRAGMENT_0 + offset);
489     + break;
490     default:
491     return -EINVAL;
492     }
493     @@ -466,6 +467,7 @@ static int elm_context_restore(struct elm_info *info)
494     regs->elm_syndrome_fragment_1[i]);
495     elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
496     regs->elm_syndrome_fragment_0[i]);
497     + break;
498     default:
499     return -EINVAL;
500     }
501     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
502     index 91ec8cd12478..a95b322f0924 100644
503     --- a/drivers/net/bonding/bond_main.c
504     +++ b/drivers/net/bonding/bond_main.c
505     @@ -4068,7 +4068,7 @@ static int bond_check_params(struct bond_params *params)
506     }
507    
508     if (ad_select) {
509     - bond_opt_initstr(&newval, lacp_rate);
510     + bond_opt_initstr(&newval, ad_select);
511     valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
512     &newval);
513     if (!valptr) {
514     diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
515     index 3fcdae266377..1d0dab854b90 100644
516     --- a/drivers/net/can/slcan.c
517     +++ b/drivers/net/can/slcan.c
518     @@ -52,6 +52,7 @@
519     #include <linux/delay.h>
520     #include <linux/init.h>
521     #include <linux/kernel.h>
522     +#include <linux/workqueue.h>
523     #include <linux/can.h>
524     #include <linux/can/skb.h>
525    
526     @@ -85,6 +86,7 @@ struct slcan {
527     struct tty_struct *tty; /* ptr to TTY structure */
528     struct net_device *dev; /* easy for intr handling */
529     spinlock_t lock;
530     + struct work_struct tx_work; /* Flushes transmit buffer */
531    
532     /* These are pointers to the malloc()ed frame buffers. */
533     unsigned char rbuff[SLC_MTU]; /* receiver buffer */
534     @@ -309,34 +311,44 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
535     sl->dev->stats.tx_bytes += cf->can_dlc;
536     }
537    
538     -/*
539     - * Called by the driver when there's room for more data. If we have
540     - * more packets to send, we send them here.
541     - */
542     -static void slcan_write_wakeup(struct tty_struct *tty)
543     +/* Write out any remaining transmit buffer. Scheduled when tty is writable */
544     +static void slcan_transmit(struct work_struct *work)
545     {
546     + struct slcan *sl = container_of(work, struct slcan, tx_work);
547     int actual;
548     - struct slcan *sl = (struct slcan *) tty->disc_data;
549    
550     + spin_lock_bh(&sl->lock);
551     /* First make sure we're connected. */
552     - if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
553     + if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
554     + spin_unlock_bh(&sl->lock);
555     return;
556     + }
557    
558     - spin_lock(&sl->lock);
559     if (sl->xleft <= 0) {
560     /* Now serial buffer is almost free & we can start
561     * transmission of another packet */
562     sl->dev->stats.tx_packets++;
563     - clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
564     - spin_unlock(&sl->lock);
565     + clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
566     + spin_unlock_bh(&sl->lock);
567     netif_wake_queue(sl->dev);
568     return;
569     }
570    
571     - actual = tty->ops->write(tty, sl->xhead, sl->xleft);
572     + actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
573     sl->xleft -= actual;
574     sl->xhead += actual;
575     - spin_unlock(&sl->lock);
576     + spin_unlock_bh(&sl->lock);
577     +}
578     +
579     +/*
580     + * Called by the driver when there's room for more data.
581     + * Schedule the transmit.
582     + */
583     +static void slcan_write_wakeup(struct tty_struct *tty)
584     +{
585     + struct slcan *sl = tty->disc_data;
586     +
587     + schedule_work(&sl->tx_work);
588     }
589    
590     /* Send a can_frame to a TTY queue. */
591     @@ -522,6 +534,7 @@ static struct slcan *slc_alloc(dev_t line)
592     sl->magic = SLCAN_MAGIC;
593     sl->dev = dev;
594     spin_lock_init(&sl->lock);
595     + INIT_WORK(&sl->tx_work, slcan_transmit);
596     slcan_devs[i] = dev;
597    
598     return sl;
599     @@ -620,8 +633,12 @@ static void slcan_close(struct tty_struct *tty)
600     if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
601     return;
602    
603     + spin_lock_bh(&sl->lock);
604     tty->disc_data = NULL;
605     sl->tty = NULL;
606     + spin_unlock_bh(&sl->lock);
607     +
608     + flush_work(&sl->tx_work);
609    
610     /* Flush network side */
611     unregister_netdev(sl->dev);
612     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
613     index dbcff509dc3f..5ed512473b12 100644
614     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
615     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
616     @@ -793,7 +793,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
617    
618     return;
619     }
620     - bnx2x_frag_free(fp, new_data);
621     + if (new_data)
622     + bnx2x_frag_free(fp, new_data);
623     drop:
624     /* drop the packet and keep the buffer in the bin */
625     DP(NETIF_MSG_RX_STATUS,
626     diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
627     index 36c80612e21a..80bfa0391913 100644
628     --- a/drivers/net/ethernet/emulex/benet/be_main.c
629     +++ b/drivers/net/ethernet/emulex/benet/be_main.c
630     @@ -2797,7 +2797,7 @@ static int be_open(struct net_device *netdev)
631     for_all_evt_queues(adapter, eqo, i) {
632     napi_enable(&eqo->napi);
633     be_enable_busy_poll(eqo);
634     - be_eq_notify(adapter, eqo->q.id, true, false, 0);
635     + be_eq_notify(adapter, eqo->q.id, true, true, 0);
636     }
637     adapter->flags |= BE_FLAGS_NAPI_ENABLED;
638    
639     diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
640     index 42f0f6717511..70e16f71f574 100644
641     --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
642     +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
643     @@ -1374,7 +1374,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
644     /* RAR[1-6] are owned by manageability. Skip those and program the
645     * next address into the SHRA register array.
646     */
647     - if (index < (u32)(hw->mac.rar_entry_count - 6)) {
648     + if (index < (u32)(hw->mac.rar_entry_count)) {
649     s32 ret_val;
650    
651     ret_val = e1000_acquire_swflag_ich8lan(hw);
652     diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
653     index 217090df33e7..59865695b282 100644
654     --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
655     +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
656     @@ -98,7 +98,7 @@
657     #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
658    
659     #define E1000_ICH_RAR_ENTRIES 7
660     -#define E1000_PCH2_RAR_ENTRIES 11 /* RAR[0-6], SHRA[0-3] */
661     +#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
662     #define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
663    
664     #define PHY_PAGE_SHIFT 5
665     diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
666     index 06df6928f44c..4fa5c2a77d49 100644
667     --- a/drivers/net/ethernet/intel/igb/e1000_82575.c
668     +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
669     @@ -1492,6 +1492,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
670     s32 ret_val;
671     u16 i, rar_count = mac->rar_entry_count;
672    
673     + if ((hw->mac.type >= e1000_i210) &&
674     + !(igb_get_flash_presence_i210(hw))) {
675     + ret_val = igb_pll_workaround_i210(hw);
676     + if (ret_val)
677     + return ret_val;
678     + }
679     +
680     /* Initialize identification LED */
681     ret_val = igb_id_led_init(hw);
682     if (ret_val) {
683     diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
684     index 0571b973be80..20b37668284a 100644
685     --- a/drivers/net/ethernet/intel/igb/e1000_defines.h
686     +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
687     @@ -46,14 +46,15 @@
688     /* Extended Device Control */
689     #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */
690     /* Physical Func Reset Done Indication */
691     -#define E1000_CTRL_EXT_PFRSTD 0x00004000
692     -#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
693     -#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
694     -#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
695     -#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
696     -#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
697     -#define E1000_CTRL_EXT_EIAME 0x01000000
698     -#define E1000_CTRL_EXT_IRCA 0x00000001
699     +#define E1000_CTRL_EXT_PFRSTD 0x00004000
700     +#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */
701     +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
702     +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
703     +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
704     +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
705     +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
706     +#define E1000_CTRL_EXT_EIAME 0x01000000
707     +#define E1000_CTRL_EXT_IRCA 0x00000001
708     /* Interrupt delay cancellation */
709     /* Driver loaded bit for FW */
710     #define E1000_CTRL_EXT_DRV_LOAD 0x10000000
711     @@ -62,6 +63,7 @@
712     /* packet buffer parity error detection enabled */
713     /* descriptor FIFO parity error detection enable */
714     #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
715     +#define E1000_CTRL_EXT_PHYPDEN 0x00100000
716     #define E1000_I2CCMD_REG_ADDR_SHIFT 16
717     #define E1000_I2CCMD_PHY_ADDR_SHIFT 24
718     #define E1000_I2CCMD_OPCODE_READ 0x08000000
719     diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
720     index ab99e2b582a8..b79980ad225b 100644
721     --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
722     +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
723     @@ -572,4 +572,7 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
724     /* These functions must be implemented by drivers */
725     s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
726     s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
727     +
728     +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
729     +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
730     #endif /* _E1000_HW_H_ */
731     diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
732     index 0c0393316a3a..0217d4e229a0 100644
733     --- a/drivers/net/ethernet/intel/igb/e1000_i210.c
734     +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
735     @@ -835,3 +835,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
736     }
737     return ret_val;
738     }
739     +
740     +/**
741     + * igb_pll_workaround_i210
742     + * @hw: pointer to the HW structure
743     + *
744     + * Works around an errata in the PLL circuit where it occasionally
745     + * provides the wrong clock frequency after power up.
746     + **/
747     +s32 igb_pll_workaround_i210(struct e1000_hw *hw)
748     +{
749     + s32 ret_val;
750     + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
751     + u16 nvm_word, phy_word, pci_word, tmp_nvm;
752     + int i;
753     +
754     + /* Get and set needed register values */
755     + wuc = rd32(E1000_WUC);
756     + mdicnfg = rd32(E1000_MDICNFG);
757     + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
758     + wr32(E1000_MDICNFG, reg_val);
759     +
760     + /* Get data from NVM, or set default */
761     + ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
762     + &nvm_word);
763     + if (ret_val)
764     + nvm_word = E1000_INVM_DEFAULT_AL;
765     + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
766     + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
767     + /* check current state directly from internal PHY */
768     + igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
769     + E1000_PHY_PLL_FREQ_REG), &phy_word);
770     + if ((phy_word & E1000_PHY_PLL_UNCONF)
771     + != E1000_PHY_PLL_UNCONF) {
772     + ret_val = 0;
773     + break;
774     + } else {
775     + ret_val = -E1000_ERR_PHY;
776     + }
777     + /* directly reset the internal PHY */
778     + ctrl = rd32(E1000_CTRL);
779     + wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
780     +
781     + ctrl_ext = rd32(E1000_CTRL_EXT);
782     + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
783     + wr32(E1000_CTRL_EXT, ctrl_ext);
784     +
785     + wr32(E1000_WUC, 0);
786     + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
787     + wr32(E1000_EEARBC_I210, reg_val);
788     +
789     + igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
790     + pci_word |= E1000_PCI_PMCSR_D3;
791     + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
792     + usleep_range(1000, 2000);
793     + pci_word &= ~E1000_PCI_PMCSR_D3;
794     + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
795     + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
796     + wr32(E1000_EEARBC_I210, reg_val);
797     +
798     + /* restore WUC register */
799     + wr32(E1000_WUC, wuc);
800     + }
801     + /* restore MDICNFG setting */
802     + wr32(E1000_MDICNFG, mdicnfg);
803     + return ret_val;
804     +}
805     diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
806     index 2d913716573a..710f8e9f10fb 100644
807     --- a/drivers/net/ethernet/intel/igb/e1000_i210.h
808     +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
809     @@ -46,6 +46,7 @@ s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
810     s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
811     s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
812     bool igb_get_flash_presence_i210(struct e1000_hw *hw);
813     +s32 igb_pll_workaround_i210(struct e1000_hw *hw);
814    
815     #define E1000_STM_OPCODE 0xDB00
816     #define E1000_EEPROM_FLASH_SIZE_WORD 0x11
817     @@ -91,4 +92,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
818     #define NVM_LED_1_CFG_DEFAULT_I211 0x0184
819     #define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
820    
821     +/* PLL Defines */
822     +#define E1000_PCI_PMCSR 0x44
823     +#define E1000_PCI_PMCSR_D3 0x03
824     +#define E1000_MAX_PLL_TRIES 5
825     +#define E1000_PHY_PLL_UNCONF 0xFF
826     +#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
827     +#define E1000_PHY_PLL_FREQ_REG 0x000E
828     +#define E1000_INVM_DEFAULT_AL 0x202F
829     +#define E1000_INVM_AUTOLOAD 0x0A
830     +#define E1000_INVM_PLL_WO_VAL 0x0010
831     +
832     #endif
833     diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
834     index 82632c6c53af..7156981ec813 100644
835     --- a/drivers/net/ethernet/intel/igb/e1000_regs.h
836     +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
837     @@ -69,6 +69,7 @@
838     #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
839     #define E1000_PBS 0x01008 /* Packet Buffer Size */
840     #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
841     +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
842     #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
843     #define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
844     #define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
845     diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
846     index d9c7eb279141..5ca8c479666e 100644
847     --- a/drivers/net/ethernet/intel/igb/igb_main.c
848     +++ b/drivers/net/ethernet/intel/igb/igb_main.c
849     @@ -7128,6 +7128,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
850     }
851     }
852    
853     +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
854     +{
855     + struct igb_adapter *adapter = hw->back;
856     +
857     + pci_read_config_word(adapter->pdev, reg, value);
858     +}
859     +
860     +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
861     +{
862     + struct igb_adapter *adapter = hw->back;
863     +
864     + pci_write_config_word(adapter->pdev, reg, *value);
865     +}
866     +
867     s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
868     {
869     struct igb_adapter *adapter = hw->back;
870     @@ -7491,6 +7505,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
871    
872     if (netif_running(netdev))
873     igb_close(netdev);
874     + else
875     + igb_reset(adapter);
876    
877     igb_clear_interrupt_scheme(adapter);
878    
879     diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
880     index ca2dfbe01598..c4c00d9f2c04 100644
881     --- a/drivers/net/ethernet/marvell/mvneta.c
882     +++ b/drivers/net/ethernet/marvell/mvneta.c
883     @@ -1217,7 +1217,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
884     command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
885     command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
886    
887     - if (l3_proto == swab16(ETH_P_IP))
888     + if (l3_proto == htons(ETH_P_IP))
889     command |= MVNETA_TXD_IP_CSUM;
890     else
891     command |= MVNETA_TX_L3_IP6;
892     @@ -2393,7 +2393,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
893    
894     if (phydev->speed == SPEED_1000)
895     val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
896     - else
897     + else if (phydev->speed == SPEED_100)
898     val |= MVNETA_GMAC_CONFIG_MII_SPEED;
899    
900     mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
901     diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
902     index 1c24a8f368bd..fd411d6e19a2 100644
903     --- a/drivers/net/ethernet/sun/sunvnet.c
904     +++ b/drivers/net/ethernet/sun/sunvnet.c
905     @@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
906     return vp;
907     }
908    
909     +static void vnet_cleanup(void)
910     +{
911     + struct vnet *vp;
912     + struct net_device *dev;
913     +
914     + mutex_lock(&vnet_list_mutex);
915     + while (!list_empty(&vnet_list)) {
916     + vp = list_first_entry(&vnet_list, struct vnet, list);
917     + list_del(&vp->list);
918     + dev = vp->dev;
919     + /* vio_unregister_driver() should have cleaned up port_list */
920     + BUG_ON(!list_empty(&vp->port_list));
921     + unregister_netdev(dev);
922     + free_netdev(dev);
923     + }
924     + mutex_unlock(&vnet_list_mutex);
925     +}
926     +
927     static const char *local_mac_prop = "local-mac-address";
928    
929     static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
930     @@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
931    
932     kfree(port);
933    
934     - unregister_netdev(vp->dev);
935     }
936     return 0;
937     }
938     @@ -1268,6 +1285,7 @@ static int __init vnet_init(void)
939     static void __exit vnet_exit(void)
940     {
941     vio_unregister_driver(&vnet_port_driver);
942     + vnet_cleanup();
943     }
944    
945     module_init(vnet_init);
946     diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
947     index 2ea7efd11857..6c9c16d76935 100644
948     --- a/drivers/net/ppp/pppoe.c
949     +++ b/drivers/net/ppp/pppoe.c
950     @@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
951     po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
952     dev->hard_header_len);
953    
954     - po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
955     + po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
956     po->chan.private = sk;
957     po->chan.ops = &pppoe_chan_ops;
958    
959     diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
960     index ad4a94e9ff57..87526443841f 100644
961     --- a/drivers/net/slip/slip.c
962     +++ b/drivers/net/slip/slip.c
963     @@ -83,6 +83,7 @@
964     #include <linux/delay.h>
965     #include <linux/init.h>
966     #include <linux/slab.h>
967     +#include <linux/workqueue.h>
968     #include "slip.h"
969     #ifdef CONFIG_INET
970     #include <linux/ip.h>
971     @@ -416,36 +417,46 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
972     #endif
973     }
974    
975     -/*
976     - * Called by the driver when there's room for more data. If we have
977     - * more packets to send, we send them here.
978     - */
979     -static void slip_write_wakeup(struct tty_struct *tty)
980     +/* Write out any remaining transmit buffer. Scheduled when tty is writable */
981     +static void slip_transmit(struct work_struct *work)
982     {
983     + struct slip *sl = container_of(work, struct slip, tx_work);
984     int actual;
985     - struct slip *sl = tty->disc_data;
986    
987     + spin_lock_bh(&sl->lock);
988     /* First make sure we're connected. */
989     - if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
990     + if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
991     + spin_unlock_bh(&sl->lock);
992     return;
993     + }
994    
995     - spin_lock_bh(&sl->lock);
996     if (sl->xleft <= 0) {
997     /* Now serial buffer is almost free & we can start
998     * transmission of another packet */
999     sl->dev->stats.tx_packets++;
1000     - clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
1001     + clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
1002     spin_unlock_bh(&sl->lock);
1003     sl_unlock(sl);
1004     return;
1005     }
1006    
1007     - actual = tty->ops->write(tty, sl->xhead, sl->xleft);
1008     + actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
1009     sl->xleft -= actual;
1010     sl->xhead += actual;
1011     spin_unlock_bh(&sl->lock);
1012     }
1013    
1014     +/*
1015     + * Called by the driver when there's room for more data.
1016     + * Schedule the transmit.
1017     + */
1018     +static void slip_write_wakeup(struct tty_struct *tty)
1019     +{
1020     + struct slip *sl = tty->disc_data;
1021     +
1022     + schedule_work(&sl->tx_work);
1023     +}
1024     +
1025     static void sl_tx_timeout(struct net_device *dev)
1026     {
1027     struct slip *sl = netdev_priv(dev);
1028     @@ -749,6 +760,7 @@ static struct slip *sl_alloc(dev_t line)
1029     sl->magic = SLIP_MAGIC;
1030     sl->dev = dev;
1031     spin_lock_init(&sl->lock);
1032     + INIT_WORK(&sl->tx_work, slip_transmit);
1033     sl->mode = SL_MODE_DEFAULT;
1034     #ifdef CONFIG_SLIP_SMART
1035     /* initialize timer_list struct */
1036     @@ -872,8 +884,12 @@ static void slip_close(struct tty_struct *tty)
1037     if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
1038     return;
1039    
1040     + spin_lock_bh(&sl->lock);
1041     tty->disc_data = NULL;
1042     sl->tty = NULL;
1043     + spin_unlock_bh(&sl->lock);
1044     +
1045     + flush_work(&sl->tx_work);
1046    
1047     /* VSV = very important to remove timers */
1048     #ifdef CONFIG_SLIP_SMART
1049     diff --git a/drivers/net/slip/slip.h b/drivers/net/slip/slip.h
1050     index 67673cf1266b..cf32aadf508f 100644
1051     --- a/drivers/net/slip/slip.h
1052     +++ b/drivers/net/slip/slip.h
1053     @@ -53,6 +53,7 @@ struct slip {
1054     struct tty_struct *tty; /* ptr to TTY structure */
1055     struct net_device *dev; /* easy for intr handling */
1056     spinlock_t lock;
1057     + struct work_struct tx_work; /* Flushes transmit buffer */
1058    
1059     #ifdef SL_INCLUDE_CSLIP
1060     struct slcompress *slcomp; /* for header compression */
1061     diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
1062     index 312178d7b698..a01462523bc7 100644
1063     --- a/drivers/net/usb/huawei_cdc_ncm.c
1064     +++ b/drivers/net/usb/huawei_cdc_ncm.c
1065     @@ -84,12 +84,13 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
1066     ctx = drvstate->ctx;
1067    
1068     if (usbnet_dev->status)
1069     - /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256
1070     - * decimal (0x100)"
1071     + /* The wMaxCommand buffer must be big enough to hold
1072     + * any message from the modem. Experience has shown
1073     + * that some replies are more than 256 bytes long
1074     */
1075     subdriver = usb_cdc_wdm_register(ctx->control,
1076     &usbnet_dev->status->desc,
1077     - 256, /* wMaxCommand */
1078     + 1024, /* wMaxCommand */
1079     huawei_cdc_ncm_wdm_manage_power);
1080     if (IS_ERR(subdriver)) {
1081     ret = PTR_ERR(subdriver);
1082     @@ -206,6 +207,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = {
1083     { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
1084     .driver_info = (unsigned long)&huawei_cdc_ncm_info,
1085     },
1086     + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
1087     + .driver_info = (unsigned long)&huawei_cdc_ncm_info,
1088     + },
1089    
1090     /* Terminating entry */
1091     {
1092     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1093     index b71120842c4f..d510f1d41bae 100644
1094     --- a/drivers/net/usb/qmi_wwan.c
1095     +++ b/drivers/net/usb/qmi_wwan.c
1096     @@ -660,6 +660,7 @@ static const struct usb_device_id products[] = {
1097     {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
1098     {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
1099     {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
1100     + {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
1101     {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
1102     {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
1103     {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
1104     @@ -734,6 +735,7 @@ static const struct usb_device_id products[] = {
1105     {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
1106     {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
1107     {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
1108     + {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
1109     {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
1110     {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
1111     {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
1112     @@ -746,6 +748,7 @@ static const struct usb_device_id products[] = {
1113     {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
1114     {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
1115     {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
1116     + {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
1117     {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
1118     {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
1119     {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
1120     diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
1121     index 503a81e58185..c1e311341b74 100644
1122     --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
1123     +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
1124     @@ -1068,13 +1068,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1125     /* recalculate basic rates */
1126     iwl_calc_basic_rates(priv, ctx);
1127    
1128     - /*
1129     - * force CTS-to-self frames protection if RTS-CTS is not preferred
1130     - * one aggregation protection method
1131     - */
1132     - if (!priv->hw_params.use_rts_for_aggregation)
1133     - ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
1134     -
1135     if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
1136     !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
1137     ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
1138     @@ -1480,11 +1473,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
1139     else
1140     ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
1141    
1142     - if (bss_conf->use_cts_prot)
1143     - ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
1144     - else
1145     - ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
1146     -
1147     memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
1148    
1149     if (vif->type == NL80211_IFTYPE_AP ||
1150     diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
1151     index ba723d50939a..820797af7abf 100644
1152     --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
1153     +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
1154     @@ -651,13 +651,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
1155     if (vif->bss_conf.qos)
1156     cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
1157    
1158     - /* Don't use cts to self as the fw doesn't support it currently. */
1159     - if (vif->bss_conf.use_cts_prot) {
1160     + if (vif->bss_conf.use_cts_prot)
1161     cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
1162     - if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
1163     - cmd->protection_flags |=
1164     - cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
1165     - }
1166     +
1167     IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
1168     vif->bss_conf.use_cts_prot,
1169     vif->bss_conf.ht_operation_mode);
1170     diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
1171     index 43e27a174430..df1f5e732ab5 100644
1172     --- a/drivers/net/wireless/iwlwifi/pcie/drv.c
1173     +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
1174     @@ -366,6 +366,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
1175     {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
1176     {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
1177     {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
1178     + {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
1179     {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
1180     {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
1181     {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
1182     @@ -379,7 +380,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
1183     {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
1184     {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
1185     {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
1186     - {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)},
1187     + {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
1188     {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
1189     {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
1190     {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
1191     diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
1192     index 9d3d2758ec35..952a47f6554e 100644
1193     --- a/drivers/net/wireless/mwifiex/main.c
1194     +++ b/drivers/net/wireless/mwifiex/main.c
1195     @@ -646,6 +646,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1196     }
1197    
1198     tx_info = MWIFIEX_SKB_TXCB(skb);
1199     + memset(tx_info, 0, sizeof(*tx_info));
1200     tx_info->bss_num = priv->bss_num;
1201     tx_info->bss_type = priv->bss_type;
1202     tx_info->pkt_len = skb->len;
1203     diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
1204     index 3314516018c6..86b1fd673749 100644
1205     --- a/drivers/usb/chipidea/udc.c
1206     +++ b/drivers/usb/chipidea/udc.c
1207     @@ -1179,8 +1179,8 @@ static int ep_enable(struct usb_ep *ep,
1208    
1209     if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1210     cap |= QH_IOS;
1211     - if (hwep->num)
1212     - cap |= QH_ZLT;
1213     +
1214     + cap |= QH_ZLT;
1215     cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
1216     /*
1217     * For ISO-TX, we set mult at QH as the largest value, and use
1218     diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1219     index 3baa51bf8a6a..36b1e856bd00 100644
1220     --- a/drivers/usb/core/hub.c
1221     +++ b/drivers/usb/core/hub.c
1222     @@ -884,6 +884,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
1223     if (!hub_is_superspeed(hub->hdev))
1224     return -EINVAL;
1225    
1226     + ret = hub_port_status(hub, port1, &portstatus, &portchange);
1227     + if (ret < 0)
1228     + return ret;
1229     +
1230     + /*
1231     + * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
1232     + * Controller [1022:7814] will have spurious result making the following
1233     + * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
1234     + * as high-speed device if we set the usb 3.0 port link state to
1235     + * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
1236     + * check the state here to avoid the bug.
1237     + */
1238     + if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
1239     + USB_SS_PORT_LS_RX_DETECT) {
1240     + dev_dbg(&hub->ports[port1 - 1]->dev,
1241     + "Not disabling port; link state is RxDetect\n");
1242     + return ret;
1243     + }
1244     +
1245     ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
1246     if (ret)
1247     return ret;
1248     diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
1249     index 61a6ac8fa8fc..08834b565aab 100644
1250     --- a/drivers/xen/balloon.c
1251     +++ b/drivers/xen/balloon.c
1252     @@ -426,20 +426,18 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
1253     * p2m are consistent.
1254     */
1255     if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1256     - unsigned long p;
1257     - struct page *scratch_page = get_balloon_scratch_page();
1258     -
1259     if (!PageHighMem(page)) {
1260     + struct page *scratch_page = get_balloon_scratch_page();
1261     +
1262     ret = HYPERVISOR_update_va_mapping(
1263     (unsigned long)__va(pfn << PAGE_SHIFT),
1264     pfn_pte(page_to_pfn(scratch_page),
1265     PAGE_KERNEL_RO), 0);
1266     BUG_ON(ret);
1267     - }
1268     - p = page_to_pfn(scratch_page);
1269     - __set_phys_to_machine(pfn, pfn_to_mfn(p));
1270    
1271     - put_balloon_scratch_page();
1272     + put_balloon_scratch_page();
1273     + }
1274     + __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
1275     }
1276     #endif
1277    
1278     diff --git a/fs/aio.c b/fs/aio.c
1279     index e609e15f36b9..6d68e01dc7ca 100644
1280     --- a/fs/aio.c
1281     +++ b/fs/aio.c
1282     @@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
1283     static void put_reqs_available(struct kioctx *ctx, unsigned nr)
1284     {
1285     struct kioctx_cpu *kcpu;
1286     + unsigned long flags;
1287    
1288     preempt_disable();
1289     kcpu = this_cpu_ptr(ctx->cpu);
1290    
1291     + local_irq_save(flags);
1292     kcpu->reqs_available += nr;
1293     +
1294     while (kcpu->reqs_available >= ctx->req_batch * 2) {
1295     kcpu->reqs_available -= ctx->req_batch;
1296     atomic_add(ctx->req_batch, &ctx->reqs_available);
1297     }
1298    
1299     + local_irq_restore(flags);
1300     preempt_enable();
1301     }
1302    
1303     @@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
1304     {
1305     struct kioctx_cpu *kcpu;
1306     bool ret = false;
1307     + unsigned long flags;
1308    
1309     preempt_disable();
1310     kcpu = this_cpu_ptr(ctx->cpu);
1311    
1312     + local_irq_save(flags);
1313     if (!kcpu->reqs_available) {
1314     int old, avail = atomic_read(&ctx->reqs_available);
1315    
1316     @@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
1317     ret = true;
1318     kcpu->reqs_available--;
1319     out:
1320     + local_irq_restore(flags);
1321     preempt_enable();
1322     return ret;
1323     }
1324     diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
1325     index 1d1292c581c3..342f0239fcbf 100644
1326     --- a/fs/fuse/dir.c
1327     +++ b/fs/fuse/dir.c
1328     @@ -198,7 +198,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
1329     inode = ACCESS_ONCE(entry->d_inode);
1330     if (inode && is_bad_inode(inode))
1331     goto invalid;
1332     - else if (fuse_dentry_time(entry) < get_jiffies_64()) {
1333     + else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
1334     + (flags & LOOKUP_REVAL)) {
1335     int err;
1336     struct fuse_entry_out outarg;
1337     struct fuse_req *req;
1338     @@ -925,7 +926,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
1339     int err;
1340     bool r;
1341    
1342     - if (fi->i_time < get_jiffies_64()) {
1343     + if (time_before64(fi->i_time, get_jiffies_64())) {
1344     r = true;
1345     err = fuse_do_getattr(inode, stat, file);
1346     } else {
1347     @@ -1111,7 +1112,7 @@ static int fuse_permission(struct inode *inode, int mask)
1348     ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
1349     struct fuse_inode *fi = get_fuse_inode(inode);
1350    
1351     - if (fi->i_time < get_jiffies_64()) {
1352     + if (time_before64(fi->i_time, get_jiffies_64())) {
1353     refreshed = true;
1354    
1355     err = fuse_perm_getattr(inode, mask);
1356     diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
1357     index d468643a68b2..73f6bcb44ea8 100644
1358     --- a/fs/fuse/inode.c
1359     +++ b/fs/fuse/inode.c
1360     @@ -461,6 +461,17 @@ static const match_table_t tokens = {
1361     {OPT_ERR, NULL}
1362     };
1363    
1364     +static int fuse_match_uint(substring_t *s, unsigned int *res)
1365     +{
1366     + int err = -ENOMEM;
1367     + char *buf = match_strdup(s);
1368     + if (buf) {
1369     + err = kstrtouint(buf, 10, res);
1370     + kfree(buf);
1371     + }
1372     + return err;
1373     +}
1374     +
1375     static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
1376     {
1377     char *p;
1378     @@ -471,6 +482,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
1379     while ((p = strsep(&opt, ",")) != NULL) {
1380     int token;
1381     int value;
1382     + unsigned uv;
1383     substring_t args[MAX_OPT_ARGS];
1384     if (!*p)
1385     continue;
1386     @@ -494,18 +506,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
1387     break;
1388    
1389     case OPT_USER_ID:
1390     - if (match_int(&args[0], &value))
1391     + if (fuse_match_uint(&args[0], &uv))
1392     return 0;
1393     - d->user_id = make_kuid(current_user_ns(), value);
1394     + d->user_id = make_kuid(current_user_ns(), uv);
1395     if (!uid_valid(d->user_id))
1396     return 0;
1397     d->user_id_present = 1;
1398     break;
1399    
1400     case OPT_GROUP_ID:
1401     - if (match_int(&args[0], &value))
1402     + if (fuse_match_uint(&args[0], &uv))
1403     return 0;
1404     - d->group_id = make_kgid(current_user_ns(), value);
1405     + d->group_id = make_kgid(current_user_ns(), uv);
1406     if (!gid_valid(d->group_id))
1407     return 0;
1408     d->group_id_present = 1;
1409     diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
1410     index cfc8dcc16043..ce87c9007b0f 100644
1411     --- a/fs/quota/dquot.c
1412     +++ b/fs/quota/dquot.c
1413     @@ -702,6 +702,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1414     struct dquot *dquot;
1415     unsigned long freed = 0;
1416    
1417     + spin_lock(&dq_list_lock);
1418     head = free_dquots.prev;
1419     while (head != &free_dquots && sc->nr_to_scan) {
1420     dquot = list_entry(head, struct dquot, dq_free);
1421     @@ -713,6 +714,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1422     freed++;
1423     head = free_dquots.prev;
1424     }
1425     + spin_unlock(&dq_list_lock);
1426     return freed;
1427     }
1428    
1429     diff --git a/include/net/sock.h b/include/net/sock.h
1430     index 57c31dd15e64..2f7bc435c93d 100644
1431     --- a/include/net/sock.h
1432     +++ b/include/net/sock.h
1433     @@ -1755,8 +1755,8 @@ sk_dst_get(struct sock *sk)
1434    
1435     rcu_read_lock();
1436     dst = rcu_dereference(sk->sk_dst_cache);
1437     - if (dst)
1438     - dst_hold(dst);
1439     + if (dst && !atomic_inc_not_zero(&dst->__refcnt))
1440     + dst = NULL;
1441     rcu_read_unlock();
1442     return dst;
1443     }
1444     @@ -1793,9 +1793,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
1445     static inline void
1446     sk_dst_set(struct sock *sk, struct dst_entry *dst)
1447     {
1448     - spin_lock(&sk->sk_dst_lock);
1449     - __sk_dst_set(sk, dst);
1450     - spin_unlock(&sk->sk_dst_lock);
1451     + struct dst_entry *old_dst;
1452     +
1453     + sk_tx_queue_clear(sk);
1454     + old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
1455     + dst_release(old_dst);
1456     }
1457    
1458     static inline void
1459     @@ -1807,9 +1809,7 @@ __sk_dst_reset(struct sock *sk)
1460     static inline void
1461     sk_dst_reset(struct sock *sk)
1462     {
1463     - spin_lock(&sk->sk_dst_lock);
1464     - __sk_dst_reset(sk);
1465     - spin_unlock(&sk->sk_dst_lock);
1466     + sk_dst_set(sk, NULL);
1467     }
1468    
1469     struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
1470     diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
1471     index d2b32ac27a39..ecee67a00f5f 100644
1472     --- a/kernel/Kconfig.locks
1473     +++ b/kernel/Kconfig.locks
1474     @@ -220,6 +220,9 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
1475    
1476     endif
1477    
1478     +config ARCH_SUPPORTS_ATOMIC_RMW
1479     + bool
1480     +
1481     config MUTEX_SPIN_ON_OWNER
1482     def_bool y
1483     - depends on SMP && !DEBUG_MUTEXES
1484     + depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
1485     diff --git a/kernel/events/core.c b/kernel/events/core.c
1486     index 0e7fea78f565..f774e9365a03 100644
1487     --- a/kernel/events/core.c
1488     +++ b/kernel/events/core.c
1489     @@ -2311,7 +2311,7 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
1490     next_parent = rcu_dereference(next_ctx->parent_ctx);
1491    
1492     /* If neither context have a parent context; they cannot be clones. */
1493     - if (!parent && !next_parent)
1494     + if (!parent || !next_parent)
1495     goto unlock;
1496    
1497     if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
1498     diff --git a/kernel/power/process.c b/kernel/power/process.c
1499     index 06ec8869dbf1..14f9a8d4725d 100644
1500     --- a/kernel/power/process.c
1501     +++ b/kernel/power/process.c
1502     @@ -184,6 +184,7 @@ void thaw_processes(void)
1503    
1504     printk("Restarting tasks ... ");
1505    
1506     + __usermodehelper_set_disable_depth(UMH_FREEZING);
1507     thaw_workqueues();
1508    
1509     read_lock(&tasklist_lock);
1510     diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
1511     index dd52e7ffb10e..183e8e5c38ba 100644
1512     --- a/kernel/sched/debug.c
1513     +++ b/kernel/sched/debug.c
1514     @@ -608,7 +608,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
1515    
1516     avg_atom = p->se.sum_exec_runtime;
1517     if (nr_switches)
1518     - do_div(avg_atom, nr_switches);
1519     + avg_atom = div64_ul(avg_atom, nr_switches);
1520     else
1521     avg_atom = -1LL;
1522    
1523     diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
1524     index 88c9c65a430d..fe75444ae7ec 100644
1525     --- a/kernel/time/alarmtimer.c
1526     +++ b/kernel/time/alarmtimer.c
1527     @@ -585,9 +585,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
1528     struct itimerspec *new_setting,
1529     struct itimerspec *old_setting)
1530     {
1531     + ktime_t exp;
1532     +
1533     if (!rtcdev)
1534     return -ENOTSUPP;
1535    
1536     + if (flags & ~TIMER_ABSTIME)
1537     + return -EINVAL;
1538     +
1539     if (old_setting)
1540     alarm_timer_get(timr, old_setting);
1541    
1542     @@ -597,8 +602,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
1543    
1544     /* start the timer */
1545     timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
1546     - alarm_start(&timr->it.alarm.alarmtimer,
1547     - timespec_to_ktime(new_setting->it_value));
1548     + exp = timespec_to_ktime(new_setting->it_value);
1549     + /* Convert (if necessary) to absolute time */
1550     + if (flags != TIMER_ABSTIME) {
1551     + ktime_t now;
1552     +
1553     + now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
1554     + exp = ktime_add(now, exp);
1555     + }
1556     +
1557     + alarm_start(&timr->it.alarm.alarmtimer, exp);
1558     return 0;
1559     }
1560    
1561     @@ -730,6 +743,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
1562     if (!alarmtimer_get_rtcdev())
1563     return -ENOTSUPP;
1564    
1565     + if (flags & ~TIMER_ABSTIME)
1566     + return -EINVAL;
1567     +
1568     if (!capable(CAP_WAKE_ALARM))
1569     return -EPERM;
1570    
1571     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
1572     index 868633e61b43..e3be87edde33 100644
1573     --- a/kernel/trace/ftrace.c
1574     +++ b/kernel/trace/ftrace.c
1575     @@ -331,12 +331,12 @@ static void update_ftrace_function(void)
1576     func = ftrace_ops_list_func;
1577     }
1578    
1579     + update_function_graph_func();
1580     +
1581     /* If there's no change, then do nothing more here */
1582     if (ftrace_trace_function == func)
1583     return;
1584    
1585     - update_function_graph_func();
1586     -
1587     /*
1588     * If we are using the list function, it doesn't care
1589     * about the function_trace_ops.
1590     diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
1591     index 04202d9aa514..0954450df7dc 100644
1592     --- a/kernel/trace/ring_buffer.c
1593     +++ b/kernel/trace/ring_buffer.c
1594     @@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
1595     struct ring_buffer_per_cpu *cpu_buffer;
1596     struct rb_irq_work *work;
1597    
1598     - if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
1599     - (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
1600     - return POLLIN | POLLRDNORM;
1601     -
1602     if (cpu == RING_BUFFER_ALL_CPUS)
1603     work = &buffer->irq_work;
1604     else {
1605     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1606     index 922657f30723..7e259b2bdf44 100644
1607     --- a/kernel/trace/trace.c
1608     +++ b/kernel/trace/trace.c
1609     @@ -454,6 +454,12 @@ int __trace_puts(unsigned long ip, const char *str, int size)
1610     struct print_entry *entry;
1611     unsigned long irq_flags;
1612     int alloc;
1613     + int pc;
1614     +
1615     + if (!(trace_flags & TRACE_ITER_PRINTK))
1616     + return 0;
1617     +
1618     + pc = preempt_count();
1619    
1620     if (unlikely(tracing_selftest_running || tracing_disabled))
1621     return 0;
1622     @@ -463,7 +469,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
1623     local_save_flags(irq_flags);
1624     buffer = global_trace.trace_buffer.buffer;
1625     event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1626     - irq_flags, preempt_count());
1627     + irq_flags, pc);
1628     if (!event)
1629     return 0;
1630    
1631     @@ -480,6 +486,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
1632     entry->buf[size] = '\0';
1633    
1634     __buffer_unlock_commit(buffer, event);
1635     + ftrace_trace_stack(buffer, irq_flags, 4, pc);
1636    
1637     return size;
1638     }
1639     @@ -497,6 +504,12 @@ int __trace_bputs(unsigned long ip, const char *str)
1640     struct bputs_entry *entry;
1641     unsigned long irq_flags;
1642     int size = sizeof(struct bputs_entry);
1643     + int pc;
1644     +
1645     + if (!(trace_flags & TRACE_ITER_PRINTK))
1646     + return 0;
1647     +
1648     + pc = preempt_count();
1649    
1650     if (unlikely(tracing_selftest_running || tracing_disabled))
1651     return 0;
1652     @@ -504,7 +517,7 @@ int __trace_bputs(unsigned long ip, const char *str)
1653     local_save_flags(irq_flags);
1654     buffer = global_trace.trace_buffer.buffer;
1655     event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1656     - irq_flags, preempt_count());
1657     + irq_flags, pc);
1658     if (!event)
1659     return 0;
1660    
1661     @@ -513,6 +526,7 @@ int __trace_bputs(unsigned long ip, const char *str)
1662     entry->str = str;
1663    
1664     __buffer_unlock_commit(buffer, event);
1665     + ftrace_trace_stack(buffer, irq_flags, 4, pc);
1666    
1667     return 1;
1668     }
1669     diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
1670     index 7b16d40bd64d..e4c4efc4ba0d 100644
1671     --- a/kernel/trace/trace_events.c
1672     +++ b/kernel/trace/trace_events.c
1673     @@ -439,6 +439,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
1674    
1675     list_del(&file->list);
1676     remove_subsystem(file->system);
1677     + free_event_filter(file->filter);
1678     kmem_cache_free(file_cachep, file);
1679     }
1680    
1681     diff --git a/mm/shmem.c b/mm/shmem.c
1682     index 1f18c9d0d93e..ff85863587ee 100644
1683     --- a/mm/shmem.c
1684     +++ b/mm/shmem.c
1685     @@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
1686     #define SHORT_SYMLINK_LEN 128
1687    
1688     /*
1689     - * shmem_fallocate and shmem_writepage communicate via inode->i_private
1690     - * (with i_mutex making sure that it has only one user at a time):
1691     - * we would prefer not to enlarge the shmem inode just for that.
1692     + * shmem_fallocate communicates with shmem_fault or shmem_writepage via
1693     + * inode->i_private (with i_mutex making sure that it has only one user at
1694     + * a time): we would prefer not to enlarge the shmem inode just for that.
1695     */
1696     struct shmem_falloc {
1697     + wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
1698     pgoff_t start; /* start of range currently being fallocated */
1699     pgoff_t next; /* the next page offset to be fallocated */
1700     pgoff_t nr_falloced; /* how many new pages have been fallocated */
1701     @@ -533,22 +534,19 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
1702     return;
1703    
1704     index = start;
1705     - for ( ; ; ) {
1706     + while (index < end) {
1707     cond_resched();
1708     pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
1709     min(end - index, (pgoff_t)PAGEVEC_SIZE),
1710     pvec.pages, indices);
1711     if (!pvec.nr) {
1712     - if (index == start || unfalloc)
1713     + /* If all gone or hole-punch or unfalloc, we're done */
1714     + if (index == start || end != -1)
1715     break;
1716     + /* But if truncating, restart to make sure all gone */
1717     index = start;
1718     continue;
1719     }
1720     - if ((index == start || unfalloc) && indices[0] >= end) {
1721     - shmem_deswap_pagevec(&pvec);
1722     - pagevec_release(&pvec);
1723     - break;
1724     - }
1725     mem_cgroup_uncharge_start();
1726     for (i = 0; i < pagevec_count(&pvec); i++) {
1727     struct page *page = pvec.pages[i];
1728     @@ -560,8 +558,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
1729     if (radix_tree_exceptional_entry(page)) {
1730     if (unfalloc)
1731     continue;
1732     - nr_swaps_freed += !shmem_free_swap(mapping,
1733     - index, page);
1734     + if (shmem_free_swap(mapping, index, page)) {
1735     + /* Swap was replaced by page: retry */
1736     + index--;
1737     + break;
1738     + }
1739     + nr_swaps_freed++;
1740     continue;
1741     }
1742    
1743     @@ -570,6 +572,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
1744     if (page->mapping == mapping) {
1745     VM_BUG_ON_PAGE(PageWriteback(page), page);
1746     truncate_inode_page(mapping, page);
1747     + } else {
1748     + /* Page was replaced by swap: retry */
1749     + unlock_page(page);
1750     + index--;
1751     + break;
1752     }
1753     }
1754     unlock_page(page);
1755     @@ -824,6 +831,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1756     spin_lock(&inode->i_lock);
1757     shmem_falloc = inode->i_private;
1758     if (shmem_falloc &&
1759     + !shmem_falloc->waitq &&
1760     index >= shmem_falloc->start &&
1761     index < shmem_falloc->next)
1762     shmem_falloc->nr_unswapped++;
1763     @@ -1298,6 +1306,64 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1764     int error;
1765     int ret = VM_FAULT_LOCKED;
1766    
1767     + /*
1768     + * Trinity finds that probing a hole which tmpfs is punching can
1769     + * prevent the hole-punch from ever completing: which in turn
1770     + * locks writers out with its hold on i_mutex. So refrain from
1771     + * faulting pages into the hole while it's being punched. Although
1772     + * shmem_undo_range() does remove the additions, it may be unable to
1773     + * keep up, as each new page needs its own unmap_mapping_range() call,
1774     + * and the i_mmap tree grows ever slower to scan if new vmas are added.
1775     + *
1776     + * It does not matter if we sometimes reach this check just before the
1777     + * hole-punch begins, so that one fault then races with the punch:
1778     + * we just need to make racing faults a rare case.
1779     + *
1780     + * The implementation below would be much simpler if we just used a
1781     + * standard mutex or completion: but we cannot take i_mutex in fault,
1782     + * and bloating every shmem inode for this unlikely case would be sad.
1783     + */
1784     + if (unlikely(inode->i_private)) {
1785     + struct shmem_falloc *shmem_falloc;
1786     +
1787     + spin_lock(&inode->i_lock);
1788     + shmem_falloc = inode->i_private;
1789     + if (shmem_falloc &&
1790     + shmem_falloc->waitq &&
1791     + vmf->pgoff >= shmem_falloc->start &&
1792     + vmf->pgoff < shmem_falloc->next) {
1793     + wait_queue_head_t *shmem_falloc_waitq;
1794     + DEFINE_WAIT(shmem_fault_wait);
1795     +
1796     + ret = VM_FAULT_NOPAGE;
1797     + if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1798     + !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1799     + /* It's polite to up mmap_sem if we can */
1800     + up_read(&vma->vm_mm->mmap_sem);
1801     + ret = VM_FAULT_RETRY;
1802     + }
1803     +
1804     + shmem_falloc_waitq = shmem_falloc->waitq;
1805     + prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1806     + TASK_UNINTERRUPTIBLE);
1807     + spin_unlock(&inode->i_lock);
1808     + schedule();
1809     +
1810     + /*
1811     + * shmem_falloc_waitq points into the shmem_fallocate()
1812     + * stack of the hole-punching task: shmem_falloc_waitq
1813     + * is usually invalid by the time we reach here, but
1814     + * finish_wait() does not dereference it in that case;
1815     + * though i_lock needed lest racing with wake_up_all().
1816     + */
1817     + spin_lock(&inode->i_lock);
1818     + finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1819     + spin_unlock(&inode->i_lock);
1820     + return ret;
1821     + }
1822     + spin_unlock(&inode->i_lock);
1823     + }
1824     +
1825     error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1826     if (error)
1827     return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1828     @@ -1817,12 +1883,25 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1829     struct address_space *mapping = file->f_mapping;
1830     loff_t unmap_start = round_up(offset, PAGE_SIZE);
1831     loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
1832     + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
1833     +
1834     + shmem_falloc.waitq = &shmem_falloc_waitq;
1835     + shmem_falloc.start = unmap_start >> PAGE_SHIFT;
1836     + shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
1837     + spin_lock(&inode->i_lock);
1838     + inode->i_private = &shmem_falloc;
1839     + spin_unlock(&inode->i_lock);
1840    
1841     if ((u64)unmap_end > (u64)unmap_start)
1842     unmap_mapping_range(mapping, unmap_start,
1843     1 + unmap_end - unmap_start, 0);
1844     shmem_truncate_range(inode, offset, offset + len - 1);
1845     /* No need to unmap again: hole-punching leaves COWed pages */
1846     +
1847     + spin_lock(&inode->i_lock);
1848     + inode->i_private = NULL;
1849     + wake_up_all(&shmem_falloc_waitq);
1850     + spin_unlock(&inode->i_lock);
1851     error = 0;
1852     goto out;
1853     }
1854     @@ -1840,6 +1919,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1855     goto out;
1856     }
1857    
1858     + shmem_falloc.waitq = NULL;
1859     shmem_falloc.start = start;
1860     shmem_falloc.next = start;
1861     shmem_falloc.nr_falloced = 0;
1862     diff --git a/mm/vmscan.c b/mm/vmscan.c
1863     index 6ef876cae8f1..6ef484f0777f 100644
1864     --- a/mm/vmscan.c
1865     +++ b/mm/vmscan.c
1866     @@ -1540,19 +1540,18 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1867     * If dirty pages are scanned that are not queued for IO, it
1868     * implies that flushers are not keeping up. In this case, flag
1869     * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
1870     - * pages from reclaim context. It will forcibly stall in the
1871     - * next check.
1872     + * pages from reclaim context.
1873     */
1874     if (nr_unqueued_dirty == nr_taken)
1875     zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
1876    
1877     /*
1878     - * In addition, if kswapd scans pages marked marked for
1879     - * immediate reclaim and under writeback (nr_immediate), it
1880     - * implies that pages are cycling through the LRU faster than
1881     + * If kswapd scans pages marked marked for immediate
1882     + * reclaim and under writeback (nr_immediate), it implies
1883     + * that pages are cycling through the LRU faster than
1884     * they are written so also forcibly stall.
1885     */
1886     - if (nr_unqueued_dirty == nr_taken || nr_immediate)
1887     + if (nr_immediate)
1888     congestion_wait(BLK_RW_ASYNC, HZ/10);
1889     }
1890    
1891     diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
1892     index 6ee48aac776f..7e57135c7cc4 100644
1893     --- a/net/8021q/vlan_core.c
1894     +++ b/net/8021q/vlan_core.c
1895     @@ -108,8 +108,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_id);
1896    
1897     static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
1898     {
1899     - if (skb_cow(skb, skb_headroom(skb)) < 0)
1900     + if (skb_cow(skb, skb_headroom(skb)) < 0) {
1901     + kfree_skb(skb);
1902     return NULL;
1903     + }
1904     +
1905     memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
1906     skb->mac_header += VLAN_HLEN;
1907     return skb;
1908     diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
1909     index cc0d21895420..1f26a1b8c576 100644
1910     --- a/net/8021q/vlan_dev.c
1911     +++ b/net/8021q/vlan_dev.c
1912     @@ -635,8 +635,6 @@ static void vlan_dev_uninit(struct net_device *dev)
1913     struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
1914     int i;
1915    
1916     - free_percpu(vlan->vlan_pcpu_stats);
1917     - vlan->vlan_pcpu_stats = NULL;
1918     for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
1919     while ((pm = vlan->egress_priority_map[i]) != NULL) {
1920     vlan->egress_priority_map[i] = pm->next;
1921     @@ -796,6 +794,15 @@ static const struct net_device_ops vlan_netdev_ops = {
1922     .ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
1923     };
1924    
1925     +static void vlan_dev_free(struct net_device *dev)
1926     +{
1927     + struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
1928     +
1929     + free_percpu(vlan->vlan_pcpu_stats);
1930     + vlan->vlan_pcpu_stats = NULL;
1931     + free_netdev(dev);
1932     +}
1933     +
1934     void vlan_setup(struct net_device *dev)
1935     {
1936     ether_setup(dev);
1937     @@ -805,7 +812,7 @@ void vlan_setup(struct net_device *dev)
1938     dev->tx_queue_len = 0;
1939    
1940     dev->netdev_ops = &vlan_netdev_ops;
1941     - dev->destructor = free_netdev;
1942     + dev->destructor = vlan_dev_free;
1943     dev->ethtool_ops = &vlan_ethtool_ops;
1944    
1945     memset(dev->broadcast, 0, ETH_ALEN);
1946     diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
1947     index 02806c6b2ff3..0c769cc65f25 100644
1948     --- a/net/appletalk/ddp.c
1949     +++ b/net/appletalk/ddp.c
1950     @@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
1951     goto drop;
1952    
1953     /* Queue packet (standard) */
1954     - skb->sk = sock;
1955     -
1956     if (sock_queue_rcv_skb(sock, skb) < 0)
1957     goto drop;
1958    
1959     @@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1960     if (!skb)
1961     goto out;
1962    
1963     - skb->sk = sk;
1964     skb_reserve(skb, ddp_dl->header_length);
1965     skb_reserve(skb, dev->hard_header_len);
1966     skb->dev = dev;
1967     diff --git a/net/core/dev.c b/net/core/dev.c
1968     index 4c1b483f7c07..37bddf729e77 100644
1969     --- a/net/core/dev.c
1970     +++ b/net/core/dev.c
1971     @@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly; /* Taps */
1972     static struct list_head offload_base __read_mostly;
1973    
1974     static int netif_rx_internal(struct sk_buff *skb);
1975     +static int call_netdevice_notifiers_info(unsigned long val,
1976     + struct net_device *dev,
1977     + struct netdev_notifier_info *info);
1978    
1979     /*
1980     * The @dev_base_head list is protected by @dev_base_lock and the rtnl
1981     @@ -1207,7 +1210,11 @@ EXPORT_SYMBOL(netdev_features_change);
1982     void netdev_state_change(struct net_device *dev)
1983     {
1984     if (dev->flags & IFF_UP) {
1985     - call_netdevice_notifiers(NETDEV_CHANGE, dev);
1986     + struct netdev_notifier_change_info change_info;
1987     +
1988     + change_info.flags_changed = 0;
1989     + call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1990     + &change_info.info);
1991     rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1992     }
1993     }
1994     @@ -4051,6 +4058,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
1995     skb->vlan_tci = 0;
1996     skb->dev = napi->dev;
1997     skb->skb_iif = 0;
1998     + skb->encapsulation = 0;
1999     + skb_shinfo(skb)->gso_type = 0;
2000     skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
2001    
2002     napi->skb = skb;
2003     diff --git a/net/core/dst.c b/net/core/dst.c
2004     index ca4231ec7347..15b6792e6ebb 100644
2005     --- a/net/core/dst.c
2006     +++ b/net/core/dst.c
2007     @@ -267,6 +267,15 @@ again:
2008     }
2009     EXPORT_SYMBOL(dst_destroy);
2010    
2011     +static void dst_destroy_rcu(struct rcu_head *head)
2012     +{
2013     + struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
2014     +
2015     + dst = dst_destroy(dst);
2016     + if (dst)
2017     + __dst_free(dst);
2018     +}
2019     +
2020     void dst_release(struct dst_entry *dst)
2021     {
2022     if (dst) {
2023     @@ -274,11 +283,8 @@ void dst_release(struct dst_entry *dst)
2024    
2025     newrefcnt = atomic_dec_return(&dst->__refcnt);
2026     WARN_ON(newrefcnt < 0);
2027     - if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
2028     - dst = dst_destroy(dst);
2029     - if (dst)
2030     - __dst_free(dst);
2031     - }
2032     + if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
2033     + call_rcu(&dst->rcu_head, dst_destroy_rcu);
2034     }
2035     }
2036     EXPORT_SYMBOL(dst_release);
2037     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2038     index e5ae776ee9b4..7f2e1fce706e 100644
2039     --- a/net/core/skbuff.c
2040     +++ b/net/core/skbuff.c
2041     @@ -2881,12 +2881,13 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
2042     int pos;
2043     int dummy;
2044    
2045     + __skb_push(head_skb, doffset);
2046     proto = skb_network_protocol(head_skb, &dummy);
2047     if (unlikely(!proto))
2048     return ERR_PTR(-EINVAL);
2049    
2050     csum = !!can_checksum_protocol(features, proto);
2051     - __skb_push(head_skb, doffset);
2052     +
2053     headroom = skb_headroom(head_skb);
2054     pos = skb_headlen(head_skb);
2055    
2056     diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
2057     index e7b6d53eef88..f005cc760535 100644
2058     --- a/net/dns_resolver/dns_query.c
2059     +++ b/net/dns_resolver/dns_query.c
2060     @@ -149,7 +149,9 @@ int dns_query(const char *type, const char *name, size_t namelen,
2061     if (!*_result)
2062     goto put;
2063    
2064     - memcpy(*_result, upayload->data, len + 1);
2065     + memcpy(*_result, upayload->data, len);
2066     + (*_result)[len] = '\0';
2067     +
2068     if (_expiry)
2069     *_expiry = rkey->expiry;
2070    
2071     diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
2072     index 19ab78aca547..07bd8edef417 100644
2073     --- a/net/ipv4/af_inet.c
2074     +++ b/net/ipv4/af_inet.c
2075     @@ -1434,6 +1434,9 @@ static int inet_gro_complete(struct sk_buff *skb, int nhoff)
2076     int proto = iph->protocol;
2077     int err = -ENOSYS;
2078    
2079     + if (skb->encapsulation)
2080     + skb_set_inner_network_header(skb, nhoff);
2081     +
2082     csum_replace2(&iph->check, iph->tot_len, newlen);
2083     iph->tot_len = newlen;
2084    
2085     diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
2086     index f1d32280cb54..2d24f293f977 100644
2087     --- a/net/ipv4/gre_offload.c
2088     +++ b/net/ipv4/gre_offload.c
2089     @@ -255,6 +255,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
2090     int err = -ENOENT;
2091     __be16 type;
2092    
2093     + skb->encapsulation = 1;
2094     + skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
2095     +
2096     type = greh->protocol;
2097     if (greh->flags & GRE_KEY)
2098     grehlen += GRE_HEADER_SECTION;
2099     diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
2100     index 0134663fdbce..1e4aa8354f93 100644
2101     --- a/net/ipv4/icmp.c
2102     +++ b/net/ipv4/icmp.c
2103     @@ -732,8 +732,6 @@ static void icmp_unreach(struct sk_buff *skb)
2104     /* fall through */
2105     case 0:
2106     info = ntohs(icmph->un.frag.mtu);
2107     - if (!info)
2108     - goto out;
2109     }
2110     break;
2111     case ICMP_SR_FAILED:
2112     diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
2113     index 97e4d1655d26..9db3b877fcaf 100644
2114     --- a/net/ipv4/igmp.c
2115     +++ b/net/ipv4/igmp.c
2116     @@ -1952,6 +1952,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
2117    
2118     rtnl_lock();
2119     in_dev = ip_mc_find_dev(net, imr);
2120     + if (!in_dev) {
2121     + ret = -ENODEV;
2122     + goto out;
2123     + }
2124     ifindex = imr->imr_ifindex;
2125     for (imlp = &inet->mc_list;
2126     (iml = rtnl_dereference(*imlp)) != NULL;
2127     @@ -1969,16 +1973,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
2128    
2129     *imlp = iml->next_rcu;
2130    
2131     - if (in_dev)
2132     - ip_mc_dec_group(in_dev, group);
2133     + ip_mc_dec_group(in_dev, group);
2134     rtnl_unlock();
2135     /* decrease mem now to avoid the memleak warning */
2136     atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
2137     kfree_rcu(iml, rcu);
2138     return 0;
2139     }
2140     - if (!in_dev)
2141     - ret = -ENODEV;
2142     +out:
2143     rtnl_unlock();
2144     return ret;
2145     }
2146     diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
2147     index f4ab72e19af9..96f90b89df32 100644
2148     --- a/net/ipv4/ip_options.c
2149     +++ b/net/ipv4/ip_options.c
2150     @@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
2151     optptr++;
2152     continue;
2153     }
2154     + if (unlikely(l < 2)) {
2155     + pp_ptr = optptr;
2156     + goto error;
2157     + }
2158     optlen = optptr[1];
2159     if (optlen < 2 || optlen > l) {
2160     pp_ptr = optptr;
2161     diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
2162     index 0c3a5d17b4a9..62cd9e0ae35b 100644
2163     --- a/net/ipv4/ip_tunnel.c
2164     +++ b/net/ipv4/ip_tunnel.c
2165     @@ -73,12 +73,7 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
2166     {
2167     struct dst_entry *old_dst;
2168    
2169     - if (dst) {
2170     - if (dst->flags & DST_NOCACHE)
2171     - dst = NULL;
2172     - else
2173     - dst_clone(dst);
2174     - }
2175     + dst_clone(dst);
2176     old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
2177     dst_release(old_dst);
2178     }
2179     @@ -108,13 +103,14 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
2180    
2181     rcu_read_lock();
2182     dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
2183     + if (dst && !atomic_inc_not_zero(&dst->__refcnt))
2184     + dst = NULL;
2185     if (dst) {
2186     if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
2187     - rcu_read_unlock();
2188     tunnel_dst_reset(t);
2189     - return NULL;
2190     + dst_release(dst);
2191     + dst = NULL;
2192     }
2193     - dst_hold(dst);
2194     }
2195     rcu_read_unlock();
2196     return (struct rtable *)dst;
2197     @@ -173,6 +169,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
2198    
2199     hlist_for_each_entry_rcu(t, head, hash_node) {
2200     if (remote != t->parms.iph.daddr ||
2201     + t->parms.iph.saddr != 0 ||
2202     !(t->dev->flags & IFF_UP))
2203     continue;
2204    
2205     @@ -189,10 +186,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
2206     head = &itn->tunnels[hash];
2207    
2208     hlist_for_each_entry_rcu(t, head, hash_node) {
2209     - if ((local != t->parms.iph.saddr &&
2210     - (local != t->parms.iph.daddr ||
2211     - !ipv4_is_multicast(local))) ||
2212     - !(t->dev->flags & IFF_UP))
2213     + if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
2214     + (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
2215     + continue;
2216     +
2217     + if (!(t->dev->flags & IFF_UP))
2218     continue;
2219    
2220     if (!ip_tunnel_key_match(&t->parms, flags, key))
2221     @@ -209,6 +207,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
2222    
2223     hlist_for_each_entry_rcu(t, head, hash_node) {
2224     if (t->parms.i_key != key ||
2225     + t->parms.iph.saddr != 0 ||
2226     + t->parms.iph.daddr != 0 ||
2227     !(t->dev->flags & IFF_UP))
2228     continue;
2229    
2230     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
2231     index 134437309b1e..031553f8a306 100644
2232     --- a/net/ipv4/route.c
2233     +++ b/net/ipv4/route.c
2234     @@ -1029,7 +1029,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
2235     const struct iphdr *iph = (const struct iphdr *) skb->data;
2236     struct flowi4 fl4;
2237     struct rtable *rt;
2238     - struct dst_entry *dst;
2239     + struct dst_entry *odst = NULL;
2240     bool new = false;
2241    
2242     bh_lock_sock(sk);
2243     @@ -1037,16 +1037,17 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
2244     if (!ip_sk_accept_pmtu(sk))
2245     goto out;
2246    
2247     - rt = (struct rtable *) __sk_dst_get(sk);
2248     + odst = sk_dst_get(sk);
2249    
2250     - if (sock_owned_by_user(sk) || !rt) {
2251     + if (sock_owned_by_user(sk) || !odst) {
2252     __ipv4_sk_update_pmtu(skb, sk, mtu);
2253     goto out;
2254     }
2255    
2256     __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
2257    
2258     - if (!__sk_dst_check(sk, 0)) {
2259     + rt = (struct rtable *)odst;
2260     + if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
2261     rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
2262     if (IS_ERR(rt))
2263     goto out;
2264     @@ -1056,8 +1057,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
2265    
2266     __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
2267    
2268     - dst = dst_check(&rt->dst, 0);
2269     - if (!dst) {
2270     + if (!dst_check(&rt->dst, 0)) {
2271     if (new)
2272     dst_release(&rt->dst);
2273    
2274     @@ -1069,10 +1069,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
2275     }
2276    
2277     if (new)
2278     - __sk_dst_set(sk, &rt->dst);
2279     + sk_dst_set(sk, &rt->dst);
2280    
2281     out:
2282     bh_unlock_sock(sk);
2283     + dst_release(odst);
2284     }
2285     EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
2286    
2287     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2288     index 97c8f5620c43..b48fba0aaa92 100644
2289     --- a/net/ipv4/tcp.c
2290     +++ b/net/ipv4/tcp.c
2291     @@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
2292     if (unlikely(tp->repair)) {
2293     if (tp->repair_queue == TCP_RECV_QUEUE) {
2294     copied = tcp_send_rcvq(sk, msg, size);
2295     - goto out;
2296     + goto out_nopush;
2297     }
2298    
2299     err = -EINVAL;
2300     @@ -1282,6 +1282,7 @@ wait_for_memory:
2301     out:
2302     if (copied)
2303     tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
2304     +out_nopush:
2305     release_sock(sk);
2306     return copied + copied_syn;
2307    
2308     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2309     index e3647465138b..3898694d0300 100644
2310     --- a/net/ipv4/tcp_input.c
2311     +++ b/net/ipv4/tcp_input.c
2312     @@ -1113,7 +1113,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
2313     }
2314    
2315     /* D-SACK for already forgotten data... Do dumb counting. */
2316     - if (dup_sack && tp->undo_marker && tp->undo_retrans &&
2317     + if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
2318     !after(end_seq_0, prior_snd_una) &&
2319     after(end_seq_0, tp->undo_marker))
2320     tp->undo_retrans--;
2321     @@ -1169,7 +1169,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
2322     unsigned int new_len = (pkt_len / mss) * mss;
2323     if (!in_sack && new_len < pkt_len) {
2324     new_len += mss;
2325     - if (new_len > skb->len)
2326     + if (new_len >= skb->len)
2327     return 0;
2328     }
2329     pkt_len = new_len;
2330     @@ -1193,7 +1193,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
2331    
2332     /* Account D-SACK for retransmitted packet. */
2333     if (dup_sack && (sacked & TCPCB_RETRANS)) {
2334     - if (tp->undo_marker && tp->undo_retrans &&
2335     + if (tp->undo_marker && tp->undo_retrans > 0 &&
2336     after(end_seq, tp->undo_marker))
2337     tp->undo_retrans--;
2338     if (sacked & TCPCB_SACKED_ACKED)
2339     @@ -1894,7 +1894,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
2340     tp->lost_out = 0;
2341    
2342     tp->undo_marker = 0;
2343     - tp->undo_retrans = 0;
2344     + tp->undo_retrans = -1;
2345     }
2346    
2347     void tcp_clear_retrans(struct tcp_sock *tp)
2348     @@ -2663,7 +2663,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
2349    
2350     tp->prior_ssthresh = 0;
2351     tp->undo_marker = tp->snd_una;
2352     - tp->undo_retrans = tp->retrans_out;
2353     + tp->undo_retrans = tp->retrans_out ? : -1;
2354    
2355     if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
2356     if (!ece_ack)
2357     diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
2358     index b92b81718ca4..c25953a386d0 100644
2359     --- a/net/ipv4/tcp_offload.c
2360     +++ b/net/ipv4/tcp_offload.c
2361     @@ -310,7 +310,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
2362    
2363     th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
2364     iph->daddr, 0);
2365     - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2366     + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
2367    
2368     return tcp_gro_complete(skb);
2369     }
2370     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
2371     index 17a11e65e57f..b3d1addd816b 100644
2372     --- a/net/ipv4/tcp_output.c
2373     +++ b/net/ipv4/tcp_output.c
2374     @@ -2448,8 +2448,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2375     if (!tp->retrans_stamp)
2376     tp->retrans_stamp = TCP_SKB_CB(skb)->when;
2377    
2378     - tp->undo_retrans += tcp_skb_pcount(skb);
2379     -
2380     /* snd_nxt is stored to detect loss of retransmitted segment,
2381     * see tcp_input.c tcp_sacktag_write_queue().
2382     */
2383     @@ -2457,6 +2455,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2384     } else {
2385     NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2386     }
2387     +
2388     + if (tp->undo_retrans < 0)
2389     + tp->undo_retrans = 0;
2390     + tp->undo_retrans += tcp_skb_pcount(skb);
2391     return err;
2392     }
2393    
2394     diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
2395     index 8517d3cd1aed..01b0ff9a0c2c 100644
2396     --- a/net/ipv6/tcpv6_offload.c
2397     +++ b/net/ipv6/tcpv6_offload.c
2398     @@ -73,7 +73,7 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
2399    
2400     th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
2401     &iph->daddr, 0);
2402     - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
2403     + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
2404    
2405     return tcp_gro_complete(skb);
2406     }
2407     diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
2408     index 7f40fd25acae..0dfe894afd48 100644
2409     --- a/net/netlink/af_netlink.c
2410     +++ b/net/netlink/af_netlink.c
2411     @@ -636,7 +636,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
2412     while (nlk->cb_running && netlink_dump_space(nlk)) {
2413     err = netlink_dump(sk);
2414     if (err < 0) {
2415     - sk->sk_err = err;
2416     + sk->sk_err = -err;
2417     sk->sk_error_report(sk);
2418     break;
2419     }
2420     @@ -2448,7 +2448,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2421     atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
2422     ret = netlink_dump(sk);
2423     if (ret) {
2424     - sk->sk_err = ret;
2425     + sk->sk_err = -ret;
2426     sk->sk_error_report(sk);
2427     }
2428     }
2429     diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
2430     index c82fdc1eab7c..dfa532f00d88 100644
2431     --- a/net/sctp/sysctl.c
2432     +++ b/net/sctp/sysctl.c
2433     @@ -307,41 +307,40 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
2434     loff_t *ppos)
2435     {
2436     struct net *net = current->nsproxy->net_ns;
2437     - char tmp[8];
2438     struct ctl_table tbl;
2439     - int ret;
2440     - int changed = 0;
2441     + bool changed = false;
2442     char *none = "none";
2443     + char tmp[8];
2444     + int ret;
2445    
2446     memset(&tbl, 0, sizeof(struct ctl_table));
2447    
2448     if (write) {
2449     tbl.data = tmp;
2450     - tbl.maxlen = 8;
2451     + tbl.maxlen = sizeof(tmp);
2452     } else {
2453     tbl.data = net->sctp.sctp_hmac_alg ? : none;
2454     tbl.maxlen = strlen(tbl.data);
2455     }
2456     - ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
2457    
2458     - if (write) {
2459     + ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
2460     + if (write && ret == 0) {
2461     #ifdef CONFIG_CRYPTO_MD5
2462     if (!strncmp(tmp, "md5", 3)) {
2463     net->sctp.sctp_hmac_alg = "md5";
2464     - changed = 1;
2465     + changed = true;
2466     }
2467     #endif
2468     #ifdef CONFIG_CRYPTO_SHA1
2469     if (!strncmp(tmp, "sha1", 4)) {
2470     net->sctp.sctp_hmac_alg = "sha1";
2471     - changed = 1;
2472     + changed = true;
2473     }
2474     #endif
2475     if (!strncmp(tmp, "none", 4)) {
2476     net->sctp.sctp_hmac_alg = NULL;
2477     - changed = 1;
2478     + changed = true;
2479     }
2480     -
2481     if (!changed)
2482     ret = -EINVAL;
2483     }
2484     @@ -354,11 +353,10 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
2485     loff_t *ppos)
2486     {
2487     struct net *net = current->nsproxy->net_ns;
2488     - int new_value;
2489     - struct ctl_table tbl;
2490     unsigned int min = *(unsigned int *) ctl->extra1;
2491     unsigned int max = *(unsigned int *) ctl->extra2;
2492     - int ret;
2493     + struct ctl_table tbl;
2494     + int ret, new_value;
2495    
2496     memset(&tbl, 0, sizeof(struct ctl_table));
2497     tbl.maxlen = sizeof(unsigned int);
2498     @@ -367,12 +365,15 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
2499     tbl.data = &new_value;
2500     else
2501     tbl.data = &net->sctp.rto_min;
2502     +
2503     ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
2504     - if (write) {
2505     - if (ret || new_value > max || new_value < min)
2506     + if (write && ret == 0) {
2507     + if (new_value > max || new_value < min)
2508     return -EINVAL;
2509     +
2510     net->sctp.rto_min = new_value;
2511     }
2512     +
2513     return ret;
2514     }
2515    
2516     @@ -381,11 +382,10 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
2517     loff_t *ppos)
2518     {
2519     struct net *net = current->nsproxy->net_ns;
2520     - int new_value;
2521     - struct ctl_table tbl;
2522     unsigned int min = *(unsigned int *) ctl->extra1;
2523     unsigned int max = *(unsigned int *) ctl->extra2;
2524     - int ret;
2525     + struct ctl_table tbl;
2526     + int ret, new_value;
2527    
2528     memset(&tbl, 0, sizeof(struct ctl_table));
2529     tbl.maxlen = sizeof(unsigned int);
2530     @@ -394,12 +394,15 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
2531     tbl.data = &new_value;
2532     else
2533     tbl.data = &net->sctp.rto_max;
2534     +
2535     ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
2536     - if (write) {
2537     - if (ret || new_value > max || new_value < min)
2538     + if (write && ret == 0) {
2539     + if (new_value > max || new_value < min)
2540     return -EINVAL;
2541     +
2542     net->sctp.rto_max = new_value;
2543     }
2544     +
2545     return ret;
2546     }
2547    
2548     @@ -420,8 +423,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
2549     tbl.data = &net->sctp.auth_enable;
2550    
2551     ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
2552     -
2553     - if (write) {
2554     + if (write && ret == 0) {
2555     struct sock *sk = net->sctp.ctl_sock;
2556    
2557     net->sctp.auth_enable = new_value;
2558     diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
2559     index 85c64658bd0b..b6842fdb53d4 100644
2560     --- a/net/sctp/ulpevent.c
2561     +++ b/net/sctp/ulpevent.c
2562     @@ -366,9 +366,10 @@ fail:
2563     * specification [SCTP] and any extensions for a list of possible
2564     * error formats.
2565     */
2566     -struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
2567     - const struct sctp_association *asoc, struct sctp_chunk *chunk,
2568     - __u16 flags, gfp_t gfp)
2569     +struct sctp_ulpevent *
2570     +sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
2571     + struct sctp_chunk *chunk, __u16 flags,
2572     + gfp_t gfp)
2573     {
2574     struct sctp_ulpevent *event;
2575     struct sctp_remote_error *sre;
2576     @@ -387,8 +388,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
2577     /* Copy the skb to a new skb with room for us to prepend
2578     * notification with.
2579     */
2580     - skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
2581     - 0, gfp);
2582     + skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
2583    
2584     /* Pull off the rest of the cause TLV from the chunk. */
2585     skb_pull(chunk->skb, elen);
2586     @@ -399,62 +399,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
2587     event = sctp_skb2event(skb);
2588     sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
2589    
2590     - sre = (struct sctp_remote_error *)
2591     - skb_push(skb, sizeof(struct sctp_remote_error));
2592     + sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
2593    
2594     /* Trim the buffer to the right length. */
2595     - skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
2596     + skb_trim(skb, sizeof(*sre) + elen);
2597    
2598     - /* Socket Extensions for SCTP
2599     - * 5.3.1.3 SCTP_REMOTE_ERROR
2600     - *
2601     - * sre_type:
2602     - * It should be SCTP_REMOTE_ERROR.
2603     - */
2604     + /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
2605     + memset(sre, 0, sizeof(*sre));
2606     sre->sre_type = SCTP_REMOTE_ERROR;
2607     -
2608     - /*
2609     - * Socket Extensions for SCTP
2610     - * 5.3.1.3 SCTP_REMOTE_ERROR
2611     - *
2612     - * sre_flags: 16 bits (unsigned integer)
2613     - * Currently unused.
2614     - */
2615     sre->sre_flags = 0;
2616     -
2617     - /* Socket Extensions for SCTP
2618     - * 5.3.1.3 SCTP_REMOTE_ERROR
2619     - *
2620     - * sre_length: sizeof (__u32)
2621     - *
2622     - * This field is the total length of the notification data,
2623     - * including the notification header.
2624     - */
2625     sre->sre_length = skb->len;
2626     -
2627     - /* Socket Extensions for SCTP
2628     - * 5.3.1.3 SCTP_REMOTE_ERROR
2629     - *
2630     - * sre_error: 16 bits (unsigned integer)
2631     - * This value represents one of the Operational Error causes defined in
2632     - * the SCTP specification, in network byte order.
2633     - */
2634     sre->sre_error = cause;
2635     -
2636     - /* Socket Extensions for SCTP
2637     - * 5.3.1.3 SCTP_REMOTE_ERROR
2638     - *
2639     - * sre_assoc_id: sizeof (sctp_assoc_t)
2640     - *
2641     - * The association id field, holds the identifier for the association.
2642     - * All notifications for a given association have the same association
2643     - * identifier. For TCP style socket, this field is ignored.
2644     - */
2645     sctp_ulpevent_set_owner(event, asoc);
2646     sre->sre_assoc_id = sctp_assoc2id(asoc);
2647    
2648     return event;
2649     -
2650     fail:
2651     return NULL;
2652     }
2653     @@ -899,7 +858,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
2654     return notification->sn_header.sn_type;
2655     }
2656    
2657     -/* Copy out the sndrcvinfo into a msghdr. */
2658     +/* RFC6458, Section 5.3.2. SCTP Header Information Structure
2659     + * (SCTP_SNDRCV, DEPRECATED)
2660     + */
2661     void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
2662     struct msghdr *msghdr)
2663     {
2664     @@ -908,74 +869,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
2665     if (sctp_ulpevent_is_notification(event))
2666     return;
2667    
2668     - /* Sockets API Extensions for SCTP
2669     - * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
2670     - *
2671     - * sinfo_stream: 16 bits (unsigned integer)
2672     - *
2673     - * For recvmsg() the SCTP stack places the message's stream number in
2674     - * this value.
2675     - */
2676     + memset(&sinfo, 0, sizeof(sinfo));
2677     sinfo.sinfo_stream = event->stream;
2678     - /* sinfo_ssn: 16 bits (unsigned integer)
2679     - *
2680     - * For recvmsg() this value contains the stream sequence number that
2681     - * the remote endpoint placed in the DATA chunk. For fragmented
2682     - * messages this is the same number for all deliveries of the message
2683     - * (if more than one recvmsg() is needed to read the message).
2684     - */
2685     sinfo.sinfo_ssn = event->ssn;
2686     - /* sinfo_ppid: 32 bits (unsigned integer)
2687     - *
2688     - * In recvmsg() this value is
2689     - * the same information that was passed by the upper layer in the peer
2690     - * application. Please note that byte order issues are NOT accounted
2691     - * for and this information is passed opaquely by the SCTP stack from
2692     - * one end to the other.
2693     - */
2694     sinfo.sinfo_ppid = event->ppid;
2695     - /* sinfo_flags: 16 bits (unsigned integer)
2696     - *
2697     - * This field may contain any of the following flags and is composed of
2698     - * a bitwise OR of these values.
2699     - *
2700     - * recvmsg() flags:
2701     - *
2702     - * SCTP_UNORDERED - This flag is present when the message was sent
2703     - * non-ordered.
2704     - */
2705     sinfo.sinfo_flags = event->flags;
2706     - /* sinfo_tsn: 32 bit (unsigned integer)
2707     - *
2708     - * For the receiving side, this field holds a TSN that was
2709     - * assigned to one of the SCTP Data Chunks.
2710     - */
2711     sinfo.sinfo_tsn = event->tsn;
2712     - /* sinfo_cumtsn: 32 bit (unsigned integer)
2713     - *
2714     - * This field will hold the current cumulative TSN as
2715     - * known by the underlying SCTP layer. Note this field is
2716     - * ignored when sending and only valid for a receive
2717     - * operation when sinfo_flags are set to SCTP_UNORDERED.
2718     - */
2719     sinfo.sinfo_cumtsn = event->cumtsn;
2720     - /* sinfo_assoc_id: sizeof (sctp_assoc_t)
2721     - *
2722     - * The association handle field, sinfo_assoc_id, holds the identifier
2723     - * for the association announced in the COMMUNICATION_UP notification.
2724     - * All notifications for a given association have the same identifier.
2725     - * Ignored for one-to-one style sockets.
2726     - */
2727     sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
2728     -
2729     - /* context value that is set via SCTP_CONTEXT socket option. */
2730     + /* Context value that is set via SCTP_CONTEXT socket option. */
2731     sinfo.sinfo_context = event->asoc->default_rcv_context;
2732     -
2733     /* These fields are not used while receiving. */
2734     sinfo.sinfo_timetolive = 0;
2735    
2736     put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
2737     - sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
2738     + sizeof(sinfo), &sinfo);
2739     }
2740    
2741     /* Do accounting for bytes received and hold a reference to the association
2742     diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
2743     index bf860d9e75af..3ca45bf5029f 100644
2744     --- a/net/tipc/bcast.c
2745     +++ b/net/tipc/bcast.c
2746     @@ -537,6 +537,7 @@ receive:
2747    
2748     buf = node->bclink.deferred_head;
2749     node->bclink.deferred_head = buf->next;
2750     + buf->next = NULL;
2751     node->bclink.deferred_size--;
2752     goto receive;
2753     }
2754     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
2755     index 22f7883fcb9a..7ec91424ba22 100644
2756     --- a/sound/pci/hda/hda_intel.c
2757     +++ b/sound/pci/hda/hda_intel.c
2758     @@ -2930,7 +2930,7 @@ static int azx_suspend(struct device *dev)
2759     struct azx *chip = card->private_data;
2760     struct azx_pcm *p;
2761    
2762     - if (chip->disabled)
2763     + if (chip->disabled || chip->init_failed)
2764     return 0;
2765    
2766     snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
2767     @@ -2961,7 +2961,7 @@ static int azx_resume(struct device *dev)
2768     struct snd_card *card = dev_get_drvdata(dev);
2769     struct azx *chip = card->private_data;
2770    
2771     - if (chip->disabled)
2772     + if (chip->disabled || chip->init_failed)
2773     return 0;
2774    
2775     if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
2776     @@ -2996,7 +2996,7 @@ static int azx_runtime_suspend(struct device *dev)
2777     struct snd_card *card = dev_get_drvdata(dev);
2778     struct azx *chip = card->private_data;
2779    
2780     - if (chip->disabled)
2781     + if (chip->disabled || chip->init_failed)
2782     return 0;
2783    
2784     if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
2785     @@ -3022,7 +3022,7 @@ static int azx_runtime_resume(struct device *dev)
2786     struct hda_codec *codec;
2787     int status;
2788    
2789     - if (chip->disabled)
2790     + if (chip->disabled || chip->init_failed)
2791     return 0;
2792    
2793     if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
2794     @@ -3057,7 +3057,7 @@ static int azx_runtime_idle(struct device *dev)
2795     struct snd_card *card = dev_get_drvdata(dev);
2796     struct azx *chip = card->private_data;
2797    
2798     - if (chip->disabled)
2799     + if (chip->disabled || chip->init_failed)
2800     return 0;
2801    
2802     if (!power_save_controller ||
2803     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2804     index 3abfe2a642ec..d135c906caff 100644
2805     --- a/sound/pci/hda/patch_hdmi.c
2806     +++ b/sound/pci/hda/patch_hdmi.c
2807     @@ -1062,6 +1062,7 @@ static void hdmi_pin_setup_infoframe(struct hda_codec *codec,
2808     {
2809     union audio_infoframe ai;
2810    
2811     + memset(&ai, 0, sizeof(ai));
2812     if (conn_type == 0) { /* HDMI */
2813     struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi;
2814