Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.12/0117-3.12.18-all-fixes.patch



Revision 2432
Tue May 13 11:02:49 2014 UTC by niro
File size: 102639 bytes
-linux-3.12.18
1 niro 2432 diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
2     index 11ace3c3d805..4fc392763611 100644
3     --- a/Documentation/devicetree/bindings/net/micrel-ks8851.txt
4     +++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
5     @@ -7,3 +7,4 @@ Required properties:
6    
7     Optional properties:
8     - local-mac-address : Ethernet mac address to use
9     +- vdd-supply: supply for Ethernet mac
10     diff --git a/Makefile b/Makefile
11     index fbd1ee8afea8..fc0dcf63a8d9 100644
12     --- a/Makefile
13     +++ b/Makefile
14     @@ -1,6 +1,6 @@
15     VERSION = 3
16     PATCHLEVEL = 12
17     -SUBLEVEL = 17
18     +SUBLEVEL = 18
19     EXTRAVERSION =
20     NAME = One Giant Leap for Frogkind
21    
22     diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
23     index ea16d782af58..4f31b2eb5cdf 100644
24     --- a/arch/arc/boot/dts/nsimosci.dts
25     +++ b/arch/arc/boot/dts/nsimosci.dts
26     @@ -11,13 +11,16 @@
27    
28     / {
29     compatible = "snps,nsimosci";
30     - clock-frequency = <80000000>; /* 80 MHZ */
31     + clock-frequency = <20000000>; /* 20 MHZ */
32     #address-cells = <1>;
33     #size-cells = <1>;
34     interrupt-parent = <&intc>;
35    
36     chosen {
37     - bootargs = "console=tty0 consoleblank=0";
38     + /* this is for console on PGU */
39     + /* bootargs = "console=tty0 consoleblank=0"; */
40     + /* this is for console on serial */
41     + bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=ttyS0,115200n8 consoleblank=0 debug";
42     };
43    
44     aliases {
45     @@ -44,15 +47,14 @@
46     };
47    
48     uart0: serial@c0000000 {
49     - compatible = "snps,dw-apb-uart";
50     + compatible = "ns8250";
51     reg = <0xc0000000 0x2000>;
52     interrupts = <11>;
53     - #clock-frequency = <80000000>;
54     clock-frequency = <3686400>;
55     baud = <115200>;
56     reg-shift = <2>;
57     reg-io-width = <4>;
58     - status = "okay";
59     + no-loopback-test = <1>;
60     };
61    
62     pgu0: pgu@c9000000 {
63     diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
64     index 451af30914f6..c01ba35a4eff 100644
65     --- a/arch/arc/configs/nsimosci_defconfig
66     +++ b/arch/arc/configs/nsimosci_defconfig
67     @@ -54,6 +54,7 @@ CONFIG_SERIO_ARC_PS2=y
68     CONFIG_SERIAL_8250=y
69     CONFIG_SERIAL_8250_CONSOLE=y
70     CONFIG_SERIAL_8250_DW=y
71     +CONFIG_SERIAL_OF_PLATFORM=y
72     CONFIG_SERIAL_ARC=y
73     CONFIG_SERIAL_ARC_CONSOLE=y
74     # CONFIG_HW_RANDOM is not set
75     diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
76     index 311a300d48cc..ee121a0f5b00 100644
77     --- a/arch/m68k/Kconfig
78     +++ b/arch/m68k/Kconfig
79     @@ -16,6 +16,7 @@ config M68K
80     select FPU if MMU
81     select ARCH_WANT_IPC_PARSE_VERSION
82     select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
83     + select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
84     select HAVE_MOD_ARCH_SPECIFIC
85     select MODULES_USE_ELF_REL
86     select MODULES_USE_ELF_RELA
87     diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
88     index 3e01afa21710..6671e8db1861 100644
89     --- a/arch/s390/Kconfig
90     +++ b/arch/s390/Kconfig
91     @@ -116,6 +116,7 @@ config S390
92     select HAVE_FUNCTION_GRAPH_TRACER
93     select HAVE_FUNCTION_TRACER
94     select HAVE_FUNCTION_TRACE_MCOUNT_TEST
95     + select HAVE_FUTEX_CMPXCHG if FUTEX
96     select HAVE_KERNEL_BZIP2
97     select HAVE_KERNEL_GZIP
98     select HAVE_KERNEL_LZ4
99     diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
100     index 586f41aac361..185fad49d86f 100644
101     --- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
102     +++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
103     @@ -24,10 +24,6 @@
104     .align 16
105     .Lbswap_mask:
106     .octa 0x000102030405060708090a0b0c0d0e0f
107     -.Lpoly:
108     - .octa 0xc2000000000000000000000000000001
109     -.Ltwo_one:
110     - .octa 0x00000001000000000000000000000001
111    
112     #define DATA %xmm0
113     #define SHASH %xmm1
114     @@ -134,28 +130,3 @@ ENTRY(clmul_ghash_update)
115     .Lupdate_just_ret:
116     ret
117     ENDPROC(clmul_ghash_update)
118     -
119     -/*
120     - * void clmul_ghash_setkey(be128 *shash, const u8 *key);
121     - *
122     - * Calculate hash_key << 1 mod poly
123     - */
124     -ENTRY(clmul_ghash_setkey)
125     - movaps .Lbswap_mask, BSWAP
126     - movups (%rsi), %xmm0
127     - PSHUFB_XMM BSWAP %xmm0
128     - movaps %xmm0, %xmm1
129     - psllq $1, %xmm0
130     - psrlq $63, %xmm1
131     - movaps %xmm1, %xmm2
132     - pslldq $8, %xmm1
133     - psrldq $8, %xmm2
134     - por %xmm1, %xmm0
135     - # reduction
136     - pshufd $0b00100100, %xmm2, %xmm1
137     - pcmpeqd .Ltwo_one, %xmm1
138     - pand .Lpoly, %xmm1
139     - pxor %xmm1, %xmm0
140     - movups %xmm0, (%rdi)
141     - ret
142     -ENDPROC(clmul_ghash_setkey)
143     diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
144     index 6759dd1135be..d785cf2c529c 100644
145     --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
146     +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
147     @@ -30,8 +30,6 @@ void clmul_ghash_mul(char *dst, const be128 *shash);
148     void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
149     const be128 *shash);
150    
151     -void clmul_ghash_setkey(be128 *shash, const u8 *key);
152     -
153     struct ghash_async_ctx {
154     struct cryptd_ahash *cryptd_tfm;
155     };
156     @@ -58,13 +56,23 @@ static int ghash_setkey(struct crypto_shash *tfm,
157     const u8 *key, unsigned int keylen)
158     {
159     struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
160     + be128 *x = (be128 *)key;
161     + u64 a, b;
162    
163     if (keylen != GHASH_BLOCK_SIZE) {
164     crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
165     return -EINVAL;
166     }
167    
168     - clmul_ghash_setkey(&ctx->shash, key);
169     + /* perform multiplication by 'x' in GF(2^128) */
170     + a = be64_to_cpu(x->a);
171     + b = be64_to_cpu(x->b);
172     +
173     + ctx->shash.a = (__be64)((b << 1) | (a >> 63));
174     + ctx->shash.b = (__be64)((a << 1) | (b >> 63));
175     +
176     + if (a >> 63)
177     + ctx->shash.b ^= cpu_to_be64(0xc2);
178    
179     return 0;
180     }
181     diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
182     index 04ceb7e2fadd..690011de912a 100644
183     --- a/drivers/block/floppy.c
184     +++ b/drivers/block/floppy.c
185     @@ -3691,9 +3691,12 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
186     if (!(mode & FMODE_NDELAY)) {
187     if (mode & (FMODE_READ|FMODE_WRITE)) {
188     UDRS->last_checked = 0;
189     + clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
190     check_disk_change(bdev);
191     if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
192     goto out;
193     + if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
194     + goto out;
195     }
196     res = -EROFS;
197     if ((mode & FMODE_WRITE) &&
198     @@ -3746,17 +3749,29 @@ static unsigned int floppy_check_events(struct gendisk *disk,
199     * a disk in the drive, and whether that disk is writable.
200     */
201    
202     -static void floppy_rb0_complete(struct bio *bio, int err)
203     +struct rb0_cbdata {
204     + int drive;
205     + struct completion complete;
206     +};
207     +
208     +static void floppy_rb0_cb(struct bio *bio, int err)
209     {
210     - complete((struct completion *)bio->bi_private);
211     + struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
212     + int drive = cbdata->drive;
213     +
214     + if (err) {
215     + pr_info("floppy: error %d while reading block 0", err);
216     + set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
217     + }
218     + complete(&cbdata->complete);
219     }
220    
221     -static int __floppy_read_block_0(struct block_device *bdev)
222     +static int __floppy_read_block_0(struct block_device *bdev, int drive)
223     {
224     struct bio bio;
225     struct bio_vec bio_vec;
226     - struct completion complete;
227     struct page *page;
228     + struct rb0_cbdata cbdata;
229     size_t size;
230    
231     page = alloc_page(GFP_NOIO);
232     @@ -3769,6 +3784,8 @@ static int __floppy_read_block_0(struct block_device *bdev)
233     if (!size)
234     size = 1024;
235    
236     + cbdata.drive = drive;
237     +
238     bio_init(&bio);
239     bio.bi_io_vec = &bio_vec;
240     bio_vec.bv_page = page;
241     @@ -3779,13 +3796,14 @@ static int __floppy_read_block_0(struct block_device *bdev)
242     bio.bi_bdev = bdev;
243     bio.bi_sector = 0;
244     bio.bi_flags = (1 << BIO_QUIET);
245     - init_completion(&complete);
246     - bio.bi_private = &complete;
247     - bio.bi_end_io = floppy_rb0_complete;
248     + bio.bi_private = &cbdata;
249     + bio.bi_end_io = floppy_rb0_cb;
250    
251     submit_bio(READ, &bio);
252     process_fd_request();
253     - wait_for_completion(&complete);
254     +
255     + init_completion(&cbdata.complete);
256     + wait_for_completion(&cbdata.complete);
257    
258     __free_page(page);
259    
260     @@ -3827,7 +3845,7 @@ static int floppy_revalidate(struct gendisk *disk)
261     UDRS->generation++;
262     if (drive_no_geom(drive)) {
263     /* auto-sensing */
264     - res = __floppy_read_block_0(opened_bdev[drive]);
265     + res = __floppy_read_block_0(opened_bdev[drive], drive);
266     } else {
267     if (cf)
268     poll_drive(false, FD_RAW_NEED_DISK);
269     diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
270     index a22a7a502740..8156cafad11a 100644
271     --- a/drivers/char/ipmi/ipmi_bt_sm.c
272     +++ b/drivers/char/ipmi/ipmi_bt_sm.c
273     @@ -352,7 +352,7 @@ static inline void write_all_bytes(struct si_sm_data *bt)
274    
275     static inline int read_all_bytes(struct si_sm_data *bt)
276     {
277     - unsigned char i;
278     + unsigned int i;
279    
280     /*
281     * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
282     diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
283     index 85f1c8c25ddc..4fe6521c30d5 100644
284     --- a/drivers/cpufreq/powernow-k6.c
285     +++ b/drivers/cpufreq/powernow-k6.c
286     @@ -26,41 +26,108 @@
287     static unsigned int busfreq; /* FSB, in 10 kHz */
288     static unsigned int max_multiplier;
289    
290     +static unsigned int param_busfreq = 0;
291     +static unsigned int param_max_multiplier = 0;
292     +
293     +module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO);
294     +MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)");
295     +
296     +module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO);
297     +MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz");
298    
299     /* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
300     static struct cpufreq_frequency_table clock_ratio[] = {
301     - {45, /* 000 -> 4.5x */ 0},
302     + {60, /* 110 -> 6.0x */ 0},
303     + {55, /* 011 -> 5.5x */ 0},
304     {50, /* 001 -> 5.0x */ 0},
305     + {45, /* 000 -> 4.5x */ 0},
306     {40, /* 010 -> 4.0x */ 0},
307     - {55, /* 011 -> 5.5x */ 0},
308     - {20, /* 100 -> 2.0x */ 0},
309     - {30, /* 101 -> 3.0x */ 0},
310     - {60, /* 110 -> 6.0x */ 0},
311     {35, /* 111 -> 3.5x */ 0},
312     + {30, /* 101 -> 3.0x */ 0},
313     + {20, /* 100 -> 2.0x */ 0},
314     {0, CPUFREQ_TABLE_END}
315     };
316    
317     +static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
318     +static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };
319     +
320     +static const struct {
321     + unsigned freq;
322     + unsigned mult;
323     +} usual_frequency_table[] = {
324     + { 400000, 40 }, // 100 * 4
325     + { 450000, 45 }, // 100 * 4.5
326     + { 475000, 50 }, // 95 * 5
327     + { 500000, 50 }, // 100 * 5
328     + { 506250, 45 }, // 112.5 * 4.5
329     + { 533500, 55 }, // 97 * 5.5
330     + { 550000, 55 }, // 100 * 5.5
331     + { 562500, 50 }, // 112.5 * 5
332     + { 570000, 60 }, // 95 * 6
333     + { 600000, 60 }, // 100 * 6
334     + { 618750, 55 }, // 112.5 * 5.5
335     + { 660000, 55 }, // 120 * 5.5
336     + { 675000, 60 }, // 112.5 * 6
337     + { 720000, 60 }, // 120 * 6
338     +};
339     +
340     +#define FREQ_RANGE 3000
341    
342     /**
343     * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
344     *
345     - * Returns the current setting of the frequency multiplier. Core clock
346     + * Returns the current setting of the frequency multiplier. Core clock
347     * speed is frequency of the Front-Side Bus multiplied with this value.
348     */
349     static int powernow_k6_get_cpu_multiplier(void)
350     {
351     - u64 invalue = 0;
352     + unsigned long invalue = 0;
353     u32 msrval;
354    
355     + local_irq_disable();
356     +
357     msrval = POWERNOW_IOPORT + 0x1;
358     wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
359     invalue = inl(POWERNOW_IOPORT + 0x8);
360     msrval = POWERNOW_IOPORT + 0x0;
361     wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
362    
363     - return clock_ratio[(invalue >> 5)&7].driver_data;
364     + local_irq_enable();
365     +
366     + return clock_ratio[register_to_index[(invalue >> 5)&7]].driver_data;
367     }
368    
369     +static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
370     +{
371     + unsigned long outvalue, invalue;
372     + unsigned long msrval;
373     + unsigned long cr0;
374     +
375     + /* we now need to transform best_i to the BVC format, see AMD#23446 */
376     +
377     + /*
378     + * The processor doesn't respond to inquiry cycles while changing the
379     + * frequency, so we must disable cache.
380     + */
381     + local_irq_disable();
382     + cr0 = read_cr0();
383     + write_cr0(cr0 | X86_CR0_CD);
384     + wbinvd();
385     +
386     + outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5);
387     +
388     + msrval = POWERNOW_IOPORT + 0x1;
389     + wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
390     + invalue = inl(POWERNOW_IOPORT + 0x8);
391     + invalue = invalue & 0x1f;
392     + outvalue = outvalue | invalue;
393     + outl(outvalue, (POWERNOW_IOPORT + 0x8));
394     + msrval = POWERNOW_IOPORT + 0x0;
395     + wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
396     +
397     + write_cr0(cr0);
398     + local_irq_enable();
399     +}
400    
401     /**
402     * powernow_k6_set_state - set the PowerNow! multiplier
403     @@ -71,8 +138,6 @@ static int powernow_k6_get_cpu_multiplier(void)
404     static void powernow_k6_set_state(struct cpufreq_policy *policy,
405     unsigned int best_i)
406     {
407     - unsigned long outvalue = 0, invalue = 0;
408     - unsigned long msrval;
409     struct cpufreq_freqs freqs;
410    
411     if (clock_ratio[best_i].driver_data > max_multiplier) {
412     @@ -85,18 +150,7 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy,
413    
414     cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
415    
416     - /* we now need to transform best_i to the BVC format, see AMD#23446 */
417     -
418     - outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5);
419     -
420     - msrval = POWERNOW_IOPORT + 0x1;
421     - wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
422     - invalue = inl(POWERNOW_IOPORT + 0x8);
423     - invalue = invalue & 0xf;
424     - outvalue = outvalue | invalue;
425     - outl(outvalue , (POWERNOW_IOPORT + 0x8));
426     - msrval = POWERNOW_IOPORT + 0x0;
427     - wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
428     + powernow_k6_set_cpu_multiplier(best_i);
429    
430     cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
431    
432     @@ -141,18 +195,57 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
433     return 0;
434     }
435    
436     -
437     static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
438     {
439     unsigned int i, f;
440     int result;
441     + unsigned khz;
442    
443     if (policy->cpu != 0)
444     return -ENODEV;
445    
446     - /* get frequencies */
447     - max_multiplier = powernow_k6_get_cpu_multiplier();
448     - busfreq = cpu_khz / max_multiplier;
449     + max_multiplier = 0;
450     + khz = cpu_khz;
451     + for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) {
452     + if (khz >= usual_frequency_table[i].freq - FREQ_RANGE &&
453     + khz <= usual_frequency_table[i].freq + FREQ_RANGE) {
454     + khz = usual_frequency_table[i].freq;
455     + max_multiplier = usual_frequency_table[i].mult;
456     + break;
457     + }
458     + }
459     + if (param_max_multiplier) {
460     + for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
461     + if (clock_ratio[i].driver_data == param_max_multiplier) {
462     + max_multiplier = param_max_multiplier;
463     + goto have_max_multiplier;
464     + }
465     + }
466     + printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
467     + return -EINVAL;
468     + }
469     +
470     + if (!max_multiplier) {
471     + printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
472     + printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
473     + return -EOPNOTSUPP;
474     + }
475     +
476     +have_max_multiplier:
477     + param_max_multiplier = max_multiplier;
478     +
479     + if (param_busfreq) {
480     + if (param_busfreq >= 50000 && param_busfreq <= 150000) {
481     + busfreq = param_busfreq / 10;
482     + goto have_busfreq;
483     + }
484     + printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
485     + return -EINVAL;
486     + }
487     +
488     + busfreq = khz / max_multiplier;
489     +have_busfreq:
490     + param_busfreq = busfreq * 10;
491    
492     /* table init */
493     for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
494     @@ -164,7 +257,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
495     }
496    
497     /* cpuinfo and default policy values */
498     - policy->cpuinfo.transition_latency = 200000;
499     + policy->cpuinfo.transition_latency = 500000;
500     policy->cur = busfreq * max_multiplier;
501    
502     result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
503     diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
504     index 86d779a9c245..32bbba0a787b 100644
505     --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
506     +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
507     @@ -233,6 +233,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
508     info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
509     info->apertures->ranges[0].size = cdev->mc.vram_size;
510    
511     + info->fix.smem_start = cdev->dev->mode_config.fb_base;
512     + info->fix.smem_len = cdev->mc.vram_size;
513     +
514     info->screen_base = sysram;
515     info->screen_size = size;
516    
517     diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
518     index 379a47ea99f6..3592616d484b 100644
519     --- a/drivers/gpu/drm/cirrus/cirrus_mode.c
520     +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
521     @@ -494,13 +494,12 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
522    
523     int cirrus_vga_get_modes(struct drm_connector *connector)
524     {
525     - /* Just add a static list of modes */
526     - drm_add_modes_noedid(connector, 640, 480);
527     - drm_add_modes_noedid(connector, 800, 600);
528     - drm_add_modes_noedid(connector, 1024, 768);
529     - drm_add_modes_noedid(connector, 1280, 1024);
530     + int count;
531    
532     - return 4;
533     + /* Just add a static list of modes */
534     + count = drm_add_modes_noedid(connector, 1280, 1024);
535     + drm_set_preferred_mode(connector, 1024, 768);
536     + return count;
537     }
538    
539     static int cirrus_vga_mode_valid(struct drm_connector *connector,
540     diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
541     index d0d3eae05a1a..1cb50268a224 100644
542     --- a/drivers/gpu/drm/drm_edid.c
543     +++ b/drivers/gpu/drm/drm_edid.c
544     @@ -3296,6 +3296,19 @@ int drm_add_modes_noedid(struct drm_connector *connector,
545     }
546     EXPORT_SYMBOL(drm_add_modes_noedid);
547    
548     +void drm_set_preferred_mode(struct drm_connector *connector,
549     + int hpref, int vpref)
550     +{
551     + struct drm_display_mode *mode;
552     +
553     + list_for_each_entry(mode, &connector->probed_modes, head) {
554     + if (drm_mode_width(mode) == hpref &&
555     + drm_mode_height(mode) == vpref)
556     + mode->type |= DRM_MODE_TYPE_PREFERRED;
557     + }
558     +}
559     +EXPORT_SYMBOL(drm_set_preferred_mode);
560     +
561     /**
562     * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
563     * data from a DRM display mode
564     diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
565     index 3d13ca6e257f..49557c957be8 100644
566     --- a/drivers/gpu/drm/drm_fb_helper.c
567     +++ b/drivers/gpu/drm/drm_fb_helper.c
568     @@ -1163,6 +1163,7 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
569     {
570     struct drm_cmdline_mode *cmdline_mode;
571     struct drm_display_mode *mode = NULL;
572     + bool prefer_non_interlace;
573    
574     cmdline_mode = &fb_helper_conn->cmdline_mode;
575     if (cmdline_mode->specified == false)
576     @@ -1174,6 +1175,8 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
577     if (cmdline_mode->rb || cmdline_mode->margins)
578     goto create_mode;
579    
580     + prefer_non_interlace = !cmdline_mode->interlace;
581     + again:
582     list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
583     /* check width/height */
584     if (mode->hdisplay != cmdline_mode->xres ||
585     @@ -1188,10 +1191,18 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
586     if (cmdline_mode->interlace) {
587     if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
588     continue;
589     + } else if (prefer_non_interlace) {
590     + if (mode->flags & DRM_MODE_FLAG_INTERLACE)
591     + continue;
592     }
593     return mode;
594     }
595    
596     + if (prefer_non_interlace) {
597     + prefer_non_interlace = false;
598     + goto again;
599     + }
600     +
601     create_mode:
602     mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
603     cmdline_mode);
604     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
605     index 9dcf34f9a22d..5aa836e6e190 100644
606     --- a/drivers/gpu/drm/i915/intel_display.c
607     +++ b/drivers/gpu/drm/i915/intel_display.c
608     @@ -10073,8 +10073,7 @@ static struct intel_quirk intel_quirks[] = {
609     /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
610     { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
611    
612     - /* 830/845 need to leave pipe A & dpll A up */
613     - { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
614     + /* 830 needs to leave pipe A & dpll A up */
615     { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
616    
617     /* Lenovo U160 cannot use SSC on LVDS */
618     diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
619     index 2a2879e53bd5..bbcd2dd653a3 100644
620     --- a/drivers/gpu/drm/radeon/dce6_afmt.c
621     +++ b/drivers/gpu/drm/radeon/dce6_afmt.c
622     @@ -226,13 +226,15 @@ static int dce6_audio_chipset_supported(struct radeon_device *rdev)
623     return !ASIC_IS_NODCE(rdev);
624     }
625    
626     -static void dce6_audio_enable(struct radeon_device *rdev,
627     - struct r600_audio_pin *pin,
628     - bool enable)
629     +void dce6_audio_enable(struct radeon_device *rdev,
630     + struct r600_audio_pin *pin,
631     + bool enable)
632     {
633     + if (!pin)
634     + return;
635     +
636     WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
637     enable ? AUDIO_ENABLED : 0);
638     - DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
639     }
640    
641     static const u32 pin_offsets[7] =
642     @@ -269,7 +271,8 @@ int dce6_audio_init(struct radeon_device *rdev)
643     rdev->audio.pin[i].connected = false;
644     rdev->audio.pin[i].offset = pin_offsets[i];
645     rdev->audio.pin[i].id = i;
646     - dce6_audio_enable(rdev, &rdev->audio.pin[i], true);
647     + /* disable audio. it will be set up later */
648     + dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
649     }
650    
651     return 0;
652     diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
653     index b347fffa4519..da4e504b78a4 100644
654     --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
655     +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
656     @@ -257,6 +257,15 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
657     return;
658     offset = dig->afmt->offset;
659    
660     + /* disable audio prior to setting up hw */
661     + if (ASIC_IS_DCE6(rdev)) {
662     + dig->afmt->pin = dce6_audio_get_pin(rdev);
663     + dce6_audio_enable(rdev, dig->afmt->pin, false);
664     + } else {
665     + dig->afmt->pin = r600_audio_get_pin(rdev);
666     + r600_audio_enable(rdev, dig->afmt->pin, false);
667     + }
668     +
669     evergreen_audio_set_dto(encoder, mode->clock);
670    
671     WREG32(HDMI_VBI_PACKET_CONTROL + offset,
672     @@ -358,12 +367,16 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
673     WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
674     WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
675     WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
676     +
677     + /* enable audio after to setting up hw */
678     + if (ASIC_IS_DCE6(rdev))
679     + dce6_audio_enable(rdev, dig->afmt->pin, true);
680     + else
681     + r600_audio_enable(rdev, dig->afmt->pin, true);
682     }
683    
684     void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
685     {
686     - struct drm_device *dev = encoder->dev;
687     - struct radeon_device *rdev = dev->dev_private;
688     struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
689     struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
690    
691     @@ -376,15 +389,6 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
692     if (!enable && !dig->afmt->enabled)
693     return;
694    
695     - if (enable) {
696     - if (ASIC_IS_DCE6(rdev))
697     - dig->afmt->pin = dce6_audio_get_pin(rdev);
698     - else
699     - dig->afmt->pin = r600_audio_get_pin(rdev);
700     - } else {
701     - dig->afmt->pin = NULL;
702     - }
703     -
704     dig->afmt->enabled = enable;
705    
706     DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
707     diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
708     index 47fc2b886979..bffac10c4296 100644
709     --- a/drivers/gpu/drm/radeon/r600_audio.c
710     +++ b/drivers/gpu/drm/radeon/r600_audio.c
711     @@ -142,12 +142,15 @@ void r600_audio_update_hdmi(struct work_struct *work)
712     }
713    
714     /* enable the audio stream */
715     -static void r600_audio_enable(struct radeon_device *rdev,
716     - struct r600_audio_pin *pin,
717     - bool enable)
718     +void r600_audio_enable(struct radeon_device *rdev,
719     + struct r600_audio_pin *pin,
720     + bool enable)
721     {
722     u32 value = 0;
723    
724     + if (!pin)
725     + return;
726     +
727     if (ASIC_IS_DCE4(rdev)) {
728     if (enable) {
729     value |= 0x81000000; /* Required to enable audio */
730     @@ -158,7 +161,6 @@ static void r600_audio_enable(struct radeon_device *rdev,
731     WREG32_P(R600_AUDIO_ENABLE,
732     enable ? 0x81000000 : 0x0, ~0x81000000);
733     }
734     - DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
735     }
736    
737     /*
738     @@ -178,8 +180,8 @@ int r600_audio_init(struct radeon_device *rdev)
739     rdev->audio.pin[0].status_bits = 0;
740     rdev->audio.pin[0].category_code = 0;
741     rdev->audio.pin[0].id = 0;
742     -
743     - r600_audio_enable(rdev, &rdev->audio.pin[0], true);
744     + /* disable audio. it will be set up later */
745     + r600_audio_enable(rdev, &rdev->audio.pin[0], false);
746    
747     return 0;
748     }
749     diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
750     index 7f3b0d9aaada..d38b725563e4 100644
751     --- a/drivers/gpu/drm/radeon/r600_hdmi.c
752     +++ b/drivers/gpu/drm/radeon/r600_hdmi.c
753     @@ -329,9 +329,6 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
754     u8 *sadb;
755     int sad_count;
756    
757     - /* XXX: setting this register causes hangs on some asics */
758     - return;
759     -
760     list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
761     if (connector->encoder == encoder)
762     radeon_connector = to_radeon_connector(connector);
763     @@ -446,6 +443,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
764     return;
765     offset = dig->afmt->offset;
766    
767     + /* disable audio prior to setting up hw */
768     + dig->afmt->pin = r600_audio_get_pin(rdev);
769     + r600_audio_enable(rdev, dig->afmt->pin, false);
770     +
771     r600_audio_set_dto(encoder, mode->clock);
772    
773     WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
774     @@ -517,6 +518,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
775     WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
776    
777     r600_hdmi_audio_workaround(encoder);
778     +
779     + /* enable audio after to setting up hw */
780     + r600_audio_enable(rdev, dig->afmt->pin, true);
781     }
782    
783     /*
784     @@ -637,11 +641,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
785     if (!enable && !dig->afmt->enabled)
786     return;
787    
788     - if (enable)
789     - dig->afmt->pin = r600_audio_get_pin(rdev);
790     - else
791     - dig->afmt->pin = NULL;
792     -
793     /* Older chipsets require setting HDMI and routing manually */
794     if (!ASIC_IS_DCE3(rdev)) {
795     if (enable)
796     diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
797     index f44ca5853ff2..b11433f75578 100644
798     --- a/drivers/gpu/drm/radeon/radeon.h
799     +++ b/drivers/gpu/drm/radeon/radeon.h
800     @@ -2717,6 +2717,12 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
801     void r600_audio_update_hdmi(struct work_struct *work);
802     struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
803     struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
804     +void r600_audio_enable(struct radeon_device *rdev,
805     + struct r600_audio_pin *pin,
806     + bool enable);
807     +void dce6_audio_enable(struct radeon_device *rdev,
808     + struct r600_audio_pin *pin,
809     + bool enable);
810    
811     /*
812     * R600 vram scratch functions
813     diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
814     index 02125e6a9109..5a4da94aefb0 100644
815     --- a/drivers/isdn/isdnloop/isdnloop.c
816     +++ b/drivers/isdn/isdnloop/isdnloop.c
817     @@ -518,9 +518,9 @@ static isdnloop_stat isdnloop_cmd_table[] =
818     static void
819     isdnloop_fake_err(isdnloop_card *card)
820     {
821     - char buf[60];
822     + char buf[64];
823    
824     - sprintf(buf, "E%s", card->omsg);
825     + snprintf(buf, sizeof(buf), "E%s", card->omsg);
826     isdnloop_fake(card, buf, -1);
827     isdnloop_fake(card, "NAK", -1);
828     }
829     @@ -903,6 +903,8 @@ isdnloop_parse_cmd(isdnloop_card *card)
830     case 7:
831     /* 0x;EAZ */
832     p += 3;
833     + if (strlen(p) >= sizeof(card->eazlist[0]))
834     + break;
835     strcpy(card->eazlist[ch - 1], p);
836     break;
837     case 8:
838     @@ -1070,6 +1072,12 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
839     return -EBUSY;
840     if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef)))
841     return -EFAULT;
842     +
843     + for (i = 0; i < 3; i++) {
844     + if (!memchr(sdef.num[i], 0, sizeof(sdef.num[i])))
845     + return -EINVAL;
846     + }
847     +
848     spin_lock_irqsave(&card->isdnloop_lock, flags);
849     switch (sdef.ptype) {
850     case ISDN_PTYPE_EURO:
851     @@ -1127,7 +1135,7 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
852     {
853     ulong a;
854     int i;
855     - char cbuf[60];
856     + char cbuf[80];
857     isdn_ctrl cmd;
858     isdnloop_cdef cdef;
859    
860     @@ -1192,7 +1200,6 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
861     break;
862     if ((c->arg & 255) < ISDNLOOP_BCH) {
863     char *p;
864     - char dial[50];
865     char dcode[4];
866    
867     a = c->arg;
868     @@ -1204,10 +1211,10 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
869     } else
870     /* Normal Dial */
871     strcpy(dcode, "CAL");
872     - strcpy(dial, p);
873     - sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
874     - dcode, dial, c->parm.setup.si1,
875     - c->parm.setup.si2, c->parm.setup.eazmsn);
876     + snprintf(cbuf, sizeof(cbuf),
877     + "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
878     + dcode, p, c->parm.setup.si1,
879     + c->parm.setup.si2, c->parm.setup.eazmsn);
880     i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
881     }
882     break;
883     diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
884     index f428ef574372..71adb692e457 100644
885     --- a/drivers/net/bonding/bond_alb.c
886     +++ b/drivers/net/bonding/bond_alb.c
887     @@ -694,7 +694,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
888     client_info->ntt = 0;
889     }
890    
891     - if (!vlan_get_tag(skb, &client_info->vlan_id))
892     + if (vlan_get_tag(skb, &client_info->vlan_id))
893     client_info->vlan_id = 0;
894    
895     if (!client_info->assigned) {
896     diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
897     index e838a3f74b69..8f9e76d2dd8b 100644
898     --- a/drivers/net/ethernet/broadcom/bnx2.c
899     +++ b/drivers/net/ethernet/broadcom/bnx2.c
900     @@ -2490,6 +2490,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
901    
902     bp->fw_wr_seq++;
903     msg_data |= bp->fw_wr_seq;
904     + bp->fw_last_msg = msg_data;
905    
906     bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
907    
908     @@ -3982,8 +3983,23 @@ bnx2_setup_wol(struct bnx2 *bp)
909     wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
910     }
911    
912     - if (!(bp->flags & BNX2_FLAG_NO_WOL))
913     - bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
914     + if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
915     + u32 val;
916     +
917     + wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
918     + if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
919     + bnx2_fw_sync(bp, wol_msg, 1, 0);
920     + return;
921     + }
922     + /* Tell firmware not to power down the PHY yet, otherwise
923     + * the chip will take a long time to respond to MMIO reads.
924     + */
925     + val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
926     + bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
927     + val | BNX2_PORT_FEATURE_ASF_ENABLED);
928     + bnx2_fw_sync(bp, wol_msg, 1, 0);
929     + bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
930     + }
931    
932     }
933    
934     @@ -4015,9 +4031,22 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
935    
936     if (bp->wol)
937     pci_set_power_state(bp->pdev, PCI_D3hot);
938     - } else {
939     - pci_set_power_state(bp->pdev, PCI_D3hot);
940     + break;
941     +
942     + }
943     + if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
944     + u32 val;
945     +
946     + /* Tell firmware not to power down the PHY yet,
947     + * otherwise the other port may not respond to
948     + * MMIO reads.
949     + */
950     + val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
951     + val &= ~BNX2_CONDITION_PM_STATE_MASK;
952     + val |= BNX2_CONDITION_PM_STATE_UNPREP;
953     + bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
954     }
955     + pci_set_power_state(bp->pdev, PCI_D3hot);
956    
957     /* No more memory access after this point until
958     * device is brought back to D0.
959     diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
960     index 18cb2d23e56b..0eb2a65c35b4 100644
961     --- a/drivers/net/ethernet/broadcom/bnx2.h
962     +++ b/drivers/net/ethernet/broadcom/bnx2.h
963     @@ -6890,6 +6890,7 @@ struct bnx2 {
964    
965     u16 fw_wr_seq;
966     u16 fw_drv_pulse_wr_seq;
967     + u32 fw_last_msg;
968    
969     int rx_max_ring;
970     int rx_ring_size;
971     @@ -7396,6 +7397,10 @@ struct bnx2_rv2p_fw_file {
972     #define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000
973     #define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000
974     #define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000
975     +#define BNX2_CONDITION_PM_STATE_MASK 0x00030000
976     +#define BNX2_CONDITION_PM_STATE_FULL 0x00030000
977     +#define BNX2_CONDITION_PM_STATE_PREP 0x00020000
978     +#define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000
979    
980     #define BNX2_BC_STATE_DEBUG_CMD 0x1dc
981     #define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000
982     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
983     index 14a50a11d72e..aae7ba66e7bb 100644
984     --- a/drivers/net/ethernet/broadcom/tg3.c
985     +++ b/drivers/net/ethernet/broadcom/tg3.c
986     @@ -17480,8 +17480,6 @@ static int tg3_init_one(struct pci_dev *pdev,
987    
988     tg3_init_bufmgr_config(tp);
989    
990     - features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
991     -
992     /* 5700 B0 chips do not support checksumming correctly due
993     * to hardware bugs.
994     */
995     @@ -17513,7 +17511,8 @@ static int tg3_init_one(struct pci_dev *pdev,
996     features |= NETIF_F_TSO_ECN;
997     }
998    
999     - dev->features |= features;
1000     + dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
1001     + NETIF_F_HW_VLAN_CTAG_RX;
1002     dev->vlan_features |= features;
1003    
1004     /*
1005     diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
1006     index 63090c0ddeb9..8672547a2a47 100644
1007     --- a/drivers/net/ethernet/freescale/fec_main.c
1008     +++ b/drivers/net/ethernet/freescale/fec_main.c
1009     @@ -525,13 +525,6 @@ fec_restart(struct net_device *ndev, int duplex)
1010     /* Clear any outstanding interrupt. */
1011     writel(0xffc00000, fep->hwp + FEC_IEVENT);
1012    
1013     - /* Setup multicast filter. */
1014     - set_multicast_list(ndev);
1015     -#ifndef CONFIG_M5272
1016     - writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1017     - writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1018     -#endif
1019     -
1020     /* Set maximum receive buffer size. */
1021     writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
1022    
1023     @@ -652,6 +645,13 @@ fec_restart(struct net_device *ndev, int duplex)
1024    
1025     writel(rcntl, fep->hwp + FEC_R_CNTRL);
1026    
1027     + /* Setup multicast filter. */
1028     + set_multicast_list(ndev);
1029     +#ifndef CONFIG_M5272
1030     + writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1031     + writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1032     +#endif
1033     +
1034     if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
1035     /* enable ENET endian swap */
1036     ecntl |= (1 << 8);
1037     diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
1038     index 727b546a9eb8..e0c92e0e5e1d 100644
1039     --- a/drivers/net/ethernet/micrel/ks8851.c
1040     +++ b/drivers/net/ethernet/micrel/ks8851.c
1041     @@ -23,6 +23,7 @@
1042     #include <linux/crc32.h>
1043     #include <linux/mii.h>
1044     #include <linux/eeprom_93cx6.h>
1045     +#include <linux/regulator/consumer.h>
1046    
1047     #include <linux/spi/spi.h>
1048    
1049     @@ -83,6 +84,7 @@ union ks8851_tx_hdr {
1050     * @rc_rxqcr: Cached copy of KS_RXQCR.
1051     * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
1052     * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
1053     + * @vdd_reg: Optional regulator supplying the chip
1054     *
1055     * The @lock ensures that the chip is protected when certain operations are
1056     * in progress. When the read or write packet transfer is in progress, most
1057     @@ -130,6 +132,7 @@ struct ks8851_net {
1058     struct spi_transfer spi_xfer2[2];
1059    
1060     struct eeprom_93cx6 eeprom;
1061     + struct regulator *vdd_reg;
1062     };
1063    
1064     static int msg_enable;
1065     @@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_device *spi)
1066     ks->spidev = spi;
1067     ks->tx_space = 6144;
1068    
1069     + ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
1070     + if (IS_ERR(ks->vdd_reg)) {
1071     + ret = PTR_ERR(ks->vdd_reg);
1072     + if (ret == -EPROBE_DEFER)
1073     + goto err_reg;
1074     + } else {
1075     + ret = regulator_enable(ks->vdd_reg);
1076     + if (ret) {
1077     + dev_err(&spi->dev, "regulator enable fail: %d\n",
1078     + ret);
1079     + goto err_reg_en;
1080     + }
1081     + }
1082     +
1083     +
1084     mutex_init(&ks->lock);
1085     spin_lock_init(&ks->statelock);
1086    
1087     @@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_device *spi)
1088     err_netdev:
1089     free_irq(ndev->irq, ks);
1090    
1091     -err_id:
1092     err_irq:
1093     +err_id:
1094     + if (!IS_ERR(ks->vdd_reg))
1095     + regulator_disable(ks->vdd_reg);
1096     +err_reg_en:
1097     + if (!IS_ERR(ks->vdd_reg))
1098     + regulator_put(ks->vdd_reg);
1099     +err_reg:
1100     free_netdev(ndev);
1101     return ret;
1102     }
1103     @@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_device *spi)
1104    
1105     unregister_netdev(priv->netdev);
1106     free_irq(spi->irq, priv);
1107     + if (!IS_ERR(priv->vdd_reg)) {
1108     + regulator_disable(priv->vdd_reg);
1109     + regulator_put(priv->vdd_reg);
1110     + }
1111     free_netdev(priv->netdev);
1112    
1113     return 0;
1114     diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
1115     index a91fa49b81c3..1d4da74595f9 100644
1116     --- a/drivers/net/usb/usbnet.c
1117     +++ b/drivers/net/usb/usbnet.c
1118     @@ -753,14 +753,12 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
1119     // precondition: never called in_interrupt
1120     static void usbnet_terminate_urbs(struct usbnet *dev)
1121     {
1122     - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
1123     DECLARE_WAITQUEUE(wait, current);
1124     int temp;
1125    
1126     /* ensure there are no more active urbs */
1127     - add_wait_queue(&unlink_wakeup, &wait);
1128     + add_wait_queue(&dev->wait, &wait);
1129     set_current_state(TASK_UNINTERRUPTIBLE);
1130     - dev->wait = &unlink_wakeup;
1131     temp = unlink_urbs(dev, &dev->txq) +
1132     unlink_urbs(dev, &dev->rxq);
1133    
1134     @@ -774,15 +772,14 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
1135     "waited for %d urb completions\n", temp);
1136     }
1137     set_current_state(TASK_RUNNING);
1138     - dev->wait = NULL;
1139     - remove_wait_queue(&unlink_wakeup, &wait);
1140     + remove_wait_queue(&dev->wait, &wait);
1141     }
1142    
1143     int usbnet_stop (struct net_device *net)
1144     {
1145     struct usbnet *dev = netdev_priv(net);
1146     struct driver_info *info = dev->driver_info;
1147     - int retval;
1148     + int retval, pm;
1149    
1150     clear_bit(EVENT_DEV_OPEN, &dev->flags);
1151     netif_stop_queue (net);
1152     @@ -792,6 +789,8 @@ int usbnet_stop (struct net_device *net)
1153     net->stats.rx_packets, net->stats.tx_packets,
1154     net->stats.rx_errors, net->stats.tx_errors);
1155    
1156     + /* to not race resume */
1157     + pm = usb_autopm_get_interface(dev->intf);
1158     /* allow minidriver to stop correctly (wireless devices to turn off
1159     * radio etc) */
1160     if (info->stop) {
1161     @@ -818,6 +817,9 @@ int usbnet_stop (struct net_device *net)
1162     dev->flags = 0;
1163     del_timer_sync (&dev->delay);
1164     tasklet_kill (&dev->bh);
1165     + if (!pm)
1166     + usb_autopm_put_interface(dev->intf);
1167     +
1168     if (info->manage_power &&
1169     !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
1170     info->manage_power(dev, 0);
1171     @@ -1438,11 +1440,12 @@ static void usbnet_bh (unsigned long param)
1172     /* restart RX again after disabling due to high error rate */
1173     clear_bit(EVENT_RX_KILL, &dev->flags);
1174    
1175     - // waiting for all pending urbs to complete?
1176     - if (dev->wait) {
1177     - if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
1178     - wake_up (dev->wait);
1179     - }
1180     + /* waiting for all pending urbs to complete?
1181     + * only then can we forgo submitting anew
1182     + */
1183     + if (waitqueue_active(&dev->wait)) {
1184     + if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
1185     + wake_up_all(&dev->wait);
1186    
1187     // or are we maybe short a few urbs?
1188     } else if (netif_running (dev->net) &&
1189     @@ -1581,6 +1584,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1190     dev->driver_name = name;
1191     dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
1192     | NETIF_MSG_PROBE | NETIF_MSG_LINK);
1193     + init_waitqueue_head(&dev->wait);
1194     skb_queue_head_init (&dev->rxq);
1195     skb_queue_head_init (&dev->txq);
1196     skb_queue_head_init (&dev->done);
1197     @@ -1792,9 +1796,10 @@ int usbnet_resume (struct usb_interface *intf)
1198     spin_unlock_irq(&dev->txq.lock);
1199    
1200     if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
1201     - /* handle remote wakeup ASAP */
1202     - if (!dev->wait &&
1203     - netif_device_present(dev->net) &&
1204     + /* handle remote wakeup ASAP
1205     + * we cannot race against stop
1206     + */
1207     + if (netif_device_present(dev->net) &&
1208     !timer_pending(&dev->delay) &&
1209     !test_bit(EVENT_RX_HALT, &dev->flags))
1210     rx_alloc_submit(dev, GFP_NOIO);
1211     diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
1212     index 32c45c3d820d..4ecdf3c22bc6 100644
1213     --- a/drivers/net/vxlan.c
1214     +++ b/drivers/net/vxlan.c
1215     @@ -781,6 +781,9 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
1216     if (err)
1217     return err;
1218    
1219     + if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
1220     + return -EAFNOSUPPORT;
1221     +
1222     spin_lock_bh(&vxlan->hash_lock);
1223     err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
1224     port, vni, ifindex, ndm->ndm_flags);
1225     @@ -1212,6 +1215,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
1226    
1227     neigh_release(n);
1228    
1229     + if (reply == NULL)
1230     + goto out;
1231     +
1232     skb_reset_mac_header(reply);
1233     __skb_pull(reply, skb_network_offset(reply));
1234     reply->ip_summed = CHECKSUM_UNNECESSARY;
1235     @@ -1233,15 +1239,103 @@ out:
1236     }
1237    
1238     #if IS_ENABLED(CONFIG_IPV6)
1239     +
1240     +static struct sk_buff *vxlan_na_create(struct sk_buff *request,
1241     + struct neighbour *n, bool isrouter)
1242     +{
1243     + struct net_device *dev = request->dev;
1244     + struct sk_buff *reply;
1245     + struct nd_msg *ns, *na;
1246     + struct ipv6hdr *pip6;
1247     + u8 *daddr;
1248     + int na_olen = 8; /* opt hdr + ETH_ALEN for target */
1249     + int ns_olen;
1250     + int i, len;
1251     +
1252     + if (dev == NULL)
1253     + return NULL;
1254     +
1255     + len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
1256     + sizeof(*na) + na_olen + dev->needed_tailroom;
1257     + reply = alloc_skb(len, GFP_ATOMIC);
1258     + if (reply == NULL)
1259     + return NULL;
1260     +
1261     + reply->protocol = htons(ETH_P_IPV6);
1262     + reply->dev = dev;
1263     + skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
1264     + skb_push(reply, sizeof(struct ethhdr));
1265     + skb_set_mac_header(reply, 0);
1266     +
1267     + ns = (struct nd_msg *)skb_transport_header(request);
1268     +
1269     + daddr = eth_hdr(request)->h_source;
1270     + ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
1271     + for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
1272     + if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
1273     + daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
1274     + break;
1275     + }
1276     + }
1277     +
1278     + /* Ethernet header */
1279     + memcpy(eth_hdr(reply)->h_dest, daddr, ETH_ALEN);
1280     + memcpy(eth_hdr(reply)->h_source, n->ha, ETH_ALEN);
1281     + eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
1282     + reply->protocol = htons(ETH_P_IPV6);
1283     +
1284     + skb_pull(reply, sizeof(struct ethhdr));
1285     + skb_set_network_header(reply, 0);
1286     + skb_put(reply, sizeof(struct ipv6hdr));
1287     +
1288     + /* IPv6 header */
1289     +
1290     + pip6 = ipv6_hdr(reply);
1291     + memset(pip6, 0, sizeof(struct ipv6hdr));
1292     + pip6->version = 6;
1293     + pip6->priority = ipv6_hdr(request)->priority;
1294     + pip6->nexthdr = IPPROTO_ICMPV6;
1295     + pip6->hop_limit = 255;
1296     + pip6->daddr = ipv6_hdr(request)->saddr;
1297     + pip6->saddr = *(struct in6_addr *)n->primary_key;
1298     +
1299     + skb_pull(reply, sizeof(struct ipv6hdr));
1300     + skb_set_transport_header(reply, 0);
1301     +
1302     + na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
1303     +
1304     + /* Neighbor Advertisement */
1305     + memset(na, 0, sizeof(*na)+na_olen);
1306     + na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
1307     + na->icmph.icmp6_router = isrouter;
1308     + na->icmph.icmp6_override = 1;
1309     + na->icmph.icmp6_solicited = 1;
1310     + na->target = ns->target;
1311     + memcpy(&na->opt[2], n->ha, ETH_ALEN);
1312     + na->opt[0] = ND_OPT_TARGET_LL_ADDR;
1313     + na->opt[1] = na_olen >> 3;
1314     +
1315     + na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
1316     + &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
1317     + csum_partial(na, sizeof(*na)+na_olen, 0));
1318     +
1319     + pip6->payload_len = htons(sizeof(*na)+na_olen);
1320     +
1321     + skb_push(reply, sizeof(struct ipv6hdr));
1322     +
1323     + reply->ip_summed = CHECKSUM_UNNECESSARY;
1324     +
1325     + return reply;
1326     +}
1327     +
1328     static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
1329     {
1330     struct vxlan_dev *vxlan = netdev_priv(dev);
1331     - struct neighbour *n;
1332     - union vxlan_addr ipa;
1333     + struct nd_msg *msg;
1334     const struct ipv6hdr *iphdr;
1335     const struct in6_addr *saddr, *daddr;
1336     - struct nd_msg *msg;
1337     - struct inet6_dev *in6_dev = NULL;
1338     + struct neighbour *n;
1339     + struct inet6_dev *in6_dev;
1340    
1341     in6_dev = __in6_dev_get(dev);
1342     if (!in6_dev)
1343     @@ -1254,19 +1348,20 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
1344     saddr = &iphdr->saddr;
1345     daddr = &iphdr->daddr;
1346    
1347     - if (ipv6_addr_loopback(daddr) ||
1348     - ipv6_addr_is_multicast(daddr))
1349     - goto out;
1350     -
1351     msg = (struct nd_msg *)skb_transport_header(skb);
1352     if (msg->icmph.icmp6_code != 0 ||
1353     msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
1354     goto out;
1355    
1356     - n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev);
1357     + if (ipv6_addr_loopback(daddr) ||
1358     + ipv6_addr_is_multicast(&msg->target))
1359     + goto out;
1360     +
1361     + n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
1362    
1363     if (n) {
1364     struct vxlan_fdb *f;
1365     + struct sk_buff *reply;
1366    
1367     if (!(n->nud_state & NUD_CONNECTED)) {
1368     neigh_release(n);
1369     @@ -1280,13 +1375,23 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
1370     goto out;
1371     }
1372    
1373     - ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target,
1374     - !!in6_dev->cnf.forwarding,
1375     - true, false, false);
1376     + reply = vxlan_na_create(skb, n,
1377     + !!(f ? f->flags & NTF_ROUTER : 0));
1378     +
1379     neigh_release(n);
1380     +
1381     + if (reply == NULL)
1382     + goto out;
1383     +
1384     + if (netif_rx_ni(reply) == NET_RX_DROP)
1385     + dev->stats.rx_dropped++;
1386     +
1387     } else if (vxlan->flags & VXLAN_F_L3MISS) {
1388     - ipa.sin6.sin6_addr = *daddr;
1389     - ipa.sa.sa_family = AF_INET6;
1390     + union vxlan_addr ipa = {
1391     + .sin6.sin6_addr = msg->target,
1392     + .sa.sa_family = AF_INET6,
1393     + };
1394     +
1395     vxlan_ip_miss(dev, &ipa);
1396     }
1397    
1398     @@ -2383,9 +2488,10 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1399     vni = nla_get_u32(data[IFLA_VXLAN_ID]);
1400     dst->remote_vni = vni;
1401    
1402     + /* Unless IPv6 is explicitly requested, assume IPv4 */
1403     + dst->remote_ip.sa.sa_family = AF_INET;
1404     if (data[IFLA_VXLAN_GROUP]) {
1405     dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
1406     - dst->remote_ip.sa.sa_family = AF_INET;
1407     } else if (data[IFLA_VXLAN_GROUP6]) {
1408     if (!IS_ENABLED(CONFIG_IPV6))
1409     return -EPFNOSUPPORT;
1410     diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
1411     index 400fea1de080..a7501cb9b53b 100644
1412     --- a/drivers/net/xen-netback/common.h
1413     +++ b/drivers/net/xen-netback/common.h
1414     @@ -102,6 +102,11 @@ struct xenvif {
1415     domid_t domid;
1416     unsigned int handle;
1417    
1418     + /* Is this interface disabled? True when backend discovers
1419     + * frontend is rogue.
1420     + */
1421     + bool disabled;
1422     +
1423     /* Use NAPI for guest TX */
1424     struct napi_struct napi;
1425     /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
1426     diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
1427     index 459935a6bfae..adfe46068581 100644
1428     --- a/drivers/net/xen-netback/interface.c
1429     +++ b/drivers/net/xen-netback/interface.c
1430     @@ -66,6 +66,15 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
1431     struct xenvif *vif = container_of(napi, struct xenvif, napi);
1432     int work_done;
1433    
1434     + /* This vif is rogue, we pretend we've there is nothing to do
1435     + * for this vif to deschedule it from NAPI. But this interface
1436     + * will be turned off in thread context later.
1437     + */
1438     + if (unlikely(vif->disabled)) {
1439     + napi_complete(napi);
1440     + return 0;
1441     + }
1442     +
1443     work_done = xenvif_tx_action(vif, budget);
1444    
1445     if (work_done < budget) {
1446     @@ -309,6 +318,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
1447     vif->csum = 1;
1448     vif->dev = dev;
1449    
1450     + vif->disabled = false;
1451     +
1452     vif->credit_bytes = vif->remaining_credit = ~0UL;
1453     vif->credit_usec = 0UL;
1454     init_timer(&vif->credit_timeout);
1455     diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
1456     index 625585034ef4..a1186533cee8 100644
1457     --- a/drivers/net/xen-netback/netback.c
1458     +++ b/drivers/net/xen-netback/netback.c
1459     @@ -206,8 +206,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
1460     * into multiple copies tend to give large frags their
1461     * own buffers as before.
1462     */
1463     - if ((offset + size > MAX_BUFFER_OFFSET) &&
1464     - (size <= MAX_BUFFER_OFFSET) && offset && !head)
1465     + BUG_ON(size > MAX_BUFFER_OFFSET);
1466     + if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
1467     return true;
1468    
1469     return false;
1470     @@ -731,7 +731,8 @@ static void xenvif_tx_err(struct xenvif *vif,
1471     static void xenvif_fatal_tx_err(struct xenvif *vif)
1472     {
1473     netdev_err(vif->dev, "fatal error; disabling device\n");
1474     - xenvif_carrier_off(vif);
1475     + vif->disabled = true;
1476     + xenvif_kick_thread(vif);
1477     }
1478    
1479     static int xenvif_count_requests(struct xenvif *vif,
1480     @@ -1242,7 +1243,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
1481     vif->tx.sring->req_prod, vif->tx.req_cons,
1482     XEN_NETIF_TX_RING_SIZE);
1483     xenvif_fatal_tx_err(vif);
1484     - continue;
1485     + break;
1486     }
1487    
1488     RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
1489     @@ -1642,7 +1643,18 @@ int xenvif_kthread(void *data)
1490     while (!kthread_should_stop()) {
1491     wait_event_interruptible(vif->wq,
1492     rx_work_todo(vif) ||
1493     + vif->disabled ||
1494     kthread_should_stop());
1495     +
1496     + /* This frontend is found to be rogue, disable it in
1497     + * kthread context. Currently this is only set when
1498     + * netback finds out the frontend sends malformed packets,
1499     + * but we cannot disable the interface in softirq
1500     + * context so we defer it here.
1501     + */
1502     + if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
1503     + xenvif_carrier_off(vif);
1504     +
1505     if (kthread_should_stop())
1506     break;
1507    
1508     diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
1509     index 1953c1680986..8efd11dafd44 100644
1510     --- a/drivers/pci/host/pci-mvebu.c
1511     +++ b/drivers/pci/host/pci-mvebu.c
1512     @@ -866,11 +866,23 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
1513     continue;
1514     }
1515    
1516     + port->clk = of_clk_get_by_name(child, NULL);
1517     + if (IS_ERR(port->clk)) {
1518     + dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
1519     + port->port, port->lane);
1520     + continue;
1521     + }
1522     +
1523     + ret = clk_prepare_enable(port->clk);
1524     + if (ret)
1525     + continue;
1526     +
1527     port->base = mvebu_pcie_map_registers(pdev, child, port);
1528     if (IS_ERR(port->base)) {
1529     dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n",
1530     port->port, port->lane);
1531     port->base = NULL;
1532     + clk_disable_unprepare(port->clk);
1533     continue;
1534     }
1535    
1536     @@ -886,22 +898,9 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
1537     port->port, port->lane);
1538     }
1539    
1540     - port->clk = of_clk_get_by_name(child, NULL);
1541     - if (IS_ERR(port->clk)) {
1542     - dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
1543     - port->port, port->lane);
1544     - iounmap(port->base);
1545     - port->haslink = 0;
1546     - continue;
1547     - }
1548     -
1549     port->dn = child;
1550     -
1551     - clk_prepare_enable(port->clk);
1552     spin_lock_init(&port->conf_lock);
1553     -
1554     mvebu_sw_pci_bridge_init(port);
1555     -
1556     i++;
1557     }
1558    
1559     diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
1560     index b12176f2013c..5264d839474a 100644
1561     --- a/drivers/vhost/net.c
1562     +++ b/drivers/vhost/net.c
1563     @@ -501,9 +501,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
1564     r = -ENOBUFS;
1565     goto err;
1566     }
1567     - d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
1568     + r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
1569     ARRAY_SIZE(vq->iov) - seg, &out,
1570     &in, log, log_num);
1571     + if (unlikely(r < 0))
1572     + goto err;
1573     +
1574     + d = r;
1575     if (d == vq->num) {
1576     r = 0;
1577     goto err;
1578     @@ -528,6 +532,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
1579     *iovcount = seg;
1580     if (unlikely(log))
1581     *log_num = nlogs;
1582     +
1583     + /* Detect overrun */
1584     + if (unlikely(datalen > 0)) {
1585     + r = UIO_MAXIOV + 1;
1586     + goto err;
1587     + }
1588     return headcount;
1589     err:
1590     vhost_discard_vq_desc(vq, headcount);
1591     @@ -583,6 +593,14 @@ static void handle_rx(struct vhost_net *net)
1592     /* On error, stop handling until the next kick. */
1593     if (unlikely(headcount < 0))
1594     break;
1595     + /* On overrun, truncate and discard */
1596     + if (unlikely(headcount > UIO_MAXIOV)) {
1597     + msg.msg_iovlen = 1;
1598     + err = sock->ops->recvmsg(NULL, sock, &msg,
1599     + 1, MSG_DONTWAIT | MSG_TRUNC);
1600     + pr_debug("Discarded rx packet: len %zd\n", sock_len);
1601     + continue;
1602     + }
1603     /* OK, now we need to know about added descriptors. */
1604     if (!headcount) {
1605     if (unlikely(vhost_enable_notify(&net->dev, vq))) {
1606     diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
1607     index 8659eb160b4d..b6d5008f361f 100644
1608     --- a/drivers/video/fbmem.c
1609     +++ b/drivers/video/fbmem.c
1610     @@ -1108,14 +1108,16 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
1611     case FBIOPUT_VSCREENINFO:
1612     if (copy_from_user(&var, argp, sizeof(var)))
1613     return -EFAULT;
1614     - if (!lock_fb_info(info))
1615     - return -ENODEV;
1616     console_lock();
1617     + if (!lock_fb_info(info)) {
1618     + console_unlock();
1619     + return -ENODEV;
1620     + }
1621     info->flags |= FBINFO_MISC_USEREVENT;
1622     ret = fb_set_var(info, &var);
1623     info->flags &= ~FBINFO_MISC_USEREVENT;
1624     - console_unlock();
1625     unlock_fb_info(info);
1626     + console_unlock();
1627     if (!ret && copy_to_user(argp, &var, sizeof(var)))
1628     ret = -EFAULT;
1629     break;
1630     @@ -1144,12 +1146,14 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
1631     case FBIOPAN_DISPLAY:
1632     if (copy_from_user(&var, argp, sizeof(var)))
1633     return -EFAULT;
1634     - if (!lock_fb_info(info))
1635     - return -ENODEV;
1636     console_lock();
1637     + if (!lock_fb_info(info)) {
1638     + console_unlock();
1639     + return -ENODEV;
1640     + }
1641     ret = fb_pan_display(info, &var);
1642     - console_unlock();
1643     unlock_fb_info(info);
1644     + console_unlock();
1645     if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
1646     return -EFAULT;
1647     break;
1648     @@ -1184,23 +1188,27 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
1649     break;
1650     }
1651     event.data = &con2fb;
1652     - if (!lock_fb_info(info))
1653     - return -ENODEV;
1654     console_lock();
1655     + if (!lock_fb_info(info)) {
1656     + console_unlock();
1657     + return -ENODEV;
1658     + }
1659     event.info = info;
1660     ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
1661     - console_unlock();
1662     unlock_fb_info(info);
1663     + console_unlock();
1664     break;
1665     case FBIOBLANK:
1666     - if (!lock_fb_info(info))
1667     - return -ENODEV;
1668     console_lock();
1669     + if (!lock_fb_info(info)) {
1670     + console_unlock();
1671     + return -ENODEV;
1672     + }
1673     info->flags |= FBINFO_MISC_USEREVENT;
1674     ret = fb_blank(info, arg);
1675     info->flags &= ~FBINFO_MISC_USEREVENT;
1676     - console_unlock();
1677     unlock_fb_info(info);
1678     + console_unlock();
1679     break;
1680     default:
1681     if (!lock_fb_info(info))
1682     @@ -1569,10 +1577,10 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena,
1683     static int do_unregister_framebuffer(struct fb_info *fb_info);
1684    
1685     #define VGA_FB_PHYS 0xA0000
1686     -static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
1687     - const char *name, bool primary)
1688     +static int do_remove_conflicting_framebuffers(struct apertures_struct *a,
1689     + const char *name, bool primary)
1690     {
1691     - int i;
1692     + int i, ret;
1693    
1694     /* check all firmware fbs and kick off if the base addr overlaps */
1695     for (i = 0 ; i < FB_MAX; i++) {
1696     @@ -1588,25 +1596,31 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
1697     (primary && gen_aper && gen_aper->count &&
1698     gen_aper->ranges[0].base == VGA_FB_PHYS)) {
1699    
1700     - printk(KERN_INFO "fb: conflicting fb hw usage "
1701     - "%s vs %s - removing generic driver\n",
1702     + printk(KERN_INFO "fb: switching to %s from %s\n",
1703     name, registered_fb[i]->fix.id);
1704     - do_unregister_framebuffer(registered_fb[i]);
1705     + ret = do_unregister_framebuffer(registered_fb[i]);
1706     + if (ret)
1707     + return ret;
1708     }
1709     }
1710     +
1711     + return 0;
1712     }
1713    
1714     static int do_register_framebuffer(struct fb_info *fb_info)
1715     {
1716     - int i;
1717     + int i, ret;
1718     struct fb_event event;
1719     struct fb_videomode mode;
1720    
1721     if (fb_check_foreignness(fb_info))
1722     return -ENOSYS;
1723    
1724     - do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
1725     - fb_is_primary_device(fb_info));
1726     + ret = do_remove_conflicting_framebuffers(fb_info->apertures,
1727     + fb_info->fix.id,
1728     + fb_is_primary_device(fb_info));
1729     + if (ret)
1730     + return ret;
1731    
1732     if (num_registered_fb == FB_MAX)
1733     return -ENXIO;
1734     @@ -1660,12 +1674,15 @@ static int do_register_framebuffer(struct fb_info *fb_info)
1735     registered_fb[i] = fb_info;
1736    
1737     event.info = fb_info;
1738     - if (!lock_fb_info(fb_info))
1739     - return -ENODEV;
1740     console_lock();
1741     + if (!lock_fb_info(fb_info)) {
1742     + console_unlock();
1743     + return -ENODEV;
1744     + }
1745     +
1746     fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
1747     - console_unlock();
1748     unlock_fb_info(fb_info);
1749     + console_unlock();
1750     return 0;
1751     }
1752    
1753     @@ -1678,13 +1695,16 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
1754     if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
1755     return -EINVAL;
1756    
1757     - if (!lock_fb_info(fb_info))
1758     - return -ENODEV;
1759     console_lock();
1760     + if (!lock_fb_info(fb_info)) {
1761     + console_unlock();
1762     + return -ENODEV;
1763     + }
1764     +
1765     event.info = fb_info;
1766     ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
1767     - console_unlock();
1768     unlock_fb_info(fb_info);
1769     + console_unlock();
1770    
1771     if (ret)
1772     return -EINVAL;
1773     @@ -1725,12 +1745,16 @@ int unlink_framebuffer(struct fb_info *fb_info)
1774     }
1775     EXPORT_SYMBOL(unlink_framebuffer);
1776    
1777     -void remove_conflicting_framebuffers(struct apertures_struct *a,
1778     - const char *name, bool primary)
1779     +int remove_conflicting_framebuffers(struct apertures_struct *a,
1780     + const char *name, bool primary)
1781     {
1782     + int ret;
1783     +
1784     mutex_lock(&registration_lock);
1785     - do_remove_conflicting_framebuffers(a, name, primary);
1786     + ret = do_remove_conflicting_framebuffers(a, name, primary);
1787     mutex_unlock(&registration_lock);
1788     +
1789     + return ret;
1790     }
1791     EXPORT_SYMBOL(remove_conflicting_framebuffers);
1792    
1793     diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
1794     index ef476b02fbe5..53444ac19fe0 100644
1795     --- a/drivers/video/fbsysfs.c
1796     +++ b/drivers/video/fbsysfs.c
1797     @@ -177,9 +177,12 @@ static ssize_t store_modes(struct device *device,
1798     if (i * sizeof(struct fb_videomode) != count)
1799     return -EINVAL;
1800    
1801     - if (!lock_fb_info(fb_info))
1802     - return -ENODEV;
1803     console_lock();
1804     + if (!lock_fb_info(fb_info)) {
1805     + console_unlock();
1806     + return -ENODEV;
1807     + }
1808     +
1809     list_splice(&fb_info->modelist, &old_list);
1810     fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
1811     &fb_info->modelist);
1812     @@ -189,8 +192,8 @@ static ssize_t store_modes(struct device *device,
1813     } else
1814     fb_destroy_modelist(&old_list);
1815    
1816     - console_unlock();
1817     unlock_fb_info(fb_info);
1818     + console_unlock();
1819    
1820     return 0;
1821     }
1822     @@ -404,12 +407,16 @@ static ssize_t store_fbstate(struct device *device,
1823    
1824     state = simple_strtoul(buf, &last, 0);
1825    
1826     - if (!lock_fb_info(fb_info))
1827     - return -ENODEV;
1828     console_lock();
1829     + if (!lock_fb_info(fb_info)) {
1830     + console_unlock();
1831     + return -ENODEV;
1832     + }
1833     +
1834     fb_set_suspend(fb_info, (int)state);
1835     - console_unlock();
1836     +
1837     unlock_fb_info(fb_info);
1838     + console_unlock();
1839    
1840     return count;
1841     }
1842     diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
1843     index 0264704a52be..45d031233253 100644
1844     --- a/drivers/video/sh_mobile_lcdcfb.c
1845     +++ b/drivers/video/sh_mobile_lcdcfb.c
1846     @@ -574,8 +574,9 @@ static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch,
1847     switch (event) {
1848     case SH_MOBILE_LCDC_EVENT_DISPLAY_CONNECT:
1849     /* HDMI plug in */
1850     + console_lock();
1851     if (lock_fb_info(info)) {
1852     - console_lock();
1853     +
1854    
1855     ch->display.width = monspec->max_x * 10;
1856     ch->display.height = monspec->max_y * 10;
1857     @@ -594,19 +595,20 @@ static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch,
1858     fb_set_suspend(info, 0);
1859     }
1860    
1861     - console_unlock();
1862     +
1863     unlock_fb_info(info);
1864     }
1865     + console_unlock();
1866     break;
1867    
1868     case SH_MOBILE_LCDC_EVENT_DISPLAY_DISCONNECT:
1869     /* HDMI disconnect */
1870     + console_lock();
1871     if (lock_fb_info(info)) {
1872     - console_lock();
1873     fb_set_suspend(info, 1);
1874     - console_unlock();
1875     unlock_fb_info(info);
1876     }
1877     + console_unlock();
1878     break;
1879    
1880     case SH_MOBILE_LCDC_EVENT_DISPLAY_MODE:
1881     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1882     index 786bf0708904..f173ef12c97a 100644
1883     --- a/fs/ext4/inode.c
1884     +++ b/fs/ext4/inode.c
1885     @@ -4447,7 +4447,12 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
1886     return -EIO;
1887     }
1888    
1889     - if (wbc->sync_mode != WB_SYNC_ALL)
1890     + /*
1891     + * No need to force transaction in WB_SYNC_NONE mode. Also
1892     + * ext4_sync_fs() will force the commit after everything is
1893     + * written.
1894     + */
1895     + if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
1896     return 0;
1897    
1898     err = ext4_force_commit(inode->i_sb);
1899     @@ -4457,7 +4462,11 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
1900     err = __ext4_get_inode_loc(inode, &iloc, 0);
1901     if (err)
1902     return err;
1903     - if (wbc->sync_mode == WB_SYNC_ALL)
1904     + /*
1905     + * sync(2) will flush the whole buffer cache. No need to do
1906     + * it here separately for each inode.
1907     + */
1908     + if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
1909     sync_dirty_buffer(iloc.bh);
1910     if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
1911     EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
1912     diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
1913     index 4a1aafba6a20..4612291e7cc0 100644
1914     --- a/fs/nfs/nfs3acl.c
1915     +++ b/fs/nfs/nfs3acl.c
1916     @@ -289,8 +289,8 @@ getout:
1917     return acl;
1918     }
1919    
1920     -static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
1921     - struct posix_acl *dfacl)
1922     +static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
1923     + struct posix_acl *dfacl)
1924     {
1925     struct nfs_server *server = NFS_SERVER(inode);
1926     struct nfs_fattr *fattr;
1927     @@ -373,6 +373,15 @@ out:
1928     return status;
1929     }
1930    
1931     +int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
1932     + struct posix_acl *dfacl)
1933     +{
1934     + int ret;
1935     + ret = __nfs3_proc_setacls(inode, acl, dfacl);
1936     + return (ret == -EOPNOTSUPP) ? 0 : ret;
1937     +
1938     +}
1939     +
1940     int nfs3_proc_setacl(struct inode *inode, int type, struct posix_acl *acl)
1941     {
1942     struct posix_acl *alloc = NULL, *dfacl = NULL;
1943     @@ -406,7 +415,7 @@ int nfs3_proc_setacl(struct inode *inode, int type, struct posix_acl *acl)
1944     if (IS_ERR(alloc))
1945     goto fail;
1946     }
1947     - status = nfs3_proc_setacls(inode, acl, dfacl);
1948     + status = __nfs3_proc_setacls(inode, acl, dfacl);
1949     posix_acl_release(alloc);
1950     return status;
1951    
1952     diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
1953     index b2f842d0901b..1c2beb18a713 100644
1954     --- a/fs/nfs/nfs4xdr.c
1955     +++ b/fs/nfs/nfs4xdr.c
1956     @@ -3405,7 +3405,7 @@ static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint
1957     {
1958     __be32 *p;
1959    
1960     - *res = ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL;
1961     + *res = 0;
1962     if (unlikely(bitmap[0] & (FATTR4_WORD0_ACLSUPPORT - 1U)))
1963     return -EIO;
1964     if (likely(bitmap[0] & FATTR4_WORD0_ACLSUPPORT)) {
1965     diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
1966     index 24f499569a2f..ec5d737f93c5 100644
1967     --- a/include/drm/drm_crtc.h
1968     +++ b/include/drm/drm_crtc.h
1969     @@ -1108,6 +1108,8 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
1970     int GTF_2C, int GTF_K, int GTF_2J);
1971     extern int drm_add_modes_noedid(struct drm_connector *connector,
1972     int hdisplay, int vdisplay);
1973     +extern void drm_set_preferred_mode(struct drm_connector *connector,
1974     + int hpref, int vpref);
1975    
1976     extern int drm_edid_header_is_valid(const u8 *raw_edid);
1977     extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
1978     diff --git a/include/linux/fb.h b/include/linux/fb.h
1979     index ffac70aab3e9..8439a1600c1a 100644
1980     --- a/include/linux/fb.h
1981     +++ b/include/linux/fb.h
1982     @@ -613,8 +613,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
1983     extern int register_framebuffer(struct fb_info *fb_info);
1984     extern int unregister_framebuffer(struct fb_info *fb_info);
1985     extern int unlink_framebuffer(struct fb_info *fb_info);
1986     -extern void remove_conflicting_framebuffers(struct apertures_struct *a,
1987     - const char *name, bool primary);
1988     +extern int remove_conflicting_framebuffers(struct apertures_struct *a,
1989     + const char *name, bool primary);
1990     extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
1991     extern int fb_show_logo(struct fb_info *fb_info, int rotate);
1992     extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
1993     diff --git a/include/linux/futex.h b/include/linux/futex.h
1994     index b0d95cac826e..6435f46d6e13 100644
1995     --- a/include/linux/futex.h
1996     +++ b/include/linux/futex.h
1997     @@ -55,7 +55,11 @@ union futex_key {
1998     #ifdef CONFIG_FUTEX
1999     extern void exit_robust_list(struct task_struct *curr);
2000     extern void exit_pi_state_list(struct task_struct *curr);
2001     +#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
2002     +#define futex_cmpxchg_enabled 1
2003     +#else
2004     extern int futex_cmpxchg_enabled;
2005     +#endif
2006     #else
2007     static inline void exit_robust_list(struct task_struct *curr)
2008     {
2009     diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
2010     index e303eef94dd5..0662e98fef72 100644
2011     --- a/include/linux/usb/usbnet.h
2012     +++ b/include/linux/usb/usbnet.h
2013     @@ -30,7 +30,7 @@ struct usbnet {
2014     struct driver_info *driver_info;
2015     const char *driver_name;
2016     void *driver_priv;
2017     - wait_queue_head_t *wait;
2018     + wait_queue_head_t wait;
2019     struct mutex phy_mutex;
2020     unsigned char suspend_count;
2021     unsigned char pkt_cnt, pkt_err;
2022     diff --git a/include/net/sock.h b/include/net/sock.h
2023     index 808cbc2ec6c1..6e2c4901a477 100644
2024     --- a/include/net/sock.h
2025     +++ b/include/net/sock.h
2026     @@ -1459,6 +1459,11 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
2027     */
2028     #define sock_owned_by_user(sk) ((sk)->sk_lock.owned)
2029    
2030     +static inline void sock_release_ownership(struct sock *sk)
2031     +{
2032     + sk->sk_lock.owned = 0;
2033     +}
2034     +
2035     /*
2036     * Macro so as to not evaluate some arguments when
2037     * lockdep is not enabled.
2038     diff --git a/include/net/tcp.h b/include/net/tcp.h
2039     index 51dcc6faa561..31c48908ae32 100644
2040     --- a/include/net/tcp.h
2041     +++ b/include/net/tcp.h
2042     @@ -484,20 +484,21 @@ extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
2043     #ifdef CONFIG_SYN_COOKIES
2044     #include <linux/ktime.h>
2045    
2046     -/* Syncookies use a monotonic timer which increments every 64 seconds.
2047     +/* Syncookies use a monotonic timer which increments every 60 seconds.
2048     * This counter is used both as a hash input and partially encoded into
2049     * the cookie value. A cookie is only validated further if the delta
2050     * between the current counter value and the encoded one is less than this,
2051     - * i.e. a sent cookie is valid only at most for 128 seconds (or less if
2052     + * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
2053     * the counter advances immediately after a cookie is generated).
2054     */
2055     #define MAX_SYNCOOKIE_AGE 2
2056    
2057     static inline u32 tcp_cookie_time(void)
2058     {
2059     - struct timespec now;
2060     - getnstimeofday(&now);
2061     - return now.tv_sec >> 6; /* 64 seconds granularity */
2062     + u64 val = get_jiffies_64();
2063     +
2064     + do_div(val, 60 * HZ);
2065     + return val;
2066     }
2067    
2068     extern u32 __cookie_v4_init_sequence(const struct iphdr *iph,
2069     diff --git a/include/uapi/linux/fd.h b/include/uapi/linux/fd.h
2070     index f1f3dd5981b2..84c517cbce90 100644
2071     --- a/include/uapi/linux/fd.h
2072     +++ b/include/uapi/linux/fd.h
2073     @@ -185,7 +185,8 @@ enum {
2074     * to clear media change status */
2075     FD_UNUSED_BIT,
2076     FD_DISK_CHANGED_BIT, /* disk has been changed since last i/o */
2077     - FD_DISK_WRITABLE_BIT /* disk is writable */
2078     + FD_DISK_WRITABLE_BIT, /* disk is writable */
2079     + FD_OPEN_SHOULD_FAIL_BIT
2080     };
2081    
2082     #define FDSETDRVPRM _IOW(2, 0x90, struct floppy_drive_params)
2083     diff --git a/init/Kconfig b/init/Kconfig
2084     index 3ecd8a1178f1..d42dc7c6ba64 100644
2085     --- a/init/Kconfig
2086     +++ b/init/Kconfig
2087     @@ -1406,6 +1406,13 @@ config FUTEX
2088     support for "fast userspace mutexes". The resulting kernel may not
2089     run glibc-based applications correctly.
2090    
2091     +config HAVE_FUTEX_CMPXCHG
2092     + bool
2093     + help
2094     + Architectures should select this if futex_atomic_cmpxchg_inatomic()
2095     + is implemented and always working. This removes a couple of runtime
2096     + checks.
2097     +
2098     config EPOLL
2099     bool "Enable eventpoll support" if EXPERT
2100     default y
2101     diff --git a/kernel/futex.c b/kernel/futex.c
2102     index 231754863a87..d8347b7a064f 100644
2103     --- a/kernel/futex.c
2104     +++ b/kernel/futex.c
2105     @@ -68,7 +68,9 @@
2106    
2107     #include "rtmutex_common.h"
2108    
2109     +#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
2110     int __read_mostly futex_cmpxchg_enabled;
2111     +#endif
2112    
2113     #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
2114    
2115     @@ -2731,10 +2733,10 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
2116     return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2117     }
2118    
2119     -static int __init futex_init(void)
2120     +static void __init futex_detect_cmpxchg(void)
2121     {
2122     +#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
2123     u32 curval;
2124     - int i;
2125    
2126     /*
2127     * This will fail and we want it. Some arch implementations do
2128     @@ -2748,6 +2750,14 @@ static int __init futex_init(void)
2129     */
2130     if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
2131     futex_cmpxchg_enabled = 1;
2132     +#endif
2133     +}
2134     +
2135     +static int __init futex_init(void)
2136     +{
2137     + int i;
2138     +
2139     + futex_detect_cmpxchg();
2140    
2141     for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2142     plist_head_init(&futex_queues[i].chain);
2143     diff --git a/lib/nlattr.c b/lib/nlattr.c
2144     index 18eca7809b08..fc6754720ced 100644
2145     --- a/lib/nlattr.c
2146     +++ b/lib/nlattr.c
2147     @@ -303,9 +303,15 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
2148     */
2149     int nla_strcmp(const struct nlattr *nla, const char *str)
2150     {
2151     - int len = strlen(str) + 1;
2152     - int d = nla_len(nla) - len;
2153     + int len = strlen(str);
2154     + char *buf = nla_data(nla);
2155     + int attrlen = nla_len(nla);
2156     + int d;
2157    
2158     + if (attrlen > 0 && buf[attrlen - 1] == '\0')
2159     + attrlen--;
2160     +
2161     + d = attrlen - len;
2162     if (d == 0)
2163     d = memcmp(nla_data(nla), str, len);
2164    
2165     diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
2166     index 61fc573f1142..856499fdb10f 100644
2167     --- a/net/8021q/vlan.c
2168     +++ b/net/8021q/vlan.c
2169     @@ -307,9 +307,11 @@ static void vlan_sync_address(struct net_device *dev,
2170     static void vlan_transfer_features(struct net_device *dev,
2171     struct net_device *vlandev)
2172     {
2173     + struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
2174     +
2175     vlandev->gso_max_size = dev->gso_max_size;
2176    
2177     - if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
2178     + if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
2179     vlandev->hard_header_len = dev->hard_header_len;
2180     else
2181     vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
2182     diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
2183     index edf44d079da7..d1537dcd4df8 100644
2184     --- a/net/8021q/vlan_dev.c
2185     +++ b/net/8021q/vlan_dev.c
2186     @@ -557,6 +557,9 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev
2187     struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
2188     struct net_device *real_dev = vlan->real_dev;
2189    
2190     + if (saddr == NULL)
2191     + saddr = dev->dev_addr;
2192     +
2193     return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
2194     }
2195    
2196     @@ -608,7 +611,8 @@ static int vlan_dev_init(struct net_device *dev)
2197     #endif
2198    
2199     dev->needed_headroom = real_dev->needed_headroom;
2200     - if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
2201     + if (vlan_hw_offload_capable(real_dev->features,
2202     + vlan_dev_priv(dev)->vlan_proto)) {
2203     dev->header_ops = &vlan_passthru_header_ops;
2204     dev->hard_header_len = real_dev->hard_header_len;
2205     } else {
2206     diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
2207     index 1b148a3affa7..162d6c78ad05 100644
2208     --- a/net/bridge/br_multicast.c
2209     +++ b/net/bridge/br_multicast.c
2210     @@ -1129,9 +1129,10 @@ static void br_multicast_query_received(struct net_bridge *br,
2211     struct net_bridge_port *port,
2212     struct bridge_mcast_querier *querier,
2213     int saddr,
2214     + bool is_general_query,
2215     unsigned long max_delay)
2216     {
2217     - if (saddr)
2218     + if (saddr && is_general_query)
2219     br_multicast_update_querier_timer(br, querier, max_delay);
2220     else if (timer_pending(&querier->timer))
2221     return;
2222     @@ -1183,8 +1184,16 @@ static int br_ip4_multicast_query(struct net_bridge *br,
2223     IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
2224     }
2225    
2226     + /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
2227     + * all-systems destination address (224.0.0.1) for general queries
2228     + */
2229     + if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) {
2230     + err = -EINVAL;
2231     + goto out;
2232     + }
2233     +
2234     br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
2235     - max_delay);
2236     + !group, max_delay);
2237    
2238     if (!group)
2239     goto out;
2240     @@ -1230,6 +1239,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
2241     unsigned long max_delay;
2242     unsigned long now = jiffies;
2243     const struct in6_addr *group = NULL;
2244     + bool is_general_query;
2245     int err = 0;
2246     u16 vid = 0;
2247    
2248     @@ -1238,6 +1248,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
2249     (port && port->state == BR_STATE_DISABLED))
2250     goto out;
2251    
2252     + /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
2253     + if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
2254     + err = -EINVAL;
2255     + goto out;
2256     + }
2257     +
2258     if (skb->len == sizeof(*mld)) {
2259     if (!pskb_may_pull(skb, sizeof(*mld))) {
2260     err = -EINVAL;
2261     @@ -1259,8 +1275,19 @@ static int br_ip6_multicast_query(struct net_bridge *br,
2262     max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
2263     }
2264    
2265     + is_general_query = group && ipv6_addr_any(group);
2266     +
2267     + /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
2268     + * all-nodes destination address (ff02::1) for general queries
2269     + */
2270     + if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) {
2271     + err = -EINVAL;
2272     + goto out;
2273     + }
2274     +
2275     br_multicast_query_received(br, port, &br->ip6_querier,
2276     - !ipv6_addr_any(&ip6h->saddr), max_delay);
2277     + !ipv6_addr_any(&ip6h->saddr),
2278     + is_general_query, max_delay);
2279    
2280     if (!group)
2281     goto out;
2282     diff --git a/net/core/netpoll.c b/net/core/netpoll.c
2283     index 462cdc97fad8..9b40f234b802 100644
2284     --- a/net/core/netpoll.c
2285     +++ b/net/core/netpoll.c
2286     @@ -740,7 +740,7 @@ static bool pkt_is_ns(struct sk_buff *skb)
2287     struct nd_msg *msg;
2288     struct ipv6hdr *hdr;
2289    
2290     - if (skb->protocol != htons(ETH_P_ARP))
2291     + if (skb->protocol != htons(ETH_P_IPV6))
2292     return false;
2293     if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
2294     return false;
2295     diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2296     index 2a0e21de3060..37b492eaa4f8 100644
2297     --- a/net/core/rtnetlink.c
2298     +++ b/net/core/rtnetlink.c
2299     @@ -2014,12 +2014,13 @@ EXPORT_SYMBOL(rtmsg_ifinfo);
2300     static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
2301     struct net_device *dev,
2302     u8 *addr, u32 pid, u32 seq,
2303     - int type, unsigned int flags)
2304     + int type, unsigned int flags,
2305     + int nlflags)
2306     {
2307     struct nlmsghdr *nlh;
2308     struct ndmsg *ndm;
2309    
2310     - nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
2311     + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
2312     if (!nlh)
2313     return -EMSGSIZE;
2314    
2315     @@ -2057,7 +2058,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
2316     if (!skb)
2317     goto errout;
2318    
2319     - err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
2320     + err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0);
2321     if (err < 0) {
2322     kfree_skb(skb);
2323     goto errout;
2324     @@ -2282,7 +2283,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
2325    
2326     err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
2327     portid, seq,
2328     - RTM_NEWNEIGH, NTF_SELF);
2329     + RTM_NEWNEIGH, NTF_SELF,
2330     + NLM_F_MULTI);
2331     if (err < 0)
2332     return err;
2333     skip:
2334     diff --git a/net/core/sock.c b/net/core/sock.c
2335     index 831a0d0af49f..ec228a30e7dc 100644
2336     --- a/net/core/sock.c
2337     +++ b/net/core/sock.c
2338     @@ -2359,10 +2359,13 @@ void release_sock(struct sock *sk)
2339     if (sk->sk_backlog.tail)
2340     __release_sock(sk);
2341    
2342     + /* Warning: release_cb() might need to release sk ownership,
2343     + * i.e. call sock_release_ownership(sk) before us.
2344     + */
2345     if (sk->sk_prot->release_cb)
2346     sk->sk_prot->release_cb(sk);
2347    
2348     - sk->sk_lock.owned = 0;
2349     + sock_release_ownership(sk);
2350     if (waitqueue_active(&sk->sk_lock.wq))
2351     wake_up(&sk->sk_lock.wq);
2352     spin_unlock_bh(&sk->sk_lock.slock);
2353     diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
2354     index 736c9fc3ef93..0c0c1f09fd17 100644
2355     --- a/net/ipv4/gre_demux.c
2356     +++ b/net/ipv4/gre_demux.c
2357     @@ -211,6 +211,14 @@ static int gre_cisco_rcv(struct sk_buff *skb)
2358     int i;
2359     bool csum_err = false;
2360    
2361     +#ifdef CONFIG_NET_IPGRE_BROADCAST
2362     + if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
2363     + /* Looped back packet, drop it! */
2364     + if (rt_is_output_route(skb_rtable(skb)))
2365     + goto drop;
2366     + }
2367     +#endif
2368     +
2369     if (parse_gre_header(skb, &tpi, &csum_err) < 0)
2370     goto drop;
2371    
2372     diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
2373     index c5313a9c019b..12b80fbfe767 100644
2374     --- a/net/ipv4/inet_fragment.c
2375     +++ b/net/ipv4/inet_fragment.c
2376     @@ -211,7 +211,7 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
2377     }
2378    
2379     work = frag_mem_limit(nf) - nf->low_thresh;
2380     - while (work > 0) {
2381     + while (work > 0 || force) {
2382     spin_lock(&nf->lru_lock);
2383    
2384     if (list_empty(&nf->lru_list)) {
2385     @@ -281,9 +281,10 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
2386    
2387     atomic_inc(&qp->refcnt);
2388     hlist_add_head(&qp->list, &hb->chain);
2389     + inet_frag_lru_add(nf, qp);
2390     spin_unlock(&hb->chain_lock);
2391     read_unlock(&f->lock);
2392     - inet_frag_lru_add(nf, qp);
2393     +
2394     return qp;
2395     }
2396    
2397     diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
2398     index 995a0bb33a65..3bedb26cfb53 100644
2399     --- a/net/ipv4/ip_tunnel.c
2400     +++ b/net/ipv4/ip_tunnel.c
2401     @@ -411,9 +411,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
2402    
2403     #ifdef CONFIG_NET_IPGRE_BROADCAST
2404     if (ipv4_is_multicast(iph->daddr)) {
2405     - /* Looped back packet, drop it! */
2406     - if (rt_is_output_route(skb_rtable(skb)))
2407     - goto drop;
2408     tunnel->dev->stats.multicast++;
2409     skb->pkt_type = PACKET_BROADCAST;
2410     }
2411     diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
2412     index ba22cc3a5a53..c31e3ad98ef2 100644
2413     --- a/net/ipv4/ip_tunnel_core.c
2414     +++ b/net/ipv4/ip_tunnel_core.c
2415     @@ -109,6 +109,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
2416     secpath_reset(skb);
2417     if (!skb->l4_rxhash)
2418     skb->rxhash = 0;
2419     + skb_dst_drop(skb);
2420     skb->vlan_tci = 0;
2421     skb_set_queue_mapping(skb, 0);
2422     skb->pkt_type = PACKET_HOST;
2423     diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
2424     index 1672409f5ba5..6fbf3393d842 100644
2425     --- a/net/ipv4/ipmr.c
2426     +++ b/net/ipv4/ipmr.c
2427     @@ -2253,13 +2253,14 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
2428     }
2429    
2430     static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2431     - u32 portid, u32 seq, struct mfc_cache *c, int cmd)
2432     + u32 portid, u32 seq, struct mfc_cache *c, int cmd,
2433     + int flags)
2434     {
2435     struct nlmsghdr *nlh;
2436     struct rtmsg *rtm;
2437     int err;
2438    
2439     - nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
2440     + nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2441     if (nlh == NULL)
2442     return -EMSGSIZE;
2443    
2444     @@ -2327,7 +2328,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2445     if (skb == NULL)
2446     goto errout;
2447    
2448     - err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
2449     + err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2450     if (err < 0)
2451     goto errout;
2452    
2453     @@ -2366,7 +2367,8 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2454     if (ipmr_fill_mroute(mrt, skb,
2455     NETLINK_CB(cb->skb).portid,
2456     cb->nlh->nlmsg_seq,
2457     - mfc, RTM_NEWROUTE) < 0)
2458     + mfc, RTM_NEWROUTE,
2459     + NLM_F_MULTI) < 0)
2460     goto done;
2461     next_entry:
2462     e++;
2463     @@ -2380,7 +2382,8 @@ next_entry:
2464     if (ipmr_fill_mroute(mrt, skb,
2465     NETLINK_CB(cb->skb).portid,
2466     cb->nlh->nlmsg_seq,
2467     - mfc, RTM_NEWROUTE) < 0) {
2468     + mfc, RTM_NEWROUTE,
2469     + NLM_F_MULTI) < 0) {
2470     spin_unlock_bh(&mfc_unres_lock);
2471     goto done;
2472     }
2473     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
2474     index e088932bcfae..826fc6fab576 100644
2475     --- a/net/ipv4/tcp_output.c
2476     +++ b/net/ipv4/tcp_output.c
2477     @@ -765,6 +765,17 @@ void tcp_release_cb(struct sock *sk)
2478     if (flags & (1UL << TCP_TSQ_DEFERRED))
2479     tcp_tsq_handler(sk);
2480    
2481     + /* Here begins the tricky part :
2482     + * We are called from release_sock() with :
2483     + * 1) BH disabled
2484     + * 2) sk_lock.slock spinlock held
2485     + * 3) socket owned by us (sk->sk_lock.owned == 1)
2486     + *
2487     + * But the following code is meant to be called from BH handlers,
2488     + * so we should keep BH disabled, but release socket ownership early
2489     + */
2490     + sock_release_ownership(sk);
2491     +
2492     if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
2493     tcp_write_timer_handler(sk);
2494     __sock_put(sk);
2495     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2496     index cd3fb301da38..5dac9fd72465 100644
2497     --- a/net/ipv6/addrconf.c
2498     +++ b/net/ipv6/addrconf.c
2499     @@ -1079,8 +1079,11 @@ retry:
2500     * Lifetime is greater than REGEN_ADVANCE time units. In particular,
2501     * an implementation must not create a temporary address with a zero
2502     * Preferred Lifetime.
2503     + * Use age calculation as in addrconf_verify to avoid unnecessary
2504     + * temporary addresses being generated.
2505     */
2506     - if (tmp_prefered_lft <= regen_advance) {
2507     + age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
2508     + if (tmp_prefered_lft <= regen_advance + age) {
2509     in6_ifa_put(ifp);
2510     in6_dev_put(idev);
2511     ret = -1;
2512     diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
2513     index cf77f3abfd06..447a7fbd1bb6 100644
2514     --- a/net/ipv6/exthdrs_offload.c
2515     +++ b/net/ipv6/exthdrs_offload.c
2516     @@ -25,11 +25,11 @@ int __init ipv6_exthdrs_offload_init(void)
2517     int ret;
2518    
2519     ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING);
2520     - if (!ret)
2521     + if (ret)
2522     goto out;
2523    
2524     ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS);
2525     - if (!ret)
2526     + if (ret)
2527     goto out_rt;
2528    
2529     out:
2530     diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
2531     index eef8d945b362..e2c9ff840f63 100644
2532     --- a/net/ipv6/icmp.c
2533     +++ b/net/ipv6/icmp.c
2534     @@ -516,7 +516,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
2535     np->tclass, NULL, &fl6, (struct rt6_info *)dst,
2536     MSG_DONTWAIT, np->dontfrag);
2537     if (err) {
2538     - ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
2539     + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
2540     ip6_flush_pending_frames(sk);
2541     } else {
2542     err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
2543     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2544     index 68fd4918315c..516e136f15ca 100644
2545     --- a/net/ipv6/ip6_output.c
2546     +++ b/net/ipv6/ip6_output.c
2547     @@ -1088,21 +1088,19 @@ static void ip6_append_data_mtu(unsigned int *mtu,
2548     unsigned int fragheaderlen,
2549     struct sk_buff *skb,
2550     struct rt6_info *rt,
2551     - bool pmtuprobe)
2552     + unsigned int orig_mtu)
2553     {
2554     if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
2555     if (skb == NULL) {
2556     /* first fragment, reserve header_len */
2557     - *mtu = *mtu - rt->dst.header_len;
2558     + *mtu = orig_mtu - rt->dst.header_len;
2559    
2560     } else {
2561     /*
2562     * this fragment is not first, the headers
2563     * space is regarded as data space.
2564     */
2565     - *mtu = min(*mtu, pmtuprobe ?
2566     - rt->dst.dev->mtu :
2567     - dst_mtu(rt->dst.path));
2568     + *mtu = orig_mtu;
2569     }
2570     *maxfraglen = ((*mtu - fragheaderlen) & ~7)
2571     + fragheaderlen - sizeof(struct frag_hdr);
2572     @@ -1119,7 +1117,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
2573     struct ipv6_pinfo *np = inet6_sk(sk);
2574     struct inet_cork *cork;
2575     struct sk_buff *skb, *skb_prev = NULL;
2576     - unsigned int maxfraglen, fragheaderlen, mtu;
2577     + unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
2578     int exthdrlen;
2579     int dst_exthdrlen;
2580     int hh_len;
2581     @@ -1201,6 +1199,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
2582     dst_exthdrlen = 0;
2583     mtu = cork->fragsize;
2584     }
2585     + orig_mtu = mtu;
2586    
2587     hh_len = LL_RESERVED_SPACE(rt->dst.dev);
2588    
2589     @@ -1280,8 +1279,7 @@ alloc_new_skb:
2590     if (skb == NULL || skb_prev == NULL)
2591     ip6_append_data_mtu(&mtu, &maxfraglen,
2592     fragheaderlen, skb, rt,
2593     - np->pmtudisc ==
2594     - IPV6_PMTUDISC_PROBE);
2595     + orig_mtu);
2596    
2597     skb_prev = skb;
2598    
2599     @@ -1537,8 +1535,8 @@ int ip6_push_pending_frames(struct sock *sk)
2600     if (proto == IPPROTO_ICMPV6) {
2601     struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
2602    
2603     - ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
2604     - ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
2605     + ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
2606     + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
2607     }
2608    
2609     err = ip6_local_out(skb);
2610     diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
2611     index 0eb4038a4d63..8737400af0a0 100644
2612     --- a/net/ipv6/ip6mr.c
2613     +++ b/net/ipv6/ip6mr.c
2614     @@ -2349,13 +2349,14 @@ int ip6mr_get_route(struct net *net,
2615     }
2616    
2617     static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2618     - u32 portid, u32 seq, struct mfc6_cache *c, int cmd)
2619     + u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2620     + int flags)
2621     {
2622     struct nlmsghdr *nlh;
2623     struct rtmsg *rtm;
2624     int err;
2625    
2626     - nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
2627     + nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2628     if (nlh == NULL)
2629     return -EMSGSIZE;
2630    
2631     @@ -2423,7 +2424,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
2632     if (skb == NULL)
2633     goto errout;
2634    
2635     - err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
2636     + err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2637     if (err < 0)
2638     goto errout;
2639    
2640     @@ -2462,7 +2463,8 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2641     if (ip6mr_fill_mroute(mrt, skb,
2642     NETLINK_CB(cb->skb).portid,
2643     cb->nlh->nlmsg_seq,
2644     - mfc, RTM_NEWROUTE) < 0)
2645     + mfc, RTM_NEWROUTE,
2646     + NLM_F_MULTI) < 0)
2647     goto done;
2648     next_entry:
2649     e++;
2650     @@ -2476,7 +2478,8 @@ next_entry:
2651     if (ip6mr_fill_mroute(mrt, skb,
2652     NETLINK_CB(cb->skb).portid,
2653     cb->nlh->nlmsg_seq,
2654     - mfc, RTM_NEWROUTE) < 0) {
2655     + mfc, RTM_NEWROUTE,
2656     + NLM_F_MULTI) < 0) {
2657     spin_unlock_bh(&mfc_unres_lock);
2658     goto done;
2659     }
2660     diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
2661     index d18f9f903db6..d81abd5ba767 100644
2662     --- a/net/ipv6/mcast.c
2663     +++ b/net/ipv6/mcast.c
2664     @@ -1620,11 +1620,12 @@ static void mld_sendpack(struct sk_buff *skb)
2665     dst_output);
2666     out:
2667     if (!err) {
2668     - ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
2669     - ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
2670     - IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
2671     - } else
2672     - IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
2673     + ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
2674     + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
2675     + IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
2676     + } else {
2677     + IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2678     + }
2679    
2680     rcu_read_unlock();
2681     return;
2682     diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
2683     index 7856e962a3e6..6acab0bce9d8 100644
2684     --- a/net/ipv6/ping.c
2685     +++ b/net/ipv6/ping.c
2686     @@ -182,8 +182,8 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
2687     MSG_DONTWAIT, np->dontfrag);
2688    
2689     if (err) {
2690     - ICMP6_INC_STATS_BH(sock_net(sk), rt->rt6i_idev,
2691     - ICMP6_MIB_OUTERRORS);
2692     + ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev,
2693     + ICMP6_MIB_OUTERRORS);
2694     ip6_flush_pending_frames(sk);
2695     } else {
2696     err = icmpv6_push_pending_frames(sk, &fl6,
2697     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2698     index 0accb1321dd6..77f81beabbd3 100644
2699     --- a/net/ipv6/route.c
2700     +++ b/net/ipv6/route.c
2701     @@ -1500,7 +1500,7 @@ int ip6_route_add(struct fib6_config *cfg)
2702     if (!table)
2703     goto out;
2704    
2705     - rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
2706     + rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
2707    
2708     if (!rt) {
2709     err = -ENOMEM;
2710     diff --git a/net/rds/iw.c b/net/rds/iw.c
2711     index 7826d46baa70..589935661d66 100644
2712     --- a/net/rds/iw.c
2713     +++ b/net/rds/iw.c
2714     @@ -239,7 +239,8 @@ static int rds_iw_laddr_check(__be32 addr)
2715     ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
2716     /* due to this, we will claim to support IB devices unless we
2717     check node_type. */
2718     - if (ret || cm_id->device->node_type != RDMA_NODE_RNIC)
2719     + if (ret || !cm_id->device ||
2720     + cm_id->device->node_type != RDMA_NODE_RNIC)
2721     ret = -EADDRNOTAVAIL;
2722    
2723     rdsdebug("addr %pI4 ret %d node type %d\n",
2724     diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
2725     index 2e55f8189502..52229f91b115 100644
2726     --- a/net/sched/sch_fq.c
2727     +++ b/net/sched/sch_fq.c
2728     @@ -577,9 +577,11 @@ static void fq_rehash(struct fq_sched_data *q,
2729     q->stat_gc_flows += fcnt;
2730     }
2731    
2732     -static int fq_resize(struct fq_sched_data *q, u32 log)
2733     +static int fq_resize(struct Qdisc *sch, u32 log)
2734     {
2735     + struct fq_sched_data *q = qdisc_priv(sch);
2736     struct rb_root *array;
2737     + void *old_fq_root;
2738     u32 idx;
2739    
2740     if (q->fq_root && log == q->fq_trees_log)
2741     @@ -592,13 +594,19 @@ static int fq_resize(struct fq_sched_data *q, u32 log)
2742     for (idx = 0; idx < (1U << log); idx++)
2743     array[idx] = RB_ROOT;
2744    
2745     - if (q->fq_root) {
2746     - fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
2747     - kfree(q->fq_root);
2748     - }
2749     + sch_tree_lock(sch);
2750     +
2751     + old_fq_root = q->fq_root;
2752     + if (old_fq_root)
2753     + fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
2754     +
2755     q->fq_root = array;
2756     q->fq_trees_log = log;
2757    
2758     + sch_tree_unlock(sch);
2759     +
2760     + kfree(old_fq_root);
2761     +
2762     return 0;
2763     }
2764    
2765     @@ -674,9 +682,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
2766     q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
2767     }
2768    
2769     - if (!err)
2770     - err = fq_resize(q, fq_log);
2771     -
2772     + if (!err) {
2773     + sch_tree_unlock(sch);
2774     + err = fq_resize(sch, fq_log);
2775     + sch_tree_lock(sch);
2776     + }
2777     while (sch->q.qlen > sch->limit) {
2778     struct sk_buff *skb = fq_dequeue(sch);
2779    
2780     @@ -722,7 +732,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
2781     if (opt)
2782     err = fq_change(sch, opt);
2783     else
2784     - err = fq_resize(q, q->fq_trees_log);
2785     + err = fq_resize(sch, q->fq_trees_log);
2786    
2787     return err;
2788     }
2789     diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
2790     index d244a23ab8d3..26be077b8267 100644
2791     --- a/net/sctp/sm_make_chunk.c
2792     +++ b/net/sctp/sm_make_chunk.c
2793     @@ -1433,8 +1433,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk)
2794     BUG_ON(!list_empty(&chunk->list));
2795     list_del_init(&chunk->transmitted_list);
2796    
2797     - /* Free the chunk skb data and the SCTP_chunk stub itself. */
2798     - dev_kfree_skb(chunk->skb);
2799     + consume_skb(chunk->skb);
2800     + consume_skb(chunk->auth_chunk);
2801    
2802     SCTP_DBG_OBJCNT_DEC(chunk);
2803     kmem_cache_free(sctp_chunk_cachep, chunk);
2804     diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
2805     index 56ebe71cfe13..0a5f0508c43a 100644
2806     --- a/net/sctp/sm_statefuns.c
2807     +++ b/net/sctp/sm_statefuns.c
2808     @@ -761,7 +761,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
2809    
2810     /* Make sure that we and the peer are AUTH capable */
2811     if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
2812     - kfree_skb(chunk->auth_chunk);
2813     sctp_association_free(new_asoc);
2814     return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2815     }
2816     @@ -776,10 +775,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
2817     auth.transport = chunk->transport;
2818    
2819     ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
2820     -
2821     - /* We can now safely free the auth_chunk clone */
2822     - kfree_skb(chunk->auth_chunk);
2823     -
2824     if (ret != SCTP_IERROR_NO_ERROR) {
2825     sctp_association_free(new_asoc);
2826     return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2827     diff --git a/net/socket.c b/net/socket.c
2828     index e83c416708af..dc57dae20a9a 100644
2829     --- a/net/socket.c
2830     +++ b/net/socket.c
2831     @@ -1972,6 +1972,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
2832     {
2833     if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
2834     return -EFAULT;
2835     +
2836     + if (kmsg->msg_namelen < 0)
2837     + return -EINVAL;
2838     +
2839     if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
2840     kmsg->msg_namelen = sizeof(struct sockaddr_storage);
2841     return 0;
2842     diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
2843     index 890a29912d5a..e860d4f7ed2a 100644
2844     --- a/net/sunrpc/backchannel_rqst.c
2845     +++ b/net/sunrpc/backchannel_rqst.c
2846     @@ -64,7 +64,6 @@ static void xprt_free_allocation(struct rpc_rqst *req)
2847     free_page((unsigned long)xbufp->head[0].iov_base);
2848     xbufp = &req->rq_snd_buf;
2849     free_page((unsigned long)xbufp->head[0].iov_base);
2850     - list_del(&req->rq_bc_pa_list);
2851     kfree(req);
2852     }
2853    
2854     @@ -168,8 +167,10 @@ out_free:
2855     /*
2856     * Memory allocation failed, free the temporary list
2857     */
2858     - list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
2859     + list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) {
2860     + list_del(&req->rq_bc_pa_list);
2861     xprt_free_allocation(req);
2862     + }
2863    
2864     dprintk("RPC: setup backchannel transport failed\n");
2865     return -ENOMEM;
2866     @@ -198,6 +199,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
2867     xprt_dec_alloc_count(xprt, max_reqs);
2868     list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
2869     dprintk("RPC: req=%p\n", req);
2870     + list_del(&req->rq_bc_pa_list);
2871     xprt_free_allocation(req);
2872     if (--max_reqs == 0)
2873     break;
2874     diff --git a/net/tipc/config.c b/net/tipc/config.c
2875     index c301a9a592d8..5afe633114e0 100644
2876     --- a/net/tipc/config.c
2877     +++ b/net/tipc/config.c
2878     @@ -376,7 +376,6 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr,
2879     struct tipc_cfg_msg_hdr *req_hdr;
2880     struct tipc_cfg_msg_hdr *rep_hdr;
2881     struct sk_buff *rep_buf;
2882     - int ret;
2883    
2884     /* Validate configuration message header (ignore invalid message) */
2885     req_hdr = (struct tipc_cfg_msg_hdr *)buf;
2886     @@ -398,12 +397,8 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr,
2887     memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
2888     rep_hdr->tcm_len = htonl(rep_buf->len);
2889     rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
2890     -
2891     - ret = tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
2892     - rep_buf->len);
2893     - if (ret < 0)
2894     - pr_err("Sending cfg reply message failed, no memory\n");
2895     -
2896     + tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
2897     + rep_buf->len);
2898     kfree_skb(rep_buf);
2899     }
2900     }
2901     diff --git a/net/tipc/handler.c b/net/tipc/handler.c
2902     index b36f0fcd9bdf..79b991e044a9 100644
2903     --- a/net/tipc/handler.c
2904     +++ b/net/tipc/handler.c
2905     @@ -57,7 +57,6 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument)
2906     struct queue_item *item;
2907    
2908     if (!handler_enabled) {
2909     - pr_err("Signal request ignored by handler\n");
2910     return -ENOPROTOOPT;
2911     }
2912    
2913     diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
2914     index 09dcd54b04e1..299e45af7e4e 100644
2915     --- a/net/tipc/name_table.c
2916     +++ b/net/tipc/name_table.c
2917     @@ -942,20 +942,51 @@ int tipc_nametbl_init(void)
2918     return 0;
2919     }
2920    
2921     +/**
2922     + * tipc_purge_publications - remove all publications for a given type
2923     + *
2924     + * tipc_nametbl_lock must be held when calling this function
2925     + */
2926     +static void tipc_purge_publications(struct name_seq *seq)
2927     +{
2928     + struct publication *publ, *safe;
2929     + struct sub_seq *sseq;
2930     + struct name_info *info;
2931     +
2932     + if (!seq->sseqs) {
2933     + nameseq_delete_empty(seq);
2934     + return;
2935     + }
2936     + sseq = seq->sseqs;
2937     + info = sseq->info;
2938     + list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
2939     + tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
2940     + publ->ref, publ->key);
2941     + }
2942     +}
2943     +
2944     void tipc_nametbl_stop(void)
2945     {
2946     u32 i;
2947     + struct name_seq *seq;
2948     + struct hlist_head *seq_head;
2949     + struct hlist_node *safe;
2950    
2951     if (!table.types)
2952     return;
2953    
2954     - /* Verify name table is empty, then release it */
2955     + /* Verify name table is empty and purge any lingering
2956     + * publications, then release the name table
2957     + */
2958     write_lock_bh(&tipc_nametbl_lock);
2959     for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
2960     if (hlist_empty(&table.types[i]))
2961     continue;
2962     - pr_err("nametbl_stop(): orphaned hash chain detected\n");
2963     - break;
2964     + seq_head = &table.types[i];
2965     + hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
2966     + tipc_purge_publications(seq);
2967     + }
2968     + continue;
2969     }
2970     kfree(table.types);
2971     table.types = NULL;
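With the change above, tipc_nametbl_stop() no longer just warns about orphaned hash chains: it walks every bucket and purges any publications still linked before freeing the table, closing a leak on module removal. Below is an illustrative userspace sketch of that "purge every lingering chain at shutdown" pattern, assuming a trivial bucketed hash table; the names are invented for the example.

#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 8

struct entry {
        struct entry *next;
        int key;
};

static struct entry *table[NBUCKETS];

static void publish(int key)
{
        struct entry *e = malloc(sizeof(*e));

        e->key = key;
        e->next = table[key % NBUCKETS];
        table[key % NBUCKETS] = e;
}

/* Analogue of tipc_purge_publications() for one hash chain. */
static void purge_bucket(struct entry **head)
{
        struct entry *e = *head, *tmp;

        while (e) {
                tmp = e->next;        /* safe walk: save next before freeing */
                free(e);
                e = tmp;
        }
        *head = NULL;
}

/* Analogue of tipc_nametbl_stop(): purge every lingering chain. */
static void table_stop(void)
{
        for (int i = 0; i < NBUCKETS; i++)
                purge_bucket(&table[i]);
}

int main(void)
{
        publish(1); publish(9); publish(4);
        table_stop();
        printf("buckets purged\n");
        return 0;
}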
2972     diff --git a/net/tipc/server.c b/net/tipc/server.c
2973     index fd3fa57a410e..bd2336aad0e4 100644
2974     --- a/net/tipc/server.c
2975     +++ b/net/tipc/server.c
2976     @@ -87,7 +87,6 @@ static void tipc_clean_outqueues(struct tipc_conn *con);
2977     static void tipc_conn_kref_release(struct kref *kref)
2978     {
2979     struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
2980     - struct tipc_server *s = con->server;
2981    
2982     if (con->sock) {
2983     tipc_sock_release_local(con->sock);
2984     @@ -95,10 +94,6 @@ static void tipc_conn_kref_release(struct kref *kref)
2985     }
2986    
2987     tipc_clean_outqueues(con);
2988     -
2989     - if (con->conid)
2990     - s->tipc_conn_shutdown(con->conid, con->usr_data);
2991     -
2992     kfree(con);
2993     }
2994    
2995     @@ -181,6 +176,9 @@ static void tipc_close_conn(struct tipc_conn *con)
2996     struct tipc_server *s = con->server;
2997    
2998     if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
2999     + if (con->conid)
3000     + s->tipc_conn_shutdown(con->conid, con->usr_data);
3001     +
3002     spin_lock_bh(&s->idr_lock);
3003     idr_remove(&s->conn_idr, con->conid);
3004     s->idr_in_use--;
3005     @@ -429,10 +427,12 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
3006     list_add_tail(&e->list, &con->outqueue);
3007     spin_unlock_bh(&con->outqueue_lock);
3008    
3009     - if (test_bit(CF_CONNECTED, &con->flags))
3010     + if (test_bit(CF_CONNECTED, &con->flags)) {
3011     if (!queue_work(s->send_wq, &con->swork))
3012     conn_put(con);
3013     -
3014     + } else {
3015     + conn_put(con);
3016     + }
3017     return 0;
3018     }
3019    
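Two things happen in the server.c hunks: the tipc_conn_shutdown() callback moves from the final kref release into tipc_close_conn(), gated by test_and_clear_bit(CF_CONNECTED) so it runs at most once, and tipc_conn_sendmsg() now drops the reference it took when the connection is already closed and no work will be queued. The sketch below models both ideas with C11 atomics in userspace; it is an analogue under simplified assumptions, not the TIPC server, and conn/close_conn/send_msg are invented names.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
        atomic_bool connected;
        atomic_int refcount;
};

static void conn_put(struct conn *c)
{
        if (atomic_fetch_sub(&c->refcount, 1) == 1) {
                printf("last ref dropped, freeing\n");
                free(c);
        }
}

static void close_conn(struct conn *c)
{
        /* test-and-clear: only the first closer runs the shutdown hook */
        if (atomic_exchange(&c->connected, false)) {
                printf("shutdown callback runs exactly once\n");
                conn_put(c);          /* drop the "connected" reference */
        }
}

static int send_msg(struct conn *c)
{
        atomic_fetch_add(&c->refcount, 1);    /* ref for the queued work */
        if (atomic_load(&c->connected)) {
                printf("work queued\n");
                conn_put(c);          /* in the real code the worker drops this */
        } else {
                conn_put(c);          /* no work will run: balance the ref here */
        }
        return 0;
}

int main(void)
{
        struct conn *c = malloc(sizeof(*c));

        atomic_init(&c->connected, true);
        atomic_init(&c->refcount, 2);         /* caller ref + connected ref */
        close_conn(c);
        close_conn(c);                        /* second close is a no-op */
        send_msg(c);                          /* closed: ref dropped, no work */
        conn_put(c);                          /* caller's own reference */
        return 0;
}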
3020     diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
3021     index d38bb45d82e9..c2a37aa12498 100644
3022     --- a/net/tipc/subscr.c
3023     +++ b/net/tipc/subscr.c
3024     @@ -96,20 +96,16 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
3025     {
3026     struct tipc_subscriber *subscriber = sub->subscriber;
3027     struct kvec msg_sect;
3028     - int ret;
3029    
3030     msg_sect.iov_base = (void *)&sub->evt;
3031     msg_sect.iov_len = sizeof(struct tipc_event);
3032     -
3033     sub->evt.event = htohl(event, sub->swap);
3034     sub->evt.found_lower = htohl(found_lower, sub->swap);
3035     sub->evt.found_upper = htohl(found_upper, sub->swap);
3036     sub->evt.port.ref = htohl(port_ref, sub->swap);
3037     sub->evt.port.node = htohl(node, sub->swap);
3038     - ret = tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL,
3039     - msg_sect.iov_base, msg_sect.iov_len);
3040     - if (ret < 0)
3041     - pr_err("Sending subscription event failed, no memory\n");
3042     + tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base,
3043     + msg_sect.iov_len);
3044     }
3045    
3046     /**
3047     @@ -153,14 +149,6 @@ static void subscr_timeout(struct tipc_subscription *sub)
3048     /* The spin lock per subscriber is used to protect its members */
3049     spin_lock_bh(&subscriber->lock);
3050    
3051     - /* Validate if the connection related to the subscriber is
3052     - * closed (in case subscriber is terminating)
3053     - */
3054     - if (subscriber->conid == 0) {
3055     - spin_unlock_bh(&subscriber->lock);
3056     - return;
3057     - }
3058     -
3059     /* Validate timeout (in case subscription is being cancelled) */
3060     if (sub->timeout == TIPC_WAIT_FOREVER) {
3061     spin_unlock_bh(&subscriber->lock);
3062     @@ -215,9 +203,6 @@ static void subscr_release(struct tipc_subscriber *subscriber)
3063    
3064     spin_lock_bh(&subscriber->lock);
3065    
3066     - /* Invalidate subscriber reference */
3067     - subscriber->conid = 0;
3068     -
3069     /* Destroy any existing subscriptions for subscriber */
3070     list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
3071     subscription_list) {
3072     @@ -278,9 +263,9 @@ static void subscr_cancel(struct tipc_subscr *s,
3073     *
3074     * Called with subscriber lock held.
3075     */
3076     -static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
3077     - struct tipc_subscriber *subscriber)
3078     -{
3079     +static int subscr_subscribe(struct tipc_subscr *s,
3080     + struct tipc_subscriber *subscriber,
3081     + struct tipc_subscription **sub_p) {
3082     struct tipc_subscription *sub;
3083     int swap;
3084    
3085     @@ -291,23 +276,21 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
3086     if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
3087     s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
3088     subscr_cancel(s, subscriber);
3089     - return NULL;
3090     + return 0;
3091     }
3092    
3093     /* Refuse subscription if global limit exceeded */
3094     if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
3095     pr_warn("Subscription rejected, limit reached (%u)\n",
3096     TIPC_MAX_SUBSCRIPTIONS);
3097     - subscr_terminate(subscriber);
3098     - return NULL;
3099     + return -EINVAL;
3100     }
3101    
3102     /* Allocate subscription object */
3103     sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
3104     if (!sub) {
3105     pr_warn("Subscription rejected, no memory\n");
3106     - subscr_terminate(subscriber);
3107     - return NULL;
3108     + return -ENOMEM;
3109     }
3110    
3111     /* Initialize subscription object */
3112     @@ -321,8 +304,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
3113     (sub->seq.lower > sub->seq.upper)) {
3114     pr_warn("Subscription rejected, illegal request\n");
3115     kfree(sub);
3116     - subscr_terminate(subscriber);
3117     - return NULL;
3118     + return -EINVAL;
3119     }
3120     INIT_LIST_HEAD(&sub->nameseq_list);
3121     list_add(&sub->subscription_list, &subscriber->subscription_list);
3122     @@ -335,8 +317,8 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
3123     (Handler)subscr_timeout, (unsigned long)sub);
3124     k_start_timer(&sub->timer, sub->timeout);
3125     }
3126     -
3127     - return sub;
3128     + *sub_p = sub;
3129     + return 0;
3130     }
3131    
3132     /* Handle one termination request for the subscriber */
3133     @@ -350,10 +332,14 @@ static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
3134     void *usr_data, void *buf, size_t len)
3135     {
3136     struct tipc_subscriber *subscriber = usr_data;
3137     - struct tipc_subscription *sub;
3138     + struct tipc_subscription *sub = NULL;
3139    
3140     spin_lock_bh(&subscriber->lock);
3141     - sub = subscr_subscribe((struct tipc_subscr *)buf, subscriber);
3142     + if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) {
3143     + spin_unlock_bh(&subscriber->lock);
3144     + subscr_terminate(subscriber);
3145     + return;
3146     + }
3147     if (sub)
3148     tipc_nametbl_subscribe(sub);
3149     spin_unlock_bh(&subscriber->lock);
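The subscr.c hunks change subscr_subscribe() so it only reports failure through a return code (handing any new subscription back via a pointer), and the caller drops the subscriber spinlock before invoking subscr_terminate(), so teardown never runs under the lock it would need. A minimal pthread-based sketch of that control flow follows; it is an illustration only, and subscriber/subscribe/terminate/handle_request are hypothetical names, not the TIPC API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define MAX_SUBS 2

struct subscriber {
        pthread_mutex_t lock;
        int nsubs;
};

/* Called with subscriber->lock held; never tears the subscriber down. */
static int subscribe(struct subscriber *s)
{
        if (s->nsubs >= MAX_SUBS)
                return -EINVAL;       /* report, let the caller clean up */
        s->nsubs++;
        return 0;
}

static void terminate(struct subscriber *s)
{
        /* would take s->lock internally, so must not run under it */
        printf("terminating subscriber\n");
}

static void handle_request(struct subscriber *s)
{
        pthread_mutex_lock(&s->lock);
        if (subscribe(s) < 0) {
                pthread_mutex_unlock(&s->lock);   /* drop the lock first... */
                terminate(s);                     /* ...then tear down */
                return;
        }
        pthread_mutex_unlock(&s->lock);
        printf("subscription accepted (%d active)\n", s->nsubs);
}

int main(void)
{
        struct subscriber s = { PTHREAD_MUTEX_INITIALIZER, 0 };

        for (int i = 0; i < 3; i++)
                handle_request(&s);   /* third request exceeds the limit */
        return 0;
}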
3150     diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
3151     index d7c1ac621a90..c3975bcf725f 100644
3152     --- a/net/unix/af_unix.c
3153     +++ b/net/unix/af_unix.c
3154     @@ -1785,8 +1785,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
3155     goto out;
3156    
3157     err = mutex_lock_interruptible(&u->readlock);
3158     - if (err) {
3159     - err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
3160     + if (unlikely(err)) {
3161     + /* recvmsg() in non blocking mode is supposed to return -EAGAIN
3162     + * sk_rcvtimeo is not honored by mutex_lock_interruptible()
3163     + */
3164     + err = noblock ? -EAGAIN : -ERESTARTSYS;
3165     goto out;
3166     }
3167    
3168     @@ -1911,6 +1914,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
3169     struct unix_sock *u = unix_sk(sk);
3170     struct sockaddr_un *sunaddr = msg->msg_name;
3171     int copied = 0;
3172     + int noblock = flags & MSG_DONTWAIT;
3173     int check_creds = 0;
3174     int target;
3175     int err = 0;
3176     @@ -1926,7 +1930,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
3177     goto out;
3178    
3179     target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
3180     - timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
3181     + timeo = sock_rcvtimeo(sk, noblock);
3182    
3183     /* Lock the socket to prevent queue disordering
3184     * while sleeps in memcpy_tomsg
3185     @@ -1938,8 +1942,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
3186     }
3187    
3188     err = mutex_lock_interruptible(&u->readlock);
3189     - if (err) {
3190     - err = sock_intr_errno(timeo);
3191     + if (unlikely(err)) {
3192     + /* recvmsg() in non blocking mode is supposed to return -EAGAIN
3193     + * sk_rcvtimeo is not honored by mutex_lock_interruptible()
3194     + */
3195     + err = noblock ? -EAGAIN : -ERESTARTSYS;
3196     goto out;
3197     }
3198    
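The af_unix hunks change the error reported when mutex_lock_interruptible() is interrupted: non-blocking receivers now get -EAGAIN instead of an error derived from the socket receive timeout, while blocking receivers get -ERESTARTSYS so the call can be restarted. The sketch below is a userspace analogue of that errno choice; since ERESTARTSYS is kernel-internal, the blocking case is mapped to EINTR here, and recv_like/lock_interruptible are invented stand-ins.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for mutex_lock_interruptible(): nonzero means a signal
 * arrived before the lock could be taken. */
static int lock_interruptible(bool interrupted)
{
        return interrupted ? -EINTR : 0;
}

static int recv_like(bool nonblocking, bool interrupted)
{
        int err = lock_interruptible(interrupted);

        if (err) {
                /* A non-blocking read must not report a timeout-derived
                 * error here: the caller asked for "try once". */
                return nonblocking ? -EAGAIN : -EINTR;
        }
        return 0;       /* would go on to copy data */
}

int main(void)
{
        printf("nonblocking + signal -> %s\n",
               strerror(-recv_like(true, true)));
        printf("blocking    + signal -> %s\n",
               strerror(-recv_like(false, true)));
        printf("no signal            -> %d\n", recv_like(true, false));
        return 0;
}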
3199     diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
3200     index 25d5ebaf25f9..630b8adf0ce5 100644
3201     --- a/security/selinux/hooks.c
3202     +++ b/security/selinux/hooks.c
3203     @@ -1386,15 +1386,33 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
3204     isec->sid = sbsec->sid;
3205    
3206     if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) {
3207     - if (opt_dentry) {
3208     - isec->sclass = inode_mode_to_security_class(inode->i_mode);
3209     - rc = selinux_proc_get_sid(opt_dentry,
3210     - isec->sclass,
3211     - &sid);
3212     - if (rc)
3213     - goto out_unlock;
3214     - isec->sid = sid;
3215     - }
3216     + /* We must have a dentry to determine the label on
3217     + * procfs inodes */
3218     + if (opt_dentry)
3219     + /* Called from d_instantiate or
3220     + * d_splice_alias. */
3221     + dentry = dget(opt_dentry);
3222     + else
3223     + /* Called from selinux_complete_init, try to
3224     + * find a dentry. */
3225     + dentry = d_find_alias(inode);
3226     + /*
3227     + * This can be hit on boot when a file is accessed
3228     + * before the policy is loaded. When we load policy we
3229     + * may find inodes that have no dentry on the
3230     + * sbsec->isec_head list. No reason to complain as
3231     + * these will get fixed up the next time we go through
3232     + * inode_doinit() with a dentry, before these inodes
3233     + * could be used again by userspace.
3234     + */
3235     + if (!dentry)
3236     + goto out_unlock;
3237     + isec->sclass = inode_mode_to_security_class(inode->i_mode);
3238     + rc = selinux_proc_get_sid(dentry, isec->sclass, &sid);
3239     + dput(dentry);
3240     + if (rc)
3241     + goto out_unlock;
3242     + isec->sid = sid;
3243     }
3244     break;
3245     }
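For SE_SBPROC superblocks the hunk above now obtains a dentry from one of two sources, dget() on the dentry passed in or d_find_alias() on the inode, quietly skips labelling when neither yields one, and always dput()s the reference it took before acting on the computed SID. The following plain C sketch illustrates only that reference discipline (borrow from whichever source is available, bail out if none, release exactly what was taken); dentry_like, get_ref, put_ref and compute_label are hypothetical names, not the VFS API.

#include <stdio.h>
#include <stdlib.h>

struct dentry_like {
        int refcount;
};

static struct dentry_like *get_ref(struct dentry_like *d)
{
        if (d)
                d->refcount++;        /* analogue of dget() */
        return d;
}

static void put_ref(struct dentry_like *d)
{
        if (d && --d->refcount == 0)  /* analogue of dput() */
                free(d);
}

/* Analogue of d_find_alias(): may or may not find a cached alias. */
static struct dentry_like *find_alias(struct dentry_like *cached)
{
        return get_ref(cached);
}

static int compute_label(struct dentry_like *passed_in,
                         struct dentry_like *cached)
{
        struct dentry_like *d;

        d = passed_in ? get_ref(passed_in) : find_alias(cached);
        if (!d)
                return 0;             /* nothing to label yet; not an error */

        printf("label computed against a valid reference\n");
        put_ref(d);                   /* release exactly what was taken */
        return 1;
}

int main(void)
{
        struct dentry_like *d = calloc(1, sizeof(*d));

        d->refcount = 1;              /* the "cache" holds one reference */
        compute_label(d, NULL);       /* dget() path */
        compute_label(NULL, d);       /* d_find_alias() path */
        compute_label(NULL, NULL);    /* no dentry: bail out quietly */
        put_ref(d);                   /* drop the cache's reference */
        return 0;
}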