Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.10/0103-3.10.4-all-fixes.patch



Revision 2253
Tue Aug 13 14:26:09 2013 UTC by niro
File size: 101588 bytes
3.10.6-magellan-r1
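
The body of this file is a standard unified diff taking a vanilla 3.10.3 tree to 3.10.4 (the first hunk bumps SUBLEVEL from 3 to 4). As a usage sketch only -- the tree location and saved file name below are assumptions taken from this page, not part of the patch itself -- a diff whose headers carry a/ and b/ prefixes is applied from the top of the source tree with -p1:

    cd linux-3.10.3
    patch -p1 --dry-run < 0103-3.10.4-all-fixes.patch   # verify every hunk applies before touching the tree
    patch -p1 < 0103-3.10.4-all-fixes.patch
    make kernelversion                                  # should now report 3.10.4

Note that the web view collapses tabs and leading diff context spaces, so download the pristine patch rather than copying it from this page.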
diff --git a/Makefile b/Makefile
index b548552..b4df9b2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 10
-SUBLEVEL = 3
+SUBLEVEL = 4
EXTRAVERSION =
NAME = Unicycling Gorilla

diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index a7cd2cf..3490a24 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -276,8 +276,6 @@ int __init dc21285_setup(int nr, struct pci_sys_data *sys)

sys->mem_offset = DC21285_PCI_MEM;

- pci_ioremap_io(0, DC21285_PCI_IO);
-
pci_add_resource_offset(&sys->resources, &res[0], sys->mem_offset);
pci_add_resource_offset(&sys->resources, &res[1], sys->mem_offset);

diff --git a/arch/arm/mach-s3c24xx/clock-s3c2410.c b/arch/arm/mach-s3c24xx/clock-s3c2410.c
index 34fffdf..5645536 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2410.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2410.c
@@ -119,66 +119,101 @@ static struct clk init_clocks_off[] = {
}
};

-static struct clk init_clocks[] = {
- {
- .name = "lcd",
- .parent = &clk_h,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_LCDC,
- }, {
- .name = "gpio",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_GPIO,
- }, {
- .name = "usb-host",
- .parent = &clk_h,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_USBH,
- }, {
- .name = "usb-device",
- .parent = &clk_h,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_USBD,
- }, {
- .name = "timers",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_PWMT,
- }, {
- .name = "uart",
- .devname = "s3c2410-uart.0",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART0,
- }, {
- .name = "uart",
- .devname = "s3c2410-uart.1",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART1,
- }, {
- .name = "uart",
- .devname = "s3c2410-uart.2",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART2,
- }, {
- .name = "rtc",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_RTC,
- }, {
- .name = "watchdog",
- .parent = &clk_p,
- .ctrlbit = 0,
- }, {
- .name = "usb-bus-host",
- .parent = &clk_usb_bus,
- }, {
- .name = "usb-bus-gadget",
- .parent = &clk_usb_bus,
- },
+static struct clk clk_lcd = {
+ .name = "lcd",
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_LCDC,
+};
+
+static struct clk clk_gpio = {
+ .name = "gpio",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_GPIO,
+};
+
+static struct clk clk_usb_host = {
+ .name = "usb-host",
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_USBH,
+};
+
+static struct clk clk_usb_device = {
+ .name = "usb-device",
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_USBD,
+};
+
+static struct clk clk_timers = {
+ .name = "timers",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_PWMT,
+};
+
+struct clk s3c24xx_clk_uart0 = {
+ .name = "uart",
+ .devname = "s3c2410-uart.0",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART0,
+};
+
+struct clk s3c24xx_clk_uart1 = {
+ .name = "uart",
+ .devname = "s3c2410-uart.1",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART1,
+};
+
+struct clk s3c24xx_clk_uart2 = {
+ .name = "uart",
+ .devname = "s3c2410-uart.2",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART2,
+};
+
+static struct clk clk_rtc = {
+ .name = "rtc",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_RTC,
+};
+
+static struct clk clk_watchdog = {
+ .name = "watchdog",
+ .parent = &clk_p,
+ .ctrlbit = 0,
+};
+
+static struct clk clk_usb_bus_host = {
+ .name = "usb-bus-host",
+ .parent = &clk_usb_bus,
+};
+
+static struct clk clk_usb_bus_gadget = {
+ .name = "usb-bus-gadget",
+ .parent = &clk_usb_bus,
+};
+
+static struct clk *init_clocks[] = {
+ &clk_lcd,
+ &clk_gpio,
+ &clk_usb_host,
+ &clk_usb_device,
+ &clk_timers,
+ &s3c24xx_clk_uart0,
+ &s3c24xx_clk_uart1,
+ &s3c24xx_clk_uart2,
+ &clk_rtc,
+ &clk_watchdog,
+ &clk_usb_bus_host,
+ &clk_usb_bus_gadget,
};

/* s3c2410_baseclk_add()
@@ -195,7 +230,6 @@ int __init s3c2410_baseclk_add(void)
{
unsigned long clkslow = __raw_readl(S3C2410_CLKSLOW);
unsigned long clkcon = __raw_readl(S3C2410_CLKCON);
- struct clk *clkp;
struct clk *xtal;
int ret;
int ptr;
@@ -207,8 +241,9 @@ int __init s3c2410_baseclk_add(void)

/* register clocks from clock array */

- clkp = init_clocks;
- for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++, clkp++) {
+ for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++) {
+ struct clk *clkp = init_clocks[ptr];
+
/* ensure that we note the clock state */

clkp->usage = clkcon & clkp->ctrlbit ? 1 : 0;
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2440.c b/arch/arm/mach-s3c24xx/clock-s3c2440.c
index 1069b56..aaf006d 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2440.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2440.c
@@ -166,6 +166,9 @@ static struct clk_lookup s3c2440_clk_lookup[] = {
CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk),
CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
CLKDEV_INIT(NULL, "clk_uart_baud3", &s3c2440_clk_fclk_n),
+ CLKDEV_INIT("s3c2440-uart.0", "uart", &s3c24xx_clk_uart0),
+ CLKDEV_INIT("s3c2440-uart.1", "uart", &s3c24xx_clk_uart1),
+ CLKDEV_INIT("s3c2440-uart.2", "uart", &s3c24xx_clk_uart2),
CLKDEV_INIT("s3c2440-camif", "camera", &s3c2440_clk_cam_upll),
};

diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index a62753d..df45d6e 100644
--- a/arch/arm/plat-samsung/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
@@ -83,6 +83,11 @@ extern struct clk clk_ext;
extern struct clksrc_clk clk_epllref;
extern struct clksrc_clk clk_esysclk;

+/* S3C24XX UART clocks */
+extern struct clk s3c24xx_clk_uart0;
+extern struct clk s3c24xx_clk_uart1;
+extern struct clk s3c24xx_clk_uart2;
+
/* S3C64XX specific clocks */
extern struct clk clk_h2;
extern struct clk clk_27m;
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 1e1e18c..2a75ff2 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -7,6 +7,7 @@
* Copyright (C) 2008, 2009 Wind River Systems
* written by Ralf Baechle <ralf@linux-mips.org>
*/
+#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/console.h>
@@ -712,7 +713,7 @@ void __init prom_init(void)
if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
pr_info("Skipping L2 locking due to reduced L2 cache size\n");
} else {
- uint32_t ebase = read_c0_ebase() & 0x3ffff000;
+ uint32_t __maybe_unused ebase = read_c0_ebase() & 0x3ffff000;
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
/* TLB refill */
cvmx_l2c_lock_mem_region(ebase, 0x100);
diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c
index 961b87f..f76389a 100644
--- a/arch/sparc/kernel/asm-offsets.c
+++ b/arch/sparc/kernel/asm-offsets.c
@@ -49,6 +49,8 @@ int foo(void)
DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
BLANK();
DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));
+ BLANK();
+ DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));

/* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */
return 0;
diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
index 44aad32..969f964 100644
--- a/arch/sparc/mm/hypersparc.S
+++ b/arch/sparc/mm/hypersparc.S
@@ -74,7 +74,7 @@ hypersparc_flush_cache_mm_out:

/* The things we do for performance... */
hypersparc_flush_cache_range:
- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
#ifndef CONFIG_SMP
ld [%o0 + AOFF_mm_context], %g1
cmp %g1, -1
@@ -163,7 +163,7 @@ hypersparc_flush_cache_range_out:
*/
/* Verified, my ass... */
hypersparc_flush_cache_page:
- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
ld [%o0 + AOFF_mm_context], %g2
#ifndef CONFIG_SMP
cmp %g2, -1
@@ -284,7 +284,7 @@ hypersparc_flush_tlb_mm_out:
sta %g5, [%g1] ASI_M_MMUREGS

hypersparc_flush_tlb_range:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5
@@ -307,7 +307,7 @@ hypersparc_flush_tlb_range_out:
sta %g5, [%g1] ASI_M_MMUREGS

hypersparc_flush_tlb_page:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o3
andn %o1, (PAGE_SIZE - 1), %o1
diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S
index c801c39..5d2b88d 100644
--- a/arch/sparc/mm/swift.S
+++ b/arch/sparc/mm/swift.S
@@ -105,7 +105,7 @@ swift_flush_cache_mm_out:

.globl swift_flush_cache_range
swift_flush_cache_range:
- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
sub %o2, %o1, %o2
sethi %hi(4096), %o3
cmp %o2, %o3
@@ -116,7 +116,7 @@ swift_flush_cache_range:

.globl swift_flush_cache_page
swift_flush_cache_page:
- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
70:
ld [%o0 + AOFF_mm_context], %g2
cmp %g2, -1
@@ -219,7 +219,7 @@ swift_flush_sig_insns:
.globl swift_flush_tlb_range
.globl swift_flush_tlb_all
swift_flush_tlb_range:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
swift_flush_tlb_mm:
ld [%o0 + AOFF_mm_context], %g2
cmp %g2, -1
@@ -233,7 +233,7 @@ swift_flush_tlb_all_out:

.globl swift_flush_tlb_page
swift_flush_tlb_page:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o3
andn %o1, (PAGE_SIZE - 1), %o1
diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S
index 4e55e8f..bf10a34 100644
--- a/arch/sparc/mm/tsunami.S
+++ b/arch/sparc/mm/tsunami.S
@@ -24,7 +24,7 @@
/* Sliiick... */
tsunami_flush_cache_page:
tsunami_flush_cache_range:
- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
tsunami_flush_cache_mm:
ld [%o0 + AOFF_mm_context], %g2
cmp %g2, -1
@@ -46,7 +46,7 @@ tsunami_flush_sig_insns:

/* More slick stuff... */
tsunami_flush_tlb_range:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
tsunami_flush_tlb_mm:
ld [%o0 + AOFF_mm_context], %g2
cmp %g2, -1
@@ -65,7 +65,7 @@ tsunami_flush_tlb_out:

/* This one can be done in a fine grained manner... */
tsunami_flush_tlb_page:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o3
andn %o1, (PAGE_SIZE - 1), %o1
diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
index bf8ee06..852257f 100644
--- a/arch/sparc/mm/viking.S
+++ b/arch/sparc/mm/viking.S
@@ -108,7 +108,7 @@ viking_mxcc_flush_page:
viking_flush_cache_page:
viking_flush_cache_range:
#ifndef CONFIG_SMP
- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
#endif
viking_flush_cache_mm:
#ifndef CONFIG_SMP
@@ -148,7 +148,7 @@ viking_flush_tlb_mm:
#endif

viking_flush_tlb_range:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5
@@ -173,7 +173,7 @@ viking_flush_tlb_range:
#endif

viking_flush_tlb_page:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5
@@ -239,7 +239,7 @@ sun4dsmp_flush_tlb_range:
tst %g5
bne 3f
mov SRMMU_CTX_REG, %g1
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
ld [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5
sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
@@ -265,7 +265,7 @@ sun4dsmp_flush_tlb_page:
tst %g5
bne 2f
mov SRMMU_CTX_REG, %g1
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
ld [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5
and %o1, PAGE_MASK, %o1
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 27e86d9..89e1090 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -48,6 +48,8 @@ static LIST_HEAD(mc_devices);
*/
static void const *edac_mc_owner;

+static struct bus_type mc_bus[EDAC_MAX_MCS];
+
unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
unsigned len)
{
@@ -723,6 +725,11 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
int ret = -EINVAL;
edac_dbg(0, "\n");

+ if (mci->mc_idx >= EDAC_MAX_MCS) {
+ pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx);
+ return -ENODEV;
+ }
+
#ifdef CONFIG_EDAC_DEBUG
if (edac_debug_level >= 3)
edac_mc_dump_mci(mci);
@@ -762,6 +769,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
/* set load time so that error rate can be tracked */
mci->start_time = jiffies;

+ mci->bus = &mc_bus[mci->mc_idx];
+
if (edac_create_sysfs_mci_device(mci)) {
edac_mc_printk(mci, KERN_WARNING,
"failed to create sysfs device\n");
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 67610a6..c4d700a 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -370,7 +370,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
return -ENODEV;

csrow->dev.type = &csrow_attr_type;
- csrow->dev.bus = &mci->bus;
+ csrow->dev.bus = mci->bus;
device_initialize(&csrow->dev);
csrow->dev.parent = &mci->dev;
csrow->mci = mci;
@@ -605,7 +605,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
dimm->mci = mci;

dimm->dev.type = &dimm_attr_type;
- dimm->dev.bus = &mci->bus;
+ dimm->dev.bus = mci->bus;
device_initialize(&dimm->dev);

dimm->dev.parent = &mci->dev;
@@ -975,11 +975,13 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
* The memory controller needs its own bus, in order to avoid
* namespace conflicts at /sys/bus/edac.
*/
- mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
- if (!mci->bus.name)
+ mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
+ if (!mci->bus->name)
return -ENOMEM;
- edac_dbg(0, "creating bus %s\n", mci->bus.name);
- err = bus_register(&mci->bus);
+
+ edac_dbg(0, "creating bus %s\n", mci->bus->name);
+
+ err = bus_register(mci->bus);
if (err < 0)
return err;

@@ -988,7 +990,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
device_initialize(&mci->dev);

mci->dev.parent = mci_pdev;
- mci->dev.bus = &mci->bus;
+ mci->dev.bus = mci->bus;
dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
dev_set_drvdata(&mci->dev, mci);
pm_runtime_forbid(&mci->dev);
@@ -997,8 +999,8 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
err = device_add(&mci->dev);
if (err < 0) {
edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
- bus_unregister(&mci->bus);
- kfree(mci->bus.name);
+ bus_unregister(mci->bus);
+ kfree(mci->bus->name);
return err;
}

@@ -1064,8 +1066,8 @@ fail:
}
fail2:
device_unregister(&mci->dev);
- bus_unregister(&mci->bus);
- kfree(mci->bus.name);
+ bus_unregister(mci->bus);
+ kfree(mci->bus->name);
return err;
}

@@ -1098,8 +1100,8 @@ void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
device_unregister(&mci->dev);
- bus_unregister(&mci->bus);
- kfree(mci->bus.name);
+ bus_unregister(mci->bus);
+ kfree(mci->bus->name);
}

static void mc_attr_release(struct device *dev)
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 1b63517..157b934 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -974,7 +974,7 @@ static int i5100_setup_debugfs(struct mem_ctl_info *mci)
if (!i5100_debugfs)
return -ENODEV;

- priv->debugfs = debugfs_create_dir(mci->bus.name, i5100_debugfs);
+ priv->debugfs = debugfs_create_dir(mci->bus->name, i5100_debugfs);

if (!priv->debugfs)
return -ENOMEM;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index d3e15b4..c42b14b 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -437,6 +437,7 @@ struct bcache_device {

/* If nonzero, we're detaching/unregistering from cache set */
atomic_t detaching;
+ int flush_done;

atomic_long_t sectors_dirty;
unsigned long sectors_dirty_gc;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 7a5658f..7b687a6 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1419,8 +1419,10 @@ static void btree_gc_start(struct cache_set *c)
for_each_cache(ca, c, i)
for_each_bucket(b, ca) {
b->gc_gen = b->gen;
- if (!atomic_read(&b->pin))
+ if (!atomic_read(&b->pin)) {
SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ SET_GC_SECTORS_USED(b, 0);
+ }
}

for (d = c->devices;
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index bd05a9a..9aba201 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -66,16 +66,18 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
} else {
struct closure *parent = cl->parent;
struct closure_waitlist *wait = closure_waitlist(cl);
+ closure_fn *destructor = cl->fn;

closure_debug_destroy(cl);

+ smp_mb();
atomic_set(&cl->remaining, -1);

if (wait)
closure_wake_up(wait);

- if (cl->fn)
- cl->fn(cl);
+ if (destructor)
+ destructor(cl);

if (parent)
closure_put(parent);
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 8c8dfdc..8a54d3b 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -182,9 +182,14 @@ bsearch:
pr_debug("starting binary search, l %u r %u", l, r);

while (l + 1 < r) {
+ seq = list_entry(list->prev, struct journal_replay,
+ list)->j.seq;
+
m = (l + r) >> 1;
+ read_bucket(m);

- if (read_bucket(m))
+ if (seq != list_entry(list->prev, struct journal_replay,
+ list)->j.seq)
l = m;
else
r = m;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index e5ff12e..2f36743 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -489,6 +489,12 @@ static void bch_insert_data_loop(struct closure *cl)
bch_queue_gc(op->c);
}

+ /*
+ * Journal writes are marked REQ_FLUSH; if the original write was a
+ * flush, it'll wait on the journal write.
+ */
+ bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
+
do {
unsigned i;
struct bkey *k;
@@ -716,7 +722,7 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
s->task = current;
s->orig_bio = bio;
s->write = (bio->bi_rw & REQ_WRITE) != 0;
- s->op.flush_journal = (bio->bi_rw & REQ_FLUSH) != 0;
+ s->op.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0;
s->recoverable = 1;
s->start_time = jiffies;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f88e2b6..b4713ce 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -704,7 +704,8 @@ static void bcache_device_detach(struct bcache_device *d)
atomic_set(&d->detaching, 0);
}

- bcache_device_unlink(d);
+ if (!d->flush_done)
+ bcache_device_unlink(d);

d->c->devices[d->id] = NULL;
closure_put(&d->c->caching);
@@ -781,6 +782,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);

+ blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
+
return 0;
}

@@ -1014,6 +1017,14 @@ static void cached_dev_flush(struct closure *cl)
struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
struct bcache_device *d = &dc->disk;

+ mutex_lock(&bch_register_lock);
+ d->flush_done = 1;
+
+ if (d->c)
+ bcache_device_unlink(d);
+
+ mutex_unlock(&bch_register_lock);
+
bch_cache_accounting_destroy(&dc->accounting);
kobject_del(&d->kobj);

@@ -1303,18 +1314,22 @@ static void cache_set_flush(struct closure *cl)
static void __cache_set_unregister(struct closure *cl)
{
struct cache_set *c = container_of(cl, struct cache_set, caching);
- struct cached_dev *dc, *t;
+ struct cached_dev *dc;
size_t i;

mutex_lock(&bch_register_lock);

- if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
- list_for_each_entry_safe(dc, t, &c->cached_devs, list)
- bch_cached_dev_detach(dc);
-
for (i = 0; i < c->nr_uuids; i++)
- if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i]))
- bcache_device_stop(c->devices[i]);
+ if (c->devices[i]) {
+ if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
+ test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
+ dc = container_of(c->devices[i],
+ struct cached_dev, disk);
+ bch_cached_dev_detach(dc);
+ } else {
+ bcache_device_stop(c->devices[i]);
+ }
+ }

mutex_unlock(&bch_register_lock);

diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index a1a3a51..0b4616b 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -377,10 +377,8 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2,
buffer2_len);
}
- if (ret < 0) {
- dvb_ringbuffer_flush(&dmxdevfilter->buffer);
+ if (ret < 0)
dmxdevfilter->buffer.error = ret;
- }
if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
dmxdevfilter->state = DMXDEV_STATE_DONE;
spin_unlock(&dmxdevfilter->dev->lock);
@@ -416,10 +414,8 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
if (ret == buffer1_len)
ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
- if (ret < 0) {
- dvb_ringbuffer_flush(buffer);
+ if (ret < 0)
buffer->error = ret;
- }
spin_unlock(&dmxdevfilter->dev->lock);
wake_up(&buffer->queue);
return 0;
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index 10460fd..dbcdfbf 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -172,7 +172,9 @@ static void saa7134_irq_alsa_done(struct saa7134_dev *dev,
dprintk("irq: overrun [full=%d/%d] - Blocks in %d\n",dev->dmasound.read_count,
dev->dmasound.bufsize, dev->dmasound.blocks);
spin_unlock(&dev->slock);
+ snd_pcm_stream_lock(dev->dmasound.substream);
snd_pcm_stop(dev->dmasound.substream,SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock(dev->dmasound.substream);
return;
}

diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 42aa54a..b710c6b 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -185,6 +185,8 @@ static int __init dummy_init_module(void)

rtnl_lock();
err = __rtnl_link_register(&dummy_link_ops);
+ if (err < 0)
+ goto out;

for (i = 0; i < numdummies && !err; i++) {
err = dummy_init_one();
@@ -192,6 +194,8 @@ static int __init dummy_init_module(void)
}
if (err < 0)
__rtnl_link_unregister(&dummy_link_ops);
+
+out:
rtnl_unlock();

return err;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 418de8b..d30085c 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1303,6 +1303,8 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

SET_NETDEV_DEV(netdev, &pdev->dev);
alx = netdev_priv(netdev);
+ spin_lock_init(&alx->hw.mdio_lock);
+ spin_lock_init(&alx->irq_lock);
alx->dev = netdev;
alx->hw.pdev = pdev;
alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
@@ -1385,9 +1387,6 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

INIT_WORK(&alx->link_check_wk, alx_link_check);
INIT_WORK(&alx->reset_wk, alx_reset);
- spin_lock_init(&alx->hw.mdio_lock);
- spin_lock_init(&alx->irq_lock);
-
netif_carrier_off(netdev);

err = register_netdev(netdev);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 0688bb8..c23bb02 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1665,8 +1665,8 @@ check_sum:
return 0;
}

-static void atl1e_tx_map(struct atl1e_adapter *adapter,
- struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
+static int atl1e_tx_map(struct atl1e_adapter *adapter,
+ struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
{
struct atl1e_tpd_desc *use_tpd = NULL;
struct atl1e_tx_buffer *tx_buffer = NULL;
@@ -1677,6 +1677,8 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
u16 nr_frags;
u16 f;
int segment;
+ int ring_start = adapter->tx_ring.next_to_use;
+ int ring_end;

nr_frags = skb_shinfo(skb)->nr_frags;
segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
@@ -1689,6 +1691,9 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
tx_buffer->length = map_len;
tx_buffer->dma = pci_map_single(adapter->pdev,
skb->data, hdr_len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma))
+ return -ENOSPC;
+
ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
mapped_len += map_len;
use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
@@ -1715,6 +1720,22 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
tx_buffer->dma =
pci_map_single(adapter->pdev, skb->data + mapped_len,
map_len, PCI_DMA_TODEVICE);
+
+ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
+ /* We need to unwind the mappings we've done */
+ ring_end = adapter->tx_ring.next_to_use;
+ adapter->tx_ring.next_to_use = ring_start;
+ while (adapter->tx_ring.next_to_use != ring_end) {
+ tpd = atl1e_get_tpd(adapter);
+ tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
+ pci_unmap_single(adapter->pdev, tx_buffer->dma,
+ tx_buffer->length, PCI_DMA_TODEVICE);
+ }
+ /* Reset the tx rings next pointer */
+ adapter->tx_ring.next_to_use = ring_start;
+ return -ENOSPC;
+ }
+
ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
mapped_len += map_len;
use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
@@ -1750,6 +1771,23 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
(i * MAX_TX_BUF_LEN),
tx_buffer->length,
DMA_TO_DEVICE);
+
+ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
+ /* We need to unwind the mappings we've done */
+ ring_end = adapter->tx_ring.next_to_use;
+ adapter->tx_ring.next_to_use = ring_start;
+ while (adapter->tx_ring.next_to_use != ring_end) {
+ tpd = atl1e_get_tpd(adapter);
+ tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
+ dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma,
+ tx_buffer->length, DMA_TO_DEVICE);
+ }
+
+ /* Reset the ring next to use pointer */
+ adapter->tx_ring.next_to_use = ring_start;
+ return -ENOSPC;
+ }
+
ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
@@ -1767,6 +1805,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
/* The last buffer info contain the skb address,
so it will be free after unmap */
tx_buffer->skb = skb;
+ return 0;
}

static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
@@ -1834,10 +1873,15 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}

- atl1e_tx_map(adapter, skb, tpd);
+ if (atl1e_tx_map(adapter, skb, tpd)) {
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
atl1e_tx_queue(adapter, tpd_req, tpd);

netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+out:
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c89aa41..b4e0dc8 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1070,7 +1070,7 @@ static void macb_configure_dma(struct macb *bp)
static void macb_configure_caps(struct macb *bp)
{
if (macb_is_gem(bp)) {
- if (GEM_BF(IRQCOR, gem_readl(bp, DCFG1)) == 0)
+ if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
}
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a0b4be5..6e43426 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -782,16 +782,22 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,

if (vlan_tx_tag_present(skb))
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
- else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
- vlan_tag = adapter->pvid;
+
+ if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
+ if (!vlan_tag)
+ vlan_tag = adapter->pvid;
+ /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
+ * skip VLAN insertion
+ */
+ if (skip_hw_vlan)
+ *skip_hw_vlan = true;
+ }

if (vlan_tag) {
skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
if (unlikely(!skb))
return skb;
skb->vlan_tci = 0;
- if (skip_hw_vlan)
- *skip_hw_vlan = true;
}

/* Insert the outer VLAN, if any */
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index a7dfe36..5173eaa 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -282,9 +282,9 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
}

/* Recycle the pages that are used by buffers that have just been received. */
-static void efx_recycle_rx_buffers(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- unsigned int n_frags)
+static void efx_recycle_rx_pages(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
{
struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

@@ -294,6 +294,20 @@ static void efx_recycle_rx_buffers(struct efx_channel *channel,
} while (--n_frags);
}

+static void efx_discard_rx_packet(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+ efx_recycle_rx_pages(channel, rx_buf, n_frags);
+
+ do {
+ efx_free_rx_buffer(rx_buf);
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--n_frags);
+}
+
/**
* efx_fast_push_rx_descriptors - push new RX descriptors quickly
* @rx_queue: RX descriptor queue
@@ -533,8 +547,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
*/
if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
efx_rx_flush_packet(channel);
- put_page(rx_buf->page);
- efx_recycle_rx_buffers(channel, rx_buf, n_frags);
+ efx_discard_rx_packet(channel, rx_buf, n_frags);
return;
}

@@ -570,9 +583,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
}

- /* All fragments have been DMA-synced, so recycle buffers and pages. */
+ /* All fragments have been DMA-synced, so recycle pages. */
rx_buf = efx_rx_buffer(rx_queue, index);
- efx_recycle_rx_buffers(channel, rx_buf, n_frags);
+ efx_recycle_rx_pages(channel, rx_buf, n_frags);

/* Pipeline receives so that we give time for packet headers to be
* prefetched into cache.
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 1df0ff3..3df5684 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1239,6 +1239,8 @@ static int vnet_port_remove(struct vio_dev *vdev)
dev_set_drvdata(&vdev->dev, NULL);

kfree(port);
+
+ unregister_netdev(vp->dev);
}
return 0;
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 4dccead..23a0fff 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -431,8 +431,8 @@ static int netvsc_probe(struct hv_device *dev,
net->netdev_ops = &device_ops;

/* TODO: Add GSO and Checksum offload */
- net->hw_features = NETIF_F_SG;
- net->features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX;
+ net->hw_features = 0;
+ net->features = NETIF_F_HW_VLAN_CTAG_TX;

SET_ETHTOOL_OPS(net, &ethtool_ops);
SET_NETDEV_DEV(net, &dev->device);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index dc9f6a4..a3bed28 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -291,11 +291,17 @@ static int __init ifb_init_module(void)

rtnl_lock();
err = __rtnl_link_register(&ifb_link_ops);
+ if (err < 0)
+ goto out;

- for (i = 0; i < numifbs && !err; i++)
+ for (i = 0; i < numifbs && !err; i++) {
err = ifb_init_one(i);
+ cond_resched();
+ }
if (err)
__rtnl_link_unregister(&ifb_link_ops);
+
+out:
rtnl_unlock();

return err;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index b6dd6a7..523d6b2 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -633,6 +633,28 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
return 0;
}

+static unsigned long iov_pages(const struct iovec *iv, int offset,
+ unsigned long nr_segs)
+{
+ unsigned long seg, base;
+ int pages = 0, len, size;
+
+ while (nr_segs && (offset >= iv->iov_len)) {
+ offset -= iv->iov_len;
+ ++iv;
+ --nr_segs;
+ }
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ base = (unsigned long)iv[seg].iov_base + offset;
+ len = iv[seg].iov_len - offset;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+ pages += size;
+ offset = 0;
+ }
+
+ return pages;
+}

/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
@@ -647,6 +669,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
int vnet_hdr_len = 0;
int copylen = 0;
bool zerocopy = false;
+ size_t linear;

if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = q->vnet_hdr_sz;
@@ -678,42 +701,35 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (unlikely(count > UIO_MAXIOV))
goto err;

- if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
- zerocopy = true;
+ if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
+ copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
+ linear = copylen;
+ if (iov_pages(iv, vnet_hdr_len + copylen, count)
+ <= MAX_SKB_FRAGS)
+ zerocopy = true;
+ }

- if (zerocopy) {
- /* Userspace may produce vectors with count greater than
- * MAX_SKB_FRAGS, so we need to linearize parts of the skb
- * to let the rest of data to be fit in the frags.
- */
- if (count > MAX_SKB_FRAGS) {
- copylen = iov_length(iv, count - MAX_SKB_FRAGS);
- if (copylen < vnet_hdr_len)
- copylen = 0;
- else
- copylen -= vnet_hdr_len;
- }
- /* There are 256 bytes to be copied in skb, so there is enough
- * room for skb expand head in case it is used.
- * The rest buffer is mapped from userspace.
- */
- if (copylen < vnet_hdr.hdr_len)
- copylen = vnet_hdr.hdr_len;
- if (!copylen)
- copylen = GOODCOPY_LEN;
- } else
+ if (!zerocopy) {
copylen = len;
+ linear = vnet_hdr.hdr_len;
+ }

skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
- vnet_hdr.hdr_len, noblock, &err);
+ linear, noblock, &err);
if (!skb)
goto err;

if (zerocopy)
err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
- else
+ else {
err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
len);
+ if (!err && m && m->msg_control) {
+ struct ubuf_info *uarg = m->msg_control;
+ uarg->callback(uarg, false);
+ }
+ }
+
if (err)
goto err_kfree;

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 9c61f87..2491eb2 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1037,6 +1037,29 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
return 0;
}

+static unsigned long iov_pages(const struct iovec *iv, int offset,
+ unsigned long nr_segs)
+{
+ unsigned long seg, base;
+ int pages = 0, len, size;
+
+ while (nr_segs && (offset >= iv->iov_len)) {
+ offset -= iv->iov_len;
+ ++iv;
+ --nr_segs;
+ }
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ base = (unsigned long)iv[seg].iov_base + offset;
+ len = iv[seg].iov_len - offset;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+ pages += size;
+ offset = 0;
+ }
+
+ return pages;
+}
+
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
void *msg_control, const struct iovec *iv,
@@ -1044,7 +1067,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
- size_t len = total_len, align = NET_SKB_PAD;
+ size_t len = total_len, align = NET_SKB_PAD, linear;
struct virtio_net_hdr gso = { 0 };
int offset = 0;
int copylen;
@@ -1084,34 +1107,23 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
return -EINVAL;
}

- if (msg_control)
- zerocopy = true;
-
- if (zerocopy) {
- /* Userspace may produce vectors with count greater than
- * MAX_SKB_FRAGS, so we need to linearize parts of the skb
- * to let the rest of data to be fit in the frags.
- */
- if (count > MAX_SKB_FRAGS) {
- copylen = iov_length(iv, count - MAX_SKB_FRAGS);
- if (copylen < offset)
- copylen = 0;
- else
- copylen -= offset;
- } else
- copylen = 0;
- /* There are 256 bytes to be copied in skb, so there is enough
- * room for skb expand head in case it is used.
+ if (msg_control) {
+ /* There are 256 bytes to be copied in skb, so there is
+ * enough room for skb expand head in case it is used.
* The rest of the buffer is mapped from userspace.
*/
- if (copylen < gso.hdr_len)
- copylen = gso.hdr_len;
- if (!copylen)
- copylen = GOODCOPY_LEN;
- } else
+ copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
+ linear = copylen;
+ if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
+ zerocopy = true;
+ }
+
+ if (!zerocopy) {
copylen = len;
+ linear = gso.hdr_len;
+ }

- skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock);
+ skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
tun->dev->stats.rx_dropped++;
@@ -1120,8 +1132,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,

if (zerocopy)
err = zerocopy_sg_from_iovec(skb, iv, offset, count);
- else
+ else {
err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
+ if (!err && msg_control) {
+ struct ubuf_info *uarg = msg_control;
+ uarg->callback(uarg, false);
+ }
+ }

if (err) {
tun->dev->stats.rx_dropped++;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c9e0038..42d670a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -602,7 +602,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
container_of(napi, struct receive_queue, napi);
struct virtnet_info *vi = rq->vq->vdev->priv;
void *buf;
- unsigned int len, received = 0;
+ unsigned int r, len, received = 0;

again:
while (received < budget &&
@@ -619,8 +619,9 @@ again:

/* Out of packets? */
if (received < budget) {
+ r = virtqueue_enable_cb_prepare(rq->vq);
napi_complete(napi);
- if (unlikely(!virtqueue_enable_cb(rq->vq)) &&
+ if (unlikely(virtqueue_poll(rq->vq, r)) &&
napi_schedule_prep(napi)) {
virtqueue_disable_cb(rq->vq);
__napi_schedule(napi);
1312     diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
1313     index 809b7a3..5d3b0f0 100644
1314     --- a/drivers/rapidio/switches/idt_gen2.c
1315     +++ b/drivers/rapidio/switches/idt_gen2.c
1316     @@ -15,6 +15,8 @@
1317     #include <linux/rio_drv.h>
1318     #include <linux/rio_ids.h>
1319     #include <linux/delay.h>
1320     +
1321     +#include <asm/page.h>
1322     #include "../rio.h"
1323    
1324     #define LOCAL_RTE_CONF_DESTID_SEL 0x010070
1325     diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
1326     index 3a9ddae..89178b8 100644
1327     --- a/drivers/scsi/megaraid/megaraid_sas_base.c
1328     +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
1329     @@ -4852,10 +4852,12 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
1330     sense, sense_handle);
1331     }
1332    
1333     - for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) {
1334     - dma_free_coherent(&instance->pdev->dev,
1335     - kern_sge32[i].length,
1336     - kbuff_arr[i], kern_sge32[i].phys_addr);
1337     + for (i = 0; i < ioc->sge_count; i++) {
1338     + if (kbuff_arr[i])
1339     + dma_free_coherent(&instance->pdev->dev,
1340     + kern_sge32[i].length,
1341     + kbuff_arr[i],
1342     + kern_sge32[i].phys_addr);
1343     }
1344    
1345     megasas_return_cmd(instance, cmd);
1346     diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1347     index dcbf7c8..f8c4b85 100644
1348     --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1349     +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1350     @@ -1273,6 +1273,7 @@ _scsih_slave_alloc(struct scsi_device *sdev)
1351     struct MPT3SAS_DEVICE *sas_device_priv_data;
1352     struct scsi_target *starget;
1353     struct _raid_device *raid_device;
1354     + struct _sas_device *sas_device;
1355     unsigned long flags;
1356    
1357     sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
1358     @@ -1301,6 +1302,19 @@ _scsih_slave_alloc(struct scsi_device *sdev)
1359     spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1360     }
1361    
1362     + if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1363     + spin_lock_irqsave(&ioc->sas_device_lock, flags);
1364     + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
1365     + sas_target_priv_data->sas_address);
1366     + if (sas_device && (sas_device->starget == NULL)) {
1367     + sdev_printk(KERN_INFO, sdev,
1368     + "%s : sas_device->starget set to starget @ %d\n",
1369     + __func__, __LINE__);
1370     + sas_device->starget = starget;
1371     + }
1372     + spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1373     + }
1374     +
1375     return 0;
1376     }
1377    
1378     @@ -6392,7 +6406,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
1379     handle))) {
1380     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1381     MPI2_IOCSTATUS_MASK;
1382     - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1383     + if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
1384     break;
1385     handle = le16_to_cpu(sas_device_pg0.DevHandle);
1386     device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
1387     @@ -6494,7 +6508,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
1388     &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
1389     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1390     MPI2_IOCSTATUS_MASK;
1391     - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1392     + if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
1393     break;
1394     handle = le16_to_cpu(volume_pg1.DevHandle);
1395    
1396     @@ -6518,7 +6532,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
1397     phys_disk_num))) {
1398     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1399     MPI2_IOCSTATUS_MASK;
1400     - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1401     + if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
1402     break;
1403     phys_disk_num = pd_pg0.PhysDiskNum;
1404     handle = le16_to_cpu(pd_pg0.DevHandle);
1405     @@ -6597,7 +6611,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
1406    
1407     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1408     MPI2_IOCSTATUS_MASK;
1409     - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1410     + if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
1411     break;
1412    
1413     handle = le16_to_cpu(expander_pg0.DevHandle);
1414     @@ -6742,8 +6756,6 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
1415     MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
1416     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1417     MPI2_IOCSTATUS_MASK;
1418     - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1419     - break;
1420     if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1421     pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \
1422     "ioc_status(0x%04x), loginfo(0x%08x)\n",
1423     @@ -6787,8 +6799,6 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
1424     phys_disk_num))) {
1425     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1426     MPI2_IOCSTATUS_MASK;
1427     - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1428     - break;
1429     if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1430     pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\
1431     "ioc_status(0x%04x), loginfo(0x%08x)\n",
1432     @@ -6854,8 +6864,6 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
1433     &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
1434     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1435     MPI2_IOCSTATUS_MASK;
1436     - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1437     - break;
1438     if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1439     pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
1440     "ioc_status(0x%04x), loginfo(0x%08x)\n",
1441     @@ -6914,8 +6922,6 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
1442     handle))) {
1443     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1444     MPI2_IOCSTATUS_MASK;
1445     - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1446     - break;
1447     if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1448     pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\
1449     " ioc_status(0x%04x), loginfo(0x%08x)\n",
1450     diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1451     index 2c65955..c90d960 100644
1452     --- a/drivers/usb/serial/cp210x.c
1453     +++ b/drivers/usb/serial/cp210x.c
1454     @@ -53,6 +53,7 @@ static const struct usb_device_id id_table[] = {
1455     { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
1456     { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
1457     { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
1458     + { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
1459     { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
1460     { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
1461     { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
1462     @@ -118,6 +119,8 @@ static const struct usb_device_id id_table[] = {
1463     { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
1464     { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
1465     { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
1466     + { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
1467     + { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
1468     { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
1469     { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
1470     { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
1471     @@ -148,6 +151,7 @@ static const struct usb_device_id id_table[] = {
1472     { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
1473     { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
1474     { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
1475     + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
1476     { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
1477     { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
1478     { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
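
Each entry added above is a plain vendor/product match for the cp210x driver. As a
minimal sketch of the mechanism (names illustrative, not part of this patch), an
id_table entry expands to a struct usb_device_id, and exporting the table lets the
module autoload when a matching device is plugged in:

    #include <linux/module.h>
    #include <linux/usb.h>

    static const struct usb_device_id demo_ids[] = {
            { USB_DEVICE(0x10C4, 0xEA60) },  /* Silicon Labs factory default */
            { }                              /* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, demo_ids);
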
1479     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1480     index 5dd857d..1cf6f12 100644
1481     --- a/drivers/usb/serial/option.c
1482     +++ b/drivers/usb/serial/option.c
1483     @@ -341,17 +341,12 @@ static void option_instat_callback(struct urb *urb);
1484     #define OLIVETTI_VENDOR_ID 0x0b3c
1485     #define OLIVETTI_PRODUCT_OLICARD100 0xc000
1486     #define OLIVETTI_PRODUCT_OLICARD145 0xc003
1487     +#define OLIVETTI_PRODUCT_OLICARD200 0xc005
1488    
1489     /* Celot products */
1490     #define CELOT_VENDOR_ID 0x211f
1491     #define CELOT_PRODUCT_CT680M 0x6801
1492    
1493     -/* ONDA Communication vendor id */
1494     -#define ONDA_VENDOR_ID 0x1ee8
1495     -
1496     -/* ONDA MT825UP HSDPA 14.2 modem */
1497     -#define ONDA_MT825UP 0x000b
1498     -
1499     /* Samsung products */
1500     #define SAMSUNG_VENDOR_ID 0x04e8
1501     #define SAMSUNG_PRODUCT_GT_B3730 0x6889
1502     @@ -444,7 +439,8 @@ static void option_instat_callback(struct urb *urb);
1503    
1504     /* Hyundai Petatel Inc. products */
1505     #define PETATEL_VENDOR_ID 0x1ff4
1506     -#define PETATEL_PRODUCT_NP10T 0x600e
1507     +#define PETATEL_PRODUCT_NP10T_600A 0x600a
1508     +#define PETATEL_PRODUCT_NP10T_600E 0x600e
1509    
1510     /* TP-LINK Incorporated products */
1511     #define TPLINK_VENDOR_ID 0x2357
1512     @@ -782,6 +778,7 @@ static const struct usb_device_id option_ids[] = {
1513     { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
1514     { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
1515     { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1516     + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1517     { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1518     { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
1519     { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
1520     @@ -817,7 +814,8 @@ static const struct usb_device_id option_ids[] = {
1521     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
1522     .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1523     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
1524     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
1525     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
1526     + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1527     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
1528     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
1529     .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1530     @@ -1256,8 +1254,8 @@ static const struct usb_device_id option_ids[] = {
1531    
1532     { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
1533     { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
1534     + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
1535     { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
1536     - { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
1537     { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1538     { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
1539     { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
1540     @@ -1329,9 +1327,12 @@ static const struct usb_device_id option_ids[] = {
1541     { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
1542     { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
1543     { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
1544     - { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
1545     + { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
1546     + { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
1547     { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
1548     .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1549     + { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
1550     + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1551     { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
1552     { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
1553     { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
1554     @@ -1339,6 +1340,8 @@ static const struct usb_device_id option_ids[] = {
1555     { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
1556     { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
1557     { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
1558     + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1559     + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1560     { } /* Terminating entry */
1561     };
1562     MODULE_DEVICE_TABLE(usb, option_ids);
1563     diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
1564     index f80d3dd..8ca5ac7 100644
1565     --- a/drivers/vhost/net.c
1566     +++ b/drivers/vhost/net.c
1567     @@ -150,6 +150,11 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
1568     {
1569     kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
1570     wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
1571     +}
1572     +
1573     +static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
1574     +{
1575     + vhost_net_ubuf_put_and_wait(ubufs);
1576     kfree(ubufs);
1577     }
1578    
1579     @@ -948,7 +953,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
1580     mutex_unlock(&vq->mutex);
1581    
1582     if (oldubufs) {
1583     - vhost_net_ubuf_put_and_wait(oldubufs);
1584     + vhost_net_ubuf_put_wait_and_free(oldubufs);
1585     mutex_lock(&vq->mutex);
1586     vhost_zerocopy_signal_used(n, vq);
1587     mutex_unlock(&vq->mutex);
1588     @@ -966,7 +971,7 @@ err_used:
1589     rcu_assign_pointer(vq->private_data, oldsock);
1590     vhost_net_enable_vq(n, vq);
1591     if (ubufs)
1592     - vhost_net_ubuf_put_and_wait(ubufs);
1593     + vhost_net_ubuf_put_wait_and_free(ubufs);
1594     err_ubufs:
1595     fput(sock->file);
1596     err_vq:
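
The helper split above separates "wait for the last reference" from "free the
memory": vhost_net_ubuf_put_and_wait() no longer frees, presumably so callers
that only need to synchronize can keep using it, while the two call sites that
own the object free it through the new wrapper. The generic shape of that
pattern (a sketch, assuming the object was set up with kref_init() and
init_waitqueue_head(); not the vhost code itself):

    struct demo_ref {
            struct kref kref;
            wait_queue_head_t wait;
    };

    static void demo_release(struct kref *kref)
    {
            struct demo_ref *r = container_of(kref, struct demo_ref, kref);
            wake_up(&r->wait);
    }

    static void demo_put_wait_and_free(struct demo_ref *r)
    {
            kref_put(&r->kref, demo_release);  /* drop our reference */
            wait_event(r->wait, !atomic_read(&r->kref.refcount));
            kfree(r);                          /* no user can remain now */
    }
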
1597     diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
1598     index 5217baf..37d58f8 100644
1599     --- a/drivers/virtio/virtio_ring.c
1600     +++ b/drivers/virtio/virtio_ring.c
1601     @@ -607,19 +607,21 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
1602     EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
1603    
1604     /**
1605     - * virtqueue_enable_cb - restart callbacks after disable_cb.
1606     + * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
1607     * @vq: the struct virtqueue we're talking about.
1608     *
1609     - * This re-enables callbacks; it returns "false" if there are pending
1610     - * buffers in the queue, to detect a possible race between the driver
1611     - * checking for more work, and enabling callbacks.
1612     + * This re-enables callbacks; it returns current queue state
1613     + * in an opaque unsigned value. This value should be later tested by
1614     + * virtqueue_poll, to detect a possible race between the driver checking for
1615     + * more work, and enabling callbacks.
1616     *
1617     * Caller must ensure we don't call this with other virtqueue
1618     * operations at the same time (except where noted).
1619     */
1620     -bool virtqueue_enable_cb(struct virtqueue *_vq)
1621     +unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
1622     {
1623     struct vring_virtqueue *vq = to_vvq(_vq);
1624     + u16 last_used_idx;
1625    
1626     START_USE(vq);
1627    
1628     @@ -629,15 +631,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
1629     * either clear the flags bit or point the event index at the next
1630     * entry. Always do both to keep code simple. */
1631     vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
1632     - vring_used_event(&vq->vring) = vq->last_used_idx;
1633     + vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
1634     + END_USE(vq);
1635     + return last_used_idx;
1636     +}
1637     +EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
1638     +
1639     +/**
1640     + * virtqueue_poll - query pending used buffers
1641     + * @vq: the struct virtqueue we're talking about.
1642     + * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
1643     + *
1644     + * Returns "true" if there are pending used buffers in the queue.
1645     + *
1646     + * This does not need to be serialized.
1647     + */
1648     +bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
1649     +{
1650     + struct vring_virtqueue *vq = to_vvq(_vq);
1651     +
1652     virtio_mb(vq->weak_barriers);
1653     - if (unlikely(more_used(vq))) {
1654     - END_USE(vq);
1655     - return false;
1656     - }
1657     + return (u16)last_used_idx != vq->vring.used->idx;
1658     +}
1659     +EXPORT_SYMBOL_GPL(virtqueue_poll);
1660    
1661     - END_USE(vq);
1662     - return true;
1663     +/**
1664     + * virtqueue_enable_cb - restart callbacks after disable_cb.
1665     + * @vq: the struct virtqueue we're talking about.
1666     + *
1667     + * This re-enables callbacks; it returns "false" if there are pending
1668     + * buffers in the queue, to detect a possible race between the driver
1669     + * checking for more work, and enabling callbacks.
1670     + *
1671     + * Caller must ensure we don't call this with other virtqueue
1672     + * operations at the same time (except where noted).
1673     + */
1674     +bool virtqueue_enable_cb(struct virtqueue *_vq)
1675     +{
1676     + unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
1677     + return !virtqueue_poll(_vq, last_used_idx);
1678     }
1679     EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
1680    
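The prepare/poll split closes the classic re-enable race without busy-looping in
the core. A minimal sketch of the call pattern the new API is meant for (driver
body illustrative):

    static void demo_drain(struct virtqueue *vq)
    {
            unsigned last_used;
            unsigned int len;

    again:
            virtqueue_disable_cb(vq);
            while (virtqueue_get_buf(vq, &len))
                    ;                       /* consume completed buffers */
            last_used = virtqueue_enable_cb_prepare(vq);
            if (virtqueue_poll(vq, last_used))
                    goto again;             /* raced with a late completion */
    }

As the tail of the hunk shows, virtqueue_enable_cb() is now just prepare
followed by poll, so existing callers keep their old semantics.
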
1681     diff --git a/fs/block_dev.c b/fs/block_dev.c
1682     index 2091db8..85f5c85 100644
1683     --- a/fs/block_dev.c
1684     +++ b/fs/block_dev.c
1685     @@ -58,17 +58,24 @@ static void bdev_inode_switch_bdi(struct inode *inode,
1686     struct backing_dev_info *dst)
1687     {
1688     struct backing_dev_info *old = inode->i_data.backing_dev_info;
1689     + bool wakeup_bdi = false;
1690    
1691     if (unlikely(dst == old)) /* deadlock avoidance */
1692     return;
1693     bdi_lock_two(&old->wb, &dst->wb);
1694     spin_lock(&inode->i_lock);
1695     inode->i_data.backing_dev_info = dst;
1696     - if (inode->i_state & I_DIRTY)
1697     + if (inode->i_state & I_DIRTY) {
1698     + if (bdi_cap_writeback_dirty(dst) && !wb_has_dirty_io(&dst->wb))
1699     + wakeup_bdi = true;
1700     list_move(&inode->i_wb_list, &dst->wb.b_dirty);
1701     + }
1702     spin_unlock(&inode->i_lock);
1703     spin_unlock(&old->wb.list_lock);
1704     spin_unlock(&dst->wb.list_lock);
1705     +
1706     + if (wakeup_bdi)
1707     + bdi_wakeup_thread_delayed(dst);
1708     }
1709    
1710     /* Kill _all_ buffers and pagecache , dirty or not.. */
1711     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
1712     index e49da58..fddf3d9 100644
1713     --- a/fs/ext4/extents.c
1714     +++ b/fs/ext4/extents.c
1715     @@ -4386,9 +4386,20 @@ void ext4_ext_truncate(handle_t *handle, struct inode *inode)
1716    
1717     last_block = (inode->i_size + sb->s_blocksize - 1)
1718     >> EXT4_BLOCK_SIZE_BITS(sb);
1719     +retry:
1720     err = ext4_es_remove_extent(inode, last_block,
1721     EXT_MAX_BLOCKS - last_block);
1722     + if (err == ENOMEM) {
1723     + cond_resched();
1724     + congestion_wait(BLK_RW_ASYNC, HZ/50);
1725     + goto retry;
1726     + }
1727     + if (err) {
1728     + ext4_std_error(inode->i_sb, err);
1729     + return;
1730     + }
1731     err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
1732     + ext4_std_error(inode->i_sb, err);
1733     }
1734    
1735     static void ext4_falloc_update_inode(struct inode *inode,
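
The retry loop above trades a transient allocation failure for a short backoff
instead of aborting the truncate. Its shape in isolation (a sketch for in-tree
context; the positive-ENOMEM test follows the hunk above):

    static void demo_remove_with_retry(struct inode *inode,
                                       ext4_lblk_t start, ext4_lblk_t len)
    {
            int err;

    retry:
            err = ext4_es_remove_extent(inode, start, len);
            if (err == ENOMEM) {
                    cond_resched();
                    congestion_wait(BLK_RW_ASYNC, HZ / 50);  /* ~20 ms backoff */
                    goto retry;
            }
    }
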
1736     diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
1737     index f3f783d..5b12746 100644
1738     --- a/fs/fuse/dir.c
1739     +++ b/fs/fuse/dir.c
1740     @@ -1225,13 +1225,29 @@ static int fuse_direntplus_link(struct file *file,
1741     if (name.name[1] == '.' && name.len == 2)
1742     return 0;
1743     }
1744     +
1745     + if (invalid_nodeid(o->nodeid))
1746     + return -EIO;
1747     + if (!fuse_valid_type(o->attr.mode))
1748     + return -EIO;
1749     +
1750     fc = get_fuse_conn(dir);
1751    
1752     name.hash = full_name_hash(name.name, name.len);
1753     dentry = d_lookup(parent, &name);
1754     - if (dentry && dentry->d_inode) {
1755     + if (dentry) {
1756     inode = dentry->d_inode;
1757     - if (get_node_id(inode) == o->nodeid) {
1758     + if (!inode) {
1759     + d_drop(dentry);
1760     + } else if (get_node_id(inode) != o->nodeid ||
1761     + ((o->attr.mode ^ inode->i_mode) & S_IFMT)) {
1762     + err = d_invalidate(dentry);
1763     + if (err)
1764     + goto out;
1765     + } else if (is_bad_inode(inode)) {
1766     + err = -EIO;
1767     + goto out;
1768     + } else {
1769     struct fuse_inode *fi;
1770     fi = get_fuse_inode(inode);
1771     spin_lock(&fc->lock);
1772     @@ -1244,9 +1260,6 @@ static int fuse_direntplus_link(struct file *file,
1773     */
1774     goto found;
1775     }
1776     - err = d_invalidate(dentry);
1777     - if (err)
1778     - goto out;
1779     dput(dentry);
1780     dentry = NULL;
1781     }
1782     @@ -1261,10 +1274,19 @@ static int fuse_direntplus_link(struct file *file,
1783     if (!inode)
1784     goto out;
1785    
1786     - alias = d_materialise_unique(dentry, inode);
1787     - err = PTR_ERR(alias);
1788     - if (IS_ERR(alias))
1789     - goto out;
1790     + if (S_ISDIR(inode->i_mode)) {
1791     + mutex_lock(&fc->inst_mutex);
1792     + alias = fuse_d_add_directory(dentry, inode);
1793     + mutex_unlock(&fc->inst_mutex);
1794     + err = PTR_ERR(alias);
1795     + if (IS_ERR(alias)) {
1796     + iput(inode);
1797     + goto out;
1798     + }
1799     + } else {
1800     + alias = d_splice_alias(inode, dentry);
1801     + }
1802     +
1803     if (alias) {
1804     dput(dentry);
1805     dentry = alias;
1806     diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
1807     index e703318..8ebd3f5 100644
1808     --- a/fs/lockd/svclock.c
1809     +++ b/fs/lockd/svclock.c
1810     @@ -939,6 +939,7 @@ nlmsvc_retry_blocked(void)
1811     unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
1812     struct nlm_block *block;
1813    
1814     + spin_lock(&nlm_blocked_lock);
1815     while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
1816     block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
1817    
1818     @@ -948,6 +949,7 @@ nlmsvc_retry_blocked(void)
1819     timeout = block->b_when - jiffies;
1820     break;
1821     }
1822     + spin_unlock(&nlm_blocked_lock);
1823    
1824     dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
1825     block, block->b_when);
1826     @@ -957,7 +959,9 @@ nlmsvc_retry_blocked(void)
1827     retry_deferred_block(block);
1828     } else
1829     nlmsvc_grant_blocked(block);
1830     + spin_lock(&nlm_blocked_lock);
1831     }
1832     + spin_unlock(&nlm_blocked_lock);
1833    
1834     return timeout;
1835     }
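
nlm_blocked is now walked under nlm_blocked_lock, with the lock dropped around
the grant/retry work (which may block) and retaken before the next iteration.
The same shape on a generic list (illustrative; note that nlmsvc_retry_blocked()
leaves entries linked, which is why it re-reads nlm_blocked.next on each pass):

    static LIST_HEAD(demo_list);
    static DEFINE_SPINLOCK(demo_lock);

    struct demo_item {
            struct list_head link;
    };

    static void demo_drain(void (*process)(struct demo_item *))
    {
            struct demo_item *it;

            spin_lock(&demo_lock);
            while (!list_empty(&demo_list)) {
                    it = list_first_entry(&demo_list, struct demo_item, link);
                    list_del_init(&it->link);  /* detach before dropping lock */
                    spin_unlock(&demo_lock);
                    process(it);               /* may sleep; lock not held */
                    spin_lock(&demo_lock);
            }
            spin_unlock(&demo_lock);
    }
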
1836     diff --git a/include/linux/edac.h b/include/linux/edac.h
1837     index 0b76327..5c6d7fb 100644
1838     --- a/include/linux/edac.h
1839     +++ b/include/linux/edac.h
1840     @@ -622,7 +622,7 @@ struct edac_raw_error_desc {
1841     */
1842     struct mem_ctl_info {
1843     struct device dev;
1844     - struct bus_type bus;
1845     + struct bus_type *bus;
1846    
1847     struct list_head link; /* for global list of mem_ctl_info structs */
1848    
1849     @@ -742,4 +742,9 @@ struct mem_ctl_info {
1850     #endif
1851     };
1852    
1853     +/*
1854     + * Maximum number of memory controllers in the coherent fabric.
1855     + */
1856     +#define EDAC_MAX_MCS 16
1857     +
1858     #endif
1859     diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
1860     index 637fa71d..0b34988 100644
1861     --- a/include/linux/if_vlan.h
1862     +++ b/include/linux/if_vlan.h
1863     @@ -79,9 +79,8 @@ static inline int is_vlan_dev(struct net_device *dev)
1864     }
1865    
1866     #define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
1867     -#define vlan_tx_nonzero_tag_present(__skb) \
1868     - (vlan_tx_tag_present(__skb) && ((__skb)->vlan_tci & VLAN_VID_MASK))
1869     #define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
1870     +#define vlan_tx_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
1871    
1872     #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1873    
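vlan_tx_tag_get_id() extracts only the 12-bit VID. The distinction matters for
priority-tagged frames (VID 0, priority bits possibly set): they test positive
for vlan_tx_tag_present() yet carry no VLAN ID, which is exactly what the
net/core/dev.c hunk below relies on. A userspace rendition of the masks (values
assumed to match the 3.10 header: VLAN_TAG_PRESENT 0x1000, VLAN_VID_MASK 0x0fff):

    #include <stdio.h>
    #include <stdint.h>

    #define VLAN_TAG_PRESENT 0x1000
    #define VLAN_VID_MASK    0x0fff

    int main(void)
    {
            uint16_t tci = VLAN_TAG_PRESENT | 0x0000;  /* priority-tagged, VID 0 */

            printf("present=%d vid=%u\n",
                   !!(tci & VLAN_TAG_PRESENT),        /* vlan_tx_tag_present() */
                   (unsigned)(tci & VLAN_VID_MASK));  /* vlan_tx_tag_get_id()  */
            return 0;
    }
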
1874     diff --git a/include/linux/virtio.h b/include/linux/virtio.h
1875     index 9ff8645..72398ee 100644
1876     --- a/include/linux/virtio.h
1877     +++ b/include/linux/virtio.h
1878     @@ -70,6 +70,10 @@ void virtqueue_disable_cb(struct virtqueue *vq);
1879    
1880     bool virtqueue_enable_cb(struct virtqueue *vq);
1881    
1882     +unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
1883     +
1884     +bool virtqueue_poll(struct virtqueue *vq, unsigned);
1885     +
1886     bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
1887    
1888     void *virtqueue_detach_unused_buf(struct virtqueue *vq);
1889     diff --git a/include/net/addrconf.h b/include/net/addrconf.h
1890     index 21f70270..01b1a1a 100644
1891     --- a/include/net/addrconf.h
1892     +++ b/include/net/addrconf.h
1893     @@ -86,6 +86,9 @@ extern int ipv6_dev_get_saddr(struct net *net,
1894     const struct in6_addr *daddr,
1895     unsigned int srcprefs,
1896     struct in6_addr *saddr);
1897     +extern int __ipv6_get_lladdr(struct inet6_dev *idev,
1898     + struct in6_addr *addr,
1899     + unsigned char banned_flags);
1900     extern int ipv6_get_lladdr(struct net_device *dev,
1901     struct in6_addr *addr,
1902     unsigned char banned_flags);
1903     diff --git a/include/net/udp.h b/include/net/udp.h
1904     index 065f379..ad99eed 100644
1905     --- a/include/net/udp.h
1906     +++ b/include/net/udp.h
1907     @@ -181,6 +181,7 @@ extern int udp_get_port(struct sock *sk, unsigned short snum,
1908     extern void udp_err(struct sk_buff *, u32);
1909     extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
1910     struct msghdr *msg, size_t len);
1911     +extern int udp_push_pending_frames(struct sock *sk);
1912     extern void udp_flush_pending_frames(struct sock *sk);
1913     extern int udp_rcv(struct sk_buff *skb);
1914     extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
1915     diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h
1916     index 0b46fd5..e36a4ae 100644
1917     --- a/include/uapi/linux/if_pppox.h
1918     +++ b/include/uapi/linux/if_pppox.h
1919     @@ -135,11 +135,11 @@ struct pppoe_tag {
1920    
1921     struct pppoe_hdr {
1922     #if defined(__LITTLE_ENDIAN_BITFIELD)
1923     - __u8 ver : 4;
1924     __u8 type : 4;
1925     + __u8 ver : 4;
1926     #elif defined(__BIG_ENDIAN_BITFIELD)
1927     - __u8 type : 4;
1928     __u8 ver : 4;
1929     + __u8 type : 4;
1930     #else
1931     #error "Please fix <asm/byteorder.h>"
1932     #endif
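
The swap matters because C bitfields on a little-endian ABI allocate the
first-declared member in the least-significant bits, while on the wire the PPPoE
version occupies the high nibble of the first byte (RFC 2516: ver=1, type=1).
A small host-side check, assuming a typical little-endian ABI (bitfield layout
is formally implementation-defined):

    #include <stdio.h>
    #include <stdint.h>

    struct hdr_le {                 /* corrected order for little endian */
            uint8_t type : 4;       /* low nibble  */
            uint8_t ver  : 4;       /* high nibble */
    };

    int main(void)
    {
            union {
                    struct hdr_le h;
                    uint8_t raw;
            } u = { .raw = 0x21 };  /* hypothetical byte: ver=2, type=1 */

            printf("ver=%d type=%d\n", u.h.ver, u.h.type);  /* ver=2 type=1 */
            return 0;
    }

With the pre-patch declaration order the same byte would decode as ver=1, type=2.
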
1933     diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
1934     index fd4b13b..2288fbd 100644
1935     --- a/kernel/hrtimer.c
1936     +++ b/kernel/hrtimer.c
1937     @@ -721,17 +721,20 @@ static int hrtimer_switch_to_hres(void)
1938     return 1;
1939     }
1940    
1941     +static void clock_was_set_work(struct work_struct *work)
1942     +{
1943     + clock_was_set();
1944     +}
1945     +
1946     +static DECLARE_WORK(hrtimer_work, clock_was_set_work);
1947     +
1948     /*
1949     - * Called from timekeeping code to reprogramm the hrtimer interrupt
1950     - * device. If called from the timer interrupt context we defer it to
1951     - * softirq context.
1952     + * Called from timekeeping and resume code to reprogram the hrtimer
1953     + * interrupt device on all cpus.
1954     */
1955     void clock_was_set_delayed(void)
1956     {
1957     - struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1958     -
1959     - cpu_base->clock_was_set = 1;
1960     - __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
1961     + schedule_work(&hrtimer_work);
1962     }
1963    
1964     #else
1965     @@ -780,8 +783,10 @@ void hrtimers_resume(void)
1966     WARN_ONCE(!irqs_disabled(),
1967     KERN_INFO "hrtimers_resume() called with IRQs enabled!");
1968    
1969     + /* Retrigger on the local CPU */
1970     retrigger_next_event(NULL);
1971     - timerfd_clock_was_set();
1972     + /* And schedule a retrigger for all others */
1973     + clock_was_set_delayed();
1974     }
1975    
1976     static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
1977     @@ -1432,13 +1437,6 @@ void hrtimer_peek_ahead_timers(void)
1978    
1979     static void run_hrtimer_softirq(struct softirq_action *h)
1980     {
1981     - struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1982     -
1983     - if (cpu_base->clock_was_set) {
1984     - cpu_base->clock_was_set = 0;
1985     - clock_was_set();
1986     - }
1987     -
1988     hrtimer_peek_ahead_timers();
1989     }
1990    
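clock_was_set() has to reach every CPU and so cannot run from hard-interrupt
context; its interrupt-context callers now hand it to a workqueue instead of to
the hrtimer softirq. The idiom in isolation (names illustrative):

    #include <linux/workqueue.h>

    static void demo_work_fn(struct work_struct *work)
    {
            /* process context: may sleep, may IPI all CPUs */
    }

    static DECLARE_WORK(demo_work, demo_work_fn);

    static void demo_notify_from_irq(void)
    {
            schedule_work(&demo_work);  /* safe from irq context */
    }

DECLARE_WORK yields one statically allocated work item, so repeated triggers
coalesce while the work is still pending, much as the old clock_was_set flag did.
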
1991     diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c
1992     index c6422ff..9012ecf 100644
1993     --- a/kernel/power/autosleep.c
1994     +++ b/kernel/power/autosleep.c
1995     @@ -32,7 +32,8 @@ static void try_to_suspend(struct work_struct *work)
1996    
1997     mutex_lock(&autosleep_lock);
1998    
1999     - if (!pm_save_wakeup_count(initial_count)) {
2000     + if (!pm_save_wakeup_count(initial_count) ||
2001     + system_state != SYSTEM_RUNNING) {
2002     mutex_unlock(&autosleep_lock);
2003     goto out;
2004     }
2005     diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
2006     index 566cf2b..74fdc5c 100644
2007     --- a/lib/Kconfig.debug
2008     +++ b/lib/Kconfig.debug
2009     @@ -1272,7 +1272,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
2010     depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
2011     depends on !X86_64
2012     select STACKTRACE
2013     - select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
2014     + select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
2015     help
2016     Provide stacktrace filter for fault-injection capabilities
2017    
2018     diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
2019     index 8a15eaa..4a78c4d 100644
2020     --- a/net/8021q/vlan_core.c
2021     +++ b/net/8021q/vlan_core.c
2022     @@ -9,7 +9,7 @@ bool vlan_do_receive(struct sk_buff **skbp)
2023     {
2024     struct sk_buff *skb = *skbp;
2025     __be16 vlan_proto = skb->vlan_proto;
2026     - u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
2027     + u16 vlan_id = vlan_tx_tag_get_id(skb);
2028     struct net_device *vlan_dev;
2029     struct vlan_pcpu_stats *rx_stats;
2030    
2031     diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
2032     index 3a8c8fd..1cd3d2a 100644
2033     --- a/net/8021q/vlan_dev.c
2034     +++ b/net/8021q/vlan_dev.c
2035     @@ -73,6 +73,8 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
2036     {
2037     struct vlan_priority_tci_mapping *mp;
2038    
2039     + smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
2040     +
2041     mp = vlan_dev_priv(dev)->egress_priority_map[(skb->priority & 0xF)];
2042     while (mp) {
2043     if (mp->priority == skb->priority) {
2044     @@ -249,6 +251,11 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
2045     np->next = mp;
2046     np->priority = skb_prio;
2047     np->vlan_qos = vlan_qos;
2048     + /* Before inserting this element in hash table, make sure all its fields
2049     + * are committed to memory.
2050     + * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
2051     + */
2052     + smp_wmb();
2053     vlan->egress_priority_map[skb_prio & 0xF] = np;
2054     if (vlan_qos)
2055     vlan->nr_egress_mappings++;
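
The two barriers pair up: the writer fully initializes the new mapping, commits
it with smp_wmb(), and only then links it into egress_priority_map; the lockless
reader issues smp_rmb() before walking the chain. Reduced to its shape (a sketch
mirroring the barrier placement above, not a new API):

    struct demo_map {
            int key;
            int val;
            struct demo_map *next;
    };

    static struct demo_map *demo_head;

    static void demo_publish(struct demo_map *m, int key, int val)
    {
            m->key = key;
            m->val = val;
            smp_wmb();              /* commit fields before exposing the node */
            demo_head = m;
    }

    static int demo_lookup(int key)
    {
            struct demo_map *m;

            smp_rmb();              /* pairs with smp_wmb() in demo_publish() */
            for (m = demo_head; m; m = m->next)
                    if (m->key == key)
                            return m->val;
            return 0;
    }
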
2056     diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
2057     index de8df95..2ee3879 100644
2058     --- a/net/9p/trans_common.c
2059     +++ b/net/9p/trans_common.c
2060     @@ -24,11 +24,11 @@
2061     */
2062     void p9_release_pages(struct page **pages, int nr_pages)
2063     {
2064     - int i = 0;
2065     - while (pages[i] && nr_pages--) {
2066     - put_page(pages[i]);
2067     - i++;
2068     - }
2069     + int i;
2070     +
2071     + for (i = 0; i < nr_pages; i++)
2072     + if (pages[i])
2073     + put_page(pages[i]);
2074     }
2075     EXPORT_SYMBOL(p9_release_pages);
2076    
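Worked example of the fix: with pages = {A, NULL, B} and nr_pages = 3, the old
"while (pages[i] && nr_pages--)" loop stopped at the first hole and never
released B; the rewritten loop visits every slot and merely skips holes. A
userspace rendition with put_page() modeled as a counter:

    #include <stdio.h>

    static int released;

    static void put_page(void *page)
    {
            (void)page;
            released++;
    }

    int main(void)
    {
            void *pages[] = { (void *)1, NULL, (void *)2 };
            int nr_pages = 3, i;

            for (i = 0; i < nr_pages; i++)  /* new loop shape */
                    if (pages[i])
                            put_page(pages[i]);
            printf("released %d of 2 pages\n", released);  /* prints 2, not 1 */
            return 0;
    }
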
2077     diff --git a/net/core/dev.c b/net/core/dev.c
2078     index faebb39..7ddbb31 100644
2079     --- a/net/core/dev.c
2080     +++ b/net/core/dev.c
2081     @@ -3513,8 +3513,15 @@ ncls:
2082     }
2083     }
2084    
2085     - if (vlan_tx_nonzero_tag_present(skb))
2086     - skb->pkt_type = PACKET_OTHERHOST;
2087     + if (unlikely(vlan_tx_tag_present(skb))) {
2088     + if (vlan_tx_tag_get_id(skb))
2089     + skb->pkt_type = PACKET_OTHERHOST;
2090     + /* Note: we might in the future use prio bits
2091     + * and set skb->priority like in vlan_do_receive()
2092     + * For the time being, just ignore Priority Code Point
2093     + */
2094     + skb->vlan_tci = 0;
2095     + }
2096    
2097     /* deliver only exact match when indicated */
2098     null_or_dev = deliver_exact ? skb->dev : NULL;
2099     diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2100     index 5c56b21..ce90b02 100644
2101     --- a/net/core/neighbour.c
2102     +++ b/net/core/neighbour.c
2103     @@ -231,7 +231,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
2104     we must kill timers etc. and move
2105     it to safe state.
2106     */
2107     - skb_queue_purge(&n->arp_queue);
2108     + __skb_queue_purge(&n->arp_queue);
2109     n->arp_queue_len_bytes = 0;
2110     n->output = neigh_blackhole;
2111     if (n->nud_state & NUD_VALID)
2112     @@ -286,7 +286,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
2113     if (!n)
2114     goto out_entries;
2115    
2116     - skb_queue_head_init(&n->arp_queue);
2117     + __skb_queue_head_init(&n->arp_queue);
2118     rwlock_init(&n->lock);
2119     seqlock_init(&n->ha_lock);
2120     n->updated = n->used = now;
2121     @@ -708,7 +708,9 @@ void neigh_destroy(struct neighbour *neigh)
2122     if (neigh_del_timer(neigh))
2123     pr_warn("Impossible event\n");
2124    
2125     - skb_queue_purge(&neigh->arp_queue);
2126     + write_lock_bh(&neigh->lock);
2127     + __skb_queue_purge(&neigh->arp_queue);
2128     + write_unlock_bh(&neigh->lock);
2129     neigh->arp_queue_len_bytes = 0;
2130    
2131     if (dev->netdev_ops->ndo_neigh_destroy)
2132     @@ -858,7 +860,7 @@ static void neigh_invalidate(struct neighbour *neigh)
2133     neigh->ops->error_report(neigh, skb);
2134     write_lock(&neigh->lock);
2135     }
2136     - skb_queue_purge(&neigh->arp_queue);
2137     + __skb_queue_purge(&neigh->arp_queue);
2138     neigh->arp_queue_len_bytes = 0;
2139     }
2140    
2141     @@ -1210,7 +1212,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
2142    
2143     write_lock_bh(&neigh->lock);
2144     }
2145     - skb_queue_purge(&neigh->arp_queue);
2146     + __skb_queue_purge(&neigh->arp_queue);
2147     neigh->arp_queue_len_bytes = 0;
2148     }
2149     out:
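
skb_queue_purge() takes the queue's own spinlock; the double-underscore variant
does not. The switch works because every purge of arp_queue now runs with
neigh->lock held: the other call sites already held it, and neigh_destroy()
gains the write_lock_bh() above. Stated as a helper (illustrative):

    static void demo_purge_arp_queue(struct neighbour *neigh)
    {
            write_lock_bh(&neigh->lock);
            __skb_queue_purge(&neigh->arp_queue);  /* no internal locking */
            neigh->arp_queue_len_bytes = 0;
            write_unlock_bh(&neigh->lock);
    }
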
2150     diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
2151     index 2a83591..855004f 100644
2152     --- a/net/ipv4/ip_gre.c
2153     +++ b/net/ipv4/ip_gre.c
2154     @@ -503,10 +503,11 @@ static int ipgre_tunnel_ioctl(struct net_device *dev,
2155    
2156     if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
2157     return -EFAULT;
2158     - if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
2159     - p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
2160     - ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))) {
2161     - return -EINVAL;
2162     + if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
2163     + if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
2164     + p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
2165     + ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
2166     + return -EINVAL;
2167     }
2168     p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
2169     p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
2170     diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
2171     index 3da817b..15e3e68 100644
2172     --- a/net/ipv4/ip_input.c
2173     +++ b/net/ipv4/ip_input.c
2174     @@ -190,10 +190,7 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
2175     {
2176     struct net *net = dev_net(skb->dev);
2177    
2178     - __skb_pull(skb, ip_hdrlen(skb));
2179     -
2180     - /* Point into the IP datagram, just past the header. */
2181     - skb_reset_transport_header(skb);
2182     + __skb_pull(skb, skb_network_header_len(skb));
2183    
2184     rcu_read_lock();
2185     {
2186     @@ -437,6 +434,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
2187     goto drop;
2188     }
2189    
2190     + skb->transport_header = skb->network_header + iph->ihl*4;
2191     +
2192     /* Remove any debris in the socket control block */
2193     memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
2194    
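The two hunks cooperate: ip_rcv() now records the transport header up front as
network_header + ihl*4 (ihl counts 32-bit words), and ip_local_deliver_finish()
pulls skb_network_header_len(), the distance between the two recorded offsets,
rather than re-deriving the length from the packet bytes. The arithmetic,
spelled out:

    ihl = 5   ->  header = 5 * 4  = 20 bytes  (minimal IPv4 header)
    ihl = 15  ->  header = 15 * 4 = 60 bytes  (maximal, with options)
    skb_network_header_len() == transport_header - network_header == ihl * 4
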
2195     diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
2196     index 7fa8f08..cbfc37f 100644
2197     --- a/net/ipv4/ip_tunnel.c
2198     +++ b/net/ipv4/ip_tunnel.c
2199     @@ -486,6 +486,53 @@ drop:
2200     }
2201     EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
2202    
2203     +static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
2204     + struct rtable *rt, __be16 df)
2205     +{
2206     + struct ip_tunnel *tunnel = netdev_priv(dev);
2207     + int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
2208     + int mtu;
2209     +
2210     + if (df)
2211     + mtu = dst_mtu(&rt->dst) - dev->hard_header_len
2212     + - sizeof(struct iphdr) - tunnel->hlen;
2213     + else
2214     + mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
2215     +
2216     + if (skb_dst(skb))
2217     + skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
2218     +
2219     + if (skb->protocol == htons(ETH_P_IP)) {
2220     + if (!skb_is_gso(skb) &&
2221     + (df & htons(IP_DF)) && mtu < pkt_size) {
2222     + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
2223     + return -E2BIG;
2224     + }
2225     + }
2226     +#if IS_ENABLED(CONFIG_IPV6)
2227     + else if (skb->protocol == htons(ETH_P_IPV6)) {
2228     + struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
2229     +
2230     + if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
2231     + mtu >= IPV6_MIN_MTU) {
2232     + if ((tunnel->parms.iph.daddr &&
2233     + !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
2234     + rt6->rt6i_dst.plen == 128) {
2235     + rt6->rt6i_flags |= RTF_MODIFIED;
2236     + dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
2237     + }
2238     + }
2239     +
2240     + if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
2241     + mtu < pkt_size) {
2242     + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
2243     + return -E2BIG;
2244     + }
2245     + }
2246     +#endif
2247     + return 0;
2248     +}
2249     +
2250     void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
2251     const struct iphdr *tnl_params)
2252     {
2253     @@ -499,7 +546,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
2254     struct net_device *tdev; /* Device to other host */
2255     unsigned int max_headroom; /* The extra header space needed */
2256     __be32 dst;
2257     - int mtu;
2258    
2259     inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
2260    
2261     @@ -579,50 +625,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
2262     goto tx_error;
2263     }
2264    
2265     - df = tnl_params->frag_off;
2266    
2267     - if (df)
2268     - mtu = dst_mtu(&rt->dst) - dev->hard_header_len
2269     - - sizeof(struct iphdr);
2270     - else
2271     - mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
2272     -
2273     - if (skb_dst(skb))
2274     - skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
2275     -
2276     - if (skb->protocol == htons(ETH_P_IP)) {
2277     - df |= (inner_iph->frag_off&htons(IP_DF));
2278     -
2279     - if (!skb_is_gso(skb) &&
2280     - (inner_iph->frag_off&htons(IP_DF)) &&
2281     - mtu < ntohs(inner_iph->tot_len)) {
2282     - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
2283     - ip_rt_put(rt);
2284     - goto tx_error;
2285     - }
2286     - }
2287     -#if IS_ENABLED(CONFIG_IPV6)
2288     - else if (skb->protocol == htons(ETH_P_IPV6)) {
2289     - struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
2290     -
2291     - if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
2292     - mtu >= IPV6_MIN_MTU) {
2293     - if ((tunnel->parms.iph.daddr &&
2294     - !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
2295     - rt6->rt6i_dst.plen == 128) {
2296     - rt6->rt6i_flags |= RTF_MODIFIED;
2297     - dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
2298     - }
2299     - }
2300     -
2301     - if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
2302     - mtu < skb->len) {
2303     - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
2304     - ip_rt_put(rt);
2305     - goto tx_error;
2306     - }
2307     + if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
2308     + ip_rt_put(rt);
2309     + goto tx_error;
2310     }
2311     -#endif
2312    
2313     if (tunnel->err_count > 0) {
2314     if (time_before(jiffies,
2315     @@ -646,6 +653,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
2316     ttl = ip4_dst_hoplimit(&rt->dst);
2317     }
2318    
2319     + df = tnl_params->frag_off;
2320     + if (skb->protocol == htons(ETH_P_IP))
2321     + df |= (inner_iph->frag_off&htons(IP_DF));
2322     +
2323     max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr)
2324     + rt->dst.header_len;
2325     if (max_headroom > dev->needed_headroom) {
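
tnl_update_pmtu() centralizes the DF/PMTU policing that used to sit inline in
ip_tunnel_xmit(). A worked example with assumed values: given a 1500-byte route
MTU, 14 bytes of Ethernet hard header, a 20-byte outer IPv4 header and a 4-byte
tunnel header (tunnel->hlen), the inner budget is

    mtu = 1500 - 14 - 20 - 4 = 1462 bytes

so a DF-marked IPv4 packet whose pkt_size exceeds 1462 triggers
icmp_send(ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED) quoting mtu = 1462, the function
returns -E2BIG, and the caller drops the route reference and bails out.
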
2326     diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
2327     index c118f6b..17cc0ff 100644
2328     --- a/net/ipv4/ip_vti.c
2329     +++ b/net/ipv4/ip_vti.c
2330     @@ -606,17 +606,10 @@ static int __net_init vti_fb_tunnel_init(struct net_device *dev)
2331     struct iphdr *iph = &tunnel->parms.iph;
2332     struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);
2333    
2334     - tunnel->dev = dev;
2335     - strcpy(tunnel->parms.name, dev->name);
2336     -
2337     iph->version = 4;
2338     iph->protocol = IPPROTO_IPIP;
2339     iph->ihl = 5;
2340    
2341     - dev->tstats = alloc_percpu(struct pcpu_tstats);
2342     - if (!dev->tstats)
2343     - return -ENOMEM;
2344     -
2345     dev_hold(dev);
2346     rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
2347     return 0;
2348     diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
2349     index 77bfcce..7cfc456 100644
2350     --- a/net/ipv4/ipip.c
2351     +++ b/net/ipv4/ipip.c
2352     @@ -240,11 +240,13 @@ ipip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2353     if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
2354     return -EFAULT;
2355    
2356     - if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
2357     - p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
2358     - return -EINVAL;
2359     - if (p.i_key || p.o_key || p.i_flags || p.o_flags)
2360     - return -EINVAL;
2361     + if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
2362     + if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
2363     + p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
2364     + return -EINVAL;
2365     + }
2366     +
2367     + p.i_key = p.o_key = p.i_flags = p.o_flags = 0;
2368     if (p.iph.ttl)
2369     p.iph.frag_off |= htons(IP_DF);
2370    
2371     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
2372     index 0bf5d39..93b731d 100644
2373     --- a/net/ipv4/udp.c
2374     +++ b/net/ipv4/udp.c
2375     @@ -799,7 +799,7 @@ send:
2376     /*
2377     * Push out all pending data as one UDP datagram. Socket is locked.
2378     */
2379     -static int udp_push_pending_frames(struct sock *sk)
2380     +int udp_push_pending_frames(struct sock *sk)
2381     {
2382     struct udp_sock *up = udp_sk(sk);
2383     struct inet_sock *inet = inet_sk(sk);
2384     @@ -818,6 +818,7 @@ out:
2385     up->pending = 0;
2386     return err;
2387     }
2388     +EXPORT_SYMBOL(udp_push_pending_frames);
2389    
2390     int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
2391     size_t len)
2392     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2393     index 4ab4c38..fb8c94c 100644
2394     --- a/net/ipv6/addrconf.c
2395     +++ b/net/ipv6/addrconf.c
2396     @@ -1448,6 +1448,23 @@ try_nextdev:
2397     }
2398     EXPORT_SYMBOL(ipv6_dev_get_saddr);
2399    
2400     +int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
2401     + unsigned char banned_flags)
2402     +{
2403     + struct inet6_ifaddr *ifp;
2404     + int err = -EADDRNOTAVAIL;
2405     +
2406     + list_for_each_entry(ifp, &idev->addr_list, if_list) {
2407     + if (ifp->scope == IFA_LINK &&
2408     + !(ifp->flags & banned_flags)) {
2409     + *addr = ifp->addr;
2410     + err = 0;
2411     + break;
2412     + }
2413     + }
2414     + return err;
2415     +}
2416     +
2417     int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
2418     unsigned char banned_flags)
2419     {
2420     @@ -1457,17 +1474,8 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
2421     rcu_read_lock();
2422     idev = __in6_dev_get(dev);
2423     if (idev) {
2424     - struct inet6_ifaddr *ifp;
2425     -
2426     read_lock_bh(&idev->lock);
2427     - list_for_each_entry(ifp, &idev->addr_list, if_list) {
2428     - if (ifp->scope == IFA_LINK &&
2429     - !(ifp->flags & banned_flags)) {
2430     - *addr = ifp->addr;
2431     - err = 0;
2432     - break;
2433     - }
2434     - }
2435     + err = __ipv6_get_lladdr(idev, addr, banned_flags);
2436     read_unlock_bh(&idev->lock);
2437     }
2438     rcu_read_unlock();
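
__ipv6_get_lladdr() is the lock-free core: it assumes the caller already holds
idev->lock, while ipv6_get_lladdr() remains the self-locking wrapper. Correct
use of the new helper therefore looks like this fragment (mirroring the wrapper
above):

    read_lock_bh(&idev->lock);
    err = __ipv6_get_lladdr(idev, &addr, IFA_F_TENTATIVE);
    read_unlock_bh(&idev->lock);

The mld_newpack() change later in this patch exists precisely so the MLD code
can call this variant under an idev->lock it already holds.
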
2439     diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2440     index 192dd1a..5fc9c7a 100644
2441     --- a/net/ipv6/ip6_fib.c
2442     +++ b/net/ipv6/ip6_fib.c
2443     @@ -632,6 +632,12 @@ insert_above:
2444     return ln;
2445     }
2446    
2447     +static inline bool rt6_qualify_for_ecmp(struct rt6_info *rt)
2448     +{
2449     + return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
2450     + RTF_GATEWAY;
2451     +}
2452     +
2453     /*
2454     * Insert routing information in a node.
2455     */
2456     @@ -646,6 +652,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
2457     int add = (!info->nlh ||
2458     (info->nlh->nlmsg_flags & NLM_F_CREATE));
2459     int found = 0;
2460     + bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
2461    
2462     ins = &fn->leaf;
2463    
2464     @@ -691,9 +698,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
2465     * To avoid long list, we only had siblings if the
2466     * route have a gateway.
2467     */
2468     - if (rt->rt6i_flags & RTF_GATEWAY &&
2469     - !(rt->rt6i_flags & RTF_EXPIRES) &&
2470     - !(iter->rt6i_flags & RTF_EXPIRES))
2471     + if (rt_can_ecmp &&
2472     + rt6_qualify_for_ecmp(iter))
2473     rt->rt6i_nsiblings++;
2474     }
2475    
2476     @@ -715,7 +721,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
2477     /* Find the first route that have the same metric */
2478     sibling = fn->leaf;
2479     while (sibling) {
2480     - if (sibling->rt6i_metric == rt->rt6i_metric) {
2481     + if (sibling->rt6i_metric == rt->rt6i_metric &&
2482     + rt6_qualify_for_ecmp(sibling)) {
2483     list_add_tail(&rt->rt6i_siblings,
2484     &sibling->rt6i_siblings);
2485     break;
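
rt6_qualify_for_ecmp() requires the masked flag word to equal RTF_GATEWAY
exactly: gateway present, and neither RTF_ADDRCONF nor RTF_DYNAMIC set. Two
quick evaluations of the expression:

    flags = RTF_GATEWAY                ->  masked == RTF_GATEWAY   ->  ECMP candidate
    flags = RTF_GATEWAY | RTF_DYNAMIC  ->  masked has RTF_DYNAMIC  ->  not ECMP

Both the sibling counting and the sibling-list insertion now use the same
predicate, so routes installed by redirects or addrconf can no longer pair up
with static gateway routes of equal metric.
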
2486     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2487     index d5d20cd..6e3ddf8 100644
2488     --- a/net/ipv6/ip6_output.c
2489     +++ b/net/ipv6/ip6_output.c
2490     @@ -1098,11 +1098,12 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
2491     return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
2492     }
2493    
2494     -static void ip6_append_data_mtu(int *mtu,
2495     +static void ip6_append_data_mtu(unsigned int *mtu,
2496     int *maxfraglen,
2497     unsigned int fragheaderlen,
2498     struct sk_buff *skb,
2499     - struct rt6_info *rt)
2500     + struct rt6_info *rt,
2501     + bool pmtuprobe)
2502     {
2503     if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
2504     if (skb == NULL) {
2505     @@ -1114,7 +1115,9 @@ static void ip6_append_data_mtu(int *mtu,
2506     * this fragment is not first, the headers
2507     * space is regarded as data space.
2508     */
2509     - *mtu = dst_mtu(rt->dst.path);
2510     + *mtu = min(*mtu, pmtuprobe ?
2511     + rt->dst.dev->mtu :
2512     + dst_mtu(rt->dst.path));
2513     }
2514     *maxfraglen = ((*mtu - fragheaderlen) & ~7)
2515     + fragheaderlen - sizeof(struct frag_hdr);
2516     @@ -1131,11 +1134,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
2517     struct ipv6_pinfo *np = inet6_sk(sk);
2518     struct inet_cork *cork;
2519     struct sk_buff *skb, *skb_prev = NULL;
2520     - unsigned int maxfraglen, fragheaderlen;
2521     + unsigned int maxfraglen, fragheaderlen, mtu;
2522     int exthdrlen;
2523     int dst_exthdrlen;
2524     int hh_len;
2525     - int mtu;
2526     int copy;
2527     int err;
2528     int offset = 0;
2529     @@ -1292,7 +1294,9 @@ alloc_new_skb:
2530     /* update mtu and maxfraglen if necessary */
2531     if (skb == NULL || skb_prev == NULL)
2532     ip6_append_data_mtu(&mtu, &maxfraglen,
2533     - fragheaderlen, skb, rt);
2534     + fragheaderlen, skb, rt,
2535     + np->pmtudisc ==
2536     + IPV6_PMTUDISC_PROBE);
2537    
2538     skb_prev = skb;
2539    
2540     diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
2541     index bfa6cc3..c3998c2 100644
2542     --- a/net/ipv6/mcast.c
2543     +++ b/net/ipv6/mcast.c
2544     @@ -1343,8 +1343,9 @@ static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
2545     hdr->daddr = *daddr;
2546     }
2547    
2548     -static struct sk_buff *mld_newpack(struct net_device *dev, int size)
2549     +static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
2550     {
2551     + struct net_device *dev = idev->dev;
2552     struct net *net = dev_net(dev);
2553     struct sock *sk = net->ipv6.igmp_sk;
2554     struct sk_buff *skb;
2555     @@ -1369,7 +1370,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
2556    
2557     skb_reserve(skb, hlen);
2558    
2559     - if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
2560     + if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
2561     /* <draft-ietf-magma-mld-source-05.txt>:
2562     * use unspecified address as the source address
2563     * when a valid link-local address is not available.
2564     @@ -1465,7 +1466,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2565     struct mld2_grec *pgr;
2566    
2567     if (!skb)
2568     - skb = mld_newpack(dev, dev->mtu);
2569     + skb = mld_newpack(pmc->idev, dev->mtu);
2570     if (!skb)
2571     return NULL;
2572     pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
2573     @@ -1485,7 +1486,8 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2574     static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2575     int type, int gdeleted, int sdeleted)
2576     {
2577     - struct net_device *dev = pmc->idev->dev;
2578     + struct inet6_dev *idev = pmc->idev;
2579     + struct net_device *dev = idev->dev;
2580     struct mld2_report *pmr;
2581     struct mld2_grec *pgr = NULL;
2582     struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
2583     @@ -1514,7 +1516,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2584     AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
2585     if (skb)
2586     mld_sendpack(skb);
2587     - skb = mld_newpack(dev, dev->mtu);
2588     + skb = mld_newpack(idev, dev->mtu);
2589     }
2590     }
2591     first = 1;
2592     @@ -1541,7 +1543,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2593     pgr->grec_nsrcs = htons(scount);
2594     if (skb)
2595     mld_sendpack(skb);
2596     - skb = mld_newpack(dev, dev->mtu);
2597     + skb = mld_newpack(idev, dev->mtu);
2598     first = 1;
2599     scount = 0;
2600     }
2601     @@ -1596,8 +1598,8 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
2602     struct sk_buff *skb = NULL;
2603     int type;
2604    
2605     + read_lock_bh(&idev->lock);
2606     if (!pmc) {
2607     - read_lock_bh(&idev->lock);
2608     for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
2609     if (pmc->mca_flags & MAF_NOREPORT)
2610     continue;
2611     @@ -1609,7 +1611,6 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
2612     skb = add_grec(skb, pmc, type, 0, 0);
2613     spin_unlock_bh(&pmc->mca_lock);
2614     }
2615     - read_unlock_bh(&idev->lock);
2616     } else {
2617     spin_lock_bh(&pmc->mca_lock);
2618     if (pmc->mca_sfcount[MCAST_EXCLUDE])
2619     @@ -1619,6 +1620,7 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
2620     skb = add_grec(skb, pmc, type, 0, 0);
2621     spin_unlock_bh(&pmc->mca_lock);
2622     }
2623     + read_unlock_bh(&idev->lock);
2624     if (skb)
2625     mld_sendpack(skb);
2626     }
2627     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2628     index ad0aa6b..bacce6c 100644
2629     --- a/net/ipv6/route.c
2630     +++ b/net/ipv6/route.c
2631     @@ -65,6 +65,12 @@
2632     #include <linux/sysctl.h>
2633     #endif
2634    
2635     +enum rt6_nud_state {
2636     + RT6_NUD_FAIL_HARD = -2,
2637     + RT6_NUD_FAIL_SOFT = -1,
2638     + RT6_NUD_SUCCEED = 1
2639     +};
2640     +
2641     static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
2642     const struct in6_addr *dest);
2643     static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
2644     @@ -527,26 +533,29 @@ static inline int rt6_check_dev(struct rt6_info *rt, int oif)
2645     return 0;
2646     }
2647    
2648     -static inline bool rt6_check_neigh(struct rt6_info *rt)
2649     +static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
2650     {
2651     struct neighbour *neigh;
2652     - bool ret = false;
2653     + enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
2654    
2655     if (rt->rt6i_flags & RTF_NONEXTHOP ||
2656     !(rt->rt6i_flags & RTF_GATEWAY))
2657     - return true;
2658     + return RT6_NUD_SUCCEED;
2659    
2660     rcu_read_lock_bh();
2661     neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
2662     if (neigh) {
2663     read_lock(&neigh->lock);
2664     if (neigh->nud_state & NUD_VALID)
2665     - ret = true;
2666     + ret = RT6_NUD_SUCCEED;
2667     #ifdef CONFIG_IPV6_ROUTER_PREF
2668     else if (!(neigh->nud_state & NUD_FAILED))
2669     - ret = true;
2670     + ret = RT6_NUD_SUCCEED;
2671     #endif
2672     read_unlock(&neigh->lock);
2673     + } else {
2674     + ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
2675     + RT6_NUD_SUCCEED : RT6_NUD_FAIL_SOFT;
2676     }
2677     rcu_read_unlock_bh();
2678    
2679     @@ -560,43 +569,52 @@ static int rt6_score_route(struct rt6_info *rt, int oif,
2680    
2681     m = rt6_check_dev(rt, oif);
2682     if (!m && (strict & RT6_LOOKUP_F_IFACE))
2683     - return -1;
2684     + return RT6_NUD_FAIL_HARD;
2685     #ifdef CONFIG_IPV6_ROUTER_PREF
2686     m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
2687     #endif
2688     - if (!rt6_check_neigh(rt) && (strict & RT6_LOOKUP_F_REACHABLE))
2689     - return -1;
2690     + if (strict & RT6_LOOKUP_F_REACHABLE) {
2691     + int n = rt6_check_neigh(rt);
2692     + if (n < 0)
2693     + return n;
2694     + }
2695     return m;
2696     }
2697    
2698     static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
2699     - int *mpri, struct rt6_info *match)
2700     + int *mpri, struct rt6_info *match,
2701     + bool *do_rr)
2702     {
2703     int m;
2704     + bool match_do_rr = false;
2705    
2706     if (rt6_check_expired(rt))
2707     goto out;
2708    
2709     m = rt6_score_route(rt, oif, strict);
2710     - if (m < 0)
2711     + if (m == RT6_NUD_FAIL_SOFT && !IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
2712     + match_do_rr = true;
2713     + m = 0; /* lowest valid score */
2714     + } else if (m < 0) {
2715     goto out;
2716     + }
2717     +
2718     + if (strict & RT6_LOOKUP_F_REACHABLE)
2719     + rt6_probe(rt);
2720    
2721     if (m > *mpri) {
2722     - if (strict & RT6_LOOKUP_F_REACHABLE)
2723     - rt6_probe(match);
2724     + *do_rr = match_do_rr;
2725     *mpri = m;
2726     match = rt;
2727     - } else if (strict & RT6_LOOKUP_F_REACHABLE) {
2728     - rt6_probe(rt);
2729     }
2730     -
2731     out:
2732     return match;
2733     }
2734    
2735     static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
2736     struct rt6_info *rr_head,
2737     - u32 metric, int oif, int strict)
2738     + u32 metric, int oif, int strict,
2739     + bool *do_rr)
2740     {
2741     struct rt6_info *rt, *match;
2742     int mpri = -1;
2743     @@ -604,10 +622,10 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
2744     match = NULL;
2745     for (rt = rr_head; rt && rt->rt6i_metric == metric;
2746     rt = rt->dst.rt6_next)
2747     - match = find_match(rt, oif, strict, &mpri, match);
2748     + match = find_match(rt, oif, strict, &mpri, match, do_rr);
2749     for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
2750     rt = rt->dst.rt6_next)
2751     - match = find_match(rt, oif, strict, &mpri, match);
2752     + match = find_match(rt, oif, strict, &mpri, match, do_rr);
2753    
2754     return match;
2755     }
2756     @@ -616,15 +634,16 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
2757     {
2758     struct rt6_info *match, *rt0;
2759     struct net *net;
2760     + bool do_rr = false;
2761    
2762     rt0 = fn->rr_ptr;
2763     if (!rt0)
2764     fn->rr_ptr = rt0 = fn->leaf;
2765    
2766     - match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
2767     + match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
2768     + &do_rr);
2769    
2770     - if (!match &&
2771     - (strict & RT6_LOOKUP_F_REACHABLE)) {
2772     + if (do_rr) {
2773     struct rt6_info *next = rt0->dst.rt6_next;
2774    
2775     /* no entries matched; do round-robin */
2776     @@ -1074,10 +1093,13 @@ static void ip6_link_failure(struct sk_buff *skb)
2777    
2778     rt = (struct rt6_info *) skb_dst(skb);
2779     if (rt) {
2780     - if (rt->rt6i_flags & RTF_CACHE)
2781     - rt6_update_expires(rt, 0);
2782     - else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
2783     + if (rt->rt6i_flags & RTF_CACHE) {
2784     + dst_hold(&rt->dst);
2785     + if (ip6_del_rt(rt))
2786     + dst_free(&rt->dst);
2787     + } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
2788     rt->rt6i_node->fn_sernum = -1;
2789     + }
2790     }
2791     }
2792    
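The enum turns the old boolean neighbour check into a tri-state score:
RT6_NUD_FAIL_HARD rejects the route outright, while RT6_NUD_FAIL_SOFT (possible
only without CONFIG_IPV6_ROUTER_PREF, when no neighbour entry exists yet) keeps
the route usable at the lowest valid score and sets *do_rr, so rt6_select()
advances fn->rr_ptr and round-robins across equal-metric routes instead of
pinning to the first one. Note also that rt6_probe() moves ahead of the score
comparison, so under RT6_LOOKUP_F_REACHABLE every surviving candidate is probed.
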
2793     diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
2794     index 3353634..60df36d 100644
2795     --- a/net/ipv6/sit.c
2796     +++ b/net/ipv6/sit.c
2797     @@ -589,7 +589,7 @@ static int ipip6_rcv(struct sk_buff *skb)
2798     tunnel->dev->stats.rx_errors++;
2799     goto out;
2800     }
2801     - } else {
2802     + } else if (!(tunnel->dev->flags&IFF_POINTOPOINT)) {
2803     if (is_spoofed_6rd(tunnel, iph->saddr,
2804     &ipv6_hdr(skb)->saddr) ||
2805     is_spoofed_6rd(tunnel, iph->daddr,
2806     diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
2807     index 42923b1..e7b28f9 100644
2808     --- a/net/ipv6/udp.c
2809     +++ b/net/ipv6/udp.c
2810     @@ -955,11 +955,16 @@ static int udp_v6_push_pending_frames(struct sock *sk)
2811     struct udphdr *uh;
2812     struct udp_sock *up = udp_sk(sk);
2813     struct inet_sock *inet = inet_sk(sk);
2814     - struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
2815     + struct flowi6 *fl6;
2816     int err = 0;
2817     int is_udplite = IS_UDPLITE(sk);
2818     __wsum csum = 0;
2819    
2820     + if (up->pending == AF_INET)
2821     + return udp_push_pending_frames(sk);
2822     +
2823     + fl6 = &inet->cork.fl.u.ip6;
2824     +
2825     /* Grab the skbuff where UDP header space exists. */
2826     if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
2827     goto out;
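
The guard handles dual-stack UDP sockets: when a send on an AF_INET6 socket went
through the IPv4 path (for instance to a v4-mapped address), up->pending is
AF_INET and the corked frames carry IPv4 state, so flushing them through the
IPv6 header builder would read the wrong member of the cork's flow union.
udp_push_pending_frames(), exported by the net/ipv4/udp.c hunk earlier in this
patch, finishes them on the IPv4 side instead.
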
2828     diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
2829     index 8dec687..5ebee2d 100644
2830     --- a/net/l2tp/l2tp_ppp.c
2831     +++ b/net/l2tp/l2tp_ppp.c
2832     @@ -1793,7 +1793,8 @@ static const struct proto_ops pppol2tp_ops = {
2833    
2834     static const struct pppox_proto pppol2tp_proto = {
2835     .create = pppol2tp_create,
2836     - .ioctl = pppol2tp_ioctl
2837     + .ioctl = pppol2tp_ioctl,
2838     + .owner = THIS_MODULE,
2839     };
2840    
2841     #ifdef CONFIG_L2TP_V3
2842     diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
2843     index d51852b..5792252 100644
2844     --- a/net/sched/sch_qfq.c
2845     +++ b/net/sched/sch_qfq.c
2846     @@ -113,7 +113,6 @@
2847    
2848     #define FRAC_BITS 30 /* fixed point arithmetic */
2849     #define ONE_FP (1UL << FRAC_BITS)
2850     -#define IWSUM (ONE_FP/QFQ_MAX_WSUM)
2851    
2852     #define QFQ_MTU_SHIFT 16 /* to support TSO/GSO */
2853     #define QFQ_MIN_LMAX 512 /* see qfq_slot_insert */
2854     @@ -189,6 +188,7 @@ struct qfq_sched {
2855     struct qfq_aggregate *in_serv_agg; /* Aggregate being served. */
2856     u32 num_active_agg; /* Num. of active aggregates */
2857     u32 wsum; /* weight sum */
2858     + u32 iwsum; /* inverse weight sum */
2859    
2860     unsigned long bitmaps[QFQ_MAX_STATE]; /* Group bitmaps. */
2861     struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
2862     @@ -314,6 +314,7 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
2863    
2864     q->wsum +=
2865     (int) agg->class_weight * (new_num_classes - agg->num_classes);
2866     + q->iwsum = ONE_FP / q->wsum;
2867    
2868     agg->num_classes = new_num_classes;
2869     }
2870     @@ -340,6 +341,10 @@ static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
2871     {
2872     if (!hlist_unhashed(&agg->nonfull_next))
2873     hlist_del_init(&agg->nonfull_next);
2874     + q->wsum -= agg->class_weight;
2875     + if (q->wsum != 0)
2876     + q->iwsum = ONE_FP / q->wsum;
2877     +
2878     if (q->in_serv_agg == agg)
2879     q->in_serv_agg = qfq_choose_next_agg(q);
2880     kfree(agg);
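These sch_qfq.c hunks replace the compile-time IWSUM, which assumed the worst-case QFQ_MAX_WSUM, with a runtime q->iwsum recomputed from the actual weight sum on every aggregate update and teardown, guarding against wsum == 0. A sketch of the fixed-point bookkeeping, reusing FRAC_BITS/ONE_FP as defined in the file:

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 30                    /* as in sch_qfq.c */
#define ONE_FP    (1UL << FRAC_BITS)

struct sched { unsigned long wsum, iwsum; uint64_t V; };

/* Keep iwsum = ONE_FP / wsum in sync with every weight change, as
 * qfq_update_agg()/qfq_destroy_agg() now do. */
static void set_wsum(struct sched *q, unsigned long wsum)
{
        q->wsum = wsum;
        if (wsum != 0)                  /* avoid dividing by zero */
                q->iwsum = ONE_FP / wsum;
}

int main(void)
{
        struct sched q = { 0, 0, 0 };

        set_wsum(&q, 64);
        q.V += (uint64_t)1500 * q.iwsum;        /* dequeue of 1500 bytes */
        /* V advanced by len/wsum in fixed point: 1500 * 2^30 / 64. */
        printf("V = %llu\n", (unsigned long long)q.V);
        return 0;
}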
2881     @@ -827,38 +832,60 @@ static void qfq_make_eligible(struct qfq_sched *q)
2882     }
2883     }
2884    
2885     -
2886     /*
2887     - * The index of the slot in which the aggregate is to be inserted must
2888     - * not be higher than QFQ_MAX_SLOTS-2. There is a '-2' and not a '-1'
2889     - * because the start time of the group may be moved backward by one
2890     - * slot after the aggregate has been inserted, and this would cause
2891     - * non-empty slots to be right-shifted by one position.
2892     + * The index of the slot in which the input aggregate agg is to be
2893     + * inserted must not be higher than QFQ_MAX_SLOTS-2. There is a '-2'
2894     + * and not a '-1' because the start time of the group may be moved
2895     + * backward by one slot after the aggregate has been inserted, and
2896     + * this would cause non-empty slots to be right-shifted by one
2897     + * position.
2898     + *
2899     + * QFQ+ fully satisfies this bound to the slot index if the parameters
2900     + * of the classes are not changed dynamically, and if QFQ+ never
2901     + * happens to postpone the service of agg unjustly, i.e., it never
2902     + * happens that the aggregate becomes backlogged and eligible, or just
2903     + * eligible, while an aggregate with a higher approximated finish time
2904     + * is being served. In particular, in this case QFQ+ guarantees that
2905     + * the timestamps of agg are low enough that the slot index is never
2906     + * higher than 2. Unfortunately, QFQ+ cannot provide the same
2907     + * guarantee if it happens to unjustly postpone the service of agg, or
2908     + * if the parameters of some class are changed.
2909     + *
2910     + * As for the first event, i.e., an out-of-order service, the
2911     + * upper bound to the slot index guaranteed by QFQ+ grows to
2912     + * 2 +
2913     + * QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
2914     + * (current_max_weight/current_wsum) <= 2 + 8 * 128 * 1.
2915     *
2916     - * If the weight and lmax (max_pkt_size) of the classes do not change,
2917     - * then QFQ+ does meet the above contraint according to the current
2918     - * values of its parameters. In fact, if the weight and lmax of the
2919     - * classes do not change, then, from the theory, QFQ+ guarantees that
2920     - * the slot index is never higher than
2921     - * 2 + QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
2922     - * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM) = 2 + 8 * 128 * (1 / 64) = 18
2923     + * The following function deals with this problem by backward-shifting
2924     + * the timestamps of agg, if needed, so as to guarantee that the slot
2925     + * index is never higher than QFQ_MAX_SLOTS-2. This backward-shift may
2926     + * cause the service of other aggregates to be postponed, yet the
2927     + * worst-case guarantees of these aggregates are not violated. In
2928     + * fact, in case of no out-of-order service, the timestamps of agg
2929     + * would have been even lower than they are after the backward shift,
2930     + * because QFQ+ would have guaranteed a maximum value equal to 2 for
2931     + * the slot index, and 2 < QFQ_MAX_SLOTS-2. Hence the aggregates whose
2932     + * service is postponed because of the backward-shift would have
2933     + * however waited for the service of agg before being served.
2934     *
2935     - * When the weight of a class is increased or the lmax of the class is
2936     - * decreased, a new aggregate with smaller slot size than the original
2937     - * parent aggregate of the class may happen to be activated. The
2938     - * activation of this aggregate should be properly delayed to when the
2939     - * service of the class has finished in the ideal system tracked by
2940     - * QFQ+. If the activation of the aggregate is not delayed to this
2941     - * reference time instant, then this aggregate may be unjustly served
2942     - * before other aggregates waiting for service. This may cause the
2943     - * above bound to the slot index to be violated for some of these
2944     - * unlucky aggregates.
2945     + * The other event that may cause the slot index to be higher than 2
2946     + * for agg is a recent change of the parameters of some class. If the
2947     + * weight of a class is increased or the lmax (max_pkt_size) of the
2948     + * class is decreased, then a new aggregate with smaller slot size
2949     + * than the original parent aggregate of the class may happen to be
2950     + * activated. The activation of this aggregate should be properly
2951     + * delayed to when the service of the class has finished in the ideal
2952     + * system tracked by QFQ+. If the activation of the aggregate is not
2953     + * delayed to this reference time instant, then this aggregate may be
2954     + * unjustly served before other aggregates waiting for service. This
2955     + * may cause the above bound to the slot index to be violated for some
2956     + * of these unlucky aggregates.
2957     *
2958     * Instead of delaying the activation of the new aggregate, which is
2959     - * quite complex, the following inaccurate but simple solution is used:
2960     - * if the slot index is higher than QFQ_MAX_SLOTS-2, then the
2961     - * timestamps of the aggregate are shifted backward so as to let the
2962     - * slot index become equal to QFQ_MAX_SLOTS-2.
2963     + * quite complex, the above-discussed capping of the slot index is
2964     + * used to handle also the consequences of a change of the parameters
2965     + * of a class.
2966     */
2967     static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
2968     u64 roundedS)
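Plugging the constants from this file into the rewritten comment makes the two bounds concrete. With QFQ_MTU_SHIFT = 16 and QFQ_MIN_LMAX = 512 (both visible above), (1 << 16) / 512 = 128; taking QFQ_MAX_AGG_CLASSES = 8 per the comment's own "8 * 128", the in-order bound is 2 + 8 * 128 * (1/64) = 18 slots, while the out-of-order worst case is 2 + 8 * 128 * 1 = 1026. Assuming QFQ_MAX_SLOTS is 32, as defined elsewhere in sch_qfq.c, 18 fits under QFQ_MAX_SLOTS - 2 = 30 but 1026 does not, which is why the timestamps must be capped:

#include <stdio.h>

#define QFQ_MTU_SHIFT       16  /* from the hunk above */
#define QFQ_MIN_LMAX        512 /* from the hunk above */
#define QFQ_MAX_AGG_CLASSES 8   /* per the comment's "8 * 128" */
#define QFQ_MAX_SLOTS       32  /* assumed value from sch_qfq.c */

int main(void)
{
        int ratio = (1 << QFQ_MTU_SHIFT) / QFQ_MIN_LMAX;        /* 128 */

        /* Static worst case used before: max_weight/max_wsum = 1/64. */
        printf("in-order bound: %4d <= %d\n",
               2 + QFQ_MAX_AGG_CLASSES * ratio / 64, QFQ_MAX_SLOTS - 2);

        /* After an unjust postponement the weight ratio can reach 1. */
        printf("worst case:     %4d >  %d  -> cap the slot index\n",
               2 + QFQ_MAX_AGG_CLASSES * ratio, QFQ_MAX_SLOTS - 2);
        return 0;
}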
2969     @@ -1077,7 +1104,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
2970     else
2971     in_serv_agg->budget -= len;
2972    
2973     - q->V += (u64)len * IWSUM;
2974     + q->V += (u64)len * q->iwsum;
2975     pr_debug("qfq dequeue: len %u F %lld now %lld\n",
2976     len, (unsigned long long) in_serv_agg->F,
2977     (unsigned long long) q->V);
2978     diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
2979     index 37ca969..22c88d2 100644
2980     --- a/net/x25/af_x25.c
2981     +++ b/net/x25/af_x25.c
2982     @@ -1583,11 +1583,11 @@ out_cud_release:
2983     case SIOCX25CALLACCPTAPPRV: {
2984     rc = -EINVAL;
2985     lock_sock(sk);
2986     - if (sk->sk_state != TCP_CLOSE)
2987     - break;
2988     - clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
2989     + if (sk->sk_state == TCP_CLOSE) {
2990     + clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
2991     + rc = 0;
2992     + }
2993     release_sock(sk);
2994     - rc = 0;
2995     break;
2996     }
2997    
2998     @@ -1595,14 +1595,15 @@ out_cud_release:
2999     rc = -EINVAL;
3000     lock_sock(sk);
3001     if (sk->sk_state != TCP_ESTABLISHED)
3002     - break;
3003     + goto out_sendcallaccpt_release;
3004     /* must call accptapprv above */
3005     if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
3006     - break;
3007     + goto out_sendcallaccpt_release;
3008     x25_write_internal(sk, X25_CALL_ACCEPTED);
3009     x25->state = X25_STATE_3;
3010     - release_sock(sk);
3011     rc = 0;
3012     +out_sendcallaccpt_release:
3013     + release_sock(sk);
3014     break;
3015     }
3016    
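Both X.25 hunks close lock leaks: the old code could hit break while the socket was still locked (SIOCX25SENDCALLACCPT) or set rc = 0 even after bailing out early (SIOCX25CALLACCPTAPPRV). The rewrite funnels every exit through release_sock(). A sketch of that single-exit unlock pattern with stub locking:

#include <stdio.h>

static int locked;
static void lock_sock_stub(void)    { locked = 1; }
static void release_sock_stub(void) { locked = 0; }

/* Every early bail-out goes through one label that drops the lock,
 * mirroring the out_sendcallaccpt_release label the patch adds. */
static int do_ioctl(int state_ok, int approved)
{
        int rc = -1;                    /* -EINVAL in the kernel code */

        lock_sock_stub();
        if (!state_ok)
                goto out_release;
        if (!approved)
                goto out_release;
        rc = 0;                         /* the real work happens here */
out_release:
        release_sock_stub();
        return rc;
}

int main(void)
{
        printf("rc=%d locked=%d\n", do_ioctl(0, 1), locked);    /* -1, 0 */
        printf("rc=%d locked=%d\n", do_ioctl(1, 1), locked);    /*  0, 0 */
        return 0;
}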
3017     diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
3018     index 1d9d642..e849e1e 100644
3019     --- a/sound/pci/hda/patch_sigmatel.c
3020     +++ b/sound/pci/hda/patch_sigmatel.c
3021     @@ -417,9 +417,11 @@ static void stac_update_outputs(struct hda_codec *codec)
3022     val &= ~spec->eapd_mask;
3023     else
3024     val |= spec->eapd_mask;
3025     - if (spec->gpio_data != val)
3026     + if (spec->gpio_data != val) {
3027     + spec->gpio_data = val;
3028     stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir,
3029     val);
3030     + }
3031     }
3032     }
3033    
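The stac_update_outputs() change updates the cached spec->gpio_data at the moment it writes the new value, so the next comparison reflects what the hardware actually holds; with a stale cache, a later call could misjudge whether a write was needed. A sketch of write-through caching of a register value; hw_write_stub() merely stands in for stac_gpio_set():

#include <stdio.h>

struct state { unsigned int cached; };

static void hw_write_stub(unsigned int val)
{
        printf("  hw write 0x%x\n", val);
}

/* Update the cache together with the hardware, as the fix does. */
static void update(struct state *s, unsigned int val)
{
        if (s->cached != val) {
                s->cached = val;
                hw_write_stub(val);
        }
}

int main(void)
{
        struct state s = { 0 };

        update(&s, 0x2);        /* writes: value differs from cache */
        update(&s, 0x2);        /* no write: cache tracks hardware now */
        return 0;
}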
3034     @@ -3227,7 +3229,7 @@ static const struct hda_fixup stac927x_fixups[] = {
3035     /* configure the analog microphone on some laptops */
3036     { 0x0c, 0x90a79130 },
3037     /* correct the front output jack as a hp out */
3038     - { 0x0f, 0x0227011f },
3039     + { 0x0f, 0x0221101f },
3040     /* correct the front input jack as a mic */
3041     { 0x0e, 0x02a79130 },
3042     {}
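The corrected default-config dword can be read with the HD Audio spec's field layout (31:30 connectivity, 29:24 location, 23:20 device, 19:16 connection type, 15:12 color, 11:8 misc, 7:4 association, 3:0 sequence): 0x0221101f describes a front-panel headphone out on a 1/8" black jack, matching the comment above it. A small decoder to compare the old and new values:

#include <stdio.h>

/* Decode an HDA pin default-config dword per the HD Audio spec. */
static void decode(unsigned int cfg)
{
        printf("0x%08x: conn=%u loc=0x%02x dev=0x%x type=0x%x "
               "color=0x%x assoc=0x%x seq=0x%x\n",
               cfg, cfg >> 30, (cfg >> 24) & 0x3f, (cfg >> 20) & 0xf,
               (cfg >> 16) & 0xf, (cfg >> 12) & 0xf,
               (cfg >> 4) & 0xf, cfg & 0xf);
}

int main(void)
{
        decode(0x0227011f);     /* old value */
        decode(0x0221101f);     /* fixed value: front HP out, 1/8" jack */
        return 0;
}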
3043     @@ -3608,20 +3610,18 @@ static int stac_parse_auto_config(struct hda_codec *codec)
3044     static int stac_init(struct hda_codec *codec)
3045     {
3046     struct sigmatel_spec *spec = codec->spec;
3047     - unsigned int gpio;
3048     int i;
3049    
3050     /* override some hints */
3051     stac_store_hints(codec);
3052    
3053     /* set up GPIO */
3054     - gpio = spec->gpio_data;
3055     /* turn on EAPD statically when spec->eapd_switch isn't set.
3056     * otherwise, unsol event will turn it on/off dynamically
3057     */
3058     if (!spec->eapd_switch)
3059     - gpio |= spec->eapd_mask;
3060     - stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, gpio);
3061     + spec->gpio_data |= spec->eapd_mask;
3062     + stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data);
3063    
3064     snd_hda_gen_init(codec);
3065    
3066     @@ -3921,6 +3921,7 @@ static void stac_setup_gpio(struct hda_codec *codec)
3067     {
3068     struct sigmatel_spec *spec = codec->spec;
3069    
3070     + spec->gpio_mask |= spec->eapd_mask;
3071     if (spec->gpio_led) {
3072     if (!spec->vref_mute_led_nid) {
3073     spec->gpio_mask |= spec->gpio_led;
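The last two hunks keep EAPD handling inside the cached GPIO state: stac_init() folds eapd_mask into spec->gpio_data instead of a throwaway local, and stac_setup_gpio() adds eapd_mask to spec->gpio_mask so the EAPD line is always among the driven bits. A sketch of mask-based GPIO updates showing why a bit left out of the mask or the cached data would be clobbered; gpio_set() is an illustration, not the driver function:

#include <stdio.h>

/* Only bits inside `mask` are driven; anything else is rewritten
 * from `data` on the next update, so EAPD must live in both. */
static unsigned int gpio_set(unsigned int hw, unsigned int mask,
                             unsigned int data)
{
        return (hw & ~mask) | (data & mask);
}

int main(void)
{
        unsigned int led = 0x1, eapd = 0x2, hw = 0;

        /* EAPD included in both mask and data: it survives updates. */
        hw = gpio_set(hw, led | eapd, led | eapd);      /* LED on  */
        hw = gpio_set(hw, led | eapd, eapd);            /* LED off */
        printf("hw = 0x%x (EAPD still set)\n", hw);     /* 0x2 */
        return 0;
}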
3074     diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
3075     index 8221ff2..074aaf7 100644
3076     --- a/sound/usb/6fire/pcm.c
3077     +++ b/sound/usb/6fire/pcm.c
3078     @@ -543,7 +543,7 @@ static snd_pcm_uframes_t usb6fire_pcm_pointer(
3079     snd_pcm_uframes_t ret;
3080    
3081     if (rt->panic || !sub)
3082     - return SNDRV_PCM_STATE_XRUN;
3083     + return SNDRV_PCM_POS_XRUN;
3084    
3085     spin_lock_irqsave(&sub->lock, flags);
3086     ret = sub->dma_off;
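The 6fire change fixes a sentinel mix-up: a PCM .pointer callback returns a frame position (snd_pcm_uframes_t), and an xrun is signalled with SNDRV_PCM_POS_XRUN, defined in <sound/pcm.h> as (snd_pcm_uframes_t)-1. SNDRV_PCM_STATE_XRUN is a state enum instead (value 4 in 3.10-era headers), which a caller would have misread as a valid offset of four frames. A sketch of the difference:

#include <stdio.h>

typedef unsigned long snd_pcm_uframes_t;

/* Mirrors <sound/pcm.h>: the all-ones position means "xrun". */
#define SNDRV_PCM_POS_XRUN   ((snd_pcm_uframes_t)-1)
/* State enum value (SNDRV_PCM_STATE_XRUN) in 3.10-era headers. */
#define SNDRV_PCM_STATE_XRUN 4

int main(void)
{
        printf("state enum as position: %lu (misread as 4 frames)\n",
               (snd_pcm_uframes_t)SNDRV_PCM_STATE_XRUN);
        printf("position sentinel:      %lu (recognised as xrun)\n",
               SNDRV_PCM_POS_XRUN);
        return 0;
}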