Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.10/0126-3.10.27-all-fixes.patch



Revision 2378
Fri Jan 24 08:19:51 2014 UTC by niro
File size: 65810 bytes
-linux-3.10.27
1 niro 2378 diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
2     index 23dd80e82b8e..0f4376ec8852 100644
3     --- a/Documentation/networking/packet_mmap.txt
4     +++ b/Documentation/networking/packet_mmap.txt
5     @@ -123,6 +123,16 @@ Transmission process is similar to capture as shown below.
6     [shutdown] close() --------> destruction of the transmission socket and
7     deallocation of all associated resources.
8    
9     +Socket creation and destruction is also straight forward, and is done
10     +the same way as in capturing described in the previous paragraph:
11     +
12     + int fd = socket(PF_PACKET, mode, 0);
13     +
14     +The protocol can optionally be 0 in case we only want to transmit
15     +via this socket, which avoids an expensive call to packet_rcv().
16     +In this case, you also need to bind(2) the TX_RING with sll_protocol = 0
17     +set. Otherwise, htons(ETH_P_ALL) or any other protocol, for example.
18     +
19     Binding the socket to your network interface is mandatory (with zero copy) to
20     know the header size of frames used in the circular buffer.
21    
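For reference, a minimal user-space sketch of the TX-only socket setup that the packet_mmap.txt hunk above describes: protocol 0 on socket creation and sll_protocol = 0 on bind. The SOCK_RAW mode, the "eth0" interface name and the omitted PACKET_TX_RING/mmap() setup are assumptions for illustration only and are not part of the patch.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <net/if.h>

int main(void)
{
	/* Protocol 0: transmit-only socket, so the expensive packet_rcv()
	 * path is never entered for this socket. */
	int fd = socket(PF_PACKET, SOCK_RAW, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	struct sockaddr_ll ll;
	memset(&ll, 0, sizeof(ll));
	ll.sll_family   = AF_PACKET;
	ll.sll_protocol = 0;                      /* must stay 0 to match the socket */
	ll.sll_ifindex  = if_nametoindex("eth0"); /* assumed interface name */

	if (bind(fd, (struct sockaddr *)&ll, sizeof(ll)) < 0) {
		perror("bind");
		close(fd);
		return 1;
	}

	/* PACKET_TX_RING setup and mmap() of the ring would follow here. */

	close(fd);
	return 0;
}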
22     diff --git a/Makefile b/Makefile
23     index ac07707a2f9e..09675a57059c 100644
24     --- a/Makefile
25     +++ b/Makefile
26     @@ -1,6 +1,6 @@
27     VERSION = 3
28     PATCHLEVEL = 10
29     -SUBLEVEL = 26
30     +SUBLEVEL = 27
31     EXTRAVERSION =
32     NAME = TOSSUG Baby Fish
33    
34     diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
35     index fc9fb3d526e2..cdbdc4dfef22 100644
36     --- a/arch/arm/boot/dts/exynos5250.dtsi
37     +++ b/arch/arm/boot/dts/exynos5250.dtsi
38     @@ -545,7 +545,7 @@
39     compatible = "arm,pl330", "arm,primecell";
40     reg = <0x10800000 0x1000>;
41     interrupts = <0 33 0>;
42     - clocks = <&clock 271>;
43     + clocks = <&clock 346>;
44     clock-names = "apb_pclk";
45     #dma-cells = <1>;
46     #dma-channels = <8>;
47     diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
48     index 6b9567e19bdc..d6a0fdb6c2ee 100644
49     --- a/arch/arm/kernel/traps.c
50     +++ b/arch/arm/kernel/traps.c
51     @@ -35,7 +35,13 @@
52     #include <asm/tls.h>
53     #include <asm/system_misc.h>
54    
55     -static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
56     +static const char *handler[]= {
57     + "prefetch abort",
58     + "data abort",
59     + "address exception",
60     + "interrupt",
61     + "undefined instruction",
62     +};
63    
64     void *vectors_page;
65    
66     diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
67     index 9ee78f7b4990..782f6c71fa0a 100644
68     --- a/arch/arm/mach-footbridge/dc21285-timer.c
69     +++ b/arch/arm/mach-footbridge/dc21285-timer.c
70     @@ -96,11 +96,12 @@ static struct irqaction footbridge_timer_irq = {
71     void __init footbridge_timer_init(void)
72     {
73     struct clock_event_device *ce = &ckevt_dc21285;
74     + unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);
75    
76     - clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
77     + clocksource_register_hz(&cksrc_dc21285, rate);
78    
79     setup_irq(ce->irq, &footbridge_timer_irq);
80    
81     ce->cpumask = cpumask_of(smp_processor_id());
82     - clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff);
83     + clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
84     }
85     diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
86     index b85b2882dbd0..803c9fcfb359 100644
87     --- a/arch/arm/mach-shmobile/board-armadillo800eva.c
88     +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
89     @@ -437,7 +437,7 @@ static struct platform_device lcdc0_device = {
90     .id = 0,
91     .dev = {
92     .platform_data = &lcdc0_info,
93     - .coherent_dma_mask = ~0,
94     + .coherent_dma_mask = DMA_BIT_MASK(32),
95     },
96     };
97    
98     @@ -534,7 +534,7 @@ static struct platform_device hdmi_lcdc_device = {
99     .id = 1,
100     .dev = {
101     .platform_data = &hdmi_lcdc_info,
102     - .coherent_dma_mask = ~0,
103     + .coherent_dma_mask = DMA_BIT_MASK(32),
104     },
105     };
106    
107     diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
108     index e6b775a10aad..4d610e6ea987 100644
109     --- a/arch/arm/mach-shmobile/board-kzm9g.c
110     +++ b/arch/arm/mach-shmobile/board-kzm9g.c
111     @@ -332,7 +332,7 @@ static struct platform_device lcdc_device = {
112     .resource = lcdc_resources,
113     .dev = {
114     .platform_data = &lcdc_info,
115     - .coherent_dma_mask = ~0,
116     + .coherent_dma_mask = DMA_BIT_MASK(32),
117     },
118     };
119    
120     diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
121     index fa3407da682a..3b917bc76a49 100644
122     --- a/arch/arm/mach-shmobile/board-mackerel.c
123     +++ b/arch/arm/mach-shmobile/board-mackerel.c
124     @@ -421,7 +421,7 @@ static struct platform_device lcdc_device = {
125     .resource = lcdc_resources,
126     .dev = {
127     .platform_data = &lcdc_info,
128     - .coherent_dma_mask = ~0,
129     + .coherent_dma_mask = DMA_BIT_MASK(32),
130     },
131     };
132    
133     @@ -497,7 +497,7 @@ static struct platform_device hdmi_lcdc_device = {
134     .id = 1,
135     .dev = {
136     .platform_data = &hdmi_lcdc_info,
137     - .coherent_dma_mask = ~0,
138     + .coherent_dma_mask = DMA_BIT_MASK(32),
139     },
140     };
141    
142     diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
143     index f0e2784e7cca..2f9b751878ba 100644
144     --- a/arch/parisc/include/asm/cacheflush.h
145     +++ b/arch/parisc/include/asm/cacheflush.h
146     @@ -125,42 +125,38 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
147     void mark_rodata_ro(void);
148     #endif
149    
150     -#ifdef CONFIG_PA8X00
151     -/* Only pa8800, pa8900 needs this */
152     -
153     #include <asm/kmap_types.h>
154    
155     #define ARCH_HAS_KMAP
156    
157     -void kunmap_parisc(void *addr);
158     -
159     static inline void *kmap(struct page *page)
160     {
161     might_sleep();
162     + flush_dcache_page(page);
163     return page_address(page);
164     }
165    
166     static inline void kunmap(struct page *page)
167     {
168     - kunmap_parisc(page_address(page));
169     + flush_kernel_dcache_page_addr(page_address(page));
170     }
171    
172     static inline void *kmap_atomic(struct page *page)
173     {
174     pagefault_disable();
175     + flush_dcache_page(page);
176     return page_address(page);
177     }
178    
179     static inline void __kunmap_atomic(void *addr)
180     {
181     - kunmap_parisc(addr);
182     + flush_kernel_dcache_page_addr(addr);
183     pagefault_enable();
184     }
185    
186     #define kmap_atomic_prot(page, prot) kmap_atomic(page)
187     #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
188     #define kmap_atomic_to_page(ptr) virt_to_page(ptr)
189     -#endif
190    
191     #endif /* _PARISC_CACHEFLUSH_H */
192    
193     diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
194     index b7adb2ac049c..c53fc63149e8 100644
195     --- a/arch/parisc/include/asm/page.h
196     +++ b/arch/parisc/include/asm/page.h
197     @@ -28,9 +28,8 @@ struct page;
198    
199     void clear_page_asm(void *page);
200     void copy_page_asm(void *to, void *from);
201     -void clear_user_page(void *vto, unsigned long vaddr, struct page *pg);
202     -void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
203     - struct page *pg);
204     +#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
205     +#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)
206    
207     /* #define CONFIG_PARISC_TMPALIAS */
208    
209     diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
210     index c035673209f7..a72545554a31 100644
211     --- a/arch/parisc/kernel/cache.c
212     +++ b/arch/parisc/kernel/cache.c
213     @@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr)
214     }
215     EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
216    
217     -void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
218     -{
219     - clear_page_asm(vto);
220     - if (!parisc_requires_coherency())
221     - flush_kernel_dcache_page_asm(vto);
222     -}
223     -EXPORT_SYMBOL(clear_user_page);
224     -
225     -void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
226     - struct page *pg)
227     -{
228     - /* Copy using kernel mapping. No coherency is needed
229     - (all in kmap/kunmap) on machines that don't support
230     - non-equivalent aliasing. However, the `from' page
231     - needs to be flushed before it can be accessed through
232     - the kernel mapping. */
233     - preempt_disable();
234     - flush_dcache_page_asm(__pa(vfrom), vaddr);
235     - preempt_enable();
236     - copy_page_asm(vto, vfrom);
237     - if (!parisc_requires_coherency())
238     - flush_kernel_dcache_page_asm(vto);
239     -}
240     -EXPORT_SYMBOL(copy_user_page);
241     -
242     -#ifdef CONFIG_PA8X00
243     -
244     -void kunmap_parisc(void *addr)
245     -{
246     - if (parisc_requires_coherency())
247     - flush_kernel_dcache_page_addr(addr);
248     -}
249     -EXPORT_SYMBOL(kunmap_parisc);
250     -#endif
251     -
252     void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
253     {
254     unsigned long flags;
255     diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
256     index e25cc33ec54d..e72b2e41499e 100644
257     --- a/arch/x86/include/asm/fpu-internal.h
258     +++ b/arch/x86/include/asm/fpu-internal.h
259     @@ -295,12 +295,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
260     /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
261     is pending. Clear the x87 state here by setting it to fixed
262     values. "m" is a random variable that should be in L1 */
263     - alternative_input(
264     - ASM_NOP8 ASM_NOP2,
265     - "emms\n\t" /* clear stack tags */
266     - "fildl %P[addr]", /* set F?P to defined value */
267     - X86_FEATURE_FXSAVE_LEAK,
268     - [addr] "m" (tsk->thread.fpu.has_fpu));
269     + if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
270     + asm volatile(
271     + "fnclex\n\t"
272     + "emms\n\t"
273     + "fildl %P[addr]" /* set F?P to defined value */
274     + : : [addr] "m" (tsk->thread.fpu.has_fpu));
275     + }
276    
277     return fpu_restore_checking(&tsk->thread.fpu);
278     }
279     diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
280     index 95332717e4f5..99427d7307af 100644
281     --- a/drivers/acpi/battery.c
282     +++ b/drivers/acpi/battery.c
283     @@ -68,6 +68,7 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
284     MODULE_DESCRIPTION("ACPI Battery Driver");
285     MODULE_LICENSE("GPL");
286    
287     +static int battery_bix_broken_package;
288     static unsigned int cache_time = 1000;
289     module_param(cache_time, uint, 0644);
290     MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
291     @@ -443,7 +444,12 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
292     ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
293     return -ENODEV;
294     }
295     - if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
296     +
297     + if (battery_bix_broken_package)
298     + result = extract_package(battery, buffer.pointer,
299     + extended_info_offsets + 1,
300     + ARRAY_SIZE(extended_info_offsets) - 1);
301     + else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
302     result = extract_package(battery, buffer.pointer,
303     extended_info_offsets,
304     ARRAY_SIZE(extended_info_offsets));
305     @@ -1064,6 +1070,17 @@ static int battery_notify(struct notifier_block *nb,
306     return 0;
307     }
308    
309     +static struct dmi_system_id bat_dmi_table[] = {
310     + {
311     + .ident = "NEC LZ750/LS",
312     + .matches = {
313     + DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
314     + DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
315     + },
316     + },
317     + {},
318     +};
319     +
320     static int acpi_battery_add(struct acpi_device *device)
321     {
322     int result = 0;
323     @@ -1174,6 +1191,8 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
324     if (!acpi_battery_dir)
325     return;
326     #endif
327     + if (dmi_check_system(bat_dmi_table))
328     + battery_bix_broken_package = 1;
329     if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
330     #ifdef CONFIG_ACPI_PROCFS_POWER
331     acpi_unlock_battery_dir(acpi_battery_dir);
332     diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
333     index 3d67f76407c1..3f1794f4a8bf 100644
334     --- a/drivers/ata/ahci.c
335     +++ b/drivers/ata/ahci.c
336     @@ -427,6 +427,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
337     .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
338     { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
339     .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
340     + { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
341     + PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
342     + .driver_data = board_ahci_yes_fbs }, /* 88se9170 */
343     { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
344     .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
345     { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
346     diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
347     index 2168d15bc728..57a818b2b5f2 100644
348     --- a/drivers/char/tpm/tpm_ppi.c
349     +++ b/drivers/char/tpm/tpm_ppi.c
350     @@ -27,15 +27,18 @@ static char *tpm_device_name = "TPM";
351     static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context,
352     void **return_value)
353     {
354     - acpi_status status;
355     + acpi_status status = AE_OK;
356     struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
357     - status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
358     - if (strstr(buffer.pointer, context) != NULL) {
359     - *return_value = handle;
360     +
361     + if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) {
362     + if (strstr(buffer.pointer, context) != NULL) {
363     + *return_value = handle;
364     + status = AE_CTRL_TERMINATE;
365     + }
366     kfree(buffer.pointer);
367     - return AE_CTRL_TERMINATE;
368     }
369     - return AE_OK;
370     +
371     + return status;
372     }
373    
374     static inline void ppi_assign_params(union acpi_object params[4],
375     diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
376     index 6d9674160430..2ce22447d76e 100644
377     --- a/drivers/clk/clk-divider.c
378     +++ b/drivers/clk/clk-divider.c
379     @@ -87,7 +87,7 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
380     return 0;
381     }
382    
383     -static unsigned int _get_val(struct clk_divider *divider, u8 div)
384     +static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
385     {
386     if (divider->flags & CLK_DIVIDER_ONE_BASED)
387     return div;
388     diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
389     index 3c1f88868f29..b4b283b47308 100644
390     --- a/drivers/clk/samsung/clk-exynos4.c
391     +++ b/drivers/clk/samsung/clk-exynos4.c
392     @@ -40,7 +40,7 @@
393     #define SRC_TOP1 0xc214
394     #define SRC_CAM 0xc220
395     #define SRC_TV 0xc224
396     -#define SRC_MFC 0xcc28
397     +#define SRC_MFC 0xc228
398     #define SRC_G3D 0xc22c
399     #define E4210_SRC_IMAGE 0xc230
400     #define SRC_LCD0 0xc234
401     diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
402     index 22d7699e7ced..92afc060673e 100644
403     --- a/drivers/clk/samsung/clk-exynos5250.c
404     +++ b/drivers/clk/samsung/clk-exynos5250.c
405     @@ -325,8 +325,8 @@ struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
406     GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0),
407     GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0),
408     GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
409     - GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0),
410     - GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0),
411     + GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 2, 0, 0),
412     + GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 1, 0, 0),
413     GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
414     GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0),
415     GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
416     @@ -377,7 +377,8 @@ struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
417     GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0),
418     GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
419     GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
420     - GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
421     + GATE(sysreg, "sysreg", "aclk66",
422     + GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0),
423     GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
424     GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
425     GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
426     diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
427     index 4329a29a5310..314184932293 100644
428     --- a/drivers/clocksource/em_sti.c
429     +++ b/drivers/clocksource/em_sti.c
430     @@ -301,7 +301,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
431     ced->name = dev_name(&p->pdev->dev);
432     ced->features = CLOCK_EVT_FEAT_ONESHOT;
433     ced->rating = 200;
434     - ced->cpumask = cpumask_of(0);
435     + ced->cpumask = cpu_possible_mask;
436     ced->set_next_event = em_sti_clock_event_next;
437     ced->set_mode = em_sti_clock_event_mode;
438    
439     diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
440     index 7054c579d451..a22fb3e47256 100644
441     --- a/drivers/cpufreq/intel_pstate.c
442     +++ b/drivers/cpufreq/intel_pstate.c
443     @@ -516,7 +516,8 @@ static void intel_pstate_timer_func(unsigned long __data)
444     }
445    
446     #define ICPU(model, policy) \
447     - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }
448     + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
449     + (unsigned long)&policy }
450    
451     static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
452     ICPU(0x2a, default_policy),
453     diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
454     index c45ad478f462..f3c5d39f138e 100644
455     --- a/drivers/gpio/gpio-rcar.c
456     +++ b/drivers/gpio/gpio-rcar.c
457     @@ -333,7 +333,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
458     }
459    
460     if (devm_request_irq(&pdev->dev, irq->start,
461     - gpio_rcar_irq_handler, 0, name, p)) {
462     + gpio_rcar_irq_handler, IRQF_SHARED, name, p)) {
463     dev_err(&pdev->dev, "failed to request IRQ\n");
464     ret = -ENOENT;
465     goto err1;
466     diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
467     index e2d7f38447cc..3044b07230db 100644
468     --- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
469     +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
470     @@ -1295,7 +1295,11 @@ init_jump(struct nvbios_init *init)
471     u16 offset = nv_ro16(bios, init->offset + 1);
472    
473     trace("JUMP\t0x%04x\n", offset);
474     - init->offset = offset;
475     +
476     + if (init_exec(init))
477     + init->offset = offset;
478     + else
479     + init->offset += 3;
480     }
481    
482     /**
483     diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
484     index 4f762bc9456a..1be9156a3950 100644
485     --- a/drivers/hid/hid-logitech-dj.c
486     +++ b/drivers/hid/hid-logitech-dj.c
487     @@ -192,6 +192,7 @@ static struct hid_ll_driver logi_dj_ll_driver;
488     static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
489     size_t count,
490     unsigned char report_type);
491     +static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev);
492    
493     static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
494     struct dj_report *dj_report)
495     @@ -232,6 +233,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
496     if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
497     SPFUNCTION_DEVICE_LIST_EMPTY) {
498     dbg_hid("%s: device list is empty\n", __func__);
499     + djrcv_dev->querying_devices = false;
500     return;
501     }
502    
503     @@ -242,6 +244,12 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
504     return;
505     }
506    
507     + if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
508     + /* The device is already known. No need to reallocate it. */
509     + dbg_hid("%s: device is already known\n", __func__);
510     + return;
511     + }
512     +
513     dj_hiddev = hid_allocate_device();
514     if (IS_ERR(dj_hiddev)) {
515     dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n",
516     @@ -305,6 +313,7 @@ static void delayedwork_callback(struct work_struct *work)
517     struct dj_report dj_report;
518     unsigned long flags;
519     int count;
520     + int retval;
521    
522     dbg_hid("%s\n", __func__);
523    
524     @@ -337,6 +346,25 @@ static void delayedwork_callback(struct work_struct *work)
525     logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report);
526     break;
527     default:
528     + /* A normal report (i. e. not belonging to a pair/unpair notification)
529     + * arriving here, means that the report arrived but we did not have a
530     + * paired dj_device associated to the report's device_index, this
531     + * means that the original "device paired" notification corresponding
532     + * to this dj_device never arrived to this driver. The reason is that
533     + * hid-core discards all packets coming from a device while probe() is
534     + * executing. */
535     + if (!djrcv_dev->paired_dj_devices[dj_report.device_index]) {
536     + /* ok, we don't know the device, just re-ask the
537     + * receiver for the list of connected devices. */
538     + retval = logi_dj_recv_query_paired_devices(djrcv_dev);
539     + if (!retval) {
540     + /* everything went fine, so just leave */
541     + break;
542     + }
543     + dev_err(&djrcv_dev->hdev->dev,
544     + "%s:logi_dj_recv_query_paired_devices "
545     + "error:%d\n", __func__, retval);
546     + }
547     dbg_hid("%s: unexpected report type\n", __func__);
548     }
549     }
550     @@ -367,6 +395,12 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
551     if (!djdev) {
552     dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
553     " is NULL, index %d\n", dj_report->device_index);
554     + kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
555     +
556     + if (schedule_work(&djrcv_dev->work) == 0) {
557     + dbg_hid("%s: did not schedule the work item, was already "
558     + "queued\n", __func__);
559     + }
560     return;
561     }
562    
563     @@ -397,6 +431,12 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
564     if (dj_device == NULL) {
565     dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
566     " is NULL, index %d\n", dj_report->device_index);
567     + kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
568     +
569     + if (schedule_work(&djrcv_dev->work) == 0) {
570     + dbg_hid("%s: did not schedule the work item, was already "
571     + "queued\n", __func__);
572     + }
573     return;
574     }
575    
576     @@ -444,6 +484,10 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
577     struct dj_report *dj_report;
578     int retval;
579    
580     + /* no need to protect djrcv_dev->querying_devices */
581     + if (djrcv_dev->querying_devices)
582     + return 0;
583     +
584     dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
585     if (!dj_report)
586     return -ENOMEM;
587     @@ -455,6 +499,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
588     return retval;
589     }
590    
591     +
592     static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
593     unsigned timeout)
594     {
595     diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
596     index fd28a5e0ca3b..4a4000340ce1 100644
597     --- a/drivers/hid/hid-logitech-dj.h
598     +++ b/drivers/hid/hid-logitech-dj.h
599     @@ -101,6 +101,7 @@ struct dj_receiver_dev {
600     struct work_struct work;
601     struct kfifo notif_fifo;
602     spinlock_t lock;
603     + bool querying_devices;
604     };
605    
606     struct dj_device {
607     diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
608     index 927bff373aac..2f404ba61c6c 100644
609     --- a/drivers/irqchip/irq-renesas-irqc.c
610     +++ b/drivers/irqchip/irq-renesas-irqc.c
611     @@ -248,8 +248,8 @@ static int irqc_probe(struct platform_device *pdev)
612    
613     return 0;
614     err3:
615     - for (; k >= 0; k--)
616     - free_irq(p->irq[k - 1].requested_irq, &p->irq[k - 1]);
617     + while (--k >= 0)
618     + free_irq(p->irq[k].requested_irq, &p->irq[k]);
619    
620     irq_domain_remove(p->irq_domain);
621     err2:
622     diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
623     index e968c01ca2ac..45f26be359ea 100644
624     --- a/drivers/mfd/rtsx_pcr.c
625     +++ b/drivers/mfd/rtsx_pcr.c
626     @@ -1195,8 +1195,14 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
627    
628     pcr->remove_pci = true;
629    
630     - cancel_delayed_work(&pcr->carddet_work);
631     - cancel_delayed_work(&pcr->idle_work);
632     + /* Disable interrupts at the pcr level */
633     + spin_lock_irq(&pcr->lock);
634     + rtsx_pci_writel(pcr, RTSX_BIER, 0);
635     + pcr->bier = 0;
636     + spin_unlock_irq(&pcr->lock);
637     +
638     + cancel_delayed_work_sync(&pcr->carddet_work);
639     + cancel_delayed_work_sync(&pcr->idle_work);
640    
641     mfd_remove_devices(&pcidev->dev);
642    
643     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
644     index 36a0b438e65e..11ae0811e4bf 100644
645     --- a/drivers/net/ethernet/broadcom/tg3.c
646     +++ b/drivers/net/ethernet/broadcom/tg3.c
647     @@ -16297,6 +16297,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
648     /* Clear this out for sanity. */
649     tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
650    
651     + /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
652     + tw32(TG3PCI_REG_BASE_ADDR, 0);
653     +
654     pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
655     &pci_state_reg);
656     if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
657     diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
658     index d48099f03b7f..fbd0d7df67d8 100644
659     --- a/drivers/net/ethernet/freescale/fec_main.c
660     +++ b/drivers/net/ethernet/freescale/fec_main.c
661     @@ -371,6 +371,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
662     else
663     bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
664    
665     + skb_tx_timestamp(skb);
666     +
667     fep->cur_tx = bdp;
668    
669     if (fep->cur_tx == fep->dirty_tx)
670     @@ -379,8 +381,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
671     /* Trigger transmission start */
672     writel(0, fep->hwp + FEC_X_DES_ACTIVE);
673    
674     - skb_tx_timestamp(skb);
675     -
676     return NETDEV_TX_OK;
677     }
678    
679     diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
680     index 90ea0b1673ca..716dae7b9d8e 100644
681     --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
682     +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
683     @@ -3023,7 +3023,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
684    
685     dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
686     NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
687     - dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO |
688     + dev->features = NETIF_F_SG | NETIF_F_TSO |
689     NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
690     NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
691     NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
692     diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
693     index 571452e786d5..61a1540f1347 100644
694     --- a/drivers/net/ethernet/tehuti/tehuti.c
695     +++ b/drivers/net/ethernet/tehuti/tehuti.c
696     @@ -2019,7 +2019,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
697     ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
698     | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
699     NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM
700     - /*| NETIF_F_FRAGLIST */
701     ;
702     ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
703     NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;
704     diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
705     index 5444f2b87d01..52de70bd0bba 100644
706     --- a/drivers/net/ethernet/xilinx/ll_temac_main.c
707     +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
708     @@ -1016,7 +1016,7 @@ static int temac_of_probe(struct platform_device *op)
709     dev_set_drvdata(&op->dev, ndev);
710     SET_NETDEV_DEV(ndev, &op->dev);
711     ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
712     - ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
713     + ndev->features = NETIF_F_SG;
714     ndev->netdev_ops = &temac_netdev_ops;
715     ndev->ethtool_ops = &temac_ethtool_ops;
716     #if 0
717     diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
718     index 24748e8367a1..2e2eeba37a06 100644
719     --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
720     +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
721     @@ -1488,7 +1488,7 @@ static int axienet_of_probe(struct platform_device *op)
722    
723     SET_NETDEV_DEV(ndev, &op->dev);
724     ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
725     - ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
726     + ndev->features = NETIF_F_SG;
727     ndev->netdev_ops = &axienet_netdev_ops;
728     ndev->ethtool_ops = &axienet_ethtool_ops;
729    
730     diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
731     index 3169252613fa..5d78c1d08abd 100644
732     --- a/drivers/net/hamradio/hdlcdrv.c
733     +++ b/drivers/net/hamradio/hdlcdrv.c
734     @@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
735     case HDLCDRVCTL_CALIBRATE:
736     if(!capable(CAP_SYS_RAWIO))
737     return -EPERM;
738     + if (bi.data.calibrate > INT_MAX / s->par.bitrate)
739     + return -EINVAL;
740     s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
741     return 0;
742    
743     diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
744     index 0721e72f9299..82529a2658d7 100644
745     --- a/drivers/net/hamradio/yam.c
746     +++ b/drivers/net/hamradio/yam.c
747     @@ -1058,6 +1058,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
748     break;
749    
750     case SIOCYAMGCFG:
751     + memset(&yi, 0, sizeof(yi));
752     yi.cfg.mask = 0xffffffff;
753     yi.cfg.iobase = yp->iobase;
754     yi.cfg.irq = yp->irq;
755     diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
756     index 23a0fff0df52..aea78fc2e48f 100644
757     --- a/drivers/net/hyperv/netvsc_drv.c
758     +++ b/drivers/net/hyperv/netvsc_drv.c
759     @@ -328,7 +328,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
760     return -EINVAL;
761    
762     nvdev->start_remove = true;
763     - cancel_delayed_work_sync(&ndevctx->dwork);
764     cancel_work_sync(&ndevctx->work);
765     netif_tx_disable(ndev);
766     rndis_filter_device_remove(hdev);
767     diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
768     index c70ff7dac00e..9e56eb479a4f 100644
769     --- a/drivers/net/macvtap.c
770     +++ b/drivers/net/macvtap.c
771     @@ -797,11 +797,10 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
772     const struct sk_buff *skb,
773     const struct iovec *iv, int len)
774     {
775     - struct macvlan_dev *vlan;
776     int ret;
777     int vnet_hdr_len = 0;
778     int vlan_offset = 0;
779     - int copied;
780     + int copied, total;
781    
782     if (q->flags & IFF_VNET_HDR) {
783     struct virtio_net_hdr vnet_hdr;
784     @@ -816,7 +815,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
785     if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
786     return -EFAULT;
787     }
788     - copied = vnet_hdr_len;
789     + total = copied = vnet_hdr_len;
790     + total += skb->len;
791    
792     if (!vlan_tx_tag_present(skb))
793     len = min_t(int, skb->len, len);
794     @@ -831,6 +831,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
795    
796     vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
797     len = min_t(int, skb->len + VLAN_HLEN, len);
798     + total += VLAN_HLEN;
799    
800     copy = min_t(int, vlan_offset, len);
801     ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
802     @@ -848,16 +849,9 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
803     }
804    
805     ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
806     - copied += len;
807    
808     done:
809     - rcu_read_lock_bh();
810     - vlan = rcu_dereference_bh(q->vlan);
811     - if (vlan)
812     - macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
813     - rcu_read_unlock_bh();
814     -
815     - return ret ? ret : copied;
816     + return ret ? ret : total;
817     }
818    
819     static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
820     @@ -909,7 +903,9 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
821     }
822    
823     ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
824     - ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
825     + ret = min_t(ssize_t, ret, len);
826     + if (ret > 0)
827     + iocb->ki_pos = ret;
828     out:
829     return ret;
830     }
831     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
832     index 9ef85fea1d1e..582497103fe8 100644
833     --- a/drivers/net/tun.c
834     +++ b/drivers/net/tun.c
835     @@ -1412,6 +1412,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
836     ret = tun_do_read(tun, tfile, iocb, iv, len,
837     file->f_flags & O_NONBLOCK);
838     ret = min_t(ssize_t, ret, len);
839     + if (ret > 0)
840     + iocb->ki_pos = ret;
841     out:
842     tun_put(tun);
843     return ret;
844     diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
845     index 64cf70247048..a0c05e07feeb 100644
846     --- a/drivers/net/virtio_net.c
847     +++ b/drivers/net/virtio_net.c
848     @@ -294,26 +294,61 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
849     return skb;
850     }
851    
852     -static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
853     +static struct sk_buff *receive_small(void *buf, unsigned int len)
854     {
855     - struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
856     - struct page *page;
857     - int num_buf, i, len;
858     + struct sk_buff * skb = buf;
859     +
860     + len -= sizeof(struct virtio_net_hdr);
861     + skb_trim(skb, len);
862     +
863     + return skb;
864     +}
865     +
866     +static struct sk_buff *receive_big(struct net_device *dev,
867     + struct receive_queue *rq,
868     + void *buf)
869     +{
870     + struct page *page = buf;
871     + struct sk_buff *skb = page_to_skb(rq, page, 0);
872     +
873     + if (unlikely(!skb))
874     + goto err;
875     +
876     + return skb;
877     +
878     +err:
879     + dev->stats.rx_dropped++;
880     + give_pages(rq, page);
881     + return NULL;
882     +}
883     +
884     +static struct sk_buff *receive_mergeable(struct net_device *dev,
885     + struct receive_queue *rq,
886     + void *buf,
887     + unsigned int len)
888     +{
889     + struct skb_vnet_hdr *hdr = page_address(buf);
890     + int num_buf = hdr->mhdr.num_buffers;
891     + struct page *page = buf;
892     + struct sk_buff *skb = page_to_skb(rq, page, len);
893     + int i;
894     +
895     + if (unlikely(!skb))
896     + goto err_skb;
897    
898     - num_buf = hdr->mhdr.num_buffers;
899     while (--num_buf) {
900     i = skb_shinfo(skb)->nr_frags;
901     if (i >= MAX_SKB_FRAGS) {
902     pr_debug("%s: packet too long\n", skb->dev->name);
903     skb->dev->stats.rx_length_errors++;
904     - return -EINVAL;
905     + goto err_frags;
906     }
907     page = virtqueue_get_buf(rq->vq, &len);
908     if (!page) {
909     - pr_debug("%s: rx error: %d buffers missing\n",
910     - skb->dev->name, hdr->mhdr.num_buffers);
911     - skb->dev->stats.rx_length_errors++;
912     - return -EINVAL;
913     + pr_debug("%s: rx error: %d buffers %d missing\n",
914     + dev->name, hdr->mhdr.num_buffers, num_buf);
915     + dev->stats.rx_length_errors++;
916     + goto err_buf;
917     }
918    
919     if (len > PAGE_SIZE)
920     @@ -323,7 +358,26 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
921    
922     --rq->num;
923     }
924     - return 0;
925     + return skb;
926     +err_skb:
927     + give_pages(rq, page);
928     + while (--num_buf) {
929     +err_frags:
930     + buf = virtqueue_get_buf(rq->vq, &len);
931     + if (unlikely(!buf)) {
932     + pr_debug("%s: rx error: %d buffers missing\n",
933     + dev->name, num_buf);
934     + dev->stats.rx_length_errors++;
935     + break;
936     + }
937     + page = buf;
938     + give_pages(rq, page);
939     + --rq->num;
940     + }
941     +err_buf:
942     + dev->stats.rx_dropped++;
943     + dev_kfree_skb(skb);
944     + return NULL;
945     }
946    
947     static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
948     @@ -332,7 +386,6 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
949     struct net_device *dev = vi->dev;
950     struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
951     struct sk_buff *skb;
952     - struct page *page;
953     struct skb_vnet_hdr *hdr;
954    
955     if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
956     @@ -344,25 +397,15 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
957     dev_kfree_skb(buf);
958     return;
959     }
960     + if (vi->mergeable_rx_bufs)
961     + skb = receive_mergeable(dev, rq, buf, len);
962     + else if (vi->big_packets)
963     + skb = receive_big(dev, rq, buf);
964     + else
965     + skb = receive_small(buf, len);
966    
967     - if (!vi->mergeable_rx_bufs && !vi->big_packets) {
968     - skb = buf;
969     - len -= sizeof(struct virtio_net_hdr);
970     - skb_trim(skb, len);
971     - } else {
972     - page = buf;
973     - skb = page_to_skb(rq, page, len);
974     - if (unlikely(!skb)) {
975     - dev->stats.rx_dropped++;
976     - give_pages(rq, page);
977     - return;
978     - }
979     - if (vi->mergeable_rx_bufs)
980     - if (receive_mergeable(rq, skb)) {
981     - dev_kfree_skb(skb);
982     - return;
983     - }
984     - }
985     + if (unlikely(!skb))
986     + return;
987    
988     hdr = skb_vnet_hdr(skb);
989    
990     @@ -1285,6 +1328,11 @@ static void virtnet_config_changed(struct virtio_device *vdev)
991    
992     static void virtnet_free_queues(struct virtnet_info *vi)
993     {
994     + int i;
995     +
996     + for (i = 0; i < vi->max_queue_pairs; i++)
997     + netif_napi_del(&vi->rq[i].napi);
998     +
999     kfree(vi->rq);
1000     kfree(vi->sq);
1001     }
1002     @@ -1697,16 +1745,17 @@ static int virtnet_restore(struct virtio_device *vdev)
1003     if (err)
1004     return err;
1005    
1006     - if (netif_running(vi->dev))
1007     + if (netif_running(vi->dev)) {
1008     + for (i = 0; i < vi->curr_queue_pairs; i++)
1009     + if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1010     + schedule_delayed_work(&vi->refill, 0);
1011     +
1012     for (i = 0; i < vi->max_queue_pairs; i++)
1013     virtnet_napi_enable(&vi->rq[i]);
1014     + }
1015    
1016     netif_device_attach(vi->dev);
1017    
1018     - for (i = 0; i < vi->curr_queue_pairs; i++)
1019     - if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1020     - schedule_delayed_work(&vi->refill, 0);
1021     -
1022     mutex_lock(&vi->config_lock);
1023     vi->config_enable = true;
1024     mutex_unlock(&vi->config_lock);
1025     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1026     index 9bc913b92d13..26b543bc4f53 100644
1027     --- a/drivers/scsi/sd.c
1028     +++ b/drivers/scsi/sd.c
1029     @@ -2634,13 +2634,16 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
1030     }
1031    
1032     if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
1033     + /* too large values might cause issues with arcmsr */
1034     + int vpd_buf_len = 64;
1035     +
1036     sdev->no_report_opcodes = 1;
1037    
1038     /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
1039     * CODES is unsupported and the device has an ATA
1040     * Information VPD page (SAT).
1041     */
1042     - if (!scsi_get_vpd_page(sdev, 0x89, buffer, SD_BUF_SIZE))
1043     + if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
1044     sdev->no_write_same = 1;
1045     }
1046    
1047     diff --git a/include/linux/net.h b/include/linux/net.h
1048     index 0c4ae5d94de9..65545ac6fb9c 100644
1049     --- a/include/linux/net.h
1050     +++ b/include/linux/net.h
1051     @@ -180,7 +180,7 @@ struct proto_ops {
1052     int offset, size_t size, int flags);
1053     ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
1054     struct pipe_inode_info *pipe, size_t len, unsigned int flags);
1055     - void (*set_peek_off)(struct sock *sk, int val);
1056     + int (*set_peek_off)(struct sock *sk, int val);
1057     };
1058    
1059     #define DECLARE_SOCKADDR(type, dst, src) \
1060     diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
1061     index 96e4c21e15e0..abf7756eaf9e 100644
1062     --- a/include/linux/netdevice.h
1063     +++ b/include/linux/netdevice.h
1064     @@ -1772,6 +1772,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
1065     return dev->header_ops->parse(skb, haddr);
1066     }
1067    
1068     +static inline int dev_rebuild_header(struct sk_buff *skb)
1069     +{
1070     + const struct net_device *dev = skb->dev;
1071     +
1072     + if (!dev->header_ops || !dev->header_ops->rebuild)
1073     + return 0;
1074     + return dev->header_ops->rebuild(skb);
1075     +}
1076     +
1077     typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1078     extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
1079     static inline int unregister_gifconf(unsigned int family)
1080     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
1081     index 74db47ec09ea..ded45ec6b22b 100644
1082     --- a/include/linux/skbuff.h
1083     +++ b/include/linux/skbuff.h
1084     @@ -1741,6 +1741,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1085     }
1086     #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1087    
1088     +static inline void skb_pop_mac_header(struct sk_buff *skb)
1089     +{
1090     + skb->mac_header = skb->network_header;
1091     +}
1092     +
1093     static inline void skb_probe_transport_header(struct sk_buff *skb,
1094     const int offset_hint)
1095     {
1096     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
1097     index f9e35b1e7713..b4308d7da339 100644
1098     --- a/kernel/sched/core.c
1099     +++ b/kernel/sched/core.c
1100     @@ -7812,7 +7812,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
1101    
1102     runtime_enabled = quota != RUNTIME_INF;
1103     runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
1104     - account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
1105     + /*
1106     + * If we need to toggle cfs_bandwidth_used, off->on must occur
1107     + * before making related changes, and on->off must occur afterwards
1108     + */
1109     + if (runtime_enabled && !runtime_was_enabled)
1110     + cfs_bandwidth_usage_inc();
1111     raw_spin_lock_irq(&cfs_b->lock);
1112     cfs_b->period = ns_to_ktime(period);
1113     cfs_b->quota = quota;
1114     @@ -7838,6 +7843,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
1115     unthrottle_cfs_rq(cfs_rq);
1116     raw_spin_unlock_irq(&rq->lock);
1117     }
1118     + if (runtime_was_enabled && !runtime_enabled)
1119     + cfs_bandwidth_usage_dec();
1120     out_unlock:
1121     mutex_unlock(&cfs_constraints_mutex);
1122    
1123     diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
1124     index ce60006132b1..305ef886219e 100644
1125     --- a/kernel/sched/fair.c
1126     +++ b/kernel/sched/fair.c
1127     @@ -2029,13 +2029,14 @@ static inline bool cfs_bandwidth_used(void)
1128     return static_key_false(&__cfs_bandwidth_used);
1129     }
1130    
1131     -void account_cfs_bandwidth_used(int enabled, int was_enabled)
1132     +void cfs_bandwidth_usage_inc(void)
1133     {
1134     - /* only need to count groups transitioning between enabled/!enabled */
1135     - if (enabled && !was_enabled)
1136     - static_key_slow_inc(&__cfs_bandwidth_used);
1137     - else if (!enabled && was_enabled)
1138     - static_key_slow_dec(&__cfs_bandwidth_used);
1139     + static_key_slow_inc(&__cfs_bandwidth_used);
1140     +}
1141     +
1142     +void cfs_bandwidth_usage_dec(void)
1143     +{
1144     + static_key_slow_dec(&__cfs_bandwidth_used);
1145     }
1146     #else /* HAVE_JUMP_LABEL */
1147     static bool cfs_bandwidth_used(void)
1148     @@ -2043,7 +2044,8 @@ static bool cfs_bandwidth_used(void)
1149     return true;
1150     }
1151    
1152     -void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
1153     +void cfs_bandwidth_usage_inc(void) {}
1154     +void cfs_bandwidth_usage_dec(void) {}
1155     #endif /* HAVE_JUMP_LABEL */
1156    
1157     /*
1158     @@ -2408,6 +2410,13 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
1159     if (idle)
1160     goto out_unlock;
1161    
1162     + /*
1163     + * if we have relooped after returning idle once, we need to update our
1164     + * status as actually running, so that other cpus doing
1165     + * __start_cfs_bandwidth will stop trying to cancel us.
1166     + */
1167     + cfs_b->timer_active = 1;
1168     +
1169     __refill_cfs_bandwidth_runtime(cfs_b);
1170    
1171     if (!throttled) {
1172     @@ -2468,7 +2477,13 @@ static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
1173     /* how long we wait to gather additional slack before distributing */
1174     static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
1175    
1176     -/* are we near the end of the current quota period? */
1177     +/*
1178     + * Are we near the end of the current quota period?
1179     + *
1180     + * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
1181     + * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
1182     + * migrate_hrtimers, base is never cleared, so we are fine.
1183     + */
1184     static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
1185     {
1186     struct hrtimer *refresh_timer = &cfs_b->period_timer;
1187     @@ -2544,10 +2559,12 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
1188     u64 expires;
1189    
1190     /* confirm we're still not at a refresh boundary */
1191     - if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
1192     + raw_spin_lock(&cfs_b->lock);
1193     + if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
1194     + raw_spin_unlock(&cfs_b->lock);
1195     return;
1196     + }
1197    
1198     - raw_spin_lock(&cfs_b->lock);
1199     if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
1200     runtime = cfs_b->runtime;
1201     cfs_b->runtime = 0;
1202     @@ -2672,11 +2689,11 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
1203     * (timer_active==0 becomes visible before the hrtimer call-back
1204     * terminates). In either case we ensure that it's re-programmed
1205     */
1206     - while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
1207     + while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
1208     + hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
1209     + /* bounce the lock to allow do_sched_cfs_period_timer to run */
1210     raw_spin_unlock(&cfs_b->lock);
1211     - /* ensure cfs_b->lock is available while we wait */
1212     - hrtimer_cancel(&cfs_b->period_timer);
1213     -
1214     + cpu_relax();
1215     raw_spin_lock(&cfs_b->lock);
1216     /* if someone else restarted the timer then we're done */
1217     if (cfs_b->timer_active)
1218     @@ -6074,7 +6091,8 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
1219     se->cfs_rq = parent->my_q;
1220    
1221     se->my_q = cfs_rq;
1222     - update_load_set(&se->load, 0);
1223     + /* guarantee group entities always have weight */
1224     + update_load_set(&se->load, NICE_0_LOAD);
1225     se->parent = parent;
1226     }
1227    
1228     diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
1229     index ce39224d6155..dfa31d533e3f 100644
1230     --- a/kernel/sched/sched.h
1231     +++ b/kernel/sched/sched.h
1232     @@ -1318,7 +1318,8 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
1233     extern void init_cfs_rq(struct cfs_rq *cfs_rq);
1234     extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
1235    
1236     -extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
1237     +extern void cfs_bandwidth_usage_inc(void);
1238     +extern void cfs_bandwidth_usage_dec(void);
1239    
1240     #ifdef CONFIG_NO_HZ_COMMON
1241     enum rq_nohz_flag_bits {
1242     diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
1243     index 1cd3d2a406f5..4af64afc7022 100644
1244     --- a/net/8021q/vlan_dev.c
1245     +++ b/net/8021q/vlan_dev.c
1246     @@ -549,6 +549,23 @@ static const struct header_ops vlan_header_ops = {
1247     .parse = eth_header_parse,
1248     };
1249    
1250     +static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
1251     + unsigned short type,
1252     + const void *daddr, const void *saddr,
1253     + unsigned int len)
1254     +{
1255     + struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
1256     + struct net_device *real_dev = vlan->real_dev;
1257     +
1258     + return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
1259     +}
1260     +
1261     +static const struct header_ops vlan_passthru_header_ops = {
1262     + .create = vlan_passthru_hard_header,
1263     + .rebuild = dev_rebuild_header,
1264     + .parse = eth_header_parse,
1265     +};
1266     +
1267     static struct device_type vlan_type = {
1268     .name = "vlan",
1269     };
1270     @@ -592,7 +609,7 @@ static int vlan_dev_init(struct net_device *dev)
1271    
1272     dev->needed_headroom = real_dev->needed_headroom;
1273     if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
1274     - dev->header_ops = real_dev->header_ops;
1275     + dev->header_ops = &vlan_passthru_header_ops;
1276     dev->hard_header_len = real_dev->hard_header_len;
1277     } else {
1278     dev->header_ops = &vlan_header_ops;
1279     diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
1280     index d82058f6fc79..2a180a380181 100644
1281     --- a/net/bridge/br_multicast.c
1282     +++ b/net/bridge/br_multicast.c
1283     @@ -1839,7 +1839,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1284     u32 old;
1285     struct net_bridge_mdb_htable *mdb;
1286    
1287     - spin_lock(&br->multicast_lock);
1288     + spin_lock_bh(&br->multicast_lock);
1289     if (!netif_running(br->dev))
1290     goto unlock;
1291    
1292     @@ -1871,7 +1871,7 @@ rollback:
1293     }
1294    
1295     unlock:
1296     - spin_unlock(&br->multicast_lock);
1297     + spin_unlock_bh(&br->multicast_lock);
1298    
1299     return err;
1300     }
1301     diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
1302     index d23b6682f4e9..a974dfec4bf1 100644
1303     --- a/net/core/drop_monitor.c
1304     +++ b/net/core/drop_monitor.c
1305     @@ -64,7 +64,6 @@ static struct genl_family net_drop_monitor_family = {
1306     .hdrsize = 0,
1307     .name = "NET_DM",
1308     .version = 2,
1309     - .maxattr = NET_DM_CMD_MAX,
1310     };
1311    
1312     static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
1313     diff --git a/net/core/neighbour.c b/net/core/neighbour.c
1314     index 0034b611fa5e..49aeab86f317 100644
1315     --- a/net/core/neighbour.c
1316     +++ b/net/core/neighbour.c
1317     @@ -1274,7 +1274,7 @@ int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
1318    
1319     if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1320     skb->len) < 0 &&
1321     - dev->header_ops->rebuild(skb))
1322     + dev_rebuild_header(skb))
1323     return 0;
1324    
1325     return dev_queue_xmit(skb);
1326     diff --git a/net/core/netpoll.c b/net/core/netpoll.c
1327     index b04f73847eda..27f33f25cda8 100644
1328     --- a/net/core/netpoll.c
1329     +++ b/net/core/netpoll.c
1330     @@ -386,8 +386,14 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
1331     !vlan_hw_offload_capable(netif_skb_features(skb),
1332     skb->vlan_proto)) {
1333     skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
1334     - if (unlikely(!skb))
1335     - break;
1336     + if (unlikely(!skb)) {
1337     + /* This is actually a packet drop, but we
1338     + * don't want the code at the end of this
1339     + * function to try and re-queue a NULL skb.
1340     + */
1341     + status = NETDEV_TX_OK;
1342     + goto unlock_txq;
1343     + }
1344     skb->vlan_tci = 0;
1345     }
1346    
1347     @@ -395,6 +401,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
1348     if (status == NETDEV_TX_OK)
1349     txq_trans_update(txq);
1350     }
1351     + unlock_txq:
1352     __netif_tx_unlock(txq);
1353    
1354     if (status == NETDEV_TX_OK)
1355     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1356     index d9e8736bcdc1..c35b81b80fe2 100644
1357     --- a/net/core/skbuff.c
1358     +++ b/net/core/skbuff.c
1359     @@ -2854,7 +2854,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
1360     doffset + tnl_hlen);
1361    
1362     if (fskb != skb_shinfo(skb)->frag_list)
1363     - continue;
1364     + goto perform_csum_check;
1365    
1366     if (!sg) {
1367     nskb->ip_summed = CHECKSUM_NONE;
1368     @@ -2918,6 +2918,7 @@ skip_fraglist:
1369     nskb->len += nskb->data_len;
1370     nskb->truesize += nskb->data_len;
1371    
1372     +perform_csum_check:
1373     if (!csum) {
1374     nskb->csum = skb_checksum(nskb, doffset,
1375     nskb->len - doffset, 0);
1376     diff --git a/net/core/sock.c b/net/core/sock.c
1377     index 6565431b0e6d..50a345e5a26f 100644
1378     --- a/net/core/sock.c
1379     +++ b/net/core/sock.c
1380     @@ -885,7 +885,7 @@ set_rcvbuf:
1381    
1382     case SO_PEEK_OFF:
1383     if (sock->ops->set_peek_off)
1384     - sock->ops->set_peek_off(sk, val);
1385     + ret = sock->ops->set_peek_off(sk, val);
1386     else
1387     ret = -EOPNOTSUPP;
1388     break;
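
    [Editorial sketch, not part of the patch] The net/core/sock.c hunk above makes
    setsockopt(SO_PEEK_OFF) return whatever the protocol's set_peek_off handler
    returns instead of silently discarding it; the AF_UNIX hunk further down in this
    patch is what can now report -EINTR. A minimal userspace illustration follows; it
    assumes only the standard setsockopt(2) interface and the generic SO_PEEK_OFF
    option value, and is a sketch rather than anything taken from the patch:

    /* Minimal userspace sketch, illustrative only. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>

    #ifndef SO_PEEK_OFF
    #define SO_PEEK_OFF 42    /* generic value from asm-generic/socket.h */
    #endif

    int main(void)
    {
        int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
        int off = 4;          /* start MSG_PEEK reads 4 bytes into the queue */

        if (fd < 0) {
            perror("socket");
            return 1;
        }
        /* Before this fix the call appeared to succeed even when the
         * protocol handler failed; with the change above a handler error
         * (for AF_UNIX, e.g. -EINTR) is now visible to the caller. */
        if (setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off)) < 0)
            perror("setsockopt(SO_PEEK_OFF)");
        close(fd);
        return 0;
    }
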
1389     diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
1390     index 5f648751fce2..31cf54d18221 100644
1391     --- a/net/ipv4/inet_diag.c
1392     +++ b/net/ipv4/inet_diag.c
1393     @@ -106,6 +106,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
1394    
1395     r->id.idiag_sport = inet->inet_sport;
1396     r->id.idiag_dport = inet->inet_dport;
1397     +
1398     + memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
1399     + memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
1400     +
1401     r->id.idiag_src[0] = inet->inet_rcv_saddr;
1402     r->id.idiag_dst[0] = inet->inet_daddr;
1403    
1404     @@ -240,12 +244,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
1405    
1406     r->idiag_family = tw->tw_family;
1407     r->idiag_retrans = 0;
1408     +
1409     r->id.idiag_if = tw->tw_bound_dev_if;
1410     sock_diag_save_cookie(tw, r->id.idiag_cookie);
1411     +
1412     r->id.idiag_sport = tw->tw_sport;
1413     r->id.idiag_dport = tw->tw_dport;
1414     +
1415     + memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
1416     + memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
1417     +
1418     r->id.idiag_src[0] = tw->tw_rcv_saddr;
1419     r->id.idiag_dst[0] = tw->tw_daddr;
1420     +
1421     r->idiag_state = tw->tw_substate;
1422     r->idiag_timer = 3;
1423     r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
1424     @@ -732,8 +743,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
1425    
1426     r->id.idiag_sport = inet->inet_sport;
1427     r->id.idiag_dport = ireq->rmt_port;
1428     +
1429     + memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
1430     + memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
1431     +
1432     r->id.idiag_src[0] = ireq->loc_addr;
1433     r->id.idiag_dst[0] = ireq->rmt_addr;
1434     +
1435     r->idiag_expires = jiffies_to_msecs(tmo);
1436     r->idiag_rqueue = 0;
1437     r->idiag_wqueue = 0;
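
    [Editorial sketch, not part of the patch] The memset() additions above zero the
    full four-word idiag_src/idiag_dst arrays before only word [0] is filled for IPv4
    sockets, so the unused words no longer leak uninitialized kernel memory to
    userspace. The sketch below shows how a sock_diag consumer reads those fields for
    AF_INET; print_v4_id() is a made-up helper, and the only assumption is the
    exported struct inet_diag_sockid layout from <linux/inet_diag.h>:

    /* Illustrative userspace sketch; print_v4_id() is hypothetical. */
    #include <stdio.h>
    #include <arpa/inet.h>
    #include <sys/socket.h>
    #include <linux/inet_diag.h>

    static void print_v4_id(const struct inet_diag_sockid *id)
    {
        char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];

        /* For AF_INET only word [0] carries the address; after this fix
         * words [1..3] are guaranteed to be zero. */
        inet_ntop(AF_INET, &id->idiag_src[0], src, sizeof(src));
        inet_ntop(AF_INET, &id->idiag_dst[0], dst, sizeof(dst));
        printf("%s:%u -> %s:%u\n",
               src, (unsigned)ntohs(id->idiag_sport),
               dst, (unsigned)ntohs(id->idiag_dport));
    }

    int main(void)
    {
        struct inet_diag_sockid id = {
            .idiag_sport = htons(55555),
            .idiag_dport = htons(80),
            .idiag_src   = { htonl(0x7f000001) },  /* 127.0.0.1 */
            .idiag_dst   = { htonl(0x0a000001) },  /* 10.0.0.1  */
        };

        print_v4_id(&id);
        return 0;
    }
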
1438     diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
1439     index 64e4e98c8786..828b2e8631e7 100644
1440     --- a/net/ipv4/ip_gre.c
1441     +++ b/net/ipv4/ip_gre.c
1442     @@ -335,6 +335,7 @@ static int ipgre_rcv(struct sk_buff *skb)
1443     iph->saddr, iph->daddr, tpi.key);
1444    
1445     if (tunnel) {
1446     + skb_pop_mac_header(skb);
1447     ip_tunnel_rcv(tunnel, skb, &tpi, hdr_len, log_ecn_error);
1448     return 0;
1449     }
1450     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1451     index 5a8bf536026c..6c389300f4e9 100644
1452     --- a/net/ipv6/route.c
1453     +++ b/net/ipv6/route.c
1454     @@ -84,6 +84,8 @@ static int ip6_dst_gc(struct dst_ops *ops);
1455    
1456     static int ip6_pkt_discard(struct sk_buff *skb);
1457     static int ip6_pkt_discard_out(struct sk_buff *skb);
1458     +static int ip6_pkt_prohibit(struct sk_buff *skb);
1459     +static int ip6_pkt_prohibit_out(struct sk_buff *skb);
1460     static void ip6_link_failure(struct sk_buff *skb);
1461     static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1462     struct sk_buff *skb, u32 mtu);
1463     @@ -233,9 +235,6 @@ static const struct rt6_info ip6_null_entry_template = {
1464    
1465     #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1466    
1467     -static int ip6_pkt_prohibit(struct sk_buff *skb);
1468     -static int ip6_pkt_prohibit_out(struct sk_buff *skb);
1469     -
1470     static const struct rt6_info ip6_prohibit_entry_template = {
1471     .dst = {
1472     .__refcnt = ATOMIC_INIT(1),
1473     @@ -1498,21 +1497,24 @@ int ip6_route_add(struct fib6_config *cfg)
1474     goto out;
1475     }
1476     }
1477     - rt->dst.output = ip6_pkt_discard_out;
1478     - rt->dst.input = ip6_pkt_discard;
1479     rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1480     switch (cfg->fc_type) {
1481     case RTN_BLACKHOLE:
1482     rt->dst.error = -EINVAL;
1483     + rt->dst.output = dst_discard;
1484     + rt->dst.input = dst_discard;
1485     break;
1486     case RTN_PROHIBIT:
1487     rt->dst.error = -EACCES;
1488     + rt->dst.output = ip6_pkt_prohibit_out;
1489     + rt->dst.input = ip6_pkt_prohibit;
1490     break;
1491     case RTN_THROW:
1492     - rt->dst.error = -EAGAIN;
1493     - break;
1494     default:
1495     - rt->dst.error = -ENETUNREACH;
1496     + rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1497     + : -ENETUNREACH;
1498     + rt->dst.output = ip6_pkt_discard_out;
1499     + rt->dst.input = ip6_pkt_discard;
1500     break;
1501     }
1502     goto install_route;
1503     @@ -1836,9 +1838,7 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1504     else
1505     rt->rt6i_gateway = *dest;
1506     rt->rt6i_flags = ort->rt6i_flags;
1507     - if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
1508     - (RTF_DEFAULT | RTF_ADDRCONF))
1509     - rt6_set_from(rt, ort);
1510     + rt6_set_from(rt, ort);
1511     rt->rt6i_metric = 0;
1512    
1513     #ifdef CONFIG_IPV6_SUBTREES
1514     @@ -2077,8 +2077,6 @@ static int ip6_pkt_discard_out(struct sk_buff *skb)
1515     return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
1516     }
1517    
1518     -#ifdef CONFIG_IPV6_MULTIPLE_TABLES
1519     -
1520     static int ip6_pkt_prohibit(struct sk_buff *skb)
1521     {
1522     return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
1523     @@ -2090,8 +2088,6 @@ static int ip6_pkt_prohibit_out(struct sk_buff *skb)
1524     return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
1525     }
1526    
1527     -#endif
1528     -
1529     /*
1530     * Allocate a dst for local (unicast / anycast) address.
1531     */
1532     @@ -2101,12 +2097,10 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1533     bool anycast)
1534     {
1535     struct net *net = dev_net(idev->dev);
1536     - struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL);
1537     -
1538     - if (!rt) {
1539     - net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n");
1540     + struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
1541     + DST_NOCOUNT, NULL);
1542     + if (!rt)
1543     return ERR_PTR(-ENOMEM);
1544     - }
1545    
1546     in6_dev_hold(idev);
1547    
1548     diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
1549     index 76f165ef8d49..3696aa28784a 100644
1550     --- a/net/ipv6/udp_offload.c
1551     +++ b/net/ipv6/udp_offload.c
1552     @@ -85,7 +85,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
1553    
1554     /* Check if there is enough headroom to insert fragment header. */
1555     tnl_hlen = skb_tnl_header_len(skb);
1556     - if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
1557     + if (skb_mac_header(skb) < skb->head + tnl_hlen + frag_hdr_sz) {
1558     if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
1559     goto out;
1560     }
1561     diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
1562     index 88709882c464..c3ee80547066 100644
1563     --- a/net/llc/af_llc.c
1564     +++ b/net/llc/af_llc.c
1565     @@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
1566     unsigned long cpu_flags;
1567     size_t copied = 0;
1568     u32 peek_seq = 0;
1569     - u32 *seq;
1570     + u32 *seq, skb_len;
1571     unsigned long used;
1572     int target; /* Read at least this many bytes */
1573     long timeo;
1574     @@ -812,6 +812,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
1575     }
1576     continue;
1577     found_ok_skb:
1578     + skb_len = skb->len;
1579     /* Ok so how much can we use? */
1580     used = skb->len - offset;
1581     if (len < used)
1582     @@ -844,7 +845,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
1583     }
1584    
1585     /* Partial read */
1586     - if (used + offset < skb->len)
1587     + if (used + offset < skb_len)
1588     continue;
1589     } while (len > 0);
1590    
1591     diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
1592     index e9d18c30071f..cc65cdddb047 100644
1593     --- a/net/mac80211/tx.c
1594     +++ b/net/mac80211/tx.c
1595     @@ -447,7 +447,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
1596     {
1597     struct sta_info *sta = tx->sta;
1598     struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
1599     - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
1600     struct ieee80211_local *local = tx->local;
1601    
1602     if (unlikely(!sta))
1603     @@ -458,15 +457,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
1604     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
1605     int ac = skb_get_queue_mapping(tx->skb);
1606    
1607     - /* only deauth, disassoc and action are bufferable MMPDUs */
1608     - if (ieee80211_is_mgmt(hdr->frame_control) &&
1609     - !ieee80211_is_deauth(hdr->frame_control) &&
1610     - !ieee80211_is_disassoc(hdr->frame_control) &&
1611     - !ieee80211_is_action(hdr->frame_control)) {
1612     - info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
1613     - return TX_CONTINUE;
1614     - }
1615     -
1616     ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
1617     sta->sta.addr, sta->sta.aid, ac);
1618     if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
1619     @@ -509,9 +499,22 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
1620     static ieee80211_tx_result debug_noinline
1621     ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
1622     {
1623     + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
1624     + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
1625     +
1626     if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
1627     return TX_CONTINUE;
1628    
1629     + /* only deauth, disassoc and action are bufferable MMPDUs */
1630     + if (ieee80211_is_mgmt(hdr->frame_control) &&
1631     + !ieee80211_is_deauth(hdr->frame_control) &&
1632     + !ieee80211_is_disassoc(hdr->frame_control) &&
1633     + !ieee80211_is_action(hdr->frame_control)) {
1634     + if (tx->flags & IEEE80211_TX_UNICAST)
1635     + info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
1636     + return TX_CONTINUE;
1637     + }
1638     +
1639     if (tx->flags & IEEE80211_TX_UNICAST)
1640     return ieee80211_tx_h_unicast_ps_buf(tx);
1641     else
1642     diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c
1643     index f02b3605823e..1fb2258c3535 100644
1644     --- a/net/netfilter/nf_nat_irc.c
1645     +++ b/net/netfilter/nf_nat_irc.c
1646     @@ -34,10 +34,14 @@ static unsigned int help(struct sk_buff *skb,
1647     struct nf_conntrack_expect *exp)
1648     {
1649     char buffer[sizeof("4294967296 65635")];
1650     + struct nf_conn *ct = exp->master;
1651     + union nf_inet_addr newaddr;
1652     u_int16_t port;
1653     unsigned int ret;
1654    
1655     /* Reply comes from server. */
1656     + newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3;
1657     +
1658     exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
1659     exp->dir = IP_CT_DIR_REPLY;
1660     exp->expectfn = nf_nat_follow_master;
1661     @@ -57,17 +61,35 @@ static unsigned int help(struct sk_buff *skb,
1662     }
1663    
1664     if (port == 0) {
1665     - nf_ct_helper_log(skb, exp->master, "all ports in use");
1666     + nf_ct_helper_log(skb, ct, "all ports in use");
1667     return NF_DROP;
1668     }
1669    
1670     - ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
1671     - protoff, matchoff, matchlen, buffer,
1672     - strlen(buffer));
1673     + /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27
1674     + * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28
1675     + * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26
1676     + * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26
1677     + * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27
1678     + *
1679     + * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits,
1680     + * 255.255.255.255==4294967296, 10 digits)
1681     + * P: bound port (min 1 d, max 5d (65635))
1682     + * F: filename (min 1 d )
1683     + * S: size (min 1 d )
1684     + * 0x01, \n: terminators
1685     + */
1686     + /* AAA = "us", ie. where server normally talks to. */
1687     + snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port);
1688     + pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
1689     + buffer, &newaddr.ip, port);
1690     +
1691     + ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
1692     + matchlen, buffer, strlen(buffer));
1693     if (ret != NF_ACCEPT) {
1694     - nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
1695     + nf_ct_helper_log(skb, ct, "cannot mangle packet");
1696     nf_ct_unexpect_related(exp);
1697     }
1698     +
1699     return ret;
1700     }
1701    
1702     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1703     index c503ad6f610f..e8b5a0dfca21 100644
1704     --- a/net/packet/af_packet.c
1705     +++ b/net/packet/af_packet.c
1706     @@ -237,6 +237,30 @@ struct packet_skb_cb {
1707     static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
1708     static void __fanout_link(struct sock *sk, struct packet_sock *po);
1709    
1710     +static struct net_device *packet_cached_dev_get(struct packet_sock *po)
1711     +{
1712     + struct net_device *dev;
1713     +
1714     + rcu_read_lock();
1715     + dev = rcu_dereference(po->cached_dev);
1716     + if (likely(dev))
1717     + dev_hold(dev);
1718     + rcu_read_unlock();
1719     +
1720     + return dev;
1721     +}
1722     +
1723     +static void packet_cached_dev_assign(struct packet_sock *po,
1724     + struct net_device *dev)
1725     +{
1726     + rcu_assign_pointer(po->cached_dev, dev);
1727     +}
1728     +
1729     +static void packet_cached_dev_reset(struct packet_sock *po)
1730     +{
1731     + RCU_INIT_POINTER(po->cached_dev, NULL);
1732     +}
1733     +
1734     /* register_prot_hook must be invoked with the po->bind_lock held,
1735     * or from a context in which asynchronous accesses to the packet
1736     * socket is not possible (packet_create()).
1737     @@ -246,12 +270,10 @@ static void register_prot_hook(struct sock *sk)
1738     struct packet_sock *po = pkt_sk(sk);
1739    
1740     if (!po->running) {
1741     - if (po->fanout) {
1742     + if (po->fanout)
1743     __fanout_link(sk, po);
1744     - } else {
1745     + else
1746     dev_add_pack(&po->prot_hook);
1747     - rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
1748     - }
1749    
1750     sock_hold(sk);
1751     po->running = 1;
1752     @@ -270,12 +292,11 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
1753     struct packet_sock *po = pkt_sk(sk);
1754    
1755     po->running = 0;
1756     - if (po->fanout) {
1757     +
1758     + if (po->fanout)
1759     __fanout_unlink(sk, po);
1760     - } else {
1761     + else
1762     __dev_remove_pack(&po->prot_hook);
1763     - RCU_INIT_POINTER(po->cached_dev, NULL);
1764     - }
1765    
1766     __sock_put(sk);
1767    
1768     @@ -2048,19 +2069,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1769     return tp_len;
1770     }
1771    
1772     -static struct net_device *packet_cached_dev_get(struct packet_sock *po)
1773     -{
1774     - struct net_device *dev;
1775     -
1776     - rcu_read_lock();
1777     - dev = rcu_dereference(po->cached_dev);
1778     - if (dev)
1779     - dev_hold(dev);
1780     - rcu_read_unlock();
1781     -
1782     - return dev;
1783     -}
1784     -
1785     static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
1786     {
1787     struct sk_buff *skb;
1788     @@ -2077,7 +2085,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
1789    
1790     mutex_lock(&po->pg_vec_lock);
1791    
1792     - if (saddr == NULL) {
1793     + if (likely(saddr == NULL)) {
1794     dev = packet_cached_dev_get(po);
1795     proto = po->num;
1796     addr = NULL;
1797     @@ -2231,7 +2239,7 @@ static int packet_snd(struct socket *sock,
1798     * Get and verify the address.
1799     */
1800    
1801     - if (saddr == NULL) {
1802     + if (likely(saddr == NULL)) {
1803     dev = packet_cached_dev_get(po);
1804     proto = po->num;
1805     addr = NULL;
1806     @@ -2440,6 +2448,8 @@ static int packet_release(struct socket *sock)
1807    
1808     spin_lock(&po->bind_lock);
1809     unregister_prot_hook(sk, false);
1810     + packet_cached_dev_reset(po);
1811     +
1812     if (po->prot_hook.dev) {
1813     dev_put(po->prot_hook.dev);
1814     po->prot_hook.dev = NULL;
1815     @@ -2495,14 +2505,17 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
1816    
1817     spin_lock(&po->bind_lock);
1818     unregister_prot_hook(sk, true);
1819     +
1820     po->num = protocol;
1821     po->prot_hook.type = protocol;
1822     if (po->prot_hook.dev)
1823     dev_put(po->prot_hook.dev);
1824     - po->prot_hook.dev = dev;
1825    
1826     + po->prot_hook.dev = dev;
1827     po->ifindex = dev ? dev->ifindex : 0;
1828    
1829     + packet_cached_dev_assign(po, dev);
1830     +
1831     if (protocol == 0)
1832     goto out_unlock;
1833    
1834     @@ -2615,7 +2628,8 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
1835     po = pkt_sk(sk);
1836     sk->sk_family = PF_PACKET;
1837     po->num = proto;
1838     - RCU_INIT_POINTER(po->cached_dev, NULL);
1839     +
1840     + packet_cached_dev_reset(po);
1841    
1842     sk->sk_destruct = packet_sock_destruct;
1843     sk_refcnt_debug_inc(sk);
1844     @@ -3369,6 +3383,7 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
1845     sk->sk_error_report(sk);
1846     }
1847     if (msg == NETDEV_UNREGISTER) {
1848     + packet_cached_dev_reset(po);
1849     po->ifindex = -1;
1850     if (po->prot_hook.dev)
1851     dev_put(po->prot_hook.dev);
1852     diff --git a/net/rds/ib.c b/net/rds/ib.c
1853     index b4c8b0022fee..ba2dffeff608 100644
1854     --- a/net/rds/ib.c
1855     +++ b/net/rds/ib.c
1856     @@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr)
1857     ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
1858     /* due to this, we will claim to support iWARP devices unless we
1859     check node_type. */
1860     - if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
1861     + if (ret || !cm_id->device ||
1862     + cm_id->device->node_type != RDMA_NODE_IB_CA)
1863     ret = -EADDRNOTAVAIL;
1864    
1865     rdsdebug("addr %pI4 ret %d node type %d\n",
1866     diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
1867     index e59094981175..37be6e226d1b 100644
1868     --- a/net/rds/ib_send.c
1869     +++ b/net/rds/ib_send.c
1870     @@ -552,9 +552,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
1871     && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
1872     rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
1873     scat = &rm->data.op_sg[sg];
1874     - ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
1875     - ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
1876     - return ret;
1877     + ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
1878     + return sizeof(struct rds_header) + ret;
1879     }
1880    
1881     /* FIXME we may overallocate here */
1882     diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
1883     index abf0ad6311d0..27e6896705e6 100644
1884     --- a/net/rose/af_rose.c
1885     +++ b/net/rose/af_rose.c
1886     @@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1887    
1888     if (msg->msg_name) {
1889     struct sockaddr_rose *srose;
1890     + struct full_sockaddr_rose *full_srose = msg->msg_name;
1891    
1892     memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
1893     srose = msg->msg_name;
1894     @@ -1260,18 +1261,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1895     srose->srose_addr = rose->dest_addr;
1896     srose->srose_call = rose->dest_call;
1897     srose->srose_ndigis = rose->dest_ndigis;
1898     - if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
1899     - struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
1900     - for (n = 0 ; n < rose->dest_ndigis ; n++)
1901     - full_srose->srose_digis[n] = rose->dest_digis[n];
1902     - msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1903     - } else {
1904     - if (rose->dest_ndigis >= 1) {
1905     - srose->srose_ndigis = 1;
1906     - srose->srose_digi = rose->dest_digis[0];
1907     - }
1908     - msg->msg_namelen = sizeof(struct sockaddr_rose);
1909     - }
1910     + for (n = 0 ; n < rose->dest_ndigis ; n++)
1911     + full_srose->srose_digis[n] = rose->dest_digis[n];
1912     + msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1913     }
1914    
1915     skb_free_datagram(sk, skb);
1916     diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
1917     index 8664ad0d5797..3ca7927520b0 100644
1918     --- a/net/unix/af_unix.c
1919     +++ b/net/unix/af_unix.c
1920     @@ -529,13 +529,17 @@ static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
1921     static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
1922     struct msghdr *, size_t, int);
1923    
1924     -static void unix_set_peek_off(struct sock *sk, int val)
1925     +static int unix_set_peek_off(struct sock *sk, int val)
1926     {
1927     struct unix_sock *u = unix_sk(sk);
1928    
1929     - mutex_lock(&u->readlock);
1930     + if (mutex_lock_interruptible(&u->readlock))
1931     + return -EINTR;
1932     +
1933     sk->sk_peek_off = val;
1934     mutex_unlock(&u->readlock);
1935     +
1936     + return 0;
1937     }
1938    
1939    
1940     @@ -713,7 +717,9 @@ static int unix_autobind(struct socket *sock)
1941     int err;
1942     unsigned int retries = 0;
1943    
1944     - mutex_lock(&u->readlock);
1945     + err = mutex_lock_interruptible(&u->readlock);
1946     + if (err)
1947     + return err;
1948    
1949     err = 0;
1950     if (u->addr)
1951     @@ -872,7 +878,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1952     goto out;
1953     addr_len = err;
1954    
1955     - mutex_lock(&u->readlock);
1956     + err = mutex_lock_interruptible(&u->readlock);
1957     + if (err)
1958     + goto out;
1959    
1960     err = -EINVAL;
1961     if (u->addr)