Magellan Linux

Contents of /trunk/kernel-alx/patches-3.12/0107-3.12.8-all-fixes.patch

Revision 2423
Tue Mar 25 12:29:50 2014 UTC by niro
File size: 87546 bytes
-added 3.12 branch
1 diff --git a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
2 index 24765c146e31..5883ec878ddd 100644
3 --- a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
4 +++ b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
5 @@ -159,6 +159,8 @@ clock which they consume.
6 mixer 343
7 hdmi 344
8 g2d 345
9 + mdma0 346
10 + smmu_mdma0 347
11
12
13 [Clock Muxes]
14 diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
15 index c01223628a87..8e48e3b14227 100644
16 --- a/Documentation/networking/packet_mmap.txt
17 +++ b/Documentation/networking/packet_mmap.txt
18 @@ -123,6 +123,16 @@ Transmission process is similar to capture as shown below.
19 [shutdown] close() --------> destruction of the transmission socket and
20 deallocation of all associated resources.
21
22 +Socket creation and destruction is also straight forward, and is done
23 +the same way as in capturing described in the previous paragraph:
24 +
25 + int fd = socket(PF_PACKET, mode, 0);
26 +
27 +The protocol can optionally be 0 in case we only want to transmit
28 +via this socket, which avoids an expensive call to packet_rcv().
29 +In this case, you also need to bind(2) the TX_RING with sll_protocol = 0
30 +set. Otherwise, htons(ETH_P_ALL) or any other protocol, for example.
31 +
32 Binding the socket to your network interface is mandatory (with zero copy) to
33 know the header size of frames used in the circular buffer.
34
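
As an aside, a minimal sketch of the transmit-only setup the new packet_mmap.txt paragraph above describes: protocol 0 on both the socket and the bind skips the per-packet cost of packet_rcv(). Error handling is omitted, and the use of SOCK_RAW for `mode` and the interface name "eth0" are assumptions for illustration only:

    #include <net/if.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/if_packet.h>

    int main(void)
    {
        /* protocol 0: transmit-only, avoids packet_rcv(); needs CAP_NET_RAW */
        int fd = socket(PF_PACKET, SOCK_RAW, 0);
        struct sockaddr_ll ll;

        memset(&ll, 0, sizeof(ll));
        ll.sll_family   = AF_PACKET;
        ll.sll_protocol = 0;                       /* TX-only, as described above */
        ll.sll_ifindex  = if_nametoindex("eth0");  /* "eth0" is an assumption */
        return bind(fd, (struct sockaddr *)&ll, sizeof(ll));
    }
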
35 diff --git a/Makefile b/Makefile
36 index c2f0b7985b41..5d0ec13bb77d 100644
37 --- a/Makefile
38 +++ b/Makefile
39 @@ -1,6 +1,6 @@
40 VERSION = 3
41 PATCHLEVEL = 12
42 -SUBLEVEL = 7
43 +SUBLEVEL = 8
44 EXTRAVERSION =
45 NAME = One Giant Leap for Frogkind
46
47 diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
48 index bbac42a78ce5..6a1117e481b1 100644
49 --- a/arch/arm/boot/dts/exynos5250.dtsi
50 +++ b/arch/arm/boot/dts/exynos5250.dtsi
51 @@ -556,7 +556,7 @@
52 compatible = "arm,pl330", "arm,primecell";
53 reg = <0x10800000 0x1000>;
54 interrupts = <0 33 0>;
55 - clocks = <&clock 271>;
56 + clocks = <&clock 346>;
57 clock-names = "apb_pclk";
58 #dma-cells = <1>;
59 #dma-channels = <8>;
60 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
61 index 65ed63f68ef8..1f735aafd5ec 100644
62 --- a/arch/arm/kernel/traps.c
63 +++ b/arch/arm/kernel/traps.c
64 @@ -35,7 +35,13 @@
65 #include <asm/tls.h>
66 #include <asm/system_misc.h>
67
68 -static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
69 +static const char *handler[]= {
70 + "prefetch abort",
71 + "data abort",
72 + "address exception",
73 + "interrupt",
74 + "undefined instruction",
75 +};
76
77 void *vectors_page;
78
79 diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
80 index 9ee78f7b4990..782f6c71fa0a 100644
81 --- a/arch/arm/mach-footbridge/dc21285-timer.c
82 +++ b/arch/arm/mach-footbridge/dc21285-timer.c
83 @@ -96,11 +96,12 @@ static struct irqaction footbridge_timer_irq = {
84 void __init footbridge_timer_init(void)
85 {
86 struct clock_event_device *ce = &ckevt_dc21285;
87 + unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);
88
89 - clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
90 + clocksource_register_hz(&cksrc_dc21285, rate);
91
92 setup_irq(ce->irq, &footbridge_timer_irq);
93
94 ce->cpumask = cpumask_of(smp_processor_id());
95 - clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff);
96 + clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
97 }
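
The timer change above is easier to see with numbers: for non-negative operands, DIV_ROUND_CLOSEST(x, 16) evaluates to (x + 8) / 16, i.e. the rate rounded to the nearest integer, and that same rounded rate is now also what clockevents_config_and_register() receives, where the raw mem_fclk_21285 (16 times too large) was passed before. A standalone sketch with a made-up fclk value (the real one is board-specific):

    #include <stdio.h>

    /* same effect as the kernel macro for non-negative operands */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        unsigned long fclk = 50000010; /* hypothetical fclk */

        printf("%lu\n", DIV_ROUND_CLOSEST(fclk, 16)); /* 3125001, rounded */
        printf("%lu\n", (fclk + 8) / 16);             /* 3125001, old spelling */
        printf("%lu\n", fclk / 16);                   /* 3125000, truncation */
        return 0;
    }
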
98 diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
99 index 7f8f6076d360..07b91832bd2c 100644
100 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c
101 +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
102 @@ -482,7 +482,7 @@ static struct platform_device lcdc0_device = {
103 .id = 0,
104 .dev = {
105 .platform_data = &lcdc0_info,
106 - .coherent_dma_mask = ~0,
107 + .coherent_dma_mask = DMA_BIT_MASK(32),
108 },
109 };
110
111 @@ -579,7 +579,7 @@ static struct platform_device hdmi_lcdc_device = {
112 .id = 1,
113 .dev = {
114 .platform_data = &hdmi_lcdc_info,
115 - .coherent_dma_mask = ~0,
116 + .coherent_dma_mask = DMA_BIT_MASK(32),
117 },
118 };
119
120 diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
121 index f1994968d303..5cc96d004b4f 100644
122 --- a/arch/arm/mach-shmobile/board-kzm9g.c
123 +++ b/arch/arm/mach-shmobile/board-kzm9g.c
124 @@ -334,7 +334,7 @@ static struct platform_device lcdc_device = {
125 .resource = lcdc_resources,
126 .dev = {
127 .platform_data = &lcdc_info,
128 - .coherent_dma_mask = ~0,
129 + .coherent_dma_mask = DMA_BIT_MASK(32),
130 },
131 };
132
133 diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
134 index af06753eb809..e721d2ccceae 100644
135 --- a/arch/arm/mach-shmobile/board-mackerel.c
136 +++ b/arch/arm/mach-shmobile/board-mackerel.c
137 @@ -409,7 +409,7 @@ static struct platform_device lcdc_device = {
138 .resource = lcdc_resources,
139 .dev = {
140 .platform_data = &lcdc_info,
141 - .coherent_dma_mask = ~0,
142 + .coherent_dma_mask = DMA_BIT_MASK(32),
143 },
144 };
145
146 @@ -499,7 +499,7 @@ static struct platform_device hdmi_lcdc_device = {
147 .id = 1,
148 .dev = {
149 .platform_data = &hdmi_lcdc_info,
150 - .coherent_dma_mask = ~0,
151 + .coherent_dma_mask = DMA_BIT_MASK(32),
152 },
153 };
154
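
Why ~0 was wrong in the four board files above: coherent_dma_mask is a u64, and plain ~0 is the signed int -1, which widens to an all-ones 64-bit mask and so claims DMA reach the LCDC blocks do not have. A quick sketch (the DMA_BIT_MASK definition mirrors the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
        uint64_t old_mask = (uint64_t)~0;     /* int -1 widens to all 64 bits set */
        uint64_t new_mask = DMA_BIT_MASK(32);

        printf("%#llx\n", (unsigned long long)old_mask); /* 0xffffffffffffffff */
        printf("%#llx\n", (unsigned long long)new_mask); /* 0xffffffff */
        return 0;
    }
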
155 diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
156 index 6d5ba9afb16a..3387e60e4ea3 100644
157 --- a/arch/arm/mm/flush.c
158 +++ b/arch/arm/mm/flush.c
159 @@ -175,16 +175,16 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
160 unsigned long i;
161 if (cache_is_vipt_nonaliasing()) {
162 for (i = 0; i < (1 << compound_order(page)); i++) {
163 - void *addr = kmap_atomic(page);
164 + void *addr = kmap_atomic(page + i);
165 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
166 kunmap_atomic(addr);
167 }
168 } else {
169 for (i = 0; i < (1 << compound_order(page)); i++) {
170 - void *addr = kmap_high_get(page);
171 + void *addr = kmap_high_get(page + i);
172 if (addr) {
173 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
174 - kunmap_high(page);
175 + kunmap_high(page + i);
176 }
177 }
178 }
179 diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
180 index f0e2784e7cca..2f9b751878ba 100644
181 --- a/arch/parisc/include/asm/cacheflush.h
182 +++ b/arch/parisc/include/asm/cacheflush.h
183 @@ -125,42 +125,38 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
184 void mark_rodata_ro(void);
185 #endif
186
187 -#ifdef CONFIG_PA8X00
188 -/* Only pa8800, pa8900 needs this */
189 -
190 #include <asm/kmap_types.h>
191
192 #define ARCH_HAS_KMAP
193
194 -void kunmap_parisc(void *addr);
195 -
196 static inline void *kmap(struct page *page)
197 {
198 might_sleep();
199 + flush_dcache_page(page);
200 return page_address(page);
201 }
202
203 static inline void kunmap(struct page *page)
204 {
205 - kunmap_parisc(page_address(page));
206 + flush_kernel_dcache_page_addr(page_address(page));
207 }
208
209 static inline void *kmap_atomic(struct page *page)
210 {
211 pagefault_disable();
212 + flush_dcache_page(page);
213 return page_address(page);
214 }
215
216 static inline void __kunmap_atomic(void *addr)
217 {
218 - kunmap_parisc(addr);
219 + flush_kernel_dcache_page_addr(addr);
220 pagefault_enable();
221 }
222
223 #define kmap_atomic_prot(page, prot) kmap_atomic(page)
224 #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
225 #define kmap_atomic_to_page(ptr) virt_to_page(ptr)
226 -#endif
227
228 #endif /* _PARISC_CACHEFLUSH_H */
229
230 diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
231 index b7adb2ac049c..c53fc63149e8 100644
232 --- a/arch/parisc/include/asm/page.h
233 +++ b/arch/parisc/include/asm/page.h
234 @@ -28,9 +28,8 @@ struct page;
235
236 void clear_page_asm(void *page);
237 void copy_page_asm(void *to, void *from);
238 -void clear_user_page(void *vto, unsigned long vaddr, struct page *pg);
239 -void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
240 - struct page *pg);
241 +#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
242 +#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)
243
244 /* #define CONFIG_PARISC_TMPALIAS */
245
246 diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
247 index c035673209f7..a72545554a31 100644
248 --- a/arch/parisc/kernel/cache.c
249 +++ b/arch/parisc/kernel/cache.c
250 @@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr)
251 }
252 EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
253
254 -void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
255 -{
256 - clear_page_asm(vto);
257 - if (!parisc_requires_coherency())
258 - flush_kernel_dcache_page_asm(vto);
259 -}
260 -EXPORT_SYMBOL(clear_user_page);
261 -
262 -void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
263 - struct page *pg)
264 -{
265 - /* Copy using kernel mapping. No coherency is needed
266 - (all in kmap/kunmap) on machines that don't support
267 - non-equivalent aliasing. However, the `from' page
268 - needs to be flushed before it can be accessed through
269 - the kernel mapping. */
270 - preempt_disable();
271 - flush_dcache_page_asm(__pa(vfrom), vaddr);
272 - preempt_enable();
273 - copy_page_asm(vto, vfrom);
274 - if (!parisc_requires_coherency())
275 - flush_kernel_dcache_page_asm(vto);
276 -}
277 -EXPORT_SYMBOL(copy_user_page);
278 -
279 -#ifdef CONFIG_PA8X00
280 -
281 -void kunmap_parisc(void *addr)
282 -{
283 - if (parisc_requires_coherency())
284 - flush_kernel_dcache_page_addr(addr);
285 -}
286 -EXPORT_SYMBOL(kunmap_parisc);
287 -#endif
288 -
289 void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
290 {
291 unsigned long flags;
292 diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
293 index 4d0bda7b11e3..5be9f879957f 100644
294 --- a/arch/x86/include/asm/fpu-internal.h
295 +++ b/arch/x86/include/asm/fpu-internal.h
296 @@ -293,12 +293,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
297 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
298 is pending. Clear the x87 state here by setting it to fixed
299 values. "m" is a random variable that should be in L1 */
300 - alternative_input(
301 - ASM_NOP8 ASM_NOP2,
302 - "emms\n\t" /* clear stack tags */
303 - "fildl %P[addr]", /* set F?P to defined value */
304 - X86_FEATURE_FXSAVE_LEAK,
305 - [addr] "m" (tsk->thread.fpu.has_fpu));
306 + if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
307 + asm volatile(
308 + "fnclex\n\t"
309 + "emms\n\t"
310 + "fildl %P[addr]" /* set F?P to defined value */
311 + : : [addr] "m" (tsk->thread.fpu.has_fpu));
312 + }
313
314 return fpu_restore_checking(&tsk->thread.fpu);
315 }
316 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
317 index 2c9958cd7a43..ffa5af4c221a 100644
318 --- a/drivers/acpi/battery.c
319 +++ b/drivers/acpi/battery.c
320 @@ -68,6 +68,7 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
321 MODULE_DESCRIPTION("ACPI Battery Driver");
322 MODULE_LICENSE("GPL");
323
324 +static int battery_bix_broken_package;
325 static unsigned int cache_time = 1000;
326 module_param(cache_time, uint, 0644);
327 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
328 @@ -443,7 +444,12 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
329 ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
330 return -ENODEV;
331 }
332 - if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
333 +
334 + if (battery_bix_broken_package)
335 + result = extract_package(battery, buffer.pointer,
336 + extended_info_offsets + 1,
337 + ARRAY_SIZE(extended_info_offsets) - 1);
338 + else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
339 result = extract_package(battery, buffer.pointer,
340 extended_info_offsets,
341 ARRAY_SIZE(extended_info_offsets));
342 @@ -1054,6 +1060,17 @@ static int battery_notify(struct notifier_block *nb,
343 return 0;
344 }
345
346 +static struct dmi_system_id bat_dmi_table[] = {
347 + {
348 + .ident = "NEC LZ750/LS",
349 + .matches = {
350 + DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
351 + DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
352 + },
353 + },
354 + {},
355 +};
356 +
357 static int acpi_battery_add(struct acpi_device *device)
358 {
359 int result = 0;
360 @@ -1163,6 +1180,8 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
361 if (!acpi_battery_dir)
362 return;
363 #endif
364 + if (dmi_check_system(bat_dmi_table))
365 + battery_bix_broken_package = 1;
366 if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
367 #ifdef CONFIG_ACPI_PROCFS_POWER
368 acpi_unlock_battery_dir(acpi_battery_dir);
369 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
370 index 14f1e9506338..cfc6073c0487 100644
371 --- a/drivers/ata/ahci.c
372 +++ b/drivers/ata/ahci.c
373 @@ -427,6 +427,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
374 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
375 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
376 .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
377 + { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
378 + PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
379 + .driver_data = board_ahci_yes_fbs }, /* 88se9170 */
380 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
381 .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
382 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
383 diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
384 index 2168d15bc728..57a818b2b5f2 100644
385 --- a/drivers/char/tpm/tpm_ppi.c
386 +++ b/drivers/char/tpm/tpm_ppi.c
387 @@ -27,15 +27,18 @@ static char *tpm_device_name = "TPM";
388 static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context,
389 void **return_value)
390 {
391 - acpi_status status;
392 + acpi_status status = AE_OK;
393 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
394 - status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
395 - if (strstr(buffer.pointer, context) != NULL) {
396 - *return_value = handle;
397 +
398 + if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) {
399 + if (strstr(buffer.pointer, context) != NULL) {
400 + *return_value = handle;
401 + status = AE_CTRL_TERMINATE;
402 + }
403 kfree(buffer.pointer);
404 - return AE_CTRL_TERMINATE;
405 }
406 - return AE_OK;
407 +
408 + return status;
409 }
410
411 static inline void ppi_assign_params(union acpi_object params[4],
412 diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
413 index 8d3009e44fba..5543b7df8e16 100644
414 --- a/drivers/clk/clk-divider.c
415 +++ b/drivers/clk/clk-divider.c
416 @@ -87,7 +87,7 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
417 return 0;
418 }
419
420 -static unsigned int _get_val(struct clk_divider *divider, u8 div)
421 +static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
422 {
423 if (divider->flags & CLK_DIVIDER_ONE_BASED)
424 return div;
425 diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
426 index ad5ff50c5f28..1a7c1b929c69 100644
427 --- a/drivers/clk/samsung/clk-exynos4.c
428 +++ b/drivers/clk/samsung/clk-exynos4.c
429 @@ -39,7 +39,7 @@
430 #define SRC_TOP1 0xc214
431 #define SRC_CAM 0xc220
432 #define SRC_TV 0xc224
433 -#define SRC_MFC 0xcc28
434 +#define SRC_MFC 0xc228
435 #define SRC_G3D 0xc22c
436 #define E4210_SRC_IMAGE 0xc230
437 #define SRC_LCD0 0xc234
438 diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
439 index adf32343c9f9..e52359cf9b6f 100644
440 --- a/drivers/clk/samsung/clk-exynos5250.c
441 +++ b/drivers/clk/samsung/clk-exynos5250.c
442 @@ -25,6 +25,7 @@
443 #define MPLL_LOCK 0x4000
444 #define MPLL_CON0 0x4100
445 #define SRC_CORE1 0x4204
446 +#define GATE_IP_ACP 0x8800
447 #define CPLL_LOCK 0x10020
448 #define EPLL_LOCK 0x10030
449 #define VPLL_LOCK 0x10040
450 @@ -75,7 +76,6 @@
451 #define SRC_CDREX 0x20200
452 #define PLL_DIV2_SEL 0x20a24
453 #define GATE_IP_DISP1 0x10928
454 -#define GATE_IP_ACP 0x10000
455
456 /* list of PLLs to be registered */
457 enum exynos5250_plls {
458 @@ -120,7 +120,8 @@ enum exynos5250_clks {
459 spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2,
460 hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1,
461 tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct,
462 - wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d,
463 + wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, mdma0,
464 + smmu_mdma0,
465
466 /* mux clocks */
467 mout_hdmi = 1024,
468 @@ -354,8 +355,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
469 GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0),
470 GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0),
471 GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
472 - GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0),
473 - GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0),
474 + GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 2, 0, 0),
475 + GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 1, 0, 0),
476 GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
477 GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0),
478 GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
479 @@ -406,7 +407,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
480 GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0),
481 GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
482 GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
483 - GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
484 + GATE(sysreg, "sysreg", "aclk66",
485 + GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0),
486 GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
487 GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
488 GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
489 @@ -492,6 +494,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
490 GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
491 GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
492 GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0),
493 + GATE(mdma0, "mdma0", "aclk266", GATE_IP_ACP, 1, 0, 0),
494 + GATE(smmu_mdma0, "smmu_mdma0", "aclk266", GATE_IP_ACP, 5, 0, 0),
495 };
496
497 static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
498 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
499 index 99d8ab548a34..d5dc567efd96 100644
500 --- a/drivers/cpufreq/intel_pstate.c
501 +++ b/drivers/cpufreq/intel_pstate.c
502 @@ -518,7 +518,8 @@ static void intel_pstate_timer_func(unsigned long __data)
503 }
504
505 #define ICPU(model, policy) \
506 - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }
507 + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
508 + (unsigned long)&policy }
509
510 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
511 ICPU(0x2a, default_policy),
512 diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
513 index 57cda2a1437b..3dc7a997b795 100644
514 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
515 +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
516 @@ -1294,7 +1294,11 @@ init_jump(struct nvbios_init *init)
517 u16 offset = nv_ro16(bios, init->offset + 1);
518
519 trace("JUMP\t0x%04x\n", offset);
520 - init->offset = offset;
521 +
522 + if (init_exec(init))
523 + init->offset = offset;
524 + else
525 + init->offset += 3;
526 }
527
528 /**
529 diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
530 index 05188351711d..a97263e902ff 100644
531 --- a/drivers/leds/leds-lp5521.c
532 +++ b/drivers/leds/leds-lp5521.c
533 @@ -244,18 +244,12 @@ static int lp5521_update_program_memory(struct lp55xx_chip *chip,
534 if (i % 2)
535 goto err;
536
537 - mutex_lock(&chip->lock);
538 -
539 for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) {
540 ret = lp55xx_write(chip, addr[idx] + i, pattern[i]);
541 - if (ret) {
542 - mutex_unlock(&chip->lock);
543 + if (ret)
544 return -EINVAL;
545 - }
546 }
547
548 - mutex_unlock(&chip->lock);
549 -
550 return size;
551
552 err:
553 @@ -427,15 +421,17 @@ static ssize_t store_engine_load(struct device *dev,
554 {
555 struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
556 struct lp55xx_chip *chip = led->chip;
557 + int ret;
558
559 mutex_lock(&chip->lock);
560
561 chip->engine_idx = nr;
562 lp5521_load_engine(chip);
563 + ret = lp5521_update_program_memory(chip, buf, len);
564
565 mutex_unlock(&chip->lock);
566
567 - return lp5521_update_program_memory(chip, buf, len);
568 + return ret;
569 }
570 store_load(1)
571 store_load(2)
572 diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
573 index fe3bcbb5747f..f382fc1e57a0 100644
574 --- a/drivers/leds/leds-lp5523.c
575 +++ b/drivers/leds/leds-lp5523.c
576 @@ -336,18 +336,12 @@ static int lp5523_update_program_memory(struct lp55xx_chip *chip,
577 if (i % 2)
578 goto err;
579
580 - mutex_lock(&chip->lock);
581 -
582 for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) {
583 ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]);
584 - if (ret) {
585 - mutex_unlock(&chip->lock);
586 + if (ret)
587 return -EINVAL;
588 - }
589 }
590
591 - mutex_unlock(&chip->lock);
592 -
593 return size;
594
595 err:
596 @@ -547,15 +541,17 @@ static ssize_t store_engine_load(struct device *dev,
597 {
598 struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
599 struct lp55xx_chip *chip = led->chip;
600 + int ret;
601
602 mutex_lock(&chip->lock);
603
604 chip->engine_idx = nr;
605 lp5523_load_engine_and_select_page(chip);
606 + ret = lp5523_update_program_memory(chip, buf, len);
607
608 mutex_unlock(&chip->lock);
609
610 - return lp5523_update_program_memory(chip, buf, len);
611 + return ret;
612 }
613 store_load(1)
614 store_load(2)
615 diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
616 index e6ae7720f9e1..6ed83feb0c52 100644
617 --- a/drivers/mfd/rtsx_pcr.c
618 +++ b/drivers/mfd/rtsx_pcr.c
619 @@ -1230,8 +1230,14 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
620
621 pcr->remove_pci = true;
622
623 - cancel_delayed_work(&pcr->carddet_work);
624 - cancel_delayed_work(&pcr->idle_work);
625 + /* Disable interrupts at the pcr level */
626 + spin_lock_irq(&pcr->lock);
627 + rtsx_pci_writel(pcr, RTSX_BIER, 0);
628 + pcr->bier = 0;
629 + spin_unlock_irq(&pcr->lock);
630 +
631 + cancel_delayed_work_sync(&pcr->carddet_work);
632 + cancel_delayed_work_sync(&pcr->idle_work);
633
634 mfd_remove_devices(&pcidev->dev);
635
636 diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
637 index 9e1601487263..4fc96d6c6ac0 100644
638 --- a/drivers/net/ethernet/arc/emac_main.c
639 +++ b/drivers/net/ethernet/arc/emac_main.c
640 @@ -565,6 +565,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
641 /* Make sure pointer to data buffer is set */
642 wmb();
643
644 + skb_tx_timestamp(skb);
645 +
646 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
647
648 /* Increment index to point to the next BD */
649 @@ -579,8 +581,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
650
651 arc_reg_set(priv, R_STATUS, TXPL_MASK);
652
653 - skb_tx_timestamp(skb);
654 -
655 return NETDEV_TX_OK;
656 }
657
658 diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
659 index a82229fe1c7f..3ff1f272c6c8 100644
660 --- a/drivers/net/ethernet/broadcom/tg3.c
661 +++ b/drivers/net/ethernet/broadcom/tg3.c
662 @@ -16485,6 +16485,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
663 /* Clear this out for sanity. */
664 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
665
666 + /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
667 + tw32(TG3PCI_REG_BASE_ADDR, 0);
668 +
669 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
670 &pci_state_reg);
671 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
672 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
673 index b2793b91cc55..63090c0ddeb9 100644
674 --- a/drivers/net/ethernet/freescale/fec_main.c
675 +++ b/drivers/net/ethernet/freescale/fec_main.c
676 @@ -425,6 +425,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
677 /* If this was the last BD in the ring, start at the beginning again. */
678 bdp = fec_enet_get_nextdesc(bdp, fep);
679
680 + skb_tx_timestamp(skb);
681 +
682 fep->cur_tx = bdp;
683
684 if (fep->cur_tx == fep->dirty_tx)
685 @@ -433,8 +435,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
686 /* Trigger transmission start */
687 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
688
689 - skb_tx_timestamp(skb);
690 -
691 return NETDEV_TX_OK;
692 }
693
694 diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
695 index 2d1c6bdd3618..7628e0fd8455 100644
696 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
697 +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
698 @@ -3033,7 +3033,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
699
700 dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
701 NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
702 - dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO |
703 + dev->features = NETIF_F_SG | NETIF_F_TSO |
704 NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
705 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
706 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
707 diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
708 index 07c9bc4c61bc..f59a0b6f1ae5 100644
709 --- a/drivers/net/ethernet/sfc/efx.c
710 +++ b/drivers/net/ethernet/sfc/efx.c
711 @@ -585,7 +585,7 @@ static void efx_start_datapath(struct efx_nic *efx)
712 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
713 efx->type->rx_buffer_padding);
714 rx_buf_len = (sizeof(struct efx_rx_page_state) +
715 - NET_IP_ALIGN + efx->rx_dma_len);
716 + efx->rx_ip_align + efx->rx_dma_len);
717 if (rx_buf_len <= PAGE_SIZE) {
718 efx->rx_scatter = efx->type->always_rx_scatter;
719 efx->rx_buffer_order = 0;
720 @@ -645,6 +645,8 @@ static void efx_start_datapath(struct efx_nic *efx)
721 WARN_ON(channel->rx_pkt_n_frags);
722 }
723
724 + efx_ptp_start_datapath(efx);
725 +
726 if (netif_device_present(efx->net_dev))
727 netif_tx_wake_all_queues(efx->net_dev);
728 }
729 @@ -659,6 +661,8 @@ static void efx_stop_datapath(struct efx_nic *efx)
730 EFX_ASSERT_RESET_SERIALISED(efx);
731 BUG_ON(efx->port_enabled);
732
733 + efx_ptp_stop_datapath(efx);
734 +
735 /* Stop RX refill */
736 efx_for_each_channel(channel, efx) {
737 efx_for_each_channel_rx_queue(rx_queue, channel)
738 @@ -2550,6 +2554,8 @@ static int efx_init_struct(struct efx_nic *efx,
739
740 efx->net_dev = net_dev;
741 efx->rx_prefix_size = efx->type->rx_prefix_size;
742 + efx->rx_ip_align =
743 + NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
744 efx->rx_packet_hash_offset =
745 efx->type->rx_hash_offset - efx->type->rx_prefix_size;
746 spin_lock_init(&efx->stats_lock);
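
The rx_ip_align computation added above is easiest to check by hand. Assuming a platform where NET_IP_ALIGN is 2, the offset plus the hardware RX prefix plus the 14-byte Ethernet header always leaves the IP header 4-byte aligned; the prefix sizes below are hypothetical:

    #include <stdio.h>

    #define NET_IP_ALIGN 2 /* assumption: platform with NET_IP_ALIGN == 2 */

    static unsigned int rx_ip_align(unsigned int rx_prefix_size)
    {
        return NET_IP_ALIGN ? (rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
    }

    int main(void)
    {
        printf("%u\n", rx_ip_align(14)); /* 0: 0 + 14 + 14 = 28, divisible by 4 */
        printf("%u\n", rx_ip_align(16)); /* 2: 2 + 16 + 14 = 32, divisible by 4 */
        return 0;
    }
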
747 diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
748 index 366c8e3e3784..4b0bd8a1514d 100644
749 --- a/drivers/net/ethernet/sfc/mcdi.c
750 +++ b/drivers/net/ethernet/sfc/mcdi.c
751 @@ -50,6 +50,7 @@ struct efx_mcdi_async_param {
752 static void efx_mcdi_timeout_async(unsigned long context);
753 static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
754 bool *was_attached_out);
755 +static bool efx_mcdi_poll_once(struct efx_nic *efx);
756
757 static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
758 {
759 @@ -237,6 +238,21 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
760 }
761 }
762
763 +static bool efx_mcdi_poll_once(struct efx_nic *efx)
764 +{
765 + struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
766 +
767 + rmb();
768 + if (!efx->type->mcdi_poll_response(efx))
769 + return false;
770 +
771 + spin_lock_bh(&mcdi->iface_lock);
772 + efx_mcdi_read_response_header(efx);
773 + spin_unlock_bh(&mcdi->iface_lock);
774 +
775 + return true;
776 +}
777 +
778 static int efx_mcdi_poll(struct efx_nic *efx)
779 {
780 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
781 @@ -272,18 +288,13 @@ static int efx_mcdi_poll(struct efx_nic *efx)
782
783 time = jiffies;
784
785 - rmb();
786 - if (efx->type->mcdi_poll_response(efx))
787 + if (efx_mcdi_poll_once(efx))
788 break;
789
790 if (time_after(time, finish))
791 return -ETIMEDOUT;
792 }
793
794 - spin_lock_bh(&mcdi->iface_lock);
795 - efx_mcdi_read_response_header(efx);
796 - spin_unlock_bh(&mcdi->iface_lock);
797 -
798 /* Return rc=0 like wait_event_timeout() */
799 return 0;
800 }
801 @@ -619,6 +630,16 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
802 rc = efx_mcdi_await_completion(efx);
803
804 if (rc != 0) {
805 + netif_err(efx, hw, efx->net_dev,
806 + "MC command 0x%x inlen %d mode %d timed out\n",
807 + cmd, (int)inlen, mcdi->mode);
808 +
809 + if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
810 + netif_err(efx, hw, efx->net_dev,
811 + "MCDI request was completed without an event\n");
812 + rc = 0;
813 + }
814 +
815 /* Close the race with efx_mcdi_ev_cpl() executing just too late
816 * and completing a request we've just cancelled, by ensuring
817 * that the seqno check therein fails.
818 @@ -627,11 +648,9 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
819 ++mcdi->seqno;
820 ++mcdi->credits;
821 spin_unlock_bh(&mcdi->iface_lock);
822 + }
823
824 - netif_err(efx, hw, efx->net_dev,
825 - "MC command 0x%x inlen %d mode %d timed out\n",
826 - cmd, (int)inlen, mcdi->mode);
827 - } else {
828 + if (rc == 0) {
829 size_t hdr_len, data_len;
830
831 /* At the very least we need a memory barrier here to ensure
832 diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
833 index b172ed133055..2aeb45167511 100644
834 --- a/drivers/net/ethernet/sfc/net_driver.h
835 +++ b/drivers/net/ethernet/sfc/net_driver.h
836 @@ -673,6 +673,8 @@ struct vfdi_status;
837 * @n_channels: Number of channels in use
838 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
839 * @n_tx_channels: Number of channels used for TX
840 + * @rx_ip_align: RX DMA address offset to have IP header aligned in
841 + * in accordance with NET_IP_ALIGN
842 * @rx_dma_len: Current maximum RX DMA length
843 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
844 * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
845 @@ -806,6 +808,7 @@ struct efx_nic {
846 unsigned rss_spread;
847 unsigned tx_channel_offset;
848 unsigned n_tx_channels;
849 + unsigned int rx_ip_align;
850 unsigned int rx_dma_len;
851 unsigned int rx_buffer_order;
852 unsigned int rx_buffer_truesize;
853 diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
854 index 890bbbe8320e..3d713b518847 100644
855 --- a/drivers/net/ethernet/sfc/nic.h
856 +++ b/drivers/net/ethernet/sfc/nic.h
857 @@ -528,6 +528,8 @@ extern void efx_ptp_get_ts_info(struct efx_nic *efx,
858 extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
859 extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
860 extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
861 +void efx_ptp_start_datapath(struct efx_nic *efx);
862 +void efx_ptp_stop_datapath(struct efx_nic *efx);
863
864 extern const struct efx_nic_type falcon_a1_nic_type;
865 extern const struct efx_nic_type falcon_b0_nic_type;
866 diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
867 index 03acf57df045..3dd39dcfe36b 100644
868 --- a/drivers/net/ethernet/sfc/ptp.c
869 +++ b/drivers/net/ethernet/sfc/ptp.c
870 @@ -220,6 +220,7 @@ struct efx_ptp_timeset {
871 * @evt_list: List of MC receive events awaiting packets
872 * @evt_free_list: List of free events
873 * @evt_lock: Lock for manipulating evt_list and evt_free_list
874 + * @evt_overflow: Boolean indicating that event list has overflowed
875 * @rx_evts: Instantiated events (on evt_list and evt_free_list)
876 * @workwq: Work queue for processing pending PTP operations
877 * @work: Work task
878 @@ -270,6 +271,7 @@ struct efx_ptp_data {
879 struct list_head evt_list;
880 struct list_head evt_free_list;
881 spinlock_t evt_lock;
882 + bool evt_overflow;
883 struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
884 struct workqueue_struct *workwq;
885 struct work_struct work;
886 @@ -635,6 +637,11 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
887 }
888 }
889 }
890 + /* If the event overflow flag is set and the event list is now empty
891 + * clear the flag to re-enable the overflow warning message.
892 + */
893 + if (ptp->evt_overflow && list_empty(&ptp->evt_list))
894 + ptp->evt_overflow = false;
895 spin_unlock_bh(&ptp->evt_lock);
896 }
897
898 @@ -676,6 +683,11 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
899 break;
900 }
901 }
902 + /* If the event overflow flag is set and the event list is now empty
903 + * clear the flag to re-enable the overflow warning message.
904 + */
905 + if (ptp->evt_overflow && list_empty(&ptp->evt_list))
906 + ptp->evt_overflow = false;
907 spin_unlock_bh(&ptp->evt_lock);
908
909 return rc;
910 @@ -705,8 +717,9 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
911 __skb_queue_tail(q, skb);
912 } else if (time_after(jiffies, match->expiry)) {
913 match->state = PTP_PACKET_STATE_TIMED_OUT;
914 - netif_warn(efx, rx_err, efx->net_dev,
915 - "PTP packet - no timestamp seen\n");
916 + if (net_ratelimit())
917 + netif_warn(efx, rx_err, efx->net_dev,
918 + "PTP packet - no timestamp seen\n");
919 __skb_queue_tail(q, skb);
920 } else {
921 /* Replace unprocessed entry and stop */
922 @@ -788,9 +801,14 @@ fail:
923 static int efx_ptp_stop(struct efx_nic *efx)
924 {
925 struct efx_ptp_data *ptp = efx->ptp_data;
926 - int rc = efx_ptp_disable(efx);
927 struct list_head *cursor;
928 struct list_head *next;
929 + int rc;
930 +
931 + if (ptp == NULL)
932 + return 0;
933 +
934 + rc = efx_ptp_disable(efx);
935
936 if (ptp->rxfilter_installed) {
937 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
938 @@ -809,11 +827,19 @@ static int efx_ptp_stop(struct efx_nic *efx)
939 list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
940 list_move(cursor, &efx->ptp_data->evt_free_list);
941 }
942 + ptp->evt_overflow = false;
943 spin_unlock_bh(&efx->ptp_data->evt_lock);
944
945 return rc;
946 }
947
948 +static int efx_ptp_restart(struct efx_nic *efx)
949 +{
950 + if (efx->ptp_data && efx->ptp_data->enabled)
951 + return efx_ptp_start(efx);
952 + return 0;
953 +}
954 +
955 static void efx_ptp_pps_worker(struct work_struct *work)
956 {
957 struct efx_ptp_data *ptp =
958 @@ -901,6 +927,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
959 spin_lock_init(&ptp->evt_lock);
960 for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
961 list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
962 + ptp->evt_overflow = false;
963
964 ptp->phc_clock_info.owner = THIS_MODULE;
965 snprintf(ptp->phc_clock_info.name,
966 @@ -989,7 +1016,11 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
967 skb->len >= PTP_MIN_LENGTH &&
968 skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
969 likely(skb->protocol == htons(ETH_P_IP)) &&
970 + skb_transport_header_was_set(skb) &&
971 + skb_network_header_len(skb) >= sizeof(struct iphdr) &&
972 ip_hdr(skb)->protocol == IPPROTO_UDP &&
973 + skb_headlen(skb) >=
974 + skb_transport_offset(skb) + sizeof(struct udphdr) &&
975 udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
976 }
977
978 @@ -1106,7 +1137,7 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
979 {
980 if ((enable_wanted != efx->ptp_data->enabled) ||
981 (enable_wanted && (efx->ptp_data->mode != new_mode))) {
982 - int rc;
983 + int rc = 0;
984
985 if (enable_wanted) {
986 /* Change of mode requires disable */
987 @@ -1123,7 +1154,8 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
988 * succeed.
989 */
990 efx->ptp_data->mode = new_mode;
991 - rc = efx_ptp_start(efx);
992 + if (netif_running(efx->net_dev))
993 + rc = efx_ptp_start(efx);
994 if (rc == 0) {
995 rc = efx_ptp_synchronize(efx,
996 PTP_SYNC_ATTEMPTS * 2);
997 @@ -1295,8 +1327,13 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
998 list_add_tail(&evt->link, &ptp->evt_list);
999
1000 queue_work(ptp->workwq, &ptp->work);
1001 - } else {
1002 - netif_err(efx, rx_err, efx->net_dev, "No free PTP event");
1003 + } else if (!ptp->evt_overflow) {
1004 + /* Log a warning message and set the event overflow flag.
1005 + * The message won't be logged again until the event queue
1006 + * becomes empty.
1007 + */
1008 + netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
1009 + ptp->evt_overflow = true;
1010 }
1011 spin_unlock_bh(&ptp->evt_lock);
1012 }
1013 @@ -1389,7 +1426,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
1014 if (rc != 0)
1015 return rc;
1016
1017 - ptp_data->current_adjfreq = delta;
1018 + ptp_data->current_adjfreq = adjustment_ns;
1019 return 0;
1020 }
1021
1022 @@ -1404,7 +1441,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
1023
1024 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
1025 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
1026 - MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0);
1027 + MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
1028 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
1029 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
1030 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
1031 @@ -1491,3 +1528,14 @@ void efx_ptp_probe(struct efx_nic *efx)
1032 efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
1033 &efx_ptp_channel_type;
1034 }
1035 +
1036 +void efx_ptp_start_datapath(struct efx_nic *efx)
1037 +{
1038 + if (efx_ptp_restart(efx))
1039 + netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
1040 +}
1041 +
1042 +void efx_ptp_stop_datapath(struct efx_nic *efx)
1043 +{
1044 + efx_ptp_stop(efx);
1045 +}
1046 diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
1047 index 4a596725023f..f18d5864bfa7 100644
1048 --- a/drivers/net/ethernet/sfc/rx.c
1049 +++ b/drivers/net/ethernet/sfc/rx.c
1050 @@ -93,7 +93,7 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
1051
1052 void efx_rx_config_page_split(struct efx_nic *efx)
1053 {
1054 - efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
1055 + efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
1056 EFX_RX_BUF_ALIGNMENT);
1057 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
1058 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
1059 @@ -188,9 +188,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
1060 do {
1061 index = rx_queue->added_count & rx_queue->ptr_mask;
1062 rx_buf = efx_rx_buffer(rx_queue, index);
1063 - rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
1064 + rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
1065 rx_buf->page = page;
1066 - rx_buf->page_offset = page_offset + NET_IP_ALIGN;
1067 + rx_buf->page_offset = page_offset + efx->rx_ip_align;
1068 rx_buf->len = efx->rx_dma_len;
1069 rx_buf->flags = 0;
1070 ++rx_queue->added_count;
1071 diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
1072 index 571452e786d5..61a1540f1347 100644
1073 --- a/drivers/net/ethernet/tehuti/tehuti.c
1074 +++ b/drivers/net/ethernet/tehuti/tehuti.c
1075 @@ -2019,7 +2019,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1076 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
1077 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1078 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM
1079 - /*| NETIF_F_FRAGLIST */
1080 ;
1081 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1082 NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;
1083 diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
1084 index 0029148077a9..def50bd53124 100644
1085 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c
1086 +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
1087 @@ -1016,7 +1016,7 @@ static int temac_of_probe(struct platform_device *op)
1088 platform_set_drvdata(op, ndev);
1089 SET_NETDEV_DEV(ndev, &op->dev);
1090 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
1091 - ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
1092 + ndev->features = NETIF_F_SG;
1093 ndev->netdev_ops = &temac_netdev_ops;
1094 ndev->ethtool_ops = &temac_ethtool_ops;
1095 #if 0
1096 diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
1097 index b2ff038d6d20..f9293da19e26 100644
1098 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
1099 +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
1100 @@ -1486,7 +1486,7 @@ static int axienet_of_probe(struct platform_device *op)
1101
1102 SET_NETDEV_DEV(ndev, &op->dev);
1103 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
1104 - ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
1105 + ndev->features = NETIF_F_SG;
1106 ndev->netdev_ops = &axienet_netdev_ops;
1107 ndev->ethtool_ops = &axienet_ethtool_ops;
1108
1109 diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
1110 index 3169252613fa..5d78c1d08abd 100644
1111 --- a/drivers/net/hamradio/hdlcdrv.c
1112 +++ b/drivers/net/hamradio/hdlcdrv.c
1113 @@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1114 case HDLCDRVCTL_CALIBRATE:
1115 if(!capable(CAP_SYS_RAWIO))
1116 return -EPERM;
1117 + if (bi.data.calibrate > INT_MAX / s->par.bitrate)
1118 + return -EINVAL;
1119 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
1120 return 0;
1121
1122 diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
1123 index 5af1c3e5032a..b7e967540509 100644
1124 --- a/drivers/net/hamradio/yam.c
1125 +++ b/drivers/net/hamradio/yam.c
1126 @@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1127 break;
1128
1129 case SIOCYAMGCFG:
1130 + memset(&yi, 0, sizeof(yi));
1131 yi.cfg.mask = 0xffffffff;
1132 yi.cfg.iobase = yp->iobase;
1133 yi.cfg.irq = yp->irq;
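
The memset() added above closes a stack infoleak: yi is a local struct later copied back to user space, and any field the SIOCYAMGCFG path did not set, plus compiler padding, would otherwise carry stale kernel stack bytes. The shape of the fix, with an illustrative struct whose field set is an assumption:

    #include <string.h>

    /* illustrative only; the real yamdrv_ioctl_cfg has more fields */
    struct cfg {
        unsigned long mask;
        int iobase, irq;         /* unset fields + padding would leak otherwise */
    };

    static void fill_cfg(struct cfg *yi)
    {
        memset(yi, 0, sizeof(*yi));  /* the added line: zero everything first */
        yi->mask   = 0xffffffff;
        yi->iobase = 0x300;          /* example values; irq stays safely zero */
    }
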
1134 diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
1135 index 524f713f6017..f8135725bcf6 100644
1136 --- a/drivers/net/hyperv/netvsc_drv.c
1137 +++ b/drivers/net/hyperv/netvsc_drv.c
1138 @@ -327,7 +327,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
1139 return -EINVAL;
1140
1141 nvdev->start_remove = true;
1142 - cancel_delayed_work_sync(&ndevctx->dwork);
1143 cancel_work_sync(&ndevctx->work);
1144 netif_tx_disable(ndev);
1145 rndis_filter_device_remove(hdev);
1146 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
1147 index dc76670c2f2a..5895e4dbbf2a 100644
1148 --- a/drivers/net/macvtap.c
1149 +++ b/drivers/net/macvtap.c
1150 @@ -767,11 +767,10 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
1151 const struct sk_buff *skb,
1152 const struct iovec *iv, int len)
1153 {
1154 - struct macvlan_dev *vlan;
1155 int ret;
1156 int vnet_hdr_len = 0;
1157 int vlan_offset = 0;
1158 - int copied;
1159 + int copied, total;
1160
1161 if (q->flags & IFF_VNET_HDR) {
1162 struct virtio_net_hdr vnet_hdr;
1163 @@ -786,7 +785,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
1164 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
1165 return -EFAULT;
1166 }
1167 - copied = vnet_hdr_len;
1168 + total = copied = vnet_hdr_len;
1169 + total += skb->len;
1170
1171 if (!vlan_tx_tag_present(skb))
1172 len = min_t(int, skb->len, len);
1173 @@ -801,6 +801,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
1174
1175 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
1176 len = min_t(int, skb->len + VLAN_HLEN, len);
1177 + total += VLAN_HLEN;
1178
1179 copy = min_t(int, vlan_offset, len);
1180 ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
1181 @@ -818,19 +819,9 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
1182 }
1183
1184 ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
1185 - copied += len;
1186
1187 done:
1188 - rcu_read_lock();
1189 - vlan = rcu_dereference(q->vlan);
1190 - if (vlan) {
1191 - preempt_disable();
1192 - macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
1193 - preempt_enable();
1194 - }
1195 - rcu_read_unlock();
1196 -
1197 - return ret ? ret : copied;
1198 + return ret ? ret : total;
1199 }
1200
1201 static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
1202 @@ -885,7 +876,9 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
1203 }
1204
1205 ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
1206 - ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
1207 + ret = min_t(ssize_t, ret, len);
1208 + if (ret > 0)
1209 + iocb->ki_pos = ret;
1210 out:
1211 return ret;
1212 }
1213 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1214 index 782e38bfc1ee..7c8343a4f918 100644
1215 --- a/drivers/net/tun.c
1216 +++ b/drivers/net/tun.c
1217 @@ -1184,7 +1184,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1218 {
1219 struct tun_pi pi = { 0, skb->protocol };
1220 ssize_t total = 0;
1221 - int vlan_offset = 0;
1222 + int vlan_offset = 0, copied;
1223
1224 if (!(tun->flags & TUN_NO_PI)) {
1225 if ((len -= sizeof(pi)) < 0)
1226 @@ -1248,6 +1248,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1227 total += tun->vnet_hdr_sz;
1228 }
1229
1230 + copied = total;
1231 + total += skb->len;
1232 if (!vlan_tx_tag_present(skb)) {
1233 len = min_t(int, skb->len, len);
1234 } else {
1235 @@ -1262,24 +1264,24 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1236
1237 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
1238 len = min_t(int, skb->len + VLAN_HLEN, len);
1239 + total += VLAN_HLEN;
1240
1241 copy = min_t(int, vlan_offset, len);
1242 - ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy);
1243 + ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
1244 len -= copy;
1245 - total += copy;
1246 + copied += copy;
1247 if (ret || !len)
1248 goto done;
1249
1250 copy = min_t(int, sizeof(veth), len);
1251 - ret = memcpy_toiovecend(iv, (void *)&veth, total, copy);
1252 + ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
1253 len -= copy;
1254 - total += copy;
1255 + copied += copy;
1256 if (ret || !len)
1257 goto done;
1258 }
1259
1260 - skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len);
1261 - total += len;
1262 + skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
1263
1264 done:
1265 tun->dev->stats.tx_packets++;
1266 @@ -1356,6 +1358,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
1267 ret = tun_do_read(tun, tfile, iocb, iv, len,
1268 file->f_flags & O_NONBLOCK);
1269 ret = min_t(ssize_t, ret, len);
1270 + if (ret > 0)
1271 + iocb->ki_pos = ret;
1272 out:
1273 tun_put(tun);
1274 return ret;
1275 diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
1276 index bbc9cb84ec1f..8065066a6230 100644
1277 --- a/drivers/net/virtio_net.c
1278 +++ b/drivers/net/virtio_net.c
1279 @@ -297,26 +297,61 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
1280 return skb;
1281 }
1282
1283 -static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
1284 +static struct sk_buff *receive_small(void *buf, unsigned int len)
1285 {
1286 - struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
1287 - struct page *page;
1288 - int num_buf, i, len;
1289 + struct sk_buff * skb = buf;
1290 +
1291 + len -= sizeof(struct virtio_net_hdr);
1292 + skb_trim(skb, len);
1293 +
1294 + return skb;
1295 +}
1296 +
1297 +static struct sk_buff *receive_big(struct net_device *dev,
1298 + struct receive_queue *rq,
1299 + void *buf)
1300 +{
1301 + struct page *page = buf;
1302 + struct sk_buff *skb = page_to_skb(rq, page, 0);
1303 +
1304 + if (unlikely(!skb))
1305 + goto err;
1306 +
1307 + return skb;
1308 +
1309 +err:
1310 + dev->stats.rx_dropped++;
1311 + give_pages(rq, page);
1312 + return NULL;
1313 +}
1314 +
1315 +static struct sk_buff *receive_mergeable(struct net_device *dev,
1316 + struct receive_queue *rq,
1317 + void *buf,
1318 + unsigned int len)
1319 +{
1320 + struct skb_vnet_hdr *hdr = page_address(buf);
1321 + int num_buf = hdr->mhdr.num_buffers;
1322 + struct page *page = buf;
1323 + struct sk_buff *skb = page_to_skb(rq, page, len);
1324 + int i;
1325 +
1326 + if (unlikely(!skb))
1327 + goto err_skb;
1328
1329 - num_buf = hdr->mhdr.num_buffers;
1330 while (--num_buf) {
1331 i = skb_shinfo(skb)->nr_frags;
1332 if (i >= MAX_SKB_FRAGS) {
1333 pr_debug("%s: packet too long\n", skb->dev->name);
1334 skb->dev->stats.rx_length_errors++;
1335 - return -EINVAL;
1336 + goto err_frags;
1337 }
1338 page = virtqueue_get_buf(rq->vq, &len);
1339 if (!page) {
1340 - pr_debug("%s: rx error: %d buffers missing\n",
1341 - skb->dev->name, hdr->mhdr.num_buffers);
1342 - skb->dev->stats.rx_length_errors++;
1343 - return -EINVAL;
1344 + pr_debug("%s: rx error: %d buffers %d missing\n",
1345 + dev->name, hdr->mhdr.num_buffers, num_buf);
1346 + dev->stats.rx_length_errors++;
1347 + goto err_buf;
1348 }
1349
1350 if (len > PAGE_SIZE)
1351 @@ -326,7 +361,26 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
1352
1353 --rq->num;
1354 }
1355 - return 0;
1356 + return skb;
1357 +err_skb:
1358 + give_pages(rq, page);
1359 + while (--num_buf) {
1360 +err_frags:
1361 + buf = virtqueue_get_buf(rq->vq, &len);
1362 + if (unlikely(!buf)) {
1363 + pr_debug("%s: rx error: %d buffers missing\n",
1364 + dev->name, num_buf);
1365 + dev->stats.rx_length_errors++;
1366 + break;
1367 + }
1368 + page = buf;
1369 + give_pages(rq, page);
1370 + --rq->num;
1371 + }
1372 +err_buf:
1373 + dev->stats.rx_dropped++;
1374 + dev_kfree_skb(skb);
1375 + return NULL;
1376 }
1377
1378 static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
1379 @@ -335,7 +389,6 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
1380 struct net_device *dev = vi->dev;
1381 struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
1382 struct sk_buff *skb;
1383 - struct page *page;
1384 struct skb_vnet_hdr *hdr;
1385
1386 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
1387 @@ -347,25 +400,15 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
1388 dev_kfree_skb(buf);
1389 return;
1390 }
1391 + if (vi->mergeable_rx_bufs)
1392 + skb = receive_mergeable(dev, rq, buf, len);
1393 + else if (vi->big_packets)
1394 + skb = receive_big(dev, rq, buf);
1395 + else
1396 + skb = receive_small(buf, len);
1397
1398 - if (!vi->mergeable_rx_bufs && !vi->big_packets) {
1399 - skb = buf;
1400 - len -= sizeof(struct virtio_net_hdr);
1401 - skb_trim(skb, len);
1402 - } else {
1403 - page = buf;
1404 - skb = page_to_skb(rq, page, len);
1405 - if (unlikely(!skb)) {
1406 - dev->stats.rx_dropped++;
1407 - give_pages(rq, page);
1408 - return;
1409 - }
1410 - if (vi->mergeable_rx_bufs)
1411 - if (receive_mergeable(rq, skb)) {
1412 - dev_kfree_skb(skb);
1413 - return;
1414 - }
1415 - }
1416 + if (unlikely(!skb))
1417 + return;
1418
1419 hdr = skb_vnet_hdr(skb);
1420
1421 @@ -1307,6 +1350,11 @@ static void virtnet_config_changed(struct virtio_device *vdev)
1422
1423 static void virtnet_free_queues(struct virtnet_info *vi)
1424 {
1425 + int i;
1426 +
1427 + for (i = 0; i < vi->max_queue_pairs; i++)
1428 + netif_napi_del(&vi->rq[i].napi);
1429 +
1430 kfree(vi->rq);
1431 kfree(vi->sq);
1432 }
1433 @@ -1724,16 +1772,17 @@ static int virtnet_restore(struct virtio_device *vdev)
1434 if (err)
1435 return err;
1436
1437 - if (netif_running(vi->dev))
1438 + if (netif_running(vi->dev)) {
1439 + for (i = 0; i < vi->curr_queue_pairs; i++)
1440 + if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1441 + schedule_delayed_work(&vi->refill, 0);
1442 +
1443 for (i = 0; i < vi->max_queue_pairs; i++)
1444 virtnet_napi_enable(&vi->rq[i]);
1445 + }
1446
1447 netif_device_attach(vi->dev);
1448
1449 - for (i = 0; i < vi->curr_queue_pairs; i++)
1450 - if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1451 - schedule_delayed_work(&vi->refill, 0);
1452 -
1453 mutex_lock(&vi->config_lock);
1454 vi->config_enable = true;
1455 mutex_unlock(&vi->config_lock);
1456 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
1457 index 2ef5b6219f3f..146236891889 100644
1458 --- a/drivers/net/vxlan.c
1459 +++ b/drivers/net/vxlan.c
1460 @@ -1672,7 +1672,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1461 netdev_dbg(dev, "circular route to %pI4\n",
1462 &dst->sin.sin_addr.s_addr);
1463 dev->stats.collisions++;
1464 - goto tx_error;
1465 + goto rt_tx_error;
1466 }
1467
1468 /* Bypass encapsulation if the destination is local */
1469 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1470 index 2634d691ec17..dbc024bd4adf 100644
1471 --- a/drivers/scsi/sd.c
1472 +++ b/drivers/scsi/sd.c
1473 @@ -2645,13 +2645,16 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
1474 }
1475
1476 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
1477 + /* too large values might cause issues with arcmsr */
1478 + int vpd_buf_len = 64;
1479 +
1480 sdev->no_report_opcodes = 1;
1481
1482 /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
1483 * CODES is unsupported and the device has an ATA
1484 * Information VPD page (SAT).
1485 */
1486 - if (!scsi_get_vpd_page(sdev, 0x89, buffer, SD_BUF_SIZE))
1487 + if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
1488 sdev->no_write_same = 1;
1489 }
1490
1491 diff --git a/include/linux/net.h b/include/linux/net.h
1492 index 8bd9d926b3cf..41103f84527e 100644
1493 --- a/include/linux/net.h
1494 +++ b/include/linux/net.h
1495 @@ -180,7 +180,7 @@ struct proto_ops {
1496 int offset, size_t size, int flags);
1497 ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
1498 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
1499 - void (*set_peek_off)(struct sock *sk, int val);
1500 + int (*set_peek_off)(struct sock *sk, int val);
1501 };
1502
1503 #define DECLARE_SOCKADDR(type, dst, src) \
1504 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
1505 index 25f5d2d11e7c..21eae43348fb 100644
1506 --- a/include/linux/netdevice.h
1507 +++ b/include/linux/netdevice.h
1508 @@ -1872,6 +1872,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
1509 return dev->header_ops->parse(skb, haddr);
1510 }
1511
1512 +static inline int dev_rebuild_header(struct sk_buff *skb)
1513 +{
1514 + const struct net_device *dev = skb->dev;
1515 +
1516 + if (!dev->header_ops || !dev->header_ops->rebuild)
1517 + return 0;
1518 + return dev->header_ops->rebuild(skb);
1519 +}
1520 +
1521 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1522 extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
1523 static inline int unregister_gifconf(unsigned int family)
1524 @@ -2945,6 +2954,19 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
1525 dev->gso_max_size = size;
1526 }
1527
1528 +static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
1529 + int pulled_hlen, u16 mac_offset,
1530 + int mac_len)
1531 +{
1532 + skb->protocol = protocol;
1533 + skb->encapsulation = 1;
1534 + skb_push(skb, pulled_hlen);
1535 + skb_reset_transport_header(skb);
1536 + skb->mac_header = mac_offset;
1537 + skb->network_header = skb->mac_header + mac_len;
1538 + skb->mac_len = mac_len;
1539 +}
1540 +
1541 static inline bool netif_is_bond_master(struct net_device *dev)
1542 {
1543 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
1544 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
1545 index f66f346dd164..efa1649a822a 100644
1546 --- a/include/linux/skbuff.h
1547 +++ b/include/linux/skbuff.h
1548 @@ -1638,6 +1638,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1549 skb->mac_header += offset;
1550 }
1551
1552 +static inline void skb_pop_mac_header(struct sk_buff *skb)
1553 +{
1554 + skb->mac_header = skb->network_header;
1555 +}
1556 +
1557 static inline void skb_probe_transport_header(struct sk_buff *skb,
1558 const int offset_hint)
1559 {
1560 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
1561 index 5ac63c9a995a..ceae65e69a64 100644
1562 --- a/kernel/sched/core.c
1563 +++ b/kernel/sched/core.c
1564 @@ -7277,7 +7277,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
1565
1566 runtime_enabled = quota != RUNTIME_INF;
1567 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
1568 - account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
1569 + /*
1570 + * If we need to toggle cfs_bandwidth_used, off->on must occur
1571 + * before making related changes, and on->off must occur afterwards
1572 + */
1573 + if (runtime_enabled && !runtime_was_enabled)
1574 + cfs_bandwidth_usage_inc();
1575 raw_spin_lock_irq(&cfs_b->lock);
1576 cfs_b->period = ns_to_ktime(period);
1577 cfs_b->quota = quota;
1578 @@ -7303,6 +7308,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
1579 unthrottle_cfs_rq(cfs_rq);
1580 raw_spin_unlock_irq(&rq->lock);
1581 }
1582 + if (runtime_was_enabled && !runtime_enabled)
1583 + cfs_bandwidth_usage_dec();
1584 out_unlock:
1585 mutex_unlock(&cfs_constraints_mutex);
1586
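
The ordering comment above is the standard rule for a static key that gates newly created state: flip the key on before the gated state becomes reachable, and flip it off only after that state is gone, so the fast path never observes state without the key set. A generic sketch of the pattern (feature_setup()/feature_teardown() are hypothetical names, not from this patch):

    static struct static_key feature_key = STATIC_KEY_INIT_FALSE;

    void feature_set(bool on)
    {
            if (on) {
                    static_key_slow_inc(&feature_key); /* off->on first */
                    feature_setup();  /* readers may now see the key set */
            } else {
                    feature_teardown(); /* quiesce gated state first */
                    static_key_slow_dec(&feature_key); /* on->off last */
            }
    }
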
1587 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
1588 index 7765ad82736a..411732334906 100644
1589 --- a/kernel/sched/fair.c
1590 +++ b/kernel/sched/fair.c
1591 @@ -2077,13 +2077,14 @@ static inline bool cfs_bandwidth_used(void)
1592 return static_key_false(&__cfs_bandwidth_used);
1593 }
1594
1595 -void account_cfs_bandwidth_used(int enabled, int was_enabled)
1596 +void cfs_bandwidth_usage_inc(void)
1597 {
1598 - /* only need to count groups transitioning between enabled/!enabled */
1599 - if (enabled && !was_enabled)
1600 - static_key_slow_inc(&__cfs_bandwidth_used);
1601 - else if (!enabled && was_enabled)
1602 - static_key_slow_dec(&__cfs_bandwidth_used);
1603 + static_key_slow_inc(&__cfs_bandwidth_used);
1604 +}
1605 +
1606 +void cfs_bandwidth_usage_dec(void)
1607 +{
1608 + static_key_slow_dec(&__cfs_bandwidth_used);
1609 }
1610 #else /* HAVE_JUMP_LABEL */
1611 static bool cfs_bandwidth_used(void)
1612 @@ -2091,7 +2092,8 @@ static bool cfs_bandwidth_used(void)
1613 return true;
1614 }
1615
1616 -void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
1617 +void cfs_bandwidth_usage_inc(void) {}
1618 +void cfs_bandwidth_usage_dec(void) {}
1619 #endif /* HAVE_JUMP_LABEL */
1620
1621 /*
1622 @@ -2457,6 +2459,13 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
1623 if (idle)
1624 goto out_unlock;
1625
1626 + /*
1627 + * if we have relooped after returning idle once, we need to update our
1628 + * status as actually running, so that other cpus doing
1629 + * __start_cfs_bandwidth will stop trying to cancel us.
1630 + */
1631 + cfs_b->timer_active = 1;
1632 +
1633 __refill_cfs_bandwidth_runtime(cfs_b);
1634
1635 if (!throttled) {
1636 @@ -2517,7 +2526,13 @@ static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
1637 /* how long we wait to gather additional slack before distributing */
1638 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
1639
1640 -/* are we near the end of the current quota period? */
1641 +/*
1642 + * Are we near the end of the current quota period?
1643 + *
1644 + * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
1645 + * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
1646 + * migrate_hrtimers, base is never cleared, so we are fine.
1647 + */
1648 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
1649 {
1650 struct hrtimer *refresh_timer = &cfs_b->period_timer;
1651 @@ -2593,10 +2608,12 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
1652 u64 expires;
1653
1654 /* confirm we're still not at a refresh boundary */
1655 - if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
1656 + raw_spin_lock(&cfs_b->lock);
1657 + if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
1658 + raw_spin_unlock(&cfs_b->lock);
1659 return;
1660 + }
1661
1662 - raw_spin_lock(&cfs_b->lock);
1663 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
1664 runtime = cfs_b->runtime;
1665 cfs_b->runtime = 0;
1666 @@ -2717,11 +2734,11 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
1667 * (timer_active==0 becomes visible before the hrtimer call-back
1668 * terminates). In either case we ensure that it's re-programmed
1669 */
1670 - while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
1671 + while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
1672 + hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
1673 + /* bounce the lock to allow do_sched_cfs_period_timer to run */
1674 raw_spin_unlock(&cfs_b->lock);
1675 - /* ensure cfs_b->lock is available while we wait */
1676 - hrtimer_cancel(&cfs_b->period_timer);
1677 -
1678 + cpu_relax();
1679 raw_spin_lock(&cfs_b->lock);
1680 /* if someone else restarted the timer then we're done */
1681 if (cfs_b->timer_active)
1682 @@ -6223,7 +6240,8 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
1683 se->cfs_rq = parent->my_q;
1684
1685 se->my_q = cfs_rq;
1686 - update_load_set(&se->load, 0);
1687 + /* guarantee group entities always have weight */
1688 + update_load_set(&se->load, NICE_0_LOAD);
1689 se->parent = parent;
1690 }
1691
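
The __start_cfs_bandwidth() hunk replaces hrtimer_cancel(), which blocks until the callback finishes while the callback itself wants cfs_b->lock, with a try-to-cancel loop that bounces the lock. The general shape of that pattern, for any timer whose handler takes the same lock as its canceller (a sketch with placeholder names):

    /* caller holds 'lock'; the timer callback also takes 'lock' */
    while (hrtimer_active(&timer) &&
           hrtimer_try_to_cancel(&timer) < 0) {
            /* -1 means the callback is running right now: drop the
             * lock so it can complete, then retake and recheck */
            raw_spin_unlock(&lock);
            cpu_relax();
            raw_spin_lock(&lock);
            if (restarted)  /* someone else rearmed the timer */
                    break;
    }
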
1692 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
1693 index b3c5653e1dca..a6208afd80e7 100644
1694 --- a/kernel/sched/sched.h
1695 +++ b/kernel/sched/sched.h
1696 @@ -1305,7 +1305,8 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
1697 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
1698 extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
1699
1700 -extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
1701 +extern void cfs_bandwidth_usage_inc(void);
1702 +extern void cfs_bandwidth_usage_dec(void);
1703
1704 #ifdef CONFIG_NO_HZ_COMMON
1705 enum rq_nohz_flag_bits {
1706 diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
1707 index 09bf1c38805b..edf44d079da7 100644
1708 --- a/net/8021q/vlan_dev.c
1709 +++ b/net/8021q/vlan_dev.c
1710 @@ -549,6 +549,23 @@ static const struct header_ops vlan_header_ops = {
1711 .parse = eth_header_parse,
1712 };
1713
1714 +static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
1715 + unsigned short type,
1716 + const void *daddr, const void *saddr,
1717 + unsigned int len)
1718 +{
1719 + struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
1720 + struct net_device *real_dev = vlan->real_dev;
1721 +
1722 + return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
1723 +}
1724 +
1725 +static const struct header_ops vlan_passthru_header_ops = {
1726 + .create = vlan_passthru_hard_header,
1727 + .rebuild = dev_rebuild_header,
1728 + .parse = eth_header_parse,
1729 +};
1730 +
1731 static struct device_type vlan_type = {
1732 .name = "vlan",
1733 };
1734 @@ -592,7 +609,7 @@ static int vlan_dev_init(struct net_device *dev)
1735
1736 dev->needed_headroom = real_dev->needed_headroom;
1737 if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
1738 - dev->header_ops = real_dev->header_ops;
1739 + dev->header_ops = &vlan_passthru_header_ops;
1740 dev->hard_header_len = real_dev->hard_header_len;
1741 } else {
1742 dev->header_ops = &vlan_header_ops;
1743 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
1744 index 8b0b610ca2c9..1b148a3affa7 100644
1745 --- a/net/bridge/br_multicast.c
1746 +++ b/net/bridge/br_multicast.c
1747 @@ -2004,7 +2004,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1748 u32 old;
1749 struct net_bridge_mdb_htable *mdb;
1750
1751 - spin_lock(&br->multicast_lock);
1752 + spin_lock_bh(&br->multicast_lock);
1753 if (!netif_running(br->dev))
1754 goto unlock;
1755
1756 @@ -2036,7 +2036,7 @@ rollback:
1757 }
1758
1759 unlock:
1760 - spin_unlock(&br->multicast_lock);
1761 + spin_unlock_bh(&br->multicast_lock);
1762
1763 return err;
1764 }
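
The _bh variants matter because br->multicast_lock is also taken from softirq context (the bridge multicast timers and the packet path). Taking such a lock in process context with plain spin_lock() invites a self-deadlock: the softirq can fire on the same CPU and spin on a lock that CPU already holds. In outline (a sketch with placeholder names):

    static DEFINE_SPINLOCK(lock);   /* shared with a timer callback */

    static void timer_cb(unsigned long data)
    {
            spin_lock(&lock);       /* runs in softirq context */
            /* ... */
            spin_unlock(&lock);
    }

    static void process_context_path(void)
    {
            /* plain spin_lock() here could deadlock if timer_cb fires
             * on this CPU while the lock is held */
            spin_lock_bh(&lock);    /* bottom halves off while held */
            /* ... */
            spin_unlock_bh(&lock);
    }
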
1765 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
1766 index e14c33b42f75..9a63c4206e4a 100644
1767 --- a/net/bridge/br_private.h
1768 +++ b/net/bridge/br_private.h
1769 @@ -442,6 +442,16 @@ extern netdev_features_t br_features_recompute(struct net_bridge *br,
1770 extern int br_handle_frame_finish(struct sk_buff *skb);
1771 extern rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
1772
1773 +static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
1774 +{
1775 + return rcu_dereference(dev->rx_handler) == br_handle_frame;
1776 +}
1777 +
1778 +static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev)
1779 +{
1780 + return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL;
1781 +}
1782 +
1783 /* br_ioctl.c */
1784 extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
1785 extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg);
1786 diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
1787 index 8660ea3be705..bdb459d21ad8 100644
1788 --- a/net/bridge/br_stp_bpdu.c
1789 +++ b/net/bridge/br_stp_bpdu.c
1790 @@ -153,7 +153,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
1791 if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
1792 goto err;
1793
1794 - p = br_port_get_rcu(dev);
1795 + p = br_port_get_check_rcu(dev);
1796 if (!p)
1797 goto err;
1798
1799 diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
1800 index 5e78d44333b9..f27d126239b1 100644
1801 --- a/net/core/drop_monitor.c
1802 +++ b/net/core/drop_monitor.c
1803 @@ -64,7 +64,6 @@ static struct genl_family net_drop_monitor_family = {
1804 .hdrsize = 0,
1805 .name = "NET_DM",
1806 .version = 2,
1807 - .maxattr = NET_DM_CMD_MAX,
1808 };
1809
1810 static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
1811 diff --git a/net/core/neighbour.c b/net/core/neighbour.c
1812 index 6072610a8672..11af243bf92f 100644
1813 --- a/net/core/neighbour.c
1814 +++ b/net/core/neighbour.c
1815 @@ -1274,7 +1274,7 @@ int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
1816
1817 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1818 skb->len) < 0 &&
1819 - dev->header_ops->rebuild(skb))
1820 + dev_rebuild_header(skb))
1821 return 0;
1822
1823 return dev_queue_xmit(skb);
1824 diff --git a/net/core/netpoll.c b/net/core/netpoll.c
1825 index fc75c9e461b8..0c1482c6ff98 100644
1826 --- a/net/core/netpoll.c
1827 +++ b/net/core/netpoll.c
1828 @@ -386,8 +386,14 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
1829 !vlan_hw_offload_capable(netif_skb_features(skb),
1830 skb->vlan_proto)) {
1831 skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
1832 - if (unlikely(!skb))
1833 - break;
1834 + if (unlikely(!skb)) {
1835 + /* This is actually a packet drop, but we
1836 + * don't want the code at the end of this
1837 + * function to try and re-queue a NULL skb.
1838 + */
1839 + status = NETDEV_TX_OK;
1840 + goto unlock_txq;
1841 + }
1842 skb->vlan_tci = 0;
1843 }
1844
1845 @@ -395,6 +401,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
1846 if (status == NETDEV_TX_OK)
1847 txq_trans_update(txq);
1848 }
1849 + unlock_txq:
1850 __netif_tx_unlock(txq);
1851
1852 if (status == NETDEV_TX_OK)
1853 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1854 index c28c7fed0d0b..743e6ebf5f9f 100644
1855 --- a/net/core/skbuff.c
1856 +++ b/net/core/skbuff.c
1857 @@ -3541,6 +3541,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
1858 skb->tstamp.tv64 = 0;
1859 skb->pkt_type = PACKET_HOST;
1860 skb->skb_iif = 0;
1861 + skb->local_df = 0;
1862 skb_dst_drop(skb);
1863 skb->mark = 0;
1864 secpath_reset(skb);
1865 diff --git a/net/core/sock.c b/net/core/sock.c
1866 index 0b39e7ae4383..5cec994ee2f3 100644
1867 --- a/net/core/sock.c
1868 +++ b/net/core/sock.c
1869 @@ -888,7 +888,7 @@ set_rcvbuf:
1870
1871 case SO_PEEK_OFF:
1872 if (sock->ops->set_peek_off)
1873 - sock->ops->set_peek_off(sk, val);
1874 + ret = sock->ops->set_peek_off(sk, val);
1875 else
1876 ret = -EOPNOTSUPP;
1877 break;
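
From userspace, the visible effect of the set_peek_off changes (here, in include/linux/net.h above, and in net/unix/af_unix.c below) is that setsockopt(SO_PEEK_OFF) can now report failure, for instance -EINTR when the af_unix handler is interrupted while waiting for u->readlock. A minimal, compilable illustration:

    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
            int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
            int off = 0;

            if (fd < 0)
                    return 1;
            /* previously the call reported success even if the
             * per-family handler could not apply the value */
            if (setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off,
                           sizeof(off)) < 0)
                    perror("setsockopt(SO_PEEK_OFF)");
            return 0;
    }
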
1878 diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
1879 index 523be38e37de..f2e15738534d 100644
1880 --- a/net/ipv4/fib_rules.c
1881 +++ b/net/ipv4/fib_rules.c
1882 @@ -104,7 +104,10 @@ errout:
1883 static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
1884 {
1885 struct fib_result *result = (struct fib_result *) arg->result;
1886 - struct net_device *dev = result->fi->fib_dev;
1887 + struct net_device *dev = NULL;
1888 +
1889 + if (result->fi)
1890 + dev = result->fi->fib_dev;
1891
1892 /* do not accept result if the route does
1893 * not meet the required prefix length
1894 diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
1895 index 55e6bfb3a289..db98705905f7 100644
1896 --- a/net/ipv4/gre_offload.c
1897 +++ b/net/ipv4/gre_offload.c
1898 @@ -28,6 +28,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
1899 netdev_features_t enc_features;
1900 int ghl = GRE_HEADER_SECTION;
1901 struct gre_base_hdr *greh;
1902 + u16 mac_offset = skb->mac_header;
1903 int mac_len = skb->mac_len;
1904 __be16 protocol = skb->protocol;
1905 int tnl_hlen;
1906 @@ -57,13 +58,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
1907 } else
1908 csum = false;
1909
1910 + if (unlikely(!pskb_may_pull(skb, ghl)))
1911 + goto out;
1912 +
1913 /* setup inner skb. */
1914 skb->protocol = greh->protocol;
1915 skb->encapsulation = 0;
1916
1917 - if (unlikely(!pskb_may_pull(skb, ghl)))
1918 - goto out;
1919 -
1920 __skb_pull(skb, ghl);
1921 skb_reset_mac_header(skb);
1922 skb_set_network_header(skb, skb_inner_network_offset(skb));
1923 @@ -72,8 +73,10 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
1924 /* segment inner packet. */
1925 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
1926 segs = skb_mac_gso_segment(skb, enc_features);
1927 - if (!segs || IS_ERR(segs))
1928 + if (!segs || IS_ERR(segs)) {
1929 + skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
1930 goto out;
1931 + }
1932
1933 skb = segs;
1934 tnl_hlen = skb_tnl_header_len(skb);
1935 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
1936 index 5f648751fce2..31cf54d18221 100644
1937 --- a/net/ipv4/inet_diag.c
1938 +++ b/net/ipv4/inet_diag.c
1939 @@ -106,6 +106,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
1940
1941 r->id.idiag_sport = inet->inet_sport;
1942 r->id.idiag_dport = inet->inet_dport;
1943 +
1944 + memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
1945 + memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
1946 +
1947 r->id.idiag_src[0] = inet->inet_rcv_saddr;
1948 r->id.idiag_dst[0] = inet->inet_daddr;
1949
1950 @@ -240,12 +244,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
1951
1952 r->idiag_family = tw->tw_family;
1953 r->idiag_retrans = 0;
1954 +
1955 r->id.idiag_if = tw->tw_bound_dev_if;
1956 sock_diag_save_cookie(tw, r->id.idiag_cookie);
1957 +
1958 r->id.idiag_sport = tw->tw_sport;
1959 r->id.idiag_dport = tw->tw_dport;
1960 +
1961 + memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
1962 + memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
1963 +
1964 r->id.idiag_src[0] = tw->tw_rcv_saddr;
1965 r->id.idiag_dst[0] = tw->tw_daddr;
1966 +
1967 r->idiag_state = tw->tw_substate;
1968 r->idiag_timer = 3;
1969 r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
1970 @@ -732,8 +743,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
1971
1972 r->id.idiag_sport = inet->inet_sport;
1973 r->id.idiag_dport = ireq->rmt_port;
1974 +
1975 + memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
1976 + memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
1977 +
1978 r->id.idiag_src[0] = ireq->loc_addr;
1979 r->id.idiag_dst[0] = ireq->rmt_addr;
1980 +
1981 r->idiag_expires = jiffies_to_msecs(tmo);
1982 r->idiag_rqueue = 0;
1983 r->idiag_wqueue = 0;
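
idiag_src and idiag_dst are 16-byte arrays sized for IPv6, and the IPv4 paths above fill only element [0]; without the memsets the remaining 12 bytes of each array went out to userspace uninitialized (an information leak in the netlink reply). The bug class, shown with a stand-in struct in plain C:

    #include <stdint.h>
    #include <string.h>

    struct diag_id_example {        /* stand-in for inet_diag's id */
            uint32_t src[4];        /* IPv4 uses only src[0] */
            uint32_t dst[4];
    };

    static void fill_v4(struct diag_id_example *id,
                        uint32_t saddr, uint32_t daddr)
    {
            /* without these, src[1..3]/dst[1..3] keep whatever bytes
             * were in the buffer, and that is what gets copied out */
            memset(id->src, 0, sizeof(id->src));
            memset(id->dst, 0, sizeof(id->dst));
            id->src[0] = saddr;
            id->dst[0] = daddr;
    }
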
1984 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
1985 index d7aea4c5b940..e560ef34cf4b 100644
1986 --- a/net/ipv4/ip_gre.c
1987 +++ b/net/ipv4/ip_gre.c
1988 @@ -217,6 +217,7 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
1989 iph->saddr, iph->daddr, tpi->key);
1990
1991 if (tunnel) {
1992 + skb_pop_mac_header(skb);
1993 ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
1994 return PACKET_RCVD;
1995 }
1996 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
1997 index 5e2c2f1a075d..6ca990726d5b 100644
1998 --- a/net/ipv4/udp.c
1999 +++ b/net/ipv4/udp.c
2000 @@ -2294,6 +2294,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
2001 netdev_features_t features)
2002 {
2003 struct sk_buff *segs = ERR_PTR(-EINVAL);
2004 + u16 mac_offset = skb->mac_header;
2005 int mac_len = skb->mac_len;
2006 int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
2007 __be16 protocol = skb->protocol;
2008 @@ -2313,8 +2314,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
2009 /* segment inner packet. */
2010 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
2011 segs = skb_mac_gso_segment(skb, enc_features);
2012 - if (!segs || IS_ERR(segs))
2013 + if (!segs || IS_ERR(segs)) {
2014 + skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
2015 + mac_len);
2016 goto out;
2017 + }
2018
2019 outer_hlen = skb_tnl_header_len(skb);
2020 skb = segs;
2021 diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
2022 index f35eccaa855e..6b809e4bf1ed 100644
2023 --- a/net/ipv4/udp_offload.c
2024 +++ b/net/ipv4/udp_offload.c
2025 @@ -41,6 +41,14 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
2026 {
2027 struct sk_buff *segs = ERR_PTR(-EINVAL);
2028 unsigned int mss;
2029 + int offset;
2030 + __wsum csum;
2031 +
2032 + if (skb->encapsulation &&
2033 + skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
2034 + segs = skb_udp_tunnel_segment(skb, features);
2035 + goto out;
2036 + }
2037
2038 mss = skb_shinfo(skb)->gso_size;
2039 if (unlikely(skb->len <= mss))
2040 @@ -62,27 +70,20 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
2041 goto out;
2042 }
2043
2044 + /* Do software UFO. Complete and fill in the UDP checksum as
2045 + * HW cannot do checksum of UDP packets sent as multiple
2046 + * IP fragments.
2047 + */
2048 + offset = skb_checksum_start_offset(skb);
2049 + csum = skb_checksum(skb, offset, skb->len - offset, 0);
2050 + offset += skb->csum_offset;
2051 + *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2052 + skb->ip_summed = CHECKSUM_NONE;
2053 +
2054 /* Fragment the skb. IP headers of the fragments are updated in
2055 * inet_gso_segment()
2056 */
2057 - if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
2058 - segs = skb_udp_tunnel_segment(skb, features);
2059 - else {
2060 - int offset;
2061 - __wsum csum;
2062 -
2063 - /* Do software UFO. Complete and fill in the UDP checksum as
2064 - * HW cannot do checksum of UDP packets sent as multiple
2065 - * IP fragments.
2066 - */
2067 - offset = skb_checksum_start_offset(skb);
2068 - csum = skb_checksum(skb, offset, skb->len - offset, 0);
2069 - offset += skb->csum_offset;
2070 - *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2071 - skb->ip_summed = CHECKSUM_NONE;
2072 -
2073 - segs = skb_segment(skb, features);
2074 - }
2075 + segs = skb_segment(skb, features);
2076 out:
2077 return segs;
2078 }
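
The software-UFO branch above completes the UDP checksum by summing from the checksum start and folding the 32-bit accumulator down to 16 bits. The fold is plain ones-complement arithmetic; a self-contained equivalent of the kernel's csum_fold() in portable C:

    #include <stdint.h>

    static uint16_t fold32(uint32_t sum)
    {
            /* add carries back in until the value fits in 16 bits */
            sum = (sum & 0xffff) + (sum >> 16);
            sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;  /* ones-complement of the result */
    }

    /* e.g. fold32(0x1a2b3c4d): 0x3c4d + 0x1a2b = 0x5678, ~0x5678 = 0xa987 */
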
2079 diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
2080 index e27591635f92..3fd0a578329e 100644
2081 --- a/net/ipv6/fib6_rules.c
2082 +++ b/net/ipv6/fib6_rules.c
2083 @@ -122,7 +122,11 @@ out:
2084 static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
2085 {
2086 struct rt6_info *rt = (struct rt6_info *) arg->result;
2087 - struct net_device *dev = rt->rt6i_idev->dev;
2088 + struct net_device *dev = NULL;
2089 +
2090 + if (rt->rt6i_idev)
2091 + dev = rt->rt6i_idev->dev;
2092 +
2093 /* do not accept result if the route does
2094 * not meet the required prefix length
2095 */
2096 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2097 index 77308af056bc..0accb1321dd6 100644
2098 --- a/net/ipv6/route.c
2099 +++ b/net/ipv6/route.c
2100 @@ -84,6 +84,8 @@ static int ip6_dst_gc(struct dst_ops *ops);
2101
2102 static int ip6_pkt_discard(struct sk_buff *skb);
2103 static int ip6_pkt_discard_out(struct sk_buff *skb);
2104 +static int ip6_pkt_prohibit(struct sk_buff *skb);
2105 +static int ip6_pkt_prohibit_out(struct sk_buff *skb);
2106 static void ip6_link_failure(struct sk_buff *skb);
2107 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2108 struct sk_buff *skb, u32 mtu);
2109 @@ -234,9 +236,6 @@ static const struct rt6_info ip6_null_entry_template = {
2110
2111 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2112
2113 -static int ip6_pkt_prohibit(struct sk_buff *skb);
2114 -static int ip6_pkt_prohibit_out(struct sk_buff *skb);
2115 -
2116 static const struct rt6_info ip6_prohibit_entry_template = {
2117 .dst = {
2118 .__refcnt = ATOMIC_INIT(1),
2119 @@ -1570,21 +1569,24 @@ int ip6_route_add(struct fib6_config *cfg)
2120 goto out;
2121 }
2122 }
2123 - rt->dst.output = ip6_pkt_discard_out;
2124 - rt->dst.input = ip6_pkt_discard;
2125 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
2126 switch (cfg->fc_type) {
2127 case RTN_BLACKHOLE:
2128 rt->dst.error = -EINVAL;
2129 + rt->dst.output = dst_discard;
2130 + rt->dst.input = dst_discard;
2131 break;
2132 case RTN_PROHIBIT:
2133 rt->dst.error = -EACCES;
2134 + rt->dst.output = ip6_pkt_prohibit_out;
2135 + rt->dst.input = ip6_pkt_prohibit;
2136 break;
2137 case RTN_THROW:
2138 - rt->dst.error = -EAGAIN;
2139 - break;
2140 default:
2141 - rt->dst.error = -ENETUNREACH;
2142 + rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
2143 + : -ENETUNREACH;
2144 + rt->dst.output = ip6_pkt_discard_out;
2145 + rt->dst.input = ip6_pkt_discard;
2146 break;
2147 }
2148 goto install_route;
2149 @@ -1908,9 +1910,7 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
2150 else
2151 rt->rt6i_gateway = *dest;
2152 rt->rt6i_flags = ort->rt6i_flags;
2153 - if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
2154 - (RTF_DEFAULT | RTF_ADDRCONF))
2155 - rt6_set_from(rt, ort);
2156 + rt6_set_from(rt, ort);
2157 rt->rt6i_metric = 0;
2158
2159 #ifdef CONFIG_IPV6_SUBTREES
2160 @@ -2149,8 +2149,6 @@ static int ip6_pkt_discard_out(struct sk_buff *skb)
2161 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2162 }
2163
2164 -#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2165 -
2166 static int ip6_pkt_prohibit(struct sk_buff *skb)
2167 {
2168 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2169 @@ -2162,8 +2160,6 @@ static int ip6_pkt_prohibit_out(struct sk_buff *skb)
2170 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2171 }
2172
2173 -#endif
2174 -
2175 /*
2176 * Allocate a dst for local (unicast / anycast) address.
2177 */
2178 @@ -2173,12 +2169,10 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2179 bool anycast)
2180 {
2181 struct net *net = dev_net(idev->dev);
2182 - struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL);
2183 -
2184 - if (!rt) {
2185 - net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n");
2186 + struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
2187 + DST_NOCOUNT, NULL);
2188 + if (!rt)
2189 return ERR_PTR(-ENOMEM);
2190 - }
2191
2192 in6_dev_hold(idev);
2193
2194 diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
2195 index 34c6fff3ae84..06556d6e1a4d 100644
2196 --- a/net/ipv6/udp_offload.c
2197 +++ b/net/ipv6/udp_offload.c
2198 @@ -88,7 +88,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
2199
2200 /* Check if there is enough headroom to insert fragment header. */
2201 tnl_hlen = skb_tnl_header_len(skb);
2202 - if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
2203 + if (skb_mac_header(skb) < skb->head + tnl_hlen + frag_hdr_sz) {
2204 if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
2205 goto out;
2206 }
2207 diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
2208 index 7b01b9f5846c..c71b699eb555 100644
2209 --- a/net/llc/af_llc.c
2210 +++ b/net/llc/af_llc.c
2211 @@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
2212 unsigned long cpu_flags;
2213 size_t copied = 0;
2214 u32 peek_seq = 0;
2215 - u32 *seq;
2216 + u32 *seq, skb_len;
2217 unsigned long used;
2218 int target; /* Read at least this many bytes */
2219 long timeo;
2220 @@ -812,6 +812,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
2221 }
2222 continue;
2223 found_ok_skb:
2224 + skb_len = skb->len;
2225 /* Ok so how much can we use? */
2226 used = skb->len - offset;
2227 if (len < used)
2228 @@ -844,7 +845,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
2229 }
2230
2231 /* Partial read */
2232 - if (used + offset < skb->len)
2233 + if (used + offset < skb_len)
2234 continue;
2235 } while (len > 0);
2236
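
The llc change caches skb->len up front because, once the data has been delivered and the skb eaten (MSG_PEEK not set), the later partial-read test 'used + offset < skb->len' read a freed skb. Generic shape of that bug and the fix (deliver_and_maybe_free() is a hypothetical stand-in that may free its argument):

    #include <stddef.h>

    struct msg { size_t len; char data[]; };

    void deliver_and_maybe_free(struct msg *m);  /* may free 'm' */

    static int partial_read(struct msg *m, size_t used, size_t offset)
    {
            size_t msg_len = m->len;  /* cache while 'm' is still live */

            deliver_and_maybe_free(m);

            /* comparing against m->len here would be a use-after-free */
            return used + offset < msg_len;
    }
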
2237 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
2238 index 70b5a05c0a4e..1eb2b78e927b 100644
2239 --- a/net/mac80211/tx.c
2240 +++ b/net/mac80211/tx.c
2241 @@ -463,7 +463,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
2242 {
2243 struct sta_info *sta = tx->sta;
2244 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
2245 - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
2246 struct ieee80211_local *local = tx->local;
2247
2248 if (unlikely(!sta))
2249 @@ -474,15 +473,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
2250 !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
2251 int ac = skb_get_queue_mapping(tx->skb);
2252
2253 - /* only deauth, disassoc and action are bufferable MMPDUs */
2254 - if (ieee80211_is_mgmt(hdr->frame_control) &&
2255 - !ieee80211_is_deauth(hdr->frame_control) &&
2256 - !ieee80211_is_disassoc(hdr->frame_control) &&
2257 - !ieee80211_is_action(hdr->frame_control)) {
2258 - info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
2259 - return TX_CONTINUE;
2260 - }
2261 -
2262 ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
2263 sta->sta.addr, sta->sta.aid, ac);
2264 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
2265 @@ -525,9 +515,22 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
2266 static ieee80211_tx_result debug_noinline
2267 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
2268 {
2269 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
2270 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
2271 +
2272 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
2273 return TX_CONTINUE;
2274
2275 + /* only deauth, disassoc and action are bufferable MMPDUs */
2276 + if (ieee80211_is_mgmt(hdr->frame_control) &&
2277 + !ieee80211_is_deauth(hdr->frame_control) &&
2278 + !ieee80211_is_disassoc(hdr->frame_control) &&
2279 + !ieee80211_is_action(hdr->frame_control)) {
2280 + if (tx->flags & IEEE80211_TX_UNICAST)
2281 + info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
2282 + return TX_CONTINUE;
2283 + }
2284 +
2285 if (tx->flags & IEEE80211_TX_UNICAST)
2286 return ieee80211_tx_h_unicast_ps_buf(tx);
2287 else
2288 diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
2289 index 5f9bfd060dea..17c1bcb182c6 100644
2290 --- a/net/netfilter/nf_conntrack_seqadj.c
2291 +++ b/net/netfilter/nf_conntrack_seqadj.c
2292 @@ -41,8 +41,8 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
2293 spin_lock_bh(&ct->lock);
2294 this_way = &seqadj->seq[dir];
2295 if (this_way->offset_before == this_way->offset_after ||
2296 - before(this_way->correction_pos, seq)) {
2297 - this_way->correction_pos = seq;
2298 + before(this_way->correction_pos, ntohl(seq))) {
2299 + this_way->correction_pos = ntohl(seq);
2300 this_way->offset_before = this_way->offset_after;
2301 this_way->offset_after += off;
2302 }
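
nf_ct_seqadj_set() receives seq as a network-byte-order value while correction_pos is kept in host order, and before() does wrapping 32-bit serial comparison, which is only meaningful when both operands use the same order. The comparison in isolation (portable C):

    #include <stdint.h>
    #include <arpa/inet.h>

    /* same idea as the kernel's before() for TCP sequence numbers */
    static int before(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) < 0;
    }

    /* seq arrives big-endian: on little-endian hosts,
     * before(pos, seq) compared host-order pos with byte-swapped seq,
     * giving essentially random results; before(pos, ntohl(seq))
     * compares like with like. */
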
2303 diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c
2304 index f02b3605823e..1fb2258c3535 100644
2305 --- a/net/netfilter/nf_nat_irc.c
2306 +++ b/net/netfilter/nf_nat_irc.c
2307 @@ -34,10 +34,14 @@ static unsigned int help(struct sk_buff *skb,
2308 struct nf_conntrack_expect *exp)
2309 {
2310 char buffer[sizeof("4294967296 65635")];
2311 + struct nf_conn *ct = exp->master;
2312 + union nf_inet_addr newaddr;
2313 u_int16_t port;
2314 unsigned int ret;
2315
2316 /* Reply comes from server. */
2317 + newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3;
2318 +
2319 exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
2320 exp->dir = IP_CT_DIR_REPLY;
2321 exp->expectfn = nf_nat_follow_master;
2322 @@ -57,17 +61,35 @@ static unsigned int help(struct sk_buff *skb,
2323 }
2324
2325 if (port == 0) {
2326 - nf_ct_helper_log(skb, exp->master, "all ports in use");
2327 + nf_ct_helper_log(skb, ct, "all ports in use");
2328 return NF_DROP;
2329 }
2330
2331 - ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
2332 - protoff, matchoff, matchlen, buffer,
2333 - strlen(buffer));
2334 + /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27
2335 + * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28
2336 + * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26
2337 + * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26
2338 + * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27
2339 + *
2340 + * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits,
2341 + * 255.255.255.255==4294967296, 10 digits)
2342 + * P: bound port (min 1 d, max 5d (65635))
2343 + * F: filename (min 1 d )
2344 + * S: size (min 1 d )
2345 + * 0x01, \n: terminators
2346 + */
2347 + /* AAA = "us", ie. where server normally talks to. */
2348 + snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port);
2349 + pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
2350 + buffer, &newaddr.ip, port);
2351 +
2352 + ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
2353 + matchlen, buffer, strlen(buffer));
2354 if (ret != NF_ACCEPT) {
2355 - nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
2356 + nf_ct_helper_log(skb, ct, "cannot mangle packet");
2357 nf_ct_unexpect_related(exp);
2358 }
2359 +
2360 return ret;
2361 }
2362
2363 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2364 index ba2548bd85bf..88cfbc189558 100644
2365 --- a/net/packet/af_packet.c
2366 +++ b/net/packet/af_packet.c
2367 @@ -237,6 +237,30 @@ struct packet_skb_cb {
2368 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
2369 static void __fanout_link(struct sock *sk, struct packet_sock *po);
2370
2371 +static struct net_device *packet_cached_dev_get(struct packet_sock *po)
2372 +{
2373 + struct net_device *dev;
2374 +
2375 + rcu_read_lock();
2376 + dev = rcu_dereference(po->cached_dev);
2377 + if (likely(dev))
2378 + dev_hold(dev);
2379 + rcu_read_unlock();
2380 +
2381 + return dev;
2382 +}
2383 +
2384 +static void packet_cached_dev_assign(struct packet_sock *po,
2385 + struct net_device *dev)
2386 +{
2387 + rcu_assign_pointer(po->cached_dev, dev);
2388 +}
2389 +
2390 +static void packet_cached_dev_reset(struct packet_sock *po)
2391 +{
2392 + RCU_INIT_POINTER(po->cached_dev, NULL);
2393 +}
2394 +
2395 /* register_prot_hook must be invoked with the po->bind_lock held,
2396 * or from a context in which asynchronous accesses to the packet
2397 * socket is not possible (packet_create()).
2398 @@ -246,12 +270,10 @@ static void register_prot_hook(struct sock *sk)
2399 struct packet_sock *po = pkt_sk(sk);
2400
2401 if (!po->running) {
2402 - if (po->fanout) {
2403 + if (po->fanout)
2404 __fanout_link(sk, po);
2405 - } else {
2406 + else
2407 dev_add_pack(&po->prot_hook);
2408 - rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
2409 - }
2410
2411 sock_hold(sk);
2412 po->running = 1;
2413 @@ -270,12 +292,11 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
2414 struct packet_sock *po = pkt_sk(sk);
2415
2416 po->running = 0;
2417 - if (po->fanout) {
2418 +
2419 + if (po->fanout)
2420 __fanout_unlink(sk, po);
2421 - } else {
2422 + else
2423 __dev_remove_pack(&po->prot_hook);
2424 - RCU_INIT_POINTER(po->cached_dev, NULL);
2425 - }
2426
2427 __sock_put(sk);
2428
2429 @@ -2059,19 +2080,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2430 return tp_len;
2431 }
2432
2433 -static struct net_device *packet_cached_dev_get(struct packet_sock *po)
2434 -{
2435 - struct net_device *dev;
2436 -
2437 - rcu_read_lock();
2438 - dev = rcu_dereference(po->cached_dev);
2439 - if (dev)
2440 - dev_hold(dev);
2441 - rcu_read_unlock();
2442 -
2443 - return dev;
2444 -}
2445 -
2446 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2447 {
2448 struct sk_buff *skb;
2449 @@ -2088,7 +2096,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2450
2451 mutex_lock(&po->pg_vec_lock);
2452
2453 - if (saddr == NULL) {
2454 + if (likely(saddr == NULL)) {
2455 dev = packet_cached_dev_get(po);
2456 proto = po->num;
2457 addr = NULL;
2458 @@ -2242,7 +2250,7 @@ static int packet_snd(struct socket *sock,
2459 * Get and verify the address.
2460 */
2461
2462 - if (saddr == NULL) {
2463 + if (likely(saddr == NULL)) {
2464 dev = packet_cached_dev_get(po);
2465 proto = po->num;
2466 addr = NULL;
2467 @@ -2451,6 +2459,8 @@ static int packet_release(struct socket *sock)
2468
2469 spin_lock(&po->bind_lock);
2470 unregister_prot_hook(sk, false);
2471 + packet_cached_dev_reset(po);
2472 +
2473 if (po->prot_hook.dev) {
2474 dev_put(po->prot_hook.dev);
2475 po->prot_hook.dev = NULL;
2476 @@ -2506,14 +2516,17 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
2477
2478 spin_lock(&po->bind_lock);
2479 unregister_prot_hook(sk, true);
2480 +
2481 po->num = protocol;
2482 po->prot_hook.type = protocol;
2483 if (po->prot_hook.dev)
2484 dev_put(po->prot_hook.dev);
2485 - po->prot_hook.dev = dev;
2486
2487 + po->prot_hook.dev = dev;
2488 po->ifindex = dev ? dev->ifindex : 0;
2489
2490 + packet_cached_dev_assign(po, dev);
2491 +
2492 if (protocol == 0)
2493 goto out_unlock;
2494
2495 @@ -2626,7 +2639,8 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
2496 po = pkt_sk(sk);
2497 sk->sk_family = PF_PACKET;
2498 po->num = proto;
2499 - RCU_INIT_POINTER(po->cached_dev, NULL);
2500 +
2501 + packet_cached_dev_reset(po);
2502
2503 sk->sk_destruct = packet_sock_destruct;
2504 sk_refcnt_debug_inc(sk);
2505 @@ -3337,6 +3351,7 @@ static int packet_notifier(struct notifier_block *this,
2506 sk->sk_error_report(sk);
2507 }
2508 if (msg == NETDEV_UNREGISTER) {
2509 + packet_cached_dev_reset(po);
2510 po->ifindex = -1;
2511 if (po->prot_hook.dev)
2512 dev_put(po->prot_hook.dev);
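
With the cached_dev helpers hoisted above their first use and the reset points added (packet_release(), packet_do_bind(), NETDEV_UNREGISTER), po->cached_dev follows the usual RCU-pointer contract: readers take a device reference under rcu_read_lock() and must release it. A caller sketch matching the tpacket_snd()/packet_snd() pattern above:

    struct net_device *dev;

    dev = packet_cached_dev_get(po);  /* holds a ref, or NULL */
    if (unlikely(dev == NULL))
            return -ENXIO;            /* as the real send paths do */
    /* ... build and transmit the frame via dev ... */
    dev_put(dev);                     /* balance the dev_hold() */
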
2513 diff --git a/net/rds/ib.c b/net/rds/ib.c
2514 index b4c8b0022fee..ba2dffeff608 100644
2515 --- a/net/rds/ib.c
2516 +++ b/net/rds/ib.c
2517 @@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr)
2518 ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
2519 /* due to this, we will claim to support iWARP devices unless we
2520 check node_type. */
2521 - if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
2522 + if (ret || !cm_id->device ||
2523 + cm_id->device->node_type != RDMA_NODE_IB_CA)
2524 ret = -EADDRNOTAVAIL;
2525
2526 rdsdebug("addr %pI4 ret %d node type %d\n",
2527 diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
2528 index e59094981175..37be6e226d1b 100644
2529 --- a/net/rds/ib_send.c
2530 +++ b/net/rds/ib_send.c
2531 @@ -552,9 +552,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
2532 && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
2533 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
2534 scat = &rm->data.op_sg[sg];
2535 - ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
2536 - ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
2537 - return ret;
2538 + ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
2539 + return sizeof(struct rds_header) + ret;
2540 }
2541
2542 /* FIXME we may overallocate here */
2543 diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
2544 index 33af77246bfe..62ced6516c58 100644
2545 --- a/net/rose/af_rose.c
2546 +++ b/net/rose/af_rose.c
2547 @@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
2548
2549 if (msg->msg_name) {
2550 struct sockaddr_rose *srose;
2551 + struct full_sockaddr_rose *full_srose = msg->msg_name;
2552
2553 memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
2554 srose = msg->msg_name;
2555 @@ -1260,18 +1261,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
2556 srose->srose_addr = rose->dest_addr;
2557 srose->srose_call = rose->dest_call;
2558 srose->srose_ndigis = rose->dest_ndigis;
2559 - if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
2560 - struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
2561 - for (n = 0 ; n < rose->dest_ndigis ; n++)
2562 - full_srose->srose_digis[n] = rose->dest_digis[n];
2563 - msg->msg_namelen = sizeof(struct full_sockaddr_rose);
2564 - } else {
2565 - if (rose->dest_ndigis >= 1) {
2566 - srose->srose_ndigis = 1;
2567 - srose->srose_digi = rose->dest_digis[0];
2568 - }
2569 - msg->msg_namelen = sizeof(struct sockaddr_rose);
2570 - }
2571 + for (n = 0 ; n < rose->dest_ndigis ; n++)
2572 + full_srose->srose_digis[n] = rose->dest_digis[n];
2573 + msg->msg_namelen = sizeof(struct full_sockaddr_rose);
2574 }
2575
2576 skb_free_datagram(sk, skb);
2577 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
2578 index 01625ccc3ae6..a427623ee574 100644
2579 --- a/net/unix/af_unix.c
2580 +++ b/net/unix/af_unix.c
2581 @@ -530,13 +530,17 @@ static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
2582 static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
2583 struct msghdr *, size_t, int);
2584
2585 -static void unix_set_peek_off(struct sock *sk, int val)
2586 +static int unix_set_peek_off(struct sock *sk, int val)
2587 {
2588 struct unix_sock *u = unix_sk(sk);
2589
2590 - mutex_lock(&u->readlock);
2591 + if (mutex_lock_interruptible(&u->readlock))
2592 + return -EINTR;
2593 +
2594 sk->sk_peek_off = val;
2595 mutex_unlock(&u->readlock);
2596 +
2597 + return 0;
2598 }
2599
2600
2601 @@ -714,7 +718,9 @@ static int unix_autobind(struct socket *sock)
2602 int err;
2603 unsigned int retries = 0;
2604
2605 - mutex_lock(&u->readlock);
2606 + err = mutex_lock_interruptible(&u->readlock);
2607 + if (err)
2608 + return err;
2609
2610 err = 0;
2611 if (u->addr)
2612 @@ -873,7 +879,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2613 goto out;
2614 addr_len = err;
2615
2616 - mutex_lock(&u->readlock);
2617 + err = mutex_lock_interruptible(&u->readlock);
2618 + if (err)
2619 + goto out;
2620
2621 err = -EINVAL;
2622 if (u->addr)