Magellan Linux

Contents of /trunk/kernel26-alx/patches-3.10/0140-3.10.41-all-fixes.patch

Revision 2672
Tue Jul 21 16:46:35 2015 UTC by niro
File size: 116259 bytes
-3.10.84-alx-r1
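
This patch takes a vanilla 3.10.40 source tree to 3.10.41 (see the Makefile hunk at the top of the diff). A minimal sketch of applying it, assuming the kernel sources are unpacked in linux-3.10.40/ and this file has been downloaded next to that directory (both paths are assumptions, not part of the patch itself):

    cd linux-3.10.40
    # dry run first to confirm the tree is at the expected sublevel
    patch -p1 --dry-run < ../0140-3.10.41-all-fixes.patch
    # apply for real; -p1 strips the a/ and b/ prefixes used in the diff
    patch -p1 < ../0140-3.10.41-all-fixes.patch
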
1 diff --git a/Makefile b/Makefile
2 index b2285cababb0..370cc01afb07 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 10
8 -SUBLEVEL = 40
9 +SUBLEVEL = 41
10 EXTRAVERSION =
11 NAME = TOSSUG Baby Fish
12
13 diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
14 index 2e67a272df70..9ce8ba1a1433 100644
15 --- a/arch/arm/configs/multi_v7_defconfig
16 +++ b/arch/arm/configs/multi_v7_defconfig
17 @@ -1,6 +1,7 @@
18 CONFIG_EXPERIMENTAL=y
19 CONFIG_NO_HZ=y
20 CONFIG_HIGH_RES_TIMERS=y
21 +CONFIG_BLK_DEV_INITRD=y
22 CONFIG_ARCH_MVEBU=y
23 CONFIG_MACH_ARMADA_370=y
24 CONFIG_ARCH_SIRF=y
25 @@ -22,6 +23,7 @@ CONFIG_AEABI=y
26 CONFIG_HIGHMEM=y
27 CONFIG_HIGHPTE=y
28 CONFIG_ARM_APPENDED_DTB=y
29 +CONFIG_ARM_ATAG_DTB_COMPAT=y
30 CONFIG_VFP=y
31 CONFIG_NEON=y
32 CONFIG_NET=y
33 diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
34 index 0c9107285e66..10a0c2aad8cf 100644
35 --- a/arch/parisc/kernel/syscall_table.S
36 +++ b/arch/parisc/kernel/syscall_table.S
37 @@ -392,7 +392,7 @@
38 ENTRY_COMP(vmsplice)
39 ENTRY_COMP(move_pages) /* 295 */
40 ENTRY_SAME(getcpu)
41 - ENTRY_SAME(epoll_pwait)
42 + ENTRY_COMP(epoll_pwait)
43 ENTRY_COMP(statfs64)
44 ENTRY_COMP(fstatfs64)
45 ENTRY_COMP(kexec_load) /* 300 */
46 diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
47 index b2c68ce139ae..a5b30c71a8d3 100644
48 --- a/arch/powerpc/lib/crtsavres.S
49 +++ b/arch/powerpc/lib/crtsavres.S
50 @@ -231,6 +231,87 @@ _GLOBAL(_rest32gpr_31_x)
51 mr 1,11
52 blr
53
54 +#ifdef CONFIG_ALTIVEC
55 +/* Called with r0 pointing just beyond the end of the vector save area. */
56 +
57 +_GLOBAL(_savevr_20)
58 + li r11,-192
59 + stvx vr20,r11,r0
60 +_GLOBAL(_savevr_21)
61 + li r11,-176
62 + stvx vr21,r11,r0
63 +_GLOBAL(_savevr_22)
64 + li r11,-160
65 + stvx vr22,r11,r0
66 +_GLOBAL(_savevr_23)
67 + li r11,-144
68 + stvx vr23,r11,r0
69 +_GLOBAL(_savevr_24)
70 + li r11,-128
71 + stvx vr24,r11,r0
72 +_GLOBAL(_savevr_25)
73 + li r11,-112
74 + stvx vr25,r11,r0
75 +_GLOBAL(_savevr_26)
76 + li r11,-96
77 + stvx vr26,r11,r0
78 +_GLOBAL(_savevr_27)
79 + li r11,-80
80 + stvx vr27,r11,r0
81 +_GLOBAL(_savevr_28)
82 + li r11,-64
83 + stvx vr28,r11,r0
84 +_GLOBAL(_savevr_29)
85 + li r11,-48
86 + stvx vr29,r11,r0
87 +_GLOBAL(_savevr_30)
88 + li r11,-32
89 + stvx vr30,r11,r0
90 +_GLOBAL(_savevr_31)
91 + li r11,-16
92 + stvx vr31,r11,r0
93 + blr
94 +
95 +_GLOBAL(_restvr_20)
96 + li r11,-192
97 + lvx vr20,r11,r0
98 +_GLOBAL(_restvr_21)
99 + li r11,-176
100 + lvx vr21,r11,r0
101 +_GLOBAL(_restvr_22)
102 + li r11,-160
103 + lvx vr22,r11,r0
104 +_GLOBAL(_restvr_23)
105 + li r11,-144
106 + lvx vr23,r11,r0
107 +_GLOBAL(_restvr_24)
108 + li r11,-128
109 + lvx vr24,r11,r0
110 +_GLOBAL(_restvr_25)
111 + li r11,-112
112 + lvx vr25,r11,r0
113 +_GLOBAL(_restvr_26)
114 + li r11,-96
115 + lvx vr26,r11,r0
116 +_GLOBAL(_restvr_27)
117 + li r11,-80
118 + lvx vr27,r11,r0
119 +_GLOBAL(_restvr_28)
120 + li r11,-64
121 + lvx vr28,r11,r0
122 +_GLOBAL(_restvr_29)
123 + li r11,-48
124 + lvx vr29,r11,r0
125 +_GLOBAL(_restvr_30)
126 + li r11,-32
127 + lvx vr30,r11,r0
128 +_GLOBAL(_restvr_31)
129 + li r11,-16
130 + lvx vr31,r11,r0
131 + blr
132 +
133 +#endif /* CONFIG_ALTIVEC */
134 +
135 #else /* CONFIG_PPC64 */
136
137 .section ".text.save.restore","ax",@progbits
138 @@ -356,6 +437,111 @@ _restgpr0_31:
139 mtlr r0
140 blr
141
142 +#ifdef CONFIG_ALTIVEC
143 +/* Called with r0 pointing just beyond the end of the vector save area. */
144 +
145 +.globl _savevr_20
146 +_savevr_20:
147 + li r12,-192
148 + stvx vr20,r12,r0
149 +.globl _savevr_21
150 +_savevr_21:
151 + li r12,-176
152 + stvx vr21,r12,r0
153 +.globl _savevr_22
154 +_savevr_22:
155 + li r12,-160
156 + stvx vr22,r12,r0
157 +.globl _savevr_23
158 +_savevr_23:
159 + li r12,-144
160 + stvx vr23,r12,r0
161 +.globl _savevr_24
162 +_savevr_24:
163 + li r12,-128
164 + stvx vr24,r12,r0
165 +.globl _savevr_25
166 +_savevr_25:
167 + li r12,-112
168 + stvx vr25,r12,r0
169 +.globl _savevr_26
170 +_savevr_26:
171 + li r12,-96
172 + stvx vr26,r12,r0
173 +.globl _savevr_27
174 +_savevr_27:
175 + li r12,-80
176 + stvx vr27,r12,r0
177 +.globl _savevr_28
178 +_savevr_28:
179 + li r12,-64
180 + stvx vr28,r12,r0
181 +.globl _savevr_29
182 +_savevr_29:
183 + li r12,-48
184 + stvx vr29,r12,r0
185 +.globl _savevr_30
186 +_savevr_30:
187 + li r12,-32
188 + stvx vr30,r12,r0
189 +.globl _savevr_31
190 +_savevr_31:
191 + li r12,-16
192 + stvx vr31,r12,r0
193 + blr
194 +
195 +.globl _restvr_20
196 +_restvr_20:
197 + li r12,-192
198 + lvx vr20,r12,r0
199 +.globl _restvr_21
200 +_restvr_21:
201 + li r12,-176
202 + lvx vr21,r12,r0
203 +.globl _restvr_22
204 +_restvr_22:
205 + li r12,-160
206 + lvx vr22,r12,r0
207 +.globl _restvr_23
208 +_restvr_23:
209 + li r12,-144
210 + lvx vr23,r12,r0
211 +.globl _restvr_24
212 +_restvr_24:
213 + li r12,-128
214 + lvx vr24,r12,r0
215 +.globl _restvr_25
216 +_restvr_25:
217 + li r12,-112
218 + lvx vr25,r12,r0
219 +.globl _restvr_26
220 +_restvr_26:
221 + li r12,-96
222 + lvx vr26,r12,r0
223 +.globl _restvr_27
224 +_restvr_27:
225 + li r12,-80
226 + lvx vr27,r12,r0
227 +.globl _restvr_28
228 +_restvr_28:
229 + li r12,-64
230 + lvx vr28,r12,r0
231 +.globl _restvr_29
232 +_restvr_29:
233 + li r12,-48
234 + lvx vr29,r12,r0
235 +.globl _restvr_30
236 +_restvr_30:
237 + li r12,-32
238 + lvx vr30,r12,r0
239 +.globl _restvr_31
240 +_restvr_31:
241 + li r12,-16
242 + lvx vr31,r12,r0
243 + blr
244 +
245 +#endif /* CONFIG_ALTIVEC */
246 +
247 #endif /* CONFIG_PPC64 */
248
249 #endif
250 diff --git a/block/blk-core.c b/block/blk-core.c
251 index 2c66daba44dd..5a750b18172e 100644
252 --- a/block/blk-core.c
253 +++ b/block/blk-core.c
254 @@ -2299,7 +2299,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
255 if (!req->bio)
256 return false;
257
258 - trace_block_rq_complete(req->q, req);
259 + trace_block_rq_complete(req->q, req, nr_bytes);
260
261 /*
262 * For fs requests, rq is just carrier of independent bio's
263 diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
264 index c421fa528518..1e89a3dd3d51 100644
265 --- a/drivers/block/rbd.c
266 +++ b/drivers/block/rbd.c
267 @@ -2278,7 +2278,7 @@ out_partial:
268 rbd_obj_request_put(obj_request);
269 out_unwind:
270 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
271 - rbd_obj_request_put(obj_request);
272 + rbd_img_obj_request_del(img_request, obj_request);
273
274 return -ENOMEM;
275 }
276 diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
277 index afb701ec90ca..0f3e3047e29c 100644
278 --- a/drivers/firewire/ohci.c
279 +++ b/drivers/firewire/ohci.c
280 @@ -271,6 +271,7 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
281
282 static char ohci_driver_name[] = KBUILD_MODNAME;
283
284 +#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
285 #define PCI_DEVICE_ID_AGERE_FW643 0x5901
286 #define PCI_DEVICE_ID_CREATIVE_SB1394 0x4001
287 #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
288 @@ -278,17 +279,15 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
289 #define PCI_DEVICE_ID_TI_TSB12LV26 0x8020
290 #define PCI_DEVICE_ID_TI_TSB82AA2 0x8025
291 #define PCI_DEVICE_ID_VIA_VT630X 0x3044
292 -#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
293 #define PCI_REV_ID_VIA_VT6306 0x46
294
295 -#define QUIRK_CYCLE_TIMER 1
296 -#define QUIRK_RESET_PACKET 2
297 -#define QUIRK_BE_HEADERS 4
298 -#define QUIRK_NO_1394A 8
299 -#define QUIRK_NO_MSI 16
300 -#define QUIRK_TI_SLLZ059 32
301 -#define QUIRK_IR_WAKE 64
302 -#define QUIRK_PHY_LCTRL_TIMEOUT 128
303 +#define QUIRK_CYCLE_TIMER 0x1
304 +#define QUIRK_RESET_PACKET 0x2
305 +#define QUIRK_BE_HEADERS 0x4
306 +#define QUIRK_NO_1394A 0x8
307 +#define QUIRK_NO_MSI 0x10
308 +#define QUIRK_TI_SLLZ059 0x20
309 +#define QUIRK_IR_WAKE 0x40
310
311 /* In case of multiple matches in ohci_quirks[], only the first one is used. */
312 static const struct {
313 @@ -301,10 +300,7 @@ static const struct {
314 QUIRK_BE_HEADERS},
315
316 {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
317 - QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},
318 -
319 - {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
320 - QUIRK_PHY_LCTRL_TIMEOUT},
321 + QUIRK_NO_MSI},
322
323 {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
324 QUIRK_RESET_PACKET},
325 @@ -351,7 +347,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
326 ", disable MSI = " __stringify(QUIRK_NO_MSI)
327 ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
328 ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
329 - ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT)
330 ")");
331
332 #define OHCI_PARAM_DEBUG_AT_AR 1
333 @@ -2293,9 +2288,6 @@ static int ohci_enable(struct fw_card *card,
334 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
335 * cannot actually use the phy at that time. These need tens of
336 * millisecods pause between LPS write and first phy access too.
337 - *
338 - * But do not wait for 50msec on Agere/LSI cards. Their phy
339 - * arbitration state machine may time out during such a long wait.
340 */
341
342 reg_write(ohci, OHCI1394_HCControlSet,
343 @@ -2303,11 +2295,8 @@ static int ohci_enable(struct fw_card *card,
344 OHCI1394_HCControl_postedWriteEnable);
345 flush_writes(ohci);
346
347 - if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
348 + for (lps = 0, i = 0; !lps && i < 3; i++) {
349 msleep(50);
350 -
351 - for (lps = 0, i = 0; !lps && i < 150; i++) {
352 - msleep(1);
353 lps = reg_read(ohci, OHCI1394_HCControlSet) &
354 OHCI1394_HCControl_LPS;
355 }
356 diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
357 index 489cb8cece4d..3401eb86786c 100644
358 --- a/drivers/gpu/drm/qxl/qxl_ttm.c
359 +++ b/drivers/gpu/drm/qxl/qxl_ttm.c
360 @@ -431,6 +431,7 @@ static int qxl_sync_obj_flush(void *sync_obj)
361
362 static void qxl_sync_obj_unref(void **sync_obj)
363 {
364 + *sync_obj = NULL;
365 }
366
367 static void *qxl_sync_obj_ref(void *sync_obj)
368 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
369 index eb18bb7af1cc..06ccfe477650 100644
370 --- a/drivers/gpu/drm/radeon/radeon_display.c
371 +++ b/drivers/gpu/drm/radeon/radeon_display.c
372 @@ -729,6 +729,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
373 if (radeon_connector->edid) {
374 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
375 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
376 + drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
377 return ret;
378 }
379 drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
380 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
381 index 394e6476105b..da068bd13f92 100644
382 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
383 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
384 @@ -834,14 +834,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
385 SVGA3dCmdSurfaceDMA dma;
386 } *cmd;
387 int ret;
388 + SVGA3dCmdSurfaceDMASuffix *suffix;
389 + uint32_t bo_size;
390
391 cmd = container_of(header, struct vmw_dma_cmd, header);
392 + suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
393 + header->size - sizeof(*suffix));
394 +
395 + /* Make sure device and verifier stays in sync. */
396 + if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
397 + DRM_ERROR("Invalid DMA suffix size.\n");
398 + return -EINVAL;
399 + }
400 +
401 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
402 &cmd->dma.guest.ptr,
403 &vmw_bo);
404 if (unlikely(ret != 0))
405 return ret;
406
407 + /* Make sure DMA doesn't cross BO boundaries. */
408 + bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
409 + if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
410 + DRM_ERROR("Invalid DMA offset.\n");
411 + return -EINVAL;
412 + }
413 +
414 + bo_size -= cmd->dma.guest.ptr.offset;
415 + if (unlikely(suffix->maximumOffset > bo_size))
416 + suffix->maximumOffset = bo_size;
417 +
418 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
419 user_surface_converter, &cmd->dma.host.sid,
420 NULL);
421 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
422 index ed5ce2a41bbf..021b5227e783 100644
423 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
424 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
425 @@ -147,7 +147,7 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
426 }
427
428 if (!vmw_kms_validate_mode_vram(vmw_priv,
429 - info->fix.line_length,
430 + var->xres * var->bits_per_pixel/8,
431 var->yoffset + var->yres)) {
432 DRM_ERROR("Requested geom can not fit in framebuffer\n");
433 return -EINVAL;
434 @@ -162,6 +162,8 @@ static int vmw_fb_set_par(struct fb_info *info)
435 struct vmw_private *vmw_priv = par->vmw_priv;
436 int ret;
437
438 + info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
439 +
440 ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
441 info->fix.line_length,
442 par->bpp, par->depth);
443 @@ -177,6 +179,7 @@ static int vmw_fb_set_par(struct fb_info *info)
444 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
445 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
446 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
447 + vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
448 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
449 }
450
451 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
452 index e73740b55aea..75771b2077c0 100644
453 --- a/drivers/md/raid1.c
454 +++ b/drivers/md/raid1.c
455 @@ -94,6 +94,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
456 struct pool_info *pi = data;
457 struct r1bio *r1_bio;
458 struct bio *bio;
459 + int need_pages;
460 int i, j;
461
462 r1_bio = r1bio_pool_alloc(gfp_flags, pi);
463 @@ -116,15 +117,15 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
464 * RESYNC_PAGES for each bio.
465 */
466 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
467 - j = pi->raid_disks;
468 + need_pages = pi->raid_disks;
469 else
470 - j = 1;
471 - while(j--) {
472 + need_pages = 1;
473 + for (j = 0; j < need_pages; j++) {
474 bio = r1_bio->bios[j];
475 bio->bi_vcnt = RESYNC_PAGES;
476
477 if (bio_alloc_pages(bio, gfp_flags))
478 - goto out_free_bio;
479 + goto out_free_pages;
480 }
481 /* If not user-requests, copy the page pointers to all bios */
482 if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
483 @@ -138,6 +139,14 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
484
485 return r1_bio;
486
487 +out_free_pages:
488 + while (--j >= 0) {
489 + struct bio_vec *bv;
490 +
491 + bio_for_each_segment_all(bv, r1_bio->bios[j], i)
492 + __free_page(bv->bv_page);
493 + }
494 +
495 out_free_bio:
496 while (++j < pi->raid_disks)
497 bio_put(r1_bio->bios[j]);
498 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
499 index 8395b0992a89..b143ce91e081 100644
500 --- a/drivers/net/bonding/bond_main.c
501 +++ b/drivers/net/bonding/bond_main.c
502 @@ -4995,6 +4995,7 @@ static int __init bonding_init(void)
503 out:
504 return res;
505 err:
506 + bond_destroy_debugfs();
507 rtnl_link_unregister(&bond_link_ops);
508 err_link:
509 unregister_pernet_subsys(&bond_net_ops);
510 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
511 index 32a9609cc98b..fd50781e996c 100644
512 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
513 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
514 @@ -1038,9 +1038,6 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
515 ETH_VLAN_FILTER_CLASSIFY, config);
516 }
517
518 -#define list_next_entry(pos, member) \
519 - list_entry((pos)->member.next, typeof(*(pos)), member)
520 -
521 /**
522 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
523 *
524 diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
525 index 4a0db617ab64..4942ddf9c8ae 100644
526 --- a/drivers/net/ethernet/broadcom/tg3.c
527 +++ b/drivers/net/ethernet/broadcom/tg3.c
528 @@ -12073,7 +12073,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
529 if (tg3_flag(tp, MAX_RXPEND_64) &&
530 tp->rx_pending > 63)
531 tp->rx_pending = 63;
532 - tp->rx_jumbo_pending = ering->rx_jumbo_pending;
533 +
534 + if (tg3_flag(tp, JUMBO_RING_ENABLE))
535 + tp->rx_jumbo_pending = ering->rx_jumbo_pending;
536
537 for (i = 0; i < tp->irq_max; i++)
538 tp->napi[i].tx_pending = ering->tx_pending;
539 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
540 index 06eba6e480c9..c12aeaee22fa 100644
541 --- a/drivers/net/macvlan.c
542 +++ b/drivers/net/macvlan.c
543 @@ -261,11 +261,9 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
544 const struct macvlan_dev *vlan = netdev_priv(dev);
545 const struct macvlan_port *port = vlan->port;
546 const struct macvlan_dev *dest;
547 - __u8 ip_summed = skb->ip_summed;
548
549 if (vlan->mode == MACVLAN_MODE_BRIDGE) {
550 const struct ethhdr *eth = (void *)skb->data;
551 - skb->ip_summed = CHECKSUM_UNNECESSARY;
552
553 /* send to other bridge ports directly */
554 if (is_multicast_ether_addr(eth->h_dest)) {
555 @@ -283,7 +281,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
556 }
557
558 xmit_world:
559 - skb->ip_summed = ip_summed;
560 skb->dev = vlan->lowerdev;
561 return dev_queue_xmit(skb);
562 }
563 @@ -423,8 +420,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
564 struct macvlan_dev *vlan = netdev_priv(dev);
565 struct net_device *lowerdev = vlan->lowerdev;
566
567 - if (change & IFF_ALLMULTI)
568 - dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
569 + if (dev->flags & IFF_UP) {
570 + if (change & IFF_ALLMULTI)
571 + dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
572 + }
573 }
574
575 static void macvlan_set_mac_lists(struct net_device *dev)
576 diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
577 index 25ba7eca9a13..7cabe4583904 100644
578 --- a/drivers/net/usb/cdc_mbim.c
579 +++ b/drivers/net/usb/cdc_mbim.c
580 @@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
581 cdc_ncm_unbind(dev, intf);
582 }
583
584 +/* verify that the ethernet protocol is IPv4 or IPv6 */
585 +static bool is_ip_proto(__be16 proto)
586 +{
587 + switch (proto) {
588 + case htons(ETH_P_IP):
589 + case htons(ETH_P_IPV6):
590 + return true;
591 + }
592 + return false;
593 +}
594
595 static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
596 {
597 @@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
598 struct cdc_ncm_ctx *ctx = info->ctx;
599 __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
600 u16 tci = 0;
601 + bool is_ip;
602 u8 *c;
603
604 if (!ctx)
605 @@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
606 if (skb->len <= ETH_HLEN)
607 goto error;
608
609 + /* Some applications using e.g. packet sockets will
610 + * bypass the VLAN acceleration and create tagged
611 + * ethernet frames directly. We primarily look for
612 + * the accelerated out-of-band tag, but fall back if
613 + * required
614 + */
615 + skb_reset_mac_header(skb);
616 + if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
617 + __vlan_get_tag(skb, &tci) == 0) {
618 + is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
619 + skb_pull(skb, VLAN_ETH_HLEN);
620 + } else {
621 + is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
622 + skb_pull(skb, ETH_HLEN);
623 + }
624 +
625 /* mapping VLANs to MBIM sessions:
626 * no tag => IPS session <0>
627 * 1 - 255 => IPS session <vlanid>
628 * 256 - 511 => DSS session <vlanid - 256>
629 * 512 - 4095 => unsupported, drop
630 */
631 - vlan_get_tag(skb, &tci);
632 -
633 switch (tci & 0x0f00) {
634 case 0x0000: /* VLAN ID 0 - 255 */
635 - /* verify that datagram is IPv4 or IPv6 */
636 - skb_reset_mac_header(skb);
637 - switch (eth_hdr(skb)->h_proto) {
638 - case htons(ETH_P_IP):
639 - case htons(ETH_P_IPV6):
640 - break;
641 - default:
642 + if (!is_ip)
643 goto error;
644 - }
645 c = (u8 *)&sign;
646 c[3] = tci;
647 break;
648 @@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
649 "unsupported tci=0x%04x\n", tci);
650 goto error;
651 }
652 - skb_pull(skb, ETH_HLEN);
653 }
654
655 spin_lock_bh(&ctx->mtx);
656 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
657 index 37d9785974fc..7be4860ccfd7 100644
658 --- a/drivers/net/usb/qmi_wwan.c
659 +++ b/drivers/net/usb/qmi_wwan.c
660 @@ -649,9 +649,26 @@ static const struct usb_device_id products[] = {
661 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
662 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
663 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
664 + {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
665 + {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
666 + {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
667 + {QMI_FIXED_INTF(0x16d8, 0x6280, 0)}, /* CMOTech CHU-628 */
668 + {QMI_FIXED_INTF(0x16d8, 0x7001, 0)}, /* CMOTech CHU-720S */
669 + {QMI_FIXED_INTF(0x16d8, 0x7002, 0)}, /* CMOTech 7002 */
670 + {QMI_FIXED_INTF(0x16d8, 0x7003, 4)}, /* CMOTech CHU-629K */
671 + {QMI_FIXED_INTF(0x16d8, 0x7004, 3)}, /* CMOTech 7004 */
672 + {QMI_FIXED_INTF(0x16d8, 0x7006, 5)}, /* CMOTech CGU-629 */
673 + {QMI_FIXED_INTF(0x16d8, 0x700a, 4)}, /* CMOTech CHU-629S */
674 + {QMI_FIXED_INTF(0x16d8, 0x7211, 0)}, /* CMOTech CHU-720I */
675 + {QMI_FIXED_INTF(0x16d8, 0x7212, 0)}, /* CMOTech 7212 */
676 + {QMI_FIXED_INTF(0x16d8, 0x7213, 0)}, /* CMOTech 7213 */
677 + {QMI_FIXED_INTF(0x16d8, 0x7251, 1)}, /* CMOTech 7251 */
678 + {QMI_FIXED_INTF(0x16d8, 0x7252, 1)}, /* CMOTech 7252 */
679 + {QMI_FIXED_INTF(0x16d8, 0x7253, 1)}, /* CMOTech 7253 */
680 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
681 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
682 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
683 + {QMI_FIXED_INTF(0x19d2, 0x0019, 3)}, /* ONDA MT689DC */
684 {QMI_FIXED_INTF(0x19d2, 0x0021, 4)},
685 {QMI_FIXED_INTF(0x19d2, 0x0025, 1)},
686 {QMI_FIXED_INTF(0x19d2, 0x0031, 4)},
687 @@ -698,6 +715,7 @@ static const struct usb_device_id products[] = {
688 {QMI_FIXED_INTF(0x19d2, 0x1255, 3)},
689 {QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
690 {QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
691 + {QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */
692 {QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
693 {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
694 {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
695 @@ -708,12 +726,28 @@ static const struct usb_device_id products[] = {
696 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
697 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
698 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
699 + {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC73xx */
700 + {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC73xx */
701 + {QMI_FIXED_INTF(0x1199, 0x68c0, 11)}, /* Sierra Wireless MC73xx */
702 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
703 + {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
704 + {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
705 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
706 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
707 + {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
708 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
709 + {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
710 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
711 - {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)}, /* Cinterion PLxx */
712 + {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
713 + {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
714 + {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
715 + {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
716 + {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
717 + {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
718 + {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
719 + {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
720 + {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
721 + {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
722
723 /* 4. Gobi 1000 devices */
724 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
725 @@ -747,6 +781,7 @@ static const struct usb_device_id products[] = {
726 {QMI_GOBI_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
727 {QMI_GOBI_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
728 {QMI_GOBI_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
729 + {QMI_GOBI_DEVICE(0x0af0, 0x8120)}, /* Option GTM681W */
730 {QMI_GOBI_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
731 {QMI_GOBI_DEVICE(0x1199, 0x68a9)}, /* Sierra Wireless Modem */
732 {QMI_GOBI_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
733 @@ -760,7 +795,6 @@ static const struct usb_device_id products[] = {
734 {QMI_GOBI_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
735 {QMI_GOBI_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
736 {QMI_GOBI_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */
737 - {QMI_FIXED_INTF(0x1199, 0x9011, 5)}, /* alternate interface number!? */
738 {QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
739 {QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
740 {QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
741 diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
742 index 25506c777381..9bec1717047e 100644
743 --- a/drivers/scsi/megaraid/megaraid_mm.c
744 +++ b/drivers/scsi/megaraid/megaraid_mm.c
745 @@ -486,6 +486,8 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
746
747 pthru32->dataxferaddr = kioc->buf_paddr;
748 if (kioc->data_dir & UIOC_WR) {
749 + if (pthru32->dataxferlen > kioc->xferlen)
750 + return -EINVAL;
751 if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
752 pthru32->dataxferlen)) {
753 return (-EFAULT);
754 diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
755 index 3e58b2245f1f..859240408f9e 100644
756 --- a/drivers/scsi/scsi_scan.c
757 +++ b/drivers/scsi/scsi_scan.c
758 @@ -320,6 +320,7 @@ static void scsi_target_destroy(struct scsi_target *starget)
759 struct Scsi_Host *shost = dev_to_shost(dev->parent);
760 unsigned long flags;
761
762 + starget->state = STARGET_DEL;
763 transport_destroy_device(dev);
764 spin_lock_irqsave(shost->host_lock, flags);
765 if (shost->hostt->target_destroy)
766 @@ -371,6 +372,37 @@ static struct scsi_target *__scsi_find_target(struct device *parent,
767 }
768
769 /**
770 + * scsi_target_reap_ref_release - remove target from visibility
771 + * @kref: the reap_ref in the target being released
772 + *
773 + * Called on last put of reap_ref, which is the indication that no device
774 + * under this target is visible anymore, so render the target invisible in
775 + * sysfs. Note: we have to be in user context here because the target reaps
776 + * should be done in places where the scsi device visibility is being removed.
777 + */
778 +static void scsi_target_reap_ref_release(struct kref *kref)
779 +{
780 + struct scsi_target *starget
781 + = container_of(kref, struct scsi_target, reap_ref);
782 +
783 + /*
784 + * if we get here and the target is still in the CREATED state that
785 + * means it was allocated but never made visible (because a scan
786 + * turned up no LUNs), so don't call device_del() on it.
787 + */
788 + if (starget->state != STARGET_CREATED) {
789 + transport_remove_device(&starget->dev);
790 + device_del(&starget->dev);
791 + }
792 + scsi_target_destroy(starget);
793 +}
794 +
795 +static void scsi_target_reap_ref_put(struct scsi_target *starget)
796 +{
797 + kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
798 +}
799 +
800 +/**
801 * scsi_alloc_target - allocate a new or find an existing target
802 * @parent: parent of the target (need not be a scsi host)
803 * @channel: target channel number (zero if no channels)
804 @@ -392,7 +424,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
805 + shost->transportt->target_size;
806 struct scsi_target *starget;
807 struct scsi_target *found_target;
808 - int error;
809 + int error, ref_got;
810
811 starget = kzalloc(size, GFP_KERNEL);
812 if (!starget) {
813 @@ -401,7 +433,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
814 }
815 dev = &starget->dev;
816 device_initialize(dev);
817 - starget->reap_ref = 1;
818 + kref_init(&starget->reap_ref);
819 dev->parent = get_device(parent);
820 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
821 dev->bus = &scsi_bus_type;
822 @@ -441,29 +473,36 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
823 return starget;
824
825 found:
826 - found_target->reap_ref++;
827 + /*
828 + * release routine already fired if kref is zero, so if we can still
829 + * take the reference, the target must be alive. If we can't, it must
830 + * be dying and we need to wait for a new target
831 + */
832 + ref_got = kref_get_unless_zero(&found_target->reap_ref);
833 +
834 spin_unlock_irqrestore(shost->host_lock, flags);
835 - if (found_target->state != STARGET_DEL) {
836 + if (ref_got) {
837 put_device(dev);
838 return found_target;
839 }
840 - /* Unfortunately, we found a dying target; need to
841 - * wait until it's dead before we can get a new one */
842 + /*
843 + * Unfortunately, we found a dying target; need to wait until it's
844 + * dead before we can get a new one. There is an anomaly here. We
845 + * *should* call scsi_target_reap() to balance the kref_get() of the
846 + * reap_ref above. However, since the target being released, it's
847 + * already invisible and the reap_ref is irrelevant. If we call
848 + * scsi_target_reap() we might spuriously do another device_del() on
849 + * an already invisible target.
850 + */
851 put_device(&found_target->dev);
852 - flush_scheduled_work();
853 + /*
854 + * length of time is irrelevant here, we just want to yield the CPU
855 + * for a tick to avoid busy waiting for the target to die.
856 + */
857 + msleep(1);
858 goto retry;
859 }
860
861 -static void scsi_target_reap_usercontext(struct work_struct *work)
862 -{
863 - struct scsi_target *starget =
864 - container_of(work, struct scsi_target, ew.work);
865 -
866 - transport_remove_device(&starget->dev);
867 - device_del(&starget->dev);
868 - scsi_target_destroy(starget);
869 -}
870 -
871 /**
872 * scsi_target_reap - check to see if target is in use and destroy if not
873 * @starget: target to be checked
874 @@ -474,28 +513,13 @@ static void scsi_target_reap_usercontext(struct work_struct *work)
875 */
876 void scsi_target_reap(struct scsi_target *starget)
877 {
878 - struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
879 - unsigned long flags;
880 - enum scsi_target_state state;
881 - int empty = 0;
882 -
883 - spin_lock_irqsave(shost->host_lock, flags);
884 - state = starget->state;
885 - if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
886 - empty = 1;
887 - starget->state = STARGET_DEL;
888 - }
889 - spin_unlock_irqrestore(shost->host_lock, flags);
890 -
891 - if (!empty)
892 - return;
893 -
894 - BUG_ON(state == STARGET_DEL);
895 - if (state == STARGET_CREATED)
896 - scsi_target_destroy(starget);
897 - else
898 - execute_in_process_context(scsi_target_reap_usercontext,
899 - &starget->ew);
900 + /*
901 + * serious problem if this triggers: STARGET_DEL is only set in the if
902 + * the reap_ref drops to zero, so we're trying to do another final put
903 + * on an already released kref
904 + */
905 + BUG_ON(starget->state == STARGET_DEL);
906 + scsi_target_reap_ref_put(starget);
907 }
908
909 /**
910 @@ -1527,6 +1551,10 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
911 }
912 mutex_unlock(&shost->scan_mutex);
913 scsi_autopm_put_target(starget);
914 + /*
915 + * paired with scsi_alloc_target(). Target will be destroyed unless
916 + * scsi_probe_and_add_lun made an underlying device visible
917 + */
918 scsi_target_reap(starget);
919 put_device(&starget->dev);
920
921 @@ -1607,8 +1635,10 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
922
923 out_reap:
924 scsi_autopm_put_target(starget);
925 - /* now determine if the target has any children at all
926 - * and if not, nuke it */
927 + /*
928 + * paired with scsi_alloc_target(): determine if the target has
929 + * any children at all and if not, nuke it
930 + */
931 scsi_target_reap(starget);
932
933 put_device(&starget->dev);
934 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
935 index 931a7d954203..9e2dd478dd15 100644
936 --- a/drivers/scsi/scsi_sysfs.c
937 +++ b/drivers/scsi/scsi_sysfs.c
938 @@ -332,17 +332,14 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
939 {
940 struct scsi_device *sdev;
941 struct device *parent;
942 - struct scsi_target *starget;
943 struct list_head *this, *tmp;
944 unsigned long flags;
945
946 sdev = container_of(work, struct scsi_device, ew.work);
947
948 parent = sdev->sdev_gendev.parent;
949 - starget = to_scsi_target(parent);
950
951 spin_lock_irqsave(sdev->host->host_lock, flags);
952 - starget->reap_ref++;
953 list_del(&sdev->siblings);
954 list_del(&sdev->same_target_siblings);
955 list_del(&sdev->starved_entry);
956 @@ -362,8 +359,6 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
957 /* NULL queue means the device can't be used */
958 sdev->request_queue = NULL;
959
960 - scsi_target_reap(scsi_target(sdev));
961 -
962 kfree(sdev->inquiry);
963 kfree(sdev);
964
965 @@ -978,6 +973,13 @@ void __scsi_remove_device(struct scsi_device *sdev)
966 sdev->host->hostt->slave_destroy(sdev);
967 transport_destroy_device(dev);
968
969 + /*
970 + * Paired with the kref_get() in scsi_sysfs_initialize(). We have
971 + * remoed sysfs visibility from the device, so make the target
972 + * invisible if this was the last device underneath it.
973 + */
974 + scsi_target_reap(scsi_target(sdev));
975 +
976 put_device(dev);
977 }
978
979 @@ -1040,7 +1042,7 @@ void scsi_remove_target(struct device *dev)
980 continue;
981 if (starget->dev.parent == dev || &starget->dev == dev) {
982 /* assuming new targets arrive at the end */
983 - starget->reap_ref++;
984 + kref_get(&starget->reap_ref);
985 spin_unlock_irqrestore(shost->host_lock, flags);
986 if (last)
987 scsi_target_reap(last);
988 @@ -1124,6 +1126,12 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
989 list_add_tail(&sdev->same_target_siblings, &starget->devices);
990 list_add_tail(&sdev->siblings, &shost->__devices);
991 spin_unlock_irqrestore(shost->host_lock, flags);
992 + /*
993 + * device can now only be removed via __scsi_remove_device() so hold
994 + * the target. Target will be held in CREATED state until something
995 + * beneath it becomes visible (in which case it moves to RUNNING)
996 + */
997 + kref_get(&starget->reap_ref);
998 }
999
1000 int scsi_is_sdev_device(const struct device *dev)
1001 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1002 index 81f59e8b7521..89cce1a32059 100644
1003 --- a/drivers/usb/class/cdc-acm.c
1004 +++ b/drivers/usb/class/cdc-acm.c
1005 @@ -1574,13 +1574,27 @@ static const struct usb_device_id acm_ids[] = {
1006 },
1007 /* Motorola H24 HSPA module: */
1008 { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
1009 - { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */
1010 - { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */
1011 - { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */
1012 - { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */
1013 - { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */
1014 - { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */
1015 - { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
1016 + { USB_DEVICE(0x22b8, 0x2d92), /* modem + diagnostics */
1017 + .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
1018 + },
1019 + { USB_DEVICE(0x22b8, 0x2d93), /* modem + AT port */
1020 + .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
1021 + },
1022 + { USB_DEVICE(0x22b8, 0x2d95), /* modem + AT port + diagnostics */
1023 + .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
1024 + },
1025 + { USB_DEVICE(0x22b8, 0x2d96), /* modem + NMEA */
1026 + .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
1027 + },
1028 + { USB_DEVICE(0x22b8, 0x2d97), /* modem + diagnostics + NMEA */
1029 + .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
1030 + },
1031 + { USB_DEVICE(0x22b8, 0x2d99), /* modem + AT port + NMEA */
1032 + .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
1033 + },
1034 + { USB_DEVICE(0x22b8, 0x2d9a), /* modem + AT port + diagnostics + NMEA */
1035 + .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
1036 + },
1037
1038 { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
1039 .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
1040 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1041 index c90d960e091b..ded751ca104a 100644
1042 --- a/drivers/usb/serial/cp210x.c
1043 +++ b/drivers/usb/serial/cp210x.c
1044 @@ -104,6 +104,7 @@ static const struct usb_device_id id_table[] = {
1045 { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
1046 { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */
1047 { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
1048 + { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
1049 { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
1050 { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
1051 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
1052 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1053 index b83da38bc915..2c635bd9c185 100644
1054 --- a/drivers/usb/serial/ftdi_sio.c
1055 +++ b/drivers/usb/serial/ftdi_sio.c
1056 @@ -912,6 +912,39 @@ static struct usb_device_id id_table_combined [] = {
1057 { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
1058 /* Cressi Devices */
1059 { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
1060 + /* Brainboxes Devices */
1061 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_001_PID) },
1062 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_012_PID) },
1063 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) },
1064 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) },
1065 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) },
1066 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) },
1067 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) },
1068 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) },
1069 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_4_PID) },
1070 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_5_PID) },
1071 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) },
1072 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) },
1073 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) },
1074 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) },
1075 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) },
1076 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) },
1077 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) },
1078 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) },
1079 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) },
1080 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) },
1081 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) },
1082 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) },
1083 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_357_PID) },
1084 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_1_PID) },
1085 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_2_PID) },
1086 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_3_PID) },
1087 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_1_PID) },
1088 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_2_PID) },
1089 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_1_PID) },
1090 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
1091 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
1092 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
1093 { }, /* Optional parameter entry */
1094 { } /* Terminating entry */
1095 };
1096 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
1097 index e599fbfcde5f..993c93df6874 100644
1098 --- a/drivers/usb/serial/ftdi_sio_ids.h
1099 +++ b/drivers/usb/serial/ftdi_sio_ids.h
1100 @@ -1326,3 +1326,40 @@
1101 * Manufacturer: Cressi
1102 */
1103 #define FTDI_CRESSI_PID 0x87d0
1104 +
1105 +/*
1106 + * Brainboxes devices
1107 + */
1108 +#define BRAINBOXES_VID 0x05d1
1109 +#define BRAINBOXES_VX_001_PID 0x1001 /* VX-001 ExpressCard 1 Port RS232 */
1110 +#define BRAINBOXES_VX_012_PID 0x1002 /* VX-012 ExpressCard 2 Port RS232 */
1111 +#define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */
1112 +#define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */
1113 +#define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */
1114 +#define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */
1115 +#define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */
1116 +#define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */
1117 +#define BRAINBOXES_US_606_3_PID 0x2003 /* US-606 6 Port RS232 Serial Port 4 and 6 */
1118 +#define BRAINBOXES_US_701_1_PID 0x2011 /* US-701 4xRS232 1Mbaud Port 1 and 2 */
1119 +#define BRAINBOXES_US_701_2_PID 0x2012 /* US-701 4xRS422 1Mbaud Port 3 and 4 */
1120 +#define BRAINBOXES_US_279_1_PID 0x2021 /* US-279 8xRS422 1Mbaud Port 1 and 2 */
1121 +#define BRAINBOXES_US_279_2_PID 0x2022 /* US-279 8xRS422 1Mbaud Port 3 and 4 */
1122 +#define BRAINBOXES_US_279_3_PID 0x2023 /* US-279 8xRS422 1Mbaud Port 5 and 6 */
1123 +#define BRAINBOXES_US_279_4_PID 0x2024 /* US-279 8xRS422 1Mbaud Port 7 and 8 */
1124 +#define BRAINBOXES_US_346_1_PID 0x3011 /* US-346 4xRS422/485 1Mbaud Port 1 and 2 */
1125 +#define BRAINBOXES_US_346_2_PID 0x3012 /* US-346 4xRS422/485 1Mbaud Port 3 and 4 */
1126 +#define BRAINBOXES_US_257_PID 0x5001 /* US-257 2xRS232 1Mbaud */
1127 +#define BRAINBOXES_US_313_PID 0x6001 /* US-313 2xRS422/485 1Mbaud */
1128 +#define BRAINBOXES_US_357_PID 0x7001 /* US_357 1xRS232/422/485 */
1129 +#define BRAINBOXES_US_842_1_PID 0x8001 /* US-842 8xRS422/485 1Mbaud Port 1 and 2 */
1130 +#define BRAINBOXES_US_842_2_PID 0x8002 /* US-842 8xRS422/485 1Mbaud Port 3 and 4 */
1131 +#define BRAINBOXES_US_842_3_PID 0x8003 /* US-842 8xRS422/485 1Mbaud Port 5 and 6 */
1132 +#define BRAINBOXES_US_842_4_PID 0x8004 /* US-842 8xRS422/485 1Mbaud Port 7 and 8 */
1133 +#define BRAINBOXES_US_160_1_PID 0x9001 /* US-160 16xRS232 1Mbaud Port 1 and 2 */
1134 +#define BRAINBOXES_US_160_2_PID 0x9002 /* US-160 16xRS232 1Mbaud Port 3 and 4 */
1135 +#define BRAINBOXES_US_160_3_PID 0x9003 /* US-160 16xRS232 1Mbaud Port 5 and 6 */
1136 +#define BRAINBOXES_US_160_4_PID 0x9004 /* US-160 16xRS232 1Mbaud Port 7 and 8 */
1137 +#define BRAINBOXES_US_160_5_PID 0x9005 /* US-160 16xRS232 1Mbaud Port 9 and 10 */
1138 +#define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */
1139 +#define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */
1140 +#define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */
1141 diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
1142 index 1be6ba7bee27..c5c9cbf107d1 100644
1143 --- a/drivers/usb/serial/io_ti.c
1144 +++ b/drivers/usb/serial/io_ti.c
1145 @@ -29,6 +29,7 @@
1146 #include <linux/spinlock.h>
1147 #include <linux/mutex.h>
1148 #include <linux/serial.h>
1149 +#include <linux/swab.h>
1150 #include <linux/kfifo.h>
1151 #include <linux/ioctl.h>
1152 #include <linux/firmware.h>
1153 @@ -284,7 +285,7 @@ static int read_download_mem(struct usb_device *dev, int start_address,
1154 {
1155 int status = 0;
1156 __u8 read_length;
1157 - __be16 be_start_address;
1158 + u16 be_start_address;
1159
1160 dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length);
1161
1162 @@ -300,10 +301,14 @@ static int read_download_mem(struct usb_device *dev, int start_address,
1163 if (read_length > 1) {
1164 dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, read_length);
1165 }
1166 - be_start_address = cpu_to_be16(start_address);
1167 + /*
1168 + * NOTE: Must use swab as wIndex is sent in little-endian
1169 + * byte order regardless of host byte order.
1170 + */
1171 + be_start_address = swab16((u16)start_address);
1172 status = ti_vread_sync(dev, UMPC_MEMORY_READ,
1173 (__u16)address_type,
1174 - (__force __u16)be_start_address,
1175 + be_start_address,
1176 buffer, read_length);
1177
1178 if (status) {
1179 @@ -400,7 +405,7 @@ static int write_i2c_mem(struct edgeport_serial *serial,
1180 struct device *dev = &serial->serial->dev->dev;
1181 int status = 0;
1182 int write_length;
1183 - __be16 be_start_address;
1184 + u16 be_start_address;
1185
1186 /* We can only send a maximum of 1 aligned byte page at a time */
1187
1188 @@ -415,11 +420,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
1189 __func__, start_address, write_length);
1190 usb_serial_debug_data(dev, __func__, write_length, buffer);
1191
1192 - /* Write first page */
1193 - be_start_address = cpu_to_be16(start_address);
1194 + /*
1195 + * Write first page.
1196 + *
1197 + * NOTE: Must use swab as wIndex is sent in little-endian byte order
1198 + * regardless of host byte order.
1199 + */
1200 + be_start_address = swab16((u16)start_address);
1201 status = ti_vsend_sync(serial->serial->dev,
1202 UMPC_MEMORY_WRITE, (__u16)address_type,
1203 - (__force __u16)be_start_address,
1204 + be_start_address,
1205 buffer, write_length);
1206 if (status) {
1207 dev_dbg(dev, "%s - ERROR %d\n", __func__, status);
1208 @@ -442,11 +452,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
1209 __func__, start_address, write_length);
1210 usb_serial_debug_data(dev, __func__, write_length, buffer);
1211
1212 - /* Write next page */
1213 - be_start_address = cpu_to_be16(start_address);
1214 + /*
1215 + * Write next page.
1216 + *
1217 + * NOTE: Must use swab as wIndex is sent in little-endian byte
1218 + * order regardless of host byte order.
1219 + */
1220 + be_start_address = swab16((u16)start_address);
1221 status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
1222 (__u16)address_type,
1223 - (__force __u16)be_start_address,
1224 + be_start_address,
1225 buffer, write_length);
1226 if (status) {
1227 dev_err(dev, "%s - ERROR %d\n", __func__, status);
1228 @@ -593,8 +608,8 @@ static int get_descriptor_addr(struct edgeport_serial *serial,
1229 if (rom_desc->Type == desc_type)
1230 return start_address;
1231
1232 - start_address = start_address + sizeof(struct ti_i2c_desc)
1233 - + rom_desc->Size;
1234 + start_address = start_address + sizeof(struct ti_i2c_desc) +
1235 + le16_to_cpu(rom_desc->Size);
1236
1237 } while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);
1238
1239 @@ -607,7 +622,7 @@ static int valid_csum(struct ti_i2c_desc *rom_desc, __u8 *buffer)
1240 __u16 i;
1241 __u8 cs = 0;
1242
1243 - for (i = 0; i < rom_desc->Size; i++)
1244 + for (i = 0; i < le16_to_cpu(rom_desc->Size); i++)
1245 cs = (__u8)(cs + buffer[i]);
1246
1247 if (cs != rom_desc->CheckSum) {
1248 @@ -661,7 +676,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
1249 break;
1250
1251 if ((start_address + sizeof(struct ti_i2c_desc) +
1252 - rom_desc->Size) > TI_MAX_I2C_SIZE) {
1253 + le16_to_cpu(rom_desc->Size)) > TI_MAX_I2C_SIZE) {
1254 status = -ENODEV;
1255 dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__);
1256 break;
1257 @@ -676,7 +691,8 @@ static int check_i2c_image(struct edgeport_serial *serial)
1258 /* Read the descriptor data */
1259 status = read_rom(serial, start_address +
1260 sizeof(struct ti_i2c_desc),
1261 - rom_desc->Size, buffer);
1262 + le16_to_cpu(rom_desc->Size),
1263 + buffer);
1264 if (status)
1265 break;
1266
1267 @@ -685,7 +701,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
1268 break;
1269 }
1270 start_address = start_address + sizeof(struct ti_i2c_desc) +
1271 - rom_desc->Size;
1272 + le16_to_cpu(rom_desc->Size);
1273
1274 } while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
1275 (start_address < TI_MAX_I2C_SIZE));
1276 @@ -724,7 +740,7 @@ static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer)
1277
1278 /* Read the descriptor data */
1279 status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
1280 - rom_desc->Size, buffer);
1281 + le16_to_cpu(rom_desc->Size), buffer);
1282 if (status)
1283 goto exit;
1284
1285 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1286 index 68fc9fe65936..f213ee978516 100644
1287 --- a/drivers/usb/serial/option.c
1288 +++ b/drivers/usb/serial/option.c
1289 @@ -234,8 +234,31 @@ static void option_instat_callback(struct urb *urb);
1290 #define QUALCOMM_VENDOR_ID 0x05C6
1291
1292 #define CMOTECH_VENDOR_ID 0x16d8
1293 -#define CMOTECH_PRODUCT_6008 0x6008
1294 -#define CMOTECH_PRODUCT_6280 0x6280
1295 +#define CMOTECH_PRODUCT_6001 0x6001
1296 +#define CMOTECH_PRODUCT_CMU_300 0x6002
1297 +#define CMOTECH_PRODUCT_6003 0x6003
1298 +#define CMOTECH_PRODUCT_6004 0x6004
1299 +#define CMOTECH_PRODUCT_6005 0x6005
1300 +#define CMOTECH_PRODUCT_CGU_628A 0x6006
1301 +#define CMOTECH_PRODUCT_CHE_628S 0x6007
1302 +#define CMOTECH_PRODUCT_CMU_301 0x6008
1303 +#define CMOTECH_PRODUCT_CHU_628 0x6280
1304 +#define CMOTECH_PRODUCT_CHU_628S 0x6281
1305 +#define CMOTECH_PRODUCT_CDU_680 0x6803
1306 +#define CMOTECH_PRODUCT_CDU_685A 0x6804
1307 +#define CMOTECH_PRODUCT_CHU_720S 0x7001
1308 +#define CMOTECH_PRODUCT_7002 0x7002
1309 +#define CMOTECH_PRODUCT_CHU_629K 0x7003
1310 +#define CMOTECH_PRODUCT_7004 0x7004
1311 +#define CMOTECH_PRODUCT_7005 0x7005
1312 +#define CMOTECH_PRODUCT_CGU_629 0x7006
1313 +#define CMOTECH_PRODUCT_CHU_629S 0x700a
1314 +#define CMOTECH_PRODUCT_CHU_720I 0x7211
1315 +#define CMOTECH_PRODUCT_7212 0x7212
1316 +#define CMOTECH_PRODUCT_7213 0x7213
1317 +#define CMOTECH_PRODUCT_7251 0x7251
1318 +#define CMOTECH_PRODUCT_7252 0x7252
1319 +#define CMOTECH_PRODUCT_7253 0x7253
1320
1321 #define TELIT_VENDOR_ID 0x1bc7
1322 #define TELIT_PRODUCT_UC864E 0x1003
1323 @@ -243,6 +266,7 @@ static void option_instat_callback(struct urb *urb);
1324 #define TELIT_PRODUCT_CC864_DUAL 0x1005
1325 #define TELIT_PRODUCT_CC864_SINGLE 0x1006
1326 #define TELIT_PRODUCT_DE910_DUAL 0x1010
1327 +#define TELIT_PRODUCT_UE910_V2 0x1012
1328 #define TELIT_PRODUCT_LE920 0x1200
1329
1330 /* ZTE PRODUCTS */
1331 @@ -286,6 +310,7 @@ static void option_instat_callback(struct urb *urb);
1332 #define ALCATEL_PRODUCT_X060S_X200 0x0000
1333 #define ALCATEL_PRODUCT_X220_X500D 0x0017
1334 #define ALCATEL_PRODUCT_L100V 0x011e
1335 +#define ALCATEL_PRODUCT_L800MA 0x0203
1336
1337 #define PIRELLI_VENDOR_ID 0x1266
1338 #define PIRELLI_PRODUCT_C100_1 0x1002
1339 @@ -348,6 +373,7 @@ static void option_instat_callback(struct urb *urb);
1340 #define OLIVETTI_PRODUCT_OLICARD100 0xc000
1341 #define OLIVETTI_PRODUCT_OLICARD145 0xc003
1342 #define OLIVETTI_PRODUCT_OLICARD200 0xc005
1343 +#define OLIVETTI_PRODUCT_OLICARD500 0xc00b
1344
1345 /* Celot products */
1346 #define CELOT_VENDOR_ID 0x211f
1347 @@ -501,6 +527,10 @@ static const struct option_blacklist_info huawei_cdc12_blacklist = {
1348 .reserved = BIT(1) | BIT(2),
1349 };
1350
1351 +static const struct option_blacklist_info net_intf0_blacklist = {
1352 + .reserved = BIT(0),
1353 +};
1354 +
1355 static const struct option_blacklist_info net_intf1_blacklist = {
1356 .reserved = BIT(1),
1357 };
1358 @@ -1034,13 +1064,53 @@ static const struct usb_device_id option_ids[] = {
1359 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1360 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1361 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1362 - { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
1363 - { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
1364 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1365 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1366 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
1367 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1368 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
1369 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
1370 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
1371 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
1372 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1373 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
1374 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1375 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
1376 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1377 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
1378 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
1379 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
1380 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
1381 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1382 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
1383 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1384 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
1385 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1386 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
1387 + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1388 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
1389 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
1390 + .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
1391 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
1392 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1393 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
1394 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1395 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
1396 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1397 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
1398 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1399 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
1400 + .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1401 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
1402 + .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1403 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
1404 + .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1405 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
1406 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
1407 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
1408 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
1409 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
1410 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
1411 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
1412 .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
1413 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
1414 @@ -1498,6 +1568,8 @@ static const struct usb_device_id option_ids[] = {
1415 .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
1416 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
1417 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1418 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
1419 + .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1420 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
1421 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
1422 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
1423 @@ -1543,6 +1615,9 @@ static const struct usb_device_id option_ids[] = {
1424 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
1425 .driver_info = (kernel_ulong_t)&net_intf6_blacklist
1426 },
1427 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
1428 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist
1429 + },
1430 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
1431 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1432 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
1433 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
1434 index 968a40201e5f..7ed681a714a5 100644
1435 --- a/drivers/usb/serial/qcserial.c
1436 +++ b/drivers/usb/serial/qcserial.c
1437 @@ -136,9 +136,18 @@ static const struct usb_device_id id_table[] = {
1438 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 0)}, /* Sierra Wireless MC7710 Device Management */
1439 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 2)}, /* Sierra Wireless MC7710 NMEA */
1440 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 3)}, /* Sierra Wireless MC7710 Modem */
1441 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 0)}, /* Sierra Wireless MC73xx Device Management */
1442 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 2)}, /* Sierra Wireless MC73xx NMEA */
1443 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 3)}, /* Sierra Wireless MC73xx Modem */
1444 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */
1445 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */
1446 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */
1447 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)}, /* Sierra Wireless EM7355 Device Management */
1448 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)}, /* Sierra Wireless EM7355 NMEA */
1449 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)}, /* Sierra Wireless EM7355 Modem */
1450 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)}, /* Sierra Wireless MC7305/MC7355 Device Management */
1451 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)}, /* Sierra Wireless MC7305/MC7355 NMEA */
1452 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)}, /* Sierra Wireless MC7305/MC7355 Modem */
1453 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
1454 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
1455 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
1456 diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
1457 index 8894665cd610..2df566c0e9e8 100644
1458 --- a/drivers/usb/serial/sierra.c
1459 +++ b/drivers/usb/serial/sierra.c
1460 @@ -291,7 +291,6 @@ static const struct usb_device_id id_table[] = {
1461 { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
1462 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
1463 },
1464 - { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
1465
1466 { }
1467 };
1468 diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
1469 index 5f6b1ff9d29e..c1032d42b9d5 100644
1470 --- a/drivers/usb/serial/usb-serial.c
1471 +++ b/drivers/usb/serial/usb-serial.c
1472 @@ -1367,10 +1367,12 @@ static int usb_serial_register(struct usb_serial_driver *driver)
1473 static void usb_serial_deregister(struct usb_serial_driver *device)
1474 {
1475 pr_info("USB Serial deregistering driver %s\n", device->description);
1476 +
1477 mutex_lock(&table_lock);
1478 list_del(&device->driver_list);
1479 - usb_serial_bus_deregister(device);
1480 mutex_unlock(&table_lock);
1481 +
1482 + usb_serial_bus_deregister(device);
1483 }
1484
1485 /**
1486 diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
1487 index db0cf536de18..11952b6dc224 100644
1488 --- a/drivers/usb/serial/usb_wwan.c
1489 +++ b/drivers/usb/serial/usb_wwan.c
1490 @@ -470,6 +470,9 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
1491 int err;
1492 int i;
1493
1494 + if (!port->bulk_in_size || !port->bulk_out_size)
1495 + return -ENODEV;
1496 +
1497 portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
1498 if (!portdata)
1499 return -ENOMEM;
1500 @@ -477,9 +480,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
1501 init_usb_anchor(&portdata->delayed);
1502
1503 for (i = 0; i < N_IN_URB; i++) {
1504 - if (!port->bulk_in_size)
1505 - break;
1506 -
1507 buffer = (u8 *)__get_free_page(GFP_KERNEL);
1508 if (!buffer)
1509 goto bail_out_error;
1510 @@ -493,9 +493,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
1511 }
1512
1513 for (i = 0; i < N_OUT_URB; i++) {
1514 - if (!port->bulk_out_size)
1515 - break;
1516 -
1517 buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
1518 if (!buffer)
1519 goto bail_out_error2;
1520 diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
1521 index ba77f753649c..a78ca6a01094 100644
1522 --- a/drivers/video/tgafb.c
1523 +++ b/drivers/video/tgafb.c
1524 @@ -188,6 +188,8 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
1525
1526 if (var->xres_virtual != var->xres || var->yres_virtual != var->yres)
1527 return -EINVAL;
1528 + if (var->xres * var->yres * (var->bits_per_pixel >> 3) > info->fix.smem_len)
1529 + return -EINVAL;
1530 if (var->nonstd)
1531 return -EINVAL;
1532 if (1000000000 / var->pixclock > TGA_PLL_MAX_FREQ)
1533 @@ -268,6 +270,7 @@ tgafb_set_par(struct fb_info *info)
1534 par->yres = info->var.yres;
1535 par->pll_freq = pll_freq = 1000000000 / info->var.pixclock;
1536 par->bits_per_pixel = info->var.bits_per_pixel;
1537 + info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
1538
1539 tga_type = par->tga_type;
1540
1541 @@ -1314,6 +1317,7 @@ tgafb_init_fix(struct fb_info *info)
1542 int tga_bus_tc = TGA_BUS_TC(par->dev);
1543 u8 tga_type = par->tga_type;
1544 const char *tga_type_name = NULL;
1545 + unsigned memory_size;
1546
1547 switch (tga_type) {
1548 case TGA_TYPE_8PLANE:
1549 @@ -1321,21 +1325,25 @@ tgafb_init_fix(struct fb_info *info)
1550 tga_type_name = "Digital ZLXp-E1";
1551 if (tga_bus_tc)
1552 tga_type_name = "Digital ZLX-E1";
1553 + memory_size = 2097152;
1554 break;
1555 case TGA_TYPE_24PLANE:
1556 if (tga_bus_pci)
1557 tga_type_name = "Digital ZLXp-E2";
1558 if (tga_bus_tc)
1559 tga_type_name = "Digital ZLX-E2";
1560 + memory_size = 8388608;
1561 break;
1562 case TGA_TYPE_24PLUSZ:
1563 if (tga_bus_pci)
1564 tga_type_name = "Digital ZLXp-E3";
1565 if (tga_bus_tc)
1566 tga_type_name = "Digital ZLX-E3";
1567 + memory_size = 16777216;
1568 break;
1569 default:
1570 tga_type_name = "Unknown";
1571 + memory_size = 16777216;
1572 break;
1573 }
1574
1575 @@ -1347,9 +1355,8 @@ tgafb_init_fix(struct fb_info *info)
1576 ? FB_VISUAL_PSEUDOCOLOR
1577 : FB_VISUAL_DIRECTCOLOR);
1578
1579 - info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
1580 info->fix.smem_start = (size_t) par->tga_fb_base;
1581 - info->fix.smem_len = info->fix.line_length * par->yres;
1582 + info->fix.smem_len = memory_size;
1583 info->fix.mmio_start = (size_t) par->tga_regs_base;
1584 info->fix.mmio_len = 512;
1585
1586 @@ -1473,6 +1480,9 @@ static int tgafb_register(struct device *dev)
1587 modedb_tga = &modedb_tc;
1588 modedbsize_tga = 1;
1589 }
1590 +
1591 + tgafb_init_fix(info);
1592 +
1593 ret = fb_find_mode(&info->var, info,
1594 mode_option ? mode_option : mode_option_tga,
1595 modedb_tga, modedbsize_tga, NULL,
1596 @@ -1490,7 +1500,6 @@ static int tgafb_register(struct device *dev)
1597 }
1598
1599 tgafb_set_par(info);
1600 - tgafb_init_fix(info);
1601
1602 if (register_framebuffer(info) < 0) {
1603 printk(KERN_ERR "tgafb: Could not register framebuffer\n");
1604 diff --git a/fs/file_table.c b/fs/file_table.c
1605 index 485dc0eddd67..54a34be444f9 100644
1606 --- a/fs/file_table.c
1607 +++ b/fs/file_table.c
1608 @@ -211,10 +211,10 @@ static void drop_file_write_access(struct file *file)
1609 struct dentry *dentry = file->f_path.dentry;
1610 struct inode *inode = dentry->d_inode;
1611
1612 - put_write_access(inode);
1613 -
1614 if (special_file(inode->i_mode))
1615 return;
1616 +
1617 + put_write_access(inode);
1618 if (file_check_writeable(file) != 0)
1619 return;
1620 __mnt_drop_write(mnt);
1621 diff --git a/fs/open.c b/fs/open.c
1622 index 8c741002f947..86092bde31f4 100644
1623 --- a/fs/open.c
1624 +++ b/fs/open.c
1625 @@ -628,23 +628,12 @@ out:
1626 static inline int __get_file_write_access(struct inode *inode,
1627 struct vfsmount *mnt)
1628 {
1629 - int error;
1630 - error = get_write_access(inode);
1631 + int error = get_write_access(inode);
1632 if (error)
1633 return error;
1634 - /*
1635 - * Do not take mount writer counts on
1636 - * special files since no writes to
1637 - * the mount itself will occur.
1638 - */
1639 - if (!special_file(inode->i_mode)) {
1640 - /*
1641 - * Balanced in __fput()
1642 - */
1643 - error = __mnt_want_write(mnt);
1644 - if (error)
1645 - put_write_access(inode);
1646 - }
1647 + error = __mnt_want_write(mnt);
1648 + if (error)
1649 + put_write_access(inode);
1650 return error;
1651 }
1652
1653 @@ -677,12 +666,11 @@ static int do_dentry_open(struct file *f,
1654
1655 path_get(&f->f_path);
1656 inode = f->f_inode = f->f_path.dentry->d_inode;
1657 - if (f->f_mode & FMODE_WRITE) {
1658 + if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
1659 error = __get_file_write_access(inode, f->f_path.mnt);
1660 if (error)
1661 goto cleanup_file;
1662 - if (!special_file(inode->i_mode))
1663 - file_take_write(f);
1664 + file_take_write(f);
1665 }
1666
1667 f->f_mapping = inode->i_mapping;
1668 @@ -723,7 +711,6 @@ cleanup_all:
1669 fops_put(f->f_op);
1670 file_sb_list_del(f);
1671 if (f->f_mode & FMODE_WRITE) {
1672 - put_write_access(inode);
1673 if (!special_file(inode->i_mode)) {
1674 /*
1675 * We don't consider this a real
1676 @@ -731,6 +718,7 @@ cleanup_all:
1677 * because it all happenend right
1678 * here, so just reset the state.
1679 */
1680 + put_write_access(inode);
1681 file_reset_write(f);
1682 __mnt_drop_write(f->f_path.mnt);
1683 }
1684 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
1685 index b58268a5ddd4..17bccd3a4b03 100644
1686 --- a/include/asm-generic/pgtable.h
1687 +++ b/include/asm-generic/pgtable.h
1688 @@ -620,32 +620,47 @@ static inline int pmd_numa(pmd_t pmd)
1689 #ifndef pte_mknonnuma
1690 static inline pte_t pte_mknonnuma(pte_t pte)
1691 {
1692 - pte = pte_clear_flags(pte, _PAGE_NUMA);
1693 - return pte_set_flags(pte, _PAGE_PRESENT|_PAGE_ACCESSED);
1694 + pteval_t val = pte_val(pte);
1695 +
1696 + val &= ~_PAGE_NUMA;
1697 + val |= (_PAGE_PRESENT|_PAGE_ACCESSED);
1698 + return __pte(val);
1699 }
1700 #endif
1701
1702 #ifndef pmd_mknonnuma
1703 static inline pmd_t pmd_mknonnuma(pmd_t pmd)
1704 {
1705 - pmd = pmd_clear_flags(pmd, _PAGE_NUMA);
1706 - return pmd_set_flags(pmd, _PAGE_PRESENT|_PAGE_ACCESSED);
1707 + pmdval_t val = pmd_val(pmd);
1708 +
1709 + val &= ~_PAGE_NUMA;
1710 + val |= (_PAGE_PRESENT|_PAGE_ACCESSED);
1711 +
1712 + return __pmd(val);
1713 }
1714 #endif
1715
1716 #ifndef pte_mknuma
1717 static inline pte_t pte_mknuma(pte_t pte)
1718 {
1719 - pte = pte_set_flags(pte, _PAGE_NUMA);
1720 - return pte_clear_flags(pte, _PAGE_PRESENT);
1721 + pteval_t val = pte_val(pte);
1722 +
1723 + val &= ~_PAGE_PRESENT;
1724 + val |= _PAGE_NUMA;
1725 +
1726 + return __pte(val);
1727 }
1728 #endif
1729
1730 #ifndef pmd_mknuma
1731 static inline pmd_t pmd_mknuma(pmd_t pmd)
1732 {
1733 - pmd = pmd_set_flags(pmd, _PAGE_NUMA);
1734 - return pmd_clear_flags(pmd, _PAGE_PRESENT);
1735 + pmdval_t val = pmd_val(pmd);
1736 +
1737 + val &= ~_PAGE_PRESENT;
1738 + val |= _PAGE_NUMA;
1739 +
1740 + return __pmd(val);
1741 }
1742 #endif
1743 #else
1744 diff --git a/include/linux/list.h b/include/linux/list.h
1745 index b83e5657365a..83a9576f479f 100644
1746 --- a/include/linux/list.h
1747 +++ b/include/linux/list.h
1748 @@ -373,6 +373,22 @@ static inline void list_splice_tail_init(struct list_head *list,
1749 (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
1750
1751 /**
1752 + * list_next_entry - get the next element in list
1753 + * @pos: the type * to cursor
1754 + * @member: the name of the list_struct within the struct.
1755 + */
1756 +#define list_next_entry(pos, member) \
1757 + list_entry((pos)->member.next, typeof(*(pos)), member)
1758 +
1759 +/**
1760 + * list_prev_entry - get the prev element in list
1761 + * @pos: the type * to cursor
1762 + * @member: the name of the list_struct within the struct.
1763 + */
1764 +#define list_prev_entry(pos, member) \
1765 + list_entry((pos)->member.prev, typeof(*(pos)), member)
1766 +
1767 +/**
1768 * list_for_each - iterate over a list
1769 * @pos: the &struct list_head to use as a loop cursor.
1770 * @head: the head for your list.
1771 diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
1772 index 54f91d35e5fd..302ab805b0bb 100644
1773 --- a/include/linux/sock_diag.h
1774 +++ b/include/linux/sock_diag.h
1775 @@ -23,7 +23,7 @@ int sock_diag_check_cookie(void *sk, __u32 *cookie);
1776 void sock_diag_save_cookie(void *sk, __u32 *cookie);
1777
1778 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
1779 -int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
1780 +int sock_diag_put_filterinfo(struct sock *sk,
1781 struct sk_buff *skb, int attrtype);
1782
1783 #endif
1784 diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
1785 index b906f4a131a4..8d977b343647 100644
1786 --- a/include/net/ip6_route.h
1787 +++ b/include/net/ip6_route.h
1788 @@ -32,6 +32,11 @@ struct route_info {
1789 #define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
1790 #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
1791
1792 +/* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
1793 + * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
1794 + */
1795 +#define IP6_MAX_MTU (0xFFFF + sizeof(struct ipv6hdr))
1796 +
1797 /*
1798 * rt6_srcprefs2flags() and rt6_flags2srcprefs() translate
1799 * between IPV6_ADDR_PREFERENCES socket option values
1800 diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
1801 index 331310851cfb..86dd7dd3d617 100644
1802 --- a/include/net/netfilter/nf_conntrack_extend.h
1803 +++ b/include/net/netfilter/nf_conntrack_extend.h
1804 @@ -41,8 +41,8 @@ enum nf_ct_ext_id {
1805 /* Extensions: optional stuff which isn't permanently in struct. */
1806 struct nf_ct_ext {
1807 struct rcu_head rcu;
1808 - u8 offset[NF_CT_EXT_NUM];
1809 - u8 len;
1810 + u16 offset[NF_CT_EXT_NUM];
1811 + u16 len;
1812 char data[0];
1813 };
1814
1815 diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
1816 index 1bd4c4144fe8..da6b9a01ff75 100644
1817 --- a/include/net/sctp/structs.h
1818 +++ b/include/net/sctp/structs.h
1819 @@ -1252,6 +1252,7 @@ struct sctp_endpoint {
1820 /* SCTP-AUTH: endpoint shared keys */
1821 struct list_head endpoint_shared_keys;
1822 __u16 active_key_id;
1823 + __u8 auth_enable;
1824 };
1825
1826 /* Recover the outter endpoint structure. */
1827 @@ -1280,7 +1281,8 @@ struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
1828 int sctp_has_association(struct net *net, const union sctp_addr *laddr,
1829 const union sctp_addr *paddr);
1830
1831 -int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
1832 +int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
1833 + const struct sctp_association *asoc,
1834 sctp_cid_t, sctp_init_chunk_t *peer_init,
1835 struct sctp_chunk *chunk, struct sctp_chunk **err_chunk);
1836 int sctp_process_init(struct sctp_association *, struct sctp_chunk *chunk,
1837 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
1838 index cc645876d147..cc92ef3df62e 100644
1839 --- a/include/scsi/scsi_device.h
1840 +++ b/include/scsi/scsi_device.h
1841 @@ -248,7 +248,7 @@ struct scsi_target {
1842 struct list_head siblings;
1843 struct list_head devices;
1844 struct device dev;
1845 - unsigned int reap_ref; /* protected by the host lock */
1846 + struct kref reap_ref; /* last put renders target invisible */
1847 unsigned int channel;
1848 unsigned int id; /* target id ... replace
1849 * scsi_device.id eventually */
1850 @@ -272,7 +272,6 @@ struct scsi_target {
1851 #define SCSI_DEFAULT_TARGET_BLOCKED 3
1852
1853 char scsi_level;
1854 - struct execute_work ew;
1855 enum scsi_target_state state;
1856 void *hostdata; /* available to low-level driver */
1857 unsigned long starget_data[0]; /* for the transport */
1858 diff --git a/include/trace/events/block.h b/include/trace/events/block.h
1859 index 60ae7c3db912..2e96e2bb1529 100644
1860 --- a/include/trace/events/block.h
1861 +++ b/include/trace/events/block.h
1862 @@ -132,6 +132,7 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
1863 * block_rq_complete - block IO operation completed by device driver
1864 * @q: queue containing the block operation request
1865 * @rq: block operations request
1866 + * @nr_bytes: number of completed bytes
1867 *
1868 * The block_rq_complete tracepoint event indicates that some portion
1869 * of operation request has been completed by the device driver. If
1870 @@ -139,11 +140,37 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
1871 * do for the request. If @rq->bio is non-NULL then there is
1872 * additional work required to complete the request.
1873 */
1874 -DEFINE_EVENT(block_rq_with_error, block_rq_complete,
1875 +TRACE_EVENT(block_rq_complete,
1876
1877 - TP_PROTO(struct request_queue *q, struct request *rq),
1878 + TP_PROTO(struct request_queue *q, struct request *rq,
1879 + unsigned int nr_bytes),
1880
1881 - TP_ARGS(q, rq)
1882 + TP_ARGS(q, rq, nr_bytes),
1883 +
1884 + TP_STRUCT__entry(
1885 + __field( dev_t, dev )
1886 + __field( sector_t, sector )
1887 + __field( unsigned int, nr_sector )
1888 + __field( int, errors )
1889 + __array( char, rwbs, RWBS_LEN )
1890 + __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
1891 + ),
1892 +
1893 + TP_fast_assign(
1894 + __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
1895 + __entry->sector = blk_rq_pos(rq);
1896 + __entry->nr_sector = nr_bytes >> 9;
1897 + __entry->errors = rq->errors;
1898 +
1899 + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
1900 + blk_dump_cmd(__get_str(cmd), rq);
1901 + ),
1902 +
1903 + TP_printk("%d,%d %s (%s) %llu + %u [%d]",
1904 + MAJOR(__entry->dev), MINOR(__entry->dev),
1905 + __entry->rwbs, __get_str(cmd),
1906 + (unsigned long long)__entry->sector,
1907 + __entry->nr_sector, __entry->errors)
1908 );
1909
1910 DECLARE_EVENT_CLASS(block_rq,
1911 diff --git a/kernel/events/core.c b/kernel/events/core.c
1912 index f8eb2b154bdb..ac9b8cce3df2 100644
1913 --- a/kernel/events/core.c
1914 +++ b/kernel/events/core.c
1915 @@ -2016,9 +2016,6 @@ static void __perf_event_sync_stat(struct perf_event *event,
1916 perf_event_update_userpage(next_event);
1917 }
1918
1919 -#define list_next_entry(pos, member) \
1920 - list_entry(pos->member.next, typeof(*pos), member)
1921 -
1922 static void perf_event_sync_stat(struct perf_event_context *ctx,
1923 struct perf_event_context *next_ctx)
1924 {
1925 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
1926 index b8b8560bfb95..686417ba5cd1 100644
1927 --- a/kernel/trace/blktrace.c
1928 +++ b/kernel/trace/blktrace.c
1929 @@ -685,6 +685,7 @@ void blk_trace_shutdown(struct request_queue *q)
1930 * blk_add_trace_rq - Add a trace for a request oriented action
1931 * @q: queue the io is for
1932 * @rq: the source request
1933 + * @nr_bytes: number of completed bytes
1934 * @what: the action
1935 *
1936 * Description:
1937 @@ -692,7 +693,7 @@ void blk_trace_shutdown(struct request_queue *q)
1938 *
1939 **/
1940 static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
1941 - u32 what)
1942 + unsigned int nr_bytes, u32 what)
1943 {
1944 struct blk_trace *bt = q->blk_trace;
1945
1946 @@ -701,11 +702,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
1947
1948 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
1949 what |= BLK_TC_ACT(BLK_TC_PC);
1950 - __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
1951 + __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
1952 what, rq->errors, rq->cmd_len, rq->cmd);
1953 } else {
1954 what |= BLK_TC_ACT(BLK_TC_FS);
1955 - __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
1956 + __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
1957 rq->cmd_flags, what, rq->errors, 0, NULL);
1958 }
1959 }
1960 @@ -713,33 +714,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
1961 static void blk_add_trace_rq_abort(void *ignore,
1962 struct request_queue *q, struct request *rq)
1963 {
1964 - blk_add_trace_rq(q, rq, BLK_TA_ABORT);
1965 + blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
1966 }
1967
1968 static void blk_add_trace_rq_insert(void *ignore,
1969 struct request_queue *q, struct request *rq)
1970 {
1971 - blk_add_trace_rq(q, rq, BLK_TA_INSERT);
1972 + blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
1973 }
1974
1975 static void blk_add_trace_rq_issue(void *ignore,
1976 struct request_queue *q, struct request *rq)
1977 {
1978 - blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
1979 + blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
1980 }
1981
1982 static void blk_add_trace_rq_requeue(void *ignore,
1983 struct request_queue *q,
1984 struct request *rq)
1985 {
1986 - blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
1987 + blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
1988 }
1989
1990 static void blk_add_trace_rq_complete(void *ignore,
1991 struct request_queue *q,
1992 - struct request *rq)
1993 + struct request *rq,
1994 + unsigned int nr_bytes)
1995 {
1996 - blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
1997 + blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
1998 }
1999
2000 /**
2001 diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
2002 index 031cc5655a51..63630aef3bd3 100644
2003 --- a/kernel/tracepoint.c
2004 +++ b/kernel/tracepoint.c
2005 @@ -641,6 +641,9 @@ static int tracepoint_module_coming(struct module *mod)
2006 struct tp_module *tp_mod, *iter;
2007 int ret = 0;
2008
2009 + if (!mod->num_tracepoints)
2010 + return 0;
2011 +
2012 /*
2013 * We skip modules that taint the kernel, especially those with different
2014 * module headers (for forced load), to make sure we don't cause a crash.
2015 @@ -684,6 +687,9 @@ static int tracepoint_module_going(struct module *mod)
2016 {
2017 struct tp_module *pos;
2018
2019 + if (!mod->num_tracepoints)
2020 + return 0;
2021 +
2022 mutex_lock(&tracepoints_mutex);
2023 tracepoint_update_probe_range(mod->tracepoints_ptrs,
2024 mod->tracepoints_ptrs + mod->num_tracepoints);
2025 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2026 index 8a7f27b42131..de73c9d144e1 100644
2027 --- a/mm/hugetlb.c
2028 +++ b/mm/hugetlb.c
2029 @@ -1100,6 +1100,7 @@ static void return_unused_surplus_pages(struct hstate *h,
2030 while (nr_pages--) {
2031 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
2032 break;
2033 + cond_resched_lock(&hugetlb_lock);
2034 }
2035 }
2036
2037 diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
2038 index cf35f383db4c..698e922f41ea 100644
2039 --- a/net/8021q/vlan_dev.c
2040 +++ b/net/8021q/vlan_dev.c
2041 @@ -512,10 +512,48 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
2042 }
2043 }
2044
2045 +static int vlan_calculate_locking_subclass(struct net_device *real_dev)
2046 +{
2047 + int subclass = 0;
2048 +
2049 + while (is_vlan_dev(real_dev)) {
2050 + subclass++;
2051 + real_dev = vlan_dev_priv(real_dev)->real_dev;
2052 + }
2053 +
2054 + return subclass;
2055 +}
2056 +
2057 +static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
2058 +{
2059 + int err = 0, subclass;
2060 +
2061 + subclass = vlan_calculate_locking_subclass(to);
2062 +
2063 + spin_lock_nested(&to->addr_list_lock, subclass);
2064 + err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
2065 + if (!err)
2066 + __dev_set_rx_mode(to);
2067 + spin_unlock(&to->addr_list_lock);
2068 +}
2069 +
2070 +static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
2071 +{
2072 + int err = 0, subclass;
2073 +
2074 + subclass = vlan_calculate_locking_subclass(to);
2075 +
2076 + spin_lock_nested(&to->addr_list_lock, subclass);
2077 + err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
2078 + if (!err)
2079 + __dev_set_rx_mode(to);
2080 + spin_unlock(&to->addr_list_lock);
2081 +}
2082 +
2083 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
2084 {
2085 - dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
2086 - dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
2087 + vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
2088 + vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
2089 }
2090
2091 /*
2092 @@ -624,9 +662,7 @@ static int vlan_dev_init(struct net_device *dev)
2093
2094 SET_NETDEV_DEVTYPE(dev, &vlan_type);
2095
2096 - if (is_vlan_dev(real_dev))
2097 - subclass = 1;
2098 -
2099 + subclass = vlan_calculate_locking_subclass(dev);
2100 vlan_dev_set_lockdep_class(dev, subclass);
2101
2102 vlan_dev_priv(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
2103 diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
2104 index 828e2bcc1f52..0a3bc82782cf 100644
2105 --- a/net/bridge/br_input.c
2106 +++ b/net/bridge/br_input.c
2107 @@ -71,7 +71,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
2108 goto drop;
2109
2110 if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid))
2111 - goto drop;
2112 + goto out;
2113
2114 /* insert into forwarding database after filtering to avoid spoofing */
2115 br = p->br;
2116 diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
2117 index 06873e80a432..f16e9e487750 100644
2118 --- a/net/bridge/br_netlink.c
2119 +++ b/net/bridge/br_netlink.c
2120 @@ -438,6 +438,20 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
2121 return 0;
2122 }
2123
2124 +static int br_dev_newlink(struct net *src_net, struct net_device *dev,
2125 + struct nlattr *tb[], struct nlattr *data[])
2126 +{
2127 + struct net_bridge *br = netdev_priv(dev);
2128 +
2129 + if (tb[IFLA_ADDRESS]) {
2130 + spin_lock_bh(&br->lock);
2131 + br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
2132 + spin_unlock_bh(&br->lock);
2133 + }
2134 +
2135 + return register_netdevice(dev);
2136 +}
2137 +
2138 static size_t br_get_link_af_size(const struct net_device *dev)
2139 {
2140 struct net_port_vlans *pv;
2141 @@ -466,6 +480,7 @@ struct rtnl_link_ops br_link_ops __read_mostly = {
2142 .priv_size = sizeof(struct net_bridge),
2143 .setup = br_dev_setup,
2144 .validate = br_validate,
2145 + .newlink = br_dev_newlink,
2146 .dellink = br_dev_delete,
2147 };
2148
2149 diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
2150 index 9a9ffe7e4019..d8deb8bda736 100644
2151 --- a/net/bridge/br_vlan.c
2152 +++ b/net/bridge/br_vlan.c
2153 @@ -202,7 +202,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
2154 * rejected.
2155 */
2156 if (!v)
2157 - return false;
2158 + goto drop;
2159
2160 if (br_vlan_get_tag(skb, vid)) {
2161 u16 pvid = br_get_pvid(v);
2162 @@ -212,7 +212,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
2163 * traffic belongs to.
2164 */
2165 if (pvid == VLAN_N_VID)
2166 - return false;
2167 + goto drop;
2168
2169 /* PVID is set on this port. Any untagged ingress
2170 * frame is considered to belong to this vlan.
2171 @@ -224,7 +224,8 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
2172 /* Frame had a valid vlan tag. See if vlan is allowed */
2173 if (test_bit(*vid, v->vlan_bitmap))
2174 return true;
2175 -
2176 +drop:
2177 + kfree_skb(skb);
2178 return false;
2179 }
2180
2181 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2182 index 3d110c4fc787..6651a7797d46 100644
2183 --- a/net/bridge/netfilter/ebtables.c
2184 +++ b/net/bridge/netfilter/ebtables.c
2185 @@ -1044,10 +1044,9 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
2186 if (repl->num_counters &&
2187 copy_to_user(repl->counters, counterstmp,
2188 repl->num_counters * sizeof(struct ebt_counter))) {
2189 - ret = -EFAULT;
2190 + /* Silent error, can't fail, new table is already in place */
2191 + net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
2192 }
2193 - else
2194 - ret = 0;
2195
2196 /* decrease module count and free resources */
2197 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
2198 diff --git a/net/core/dev.c b/net/core/dev.c
2199 index a0e55ffc03c9..56383a3e5d71 100644
2200 --- a/net/core/dev.c
2201 +++ b/net/core/dev.c
2202 @@ -3898,6 +3898,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2203 skb->vlan_tci = 0;
2204 skb->dev = napi->dev;
2205 skb->skb_iif = 0;
2206 + skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
2207
2208 napi->skb = skb;
2209 }
2210 @@ -4634,6 +4635,7 @@ void __dev_set_rx_mode(struct net_device *dev)
2211 if (ops->ndo_set_rx_mode)
2212 ops->ndo_set_rx_mode(dev);
2213 }
2214 +EXPORT_SYMBOL(__dev_set_rx_mode);
2215
2216 void dev_set_rx_mode(struct net_device *dev)
2217 {
2218 diff --git a/net/core/filter.c b/net/core/filter.c
2219 index 52f01229ee01..c6c18d8a2d88 100644
2220 --- a/net/core/filter.c
2221 +++ b/net/core/filter.c
2222 @@ -355,6 +355,8 @@ load_b:
2223
2224 if (skb_is_nonlinear(skb))
2225 return 0;
2226 + if (skb->len < sizeof(struct nlattr))
2227 + return 0;
2228 if (A > skb->len - sizeof(struct nlattr))
2229 return 0;
2230
2231 @@ -371,11 +373,13 @@ load_b:
2232
2233 if (skb_is_nonlinear(skb))
2234 return 0;
2235 + if (skb->len < sizeof(struct nlattr))
2236 + return 0;
2237 if (A > skb->len - sizeof(struct nlattr))
2238 return 0;
2239
2240 nla = (struct nlattr *)&skb->data[A];
2241 - if (nla->nla_len > A - skb->len)
2242 + if (nla->nla_len > skb->len - A)
2243 return 0;
2244
2245 nla = nla_find_nested(nla, X);
2246 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2247 index 4c3087dffe78..87ec574ffac8 100644
2248 --- a/net/core/rtnetlink.c
2249 +++ b/net/core/rtnetlink.c
2250 @@ -714,7 +714,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
2251 return 0;
2252 }
2253
2254 -static size_t rtnl_port_size(const struct net_device *dev)
2255 +static size_t rtnl_port_size(const struct net_device *dev,
2256 + u32 ext_filter_mask)
2257 {
2258 size_t port_size = nla_total_size(4) /* PORT_VF */
2259 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
2260 @@ -730,7 +731,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
2261 size_t port_self_size = nla_total_size(sizeof(struct nlattr))
2262 + port_size;
2263
2264 - if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
2265 + if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
2266 + !(ext_filter_mask & RTEXT_FILTER_VF))
2267 return 0;
2268 if (dev_num_vf(dev->dev.parent))
2269 return port_self_size + vf_ports_size +
2270 @@ -765,7 +767,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
2271 + nla_total_size(ext_filter_mask
2272 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
2273 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
2274 - + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
2275 + + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
2276 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
2277 + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
2278 }
2279 @@ -826,11 +828,13 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
2280 return 0;
2281 }
2282
2283 -static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
2284 +static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
2285 + u32 ext_filter_mask)
2286 {
2287 int err;
2288
2289 - if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
2290 + if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
2291 + !(ext_filter_mask & RTEXT_FILTER_VF))
2292 return 0;
2293
2294 err = rtnl_port_self_fill(skb, dev);
2295 @@ -985,7 +989,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
2296 nla_nest_end(skb, vfinfo);
2297 }
2298
2299 - if (rtnl_port_fill(skb, dev))
2300 + if (rtnl_port_fill(skb, dev, ext_filter_mask))
2301 goto nla_put_failure;
2302
2303 if (dev->rtnl_link_ops) {
2304 @@ -1039,6 +1043,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2305 struct hlist_head *head;
2306 struct nlattr *tb[IFLA_MAX+1];
2307 u32 ext_filter_mask = 0;
2308 + int err;
2309
2310 s_h = cb->args[0];
2311 s_idx = cb->args[1];
2312 @@ -1059,11 +1064,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2313 hlist_for_each_entry_rcu(dev, head, index_hlist) {
2314 if (idx < s_idx)
2315 goto cont;
2316 - if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
2317 - NETLINK_CB(cb->skb).portid,
2318 - cb->nlh->nlmsg_seq, 0,
2319 - NLM_F_MULTI,
2320 - ext_filter_mask) <= 0)
2321 + err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
2322 + NETLINK_CB(cb->skb).portid,
2323 + cb->nlh->nlmsg_seq, 0,
2324 + NLM_F_MULTI,
2325 + ext_filter_mask);
2326 + /* If we ran out of room on the first message,
2327 + * we're in trouble
2328 + */
2329 + WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
2330 +
2331 + if (err <= 0)
2332 goto out;
2333
2334 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2335 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2336 index 79143b7af7e5..66f722b5052a 100644
2337 --- a/net/core/skbuff.c
2338 +++ b/net/core/skbuff.c
2339 @@ -3487,12 +3487,14 @@ EXPORT_SYMBOL(skb_try_coalesce);
2340 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
2341 {
2342 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2343 - unsigned int hdr_len;
2344
2345 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2346 - hdr_len = tcp_hdrlen(skb);
2347 - else
2348 - hdr_len = sizeof(struct udphdr);
2349 - return hdr_len + shinfo->gso_size;
2350 + return tcp_hdrlen(skb) + shinfo->gso_size;
2351 +
2352 + /* UFO sets gso_size to the size of the fragmentation
2353 + * payload, i.e. the size of the L4 (UDP) header is already
2354 + * accounted for.
2355 + */
2356 + return shinfo->gso_size;
2357 }
2358 EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
2359 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
2360 index a0e9cf6379de..6a7fae228634 100644
2361 --- a/net/core/sock_diag.c
2362 +++ b/net/core/sock_diag.c
2363 @@ -49,7 +49,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
2364 }
2365 EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
2366
2367 -int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
2368 +int sock_diag_put_filterinfo(struct sock *sk,
2369 struct sk_buff *skb, int attrtype)
2370 {
2371 struct nlattr *attr;
2372 @@ -57,7 +57,7 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
2373 unsigned int len;
2374 int err = 0;
2375
2376 - if (!ns_capable(user_ns, CAP_NET_ADMIN)) {
2377 + if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
2378 nla_reserve(skb, attrtype, 0);
2379 return 0;
2380 }
2381 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
2382 index 8f6cb7a87cd6..9c3979a50804 100644
2383 --- a/net/ipv4/fib_semantics.c
2384 +++ b/net/ipv4/fib_semantics.c
2385 @@ -818,13 +818,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
2386 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
2387 if (fi == NULL)
2388 goto failure;
2389 + fib_info_cnt++;
2390 if (cfg->fc_mx) {
2391 fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
2392 if (!fi->fib_metrics)
2393 goto failure;
2394 } else
2395 fi->fib_metrics = (u32 *) dst_default_metrics;
2396 - fib_info_cnt++;
2397
2398 fi->fib_net = hold_net(net);
2399 fi->fib_protocol = cfg->fc_protocol;
2400 diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
2401 index 98d7e53d2afd..bd1c5baf69be 100644
2402 --- a/net/ipv4/ip_forward.c
2403 +++ b/net/ipv4/ip_forward.c
2404 @@ -42,12 +42,12 @@
2405 static bool ip_may_fragment(const struct sk_buff *skb)
2406 {
2407 return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
2408 - !skb->local_df;
2409 + skb->local_df;
2410 }
2411
2412 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
2413 {
2414 - if (skb->len <= mtu || skb->local_df)
2415 + if (skb->len <= mtu)
2416 return false;
2417
2418 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
2419 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
2420 index 828b2e8631e7..fae5a8459538 100644
2421 --- a/net/ipv4/ip_gre.c
2422 +++ b/net/ipv4/ip_gre.c
2423 @@ -652,6 +652,7 @@ static const struct net_device_ops ipgre_netdev_ops = {
2424 static void ipgre_tunnel_setup(struct net_device *dev)
2425 {
2426 dev->netdev_ops = &ipgre_netdev_ops;
2427 + dev->type = ARPHRD_IPGRE;
2428 ip_tunnel_setup(dev, ipgre_net_id);
2429 }
2430
2431 @@ -690,7 +691,6 @@ static int ipgre_tunnel_init(struct net_device *dev)
2432 memcpy(dev->dev_addr, &iph->saddr, 4);
2433 memcpy(dev->broadcast, &iph->daddr, 4);
2434
2435 - dev->type = ARPHRD_IPGRE;
2436 dev->flags = IFF_NOARP;
2437 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
2438 dev->addr_len = 4;
2439 diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
2440 index feb19db62359..4ec34275160b 100644
2441 --- a/net/ipv4/ip_vti.c
2442 +++ b/net/ipv4/ip_vti.c
2443 @@ -579,9 +579,9 @@ static void vti_dev_free(struct net_device *dev)
2444 static void vti_tunnel_setup(struct net_device *dev)
2445 {
2446 dev->netdev_ops = &vti_netdev_ops;
2447 + dev->type = ARPHRD_TUNNEL;
2448 dev->destructor = vti_dev_free;
2449
2450 - dev->type = ARPHRD_TUNNEL;
2451 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
2452 dev->mtu = ETH_DATA_LEN;
2453 dev->flags = IFF_NOARP;
2454 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
2455 index 85a4f21aac1a..c8abe31961ed 100644
2456 --- a/net/ipv4/netfilter/arp_tables.c
2457 +++ b/net/ipv4/netfilter/arp_tables.c
2458 @@ -1039,8 +1039,10 @@ static int __do_replace(struct net *net, const char *name,
2459
2460 xt_free_table_info(oldinfo);
2461 if (copy_to_user(counters_ptr, counters,
2462 - sizeof(struct xt_counters) * num_counters) != 0)
2463 - ret = -EFAULT;
2464 + sizeof(struct xt_counters) * num_counters) != 0) {
2465 + /* Silent error, can't fail, new table is already in place */
2466 + net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
2467 + }
2468 vfree(counters);
2469 xt_table_unlock(t);
2470 return ret;
2471 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
2472 index d23118d95ff9..651c10774d58 100644
2473 --- a/net/ipv4/netfilter/ip_tables.c
2474 +++ b/net/ipv4/netfilter/ip_tables.c
2475 @@ -1226,8 +1226,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
2476
2477 xt_free_table_info(oldinfo);
2478 if (copy_to_user(counters_ptr, counters,
2479 - sizeof(struct xt_counters) * num_counters) != 0)
2480 - ret = -EFAULT;
2481 + sizeof(struct xt_counters) * num_counters) != 0) {
2482 + /* Silent error, can't fail, new table is already in place */
2483 + net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
2484 + }
2485 vfree(counters);
2486 xt_table_unlock(t);
2487 return ret;
2488 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
2489 index 8cae28f5c3cf..aa857a4a06a8 100644
2490 --- a/net/ipv4/ping.c
2491 +++ b/net/ipv4/ping.c
2492 @@ -204,26 +204,33 @@ static int ping_init_sock(struct sock *sk)
2493 {
2494 struct net *net = sock_net(sk);
2495 kgid_t group = current_egid();
2496 - struct group_info *group_info = get_current_groups();
2497 - int i, j, count = group_info->ngroups;
2498 + struct group_info *group_info;
2499 + int i, j, count;
2500 kgid_t low, high;
2501 + int ret = 0;
2502
2503 inet_get_ping_group_range_net(net, &low, &high);
2504 if (gid_lte(low, group) && gid_lte(group, high))
2505 return 0;
2506
2507 + group_info = get_current_groups();
2508 + count = group_info->ngroups;
2509 for (i = 0; i < group_info->nblocks; i++) {
2510 int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
2511 for (j = 0; j < cp_count; j++) {
2512 kgid_t gid = group_info->blocks[i][j];
2513 if (gid_lte(low, gid) && gid_lte(gid, high))
2514 - return 0;
2515 + goto out_release_group;
2516 }
2517
2518 count -= cp_count;
2519 }
2520
2521 - return -EACCES;
2522 + ret = -EACCES;
2523 +
2524 +out_release_group:
2525 + put_group_info(group_info);
2526 + return ret;
2527 }
2528
2529 static void ping_close(struct sock *sk, long timeout)
2530 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
2531 index 1a362f375e67..7256eef088b2 100644
2532 --- a/net/ipv4/route.c
2533 +++ b/net/ipv4/route.c
2534 @@ -1478,7 +1478,7 @@ static int __mkroute_input(struct sk_buff *skb,
2535 struct in_device *out_dev;
2536 unsigned int flags = 0;
2537 bool do_cache;
2538 - u32 itag;
2539 + u32 itag = 0;
2540
2541 /* get a working reference to the output device */
2542 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
2543 @@ -2306,7 +2306,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2544 }
2545 } else
2546 #endif
2547 - if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
2548 + if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
2549 goto nla_put_failure;
2550 }
2551
2552 diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
2553 index b6ae92a51f58..894b7cea5d7b 100644
2554 --- a/net/ipv4/tcp_cubic.c
2555 +++ b/net/ipv4/tcp_cubic.c
2556 @@ -408,7 +408,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
2557 ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
2558 ratio += cnt;
2559
2560 - ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT);
2561 + ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
2562 }
2563
2564 /* Some calls are for duplicates without timetamps */
2565 diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2566 index 9c06ecb6556e..009c9620f442 100644
2567 --- a/net/ipv6/ip6_fib.c
2568 +++ b/net/ipv6/ip6_fib.c
2569 @@ -1418,7 +1418,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
2570
2571 if (w->skip) {
2572 w->skip--;
2573 - continue;
2574 + goto skip;
2575 }
2576
2577 err = w->func(w);
2578 @@ -1428,6 +1428,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
2579 w->count++;
2580 continue;
2581 }
2582 +skip:
2583 w->state = FWS_U;
2584 case FWS_U:
2585 if (fn == w->root)
2586 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
2587 index 1f9a1a5b61f4..7dca7c43fdf1 100644
2588 --- a/net/ipv6/ip6_gre.c
2589 +++ b/net/ipv6/ip6_gre.c
2590 @@ -1549,6 +1549,15 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
2591 return 0;
2592 }
2593
2594 +static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
2595 +{
2596 + struct net *net = dev_net(dev);
2597 + struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
2598 +
2599 + if (dev != ign->fb_tunnel_dev)
2600 + unregister_netdevice_queue(dev, head);
2601 +}
2602 +
2603 static size_t ip6gre_get_size(const struct net_device *dev)
2604 {
2605 return
2606 @@ -1626,6 +1635,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
2607 .validate = ip6gre_tunnel_validate,
2608 .newlink = ip6gre_newlink,
2609 .changelink = ip6gre_changelink,
2610 + .dellink = ip6gre_dellink,
2611 .get_size = ip6gre_get_size,
2612 .fill_info = ip6gre_fill_info,
2613 };
2614 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2615 index 32fb114b86bb..ffa8d295c56c 100644
2616 --- a/net/ipv6/ip6_output.c
2617 +++ b/net/ipv6/ip6_output.c
2618 @@ -347,12 +347,16 @@ static inline int ip6_forward_finish(struct sk_buff *skb)
2619
2620 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
2621 {
2622 - if (skb->len <= mtu || skb->local_df)
2623 + if (skb->len <= mtu)
2624 return false;
2625
2626 + /* ipv6 conntrack defrag sets max_frag_size + local_df */
2627 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
2628 return true;
2629
2630 + if (skb->local_df)
2631 + return false;
2632 +
2633 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
2634 return false;
2635
2636 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
2637 index f21cf476b00c..73d7f68da557 100644
2638 --- a/net/ipv6/ip6_tunnel.c
2639 +++ b/net/ipv6/ip6_tunnel.c
2640 @@ -1531,7 +1531,7 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
2641 {
2642 u8 proto;
2643
2644 - if (!data)
2645 + if (!data || !data[IFLA_IPTUN_PROTO])
2646 return 0;
2647
2648 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
2649 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
2650 index 44400c216dc6..89a4e4ddd8bb 100644
2651 --- a/net/ipv6/netfilter/ip6_tables.c
2652 +++ b/net/ipv6/netfilter/ip6_tables.c
2653 @@ -1236,8 +1236,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
2654
2655 xt_free_table_info(oldinfo);
2656 if (copy_to_user(counters_ptr, counters,
2657 - sizeof(struct xt_counters) * num_counters) != 0)
2658 - ret = -EFAULT;
2659 + sizeof(struct xt_counters) * num_counters) != 0) {
2660 + /* Silent error, can't fail, new table is already in place */
2661 + net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
2662 + }
2663 vfree(counters);
2664 xt_table_unlock(t);
2665 return ret;
2666 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2667 index 3fde3e977862..b2614b22622b 100644
2668 --- a/net/ipv6/route.c
2669 +++ b/net/ipv6/route.c
2670 @@ -1236,7 +1236,7 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
2671 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2672
2673 if (mtu)
2674 - return mtu;
2675 + goto out;
2676
2677 mtu = IPV6_MIN_MTU;
2678
2679 @@ -1246,7 +1246,8 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
2680 mtu = idev->cnf.mtu6;
2681 rcu_read_unlock();
2682
2683 - return mtu;
2684 +out:
2685 + return min_t(unsigned int, mtu, IP6_MAX_MTU);
2686 }
2687
2688 static struct dst_entry *icmp6_dst_gc_list;
2689 diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
2690 index 44441c0c5037..9a0e5874e73e 100644
2691 --- a/net/l2tp/l2tp_ppp.c
2692 +++ b/net/l2tp/l2tp_ppp.c
2693 @@ -754,9 +754,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
2694 session->deref = pppol2tp_session_sock_put;
2695
2696 /* If PMTU discovery was enabled, use the MTU that was discovered */
2697 - dst = sk_dst_get(sk);
2698 + dst = sk_dst_get(tunnel->sock);
2699 if (dst != NULL) {
2700 - u32 pmtu = dst_mtu(__sk_dst_get(sk));
2701 + u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
2702 if (pmtu != 0)
2703 session->mtu = session->mru = pmtu -
2704 PPPOL2TP_HEADER_OVERHEAD;
2705 diff --git a/net/packet/diag.c b/net/packet/diag.c
2706 index a9584a2f6d69..ec8b6e8a80b1 100644
2707 --- a/net/packet/diag.c
2708 +++ b/net/packet/diag.c
2709 @@ -171,7 +171,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
2710 goto out_nlmsg_trim;
2711
2712 if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
2713 - sock_diag_put_filterinfo(user_ns, sk, skb, PACKET_DIAG_FILTER))
2714 + sock_diag_put_filterinfo(sk, skb, PACKET_DIAG_FILTER))
2715 goto out_nlmsg_trim;
2716
2717 return nlmsg_end(skb, nlh);
2718 diff --git a/net/sctp/auth.c b/net/sctp/auth.c
2719 index ba1dfc3f8def..7a19117254db 100644
2720 --- a/net/sctp/auth.c
2721 +++ b/net/sctp/auth.c
2722 @@ -393,14 +393,13 @@ nomem:
2723 */
2724 int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
2725 {
2726 - struct net *net = sock_net(asoc->base.sk);
2727 struct sctp_auth_bytes *secret;
2728 struct sctp_shared_key *ep_key;
2729
2730 /* If we don't support AUTH, or peer is not capable
2731 * we don't need to do anything.
2732 */
2733 - if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
2734 + if (!asoc->ep->auth_enable || !asoc->peer.auth_capable)
2735 return 0;
2736
2737 /* If the key_id is non-zero and we couldn't find an
2738 @@ -447,16 +446,16 @@ struct sctp_shared_key *sctp_auth_get_shkey(
2739 */
2740 int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
2741 {
2742 - struct net *net = sock_net(ep->base.sk);
2743 struct crypto_hash *tfm = NULL;
2744 __u16 id;
2745
2746 - /* if the transforms are already allocted, we are done */
2747 - if (!net->sctp.auth_enable) {
2748 + /* If AUTH extension is disabled, we are done */
2749 + if (!ep->auth_enable) {
2750 ep->auth_hmacs = NULL;
2751 return 0;
2752 }
2753
2754 + /* If the transforms are already allocated, we are done */
2755 if (ep->auth_hmacs)
2756 return 0;
2757
2758 @@ -677,12 +676,10 @@ static int __sctp_auth_cid(sctp_cid_t chunk, struct sctp_chunks_param *param)
2759 /* Check if peer requested that this chunk is authenticated */
2760 int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
2761 {
2762 - struct net *net;
2763 if (!asoc)
2764 return 0;
2765
2766 - net = sock_net(asoc->base.sk);
2767 - if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
2768 + if (!asoc->ep->auth_enable || !asoc->peer.auth_capable)
2769 return 0;
2770
2771 return __sctp_auth_cid(chunk, asoc->peer.peer_chunks);
2772 @@ -691,12 +688,10 @@ int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
2773 /* Check if we requested that peer authenticate this chunk. */
2774 int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
2775 {
2776 - struct net *net;
2777 if (!asoc)
2778 return 0;
2779
2780 - net = sock_net(asoc->base.sk);
2781 - if (!net->sctp.auth_enable)
2782 + if (!asoc->ep->auth_enable)
2783 return 0;
2784
2785 return __sctp_auth_cid(chunk,
2786 diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
2787 index 5fbd7bc6bb11..e09f906514db 100644
2788 --- a/net/sctp/endpointola.c
2789 +++ b/net/sctp/endpointola.c
2790 @@ -75,7 +75,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
2791 if (!ep->digest)
2792 return NULL;
2793
2794 - if (net->sctp.auth_enable) {
2795 + ep->auth_enable = net->sctp.auth_enable;
2796 + if (ep->auth_enable) {
2797 /* Allocate space for HMACS and CHUNKS authentication
2798 * variables. There are arrays that we encode directly
2799 * into parameters to make the rest of the operations easier.
2800 diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
2801 index eaee00c61139..5a3c1c0a84a1 100644
2802 --- a/net/sctp/protocol.c
2803 +++ b/net/sctp/protocol.c
2804 @@ -498,8 +498,13 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
2805 continue;
2806 if ((laddr->state == SCTP_ADDR_SRC) &&
2807 (AF_INET == laddr->a.sa.sa_family)) {
2808 - fl4->saddr = laddr->a.v4.sin_addr.s_addr;
2809 fl4->fl4_sport = laddr->a.v4.sin_port;
2810 + flowi4_update_output(fl4,
2811 + asoc->base.sk->sk_bound_dev_if,
2812 + RT_CONN_FLAGS(asoc->base.sk),
2813 + daddr->v4.sin_addr.s_addr,
2814 + laddr->a.v4.sin_addr.s_addr);
2815 +
2816 rt = ip_route_output_key(sock_net(sk), fl4);
2817 if (!IS_ERR(rt)) {
2818 dst = &rt->dst;
2819 diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
2820 index 673921cfb97e..87e244be899a 100644
2821 --- a/net/sctp/sm_make_chunk.c
2822 +++ b/net/sctp/sm_make_chunk.c
2823 @@ -199,6 +199,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
2824 gfp_t gfp, int vparam_len)
2825 {
2826 struct net *net = sock_net(asoc->base.sk);
2827 + struct sctp_endpoint *ep = asoc->ep;
2828 sctp_inithdr_t init;
2829 union sctp_params addrs;
2830 size_t chunksize;
2831 @@ -258,7 +259,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
2832 chunksize += vparam_len;
2833
2834 /* Account for AUTH related parameters */
2835 - if (net->sctp.auth_enable) {
2836 + if (ep->auth_enable) {
2837 /* Add random parameter length*/
2838 chunksize += sizeof(asoc->c.auth_random);
2839
2840 @@ -343,7 +344,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
2841 }
2842
2843 /* Add SCTP-AUTH chunks to the parameter list */
2844 - if (net->sctp.auth_enable) {
2845 + if (ep->auth_enable) {
2846 sctp_addto_chunk(retval, sizeof(asoc->c.auth_random),
2847 asoc->c.auth_random);
2848 if (auth_hmacs)
2849 @@ -1995,7 +1996,7 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
2850 /* if the peer reports AUTH, assume that he
2851 * supports AUTH.
2852 */
2853 - if (net->sctp.auth_enable)
2854 + if (asoc->ep->auth_enable)
2855 asoc->peer.auth_capable = 1;
2856 break;
2857 case SCTP_CID_ASCONF:
2858 @@ -2087,6 +2088,7 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
2859 * SCTP_IERROR_NO_ERROR - continue with the chunk
2860 */
2861 static sctp_ierror_t sctp_verify_param(struct net *net,
2862 + const struct sctp_endpoint *ep,
2863 const struct sctp_association *asoc,
2864 union sctp_params param,
2865 sctp_cid_t cid,
2866 @@ -2137,7 +2139,7 @@ static sctp_ierror_t sctp_verify_param(struct net *net,
2867 goto fallthrough;
2868
2869 case SCTP_PARAM_RANDOM:
2870 - if (!net->sctp.auth_enable)
2871 + if (!ep->auth_enable)
2872 goto fallthrough;
2873
2874 /* SCTP-AUTH: Secion 6.1
2875 @@ -2154,7 +2156,7 @@ static sctp_ierror_t sctp_verify_param(struct net *net,
2876 break;
2877
2878 case SCTP_PARAM_CHUNKS:
2879 - if (!net->sctp.auth_enable)
2880 + if (!ep->auth_enable)
2881 goto fallthrough;
2882
2883 /* SCTP-AUTH: Section 3.2
2884 @@ -2170,7 +2172,7 @@ static sctp_ierror_t sctp_verify_param(struct net *net,
2885 break;
2886
2887 case SCTP_PARAM_HMAC_ALGO:
2888 - if (!net->sctp.auth_enable)
2889 + if (!ep->auth_enable)
2890 goto fallthrough;
2891
2892 hmacs = (struct sctp_hmac_algo_param *)param.p;
2893 @@ -2204,10 +2206,9 @@ fallthrough:
2894 }
2895
2896 /* Verify the INIT packet before we process it. */
2897 -int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
2898 - sctp_cid_t cid,
2899 - sctp_init_chunk_t *peer_init,
2900 - struct sctp_chunk *chunk,
2901 +int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
2902 + const struct sctp_association *asoc, sctp_cid_t cid,
2903 + sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk,
2904 struct sctp_chunk **errp)
2905 {
2906 union sctp_params param;
2907 @@ -2250,8 +2251,8 @@ int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
2908
2909 /* Verify all the variable length parameters */
2910 sctp_walk_params(param, peer_init, init_hdr.params) {
2911 -
2912 - result = sctp_verify_param(net, asoc, param, cid, chunk, errp);
2913 + result = sctp_verify_param(net, ep, asoc, param, cid,
2914 + chunk, errp);
2915 switch (result) {
2916 case SCTP_IERROR_ABORT:
2917 case SCTP_IERROR_NOMEM:
2918 @@ -2483,6 +2484,7 @@ static int sctp_process_param(struct sctp_association *asoc,
2919 struct sctp_af *af;
2920 union sctp_addr_param *addr_param;
2921 struct sctp_transport *t;
2922 + struct sctp_endpoint *ep = asoc->ep;
2923
2924 /* We maintain all INIT parameters in network byte order all the
2925 * time. This allows us to not worry about whether the parameters
2926 @@ -2623,7 +2625,7 @@ do_addr_param:
2927 goto fall_through;
2928
2929 case SCTP_PARAM_RANDOM:
2930 - if (!net->sctp.auth_enable)
2931 + if (!ep->auth_enable)
2932 goto fall_through;
2933
2934 /* Save peer's random parameter */
2935 @@ -2636,7 +2638,7 @@ do_addr_param:
2936 break;
2937
2938 case SCTP_PARAM_HMAC_ALGO:
2939 - if (!net->sctp.auth_enable)
2940 + if (!ep->auth_enable)
2941 goto fall_through;
2942
2943 /* Save peer's HMAC list */
2944 @@ -2652,7 +2654,7 @@ do_addr_param:
2945 break;
2946
2947 case SCTP_PARAM_CHUNKS:
2948 - if (!net->sctp.auth_enable)
2949 + if (!ep->auth_enable)
2950 goto fall_through;
2951
2952 asoc->peer.peer_chunks = kmemdup(param.p,
2953 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
2954 index 9973079401c4..6eb26403de6a 100644
2955 --- a/net/sctp/sm_statefuns.c
2956 +++ b/net/sctp/sm_statefuns.c
2957 @@ -364,7 +364,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net,
2958
2959 /* Verify the INIT chunk before processing it. */
2960 err_chunk = NULL;
2961 - if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
2962 + if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
2963 (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
2964 &err_chunk)) {
2965 /* This chunk contains fatal error. It is to be discarded.
2966 @@ -531,7 +531,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net,
2967
2968 /* Verify the INIT chunk before processing it. */
2969 err_chunk = NULL;
2970 - if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
2971 + if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
2972 (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
2973 &err_chunk)) {
2974
2975 @@ -1437,7 +1437,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
2976
2977 /* Verify the INIT chunk before processing it. */
2978 err_chunk = NULL;
2979 - if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
2980 + if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
2981 (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
2982 &err_chunk)) {
2983 /* This chunk contains fatal error. It is to be discarded.
2984 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2985 index 8554e5eebaeb..dfb9b133e662 100644
2986 --- a/net/sctp/socket.c
2987 +++ b/net/sctp/socket.c
2988 @@ -3318,10 +3318,10 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
2989 char __user *optval,
2990 unsigned int optlen)
2991 {
2992 - struct net *net = sock_net(sk);
2993 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
2994 struct sctp_authchunk val;
2995
2996 - if (!net->sctp.auth_enable)
2997 + if (!ep->auth_enable)
2998 return -EACCES;
2999
3000 if (optlen != sizeof(struct sctp_authchunk))
3001 @@ -3338,7 +3338,7 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
3002 }
3003
3004 /* add this chunk id to the endpoint */
3005 - return sctp_auth_ep_add_chunkid(sctp_sk(sk)->ep, val.sauth_chunk);
3006 + return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
3007 }
3008
3009 /*
3010 @@ -3351,12 +3351,12 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
3011 char __user *optval,
3012 unsigned int optlen)
3013 {
3014 - struct net *net = sock_net(sk);
3015 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3016 struct sctp_hmacalgo *hmacs;
3017 u32 idents;
3018 int err;
3019
3020 - if (!net->sctp.auth_enable)
3021 + if (!ep->auth_enable)
3022 return -EACCES;
3023
3024 if (optlen < sizeof(struct sctp_hmacalgo))
3025 @@ -3373,7 +3373,7 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
3026 goto out;
3027 }
3028
3029 - err = sctp_auth_ep_set_hmacs(sctp_sk(sk)->ep, hmacs);
3030 + err = sctp_auth_ep_set_hmacs(ep, hmacs);
3031 out:
3032 kfree(hmacs);
3033 return err;
3034 @@ -3389,12 +3389,12 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
3035 char __user *optval,
3036 unsigned int optlen)
3037 {
3038 - struct net *net = sock_net(sk);
3039 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3040 struct sctp_authkey *authkey;
3041 struct sctp_association *asoc;
3042 int ret;
3043
3044 - if (!net->sctp.auth_enable)
3045 + if (!ep->auth_enable)
3046 return -EACCES;
3047
3048 if (optlen <= sizeof(struct sctp_authkey))
3049 @@ -3415,7 +3415,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
3050 goto out;
3051 }
3052
3053 - ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
3054 + ret = sctp_auth_set_key(ep, asoc, authkey);
3055 out:
3056 kzfree(authkey);
3057 return ret;
3058 @@ -3431,11 +3431,11 @@ static int sctp_setsockopt_active_key(struct sock *sk,
3059 char __user *optval,
3060 unsigned int optlen)
3061 {
3062 - struct net *net = sock_net(sk);
3063 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3064 struct sctp_authkeyid val;
3065 struct sctp_association *asoc;
3066
3067 - if (!net->sctp.auth_enable)
3068 + if (!ep->auth_enable)
3069 return -EACCES;
3070
3071 if (optlen != sizeof(struct sctp_authkeyid))
3072 @@ -3447,8 +3447,7 @@ static int sctp_setsockopt_active_key(struct sock *sk,
3073 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
3074 return -EINVAL;
3075
3076 - return sctp_auth_set_active_key(sctp_sk(sk)->ep, asoc,
3077 - val.scact_keynumber);
3078 + return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
3079 }
3080
3081 /*
3082 @@ -3460,11 +3459,11 @@ static int sctp_setsockopt_del_key(struct sock *sk,
3083 char __user *optval,
3084 unsigned int optlen)
3085 {
3086 - struct net *net = sock_net(sk);
3087 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3088 struct sctp_authkeyid val;
3089 struct sctp_association *asoc;
3090
3091 - if (!net->sctp.auth_enable)
3092 + if (!ep->auth_enable)
3093 return -EACCES;
3094
3095 if (optlen != sizeof(struct sctp_authkeyid))
3096 @@ -3476,8 +3475,7 @@ static int sctp_setsockopt_del_key(struct sock *sk,
3097 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
3098 return -EINVAL;
3099
3100 - return sctp_auth_del_key_id(sctp_sk(sk)->ep, asoc,
3101 - val.scact_keynumber);
3102 + return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
3103
3104 }
3105
3106 @@ -5368,16 +5366,16 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
3107 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
3108 char __user *optval, int __user *optlen)
3109 {
3110 - struct net *net = sock_net(sk);
3111 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3112 struct sctp_hmacalgo __user *p = (void __user *)optval;
3113 struct sctp_hmac_algo_param *hmacs;
3114 __u16 data_len = 0;
3115 u32 num_idents;
3116
3117 - if (!net->sctp.auth_enable)
3118 + if (!ep->auth_enable)
3119 return -EACCES;
3120
3121 - hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
3122 + hmacs = ep->auth_hmacs_list;
3123 data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
3124
3125 if (len < sizeof(struct sctp_hmacalgo) + data_len)
3126 @@ -5398,11 +5396,11 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
3127 static int sctp_getsockopt_active_key(struct sock *sk, int len,
3128 char __user *optval, int __user *optlen)
3129 {
3130 - struct net *net = sock_net(sk);
3131 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3132 struct sctp_authkeyid val;
3133 struct sctp_association *asoc;
3134
3135 - if (!net->sctp.auth_enable)
3136 + if (!ep->auth_enable)
3137 return -EACCES;
3138
3139 if (len < sizeof(struct sctp_authkeyid))
3140 @@ -5417,7 +5415,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
3141 if (asoc)
3142 val.scact_keynumber = asoc->active_key_id;
3143 else
3144 - val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;
3145 + val.scact_keynumber = ep->active_key_id;
3146
3147 len = sizeof(struct sctp_authkeyid);
3148 if (put_user(len, optlen))
3149 @@ -5431,7 +5429,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
3150 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
3151 char __user *optval, int __user *optlen)
3152 {
3153 - struct net *net = sock_net(sk);
3154 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3155 struct sctp_authchunks __user *p = (void __user *)optval;
3156 struct sctp_authchunks val;
3157 struct sctp_association *asoc;
3158 @@ -5439,7 +5437,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
3159 u32 num_chunks = 0;
3160 char __user *to;
3161
3162 - if (!net->sctp.auth_enable)
3163 + if (!ep->auth_enable)
3164 return -EACCES;
3165
3166 if (len < sizeof(struct sctp_authchunks))
3167 @@ -5475,7 +5473,7 @@ num:
3168 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
3169 char __user *optval, int __user *optlen)
3170 {
3171 - struct net *net = sock_net(sk);
3172 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3173 struct sctp_authchunks __user *p = (void __user *)optval;
3174 struct sctp_authchunks val;
3175 struct sctp_association *asoc;
3176 @@ -5483,7 +5481,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
3177 u32 num_chunks = 0;
3178 char __user *to;
3179
3180 - if (!net->sctp.auth_enable)
3181 + if (!ep->auth_enable)
3182 return -EACCES;
3183
3184 if (len < sizeof(struct sctp_authchunks))
3185 @@ -5500,7 +5498,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
3186 if (asoc)
3187 ch = (struct sctp_chunks_param*)asoc->c.auth_chunks;
3188 else
3189 - ch = sctp_sk(sk)->ep->auth_chunk_list;
3190 + ch = ep->auth_chunk_list;
3191
3192 if (!ch)
3193 goto num;
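Each socket-option handler above now takes the same early exit: if the endpoint was created with AUTH disabled, both the set and get paths return -EACCES before touching any key state. A hedged user-space illustration of what that looks like to an application, assuming the lksctp-tools <netinet/sctp.h> header and an SCTP-capable kernel:

/* Sketch: probe whether SCTP-AUTH is usable on a socket by reading the
 * active key id; EACCES is expected when the endpoint's auth_enable is 0. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
        struct sctp_authkeyid key;
        socklen_t len = sizeof(key);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&key, 0, sizeof(key));
        if (getsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY, &key, &len) < 0)
                printf("SCTP_AUTH_ACTIVE_KEY: %s%s\n", strerror(errno),
                       errno == EACCES ? " (auth_enable is off)" : "");
        else
                printf("active key id: %u\n", (unsigned)key.scact_keynumber);

        close(fd);
        return 0;
}

With net.sctp.auth_enable at 0 when the socket was created, the getsockopt() call should fail with EACCES; with it set to 1 beforehand, it should report the endpoint's active key id (0 by default).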
3194 @@ -6582,6 +6580,46 @@ static void __sctp_write_space(struct sctp_association *asoc)
3195 }
3196 }
3197
3198 +static void sctp_wake_up_waiters(struct sock *sk,
3199 + struct sctp_association *asoc)
3200 +{
3201 + struct sctp_association *tmp = asoc;
3202 +
3203 + /* We do accounting for the sndbuf space per association,
3204 + * so we only need to wake our own association.
3205 + */
3206 + if (asoc->ep->sndbuf_policy)
3207 + return __sctp_write_space(asoc);
3208 +
3209 + /* If association goes down and is just flushing its
3210 + * outq, then just normally notify others.
3211 + */
3212 + if (asoc->base.dead)
3213 + return sctp_write_space(sk);
3214 +
3215 + /* Accounting for the sndbuf space is per socket, so we
3216 + * need to wake up others, try to be fair and in case of
3217 + * other associations, let them have a go first instead
3218 + * of just doing a sctp_write_space() call.
3219 + *
3220 + * Note that we reach sctp_wake_up_waiters() only when
3221 + * associations free up queued chunks, thus we are under
3222 + * lock and the list of associations on a socket is
3223 + * guaranteed not to change.
3224 + */
3225 + for (tmp = list_next_entry(tmp, asocs); 1;
3226 + tmp = list_next_entry(tmp, asocs)) {
3227 + /* Manually skip the head element. */
3228 + if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
3229 + continue;
3230 + /* Wake up association. */
3231 + __sctp_write_space(tmp);
3232 + /* We've reached the end. */
3233 + if (tmp == asoc)
3234 + break;
3235 + }
3236 +}
3237 +
3238 /* Do accounting for the sndbuf space.
3239 * Decrement the used sndbuf space of the corresponding association by the
3240 * data size which was just transmitted (freed).
3241 @@ -6609,7 +6647,7 @@ static void sctp_wfree(struct sk_buff *skb)
3242 sk_mem_uncharge(sk, skb->truesize);
3243
3244 sock_wfree(skb);
3245 - __sctp_write_space(asoc);
3246 + sctp_wake_up_waiters(sk, asoc);
3247
3248 sctp_association_put(asoc);
3249 }
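The comment block inside the new sctp_wake_up_waiters() spells out the fairness scheme for socket-wide sndbuf accounting: start at the freeing association's successor on the endpoint's association list, walk the list circularly while skipping the list head, and wake the freeing association last. A stand-alone sketch of that circular walk, with a toy list type instead of the kernel's struct list_head:

/* Sketch of the "wake everyone else first, self last" walk used above,
 * with a toy circular list instead of the kernel's list_next_entry(). */
#include <stdio.h>

struct assoc {
        const char *name;
        struct assoc *next;    /* circular: the list head participates */
        int is_head;           /* marks the endpoint's list head       */
};

static void wake(const struct assoc *a)
{
        printf("wake %s\n", a->name);
}

static void wake_up_waiters(struct assoc *self)
{
        struct assoc *tmp = self->next;

        for (;; tmp = tmp->next) {
                if (tmp->is_head)      /* manually skip the head element */
                        continue;
                wake(tmp);             /* wake this association          */
                if (tmp == self)       /* we have come full circle       */
                        break;
        }
}

int main(void)
{
        struct assoc head = { "head", NULL, 1 };
        struct assoc a = { "A", NULL, 0 }, b = { "B", NULL, 0 }, c = { "C", NULL, 0 };

        head.next = &a; a.next = &b; b.next = &c; c.next = &head;
        wake_up_waiters(&b);   /* prints C, A, then B last */
        return 0;
}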
3250 diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
3251 index bf3c6e8fc401..fe0ba7488bdf 100644
3252 --- a/net/sctp/sysctl.c
3253 +++ b/net/sctp/sysctl.c
3254 @@ -65,8 +65,11 @@ extern int sysctl_sctp_wmem[3];
3255 static int proc_sctp_do_hmac_alg(ctl_table *ctl,
3256 int write,
3257 void __user *buffer, size_t *lenp,
3258 -
3259 loff_t *ppos);
3260 +static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
3261 + void __user *buffer, size_t *lenp,
3262 + loff_t *ppos);
3263 +
3264 static ctl_table sctp_table[] = {
3265 {
3266 .procname = "sctp_mem",
3267 @@ -267,7 +270,7 @@ static ctl_table sctp_net_table[] = {
3268 .data = &init_net.sctp.auth_enable,
3269 .maxlen = sizeof(int),
3270 .mode = 0644,
3271 - .proc_handler = proc_dointvec,
3272 + .proc_handler = proc_sctp_do_auth,
3273 },
3274 {
3275 .procname = "addr_scope_policy",
3276 @@ -348,6 +351,37 @@ static int proc_sctp_do_hmac_alg(ctl_table *ctl,
3277 return ret;
3278 }
3279
3280 +static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
3281 + void __user *buffer, size_t *lenp,
3282 + loff_t *ppos)
3283 +{
3284 + struct net *net = current->nsproxy->net_ns;
3285 + struct ctl_table tbl;
3286 + int new_value, ret;
3287 +
3288 + memset(&tbl, 0, sizeof(struct ctl_table));
3289 + tbl.maxlen = sizeof(unsigned int);
3290 +
3291 + if (write)
3292 + tbl.data = &new_value;
3293 + else
3294 + tbl.data = &net->sctp.auth_enable;
3295 +
3296 + ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
3297 +
3298 + if (write) {
3299 + struct sock *sk = net->sctp.ctl_sock;
3300 +
3301 + net->sctp.auth_enable = new_value;
3302 + /* Update the value in the control socket */
3303 + lock_sock(sk);
3304 + sctp_sk(sk)->ep->auth_enable = new_value;
3305 + release_sock(sk);
3306 + }
3307 +
3308 + return ret;
3309 +}
3310 +
3311 int sctp_sysctl_net_register(struct net *net)
3312 {
3313 struct ctl_table *table;
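proc_sctp_do_auth() follows a common sysctl-handler shape: point a temporary ctl_table at a stack variable for writes (or at the live value for reads), let proc_dointvec() do the parsing, and only then publish the result, here into both net->sctp.auth_enable and the control socket's endpoint under lock_sock(). A condensed user-space sketch of that shape, where parse_int() stands in for proc_dointvec() and a mutex stands in for the socket lock (all names here are illustrative):

/* Sketch of the "parse into a local, publish afterwards" handler shape. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_net { int auth_enable; };
struct fake_ep  { int auth_enable; pthread_mutex_t lock; };

static int parse_int(const char *buf, int *out)
{
        char *end;
        long v = strtol(buf, &end, 10);

        if (end == buf)
                return -1;
        *out = (int)v;
        return 0;
}

static int do_auth_sysctl(struct fake_net *net, struct fake_ep *ctl_ep,
                          int write, const char *buffer)
{
        int new_value;

        if (!write) {
                printf("%d\n", net->auth_enable);
                return 0;
        }

        /* Parse into a local first so a bad write leaves state untouched. */
        if (parse_int(buffer, &new_value))
                return -1;

        net->auth_enable = new_value;

        /* Mirror the new value into the control socket's endpoint under
         * its lock, as the handler above does for sctp_sk(sk)->ep. */
        pthread_mutex_lock(&ctl_ep->lock);
        ctl_ep->auth_enable = new_value;
        pthread_mutex_unlock(&ctl_ep->lock);
        return 0;
}

int main(void)
{
        struct fake_net net = { 1 };
        struct fake_ep ep = { 1, PTHREAD_MUTEX_INITIALIZER };

        do_auth_sysctl(&net, &ep, 1, "0");
        do_auth_sysctl(&net, &ep, 0, NULL);   /* prints 0 */
        return 0;
}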
3314 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
3315 index a4be8e112bb6..a2c361c86e75 100644
3316 --- a/scripts/mod/modpost.c
3317 +++ b/scripts/mod/modpost.c
3318 @@ -573,12 +573,16 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname)
3319 if (strncmp(symname, "_restgpr_", sizeof("_restgpr_") - 1) == 0 ||
3320 strncmp(symname, "_savegpr_", sizeof("_savegpr_") - 1) == 0 ||
3321 strncmp(symname, "_rest32gpr_", sizeof("_rest32gpr_") - 1) == 0 ||
3322 - strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0)
3323 + strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0 ||
3324 + strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
3325 + strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
3326 return 1;
3327 if (info->hdr->e_machine == EM_PPC64)
3328 /* Special register function linked on all modules during final link of .ko */
3329 if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 ||
3330 - strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0)
3331 + strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 ||
3332 + strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
3333 + strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
3334 return 1;
3335 /* Do not ignore this symbol */
3336 return 0;
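The modpost hunk extends ignore_undef_symbol() so that the out-of-line vector-register save/restore helpers (_savevr_* and _restvr_*), which the final module link provides, no longer trigger undefined-symbol warnings. The checks use the usual sizeof("prefix") - 1 idiom to get the prefix length at compile time; a tiny illustration follows, where starts_with() is only a local helper and not modpost code:

/* Sketch of the sizeof("prefix") - 1 idiom: the string literal's size
 * includes the NUL, so subtracting one yields the prefix length. */
#include <stdio.h>
#include <string.h>

#define starts_with(s, prefix) \
        (strncmp((s), (prefix), sizeof(prefix) - 1) == 0)

int main(void)
{
        const char *syms[] = { "_restvr_20", "_savevr_31", "_restgpr0_14", "memcpy" };

        for (unsigned i = 0; i < sizeof(syms) / sizeof(syms[0]); i++)
                printf("%-14s %s\n", syms[i],
                       starts_with(syms[i], "_restvr_") ||
                       starts_with(syms[i], "_savevr_") ?
                       "ignored (linker-provided save/restore helper)" :
                       "checked normally");
        return 0;
}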