Magellan Linux

Contents of /trunk/kernel-alx/patches-3.14/0104-3.14.5-all-fixes.patch

Revision 2506
Fri Oct 17 07:55:45 2014 UTC by niro
File size: 190272 bytes
-patches for 3.14
diff --git a/Makefile b/Makefile
index d7c07fd8c944..fa77b0bed2a2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
VERSION = 3
PATCHLEVEL = 14
-SUBLEVEL = 4
+SUBLEVEL = 5
EXTRAVERSION =
-NAME = Shuffling Zombie Juror
+NAME = Remembering Coco

# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff --git a/arch/parisc/include/asm/shmparam.h b/arch/parisc/include/asm/shmparam.h
index 628ddc22faa8..afe1300ab667 100644
--- a/arch/parisc/include/asm/shmparam.h
+++ b/arch/parisc/include/asm/shmparam.h
@@ -1,8 +1,7 @@
#ifndef _ASMPARISC_SHMPARAM_H
#define _ASMPARISC_SHMPARAM_H

-#define __ARCH_FORCE_SHMLBA 1
-
-#define SHMLBA 0x00400000 /* attach addr needs to be 4 Mb aligned */
+#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
+#define SHM_COLOUR 0x00400000 /* shared mappings colouring */

#endif /* _ASMPARISC_SHMPARAM_H */
diff --git a/arch/parisc/include/uapi/asm/resource.h b/arch/parisc/include/uapi/asm/resource.h
index 8b06343b62ed..090483c47dbb 100644
--- a/arch/parisc/include/uapi/asm/resource.h
+++ b/arch/parisc/include/uapi/asm/resource.h
@@ -1,7 +1,6 @@
#ifndef _ASM_PARISC_RESOURCE_H
#define _ASM_PARISC_RESOURCE_H

-#define _STK_LIM_MAX 10 * _STK_LIM
#include <asm-generic/resource.h>

#endif
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index a6ffc775a9f8..f6448c7c62b5 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -323,7 +323,8 @@ void flush_dcache_page(struct page *page)
* specifically accesses it, of course) */

flush_tlb_page(mpnt, addr);
- if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
+ if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+ != (addr & (SHM_COLOUR - 1))) {
__flush_cache_page(mpnt, addr, page_to_phys(page));
if (old_addr)
printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index b7cadc4a06cd..31ffa9b55322 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -45,7 +45,7 @@

static int get_offset(unsigned int last_mmap)
{
- return (last_mmap & (SHMLBA-1)) >> PAGE_SHIFT;
+ return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
}

static unsigned long shared_align_offset(unsigned int last_mmap,
@@ -57,8 +57,8 @@ static unsigned long shared_align_offset(unsigned int last_mmap,
static inline unsigned long COLOR_ALIGN(unsigned long addr,
unsigned int last_mmap, unsigned long pgoff)
{
- unsigned long base = (addr+SHMLBA-1) & ~(SHMLBA-1);
- unsigned long off = (SHMLBA-1) &
+ unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
+ unsigned long off = (SHM_COLOUR-1) &
(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);

return base + off;
@@ -101,7 +101,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (flags & MAP_FIXED) {
if ((flags & MAP_SHARED) && last_mmap &&
(addr - shared_align_offset(last_mmap, pgoff))
- & (SHMLBA - 1))
+ & (SHM_COLOUR - 1))
return -EINVAL;
goto found_addr;
}
@@ -122,7 +122,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.length = len;
info.low_limit = mm->mmap_legacy_base;
info.high_limit = mmap_upper_limit();
- info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
info.align_offset = shared_align_offset(last_mmap, pgoff);
addr = vm_unmapped_area(&info);

@@ -161,7 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (flags & MAP_FIXED) {
if ((flags & MAP_SHARED) && last_mmap &&
(addr - shared_align_offset(last_mmap, pgoff))
- & (SHMLBA - 1))
+ & (SHM_COLOUR - 1))
return -EINVAL;
goto found_addr;
}
@@ -182,7 +182,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
- info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
info.align_offset = shared_align_offset(last_mmap, pgoff);
addr = vm_unmapped_area(&info);
if (!(addr & ~PAGE_MASK))
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 80e5dd248934..83ead0ea127d 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -392,7 +392,7 @@
ENTRY_COMP(vmsplice)
ENTRY_COMP(move_pages) /* 295 */
ENTRY_SAME(getcpu)
- ENTRY_SAME(epoll_pwait)
+ ENTRY_COMP(epoll_pwait)
ENTRY_COMP(statfs64)
ENTRY_COMP(fstatfs64)
ENTRY_COMP(kexec_load) /* 300 */
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index a778ee27518a..8e08c6712eb8 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -811,7 +811,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
return NULL;
memset(header, 0, sz);
header->pages = sz / PAGE_SIZE;
- hole = sz - (bpfsize + sizeof(*header));
+ hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
/* Insert random number of illegal instructions before BPF code
* and make sure the first instruction starts at an even address.
*/
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index c8b051933b1b..b39e194f6c8d 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -5,6 +5,18 @@
#include <asm/percpu.h>
#include <linux/thread_info.h>

+#ifdef CONFIG_X86_32
+/*
+ * i386's current_thread_info() depends on ESP and for interrupt/exception
+ * stacks this doesn't yield the actual task thread_info.
+ *
+ * We hard rely on the fact that all the TIF_NEED_RESCHED bits are
+ * the same, therefore use the slightly more expensive version below.
+ */
+#undef tif_need_resched
+#define tif_need_resched() test_tsk_thread_flag(current, TIF_NEED_RESCHED)
+#endif
+
DECLARE_PER_CPU(int, __preempt_count);

/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2b8578432d5b..ee0c3b554a38 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1109,7 +1109,6 @@ static inline u64 get_kernel_ns(void)
{
struct timespec ts;

- WARN_ON(preemptible());
ktime_get_ts(&ts);
monotonic_to_bootbased(&ts);
return timespec_to_ns(&ts);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 4ed75dd81d05..af2d4317b218 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -171,7 +171,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
memset(header, 0xcc, sz); /* fill whole space with int3 instructions */

header->pages = sz / PAGE_SIZE;
- hole = sz - (proglen + sizeof(*header));
+ hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));

/* insert a random number of int3 instructions before BPF code */
*image_ptr = &header->image[prandom_u32() % hole];
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 581521c843a5..c570feab9fa2 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -274,7 +274,7 @@ void __init xen_init_spinlocks(void)
printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
return;
}
-
+ printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
pv_lock_ops.unlock_kick = xen_unlock_kick;
}
@@ -290,6 +290,9 @@ static __init int xen_init_spinlocks_jump(void)
if (!xen_pvspin)
return 0;

+ if (!xen_domain())
+ return 0;
+
static_key_slow_inc(&paravirt_ticketlocks_enabled);
return 0;
}
diff --git a/block/blk-core.c b/block/blk-core.c
index bfe16d5af9f9..e45b321cf6a0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2353,7 +2353,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
if (!req->bio)
return false;

- trace_block_rq_complete(req->q, req);
+ trace_block_rq_complete(req->q, req, nr_bytes);

/*
* For fs requests, rq is just carrier of independent bio's
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 34898d53395b..7f2d09fbb10b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2253,7 +2253,7 @@ out_partial:
rbd_obj_request_put(obj_request);
out_unwind:
for_each_obj_request_safe(img_request, obj_request, next_obj_request)
- rbd_obj_request_put(obj_request);
+ rbd_img_obj_request_del(img_request, obj_request);

return -ENOMEM;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index df77e20e3c3d..697f2150a997 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -734,6 +734,7 @@ enum intel_sbi_destination {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)

struct intel_fbdev;
struct intel_fbc_work;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 6e580c98dede..783ae08200e8 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -238,14 +238,16 @@ TRACE_EVENT(i915_gem_evict_vm,
TP_ARGS(vm),

TP_STRUCT__entry(
+ __field(u32, dev)
__field(struct i915_address_space *, vm)
),

TP_fast_assign(
+ __entry->dev = vm->dev->primary->index;
__entry->vm = vm;
),

- TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
+ TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);

TRACE_EVENT(i915_gem_ring_sync_to,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9b8a7c7ea7fc..963639d9049b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10771,6 +10771,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
DRM_INFO("applying inverted panel brightness quirk\n");
}

+/*
+ * Some machines (Dell XPS13) suffer broken backlight controls if
+ * BLM_PCH_PWM_ENABLE is set.
+ */
+static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
+ DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -10839,6 +10850,11 @@ static struct intel_quirk intel_quirks[] = {

/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+ /* Dell XPS13 HD Sandy Bridge */
+ { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
+ /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+ { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
};

static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 079ea38f14d9..9f1d7a9300e8 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -671,6 +671,10 @@ static void pch_enable_backlight(struct intel_connector *connector)
pch_ctl2 = panel->backlight.max << 16;
I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);

+ /* XXX: transitional */
+ if (dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)
+ return;
+
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 22cf0f4ba248..99faac555d3b 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1536,9 +1536,14 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
/*
* If the device type is not TV, continue.
*/
- if (p_child->old.device_type != DEVICE_TYPE_INT_TV &&
- p_child->old.device_type != DEVICE_TYPE_TV)
+ switch (p_child->old.device_type) {
+ case DEVICE_TYPE_INT_TV:
+ case DEVICE_TYPE_TV:
+ case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
+ break;
+ default:
continue;
+ }
/* Only when the addin_offset is non-zero, it is regarded
* as present.
*/
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index c7e7e6590c2b..c82c1d6a965a 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -433,6 +433,7 @@ static int qxl_sync_obj_flush(void *sync_obj)

static void qxl_sync_obj_unref(void **sync_obj)
{
+ *sync_obj = NULL;
}

static void *qxl_sync_obj_ref(void *sync_obj)
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index bbb17841a9e5..986f9e7364ca 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1095,7 +1095,7 @@ static const u32 spectre_golden_registers[] =
0x8a14, 0xf000003f, 0x00000007,
0x8b24, 0xffffffff, 0x00ffffff,
0x28350, 0x3f3f3fff, 0x00000082,
- 0x28355, 0x0000003f, 0x00000000,
+ 0x28354, 0x0000003f, 0x00000000,
0x3e78, 0x00000001, 0x00000002,
0x913c, 0xffff03df, 0x00000004,
0xc768, 0x00000008, 0x00000008,
@@ -6521,8 +6521,8 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
buffer[count++] = cpu_to_le32(0x00000000);
break;
case CHIP_HAWAII:
- buffer[count++] = 0x3a00161a;
- buffer[count++] = 0x0000002e;
+ buffer[count++] = cpu_to_le32(0x3a00161a);
+ buffer[count++] = cpu_to_le32(0x0000002e);
break;
default:
buffer[count++] = cpu_to_le32(0x00000000);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index fbd8b930f2be..6e6f82c53fef 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -792,6 +792,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
if (radeon_connector->edid) {
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
+ drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
return ret;
}
drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 15e44a7281ab..8391fca52a58 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -257,6 +257,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
r = radeon_ib_test(rdev, i, ring);
if (r) {
ring->ready = false;
+ rdev->needs_reset = false;

if (i == RADEON_RING_TYPE_GFX_INDEX) {
/* oh, oh, that's really bad */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 1e80152674b5..4fbcfdb02360 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -117,10 +117,10 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
(void) vmw_context_binding_state_kill
(&container_of(res, struct vmw_user_context, res)->cbs);
(void) vmw_gb_context_destroy(res);
+ mutex_unlock(&dev_priv->binding_mutex);
if (dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
- mutex_unlock(&dev_priv->binding_mutex);
mutex_unlock(&dev_priv->cmdbuf_mutex);
return;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index efb575a7996c..a3480c13eb1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1214,14 +1214,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
+ SVGA3dCmdSurfaceDMASuffix *suffix;
+ uint32_t bo_size;

cmd = container_of(header, struct vmw_dma_cmd, header);
+ suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
+ header->size - sizeof(*suffix));
+
+ /* Make sure device and verifier stays in sync. */
+ if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
+ DRM_ERROR("Invalid DMA suffix size.\n");
+ return -EINVAL;
+ }
+
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->dma.guest.ptr,
&vmw_bo);
if (unlikely(ret != 0))
return ret;

+ /* Make sure DMA doesn't cross BO boundaries. */
+ bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+ if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
+ DRM_ERROR("Invalid DMA offset.\n");
+ return -EINVAL;
+ }
+
+ bo_size -= cmd->dma.guest.ptr.offset;
+ if (unlikely(suffix->maximumOffset > bo_size))
+ suffix->maximumOffset = bo_size;
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->dma.host.sid,
NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index ed5ce2a41bbf..021b5227e783 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -147,7 +147,7 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
}

if (!vmw_kms_validate_mode_vram(vmw_priv,
- info->fix.line_length,
+ var->xres * var->bits_per_pixel/8,
var->yoffset + var->yres)) {
DRM_ERROR("Requested geom can not fit in framebuffer\n");
return -EINVAL;
@@ -162,6 +162,8 @@ static int vmw_fb_set_par(struct fb_info *info)
struct vmw_private *vmw_priv = par->vmw_priv;
int ret;

+ info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
+
ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
info->fix.line_length,
par->bpp, par->depth);
@@ -177,6 +179,7 @@ static int vmw_fb_set_par(struct fb_info *info)
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
+ vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
}

diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index cc32a6f96c64..8a5384ce0352 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -718,6 +718,9 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
case HID_MAIN_ITEM_TAG_END_COLLECTION:
break;
case HID_MAIN_ITEM_TAG_INPUT:
+ /* ignore constant inputs, they will be ignored by hid-input */
+ if (data & HID_MAIN_ITEM_CONSTANT)
+ break;
for (i = 0; i < parser->local.usage_index; i++)
hid_scan_input_usage(parser, parser->local.usage[i]);
break;
@@ -1780,8 +1783,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 22f28d6b33a8..6e12cd0317f6 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -624,8 +624,6 @@
#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
-#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
-#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9

#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -830,6 +828,7 @@
#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710

#define USB_VENDOR_ID_THINGM 0x27b8
#define USB_DEVICE_ID_BLINK1 0x01ed
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 404a3a8a82f1..c6ef6eed3091 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -208,10 +208,6 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
.driver_data = MS_DUPLICATE_USAGES },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2),
- .driver_data = 0 },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2),
- .driver_data = 0 },

{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
.driver_data = MS_PRESENTER },
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index dbd83878ff99..8e4ddb369883 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -119,6 +119,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },

{ 0, 0 }
};
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4a6ca1cb2e78..56e24c072b62 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -97,6 +97,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
struct pool_info *pi = data;
struct r1bio *r1_bio;
struct bio *bio;
+ int need_pages;
int i, j;

r1_bio = r1bio_pool_alloc(gfp_flags, pi);
@@ -119,15 +120,15 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
* RESYNC_PAGES for each bio.
*/
if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
- j = pi->raid_disks;
+ need_pages = pi->raid_disks;
else
- j = 1;
- while(j--) {
+ need_pages = 1;
+ for (j = 0; j < need_pages; j++) {
bio = r1_bio->bios[j];
bio->bi_vcnt = RESYNC_PAGES;

if (bio_alloc_pages(bio, gfp_flags))
- goto out_free_bio;
+ goto out_free_pages;
}
/* If not user-requests, copy the page pointers to all bios */
if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
@@ -141,6 +142,14 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)

return r1_bio;

+out_free_pages:
+ while (--j >= 0) {
+ struct bio_vec *bv;
+
+ bio_for_each_segment_all(bv, r1_bio->bios[j], i)
+ __free_page(bv->bv_page);
+ }
+
out_free_bio:
while (++j < pi->raid_disks)
bio_put(r1_bio->bios[j]);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e5628fc725c3..91ec8cd12478 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4536,6 +4536,7 @@ static int __init bonding_init(void)
out:
return res;
err:
+ bond_destroy_debugfs();
bond_netlink_fini();
err_link:
unregister_pernet_subsys(&bond_net_ops);
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 298c26509095..a937a37ae89f 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -70,6 +70,7 @@ static struct bond_opt_value bond_fail_over_mac_tbl[] = {
static struct bond_opt_value bond_intmax_tbl[] = {
{ "off", 0, BOND_VALFLAG_DEFAULT},
{ "maxval", INT_MAX, BOND_VALFLAG_MAX},
+ { NULL, -1, 0}
};

static struct bond_opt_value bond_lacp_rate_tbl[] = {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 70a225c8df5c..a210766279d3 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12294,7 +12294,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (tg3_flag(tp, MAX_RXPEND_64) &&
tp->rx_pending > 63)
tp->rx_pending = 63;
- tp->rx_jumbo_pending = ering->rx_jumbo_pending;
+
+ if (tg3_flag(tp, JUMBO_RING_ENABLE))
+ tp->rx_jumbo_pending = ering->rx_jumbo_pending;

for (i = 0; i < tp->irq_max; i++)
tp->napi[i].tx_pending = ering->tx_pending;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 70e95324a97d..c2cd8d31bcad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -66,7 +66,6 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,

cq->ring = ring;
cq->is_tx = mode;
- spin_lock_init(&cq->lock);

/* Allocate HW buffers on provided NUMA node.
* dev->numa_node is used in mtt range allocation flow.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 84a96f70dfb5..2f83f3489fdb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1315,15 +1315,11 @@ static void mlx4_en_netpoll(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_cq *cq;
- unsigned long flags;
int i;

for (i = 0; i < priv->rx_ring_num; i++) {
cq = priv->rx_cq[i];
- spin_lock_irqsave(&cq->lock, flags);
- napi_synchronize(&cq->napi);
- mlx4_en_process_rx_cq(dev, cq, 0);
- spin_unlock_irqrestore(&cq->lock, flags);
+ napi_schedule(&cq->napi);
}
}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d413e60071d4..95c316bb7a42 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2399,7 +2399,8 @@ slave_start:
* No return code for this call, just warn the user in case of PCI
* express device capabilities are under-satisfied by the bus.
*/
- mlx4_check_pcie_caps(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_check_pcie_caps(dev);

/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index b57e8c87a34e..72db9bedd765 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -309,7 +309,6 @@ struct mlx4_en_cq {
struct mlx4_cq mcq;
struct mlx4_hwq_resources wqres;
int ring;
- spinlock_t lock;
struct net_device *dev;
struct napi_struct napi;
int size;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 79226b19e3c4..cb3fb9dba8fb 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
efx->net_dev->rx_cpu_rmap = NULL;
#endif

- /* Disable MSI/MSI-X interrupts */
- efx_for_each_channel(channel, efx)
- free_irq(channel->irq, &efx->msi_context[channel->channel]);
-
- /* Disable legacy interrupt */
- if (efx->legacy_irq)
+ if (EFX_INT_MODE_USE_MSI(efx)) {
+ /* Disable MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx)
+ free_irq(channel->irq,
+ &efx->msi_context[channel->channel]);
+ } else {
+ /* Disable legacy interrupt */
free_irq(efx->legacy_irq, efx);
+ }
}

/* Register dump */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 1831fb7cd017..20bb66944c4a 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -263,11 +263,9 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
const struct macvlan_dev *vlan = netdev_priv(dev);
const struct macvlan_port *port = vlan->port;
const struct macvlan_dev *dest;
- __u8 ip_summed = skb->ip_summed;

if (vlan->mode == MACVLAN_MODE_BRIDGE) {
const struct ethhdr *eth = (void *)skb->data;
- skb->ip_summed = CHECKSUM_UNNECESSARY;

/* send to other bridge ports directly */
if (is_multicast_ether_addr(eth->h_dest)) {
@@ -285,7 +283,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
}

xmit_world:
- skb->ip_summed = ip_summed;
skb->dev = vlan->lowerdev;
return dev_queue_xmit(skb);
}
@@ -461,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;

- if (change & IFF_ALLMULTI)
- dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ }
}

static void macvlan_set_mac_lists(struct net_device *dev)
@@ -518,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
#define MACVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))

+static int macvlan_get_nest_level(struct net_device *dev)
+{
+ return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+}
+
static void macvlan_set_lockdep_class_one(struct net_device *dev,
struct netdev_queue *txq,
void *_unused)
@@ -528,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,

static void macvlan_set_lockdep_class(struct net_device *dev)
{
- lockdep_set_class(&dev->addr_list_lock,
- &macvlan_netdev_addr_lock_key);
+ lockdep_set_class_and_subclass(&dev->addr_list_lock,
+ &macvlan_netdev_addr_lock_key,
+ macvlan_get_nest_level(dev));
netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
}

@@ -731,6 +736,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_fdb_add = macvlan_fdb_add,
.ndo_fdb_del = macvlan_fdb_del,
.ndo_fdb_dump = ndo_dflt_fdb_dump,
+ .ndo_get_lock_subclass = macvlan_get_nest_level,
};

void macvlan_common_setup(struct net_device *dev)
@@ -859,6 +865,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
vlan->dev = dev;
vlan->port = port;
vlan->set_features = MACVLAN_FEATURES;
+ vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;

vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index ff111a89e17f..3381c4f91a8c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -322,6 +322,15 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
segs = nskb;
}
} else {
+ /* If we receive a partial checksum and the tap side
+ * doesn't support checksum offload, compute the checksum.
+ * Note: it doesn't matter which checksum feature to
+ * check, we either support them all or none.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ !(features & NETIF_F_ALL_CSUM) &&
+ skb_checksum_help(skb))
+ goto drop;
skb_queue_tail(&q->sk.sk_receive_queue, skb);
}

diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 2f6989b1e0dc..365375408904 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -613,8 +613,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
err = phy_init_hw(phydev);
if (err)
phy_detach(phydev);
-
- phy_resume(phydev);
+ else
+ phy_resume(phydev);

return err;
}
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index cc70ecfc7062..ad4a94e9ff57 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -429,13 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
return;

- spin_lock(&sl->lock);
+ spin_lock_bh(&sl->lock);
if (sl->xleft <= 0) {
/* Now serial buffer is almost free & we can start
* transmission of another packet */
sl->dev->stats.tx_packets++;
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- spin_unlock(&sl->lock);
+ spin_unlock_bh(&sl->lock);
sl_unlock(sl);
return;
}
@@ -443,7 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
actual = tty->ops->write(tty, sl->xhead, sl->xleft);
sl->xleft -= actual;
sl->xhead += actual;
- spin_unlock(&sl->lock);
+ spin_unlock_bh(&sl->lock);
}

static void sl_tx_timeout(struct net_device *dev)
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index c9f3281506af..2e025ddcef21 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
cdc_ncm_unbind(dev, intf);
}

+/* verify that the ethernet protocol is IPv4 or IPv6 */
+static bool is_ip_proto(__be16 proto)
+{
+ switch (proto) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ return true;
+ }
+ return false;
+}

static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
struct cdc_ncm_ctx *ctx = info->ctx;
__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
u16 tci = 0;
+ bool is_ip;
u8 *c;

if (!ctx)
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
if (skb->len <= ETH_HLEN)
goto error;

+ /* Some applications using e.g. packet sockets will
+ * bypass the VLAN acceleration and create tagged
+ * ethernet frames directly. We primarily look for
+ * the accelerated out-of-band tag, but fall back if
+ * required
+ */
+ skb_reset_mac_header(skb);
+ if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
+ __vlan_get_tag(skb, &tci) == 0) {
+ is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+ skb_pull(skb, VLAN_ETH_HLEN);
+ } else {
+ is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
+ skb_pull(skb, ETH_HLEN);
+ }
+
/* mapping VLANs to MBIM sessions:
* no tag => IPS session <0>
* 1 - 255 => IPS session <vlanid>
* 256 - 511 => DSS session <vlanid - 256>
* 512 - 4095 => unsupported, drop
*/
- vlan_get_tag(skb, &tci);
-
switch (tci & 0x0f00) {
case 0x0000: /* VLAN ID 0 - 255 */
- /* verify that datagram is IPv4 or IPv6 */
- skb_reset_mac_header(skb);
- switch (eth_hdr(skb)->h_proto) {
- case htons(ETH_P_IP):
- case htons(ETH_P_IPV6):
- break;
- default:
+ if (!is_ip)
goto error;
- }
c = (u8 *)&sign;
c[3] = tci;
break;
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
"unsupported tci=0x%04x\n", tci);
goto error;
}
- skb_pull(skb, ETH_HLEN);
}

spin_lock_bh(&ctx->mtx);
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
return;

/* need to send the NA on the VLAN dev, if any */
- if (tci)
+ rcu_read_lock();
+ if (tci) {
netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
tci);
- else
+ if (!netdev) {
+ rcu_read_unlock();
+ return;
+ }
+ } else {
netdev = dev->net;
- if (!netdev)
- return;
+ }
+ dev_hold(netdev);
+ rcu_read_unlock();

in6_dev = in6_dev_get(netdev);
if (!in6_dev)
- return;
+ goto out;
is_router = !!in6_dev->cnf.forwarding;
in6_dev_put(in6_dev);

@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
true /* solicited */,
false /* override */,
true /* inc_opt */);
+out:
+ dev_put(netdev);
}

static bool is_neigh_solicit(u8 *buf, size_t len)
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d350d2795e10..75d7d9dbbe35 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -768,7 +768,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
skb_out->len > CDC_NCM_MIN_TX_PKT)
memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
ctx->tx_max - skb_out->len);
- else if ((skb_out->len % dev->maxpacket) == 0)
+ else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
*skb_put(skb_out, 1) = 0; /* force short packet */

/* set final frame length */
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 313cb6cd4848..48c4902c0d62 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -662,6 +662,22 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
+ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
+ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
+ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
+ {QMI_FIXED_INTF(0x16d8, 0x6280, 0)}, /* CMOTech CHU-628 */
+ {QMI_FIXED_INTF(0x16d8, 0x7001, 0)}, /* CMOTech CHU-720S */
+ {QMI_FIXED_INTF(0x16d8, 0x7002, 0)}, /* CMOTech 7002 */
+ {QMI_FIXED_INTF(0x16d8, 0x7003, 4)}, /* CMOTech CHU-629K */
+ {QMI_FIXED_INTF(0x16d8, 0x7004, 3)}, /* CMOTech 7004 */
+ {QMI_FIXED_INTF(0x16d8, 0x7006, 5)}, /* CMOTech CGU-629 */
+ {QMI_FIXED_INTF(0x16d8, 0x700a, 4)}, /* CMOTech CHU-629S */
+ {QMI_FIXED_INTF(0x16d8, 0x7211, 0)}, /* CMOTech CHU-720I */
+ {QMI_FIXED_INTF(0x16d8, 0x7212, 0)}, /* CMOTech 7212 */
+ {QMI_FIXED_INTF(0x16d8, 0x7213, 0)}, /* CMOTech 7213 */
+ {QMI_FIXED_INTF(0x16d8, 0x7251, 1)}, /* CMOTech 7251 */
+ {QMI_FIXED_INTF(0x16d8, 0x7252, 1)}, /* CMOTech 7252 */
+ {QMI_FIXED_INTF(0x16d8, 0x7253, 1)}, /* CMOTech 7253 */
{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
@@ -723,16 +739,28 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
+ {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC73xx */
+ {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC73xx */
+ {QMI_FIXED_INTF(0x1199, 0x68c0, 11)}, /* Sierra Wireless MC73xx */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
+ {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
+ {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
{QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
+ {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */

/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
index 398f3d2c0a6c..a76e98eb8372 100644
--- a/drivers/net/wireless/ti/wl18xx/event.h
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -68,6 +68,26 @@ struct wl18xx_event_mailbox {

/* bitmap of inactive stations (by HLID) */
__le32 inactive_sta_bitmap;
+
+ /* rx BA win size indicated by RX_BA_WIN_SIZE_CHANGE_EVENT_ID */
+ u8 rx_ba_role_id;
+ u8 rx_ba_link_id;
+ u8 rx_ba_win_size;
+ u8 padding;
+
+ /* smart config */
+ u8 sc_ssid_len;
+ u8 sc_pwd_len;
+ u8 sc_token_len;
+ u8 padding1;
+ u8 sc_ssid[32];
+ u8 sc_pwd[32];
+ u8 sc_token[32];
+
+ /* smart config sync channel */
+ u8 sc_sync_channel;
+ u8 sc_sync_band;
+ u8 padding2[2];
} __packed;

int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index dfffd0f37916..a70692779a16 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -486,6 +486,8 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)

pthru32->dataxferaddr = kioc->buf_paddr;
if (kioc->data_dir & UIOC_WR) {
+ if (pthru32->dataxferlen > kioc->xferlen)
+ return -EINVAL;
if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
pthru32->dataxferlen)) {
return (-EFAULT);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 307a81137607..4109530e92a0 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -320,6 +320,7 @@ static void scsi_target_destroy(struct scsi_target *starget)
struct Scsi_Host *shost = dev_to_shost(dev->parent);
unsigned long flags;

+ starget->state = STARGET_DEL;
transport_destroy_device(dev);
spin_lock_irqsave(shost->host_lock, flags);
if (shost->hostt->target_destroy)
@@ -371,6 +372,37 @@ static struct scsi_target *__scsi_find_target(struct device *parent,
}

/**
+ * scsi_target_reap_ref_release - remove target from visibility
+ * @kref: the reap_ref in the target being released
+ *
+ * Called on last put of reap_ref, which is the indication that no device
+ * under this target is visible anymore, so render the target invisible in
+ * sysfs. Note: we have to be in user context here because the target reaps
+ * should be done in places where the scsi device visibility is being removed.
+ */
+static void scsi_target_reap_ref_release(struct kref *kref)
+{
+ struct scsi_target *starget
+ = container_of(kref, struct scsi_target, reap_ref);
+
+ /*
+ * if we get here and the target is still in the CREATED state that
+ * means it was allocated but never made visible (because a scan
+ * turned up no LUNs), so don't call device_del() on it.
+ */
+ if (starget->state != STARGET_CREATED) {
+ transport_remove_device(&starget->dev);
+ device_del(&starget->dev);
+ }
+ scsi_target_destroy(starget);
+}
+
+static void scsi_target_reap_ref_put(struct scsi_target *starget)
+{
+ kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
+}
+
+/**
* scsi_alloc_target - allocate a new or find an existing target
* @parent: parent of the target (need not be a scsi host)
* @channel: target channel number (zero if no channels)
@@ -392,7 +424,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
+ shost->transportt->target_size;
struct scsi_target *starget;
struct scsi_target *found_target;
- int error;
+ int error, ref_got;

starget = kzalloc(size, GFP_KERNEL);
if (!starget) {
@@ -401,7 +433,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
}
dev = &starget->dev;
device_initialize(dev);
- starget->reap_ref = 1;
+ kref_init(&starget->reap_ref);
dev->parent = get_device(parent);
dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
dev->bus = &scsi_bus_type;
@@ -441,29 +473,36 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
return starget;

found:
- found_target->reap_ref++;
+ /*
+ * release routine already fired if kref is zero, so if we can still
+ * take the reference, the target must be alive. If we can't, it must
+ * be dying and we need to wait for a new target
+ */
+ ref_got = kref_get_unless_zero(&found_target->reap_ref);
+
spin_unlock_irqrestore(shost->host_lock, flags);
- if (found_target->state != STARGET_DEL) {
+ if (ref_got) {
put_device(dev);
return found_target;
}
- /* Unfortunately, we found a dying target; need to
- * wait until it's dead before we can get a new one */
+ /*
+ * Unfortunately, we found a dying target; need to wait until it's
+ * dead before we can get a new one. There is an anomaly here. We
+ * *should* call scsi_target_reap() to balance the kref_get() of the
+ * reap_ref above. However, since the target being released, it's
+ * already invisible and the reap_ref is irrelevant. If we call
+ * scsi_target_reap() we might spuriously do another device_del() on
+ * an already invisible target.
+ */
put_device(&found_target->dev);
- flush_scheduled_work();
+ /*
+ * length of time is irrelevant here, we just want to yield the CPU
+ * for a tick to avoid busy waiting for the target to die.
+ */
+ msleep(1);
goto retry;
}

-static void scsi_target_reap_usercontext(struct work_struct *work)
-{
- struct scsi_target *starget =
- container_of(work, struct scsi_target, ew.work);
-
- transport_remove_device(&starget->dev);
- device_del(&starget->dev);
- scsi_target_destroy(starget);
-}
-
/**
* scsi_target_reap - check to see if target is in use and destroy if not
* @starget: target to be checked
@@ -474,28 +513,13 @@ static void scsi_target_reap_usercontext(struct work_struct *work)
*/
void scsi_target_reap(struct scsi_target *starget)
{
- struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
- unsigned long flags;
- enum scsi_target_state state;
- int empty = 0;
-
- spin_lock_irqsave(shost->host_lock, flags);
- state = starget->state;
- if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
- empty = 1;
- starget->state = STARGET_DEL;
- }
- spin_unlock_irqrestore(shost->host_lock, flags);
-
- if (!empty)
- return;
-
- BUG_ON(state == STARGET_DEL);
- if (state == STARGET_CREATED)
- scsi_target_destroy(starget);
- else
- execute_in_process_context(scsi_target_reap_usercontext,
- &starget->ew);
+ /*
+ * serious problem if this triggers: STARGET_DEL is only set in the if
+ * the reap_ref drops to zero, so we're trying to do another final put
+ * on an already released kref
+ */
+ BUG_ON(starget->state == STARGET_DEL);
+ scsi_target_reap_ref_put(starget);
}

/**
@@ -1532,6 +1556,10 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
}
mutex_unlock(&shost->scan_mutex);
scsi_autopm_put_target(starget);
+ /*
+ * paired with scsi_alloc_target(). Target will be destroyed unless
+ * scsi_probe_and_add_lun made an underlying device visible
+ */
scsi_target_reap(starget);
put_device(&starget->dev);

@@ -1612,8 +1640,10 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,

out_reap:
scsi_autopm_put_target(starget);
- /* now determine if the target has any children at all
- * and if not, nuke it */
+ /*
+ * paired with scsi_alloc_target(): determine if the target has
+ * any children at all and if not, nuke it
+ */
scsi_target_reap(starget);

put_device(&starget->dev);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 9117d0bf408e..665acbf83693 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -383,17 +383,14 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
{
struct scsi_device *sdev;
struct device *parent;
- struct scsi_target *starget;
struct list_head *this, *tmp;
unsigned long flags;

sdev = container_of(work, struct scsi_device, ew.work);

parent = sdev->sdev_gendev.parent;
- starget = to_scsi_target(parent);

spin_lock_irqsave(sdev->host->host_lock, flags);
- starget->reap_ref++;
list_del(&sdev->siblings);
list_del(&sdev->same_target_siblings);
list_del(&sdev->starved_entry);
@@ -413,8 +410,6 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
/* NULL queue means the device can't be used */
sdev->request_queue = NULL;

- scsi_target_reap(scsi_target(sdev));
-
kfree(sdev->inquiry);
kfree(sdev);

@@ -1071,6 +1066,13 @@ void __scsi_remove_device(struct scsi_device *sdev)
sdev->host->hostt->slave_destroy(sdev);
transport_destroy_device(dev);

+ /*
+ * Paired with the kref_get() in scsi_sysfs_initialize(). We have
+ * remoed sysfs visibility from the device, so make the target
+ * invisible if this was the last device underneath it.
+ */
+ scsi_target_reap(scsi_target(sdev));
+
put_device(dev);
}

@@ -1133,7 +1135,7 @@ void scsi_remove_target(struct device *dev)
continue;
if (starget->dev.parent == dev || &starget->dev == dev) {
/* assuming new targets arrive at the end */
- starget->reap_ref++;
+ kref_get(&starget->reap_ref);
spin_unlock_irqrestore(shost->host_lock, flags);
if (last)
scsi_target_reap(last);
@@ -1217,6 +1219,12 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
list_add_tail(&sdev->same_target_siblings, &starget->devices);
list_add_tail(&sdev->siblings, &shost->__devices);
spin_unlock_irqrestore(shost->host_lock, flags);
+ /*
+ * device can now only be removed via __scsi_remove_device() so hold
+ * the target. Target will be held in CREATED state until something
+ * beneath it becomes visible (in which case it moves to RUNNING)
+ */
+ kref_get(&starget->reap_ref);
}

int scsi_is_sdev_device(const struct device *dev)
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 77f035158d6c..db8434d3def9 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -225,14 +225,19 @@ static inline void serial_omap_enable_wakeirq(struct uart_omap_port *up,
if (enable)
enable_irq(up->wakeirq);
else
- disable_irq(up->wakeirq);
+ disable_irq_nosync(up->wakeirq);
}

static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
{
struct omap_uart_port_info *pdata = dev_get_platdata(up->dev);

+ if (enable == up->wakeups_enabled)
+ return;
+
serial_omap_enable_wakeirq(up, enable);
+ up->wakeups_enabled = enable;
+
if (!pdata || !pdata->enable_wakeup)
return;

@@ -1488,6 +1493,11 @@ static int serial_omap_suspend(struct device *dev)
uart_suspend_port(&serial_omap_reg, &up->port);
flush_work(&up->qos_work);

+ if (device_may_wakeup(dev))
+ serial_omap_enable_wakeup(up, true);
+ else
+ serial_omap_enable_wakeup(up, false);
+
return 0;
}

@@ -1495,6 +1505,9 @@ static int serial_omap_resume(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);

+ if (device_may_wakeup(dev))
+ serial_omap_enable_wakeup(up, false);
+
uart_resume_port(&serial_omap_reg, &up->port);

return 0;
@@ -1870,17 +1883,7 @@ static int serial_omap_runtime_suspend(struct device *dev)

up->context_loss_cnt = serial_omap_get_context_loss_count(up);

- if (device_may_wakeup(dev)) {
- if (!up->wakeups_enabled) {
- serial_omap_enable_wakeup(up, true);
- up->wakeups_enabled = true;
- }
- } else {
- if (up->wakeups_enabled) {
- serial_omap_enable_wakeup(up, false);
- up->wakeups_enabled = false;
- }
- }
+ serial_omap_enable_wakeup(up, true);

up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
schedule_work(&up->qos_work);
@@ -1894,6 +1897,8 @@ static int serial_omap_runtime_resume(struct device *dev)

int loss_cnt = serial_omap_get_context_loss_count(up);

+ serial_omap_enable_wakeup(up, false);
+
if (loss_cnt < 0) {
dev_dbg(dev, "serial_omap_get_context_loss_count failed : %d\n",
loss_cnt);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 900f7ff805ee..7783acabe443 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1652,13 +1652,27 @@ static const struct usb_device_id acm_ids[] = {
},
/* Motorola H24 HSPA module: */
{ USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
- { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */
- { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */
- { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */
- { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */
- { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */
- { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */
- { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d92), /* modem + diagnostics */
+ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
+ },
+ { USB_DEVICE(0x22b8, 0x2d93), /* modem + AT port */
+ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
+ },
+ { USB_DEVICE(0x22b8, 0x2d95), /* modem + AT port + diagnostics */
+ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
+ },
+ { USB_DEVICE(0x22b8, 0x2d96), /* modem + NMEA */
+ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
+ },
+ { USB_DEVICE(0x22b8, 0x2d97), /* modem + diagnostics + NMEA */
+ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
+ },
+ { USB_DEVICE(0x22b8, 0x2d99), /* modem + AT port + NMEA */
+ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
+ },
+ { USB_DEVICE(0x22b8, 0x2d9a), /* modem + AT port + diagnostics + NMEA */
+ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
+ },

{ USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
.driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 95fa1217afdd..762e4a5f5ae9 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -104,6 +104,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
{ USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
+ { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 44ab12986805..7c6e1dedeb06 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -909,6 +909,39 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
/* Cressi Devices */
{ USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
+ /* Brainboxes Devices */
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_001_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_012_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_4_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_5_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) },
1507 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) },
1508 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) },
1509 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_357_PID) },
1510 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_1_PID) },
1511 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_2_PID) },
1512 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_3_PID) },
1513 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_1_PID) },
1514 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_2_PID) },
1515 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_1_PID) },
1516 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
1517 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
1518 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
1519 { } /* Terminating entry */
1520 };
1521
1522 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
1523 index e599fbfcde5f..993c93df6874 100644
1524 --- a/drivers/usb/serial/ftdi_sio_ids.h
1525 +++ b/drivers/usb/serial/ftdi_sio_ids.h
1526 @@ -1326,3 +1326,40 @@
1527 * Manufacturer: Cressi
1528 */
1529 #define FTDI_CRESSI_PID 0x87d0
1530 +
1531 +/*
1532 + * Brainboxes devices
1533 + */
1534 +#define BRAINBOXES_VID 0x05d1
1535 +#define BRAINBOXES_VX_001_PID 0x1001 /* VX-001 ExpressCard 1 Port RS232 */
1536 +#define BRAINBOXES_VX_012_PID 0x1002 /* VX-012 ExpressCard 2 Port RS232 */
1537 +#define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */
1538 +#define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */
1539 +#define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */
1540 +#define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */
1541 +#define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */
1542 +#define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */
1543 +#define BRAINBOXES_US_606_3_PID 0x2003 /* US-606 6 Port RS232 Serial Port 5 and 6 */
1544 +#define BRAINBOXES_US_701_1_PID 0x2011 /* US-701 4xRS232 1Mbaud Port 1 and 2 */
1545 +#define BRAINBOXES_US_701_2_PID 0x2012 /* US-701 4xRS422 1Mbaud Port 3 and 4 */
1546 +#define BRAINBOXES_US_279_1_PID 0x2021 /* US-279 8xRS422 1Mbaud Port 1 and 2 */
1547 +#define BRAINBOXES_US_279_2_PID 0x2022 /* US-279 8xRS422 1Mbaud Port 3 and 4 */
1548 +#define BRAINBOXES_US_279_3_PID 0x2023 /* US-279 8xRS422 1Mbaud Port 5 and 6 */
1549 +#define BRAINBOXES_US_279_4_PID 0x2024 /* US-279 8xRS422 1Mbaud Port 7 and 8 */
1550 +#define BRAINBOXES_US_346_1_PID 0x3011 /* US-346 4xRS422/485 1Mbaud Port 1 and 2 */
1551 +#define BRAINBOXES_US_346_2_PID 0x3012 /* US-346 4xRS422/485 1Mbaud Port 3 and 4 */
1552 +#define BRAINBOXES_US_257_PID 0x5001 /* US-257 2xRS232 1Mbaud */
1553 +#define BRAINBOXES_US_313_PID 0x6001 /* US-313 2xRS422/485 1Mbaud */
1554 +#define BRAINBOXES_US_357_PID 0x7001 /* US-357 1xRS232/422/485 */
1555 +#define BRAINBOXES_US_842_1_PID 0x8001 /* US-842 8xRS422/485 1Mbaud Port 1 and 2 */
1556 +#define BRAINBOXES_US_842_2_PID 0x8002 /* US-842 8xRS422/485 1Mbaud Port 3 and 4 */
1557 +#define BRAINBOXES_US_842_3_PID 0x8003 /* US-842 8xRS422/485 1Mbaud Port 5 and 6 */
1558 +#define BRAINBOXES_US_842_4_PID 0x8004 /* US-842 8xRS422/485 1Mbaud Port 7 and 8 */
1559 +#define BRAINBOXES_US_160_1_PID 0x9001 /* US-160 16xRS232 1Mbaud Port 1 and 2 */
1560 +#define BRAINBOXES_US_160_2_PID 0x9002 /* US-160 16xRS232 1Mbaud Port 3 and 4 */
1561 +#define BRAINBOXES_US_160_3_PID 0x9003 /* US-160 16xRS232 1Mbaud Port 5 and 6 */
1562 +#define BRAINBOXES_US_160_4_PID 0x9004 /* US-160 16xRS232 1Mbaud Port 7 and 8 */
1563 +#define BRAINBOXES_US_160_5_PID 0x9005 /* US-160 16xRS232 1Mbaud Port 9 and 10 */
1564 +#define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */
1565 +#define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */
1566 +#define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */
1567 diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
1568 index a2db5be9c305..df90dae53eb9 100644
1569 --- a/drivers/usb/serial/io_ti.c
1570 +++ b/drivers/usb/serial/io_ti.c
1571 @@ -28,6 +28,7 @@
1572 #include <linux/spinlock.h>
1573 #include <linux/mutex.h>
1574 #include <linux/serial.h>
1575 +#include <linux/swab.h>
1576 #include <linux/kfifo.h>
1577 #include <linux/ioctl.h>
1578 #include <linux/firmware.h>
1579 @@ -280,7 +281,7 @@ static int read_download_mem(struct usb_device *dev, int start_address,
1580 {
1581 int status = 0;
1582 __u8 read_length;
1583 - __be16 be_start_address;
1584 + u16 be_start_address;
1585
1586 dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length);
1587
1588 @@ -296,10 +297,14 @@ static int read_download_mem(struct usb_device *dev, int start_address,
1589 if (read_length > 1) {
1590 dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, read_length);
1591 }
1592 - be_start_address = cpu_to_be16(start_address);
1593 + /*
1594 + * NOTE: Must use swab as wIndex is sent in little-endian
1595 + * byte order regardless of host byte order.
1596 + */
1597 + be_start_address = swab16((u16)start_address);
1598 status = ti_vread_sync(dev, UMPC_MEMORY_READ,
1599 (__u16)address_type,
1600 - (__force __u16)be_start_address,
1601 + be_start_address,
1602 buffer, read_length);
1603
1604 if (status) {
1605 @@ -394,7 +399,7 @@ static int write_i2c_mem(struct edgeport_serial *serial,
1606 struct device *dev = &serial->serial->dev->dev;
1607 int status = 0;
1608 int write_length;
1609 - __be16 be_start_address;
1610 + u16 be_start_address;
1611
1612 /* We can only send a maximum of 1 aligned byte page at a time */
1613
1614 @@ -409,11 +414,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
1615 __func__, start_address, write_length);
1616 usb_serial_debug_data(dev, __func__, write_length, buffer);
1617
1618 - /* Write first page */
1619 - be_start_address = cpu_to_be16(start_address);
1620 + /*
1621 + * Write first page.
1622 + *
1623 + * NOTE: Must use swab as wIndex is sent in little-endian byte order
1624 + * regardless of host byte order.
1625 + */
1626 + be_start_address = swab16((u16)start_address);
1627 status = ti_vsend_sync(serial->serial->dev,
1628 UMPC_MEMORY_WRITE, (__u16)address_type,
1629 - (__force __u16)be_start_address,
1630 + be_start_address,
1631 buffer, write_length);
1632 if (status) {
1633 dev_dbg(dev, "%s - ERROR %d\n", __func__, status);
1634 @@ -436,11 +446,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
1635 __func__, start_address, write_length);
1636 usb_serial_debug_data(dev, __func__, write_length, buffer);
1637
1638 - /* Write next page */
1639 - be_start_address = cpu_to_be16(start_address);
1640 + /*
1641 + * Write next page.
1642 + *
1643 + * NOTE: Must use swab as wIndex is sent in little-endian byte
1644 + * order regardless of host byte order.
1645 + */
1646 + be_start_address = swab16((u16)start_address);
1647 status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
1648 (__u16)address_type,
1649 - (__force __u16)be_start_address,
1650 + be_start_address,
1651 buffer, write_length);
1652 if (status) {
1653 dev_err(dev, "%s - ERROR %d\n", __func__, status);
1654 @@ -585,8 +600,8 @@ static int get_descriptor_addr(struct edgeport_serial *serial,
1655 if (rom_desc->Type == desc_type)
1656 return start_address;
1657
1658 - start_address = start_address + sizeof(struct ti_i2c_desc)
1659 - + rom_desc->Size;
1660 + start_address = start_address + sizeof(struct ti_i2c_desc) +
1661 + le16_to_cpu(rom_desc->Size);
1662
1663 } while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);
1664
1665 @@ -599,7 +614,7 @@ static int valid_csum(struct ti_i2c_desc *rom_desc, __u8 *buffer)
1666 __u16 i;
1667 __u8 cs = 0;
1668
1669 - for (i = 0; i < rom_desc->Size; i++)
1670 + for (i = 0; i < le16_to_cpu(rom_desc->Size); i++)
1671 cs = (__u8)(cs + buffer[i]);
1672
1673 if (cs != rom_desc->CheckSum) {
1674 @@ -650,7 +665,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
1675 break;
1676
1677 if ((start_address + sizeof(struct ti_i2c_desc) +
1678 - rom_desc->Size) > TI_MAX_I2C_SIZE) {
1679 + le16_to_cpu(rom_desc->Size)) > TI_MAX_I2C_SIZE) {
1680 status = -ENODEV;
1681 dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__);
1682 break;
1683 @@ -665,7 +680,8 @@ static int check_i2c_image(struct edgeport_serial *serial)
1684 /* Read the descriptor data */
1685 status = read_rom(serial, start_address +
1686 sizeof(struct ti_i2c_desc),
1687 - rom_desc->Size, buffer);
1688 + le16_to_cpu(rom_desc->Size),
1689 + buffer);
1690 if (status)
1691 break;
1692
1693 @@ -674,7 +690,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
1694 break;
1695 }
1696 start_address = start_address + sizeof(struct ti_i2c_desc) +
1697 - rom_desc->Size;
1698 + le16_to_cpu(rom_desc->Size);
1699
1700 } while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
1701 (start_address < TI_MAX_I2C_SIZE));
1702 @@ -712,7 +728,7 @@ static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer)
1703
1704 /* Read the descriptor data */
1705 status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
1706 - rom_desc->Size, buffer);
1707 + le16_to_cpu(rom_desc->Size), buffer);
1708 if (status)
1709 goto exit;
1710
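The io_ti.c hunks replace cpu_to_be16() with swab16() because, per the added NOTE comments, the USB core transmits wIndex in little-endian byte order regardless of host endianness; a value the device must see big-endian therefore needs exactly one unconditional byte swap, not a host-dependent one. A stand-alone illustration (swab16() reimplemented here; the kernel's lives in <linux/swab.h>):

	#include <stdio.h>
	#include <stdint.h>

	static uint16_t my_swab16(uint16_t x)
	{
		return (uint16_t)((x << 8) | (x >> 8));
	}

	int main(void)
	{
		uint16_t start_address = 0x1234;
		uint16_t windex = my_swab16(start_address);	/* 0x3412 */

		/* The core sends the least-significant byte of wIndex first,
		 * so the device receives 0x12 then 0x34: big-endian order,
		 * whatever the host's own endianness is. */
		printf("wire bytes: %02x %02x\n", windex & 0xff, windex >> 8);
		return 0;
	}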
1711 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1712 index 68fc9fe65936..f213ee978516 100644
1713 --- a/drivers/usb/serial/option.c
1714 +++ b/drivers/usb/serial/option.c
1715 @@ -234,8 +234,31 @@ static void option_instat_callback(struct urb *urb);
1716 #define QUALCOMM_VENDOR_ID 0x05C6
1717
1718 #define CMOTECH_VENDOR_ID 0x16d8
1719 -#define CMOTECH_PRODUCT_6008 0x6008
1720 -#define CMOTECH_PRODUCT_6280 0x6280
1721 +#define CMOTECH_PRODUCT_6001 0x6001
1722 +#define CMOTECH_PRODUCT_CMU_300 0x6002
1723 +#define CMOTECH_PRODUCT_6003 0x6003
1724 +#define CMOTECH_PRODUCT_6004 0x6004
1725 +#define CMOTECH_PRODUCT_6005 0x6005
1726 +#define CMOTECH_PRODUCT_CGU_628A 0x6006
1727 +#define CMOTECH_PRODUCT_CHE_628S 0x6007
1728 +#define CMOTECH_PRODUCT_CMU_301 0x6008
1729 +#define CMOTECH_PRODUCT_CHU_628 0x6280
1730 +#define CMOTECH_PRODUCT_CHU_628S 0x6281
1731 +#define CMOTECH_PRODUCT_CDU_680 0x6803
1732 +#define CMOTECH_PRODUCT_CDU_685A 0x6804
1733 +#define CMOTECH_PRODUCT_CHU_720S 0x7001
1734 +#define CMOTECH_PRODUCT_7002 0x7002
1735 +#define CMOTECH_PRODUCT_CHU_629K 0x7003
1736 +#define CMOTECH_PRODUCT_7004 0x7004
1737 +#define CMOTECH_PRODUCT_7005 0x7005
1738 +#define CMOTECH_PRODUCT_CGU_629 0x7006
1739 +#define CMOTECH_PRODUCT_CHU_629S 0x700a
1740 +#define CMOTECH_PRODUCT_CHU_720I 0x7211
1741 +#define CMOTECH_PRODUCT_7212 0x7212
1742 +#define CMOTECH_PRODUCT_7213 0x7213
1743 +#define CMOTECH_PRODUCT_7251 0x7251
1744 +#define CMOTECH_PRODUCT_7252 0x7252
1745 +#define CMOTECH_PRODUCT_7253 0x7253
1746
1747 #define TELIT_VENDOR_ID 0x1bc7
1748 #define TELIT_PRODUCT_UC864E 0x1003
1749 @@ -243,6 +266,7 @@ static void option_instat_callback(struct urb *urb);
1750 #define TELIT_PRODUCT_CC864_DUAL 0x1005
1751 #define TELIT_PRODUCT_CC864_SINGLE 0x1006
1752 #define TELIT_PRODUCT_DE910_DUAL 0x1010
1753 +#define TELIT_PRODUCT_UE910_V2 0x1012
1754 #define TELIT_PRODUCT_LE920 0x1200
1755
1756 /* ZTE PRODUCTS */
1757 @@ -286,6 +310,7 @@ static void option_instat_callback(struct urb *urb);
1758 #define ALCATEL_PRODUCT_X060S_X200 0x0000
1759 #define ALCATEL_PRODUCT_X220_X500D 0x0017
1760 #define ALCATEL_PRODUCT_L100V 0x011e
1761 +#define ALCATEL_PRODUCT_L800MA 0x0203
1762
1763 #define PIRELLI_VENDOR_ID 0x1266
1764 #define PIRELLI_PRODUCT_C100_1 0x1002
1765 @@ -348,6 +373,7 @@ static void option_instat_callback(struct urb *urb);
1766 #define OLIVETTI_PRODUCT_OLICARD100 0xc000
1767 #define OLIVETTI_PRODUCT_OLICARD145 0xc003
1768 #define OLIVETTI_PRODUCT_OLICARD200 0xc005
1769 +#define OLIVETTI_PRODUCT_OLICARD500 0xc00b
1770
1771 /* Celot products */
1772 #define CELOT_VENDOR_ID 0x211f
1773 @@ -501,6 +527,10 @@ static const struct option_blacklist_info huawei_cdc12_blacklist = {
1774 .reserved = BIT(1) | BIT(2),
1775 };
1776
1777 +static const struct option_blacklist_info net_intf0_blacklist = {
1778 + .reserved = BIT(0),
1779 +};
1780 +
1781 static const struct option_blacklist_info net_intf1_blacklist = {
1782 .reserved = BIT(1),
1783 };
1784 @@ -1034,13 +1064,53 @@ static const struct usb_device_id option_ids[] = {
1785 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1786 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1787 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1788 - { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
1789 - { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
1790 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1791 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1792 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
1793 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1794 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
1795 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
1796 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
1797 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
1798 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1799 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
1800 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1801 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
1802 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1803 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
1804 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
1805 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
1806 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
1807 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1808 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
1809 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1810 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
1811 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1812 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
1813 + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1814 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
1815 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
1816 + .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
1817 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
1818 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1819 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
1820 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1821 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
1822 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1823 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
1824 + .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1825 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
1826 + .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1827 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
1828 + .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1829 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
1830 + .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1831 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
1832 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
1833 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
1834 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
1835 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
1836 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
1837 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
1838 .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
1839 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
1840 @@ -1498,6 +1568,8 @@ static const struct usb_device_id option_ids[] = {
1841 .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
1842 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
1843 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1844 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
1845 + .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1846 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
1847 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
1848 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
1849 @@ -1543,6 +1615,9 @@ static const struct usb_device_id option_ids[] = {
1850 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
1851 .driver_info = (kernel_ulong_t)&net_intf6_blacklist
1852 },
1853 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
1854 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist
1855 + },
1856 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
1857 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1858 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
1859 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
1860 index 968a40201e5f..7ed681a714a5 100644
1861 --- a/drivers/usb/serial/qcserial.c
1862 +++ b/drivers/usb/serial/qcserial.c
1863 @@ -136,9 +136,18 @@ static const struct usb_device_id id_table[] = {
1864 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 0)}, /* Sierra Wireless MC7710 Device Management */
1865 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 2)}, /* Sierra Wireless MC7710 NMEA */
1866 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 3)}, /* Sierra Wireless MC7710 Modem */
1867 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 0)}, /* Sierra Wireless MC73xx Device Management */
1868 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 2)}, /* Sierra Wireless MC73xx NMEA */
1869 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 3)}, /* Sierra Wireless MC73xx Modem */
1870 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */
1871 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */
1872 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */
1873 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)}, /* Sierra Wireless EM7355 Device Management */
1874 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)}, /* Sierra Wireless EM7355 NMEA */
1875 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)}, /* Sierra Wireless EM7355 Modem */
1876 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)}, /* Sierra Wireless MC7305/MC7355 Device Management */
1877 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)}, /* Sierra Wireless MC7305/MC7355 NMEA */
1878 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)}, /* Sierra Wireless MC7305/MC7355 Modem */
1879 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
1880 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
1881 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
1882 diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
1883 index a9eb6221a815..6b192e602ce0 100644
1884 --- a/drivers/usb/serial/sierra.c
1885 +++ b/drivers/usb/serial/sierra.c
1886 @@ -291,7 +291,6 @@ static const struct usb_device_id id_table[] = {
1887 { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
1888 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
1889 },
1890 - { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
1891
1892 { }
1893 };
1894 diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
1895 index 7c9dc28640bb..b169b0f9b3a2 100644
1896 --- a/drivers/usb/serial/usb-serial.c
1897 +++ b/drivers/usb/serial/usb-serial.c
1898 @@ -1348,10 +1348,12 @@ static int usb_serial_register(struct usb_serial_driver *driver)
1899 static void usb_serial_deregister(struct usb_serial_driver *device)
1900 {
1901 pr_info("USB Serial deregistering driver %s\n", device->description);
1902 +
1903 mutex_lock(&table_lock);
1904 list_del(&device->driver_list);
1905 - usb_serial_bus_deregister(device);
1906 mutex_unlock(&table_lock);
1907 +
1908 + usb_serial_bus_deregister(device);
1909 }
1910
1911 /**
1912 diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
1913 index 640fe0173236..b078440e822f 100644
1914 --- a/drivers/usb/serial/usb_wwan.c
1915 +++ b/drivers/usb/serial/usb_wwan.c
1916 @@ -466,6 +466,9 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
1917 int err;
1918 int i;
1919
1920 + if (!port->bulk_in_size || !port->bulk_out_size)
1921 + return -ENODEV;
1922 +
1923 portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
1924 if (!portdata)
1925 return -ENOMEM;
1926 @@ -473,9 +476,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
1927 init_usb_anchor(&portdata->delayed);
1928
1929 for (i = 0; i < N_IN_URB; i++) {
1930 - if (!port->bulk_in_size)
1931 - break;
1932 -
1933 buffer = (u8 *)__get_free_page(GFP_KERNEL);
1934 if (!buffer)
1935 goto bail_out_error;
1936 @@ -489,9 +489,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
1937 }
1938
1939 for (i = 0; i < N_OUT_URB; i++) {
1940 - if (!port->bulk_out_size)
1941 - break;
1942 -
1943 buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
1944 if (!buffer)
1945 goto bail_out_error2;
1946 diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
1947 index 1de2a191b395..640b3cf1a338 100644
1948 --- a/drivers/xen/events/events_fifo.c
1949 +++ b/drivers/xen/events/events_fifo.c
1950 @@ -66,7 +66,22 @@ static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue);
1951 static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
1952 static unsigned event_array_pages __read_mostly;
1953
1954 +/*
1955 + * sync_set_bit() and friends must be unsigned long aligned on non-x86
1956 + * platforms.
1957 + */
1958 +#if !defined(CONFIG_X86) && BITS_PER_LONG > 32
1959 +
1960 +#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL)
1961 +#define EVTCHN_FIFO_BIT(b, w) \
1962 + (((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b)
1963 +
1964 +#else
1965 +
1966 #define BM(w) ((unsigned long *)(w))
1967 +#define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b
1968 +
1969 +#endif
1970
1971 static inline event_word_t *event_word_from_port(unsigned port)
1972 {
1973 @@ -161,33 +176,38 @@ static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
1974 static void evtchn_fifo_clear_pending(unsigned port)
1975 {
1976 event_word_t *word = event_word_from_port(port);
1977 - sync_clear_bit(EVTCHN_FIFO_PENDING, BM(word));
1978 + sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
1979 }
1980
1981 static void evtchn_fifo_set_pending(unsigned port)
1982 {
1983 event_word_t *word = event_word_from_port(port);
1984 - sync_set_bit(EVTCHN_FIFO_PENDING, BM(word));
1985 + sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
1986 }
1987
1988 static bool evtchn_fifo_is_pending(unsigned port)
1989 {
1990 event_word_t *word = event_word_from_port(port);
1991 - return sync_test_bit(EVTCHN_FIFO_PENDING, BM(word));
1992 + return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
1993 }
1994
1995 static bool evtchn_fifo_test_and_set_mask(unsigned port)
1996 {
1997 event_word_t *word = event_word_from_port(port);
1998 - return sync_test_and_set_bit(EVTCHN_FIFO_MASKED, BM(word));
1999 + return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
2000 }
2001
2002 static void evtchn_fifo_mask(unsigned port)
2003 {
2004 event_word_t *word = event_word_from_port(port);
2005 - sync_set_bit(EVTCHN_FIFO_MASKED, BM(word));
2006 + sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
2007 }
2008
2009 +static bool evtchn_fifo_is_masked(unsigned port)
2010 +{
2011 + event_word_t *word = event_word_from_port(port);
2012 + return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
2013 +}
2014 /*
2015 * Clear MASKED, spinning if BUSY is set.
2016 */
2017 @@ -211,7 +231,7 @@ static void evtchn_fifo_unmask(unsigned port)
2018 BUG_ON(!irqs_disabled());
2019
2020 clear_masked(word);
2021 - if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))) {
2022 + if (evtchn_fifo_is_pending(port)) {
2023 struct evtchn_unmask unmask = { .port = port };
2024 (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
2025 }
2026 @@ -247,7 +267,7 @@ static void handle_irq_for_port(unsigned port)
2027
2028 static void consume_one_event(unsigned cpu,
2029 struct evtchn_fifo_control_block *control_block,
2030 - unsigned priority, uint32_t *ready)
2031 + unsigned priority, unsigned long *ready)
2032 {
2033 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
2034 uint32_t head;
2035 @@ -277,10 +297,9 @@ static void consume_one_event(unsigned cpu,
2036 * copy of the ready word.
2037 */
2038 if (head == 0)
2039 - clear_bit(priority, BM(ready));
2040 + clear_bit(priority, ready);
2041
2042 - if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))
2043 - && !sync_test_bit(EVTCHN_FIFO_MASKED, BM(word)))
2044 + if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
2045 handle_irq_for_port(port);
2046
2047 q->head[priority] = head;
2048 @@ -289,7 +308,7 @@ static void consume_one_event(unsigned cpu,
2049 static void evtchn_fifo_handle_events(unsigned cpu)
2050 {
2051 struct evtchn_fifo_control_block *control_block;
2052 - uint32_t ready;
2053 + unsigned long ready;
2054 unsigned q;
2055
2056 control_block = per_cpu(cpu_control_block, cpu);
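The events_fifo.c changes deal with sync_set_bit() and friends requiring unsigned-long alignment on 64-bit non-x86 platforms, while Xen event words are 32-bit: BM() rounds the word's address down to an 8-byte boundary and EVTCHN_FIFO_BIT() adds 32 to the bit index whenever the word was the upper half of that long. A user-space demonstration of the address arithmetic (assumes a little-endian LP64 host; bit 0 stands in for EVTCHN_FIFO_PENDING):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		union { unsigned long l; uint32_t w[2]; } u = { 0 };
		uint32_t *word = &u.w[1];	/* 4-byte aligned, not 8-byte */

		/* Round down to an unsigned-long boundary, then compensate
		 * by shifting the bit index up by 32 for an upper-half word. */
		unsigned long *bm = (unsigned long *)((uintptr_t)word & ~0x7UL);
		unsigned int bit = ((uintptr_t)word & 0x4) ? 0 + 32 : 0;

		*bm |= 1UL << bit;
		printf("w[0]=%08x w[1]=%08x\n", u.w[0], u.w[1]);	/* bit 0 of w[1] set */
		return 0;
	}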
2057 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
2058 index 849f6132b327..7c6b73c72210 100644
2059 --- a/fs/cifs/cifsfs.c
2060 +++ b/fs/cifs/cifsfs.c
2061 @@ -253,6 +253,11 @@ cifs_alloc_inode(struct super_block *sb)
2062 cifs_set_oplock_level(cifs_inode, 0);
2063 cifs_inode->delete_pending = false;
2064 cifs_inode->invalid_mapping = false;
2065 + clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cifs_inode->flags);
2066 + clear_bit(CIFS_INODE_PENDING_WRITERS, &cifs_inode->flags);
2067 + clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cifs_inode->flags);
2068 + spin_lock_init(&cifs_inode->writers_lock);
2069 + cifs_inode->writers = 0;
2070 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
2071 cifs_inode->server_eof = 0;
2072 cifs_inode->uniqueid = 0;
2073 @@ -731,19 +736,26 @@ static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2074 unsigned long nr_segs, loff_t pos)
2075 {
2076 struct inode *inode = file_inode(iocb->ki_filp);
2077 + struct cifsInodeInfo *cinode = CIFS_I(inode);
2078 ssize_t written;
2079 int rc;
2080
2081 + written = cifs_get_writer(cinode);
2082 + if (written)
2083 + return written;
2084 +
2085 written = generic_file_aio_write(iocb, iov, nr_segs, pos);
2086
2087 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
2088 - return written;
2089 + goto out;
2090
2091 rc = filemap_fdatawrite(inode->i_mapping);
2092 if (rc)
2093 cifs_dbg(FYI, "cifs_file_aio_write: %d rc on %p inode\n",
2094 rc, inode);
2095
2096 +out:
2097 + cifs_put_writer(cinode);
2098 return written;
2099 }
2100
2101 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
2102 index c0f3718b77a8..30f6e9251a4a 100644
2103 --- a/fs/cifs/cifsglob.h
2104 +++ b/fs/cifs/cifsglob.h
2105 @@ -228,6 +228,8 @@ struct smb_version_operations {
2106 /* verify the message */
2107 int (*check_message)(char *, unsigned int);
2108 bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
2109 + void (*downgrade_oplock)(struct TCP_Server_Info *,
2110 + struct cifsInodeInfo *, bool);
2111 /* process transaction2 response */
2112 bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
2113 char *, int);
2114 @@ -1113,6 +1115,12 @@ struct cifsInodeInfo {
2115 unsigned int epoch; /* used to track lease state changes */
2116 bool delete_pending; /* DELETE_ON_CLOSE is set */
2117 bool invalid_mapping; /* pagecache is invalid */
2118 + unsigned long flags;
2119 +#define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */
2120 +#define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */
2121 +#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */
2122 + spinlock_t writers_lock;
2123 + unsigned int writers; /* Number of writers on this inode */
2124 unsigned long time; /* jiffies of last update of inode */
2125 u64 server_eof; /* current file size on server -- protected by i_lock */
2126 u64 uniqueid; /* server inode number */
2127 diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
2128 index acc4ee8ed075..ca7980a1e303 100644
2129 --- a/fs/cifs/cifsproto.h
2130 +++ b/fs/cifs/cifsproto.h
2131 @@ -127,6 +127,9 @@ extern u64 cifs_UnixTimeToNT(struct timespec);
2132 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
2133 int offset);
2134 extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
2135 +extern int cifs_get_writer(struct cifsInodeInfo *cinode);
2136 +extern void cifs_put_writer(struct cifsInodeInfo *cinode);
2137 +extern void cifs_done_oplock_break(struct cifsInodeInfo *cinode);
2138 extern int cifs_unlock_range(struct cifsFileInfo *cfile,
2139 struct file_lock *flock, const unsigned int xid);
2140 extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile);
2141 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2142 index 834fce759d80..87c4dd072cde 100644
2143 --- a/fs/cifs/file.c
2144 +++ b/fs/cifs/file.c
2145 @@ -2608,12 +2608,20 @@ cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2146 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2147 ssize_t written;
2148
2149 + written = cifs_get_writer(cinode);
2150 + if (written)
2151 + return written;
2152 +
2153 if (CIFS_CACHE_WRITE(cinode)) {
2154 if (cap_unix(tcon->ses) &&
2155 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
2156 - && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2157 - return generic_file_aio_write(iocb, iov, nr_segs, pos);
2158 - return cifs_writev(iocb, iov, nr_segs, pos);
2159 + && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2160 + written = generic_file_aio_write(
2161 + iocb, iov, nr_segs, pos);
2162 + goto out;
2163 + }
2164 + written = cifs_writev(iocb, iov, nr_segs, pos);
2165 + goto out;
2166 }
2167 /*
2168 * For non-oplocked files in strict cache mode we need to write the data
2169 @@ -2633,6 +2641,8 @@ cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2170 inode);
2171 cinode->oplock = 0;
2172 }
2173 +out:
2174 + cifs_put_writer(cinode);
2175 return written;
2176 }
2177
2178 @@ -3644,6 +3654,13 @@ static int cifs_launder_page(struct page *page)
2179 return rc;
2180 }
2181
2182 +static int
2183 +cifs_pending_writers_wait(void *unused)
2184 +{
2185 + schedule();
2186 + return 0;
2187 +}
2188 +
2189 void cifs_oplock_break(struct work_struct *work)
2190 {
2191 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2192 @@ -3651,8 +3668,15 @@ void cifs_oplock_break(struct work_struct *work)
2193 struct inode *inode = cfile->dentry->d_inode;
2194 struct cifsInodeInfo *cinode = CIFS_I(inode);
2195 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2196 + struct TCP_Server_Info *server = tcon->ses->server;
2197 int rc = 0;
2198
2199 + wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
2200 + cifs_pending_writers_wait, TASK_UNINTERRUPTIBLE);
2201 +
2202 + server->ops->downgrade_oplock(server, cinode,
2203 + test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
2204 +
2205 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
2206 cifs_has_mand_locks(cinode)) {
2207 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
2208 @@ -3689,6 +3713,7 @@ void cifs_oplock_break(struct work_struct *work)
2209 cinode);
2210 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
2211 }
2212 + cifs_done_oplock_break(cinode);
2213 }
2214
2215 /*
2216 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
2217 index 2f9f3790679d..3b0c62e622da 100644
2218 --- a/fs/cifs/misc.c
2219 +++ b/fs/cifs/misc.c
2220 @@ -466,8 +466,22 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
2221 cifs_dbg(FYI, "file id match, oplock break\n");
2222 pCifsInode = CIFS_I(netfile->dentry->d_inode);
2223
2224 - cifs_set_oplock_level(pCifsInode,
2225 - pSMB->OplockLevel ? OPLOCK_READ : 0);
2226 + set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
2227 + &pCifsInode->flags);
2228 +
2229 + /*
2230 + * Set flag if the server downgrades the oplock
2231 + * to L2 else clear.
2232 + */
2233 + if (pSMB->OplockLevel)
2234 + set_bit(
2235 + CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
2236 + &pCifsInode->flags);
2237 + else
2238 + clear_bit(
2239 + CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
2240 + &pCifsInode->flags);
2241 +
2242 queue_work(cifsiod_wq,
2243 &netfile->oplock_break);
2244 netfile->oplock_break_cancelled = false;
2245 @@ -551,6 +565,62 @@ void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
2246 cinode->oplock = 0;
2247 }
2248
2249 +static int
2250 +cifs_oplock_break_wait(void *unused)
2251 +{
2252 + schedule();
2253 + return signal_pending(current) ? -ERESTARTSYS : 0;
2254 +}
2255 +
2256 +/*
2257 + * We wait for oplock breaks to be processed before we attempt to perform
2258 + * writes.
2259 + */
2260 +int cifs_get_writer(struct cifsInodeInfo *cinode)
2261 +{
2262 + int rc;
2263 +
2264 +start:
2265 + rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
2266 + cifs_oplock_break_wait, TASK_KILLABLE);
2267 + if (rc)
2268 + return rc;
2269 +
2270 + spin_lock(&cinode->writers_lock);
2271 + if (!cinode->writers)
2272 + set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
2273 + cinode->writers++;
2274 + /* Check to see if we have started servicing an oplock break */
2275 + if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
2276 + cinode->writers--;
2277 + if (cinode->writers == 0) {
2278 + clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
2279 + wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
2280 + }
2281 + spin_unlock(&cinode->writers_lock);
2282 + goto start;
2283 + }
2284 + spin_unlock(&cinode->writers_lock);
2285 + return 0;
2286 +}
2287 +
2288 +void cifs_put_writer(struct cifsInodeInfo *cinode)
2289 +{
2290 + spin_lock(&cinode->writers_lock);
2291 + cinode->writers--;
2292 + if (cinode->writers == 0) {
2293 + clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
2294 + wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
2295 + }
2296 + spin_unlock(&cinode->writers_lock);
2297 +}
2298 +
2299 +void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
2300 +{
2301 + clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
2302 + wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
2303 +}
2304 +
2305 bool
2306 backup_cred(struct cifs_sb_info *cifs_sb)
2307 {
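The new cifs_get_writer()/cifs_put_writer()/cifs_done_oplock_break() trio serializes writers against oplock-break handling: a writer waits out any pending break before raising the writer count, and the break worker in file.c waits for PENDING_WRITERS to clear before downgrading. A user-space analogue of the handshake, with one mutex and condition variable standing in for writers_lock plus the wait_on_bit()/wake_up_bit() pairs (simplified: the kernel version re-checks in a loop because it cannot sleep under the spinlock):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static bool break_pending;	/* CIFS_INODE_PENDING_OPLOCK_BREAK */
	static unsigned int writers;	/* cinode->writers */

	static void get_writer(void)	/* cifs_get_writer() analogue */
	{
		pthread_mutex_lock(&lock);
		while (break_pending)	/* writes wait for the break to finish */
			pthread_cond_wait(&cond, &lock);
		writers++;
		pthread_mutex_unlock(&lock);
	}

	static void put_writer(void)	/* cifs_put_writer() analogue */
	{
		pthread_mutex_lock(&lock);
		if (--writers == 0)	/* last writer wakes the break worker */
			pthread_cond_broadcast(&cond);
		pthread_mutex_unlock(&lock);
	}

	static void oplock_break(void)	/* cifs_oplock_break() analogue */
	{
		pthread_mutex_lock(&lock);
		break_pending = true;
		while (writers)		/* drain in-flight writers first */
			pthread_cond_wait(&cond, &lock);
		/* ... downgrade the oplock here ... */
		break_pending = false;	/* cifs_done_oplock_break() */
		pthread_cond_broadcast(&cond);
		pthread_mutex_unlock(&lock);
	}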
2308 diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
2309 index 526fb89f9230..d1fdfa848703 100644
2310 --- a/fs/cifs/smb1ops.c
2311 +++ b/fs/cifs/smb1ops.c
2312 @@ -372,6 +372,16 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
2313 return 0;
2314 }
2315
2316 +static void
2317 +cifs_downgrade_oplock(struct TCP_Server_Info *server,
2318 + struct cifsInodeInfo *cinode, bool set_level2)
2319 +{
2320 + if (set_level2)
2321 + cifs_set_oplock_level(cinode, OPLOCK_READ);
2322 + else
2323 + cifs_set_oplock_level(cinode, 0);
2324 +}
2325 +
2326 static bool
2327 cifs_check_trans2(struct mid_q_entry *mid, struct TCP_Server_Info *server,
2328 char *buf, int malformed)
2329 @@ -1019,6 +1029,7 @@ struct smb_version_operations smb1_operations = {
2330 .clear_stats = cifs_clear_stats,
2331 .print_stats = cifs_print_stats,
2332 .is_oplock_break = is_valid_oplock_break,
2333 + .downgrade_oplock = cifs_downgrade_oplock,
2334 .check_trans2 = cifs_check_trans2,
2335 .need_neg = cifs_need_neg,
2336 .negotiate = cifs_negotiate,
2337 diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
2338 index fb3966265b6e..b8021fde987d 100644
2339 --- a/fs/cifs/smb2misc.c
2340 +++ b/fs/cifs/smb2misc.c
2341 @@ -575,9 +575,21 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
2342 else
2343 cfile->oplock_break_cancelled = false;
2344
2345 - server->ops->set_oplock_level(cinode,
2346 - rsp->OplockLevel ? SMB2_OPLOCK_LEVEL_II : 0,
2347 - 0, NULL);
2348 + set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
2349 + &cinode->flags);
2350 +
2351 + /*
2352 + * Set flag if the server downgrades the oplock
2353 + * to L2 else clear.
2354 + */
2355 + if (rsp->OplockLevel)
2356 + set_bit(
2357 + CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
2358 + &cinode->flags);
2359 + else
2360 + clear_bit(
2361 + CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
2362 + &cinode->flags);
2363
2364 queue_work(cifsiod_wq, &cfile->oplock_break);
2365
2366 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2367 index 192f51a12cf1..35ddc3ed119d 100644
2368 --- a/fs/cifs/smb2ops.c
2369 +++ b/fs/cifs/smb2ops.c
2370 @@ -905,6 +905,17 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
2371 }
2372
2373 static void
2374 +smb2_downgrade_oplock(struct TCP_Server_Info *server,
2375 + struct cifsInodeInfo *cinode, bool set_level2)
2376 +{
2377 + if (set_level2)
2378 + server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
2379 + 0, NULL);
2380 + else
2381 + server->ops->set_oplock_level(cinode, 0, 0, NULL);
2382 +}
2383 +
2384 +static void
2385 smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
2386 unsigned int epoch, bool *purge_cache)
2387 {
2388 @@ -1110,6 +1121,7 @@ struct smb_version_operations smb20_operations = {
2389 .clear_stats = smb2_clear_stats,
2390 .print_stats = smb2_print_stats,
2391 .is_oplock_break = smb2_is_valid_oplock_break,
2392 + .downgrade_oplock = smb2_downgrade_oplock,
2393 .need_neg = smb2_need_neg,
2394 .negotiate = smb2_negotiate,
2395 .negotiate_wsize = smb2_negotiate_wsize,
2396 @@ -1184,6 +1196,7 @@ struct smb_version_operations smb21_operations = {
2397 .clear_stats = smb2_clear_stats,
2398 .print_stats = smb2_print_stats,
2399 .is_oplock_break = smb2_is_valid_oplock_break,
2400 + .downgrade_oplock = smb2_downgrade_oplock,
2401 .need_neg = smb2_need_neg,
2402 .negotiate = smb2_negotiate,
2403 .negotiate_wsize = smb2_negotiate_wsize,
2404 @@ -1259,6 +1272,7 @@ struct smb_version_operations smb30_operations = {
2405 .print_stats = smb2_print_stats,
2406 .dump_share_caps = smb2_dump_share_caps,
2407 .is_oplock_break = smb2_is_valid_oplock_break,
2408 + .downgrade_oplock = smb2_downgrade_oplock,
2409 .need_neg = smb2_need_neg,
2410 .negotiate = smb2_negotiate,
2411 .negotiate_wsize = smb2_negotiate_wsize,
2412 diff --git a/fs/file_table.c b/fs/file_table.c
2413 index 5b24008ea4f6..79ecae62209a 100644
2414 --- a/fs/file_table.c
2415 +++ b/fs/file_table.c
2416 @@ -209,10 +209,10 @@ static void drop_file_write_access(struct file *file)
2417 struct dentry *dentry = file->f_path.dentry;
2418 struct inode *inode = dentry->d_inode;
2419
2420 - put_write_access(inode);
2421 -
2422 if (special_file(inode->i_mode))
2423 return;
2424 +
2425 + put_write_access(inode);
2426 if (file_check_writeable(file) != 0)
2427 return;
2428 __mnt_drop_write(mnt);
2429 diff --git a/fs/open.c b/fs/open.c
2430 index b9ed8b25c108..2ed7325f713e 100644
2431 --- a/fs/open.c
2432 +++ b/fs/open.c
2433 @@ -641,23 +641,12 @@ out:
2434 static inline int __get_file_write_access(struct inode *inode,
2435 struct vfsmount *mnt)
2436 {
2437 - int error;
2438 - error = get_write_access(inode);
2439 + int error = get_write_access(inode);
2440 if (error)
2441 return error;
2442 - /*
2443 - * Do not take mount writer counts on
2444 - * special files since no writes to
2445 - * the mount itself will occur.
2446 - */
2447 - if (!special_file(inode->i_mode)) {
2448 - /*
2449 - * Balanced in __fput()
2450 - */
2451 - error = __mnt_want_write(mnt);
2452 - if (error)
2453 - put_write_access(inode);
2454 - }
2455 + error = __mnt_want_write(mnt);
2456 + if (error)
2457 + put_write_access(inode);
2458 return error;
2459 }
2460
2461 @@ -690,12 +679,11 @@ static int do_dentry_open(struct file *f,
2462
2463 path_get(&f->f_path);
2464 inode = f->f_inode = f->f_path.dentry->d_inode;
2465 - if (f->f_mode & FMODE_WRITE) {
2466 + if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
2467 error = __get_file_write_access(inode, f->f_path.mnt);
2468 if (error)
2469 goto cleanup_file;
2470 - if (!special_file(inode->i_mode))
2471 - file_take_write(f);
2472 + file_take_write(f);
2473 }
2474
2475 f->f_mapping = inode->i_mapping;
2476 @@ -742,7 +730,6 @@ static int do_dentry_open(struct file *f,
2477 cleanup_all:
2478 fops_put(f->f_op);
2479 if (f->f_mode & FMODE_WRITE) {
2480 - put_write_access(inode);
2481 if (!special_file(inode->i_mode)) {
2482 /*
2483 * We don't consider this a real
2484 @@ -750,6 +737,7 @@ cleanup_all:
2485 * because it all happened right
2486 * here, so just reset the state.
2487 */
2488 + put_write_access(inode);
2489 file_reset_write(f);
2490 __mnt_drop_write(f->f_path.mnt);
2491 }
2492 diff --git a/fs/super.c b/fs/super.c
2493 index 80d5cf2ca765..7624267b2043 100644
2494 --- a/fs/super.c
2495 +++ b/fs/super.c
2496 @@ -802,7 +802,10 @@ void emergency_remount(void)
2497
2498 static DEFINE_IDA(unnamed_dev_ida);
2499 static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
2500 -static int unnamed_dev_start = 0; /* don't bother trying below it */
2501 +/* Many userspace utilities consider an FSID of 0 invalid.
2502 + * Always return at least 1 from get_anon_bdev.
2503 + */
2504 +static int unnamed_dev_start = 1;
2505
2506 int get_anon_bdev(dev_t *p)
2507 {
2508 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
2509 index 34c7bdc06014..38a743705cc6 100644
2510 --- a/include/asm-generic/pgtable.h
2511 +++ b/include/asm-generic/pgtable.h
2512 @@ -680,24 +680,35 @@ static inline int pmd_numa(pmd_t pmd)
2513 #ifndef pte_mknonnuma
2514 static inline pte_t pte_mknonnuma(pte_t pte)
2515 {
2516 - pte = pte_clear_flags(pte, _PAGE_NUMA);
2517 - return pte_set_flags(pte, _PAGE_PRESENT|_PAGE_ACCESSED);
2518 + pteval_t val = pte_val(pte);
2519 +
2520 + val &= ~_PAGE_NUMA;
2521 + val |= (_PAGE_PRESENT|_PAGE_ACCESSED);
2522 + return __pte(val);
2523 }
2524 #endif
2525
2526 #ifndef pmd_mknonnuma
2527 static inline pmd_t pmd_mknonnuma(pmd_t pmd)
2528 {
2529 - pmd = pmd_clear_flags(pmd, _PAGE_NUMA);
2530 - return pmd_set_flags(pmd, _PAGE_PRESENT|_PAGE_ACCESSED);
2531 + pmdval_t val = pmd_val(pmd);
2532 +
2533 + val &= ~_PAGE_NUMA;
2534 + val |= (_PAGE_PRESENT|_PAGE_ACCESSED);
2535 +
2536 + return __pmd(val);
2537 }
2538 #endif
2539
2540 #ifndef pte_mknuma
2541 static inline pte_t pte_mknuma(pte_t pte)
2542 {
2543 - pte = pte_set_flags(pte, _PAGE_NUMA);
2544 - return pte_clear_flags(pte, _PAGE_PRESENT);
2545 + pteval_t val = pte_val(pte);
2546 +
2547 + val &= ~_PAGE_PRESENT;
2548 + val |= _PAGE_NUMA;
2549 +
2550 + return __pte(val);
2551 }
2552 #endif
2553
2554 @@ -716,8 +727,12 @@ static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
2555 #ifndef pmd_mknuma
2556 static inline pmd_t pmd_mknuma(pmd_t pmd)
2557 {
2558 - pmd = pmd_set_flags(pmd, _PAGE_NUMA);
2559 - return pmd_clear_flags(pmd, _PAGE_PRESENT);
2560 + pmdval_t val = pmd_val(pmd);
2561 +
2562 + val &= ~_PAGE_PRESENT;
2563 + val |= _PAGE_NUMA;
2564 +
2565 + return __pmd(val);
2566 }
2567 #endif
2568
2569 diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
2570 index 7c8b20b120ea..a9a53b12397b 100644
2571 --- a/include/linux/if_macvlan.h
2572 +++ b/include/linux/if_macvlan.h
2573 @@ -56,6 +56,7 @@ struct macvlan_dev {
2574 int numqueues;
2575 netdev_features_t tap_features;
2576 int minor;
2577 + int nest_level;
2578 };
2579
2580 static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
2581 diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
2582 index bbedfb56bd66..72ba6f5cbc8d 100644
2583 --- a/include/linux/if_vlan.h
2584 +++ b/include/linux/if_vlan.h
2585 @@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
2586 /* found in socket.c */
2587 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
2588
2589 -static inline int is_vlan_dev(struct net_device *dev)
2590 +static inline bool is_vlan_dev(struct net_device *dev)
2591 {
2592 return dev->priv_flags & IFF_802_1Q_VLAN;
2593 }
2594 @@ -158,6 +158,7 @@ struct vlan_dev_priv {
2595 #ifdef CONFIG_NET_POLL_CONTROLLER
2596 struct netpoll *netpoll;
2597 #endif
2598 + unsigned int nest_level;
2599 };
2600
2601 static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
2602 diff --git a/include/linux/net.h b/include/linux/net.h
2603 index 94734a6259a4..17d83393afcc 100644
2604 --- a/include/linux/net.h
2605 +++ b/include/linux/net.h
2606 @@ -248,24 +248,17 @@ do { \
2607 bool __net_get_random_once(void *buf, int nbytes, bool *done,
2608 struct static_key *done_key);
2609
2610 -#ifdef HAVE_JUMP_LABEL
2611 -#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
2612 - { .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
2613 -#else /* !HAVE_JUMP_LABEL */
2614 -#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
2615 -#endif /* HAVE_JUMP_LABEL */
2616 -
2617 #define net_get_random_once(buf, nbytes) \
2618 ({ \
2619 bool ___ret = false; \
2620 static bool ___done = false; \
2621 - static struct static_key ___done_key = \
2622 - ___NET_RANDOM_STATIC_KEY_INIT; \
2623 - if (!static_key_true(&___done_key)) \
2624 + static struct static_key ___once_key = \
2625 + STATIC_KEY_INIT_TRUE; \
2626 + if (static_key_true(&___once_key)) \
2627 ___ret = __net_get_random_once(buf, \
2628 nbytes, \
2629 &___done, \
2630 - &___done_key); \
2631 + &___once_key); \
2632 ___ret; \
2633 })
2634
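The net.h hunk drops the hand-rolled ___NET_RANDOM_STATIC_KEY_INIT, which reached into jump-label internals, and instead starts a stock static key as TRUE: the slow path runs until the buffer is filled, after which __net_get_random_once() arranges for the key to be switched off so later callers take the patched-out fast branch. A user-space analogue in which a plain flag stands in for the jump label:

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	static bool once_key = true;	/* STATIC_KEY_INIT_TRUE analogue */
	static bool done;

	static bool get_random_once(void *buf, int nbytes)
	{
		if (!once_key)		/* fast path once the key is off */
			return false;
		if (done)		/* raced in before the key flipped */
			return false;
		memset(buf, 0xab, nbytes);	/* stand-in for get_random_bytes() */
		done = true;
		once_key = false;	/* kernel: deferred static_key_slow_dec() */
		return true;
	}

	int main(void)
	{
		unsigned char secret[4];

		printf("first:  %d\n", get_random_once(secret, sizeof(secret)));
		printf("second: %d\n", get_random_once(secret, sizeof(secret)));
		return 0;
	}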
2635 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
2636 index daafd9561cbc..911718fa92ed 100644
2637 --- a/include/linux/netdevice.h
2638 +++ b/include/linux/netdevice.h
2639 @@ -1145,6 +1145,7 @@ struct net_device_ops {
2640 netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
2641 struct net_device *dev,
2642 void *priv);
2643 + int (*ndo_get_lock_subclass)(struct net_device *dev);
2644 };
2645
2646 /*
2647 @@ -2861,7 +2862,12 @@ static inline void netif_addr_lock(struct net_device *dev)
2648
2649 static inline void netif_addr_lock_nested(struct net_device *dev)
2650 {
2651 - spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
2652 + int subclass = SINGLE_DEPTH_NESTING;
2653 +
2654 + if (dev->netdev_ops->ndo_get_lock_subclass)
2655 + subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
2656 +
2657 + spin_lock_nested(&dev->addr_list_lock, subclass);
2658 }
2659
2660 static inline void netif_addr_lock_bh(struct net_device *dev)
2661 @@ -2988,6 +2994,14 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
2662 priv; \
2663 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
2664
2665 +void *netdev_lower_get_next(struct net_device *dev,
2666 + struct list_head **iter);
2667 +#define netdev_for_each_lower_dev(dev, ldev, iter) \
2668 + for (iter = &(dev)->adj_list.lower, \
2669 + ldev = netdev_lower_get_next(dev, &(iter)); \
2670 + ldev; \
2671 + ldev = netdev_lower_get_next(dev, &(iter)))
2672 +
2673 void *netdev_adjacent_get_private(struct list_head *adj_list);
2674 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
2675 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
2676 @@ -3003,6 +3017,8 @@ void netdev_upper_dev_unlink(struct net_device *dev,
2677 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
2678 void *netdev_lower_dev_get_private(struct net_device *dev,
2679 struct net_device *lower_dev);
2680 +int dev_get_nest_level(struct net_device *dev,
2681 + bool (*type_check)(struct net_device *dev));
2682 int skb_checksum_help(struct sk_buff *skb);
2683 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2684 netdev_features_t features, bool tx_path);
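The netdevice.h additions let stacked devices report their own lockdep subclass: each vlan/macvlan records its nesting depth (nest_level), exposes it through ndo_get_lock_subclass(), and netif_addr_lock_nested() passes it to spin_lock_nested(), so lockdep can distinguish addr_list_lock instances taken at different levels of a device stack instead of raising a false recursive-locking report. A sketch of the driver side (kernel-context fragment; example_priv and its nest_level field are hypothetical):

	#include <linux/netdevice.h>

	struct example_priv {
		int nest_level;		/* 1 on a real NIC, 2 on a vlan, ... */
	};

	/* report this device's depth so netif_addr_lock_nested() picks a
	 * subclass distinct from the devices above and below it */
	static int example_get_lock_subclass(struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);

		return priv->nest_level;
	}

	static const struct net_device_ops example_netdev_ops = {
		.ndo_get_lock_subclass	= example_get_lock_subclass,
		/* ... remaining ops ... */
	};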
2685 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
2686 index de83b4eb1642..1841b58cf173 100644
2687 --- a/include/linux/preempt.h
2688 +++ b/include/linux/preempt.h
2689 @@ -15,6 +15,8 @@
2690 */
2691 #define PREEMPT_NEED_RESCHED 0x80000000
2692
2693 +#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
2694 +
2695 #include <asm/preempt.h>
2696
2697 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
2698 diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
2699 index 8e3e66ac0a52..953937ea5233 100644
2700 --- a/include/linux/rtnetlink.h
2701 +++ b/include/linux/rtnetlink.h
2702 @@ -4,6 +4,7 @@
2703
2704 #include <linux/mutex.h>
2705 #include <linux/netdevice.h>
2706 +#include <linux/wait.h>
2707 #include <uapi/linux/rtnetlink.h>
2708
2709 extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
2710 @@ -22,6 +23,10 @@ extern void rtnl_lock(void);
2711 extern void rtnl_unlock(void);
2712 extern int rtnl_trylock(void);
2713 extern int rtnl_is_locked(void);
2714 +
2715 +extern wait_queue_head_t netdev_unregistering_wq;
2716 +extern struct mutex net_mutex;
2717 +
2718 #ifdef CONFIG_PROVE_LOCKING
2719 extern int lockdep_rtnl_is_held(void);
2720 #else
2721 diff --git a/include/linux/sched.h b/include/linux/sched.h
2722 index a781dec1cd0b..ccd0c6f24f2c 100644
2723 --- a/include/linux/sched.h
2724 +++ b/include/linux/sched.h
2725 @@ -1688,6 +1688,24 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
2726 }
2727
2728
2729 +static int pid_alive(const struct task_struct *p);
2730 +static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
2731 +{
2732 + pid_t pid = 0;
2733 +
2734 + rcu_read_lock();
2735 + if (pid_alive(tsk))
2736 + pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
2737 + rcu_read_unlock();
2738 +
2739 + return pid;
2740 +}
2741 +
2742 +static inline pid_t task_ppid_nr(const struct task_struct *tsk)
2743 +{
2744 + return task_ppid_nr_ns(tsk, &init_pid_ns);
2745 +}
2746 +
2747 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
2748 struct pid_namespace *ns)
2749 {
2750 @@ -1727,7 +1745,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
2751 *
2752 * Return: 1 if the process is alive. 0 otherwise.
2753 */
2754 -static inline int pid_alive(struct task_struct *p)
2755 +static inline int pid_alive(const struct task_struct *p)
2756 {
2757 return p->pids[PIDTYPE_PID].pid != NULL;
2758 }
2759 diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
2760 index 54f91d35e5fd..302ab805b0bb 100644
2761 --- a/include/linux/sock_diag.h
2762 +++ b/include/linux/sock_diag.h
2763 @@ -23,7 +23,7 @@ int sock_diag_check_cookie(void *sk, __u32 *cookie);
2764 void sock_diag_save_cookie(void *sk, __u32 *cookie);
2765
2766 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
2767 -int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
2768 +int sock_diag_put_filterinfo(struct sock *sk,
2769 struct sk_buff *skb, int attrtype);
2770
2771 #endif
2772 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
2773 index fddbe2023a5d..a629e4b23217 100644
2774 --- a/include/linux/thread_info.h
2775 +++ b/include/linux/thread_info.h
2776 @@ -118,8 +118,6 @@ static inline __deprecated void set_need_resched(void)
2777 */
2778 }
2779
2780 -#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
2781 -
2782 #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
2783 /*
2784 * An arch can define its own version of set_restore_sigmask() to get the
2785 diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
2786 index 7d64d3609ec9..428277869400 100644
2787 --- a/include/net/af_vsock.h
2788 +++ b/include/net/af_vsock.h
2789 @@ -155,7 +155,11 @@ struct vsock_transport {
2790
2791 /**** CORE ****/
2792
2793 -int vsock_core_init(const struct vsock_transport *t);
2794 +int __vsock_core_init(const struct vsock_transport *t, struct module *owner);
2795 +static inline int vsock_core_init(const struct vsock_transport *t)
2796 +{
2797 + return __vsock_core_init(t, THIS_MODULE);
2798 +}
2799 void vsock_core_exit(void);
2800
2801 /**** UTILS ****/
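__vsock_core_init() gains a struct module *owner, and vsock_core_init() becomes a static inline precisely so that THIS_MODULE expands in the caller: every transport that registers automatically identifies its own module, which the core can then pin while the transport is in use. The pattern in isolation (all names here are hypothetical):

	#include <linux/module.h>

	struct example_ops;

	/* core side: receives an explicit owner it can module_get()/put() */
	int __example_register(const struct example_ops *ops, struct module *owner);

	/* header side: static inline, so THIS_MODULE names the *caller's*
	 * module rather than the one implementing __example_register() */
	static inline int example_register(const struct example_ops *ops)
	{
		return __example_register(ops, THIS_MODULE);
	}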
2802 diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
2803 index 017badb1aec7..2e74c6cfa612 100644
2804 --- a/include/net/ip6_route.h
2805 +++ b/include/net/ip6_route.h
2806 @@ -32,6 +32,11 @@ struct route_info {
2807 #define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
2808 #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
2809
2810 +/* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
2811 + * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
2812 + */
2813 +#define IP6_MAX_MTU (0xFFFF + sizeof(struct ipv6hdr))
2814 +
2815 /*
2816 * rt6_srcprefs2flags() and rt6_flags2srcprefs() translate
2817 * between IPV6_ADDR_PREFERENCES socket option values
2818 diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
2819 index 956b175523ff..55d15049ab2f 100644
2820 --- a/include/net/netfilter/nf_conntrack_extend.h
2821 +++ b/include/net/netfilter/nf_conntrack_extend.h
2822 @@ -47,8 +47,8 @@ enum nf_ct_ext_id {
2823 /* Extensions: optional stuff which isn't permanently in struct. */
2824 struct nf_ct_ext {
2825 struct rcu_head rcu;
2826 - u8 offset[NF_CT_EXT_NUM];
2827 - u8 len;
2828 + u16 offset[NF_CT_EXT_NUM];
2829 + u16 len;
2830 char data[0];
2831 };
2832
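
The u8-to-u16 widening above matters because offset[] holds the byte offset of each extension inside the single data[] blob: once the running offset passes 255, a u8 wraps silently and later lookups land on the wrong extension. A sketch with hypothetical extension sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int sizes[] = { 96, 88, 104 }; /* hypothetical extension sizes */
        uint8_t  off8  = 0;
        uint16_t off16 = 0;

        for (unsigned int i = 0; i < 3; i++) {
            printf("ext %u: u8 offset=%u, u16 offset=%u\n",
                   i, (unsigned int)off8, (unsigned int)off16);
            off8  = (uint8_t)(off8 + sizes[i]);   /* wraps once past 255 */
            off16 = (uint16_t)(off16 + sizes[i]);
        }
        /* 32 (wrapped) vs 288 */
        printf("next free: u8=%u, u16=%u\n",
               (unsigned int)off8, (unsigned int)off16);
        return 0;
    }
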
2833 diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
2834 index cf2b7ae2b9d8..a75fc8e27cd6 100644
2835 --- a/include/net/netfilter/nf_tables_core.h
2836 +++ b/include/net/netfilter/nf_tables_core.h
2837 @@ -13,6 +13,16 @@ struct nft_cmp_fast_expr {
2838 u8 len;
2839 };
2840
2841 +/* Calculate the mask for the nft_cmp_fast expression. On big endian the
2842 + * mask needs to include the *upper* bytes when interpreting that data as
2843 + * something smaller than the full u32, therefore a cpu_to_le32 is done.
2844 + */
2845 +static inline u32 nft_cmp_fast_mask(unsigned int len)
2846 +{
2847 + return cpu_to_le32(~0U >> (FIELD_SIZEOF(struct nft_cmp_fast_expr,
2848 + data) * BITS_PER_BYTE - len));
2849 +}
2850 +
2851 extern const struct nft_expr_ops nft_cmp_fast_ops;
2852
2853 int nft_cmp_module_init(void);
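
nft_cmp_fast_mask() centralizes a mask computation that two later hunks in this patch (nft_cmp_fast_eval() and nft_cmp_fast_init()) previously open-coded without the byte-order correction. A userspace sketch of the computation, assuming a little-endian host for the cpu_to_le32() stand-in:

    #include <stdint.h>
    #include <stdio.h>

    /* Identity on little-endian hosts; a big-endian host would byte-swap,
     * which is exactly the correction the kernel helper adds. */
    static uint32_t cpu_to_le32_sketch(uint32_t v)
    {
        return v;
    }

    static uint32_t cmp_fast_mask(unsigned int len_bits)
    {
        /* keep the low len_bits of the 32-bit register, then fix byte order */
        return cpu_to_le32_sketch(~0U >> (32 - len_bits));
    }

    int main(void)
    {
        printf("mask for 16 bits: 0x%08x\n", cmp_fast_mask(16)); /* 0x0000ffff */
        printf("mask for  8 bits: 0x%08x\n", cmp_fast_mask(8));  /* 0x000000ff */
        return 0;
    }
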
2854 diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
2855 index 6ee76c804893..0dfcc92600e8 100644
2856 --- a/include/net/sctp/structs.h
2857 +++ b/include/net/sctp/structs.h
2858 @@ -1241,6 +1241,7 @@ struct sctp_endpoint {
2859 /* SCTP-AUTH: endpoint shared keys */
2860 struct list_head endpoint_shared_keys;
2861 __u16 active_key_id;
2862 + __u8 auth_enable;
2863 };
2864
2865 /* Recover the outer endpoint structure. */
2866 @@ -1269,7 +1270,8 @@ struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
2867 int sctp_has_association(struct net *net, const union sctp_addr *laddr,
2868 const union sctp_addr *paddr);
2869
2870 -int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
2871 +int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
2872 + const struct sctp_association *asoc,
2873 sctp_cid_t, sctp_init_chunk_t *peer_init,
2874 struct sctp_chunk *chunk, struct sctp_chunk **err_chunk);
2875 int sctp_process_init(struct sctp_association *, struct sctp_chunk *chunk,
2876 @@ -1653,6 +1655,17 @@ struct sctp_association {
2877 /* This is the last advertised value of rwnd over a SACK chunk. */
2878 __u32 a_rwnd;
2879
2880 + /* Number of bytes by which the rwnd has slopped. The rwnd is allowed
2881 + * to slop over a maximum of the association's frag_point.
2882 + */
2883 + __u32 rwnd_over;
2884 +
2885 + /* Keeps track of rwnd pressure. This happens when we have
2886 + * a window, but no receive buffer (i.e. small packets). This one
2887 + * is released slowly (1 PMTU at a time).
2888 + */
2889 + __u32 rwnd_press;
2890 +
2891 /* This is the sndbuf size in use for the association.
2892 * This corresponds to the sndbuf size for the association,
2893 * as specified in the sk->sndbuf.
2894 @@ -1881,7 +1894,8 @@ void sctp_assoc_update(struct sctp_association *old,
2895 __u32 sctp_association_get_next_tsn(struct sctp_association *);
2896
2897 void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
2898 -void sctp_assoc_rwnd_update(struct sctp_association *, bool);
2899 +void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
2900 +void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
2901 void sctp_assoc_set_primary(struct sctp_association *,
2902 struct sctp_transport *);
2903 void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
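
The new rwnd_over/rwnd_press fields and the rwnd_update-to-increase/decrease split reintroduce explicit slop accounting: a decrease past the current window is remembered in rwnd_over and paid back first by later increases. A toy sketch of that bookkeeping — field names follow the patch, but the arithmetic is illustrative, not the full sctp_assoc_rwnd_* logic:

    #include <stdio.h>

    struct toy_assoc {
        unsigned int rwnd;      /* advertised receive window */
        unsigned int rwnd_over; /* bytes the window has slopped over */
    };

    static void rwnd_decrease(struct toy_assoc *a, unsigned int len)
    {
        if (a->rwnd >= len) {
            a->rwnd -= len;
        } else {
            a->rwnd_over += len - a->rwnd; /* record the overshoot */
            a->rwnd = 0;
        }
    }

    static void rwnd_increase(struct toy_assoc *a, unsigned int len)
    {
        if (a->rwnd_over) {
            unsigned int back = len < a->rwnd_over ? len : a->rwnd_over;
            a->rwnd_over -= back;          /* pay the slop back first */
            len -= back;
        }
        a->rwnd += len;
    }

    int main(void)
    {
        struct toy_assoc a = { .rwnd = 1000, .rwnd_over = 0 };

        rwnd_decrease(&a, 1500);
        printf("rwnd=%u over=%u\n", a.rwnd, a.rwnd_over); /* rwnd=0 over=500 */
        rwnd_increase(&a, 800);
        printf("rwnd=%u over=%u\n", a.rwnd, a.rwnd_over); /* rwnd=300 over=0 */
        return 0;
    }
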
2904 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
2905 index d65fbec2533d..b4f1effc9216 100644
2906 --- a/include/scsi/scsi_device.h
2907 +++ b/include/scsi/scsi_device.h
2908 @@ -257,7 +257,7 @@ struct scsi_target {
2909 struct list_head siblings;
2910 struct list_head devices;
2911 struct device dev;
2912 - unsigned int reap_ref; /* protected by the host lock */
2913 + struct kref reap_ref; /* last put renders target invisible */
2914 unsigned int channel;
2915 unsigned int id; /* target id ... replace
2916 * scsi_device.id eventually */
2917 @@ -284,7 +284,6 @@ struct scsi_target {
2918 #define SCSI_DEFAULT_TARGET_BLOCKED 3
2919
2920 char scsi_level;
2921 - struct execute_work ew;
2922 enum scsi_target_state state;
2923 void *hostdata; /* available to low-level driver */
2924 unsigned long starget_data[0]; /* for the transport */
2925 diff --git a/include/trace/events/block.h b/include/trace/events/block.h
2926 index e76ae19a8d6f..e8a5eca1dbe5 100644
2927 --- a/include/trace/events/block.h
2928 +++ b/include/trace/events/block.h
2929 @@ -132,6 +132,7 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
2930 * block_rq_complete - block IO operation completed by device driver
2931 * @q: queue containing the block operation request
2932 * @rq: block operations request
2933 + * @nr_bytes: number of completed bytes
2934 *
2935 * The block_rq_complete tracepoint event indicates that some portion
2936 * of operation request has been completed by the device driver. If
2937 @@ -139,11 +140,37 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
2938 * do for the request. If @rq->bio is non-NULL then there is
2939 * additional work required to complete the request.
2940 */
2941 -DEFINE_EVENT(block_rq_with_error, block_rq_complete,
2942 +TRACE_EVENT(block_rq_complete,
2943
2944 - TP_PROTO(struct request_queue *q, struct request *rq),
2945 + TP_PROTO(struct request_queue *q, struct request *rq,
2946 + unsigned int nr_bytes),
2947
2948 - TP_ARGS(q, rq)
2949 + TP_ARGS(q, rq, nr_bytes),
2950 +
2951 + TP_STRUCT__entry(
2952 + __field( dev_t, dev )
2953 + __field( sector_t, sector )
2954 + __field( unsigned int, nr_sector )
2955 + __field( int, errors )
2956 + __array( char, rwbs, RWBS_LEN )
2957 + __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
2958 + ),
2959 +
2960 + TP_fast_assign(
2961 + __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
2962 + __entry->sector = blk_rq_pos(rq);
2963 + __entry->nr_sector = nr_bytes >> 9;
2964 + __entry->errors = rq->errors;
2965 +
2966 + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
2967 + blk_dump_cmd(__get_str(cmd), rq);
2968 + ),
2969 +
2970 + TP_printk("%d,%d %s (%s) %llu + %u [%d]",
2971 + MAJOR(__entry->dev), MINOR(__entry->dev),
2972 + __entry->rwbs, __get_str(cmd),
2973 + (unsigned long long)__entry->sector,
2974 + __entry->nr_sector, __entry->errors)
2975 );
2976
2977 DECLARE_EVENT_CLASS(block_rq,
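
Promoting block_rq_complete from the shared block_rq_with_error class to a standalone TRACE_EVENT lets it report how many bytes actually completed; the traced sector count is just that byte count in 512-byte units:

    #include <stdio.h>

    int main(void)
    {
        unsigned int nr_bytes  = 12288;          /* hypothetical partial completion */
        unsigned int nr_sector = nr_bytes >> 9;  /* 512-byte sectors */
        printf("%u bytes -> %u sectors\n", nr_bytes, nr_sector); /* 24 */
        return 0;
    }
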
2978 diff --git a/init/Kconfig b/init/Kconfig
2979 index d56cb03c1b49..93c5ef0c5210 100644
2980 --- a/init/Kconfig
2981 +++ b/init/Kconfig
2982 @@ -1621,6 +1621,18 @@ config MMAP_ALLOW_UNINITIALIZED
2983
2984 See Documentation/nommu-mmap.txt for more information.
2985
2986 +config SYSTEM_TRUSTED_KEYRING
2987 + bool "Provide system-wide ring of trusted keys"
2988 + depends on KEYS
2989 + help
2990 + Provide a system keyring to which trusted keys can be added. Keys in
2991 + the keyring are considered to be trusted. Keys may be added at will
2992 + by the kernel from compiled-in data and from hardware key stores, but
2993 + userspace may only add extra keys if those keys can be verified by
2994 + keys already in the keyring.
2995 +
2996 + Keys in this keyring are used by module signature checking.
2997 +
2998 config PROFILING
2999 bool "Profiling support"
3000 help
3001 @@ -1656,18 +1668,6 @@ config BASE_SMALL
3002 default 0 if BASE_FULL
3003 default 1 if !BASE_FULL
3004
3005 -config SYSTEM_TRUSTED_KEYRING
3006 - bool "Provide system-wide ring of trusted keys"
3007 - depends on KEYS
3008 - help
3009 - Provide a system keyring to which trusted keys can be added. Keys in
3010 - the keyring are considered to be trusted. Keys may be added at will
3011 - by the kernel from compiled-in data and from hardware key stores, but
3012 - userspace may only add extra keys if those keys can be verified by
3013 - keys already in the keyring.
3014 -
3015 - Keys in this keyring are used by module signature checking.
3016 -
3017 menuconfig MODULES
3018 bool "Enable loadable module support"
3019 option modules
3020 diff --git a/kernel/audit.c b/kernel/audit.c
3021 index 95a20f3f52f1..d5f31c17813a 100644
3022 --- a/kernel/audit.c
3023 +++ b/kernel/audit.c
3024 @@ -1829,10 +1829,10 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
3025 spin_unlock_irq(&tsk->sighand->siglock);
3026
3027 audit_log_format(ab,
3028 - " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
3029 + " ppid=%d pid=%d auid=%u uid=%u gid=%u"
3030 " euid=%u suid=%u fsuid=%u"
3031 " egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
3032 - sys_getppid(),
3033 + task_ppid_nr(tsk),
3034 tsk->pid,
3035 from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
3036 from_kuid(&init_user_ns, cred->uid),
3037 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
3038 index 7aef2f4b6c64..3b29605ea1b2 100644
3039 --- a/kernel/auditsc.c
3040 +++ b/kernel/auditsc.c
3041 @@ -459,7 +459,7 @@ static int audit_filter_rules(struct task_struct *tsk,
3042 case AUDIT_PPID:
3043 if (ctx) {
3044 if (!ctx->ppid)
3045 - ctx->ppid = sys_getppid();
3046 + ctx->ppid = task_ppid_nr(tsk);
3047 result = audit_comparator(ctx->ppid, f->op, f->val);
3048 }
3049 break;
3050 diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
3051 index 20b2fe37d105..0de9d7f5045c 100644
3052 --- a/kernel/time/tick-common.c
3053 +++ b/kernel/time/tick-common.c
3054 @@ -276,7 +276,7 @@ static bool tick_check_preferred(struct clock_event_device *curdev,
3055 bool tick_check_replacement(struct clock_event_device *curdev,
3056 struct clock_event_device *newdev)
3057 {
3058 - if (tick_check_percpu(curdev, newdev, smp_processor_id()))
3059 + if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
3060 return false;
3061
3062 return tick_check_preferred(curdev, newdev);
3063 diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
3064 index 9f8af69c67ec..6558b7ac112d 100644
3065 --- a/kernel/time/tick-sched.c
3066 +++ b/kernel/time/tick-sched.c
3067 @@ -84,6 +84,9 @@ static void tick_do_update_jiffies64(ktime_t now)
3068
3069 /* Keep the tick_next_period variable up to date */
3070 tick_next_period = ktime_add(last_jiffies_update, tick_period);
3071 + } else {
3072 + write_sequnlock(&jiffies_lock);
3073 + return;
3074 }
3075 write_sequnlock(&jiffies_lock);
3076 update_wall_time();
3077 @@ -967,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void)
3078 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
3079 ktime_t next;
3080
3081 - if (!tick_nohz_active)
3082 + if (!tick_nohz_enabled)
3083 return;
3084
3085 local_irq_disable();
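
The tick_do_update_jiffies64() hunk gives the no-advance path its own unlock-and-return, so update_wall_time() runs only when jiffies actually moved. A minimal sketch of the balanced control flow, with a plain mutex standing in for the kernel's jiffies seqlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static long last_update, next_period = 1;

    static void do_update(long now)
    {
        pthread_mutex_lock(&lock);
        if (now >= next_period) {
            last_update = now;
            next_period = now + 1;
        } else {
            /* nothing advanced: unlock and return, never reaching the
             * follow-up work below (the update_wall_time() analogue) */
            pthread_mutex_unlock(&lock);
            return;
        }
        pthread_mutex_unlock(&lock);
        printf("update_wall_time() at %ld\n", now);
    }

    int main(void)
    {
        do_update(1); /* advances: prints */
        do_update(1); /* no advance: silent */
        return 0;
    }
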
3086 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
3087 index b418cb0d7242..4f3a3c03eadb 100644
3088 --- a/kernel/trace/blktrace.c
3089 +++ b/kernel/trace/blktrace.c
3090 @@ -702,6 +702,7 @@ void blk_trace_shutdown(struct request_queue *q)
3091 * blk_add_trace_rq - Add a trace for a request oriented action
3092 * @q: queue the io is for
3093 * @rq: the source request
3094 + * @nr_bytes: number of completed bytes
3095 * @what: the action
3096 *
3097 * Description:
3098 @@ -709,7 +710,7 @@ void blk_trace_shutdown(struct request_queue *q)
3099 *
3100 **/
3101 static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
3102 - u32 what)
3103 + unsigned int nr_bytes, u32 what)
3104 {
3105 struct blk_trace *bt = q->blk_trace;
3106
3107 @@ -718,11 +719,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
3108
3109 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
3110 what |= BLK_TC_ACT(BLK_TC_PC);
3111 - __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
3112 + __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
3113 what, rq->errors, rq->cmd_len, rq->cmd);
3114 } else {
3115 what |= BLK_TC_ACT(BLK_TC_FS);
3116 - __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
3117 + __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
3118 rq->cmd_flags, what, rq->errors, 0, NULL);
3119 }
3120 }
3121 @@ -730,33 +731,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
3122 static void blk_add_trace_rq_abort(void *ignore,
3123 struct request_queue *q, struct request *rq)
3124 {
3125 - blk_add_trace_rq(q, rq, BLK_TA_ABORT);
3126 + blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
3127 }
3128
3129 static void blk_add_trace_rq_insert(void *ignore,
3130 struct request_queue *q, struct request *rq)
3131 {
3132 - blk_add_trace_rq(q, rq, BLK_TA_INSERT);
3133 + blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
3134 }
3135
3136 static void blk_add_trace_rq_issue(void *ignore,
3137 struct request_queue *q, struct request *rq)
3138 {
3139 - blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
3140 + blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
3141 }
3142
3143 static void blk_add_trace_rq_requeue(void *ignore,
3144 struct request_queue *q,
3145 struct request *rq)
3146 {
3147 - blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
3148 + blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
3149 }
3150
3151 static void blk_add_trace_rq_complete(void *ignore,
3152 struct request_queue *q,
3153 - struct request *rq)
3154 + struct request *rq,
3155 + unsigned int nr_bytes)
3156 {
3157 - blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
3158 + blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
3159 }
3160
3161 /**
3162 diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
3163 index 8efbb69b04f0..6d6a789e579e 100644
3164 --- a/kernel/trace/trace_events_trigger.c
3165 +++ b/kernel/trace/trace_events_trigger.c
3166 @@ -77,7 +77,7 @@ event_triggers_call(struct ftrace_event_file *file, void *rec)
3167 data->ops->func(data);
3168 continue;
3169 }
3170 - filter = rcu_dereference(data->filter);
3171 + filter = rcu_dereference_sched(data->filter);
3172 if (filter && !filter_match_preds(filter, rec))
3173 continue;
3174 if (data->cmd_ops->post_trigger) {
3175 diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
3176 index 79e52d93860b..bd0c9b133b54 100644
3177 --- a/kernel/trace/trace_uprobe.c
3178 +++ b/kernel/trace/trace_uprobe.c
3179 @@ -728,9 +728,15 @@ static int uprobe_buffer_enable(void)
3180
3181 static void uprobe_buffer_disable(void)
3182 {
3183 + int cpu;
3184 +
3185 BUG_ON(!mutex_is_locked(&event_mutex));
3186
3187 if (--uprobe_buffer_refcnt == 0) {
3188 + for_each_possible_cpu(cpu)
3189 + free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
3190 + cpu)->buf);
3191 +
3192 free_percpu(uprobe_cpu_buffer);
3193 uprobe_cpu_buffer = NULL;
3194 }
3195 diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
3196 index 031cc5655a51..63630aef3bd3 100644
3197 --- a/kernel/tracepoint.c
3198 +++ b/kernel/tracepoint.c
3199 @@ -641,6 +641,9 @@ static int tracepoint_module_coming(struct module *mod)
3200 struct tp_module *tp_mod, *iter;
3201 int ret = 0;
3202
3203 + if (!mod->num_tracepoints)
3204 + return 0;
3205 +
3206 /*
3207 * We skip modules that taint the kernel, especially those with different
3208 * module headers (for forced load), to make sure we don't cause a crash.
3209 @@ -684,6 +687,9 @@ static int tracepoint_module_going(struct module *mod)
3210 {
3211 struct tp_module *pos;
3212
3213 + if (!mod->num_tracepoints)
3214 + return 0;
3215 +
3216 mutex_lock(&tracepoints_mutex);
3217 tracepoint_update_probe_range(mod->tracepoints_ptrs,
3218 mod->tracepoints_ptrs + mod->num_tracepoints);
3219 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
3220 index 1546655a2d78..1c42d0c36d0b 100644
3221 --- a/mm/huge_memory.c
3222 +++ b/mm/huge_memory.c
3223 @@ -1611,16 +1611,23 @@ pmd_t *page_check_address_pmd(struct page *page,
3224 enum page_check_address_pmd_flag flag,
3225 spinlock_t **ptl)
3226 {
3227 + pgd_t *pgd;
3228 + pud_t *pud;
3229 pmd_t *pmd;
3230
3231 if (address & ~HPAGE_PMD_MASK)
3232 return NULL;
3233
3234 - pmd = mm_find_pmd(mm, address);
3235 - if (!pmd)
3236 + pgd = pgd_offset(mm, address);
3237 + if (!pgd_present(*pgd))
3238 return NULL;
3239 + pud = pud_offset(pgd, address);
3240 + if (!pud_present(*pud))
3241 + return NULL;
3242 + pmd = pmd_offset(pud, address);
3243 +
3244 *ptl = pmd_lock(mm, pmd);
3245 - if (pmd_none(*pmd))
3246 + if (!pmd_present(*pmd))
3247 goto unlock;
3248 if (pmd_page(*pmd) != page)
3249 goto unlock;
3250 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3251 index 2de3c845f03a..06a9bc0a3120 100644
3252 --- a/mm/hugetlb.c
3253 +++ b/mm/hugetlb.c
3254 @@ -1134,6 +1134,7 @@ static void return_unused_surplus_pages(struct hstate *h,
3255 while (nr_pages--) {
3256 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
3257 break;
3258 + cond_resched_lock(&hugetlb_lock);
3259 }
3260 }
3261
3262 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
3263 index 175273f38cb1..44ebd5c2cd4a 100644
3264 --- a/net/8021q/vlan.c
3265 +++ b/net/8021q/vlan.c
3266 @@ -169,6 +169,7 @@ int register_vlan_dev(struct net_device *dev)
3267 if (err < 0)
3268 goto out_uninit_mvrp;
3269
3270 + vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
3271 err = register_netdevice(dev);
3272 if (err < 0)
3273 goto out_uninit_mvrp;
3274 diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
3275 index 27bfe2f8e2de..cc0d21895420 100644
3276 --- a/net/8021q/vlan_dev.c
3277 +++ b/net/8021q/vlan_dev.c
3278 @@ -524,6 +524,11 @@ static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
3279 netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
3280 }
3281
3282 +static int vlan_dev_get_lock_subclass(struct net_device *dev)
3283 +{
3284 + return vlan_dev_priv(dev)->nest_level;
3285 +}
3286 +
3287 static const struct header_ops vlan_header_ops = {
3288 .create = vlan_dev_hard_header,
3289 .rebuild = vlan_dev_rebuild_header,
3290 @@ -559,7 +564,7 @@ static const struct net_device_ops vlan_netdev_ops;
3291 static int vlan_dev_init(struct net_device *dev)
3292 {
3293 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
3294 - int subclass = 0, i;
3295 + int i;
3296
3297 netif_carrier_off(dev);
3298
3299 @@ -608,10 +613,7 @@ static int vlan_dev_init(struct net_device *dev)
3300
3301 SET_NETDEV_DEVTYPE(dev, &vlan_type);
3302
3303 - if (is_vlan_dev(real_dev))
3304 - subclass = 1;
3305 -
3306 - vlan_dev_set_lockdep_class(dev, subclass);
3307 + vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
3308
3309 vlan_dev_priv(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
3310 if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
3311 @@ -791,6 +793,7 @@ static const struct net_device_ops vlan_netdev_ops = {
3312 .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
3313 #endif
3314 .ndo_fix_features = vlan_dev_fix_features,
3315 + .ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
3316 };
3317
3318 void vlan_setup(struct net_device *dev)
3319 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
3320 index 8323bced8e5b..d074d06ce094 100644
3321 --- a/net/batman-adv/bat_iv_ogm.c
3322 +++ b/net/batman-adv/bat_iv_ogm.c
3323 @@ -1545,6 +1545,8 @@ out_neigh:
3324 if ((orig_neigh_node) && (!is_single_hop_neigh))
3325 batadv_orig_node_free_ref(orig_neigh_node);
3326 out:
3327 + if (router_ifinfo)
3328 + batadv_neigh_ifinfo_free_ref(router_ifinfo);
3329 if (router)
3330 batadv_neigh_node_free_ref(router);
3331 if (router_router)
3332 diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
3333 index edee50411892..bd8219a7dddf 100644
3334 --- a/net/batman-adv/distributed-arp-table.c
3335 +++ b/net/batman-adv/distributed-arp-table.c
3336 @@ -940,8 +940,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
3337 * additional DAT answer may trigger kernel warnings about
3338 * a packet coming from the wrong port.
3339 */
3340 - if (batadv_is_my_client(bat_priv, dat_entry->mac_addr,
3341 - BATADV_NO_FLAGS)) {
3342 + if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) {
3343 ret = true;
3344 goto out;
3345 }
3346 diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
3347 index 88df9b1d552d..cc1cfd60c094 100644
3348 --- a/net/batman-adv/fragmentation.c
3349 +++ b/net/batman-adv/fragmentation.c
3350 @@ -418,12 +418,13 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
3351 struct batadv_neigh_node *neigh_node)
3352 {
3353 struct batadv_priv *bat_priv;
3354 - struct batadv_hard_iface *primary_if;
3355 + struct batadv_hard_iface *primary_if = NULL;
3356 struct batadv_frag_packet frag_header;
3357 struct sk_buff *skb_fragment;
3358 unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
3359 unsigned header_size = sizeof(frag_header);
3360 unsigned max_fragment_size, max_packet_size;
3361 + bool ret = false;
3362
3363 /* To avoid merge and refragmentation at next-hops we never send
3364 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
3365 @@ -483,7 +484,11 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
3366 skb->len + ETH_HLEN);
3367 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
3368
3369 - return true;
3370 + ret = true;
3371 +
3372 out_err:
3373 - return false;
3374 + if (primary_if)
3375 + batadv_hardif_free_ref(primary_if);
3376 +
3377 + return ret;
3378 }
3379 diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
3380 index 55cf2260d295..36b9ae61f5e8 100644
3381 --- a/net/batman-adv/gateway_client.c
3382 +++ b/net/batman-adv/gateway_client.c
3383 @@ -42,8 +42,10 @@
3384
3385 static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
3386 {
3387 - if (atomic_dec_and_test(&gw_node->refcount))
3388 + if (atomic_dec_and_test(&gw_node->refcount)) {
3389 + batadv_orig_node_free_ref(gw_node->orig_node);
3390 kfree_rcu(gw_node, rcu);
3391 + }
3392 }
3393
3394 static struct batadv_gw_node *
3395 @@ -408,9 +410,14 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
3396 if (gateway->bandwidth_down == 0)
3397 return;
3398
3399 + if (!atomic_inc_not_zero(&orig_node->refcount))
3400 + return;
3401 +
3402 gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
3403 - if (!gw_node)
3404 + if (!gw_node) {
3405 + batadv_orig_node_free_ref(orig_node);
3406 return;
3407 + }
3408
3409 INIT_HLIST_NODE(&gw_node->list);
3410 gw_node->orig_node = orig_node;
3411 diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
3412 index 853941629dc1..abf612d7d0ae 100644
3413 --- a/net/batman-adv/originator.c
3414 +++ b/net/batman-adv/originator.c
3415 @@ -500,12 +500,17 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
3416 static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
3417 {
3418 struct batadv_orig_ifinfo *orig_ifinfo;
3419 + struct batadv_neigh_node *router;
3420
3421 orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
3422
3423 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
3424 batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
3425
3426 + /* this is the last reference to this object */
3427 + router = rcu_dereference_protected(orig_ifinfo->router, true);
3428 + if (router)
3429 + batadv_neigh_node_free_ref_now(router);
3430 kfree(orig_ifinfo);
3431 }
3432
3433 @@ -697,6 +702,47 @@ free_orig_node:
3434 }
3435
3436 /**
3437 + * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
3438 + * @bat_priv: the bat priv with all the soft interface information
3439 + * @neigh: neighbor node which is to be checked
3440 + */
3441 +static void
3442 +batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
3443 + struct batadv_neigh_node *neigh)
3444 +{
3445 + struct batadv_neigh_ifinfo *neigh_ifinfo;
3446 + struct batadv_hard_iface *if_outgoing;
3447 + struct hlist_node *node_tmp;
3448 +
3449 + spin_lock_bh(&neigh->ifinfo_lock);
3450 +
3451 + /* for all ifinfo objects for this neighbor */
3452 + hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
3453 + &neigh->ifinfo_list, list) {
3454 + if_outgoing = neigh_ifinfo->if_outgoing;
3455 +
3456 + /* always keep the default interface */
3457 + if (if_outgoing == BATADV_IF_DEFAULT)
3458 + continue;
3459 +
3460 + /* don't purge if the interface is not (going) down */
3461 + if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
3462 + (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
3463 + (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
3464 + continue;
3465 +
3466 + batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
3467 + "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
3468 + neigh->addr, if_outgoing->net_dev->name);
3469 +
3470 + hlist_del_rcu(&neigh_ifinfo->list);
3471 + batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
3472 + }
3473 +
3474 + spin_unlock_bh(&neigh->ifinfo_lock);
3475 +}
3476 +
3477 +/**
3478 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
3479 * @bat_priv: the bat priv with all the soft interface information
3480 * @orig_node: orig node which is to be checked
3481 @@ -795,6 +841,11 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
3482
3483 hlist_del_rcu(&neigh_node->list);
3484 batadv_neigh_node_free_ref(neigh_node);
3485 + } else {
3486 + /* only necessary if the whole neighbor is not being
3487 + * deleted, but some interface has been removed.
3488 + */
3489 + batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
3490 }
3491 }
3492
3493 @@ -852,7 +903,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
3494 {
3495 struct batadv_neigh_node *best_neigh_node;
3496 struct batadv_hard_iface *hard_iface;
3497 - bool changed;
3498 + bool changed_ifinfo, changed_neigh;
3499
3500 if (batadv_has_timed_out(orig_node->last_seen,
3501 2 * BATADV_PURGE_TIMEOUT)) {
3502 @@ -862,10 +913,10 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
3503 jiffies_to_msecs(orig_node->last_seen));
3504 return true;
3505 }
3506 - changed = batadv_purge_orig_ifinfo(bat_priv, orig_node);
3507 - changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node);
3508 + changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
3509 + changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
3510
3511 - if (!changed)
3512 + if (!changed_ifinfo && !changed_neigh)
3513 return false;
3514
3515 /* first for NULL ... */
3516 @@ -1023,7 +1074,8 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
3517 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
3518
3519 out:
3520 - batadv_hardif_free_ref(hard_iface);
3521 + if (hard_iface)
3522 + batadv_hardif_free_ref(hard_iface);
3523 return 0;
3524 }
3525
3526 diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
3527 index d0cca3c65f01..7985deaff52f 100644
3528 --- a/net/bridge/br_input.c
3529 +++ b/net/bridge/br_input.c
3530 @@ -73,7 +73,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
3531 goto drop;
3532
3533 if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid))
3534 - goto drop;
3535 + goto out;
3536
3537 /* insert into forwarding database after filtering to avoid spoofing */
3538 br = p->br;
3539 diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
3540 index e74b6d530cb6..e8844d975b32 100644
3541 --- a/net/bridge/br_netlink.c
3542 +++ b/net/bridge/br_netlink.c
3543 @@ -445,6 +445,20 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
3544 return 0;
3545 }
3546
3547 +static int br_dev_newlink(struct net *src_net, struct net_device *dev,
3548 + struct nlattr *tb[], struct nlattr *data[])
3549 +{
3550 + struct net_bridge *br = netdev_priv(dev);
3551 +
3552 + if (tb[IFLA_ADDRESS]) {
3553 + spin_lock_bh(&br->lock);
3554 + br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
3555 + spin_unlock_bh(&br->lock);
3556 + }
3557 +
3558 + return register_netdevice(dev);
3559 +}
3560 +
3561 static size_t br_get_link_af_size(const struct net_device *dev)
3562 {
3563 struct net_port_vlans *pv;
3564 @@ -473,6 +487,7 @@ struct rtnl_link_ops br_link_ops __read_mostly = {
3565 .priv_size = sizeof(struct net_bridge),
3566 .setup = br_dev_setup,
3567 .validate = br_validate,
3568 + .newlink = br_dev_newlink,
3569 .dellink = br_dev_delete,
3570 };
3571
3572 diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
3573 index f23c74b3a953..ba7757b7737d 100644
3574 --- a/net/bridge/br_vlan.c
3575 +++ b/net/bridge/br_vlan.c
3576 @@ -170,7 +170,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
3577 * rejected.
3578 */
3579 if (!v)
3580 - return false;
3581 + goto drop;
3582
3583 /* If vlan tx offload is disabled on bridge device and frame was
3584 * sent from vlan device on the bridge device, it does not have
3585 @@ -193,7 +193,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
3586 * vlan untagged or priority-tagged traffic belongs to.
3587 */
3588 if (pvid == VLAN_N_VID)
3589 - return false;
3590 + goto drop;
3591
3592 /* PVID is set on this port. Any untagged or priority-tagged
3593 * ingress frame is considered to belong to this vlan.
3594 @@ -216,7 +216,8 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
3595 /* Frame had a valid vlan tag. See if vlan is allowed */
3596 if (test_bit(*vid, v->vlan_bitmap))
3597 return true;
3598 -
3599 +drop:
3600 + kfree_skb(skb);
3601 return false;
3602 }
3603
3604 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
3605 index 0e474b13463b..1059ed3bc255 100644
3606 --- a/net/bridge/netfilter/ebtables.c
3607 +++ b/net/bridge/netfilter/ebtables.c
3608 @@ -1044,10 +1044,9 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
3609 if (repl->num_counters &&
3610 copy_to_user(repl->counters, counterstmp,
3611 repl->num_counters * sizeof(struct ebt_counter))) {
3612 - ret = -EFAULT;
3613 + /* Silent error, can't fail, new table is already in place */
3614 + net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
3615 }
3616 - else
3617 - ret = 0;
3618
3619 /* decrease module count and free resources */
3620 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
3621 diff --git a/net/core/dev.c b/net/core/dev.c
3622 index 45fa2f11f84d..fccc195e0fc8 100644
3623 --- a/net/core/dev.c
3624 +++ b/net/core/dev.c
3625 @@ -2289,7 +2289,7 @@ EXPORT_SYMBOL(skb_checksum_help);
3626 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3627 {
3628 __be16 type = skb->protocol;
3629 - int vlan_depth = ETH_HLEN;
3630 + int vlan_depth = skb->mac_len;
3631
3632 /* Tunnel gso handlers can set protocol to ethernet. */
3633 if (type == htons(ETH_P_TEB)) {
3634 @@ -3944,6 +3944,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
3635 }
3636 NAPI_GRO_CB(skb)->count = 1;
3637 NAPI_GRO_CB(skb)->age = jiffies;
3638 + NAPI_GRO_CB(skb)->last = skb;
3639 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3640 skb->next = napi->gro_list;
3641 napi->gro_list = skb;
3642 @@ -4050,6 +4051,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3643 skb->vlan_tci = 0;
3644 skb->dev = napi->dev;
3645 skb->skb_iif = 0;
3646 + skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
3647
3648 napi->skb = skb;
3649 }
3650 @@ -4605,6 +4607,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3651 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
3652
3653 /**
3654 + * netdev_lower_get_next - Get the next device from the lower neighbour
3655 + * list
3656 + * @dev: device
3657 + * @iter: list_head ** of the current position
3658 + *
3659 + * Gets the next netdev_adjacent from the dev's lower neighbour
3660 + * list, starting from iter position. The caller must hold RTNL lock or
3661 + * its own locking that guarantees that the neighbour lower
3662 + * list will remain unchanged.
3663 + */
3664 +void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
3665 +{
3666 + struct netdev_adjacent *lower;
3667 +
3668 + lower = list_entry((*iter)->next, struct netdev_adjacent, list);
3669 +
3670 + if (&lower->list == &dev->adj_list.lower)
3671 + return NULL;
3672 +
3673 + *iter = &lower->list;
3674 +
3675 + return lower->dev;
3676 +}
3677 +EXPORT_SYMBOL(netdev_lower_get_next);
3678 +
3679 +/**
3680 * netdev_lower_get_first_private_rcu - Get the first ->private from the
3681 * lower neighbour list, RCU
3682 * variant
3683 @@ -5054,6 +5082,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
3684 }
3685 EXPORT_SYMBOL(netdev_lower_dev_get_private);
3686
3687 +
3688 +int dev_get_nest_level(struct net_device *dev,
3689 + bool (*type_check)(struct net_device *dev))
3690 +{
3691 + struct net_device *lower = NULL;
3692 + struct list_head *iter;
3693 + int max_nest = -1;
3694 + int nest;
3695 +
3696 + ASSERT_RTNL();
3697 +
3698 + netdev_for_each_lower_dev(dev, lower, iter) {
3699 + nest = dev_get_nest_level(lower, type_check);
3700 + if (max_nest < nest)
3701 + max_nest = nest;
3702 + }
3703 +
3704 + if (type_check(dev))
3705 + max_nest++;
3706 +
3707 + return max_nest;
3708 +}
3709 +EXPORT_SYMBOL(dev_get_nest_level);
3710 +
3711 static void dev_change_rx_flags(struct net_device *dev, int flags)
3712 {
3713 const struct net_device_ops *ops = dev->netdev_ops;
3714 @@ -5523,7 +5575,7 @@ static int dev_new_index(struct net *net)
3715
3716 /* Delayed registration/unregisteration */
3717 static LIST_HEAD(net_todo_list);
3718 -static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
3719 +DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
3720
3721 static void net_set_todo(struct net_device *dev)
3722 {
3723 diff --git a/net/core/filter.c b/net/core/filter.c
3724 index ad30d626a5bd..ebce437678fc 100644
3725 --- a/net/core/filter.c
3726 +++ b/net/core/filter.c
3727 @@ -355,6 +355,8 @@ load_b:
3728
3729 if (skb_is_nonlinear(skb))
3730 return 0;
3731 + if (skb->len < sizeof(struct nlattr))
3732 + return 0;
3733 if (A > skb->len - sizeof(struct nlattr))
3734 return 0;
3735
3736 @@ -371,11 +373,13 @@ load_b:
3737
3738 if (skb_is_nonlinear(skb))
3739 return 0;
3740 + if (skb->len < sizeof(struct nlattr))
3741 + return 0;
3742 if (A > skb->len - sizeof(struct nlattr))
3743 return 0;
3744
3745 nla = (struct nlattr *)&skb->data[A];
3746 - if (nla->nla_len > A - skb->len)
3747 + if (nla->nla_len > skb->len - A)
3748 return 0;
3749
3750 nla = nla_find_nested(nla, X);
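
The nlattr filter fix replaces an inverted bound: with unsigned arithmetic, A - skb->len wraps to a huge value whenever A < skb->len, so the old check never rejected an attribute; skb->len - A is the space actually remaining after offset A. A worked example:

    #include <stdio.h>

    int main(void)
    {
        unsigned int skb_len = 100, A = 40, nla_len = 80;

        printf("old bound  A - len = %u\n", A - skb_len);  /* wraps: 4294967236 */
        printf("new bound  len - A = %u\n", skb_len - A);  /* 60 */
        printf("nla_len=%u %s\n", nla_len,
               nla_len > skb_len - A ? "rejected (would overrun)" : "accepted");
        return 0;
    }
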
3751 diff --git a/net/core/neighbour.c b/net/core/neighbour.c
3752 index e16129019c66..7d95f69635c6 100644
3753 --- a/net/core/neighbour.c
3754 +++ b/net/core/neighbour.c
3755 @@ -1247,8 +1247,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
3756 neigh->updated = jiffies;
3757 if (!(neigh->nud_state & NUD_FAILED))
3758 return;
3759 - neigh->nud_state = NUD_PROBE;
3760 - atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
3761 + neigh->nud_state = NUD_INCOMPLETE;
3762 + atomic_set(&neigh->probes, neigh_max_probes(neigh));
3763 neigh_add_timer(neigh,
3764 jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
3765 }
3766 diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
3767 index 81d3a9a08453..7c8ffd974961 100644
3768 --- a/net/core/net_namespace.c
3769 +++ b/net/core/net_namespace.c
3770 @@ -24,7 +24,7 @@
3771
3772 static LIST_HEAD(pernet_list);
3773 static struct list_head *first_device = &pernet_list;
3774 -static DEFINE_MUTEX(net_mutex);
3775 +DEFINE_MUTEX(net_mutex);
3776
3777 LIST_HEAD(net_namespace_list);
3778 EXPORT_SYMBOL_GPL(net_namespace_list);
3779 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
3780 index 120eecc0f5a4..83b9d6ae5119 100644
3781 --- a/net/core/rtnetlink.c
3782 +++ b/net/core/rtnetlink.c
3783 @@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
3784 }
3785 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
3786
3787 +/* Return with the rtnl_lock held when there are no network
3788 + * devices unregistering in any network namespace.
3789 + */
3790 +static void rtnl_lock_unregistering_all(void)
3791 +{
3792 + struct net *net;
3793 + bool unregistering;
3794 + DEFINE_WAIT(wait);
3795 +
3796 + for (;;) {
3797 + prepare_to_wait(&netdev_unregistering_wq, &wait,
3798 + TASK_UNINTERRUPTIBLE);
3799 + unregistering = false;
3800 + rtnl_lock();
3801 + for_each_net(net) {
3802 + if (net->dev_unreg_count > 0) {
3803 + unregistering = true;
3804 + break;
3805 + }
3806 + }
3807 + if (!unregistering)
3808 + break;
3809 + __rtnl_unlock();
3810 + schedule();
3811 + }
3812 + finish_wait(&netdev_unregistering_wq, &wait);
3813 +}
3814 +
3815 /**
3816 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
3817 * @ops: struct rtnl_link_ops * to unregister
3818 */
3819 void rtnl_link_unregister(struct rtnl_link_ops *ops)
3820 {
3821 - rtnl_lock();
3822 + /* Close the race with cleanup_net() */
3823 + mutex_lock(&net_mutex);
3824 + rtnl_lock_unregistering_all();
3825 __rtnl_link_unregister(ops);
3826 rtnl_unlock();
3827 + mutex_unlock(&net_mutex);
3828 }
3829 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
3830
3831 @@ -774,7 +805,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
3832 return 0;
3833 }
3834
3835 -static size_t rtnl_port_size(const struct net_device *dev)
3836 +static size_t rtnl_port_size(const struct net_device *dev,
3837 + u32 ext_filter_mask)
3838 {
3839 size_t port_size = nla_total_size(4) /* PORT_VF */
3840 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
3841 @@ -790,7 +822,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
3842 size_t port_self_size = nla_total_size(sizeof(struct nlattr))
3843 + port_size;
3844
3845 - if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
3846 + if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
3847 + !(ext_filter_mask & RTEXT_FILTER_VF))
3848 return 0;
3849 if (dev_num_vf(dev->dev.parent))
3850 return port_self_size + vf_ports_size +
3851 @@ -825,7 +858,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
3852 + nla_total_size(ext_filter_mask
3853 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
3854 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
3855 - + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
3856 + + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
3857 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
3858 + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
3859 + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */
3860 @@ -887,11 +920,13 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
3861 return 0;
3862 }
3863
3864 -static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
3865 +static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
3866 + u32 ext_filter_mask)
3867 {
3868 int err;
3869
3870 - if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
3871 + if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
3872 + !(ext_filter_mask & RTEXT_FILTER_VF))
3873 return 0;
3874
3875 err = rtnl_port_self_fill(skb, dev);
3876 @@ -1076,7 +1111,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
3877 nla_nest_end(skb, vfinfo);
3878 }
3879
3880 - if (rtnl_port_fill(skb, dev))
3881 + if (rtnl_port_fill(skb, dev, ext_filter_mask))
3882 goto nla_put_failure;
3883
3884 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
3885 @@ -1130,6 +1165,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
3886 struct hlist_head *head;
3887 struct nlattr *tb[IFLA_MAX+1];
3888 u32 ext_filter_mask = 0;
3889 + int err;
3890
3891 s_h = cb->args[0];
3892 s_idx = cb->args[1];
3893 @@ -1150,11 +1186,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
3894 hlist_for_each_entry_rcu(dev, head, index_hlist) {
3895 if (idx < s_idx)
3896 goto cont;
3897 - if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
3898 - NETLINK_CB(cb->skb).portid,
3899 - cb->nlh->nlmsg_seq, 0,
3900 - NLM_F_MULTI,
3901 - ext_filter_mask) <= 0)
3902 + err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
3903 + NETLINK_CB(cb->skb).portid,
3904 + cb->nlh->nlmsg_seq, 0,
3905 + NLM_F_MULTI,
3906 + ext_filter_mask);
3907 + /* If we ran out of room on the first message,
3908 + * we're in trouble
3909 + */
3910 + WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
3911 +
3912 + if (err <= 0)
3913 goto out;
3914
3915 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3916 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
3917 index 90b96a11b974..e5ae776ee9b4 100644
3918 --- a/net/core/skbuff.c
3919 +++ b/net/core/skbuff.c
3920 @@ -3076,7 +3076,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
3921 if (unlikely(p->len + len >= 65536))
3922 return -E2BIG;
3923
3924 - lp = NAPI_GRO_CB(p)->last ?: p;
3925 + lp = NAPI_GRO_CB(p)->last;
3926 pinfo = skb_shinfo(lp);
3927
3928 if (headlen <= offset) {
3929 @@ -3192,7 +3192,7 @@ merge:
3930
3931 __skb_pull(skb, offset);
3932
3933 - if (!NAPI_GRO_CB(p)->last)
3934 + if (NAPI_GRO_CB(p)->last == p)
3935 skb_shinfo(p)->frag_list = skb;
3936 else
3937 NAPI_GRO_CB(p)->last->next = skb;
3938 @@ -3951,12 +3951,14 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
3939 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
3940 {
3941 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3942 - unsigned int hdr_len;
3943
3944 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3945 - hdr_len = tcp_hdrlen(skb);
3946 - else
3947 - hdr_len = sizeof(struct udphdr);
3948 - return hdr_len + shinfo->gso_size;
3949 + return tcp_hdrlen(skb) + shinfo->gso_size;
3950 +
3951 + /* UFO sets gso_size to the size of the fragmentation
3952 + * payload, i.e. the size of the L4 (UDP) header is already
3953 + * accounted for.
3954 + */
3955 + return shinfo->gso_size;
3956 }
3957 EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
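
After this hunk, skb_gso_transport_seglen() adds the transport header only for TCP GSO; for UFO, gso_size is the fragmentation payload and thus already covers the UDP header. A sketch of the resulting rule with illustrative values:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int gso_transport_seglen(bool is_tcp, unsigned int gso_size,
                                             unsigned int tcp_hdrlen)
    {
        if (is_tcp)
            return tcp_hdrlen + gso_size;
        return gso_size; /* UFO: UDP header already accounted for */
    }

    int main(void)
    {
        printf("tcp: %u\n", gso_transport_seglen(true, 1448, 20));  /* 1468 */
        printf("udp: %u\n", gso_transport_seglen(false, 1472, 0));  /* 1472 */
        return 0;
    }
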
3958 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
3959 index a0e9cf6379de..6a7fae228634 100644
3960 --- a/net/core/sock_diag.c
3961 +++ b/net/core/sock_diag.c
3962 @@ -49,7 +49,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
3963 }
3964 EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
3965
3966 -int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
3967 +int sock_diag_put_filterinfo(struct sock *sk,
3968 struct sk_buff *skb, int attrtype)
3969 {
3970 struct nlattr *attr;
3971 @@ -57,7 +57,7 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
3972 unsigned int len;
3973 int err = 0;
3974
3975 - if (!ns_capable(user_ns, CAP_NET_ADMIN)) {
3976 + if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
3977 nla_reserve(skb, attrtype, 0);
3978 return 0;
3979 }
3980 diff --git a/net/core/utils.c b/net/core/utils.c
3981 index 2f737bf90b3f..eed34338736c 100644
3982 --- a/net/core/utils.c
3983 +++ b/net/core/utils.c
3984 @@ -348,8 +348,8 @@ static void __net_random_once_deferred(struct work_struct *w)
3985 {
3986 struct __net_random_once_work *work =
3987 container_of(w, struct __net_random_once_work, work);
3988 - if (!static_key_enabled(work->key))
3989 - static_key_slow_inc(work->key);
3990 + BUG_ON(!static_key_enabled(work->key));
3991 + static_key_slow_dec(work->key);
3992 kfree(work);
3993 }
3994
3995 @@ -367,7 +367,7 @@ static void __net_random_once_disable_jump(struct static_key *key)
3996 }
3997
3998 bool __net_get_random_once(void *buf, int nbytes, bool *done,
3999 - struct static_key *done_key)
4000 + struct static_key *once_key)
4001 {
4002 static DEFINE_SPINLOCK(lock);
4003 unsigned long flags;
4004 @@ -382,7 +382,7 @@ bool __net_get_random_once(void *buf, int nbytes, bool *done,
4005 *done = true;
4006 spin_unlock_irqrestore(&lock, flags);
4007
4008 - __net_random_once_disable_jump(done_key);
4009 + __net_random_once_disable_jump(once_key);
4010
4011 return true;
4012 }
4013 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
4014 index b53f0bf84dca..9d43468722ed 100644
4015 --- a/net/ipv4/fib_semantics.c
4016 +++ b/net/ipv4/fib_semantics.c
4017 @@ -820,13 +820,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
4018 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
4019 if (fi == NULL)
4020 goto failure;
4021 + fib_info_cnt++;
4022 if (cfg->fc_mx) {
4023 fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
4024 if (!fi->fib_metrics)
4025 goto failure;
4026 } else
4027 fi->fib_metrics = (u32 *) dst_default_metrics;
4028 - fib_info_cnt++;
4029
4030 fi->fib_net = hold_net(net);
4031 fi->fib_protocol = cfg->fc_protocol;
4032 diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
4033 index f3869c186d97..1c6bd4359cbd 100644
4034 --- a/net/ipv4/ip_forward.c
4035 +++ b/net/ipv4/ip_forward.c
4036 @@ -42,12 +42,12 @@
4037 static bool ip_may_fragment(const struct sk_buff *skb)
4038 {
4039 return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
4040 - !skb->local_df;
4041 + skb->local_df;
4042 }
4043
4044 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
4045 {
4046 - if (skb->len <= mtu || skb->local_df)
4047 + if (skb->len <= mtu)
4048 return false;
4049
4050 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
4051 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
4052 index ec4f762efda5..94213c891565 100644
4053 --- a/net/ipv4/ip_gre.c
4054 +++ b/net/ipv4/ip_gre.c
4055 @@ -463,6 +463,7 @@ static const struct net_device_ops ipgre_netdev_ops = {
4056 static void ipgre_tunnel_setup(struct net_device *dev)
4057 {
4058 dev->netdev_ops = &ipgre_netdev_ops;
4059 + dev->type = ARPHRD_IPGRE;
4060 ip_tunnel_setup(dev, ipgre_net_id);
4061 }
4062
4063 @@ -501,7 +502,6 @@ static int ipgre_tunnel_init(struct net_device *dev)
4064 memcpy(dev->dev_addr, &iph->saddr, 4);
4065 memcpy(dev->broadcast, &iph->daddr, 4);
4066
4067 - dev->type = ARPHRD_IPGRE;
4068 dev->flags = IFF_NOARP;
4069 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
4070 dev->addr_len = 4;
4071 diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
4072 index a82a22d8f77f..0c3a5d17b4a9 100644
4073 --- a/net/ipv4/ip_tunnel.c
4074 +++ b/net/ipv4/ip_tunnel.c
4075 @@ -438,6 +438,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
4076 tunnel->i_seqno = ntohl(tpi->seq) + 1;
4077 }
4078
4079 + skb_reset_network_header(skb);
4080 +
4081 err = IP_ECN_decapsulate(iph, skb);
4082 if (unlikely(err)) {
4083 if (log_ecn_error)
4084 @@ -534,9 +536,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
4085 unsigned int max_headroom; /* The extra header space needed */
4086 __be32 dst;
4087 int err;
4088 - bool connected = true;
4089 + bool connected;
4090
4091 inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
4092 + connected = (tunnel->parms.iph.daddr != 0);
4093
4094 dst = tnl_params->daddr;
4095 if (dst == 0) {
4096 @@ -872,6 +875,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
4097 */
4098 if (!IS_ERR(itn->fb_tunnel_dev)) {
4099 itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
4100 + itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
4101 ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
4102 }
4103 rtnl_unlock();
4104 diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
4105 index 48eafae51769..e4a8f76c8995 100644
4106 --- a/net/ipv4/ip_vti.c
4107 +++ b/net/ipv4/ip_vti.c
4108 @@ -207,6 +207,7 @@ static const struct net_device_ops vti_netdev_ops = {
4109 static void vti_tunnel_setup(struct net_device *dev)
4110 {
4111 dev->netdev_ops = &vti_netdev_ops;
4112 + dev->type = ARPHRD_TUNNEL;
4113 ip_tunnel_setup(dev, vti_net_id);
4114 }
4115
4116 @@ -218,7 +219,6 @@ static int vti_tunnel_init(struct net_device *dev)
4117 memcpy(dev->dev_addr, &iph->saddr, 4);
4118 memcpy(dev->broadcast, &iph->daddr, 4);
4119
4120 - dev->type = ARPHRD_TUNNEL;
4121 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
4122 dev->mtu = ETH_DATA_LEN;
4123 dev->flags = IFF_NOARP;
4124 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
4125 index 59da7cde0724..f95b6f93814b 100644
4126 --- a/net/ipv4/netfilter/arp_tables.c
4127 +++ b/net/ipv4/netfilter/arp_tables.c
4128 @@ -1044,8 +1044,10 @@ static int __do_replace(struct net *net, const char *name,
4129
4130 xt_free_table_info(oldinfo);
4131 if (copy_to_user(counters_ptr, counters,
4132 - sizeof(struct xt_counters) * num_counters) != 0)
4133 - ret = -EFAULT;
4134 + sizeof(struct xt_counters) * num_counters) != 0) {
4135 + /* Silent error, can't fail, new table is already in place */
4136 + net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
4137 + }
4138 vfree(counters);
4139 xt_table_unlock(t);
4140 return ret;
4141 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
4142 index 718dfbd30cbe..99e810f84671 100644
4143 --- a/net/ipv4/netfilter/ip_tables.c
4144 +++ b/net/ipv4/netfilter/ip_tables.c
4145 @@ -1231,8 +1231,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
4146
4147 xt_free_table_info(oldinfo);
4148 if (copy_to_user(counters_ptr, counters,
4149 - sizeof(struct xt_counters) * num_counters) != 0)
4150 - ret = -EFAULT;
4151 + sizeof(struct xt_counters) * num_counters) != 0) {
4152 + /* Silent error, can't fail, new table is already in place */
4153 + net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
4154 + }
4155 vfree(counters);
4156 xt_table_unlock(t);
4157 return ret;
4158 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
4159 index 2d11c094296e..e21934b06d4c 100644
4160 --- a/net/ipv4/ping.c
4161 +++ b/net/ipv4/ping.c
4162 @@ -252,26 +252,33 @@ int ping_init_sock(struct sock *sk)
4163 {
4164 struct net *net = sock_net(sk);
4165 kgid_t group = current_egid();
4166 - struct group_info *group_info = get_current_groups();
4167 - int i, j, count = group_info->ngroups;
4168 + struct group_info *group_info;
4169 + int i, j, count;
4170 kgid_t low, high;
4171 + int ret = 0;
4172
4173 inet_get_ping_group_range_net(net, &low, &high);
4174 if (gid_lte(low, group) && gid_lte(group, high))
4175 return 0;
4176
4177 + group_info = get_current_groups();
4178 + count = group_info->ngroups;
4179 for (i = 0; i < group_info->nblocks; i++) {
4180 int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
4181 for (j = 0; j < cp_count; j++) {
4182 kgid_t gid = group_info->blocks[i][j];
4183 if (gid_lte(low, gid) && gid_lte(gid, high))
4184 - return 0;
4185 + goto out_release_group;
4186 }
4187
4188 count -= cp_count;
4189 }
4190
4191 - return -EACCES;
4192 + ret = -EACCES;
4193 +
4194 +out_release_group:
4195 + put_group_info(group_info);
4196 + return ret;
4197 }
4198 EXPORT_SYMBOL_GPL(ping_init_sock);
4199
4200 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
4201 index 4c011ec69ed4..134437309b1e 100644
4202 --- a/net/ipv4/route.c
4203 +++ b/net/ipv4/route.c
4204 @@ -1526,7 +1526,7 @@ static int __mkroute_input(struct sk_buff *skb,
4205 struct in_device *out_dev;
4206 unsigned int flags = 0;
4207 bool do_cache;
4208 - u32 itag;
4209 + u32 itag = 0;
4210
4211 /* get a working reference to the output device */
4212 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
4213 @@ -2364,7 +2364,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
4214 }
4215 } else
4216 #endif
4217 - if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
4218 + if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
4219 goto nla_put_failure;
4220 }
4221
4222 diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
4223 index 828e4c3ffbaf..121a9a22dc98 100644
4224 --- a/net/ipv4/tcp_cubic.c
4225 +++ b/net/ipv4/tcp_cubic.c
4226 @@ -409,7 +409,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
4227 ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
4228 ratio += cnt;
4229
4230 - ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT);
4231 + ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
4232 }
4233
4234 /* Some calls are for duplicates without timestamps */
4235 diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
4236 index 075602fc6b6a..1e55f5eba185 100644
4237 --- a/net/ipv6/ip6_fib.c
4238 +++ b/net/ipv6/ip6_fib.c
4239 @@ -1418,7 +1418,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
4240
4241 if (w->skip) {
4242 w->skip--;
4243 - continue;
4244 + goto skip;
4245 }
4246
4247 err = w->func(w);
4248 @@ -1428,6 +1428,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
4249 w->count++;
4250 continue;
4251 }
4252 +skip:
4253 w->state = FWS_U;
4254 case FWS_U:
4255 if (fn == w->root)
4256 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
4257 index f3ffb43f59c0..2465d18e8a26 100644
4258 --- a/net/ipv6/ip6_gre.c
4259 +++ b/net/ipv6/ip6_gre.c
4260 @@ -1566,6 +1566,15 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
4261 return 0;
4262 }
4263
4264 +static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
4265 +{
4266 + struct net *net = dev_net(dev);
4267 + struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
4268 +
4269 + if (dev != ign->fb_tunnel_dev)
4270 + unregister_netdevice_queue(dev, head);
4271 +}
4272 +
4273 static size_t ip6gre_get_size(const struct net_device *dev)
4274 {
4275 return
4276 @@ -1643,6 +1652,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
4277 .validate = ip6gre_tunnel_validate,
4278 .newlink = ip6gre_newlink,
4279 .changelink = ip6gre_changelink,
4280 + .dellink = ip6gre_dellink,
4281 .get_size = ip6gre_get_size,
4282 .fill_info = ip6gre_fill_info,
4283 };
4284 diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
4285 index 59f95affceb0..b2f091566f88 100644
4286 --- a/net/ipv6/ip6_offload.c
4287 +++ b/net/ipv6/ip6_offload.c
4288 @@ -196,7 +196,6 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
4289 unsigned int off;
4290 u16 flush = 1;
4291 int proto;
4292 - __wsum csum;
4293
4294 off = skb_gro_offset(skb);
4295 hlen = off + sizeof(*iph);
4296 @@ -264,13 +263,10 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
4297
4298 NAPI_GRO_CB(skb)->flush |= flush;
4299
4300 - csum = skb->csum;
4301 - skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
4302 + skb_gro_postpull_rcsum(skb, iph, nlen);
4303
4304 pp = ops->callbacks.gro_receive(head, skb);
4305
4306 - skb->csum = csum;
4307 -
4308 out_unlock:
4309 rcu_read_unlock();
4310
4311 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
4312 index 3702d179506d..a62b610307ec 100644
4313 --- a/net/ipv6/ip6_output.c
4314 +++ b/net/ipv6/ip6_output.c
4315 @@ -344,12 +344,16 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
4316
4317 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
4318 {
4319 - if (skb->len <= mtu || skb->local_df)
4320 + if (skb->len <= mtu)
4321 return false;
4322
4323 + /* ipv6 conntrack defrag sets max_frag_size + local_df */
4324 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
4325 return true;
4326
4327 + if (skb->local_df)
4328 + return false;
4329 +
4330 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
4331 return false;
4332
4333 @@ -1225,7 +1229,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
4334 unsigned int maxnonfragsize, headersize;
4335
4336 headersize = sizeof(struct ipv6hdr) +
4337 - (opt ? opt->tot_len : 0) +
4338 + (opt ? opt->opt_flen + opt->opt_nflen : 0) +
4339 (dst_allfrag(&rt->dst) ?
4340 sizeof(struct frag_hdr) : 0) +
4341 rt->rt6i_nfheader_len;
4342 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
4343 index 5db8d310f9c0..0e51f68ab163 100644
4344 --- a/net/ipv6/ip6_tunnel.c
4345 +++ b/net/ipv6/ip6_tunnel.c
4346 @@ -1564,7 +1564,7 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
4347 {
4348 u8 proto;
4349
4350 - if (!data)
4351 + if (!data || !data[IFLA_IPTUN_PROTO])
4352 return 0;
4353
4354 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
4355 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
4356 index 710238f58aa9..e080fbbbc0e5 100644
4357 --- a/net/ipv6/netfilter/ip6_tables.c
4358 +++ b/net/ipv6/netfilter/ip6_tables.c
4359 @@ -1241,8 +1241,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
4360
4361 xt_free_table_info(oldinfo);
4362 if (copy_to_user(counters_ptr, counters,
4363 - sizeof(struct xt_counters) * num_counters) != 0)
4364 - ret = -EFAULT;
4365 + sizeof(struct xt_counters) * num_counters) != 0) {
4366 + /* Silent error, can't fail, new table is already in place */
4367 + net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
4368 + }
4369 vfree(counters);
4370 xt_table_unlock(t);
4371 return ret;
4372 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4373 index fba54a407bb2..7cc1102e298c 100644
4374 --- a/net/ipv6/route.c
4375 +++ b/net/ipv6/route.c
4376 @@ -1342,7 +1342,7 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
4377 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
4378
4379 if (mtu)
4380 - return mtu;
4381 + goto out;
4382
4383 mtu = IPV6_MIN_MTU;
4384
4385 @@ -1352,7 +1352,8 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
4386 mtu = idev->cnf.mtu6;
4387 rcu_read_unlock();
4388
4389 - return mtu;
4390 +out:
4391 + return min_t(unsigned int, mtu, IP6_MAX_MTU);
4392 }
4393
4394 static struct dst_entry *icmp6_dst_gc_list;
4395 diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
4396 index 0d78132ff18a..8517d3cd1aed 100644
4397 --- a/net/ipv6/tcpv6_offload.c
4398 +++ b/net/ipv6/tcpv6_offload.c
4399 @@ -42,7 +42,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
4400 if (NAPI_GRO_CB(skb)->flush)
4401 goto skip_csum;
4402
4403 - wsum = skb->csum;
4404 + wsum = NAPI_GRO_CB(skb)->csum;
4405
4406 switch (skb->ip_summed) {
4407 case CHECKSUM_NONE:
4408 diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
4409 index 5990919356a5..ec6606325cda 100644
4410 --- a/net/l2tp/l2tp_ppp.c
4411 +++ b/net/l2tp/l2tp_ppp.c
4412 @@ -756,9 +756,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
4413 session->deref = pppol2tp_session_sock_put;
4414
4415 /* If PMTU discovery was enabled, use the MTU that was discovered */
4416 - dst = sk_dst_get(sk);
4417 + dst = sk_dst_get(tunnel->sock);
4418 if (dst != NULL) {
4419 - u32 pmtu = dst_mtu(__sk_dst_get(sk));
4420 + u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
4421 if (pmtu != 0)
4422 session->mtu = session->mru = pmtu -
4423 PPPOL2TP_HEADER_OVERHEAD;
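
The PPPoL2TP hunk reads the discovered PMTU from the tunnel's UDP socket instead of the PPPoX socket being connected, which has no cached route to the peer. A stub mirror; sock_stub is illustrative, and 40 matches this file's PPPOL2TP_HEADER_OVERHEAD:

        #include <stdio.h>

        #define HEADER_OVERHEAD 40

        struct sock_stub {
                unsigned dst_mtu;  /* 0 = no cached route */
        };

        static void set_session_mtu(const struct sock_stub *pppox_sk,
                                    const struct sock_stub *tunnel_sk,
                                    unsigned *mtu)
        {
                (void)pppox_sk;  /* the buggy code consulted this one */

                unsigned pmtu = tunnel_sk->dst_mtu;
                if (pmtu)
                        *mtu = pmtu - HEADER_OVERHEAD;
        }

        int main(void)
        {
                struct sock_stub pppox = { 0 }, tunnel = { 1500 };
                unsigned mtu = 1400;

                set_session_mtu(&pppox, &tunnel, &mtu);
                printf("%u\n", mtu);  /* 1460, from the tunnel's route */
                return 0;
        }
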
4424 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
4425 index adce01e8bb57..c68e5e0628df 100644
4426 --- a/net/netfilter/nf_tables_api.c
4427 +++ b/net/netfilter/nf_tables_api.c
4428 @@ -1934,7 +1934,8 @@ static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const
4429
4430 static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
4431 [NFTA_SET_TABLE] = { .type = NLA_STRING },
4432 - [NFTA_SET_NAME] = { .type = NLA_STRING },
4433 + [NFTA_SET_NAME] = { .type = NLA_STRING,
4434 + .len = IFNAMSIZ - 1 },
4435 [NFTA_SET_FLAGS] = { .type = NLA_U32 },
4436 [NFTA_SET_KEY_TYPE] = { .type = NLA_U32 },
4437 [NFTA_SET_KEY_LEN] = { .type = NLA_U32 },
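
Capping NFTA_SET_NAME at IFNAMSIZ - 1 lets the netlink parser reject over-long names before they reach a fixed 16-byte name buffer. What the .len bound enforces, in user-space miniature:

        #include <stdio.h>
        #include <string.h>

        #define IFNAMSIZ 16  /* mirrors the kernel constant */

        static int parse_set_name(char dst[IFNAMSIZ], const char *attr)
        {
                if (strlen(attr) > IFNAMSIZ - 1)  /* the .len check */
                        return -1;
                strcpy(dst, attr);  /* now provably fits, NUL included */
                return 0;
        }

        int main(void)
        {
                char name[IFNAMSIZ];

                printf("%d\n", parse_set_name(name, "short"));     /* 0 */
                printf("%d\n", parse_set_name(name,
                               "this_set_name_is_far_too_long"));  /* -1 */
                return 0;
        }
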
4438 diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
4439 index 90998a6ff8b9..804105391b9a 100644
4440 --- a/net/netfilter/nf_tables_core.c
4441 +++ b/net/netfilter/nf_tables_core.c
4442 @@ -25,9 +25,8 @@ static void nft_cmp_fast_eval(const struct nft_expr *expr,
4443 struct nft_data data[NFT_REG_MAX + 1])
4444 {
4445 const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
4446 - u32 mask;
4447 + u32 mask = nft_cmp_fast_mask(priv->len);
4448
4449 - mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - priv->len);
4450 if ((data[priv->sreg].data[0] & mask) == priv->data)
4451 return;
4452 data[NFT_REG_VERDICT].verdict = NFT_BREAK;
4453 diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
4454 index 954925db414d..e2b3f51c81f1 100644
4455 --- a/net/netfilter/nft_cmp.c
4456 +++ b/net/netfilter/nft_cmp.c
4457 @@ -128,7 +128,7 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx,
4458 BUG_ON(err < 0);
4459 desc.len *= BITS_PER_BYTE;
4460
4461 - mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - desc.len);
4462 + mask = nft_cmp_fast_mask(desc.len);
4463 priv->data = data.data[0] & mask;
4464 priv->len = desc.len;
4465 return 0;
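
The two nft_cmp hunks replace an open-coded mask expression with the shared nft_cmp_fast_mask() helper so the copies cannot drift apart. A worked check of the arithmetic for the 32-bit fast-path register (the _stub suffix marks an illustrative reimplementation):

        #include <stdint.h>
        #include <stdio.h>

        static uint32_t nft_cmp_fast_mask_stub(unsigned int len)
        {
                /* keep the low `len` bits of the 32-bit register */
                return ~0U >> (sizeof(uint32_t) * 8 - len);
        }

        int main(void)
        {
                printf("%08x\n", (unsigned)nft_cmp_fast_mask_stub(16)); /* 0000ffff */
                printf("%08x\n", (unsigned)nft_cmp_fast_mask_stub(8));  /* 000000ff */
                return 0;
        }
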
4466 diff --git a/net/packet/diag.c b/net/packet/diag.c
4467 index 533ce4ff108a..435ff99ba8c7 100644
4468 --- a/net/packet/diag.c
4469 +++ b/net/packet/diag.c
4470 @@ -172,7 +172,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
4471 goto out_nlmsg_trim;
4472
4473 if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
4474 - sock_diag_put_filterinfo(user_ns, sk, skb, PACKET_DIAG_FILTER))
4475 + sock_diag_put_filterinfo(sk, skb, PACKET_DIAG_FILTER))
4476 goto out_nlmsg_trim;
4477
4478 return nlmsg_end(skb, nlh);
4479 diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
4480 index eed8404443d8..f435a88d899a 100644
4481 --- a/net/sched/cls_tcindex.c
4482 +++ b/net/sched/cls_tcindex.c
4483 @@ -188,6 +188,12 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
4484 [TCA_TCINDEX_CLASSID] = { .type = NLA_U32 },
4485 };
4486
4487 +static void tcindex_filter_result_init(struct tcindex_filter_result *r)
4488 +{
4489 + memset(r, 0, sizeof(*r));
4490 + tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
4491 +}
4492 +
4493 static int
4494 tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
4495 u32 handle, struct tcindex_data *p,
4496 @@ -207,15 +213,11 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
4497 return err;
4498
4499 memcpy(&cp, p, sizeof(cp));
4500 - memset(&new_filter_result, 0, sizeof(new_filter_result));
4501 - tcf_exts_init(&new_filter_result.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
4502 + tcindex_filter_result_init(&new_filter_result);
4503
4504 + tcindex_filter_result_init(&cr);
4505 if (old_r)
4506 - memcpy(&cr, r, sizeof(cr));
4507 - else {
4508 - memset(&cr, 0, sizeof(cr));
4509 - tcf_exts_init(&cr.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
4510 - }
4511 + cr.res = r->res;
4512
4513 if (tb[TCA_TCINDEX_HASH])
4514 cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
4515 @@ -267,9 +269,14 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
4516 err = -ENOMEM;
4517 if (!cp.perfect && !cp.h) {
4518 if (valid_perfect_hash(&cp)) {
4519 + int i;
4520 +
4521 cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
4522 if (!cp.perfect)
4523 goto errout;
4524 + for (i = 0; i < cp.hash; i++)
4525 + tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT,
4526 + TCA_TCINDEX_POLICE);
4527 balloc = 1;
4528 } else {
4529 cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
4530 @@ -295,14 +302,17 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
4531 tcf_bind_filter(tp, &cr.res, base);
4532 }
4533
4534 - tcf_exts_change(tp, &cr.exts, &e);
4535 + if (old_r)
4536 + tcf_exts_change(tp, &r->exts, &e);
4537 + else
4538 + tcf_exts_change(tp, &cr.exts, &e);
4539
4540 tcf_tree_lock(tp);
4541 if (old_r && old_r != r)
4542 - memset(old_r, 0, sizeof(*old_r));
4543 + tcindex_filter_result_init(old_r);
4544
4545 memcpy(p, &cp, sizeof(cp));
4546 - memcpy(r, &cr, sizeof(cr));
4547 + r->res = cr.res;
4548
4549 if (r == &new_filter_result) {
4550 struct tcindex_filter **fp;
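
The tcindex hunks route every filter-result initialization, including each slot of a freshly kcalloc'ed perfect hash, through one helper; those hash slots previously got no tcf_exts_init() at all. The pattern in miniature, with illustrative types:

        #include <stdlib.h>
        #include <string.h>

        struct exts {
                int action, police;
        };

        struct result {
                struct exts exts;
                unsigned classid;
        };

        static void result_init(struct result *r)
        {
                memset(r, 0, sizeof(*r));
                r->exts.action = 1;  /* stand-ins for TCA_TCINDEX_ACT/_POLICE */
                r->exts.police = 2;
        }

        int main(void)
        {
                unsigned hash = 8;
                struct result *perfect = calloc(hash, sizeof(*perfect));

                if (!perfect)
                        return 1;
                for (unsigned i = 0; i < hash; i++)
                        result_init(&perfect[i]);  /* every slot, not just one */
                free(perfect);
                return 0;
        }
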
4551 diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
4552 index 647680b1c625..039977830783 100644
4553 --- a/net/sched/sch_hhf.c
4554 +++ b/net/sched/sch_hhf.c
4555 @@ -553,11 +553,6 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
4556 if (err < 0)
4557 return err;
4558
4559 - sch_tree_lock(sch);
4560 -
4561 - if (tb[TCA_HHF_BACKLOG_LIMIT])
4562 - sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);
4563 -
4564 if (tb[TCA_HHF_QUANTUM])
4565 new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]);
4566
4567 @@ -567,6 +562,12 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
4568 non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
4569 if (non_hh_quantum > INT_MAX)
4570 return -EINVAL;
4571 +
4572 + sch_tree_lock(sch);
4573 +
4574 + if (tb[TCA_HHF_BACKLOG_LIMIT])
4575 + sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);
4576 +
4577 q->quantum = new_quantum;
4578 q->hhf_non_hh_weight = new_hhf_non_hh_weight;
4579
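
The sch_hhf reordering moves every -EINVAL check ahead of sch_tree_lock(), so a rejected change can no longer bail out with sch->limit already rewritten. Stand-alone shape of the validate-then-commit pattern:

        #include <stdio.h>

        struct cfg {
                unsigned limit, quantum;
        };

        static int change(struct cfg *live, unsigned limit, unsigned quantum,
                          unsigned weight)
        {
                if ((unsigned long long)quantum * weight > 0x7fffffffULL)
                        return -1;  /* validate before touching anything */

                /* --- tree lock would be taken here; only good writes below */
                live->limit = limit;
                live->quantum = quantum;
                return 0;
        }

        int main(void)
        {
                struct cfg q = { 1000, 300 };

                if (change(&q, 2000, 0xffffffffu, 10) < 0)
                        printf("rejected, limit still %u\n", q.limit);
                return 0;
        }
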
4580 diff --git a/net/sctp/associola.c b/net/sctp/associola.c
4581 index ee13d28d39d1..878e17aafbe5 100644
4582 --- a/net/sctp/associola.c
4583 +++ b/net/sctp/associola.c
4584 @@ -1396,35 +1396,44 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
4585 return false;
4586 }
4587
4588 -/* Update asoc's rwnd for the approximated state in the buffer,
4589 - * and check whether SACK needs to be sent.
4590 - */
4591 -void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
4592 +/* Increase asoc's rwnd by len and send any window update SACK if needed. */
4593 +void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
4594 {
4595 - int rx_count;
4596 struct sctp_chunk *sack;
4597 struct timer_list *timer;
4598
4599 - if (asoc->ep->rcvbuf_policy)
4600 - rx_count = atomic_read(&asoc->rmem_alloc);
4601 - else
4602 - rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
4603 + if (asoc->rwnd_over) {
4604 + if (asoc->rwnd_over >= len) {
4605 + asoc->rwnd_over -= len;
4606 + } else {
4607 + asoc->rwnd += (len - asoc->rwnd_over);
4608 + asoc->rwnd_over = 0;
4609 + }
4610 + } else {
4611 + asoc->rwnd += len;
4612 + }
4613
4614 - if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0)
4615 - asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1;
4616 - else
4617 - asoc->rwnd = 0;
4618 + /* If we had window pressure, start recovering it
4619 + * once our rwnd has reached the accumulated pressure
4620 + * threshold. The idea is to recover slowly, but up
4621 + * to the initial advertised window.
4622 + */
4623 + if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
4624 + int change = min(asoc->pathmtu, asoc->rwnd_press);
4625 + asoc->rwnd += change;
4626 + asoc->rwnd_press -= change;
4627 + }
4628
4629 - pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n",
4630 - __func__, asoc, asoc->rwnd, rx_count,
4631 - asoc->base.sk->sk_rcvbuf);
4632 + pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
4633 + __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
4634 + asoc->a_rwnd);
4635
4636 /* Send a window update SACK if the rwnd has increased by at least the
4637 * minimum of the association's PMTU and half of the receive buffer.
4638 * The algorithm used is similar to the one described in
4639 * Section 4.2.3.3 of RFC 1122.
4640 */
4641 - if (update_peer && sctp_peer_needs_update(asoc)) {
4642 + if (sctp_peer_needs_update(asoc)) {
4643 asoc->a_rwnd = asoc->rwnd;
4644
4645 pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
4646 @@ -1446,6 +1455,45 @@ void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
4647 }
4648 }
4649
4650 +/* Decrease asoc's rwnd by len. */
4651 +void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
4652 +{
4653 + int rx_count;
4654 + int over = 0;
4655 +
4656 + if (unlikely(!asoc->rwnd || asoc->rwnd_over))
4657 + pr_debug("%s: association:%p has asoc->rwnd:%u, "
4658 + "asoc->rwnd_over:%u!\n", __func__, asoc,
4659 + asoc->rwnd, asoc->rwnd_over);
4660 +
4661 + if (asoc->ep->rcvbuf_policy)
4662 + rx_count = atomic_read(&asoc->rmem_alloc);
4663 + else
4664 + rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
4665 +
4666 + /* If we've reached or overflowed our receive buffer, announce
4667 + * a 0 rwnd if rwnd would still be positive. Store the
4668 + * potential pressure overflow so that the window can be restored
4669 + * back to its original value.
4670 + */
4671 + if (rx_count >= asoc->base.sk->sk_rcvbuf)
4672 + over = 1;
4673 +
4674 + if (asoc->rwnd >= len) {
4675 + asoc->rwnd -= len;
4676 + if (over) {
4677 + asoc->rwnd_press += asoc->rwnd;
4678 + asoc->rwnd = 0;
4679 + }
4680 + } else {
4681 + asoc->rwnd_over = len - asoc->rwnd;
4682 + asoc->rwnd = 0;
4683 + }
4684 +
4685 + pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
4686 + __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
4687 + asoc->rwnd_press);
4688 +}
4689
4690 /* Build the bind address list for the association based on info from the
4691 * local endpoint and the remote peer.
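
The rwnd rework above returns to incremental accounting: receiving data charges the window down by the skb length, delivery credits it back, and two side counters absorb the corner cases (rwnd_over for charges beyond the window, rwnd_press for pressure while the socket buffer is full). A compilable trace of both paths, mirroring the patched logic:

        #include <stdio.h>

        struct assoc {
                unsigned rwnd, rwnd_over, rwnd_press, pathmtu;
        };

        static void rwnd_decrease(struct assoc *a, unsigned len, int buf_full)
        {
                if (a->rwnd >= len) {
                        a->rwnd -= len;
                        if (buf_full) {           /* rcvbuf exhausted */
                                a->rwnd_press += a->rwnd;
                                a->rwnd = 0;
                        }
                } else {
                        a->rwnd_over = len - a->rwnd;
                        a->rwnd = 0;
                }
        }

        static void rwnd_increase(struct assoc *a, unsigned len)
        {
                if (a->rwnd_over) {               /* repay the overdraft first */
                        if (a->rwnd_over >= len) {
                                a->rwnd_over -= len;
                        } else {
                                a->rwnd += len - a->rwnd_over;
                                a->rwnd_over = 0;
                        }
                } else {
                        a->rwnd += len;
                }
                if (a->rwnd_press && a->rwnd >= a->rwnd_press) {
                        /* recover pressure slowly, one PMTU at a time */
                        unsigned c = a->pathmtu < a->rwnd_press ?
                                     a->pathmtu : a->rwnd_press;
                        a->rwnd += c;
                        a->rwnd_press -= c;
                }
        }

        int main(void)
        {
                struct assoc a = { .rwnd = 1000, .pathmtu = 1500 };

                rwnd_decrease(&a, 1500, 0);  /* rwnd 0, rwnd_over 500 */
                rwnd_increase(&a, 1500);     /* overdraft repaid: rwnd 1000 */
                printf("rwnd=%u over=%u press=%u\n",
                       a.rwnd, a.rwnd_over, a.rwnd_press);
                return 0;
        }
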
4692 diff --git a/net/sctp/auth.c b/net/sctp/auth.c
4693 index 683c7d1b1306..0e8529113dc5 100644
4694 --- a/net/sctp/auth.c
4695 +++ b/net/sctp/auth.c
4696 @@ -386,14 +386,13 @@ nomem:
4697 */
4698 int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
4699 {
4700 - struct net *net = sock_net(asoc->base.sk);
4701 struct sctp_auth_bytes *secret;
4702 struct sctp_shared_key *ep_key;
4703
4704 /* If we don't support AUTH, or peer is not capable
4705 * we don't need to do anything.
4706 */
4707 - if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
4708 + if (!asoc->ep->auth_enable || !asoc->peer.auth_capable)
4709 return 0;
4710
4711 /* If the key_id is non-zero and we couldn't find an
4712 @@ -440,16 +439,16 @@ struct sctp_shared_key *sctp_auth_get_shkey(
4713 */
4714 int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
4715 {
4716 - struct net *net = sock_net(ep->base.sk);
4717 struct crypto_hash *tfm = NULL;
4718 __u16 id;
4719
4720 - /* if the transforms are already allocted, we are done */
4721 - if (!net->sctp.auth_enable) {
4722 + /* If AUTH extension is disabled, we are done */
4723 + if (!ep->auth_enable) {
4724 ep->auth_hmacs = NULL;
4725 return 0;
4726 }
4727
4728 + /* If the transforms are already allocated, we are done */
4729 if (ep->auth_hmacs)
4730 return 0;
4731
4732 @@ -665,12 +664,10 @@ static int __sctp_auth_cid(sctp_cid_t chunk, struct sctp_chunks_param *param)
4733 /* Check if peer requested that this chunk is authenticated */
4734 int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
4735 {
4736 - struct net *net;
4737 if (!asoc)
4738 return 0;
4739
4740 - net = sock_net(asoc->base.sk);
4741 - if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
4742 + if (!asoc->ep->auth_enable || !asoc->peer.auth_capable)
4743 return 0;
4744
4745 return __sctp_auth_cid(chunk, asoc->peer.peer_chunks);
4746 @@ -679,12 +676,10 @@ int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
4747 /* Check if we requested that peer authenticate this chunk. */
4748 int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
4749 {
4750 - struct net *net;
4751 if (!asoc)
4752 return 0;
4753
4754 - net = sock_net(asoc->base.sk);
4755 - if (!net->sctp.auth_enable)
4756 + if (!asoc->ep->auth_enable)
4757 return 0;
4758
4759 return __sctp_auth_cid(chunk,
4760 diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
4761 index 8e5fdea05216..3d9f429858dc 100644
4762 --- a/net/sctp/endpointola.c
4763 +++ b/net/sctp/endpointola.c
4764 @@ -68,7 +68,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
4765 if (!ep->digest)
4766 return NULL;
4767
4768 - if (net->sctp.auth_enable) {
4769 + ep->auth_enable = net->sctp.auth_enable;
4770 + if (ep->auth_enable) {
4771 /* Allocate space for HMACS and CHUNKS authentication
4772 * variables. There are arrays that we encode directly
4773 * into parameters to make the rest of the operations easier.
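
From here on, the SCTP hunks replace reads of the global net->sctp.auth_enable with a per-endpoint copy taken at endpoint creation, so flipping the sysctl no longer changes AUTH semantics for endpoints that already exist. The snapshot in miniature:

        #include <stdio.h>

        struct net_stub {
                int auth_enable;
        };

        struct endpoint {
                int auth_enable;
        };

        static void endpoint_init(struct endpoint *ep, const struct net_stub *net)
        {
                ep->auth_enable = net->auth_enable;  /* captured once */
        }

        int main(void)
        {
                struct net_stub net = { .auth_enable = 1 };
                struct endpoint ep;

                endpoint_init(&ep, &net);
                net.auth_enable = 0;             /* later sysctl write */
                printf("%d\n", ep.auth_enable);  /* still 1 */
                return 0;
        }
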
4774 diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
4775 index 4e1d0fcb028e..a62a215dd22e 100644
4776 --- a/net/sctp/protocol.c
4777 +++ b/net/sctp/protocol.c
4778 @@ -491,8 +491,13 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
4779 continue;
4780 if ((laddr->state == SCTP_ADDR_SRC) &&
4781 (AF_INET == laddr->a.sa.sa_family)) {
4782 - fl4->saddr = laddr->a.v4.sin_addr.s_addr;
4783 fl4->fl4_sport = laddr->a.v4.sin_port;
4784 + flowi4_update_output(fl4,
4785 + asoc->base.sk->sk_bound_dev_if,
4786 + RT_CONN_FLAGS(asoc->base.sk),
4787 + daddr->v4.sin_addr.s_addr,
4788 + laddr->a.v4.sin_addr.s_addr);
4789 +
4790 rt = ip_route_output_key(sock_net(sk), fl4);
4791 if (!IS_ERR(rt)) {
4792 dst = &rt->dst;
4793 diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
4794 index 3a1767ef3201..fee5552ddf92 100644
4795 --- a/net/sctp/sm_make_chunk.c
4796 +++ b/net/sctp/sm_make_chunk.c
4797 @@ -219,6 +219,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
4798 gfp_t gfp, int vparam_len)
4799 {
4800 struct net *net = sock_net(asoc->base.sk);
4801 + struct sctp_endpoint *ep = asoc->ep;
4802 sctp_inithdr_t init;
4803 union sctp_params addrs;
4804 size_t chunksize;
4805 @@ -278,7 +279,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
4806 chunksize += vparam_len;
4807
4808 /* Account for AUTH related parameters */
4809 - if (net->sctp.auth_enable) {
4810 + if (ep->auth_enable) {
4811 /* Add random parameter length*/
4812 chunksize += sizeof(asoc->c.auth_random);
4813
4814 @@ -363,7 +364,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
4815 }
4816
4817 /* Add SCTP-AUTH chunks to the parameter list */
4818 - if (net->sctp.auth_enable) {
4819 + if (ep->auth_enable) {
4820 sctp_addto_chunk(retval, sizeof(asoc->c.auth_random),
4821 asoc->c.auth_random);
4822 if (auth_hmacs)
4823 @@ -2010,7 +2011,7 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
4824 /* if the peer reports AUTH, assume that he
4825 * supports AUTH.
4826 */
4827 - if (net->sctp.auth_enable)
4828 + if (asoc->ep->auth_enable)
4829 asoc->peer.auth_capable = 1;
4830 break;
4831 case SCTP_CID_ASCONF:
4832 @@ -2102,6 +2103,7 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
4833 * SCTP_IERROR_NO_ERROR - continue with the chunk
4834 */
4835 static sctp_ierror_t sctp_verify_param(struct net *net,
4836 + const struct sctp_endpoint *ep,
4837 const struct sctp_association *asoc,
4838 union sctp_params param,
4839 sctp_cid_t cid,
4840 @@ -2152,7 +2154,7 @@ static sctp_ierror_t sctp_verify_param(struct net *net,
4841 goto fallthrough;
4842
4843 case SCTP_PARAM_RANDOM:
4844 - if (!net->sctp.auth_enable)
4845 + if (!ep->auth_enable)
4846 goto fallthrough;
4847
4848 /* SCTP-AUTH: Section 6.1
4849 @@ -2169,7 +2171,7 @@ static sctp_ierror_t sctp_verify_param(struct net *net,
4850 break;
4851
4852 case SCTP_PARAM_CHUNKS:
4853 - if (!net->sctp.auth_enable)
4854 + if (!ep->auth_enable)
4855 goto fallthrough;
4856
4857 /* SCTP-AUTH: Section 3.2
4858 @@ -2185,7 +2187,7 @@ static sctp_ierror_t sctp_verify_param(struct net *net,
4859 break;
4860
4861 case SCTP_PARAM_HMAC_ALGO:
4862 - if (!net->sctp.auth_enable)
4863 + if (!ep->auth_enable)
4864 goto fallthrough;
4865
4866 hmacs = (struct sctp_hmac_algo_param *)param.p;
4867 @@ -2220,10 +2222,9 @@ fallthrough:
4868 }
4869
4870 /* Verify the INIT packet before we process it. */
4871 -int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
4872 - sctp_cid_t cid,
4873 - sctp_init_chunk_t *peer_init,
4874 - struct sctp_chunk *chunk,
4875 +int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
4876 + const struct sctp_association *asoc, sctp_cid_t cid,
4877 + sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk,
4878 struct sctp_chunk **errp)
4879 {
4880 union sctp_params param;
4881 @@ -2264,8 +2265,8 @@ int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
4882
4883 /* Verify all the variable length parameters */
4884 sctp_walk_params(param, peer_init, init_hdr.params) {
4885 -
4886 - result = sctp_verify_param(net, asoc, param, cid, chunk, errp);
4887 + result = sctp_verify_param(net, ep, asoc, param, cid,
4888 + chunk, errp);
4889 switch (result) {
4890 case SCTP_IERROR_ABORT:
4891 case SCTP_IERROR_NOMEM:
4892 @@ -2497,6 +2498,7 @@ static int sctp_process_param(struct sctp_association *asoc,
4893 struct sctp_af *af;
4894 union sctp_addr_param *addr_param;
4895 struct sctp_transport *t;
4896 + struct sctp_endpoint *ep = asoc->ep;
4897
4898 /* We maintain all INIT parameters in network byte order all the
4899 * time. This allows us to not worry about whether the parameters
4900 @@ -2636,7 +2638,7 @@ do_addr_param:
4901 goto fall_through;
4902
4903 case SCTP_PARAM_RANDOM:
4904 - if (!net->sctp.auth_enable)
4905 + if (!ep->auth_enable)
4906 goto fall_through;
4907
4908 /* Save peer's random parameter */
4909 @@ -2649,7 +2651,7 @@ do_addr_param:
4910 break;
4911
4912 case SCTP_PARAM_HMAC_ALGO:
4913 - if (!net->sctp.auth_enable)
4914 + if (!ep->auth_enable)
4915 goto fall_through;
4916
4917 /* Save peer's HMAC list */
4918 @@ -2665,7 +2667,7 @@ do_addr_param:
4919 break;
4920
4921 case SCTP_PARAM_CHUNKS:
4922 - if (!net->sctp.auth_enable)
4923 + if (!ep->auth_enable)
4924 goto fall_through;
4925
4926 asoc->peer.peer_chunks = kmemdup(param.p,
4927 diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
4928 index 5d6883ff00c3..fef2acdf4a2e 100644
4929 --- a/net/sctp/sm_sideeffect.c
4930 +++ b/net/sctp/sm_sideeffect.c
4931 @@ -496,11 +496,10 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
4932
4933 /* If the transport error count is greater than the pf_retrans
4934 * threshold, and less than pathmaxrxt, and if the current state
4935 - * is not SCTP_UNCONFIRMED, then mark this transport as Partially
4936 - * Failed, see SCTP Quick Failover Draft, section 5.1
4937 + * is SCTP_ACTIVE, then mark this transport as Partially Failed,
4938 + * see SCTP Quick Failover Draft, section 5.1
4939 */
4940 - if ((transport->state != SCTP_PF) &&
4941 - (transport->state != SCTP_UNCONFIRMED) &&
4942 + if ((transport->state == SCTP_ACTIVE) &&
4943 (asoc->pf_retrans < transport->pathmaxrxt) &&
4944 (transport->error_count > asoc->pf_retrans)) {
4945
4946 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
4947 index 01e002430c85..5170a1ff95a1 100644
4948 --- a/net/sctp/sm_statefuns.c
4949 +++ b/net/sctp/sm_statefuns.c
4950 @@ -357,7 +357,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net,
4951
4952 /* Verify the INIT chunk before processing it. */
4953 err_chunk = NULL;
4954 - if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
4955 + if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
4956 (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
4957 &err_chunk)) {
4958 /* This chunk contains fatal error. It is to be discarded.
4959 @@ -524,7 +524,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net,
4960
4961 /* Verify the INIT chunk before processing it. */
4962 err_chunk = NULL;
4963 - if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
4964 + if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
4965 (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
4966 &err_chunk)) {
4967
4968 @@ -1430,7 +1430,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
4969
4970 /* Verify the INIT chunk before processing it. */
4971 err_chunk = NULL;
4972 - if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
4973 + if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
4974 (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
4975 &err_chunk)) {
4976 /* This chunk contains fatal error. It is to be discarded.
4977 @@ -6178,7 +6178,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
4978 * PMTU. In cases, such as loopback, this might be a rather
4979 * large spill over.
4980 */
4981 - if ((!chunk->data_accepted) && (!asoc->rwnd ||
4982 + if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over ||
4983 (datalen > asoc->rwnd + asoc->frag_point))) {
4984
4985 /* If this is the next TSN, consider reneging to make
4986 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
4987 index 981aaf8b6ace..604a6acdf92e 100644
4988 --- a/net/sctp/socket.c
4989 +++ b/net/sctp/socket.c
4990 @@ -2115,6 +2115,12 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
4991 sctp_skb_pull(skb, copied);
4992 skb_queue_head(&sk->sk_receive_queue, skb);
4993
4994 + /* When only a partial message is copied to the user, increase
4995 + * rwnd by that amount. If all the data in the skb is read,
4996 + * rwnd is updated when the event is freed.
4997 + */
4998 + if (!sctp_ulpevent_is_notification(event))
4999 + sctp_assoc_rwnd_increase(event->asoc, copied);
5000 goto out;
5001 } else if ((event->msg_flags & MSG_NOTIFICATION) ||
5002 (event->msg_flags & MSG_EOR))
5003 @@ -3315,10 +3321,10 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
5004 char __user *optval,
5005 unsigned int optlen)
5006 {
5007 - struct net *net = sock_net(sk);
5008 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5009 struct sctp_authchunk val;
5010
5011 - if (!net->sctp.auth_enable)
5012 + if (!ep->auth_enable)
5013 return -EACCES;
5014
5015 if (optlen != sizeof(struct sctp_authchunk))
5016 @@ -3335,7 +3341,7 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
5017 }
5018
5019 /* add this chunk id to the endpoint */
5020 - return sctp_auth_ep_add_chunkid(sctp_sk(sk)->ep, val.sauth_chunk);
5021 + return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
5022 }
5023
5024 /*
5025 @@ -3348,12 +3354,12 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
5026 char __user *optval,
5027 unsigned int optlen)
5028 {
5029 - struct net *net = sock_net(sk);
5030 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5031 struct sctp_hmacalgo *hmacs;
5032 u32 idents;
5033 int err;
5034
5035 - if (!net->sctp.auth_enable)
5036 + if (!ep->auth_enable)
5037 return -EACCES;
5038
5039 if (optlen < sizeof(struct sctp_hmacalgo))
5040 @@ -3370,7 +3376,7 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
5041 goto out;
5042 }
5043
5044 - err = sctp_auth_ep_set_hmacs(sctp_sk(sk)->ep, hmacs);
5045 + err = sctp_auth_ep_set_hmacs(ep, hmacs);
5046 out:
5047 kfree(hmacs);
5048 return err;
5049 @@ -3386,12 +3392,12 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
5050 char __user *optval,
5051 unsigned int optlen)
5052 {
5053 - struct net *net = sock_net(sk);
5054 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5055 struct sctp_authkey *authkey;
5056 struct sctp_association *asoc;
5057 int ret;
5058
5059 - if (!net->sctp.auth_enable)
5060 + if (!ep->auth_enable)
5061 return -EACCES;
5062
5063 if (optlen <= sizeof(struct sctp_authkey))
5064 @@ -3412,7 +3418,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
5065 goto out;
5066 }
5067
5068 - ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
5069 + ret = sctp_auth_set_key(ep, asoc, authkey);
5070 out:
5071 kzfree(authkey);
5072 return ret;
5073 @@ -3428,11 +3434,11 @@ static int sctp_setsockopt_active_key(struct sock *sk,
5074 char __user *optval,
5075 unsigned int optlen)
5076 {
5077 - struct net *net = sock_net(sk);
5078 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5079 struct sctp_authkeyid val;
5080 struct sctp_association *asoc;
5081
5082 - if (!net->sctp.auth_enable)
5083 + if (!ep->auth_enable)
5084 return -EACCES;
5085
5086 if (optlen != sizeof(struct sctp_authkeyid))
5087 @@ -3444,8 +3450,7 @@ static int sctp_setsockopt_active_key(struct sock *sk,
5088 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
5089 return -EINVAL;
5090
5091 - return sctp_auth_set_active_key(sctp_sk(sk)->ep, asoc,
5092 - val.scact_keynumber);
5093 + return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
5094 }
5095
5096 /*
5097 @@ -3457,11 +3462,11 @@ static int sctp_setsockopt_del_key(struct sock *sk,
5098 char __user *optval,
5099 unsigned int optlen)
5100 {
5101 - struct net *net = sock_net(sk);
5102 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5103 struct sctp_authkeyid val;
5104 struct sctp_association *asoc;
5105
5106 - if (!net->sctp.auth_enable)
5107 + if (!ep->auth_enable)
5108 return -EACCES;
5109
5110 if (optlen != sizeof(struct sctp_authkeyid))
5111 @@ -3473,8 +3478,7 @@ static int sctp_setsockopt_del_key(struct sock *sk,
5112 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
5113 return -EINVAL;
5114
5115 - return sctp_auth_del_key_id(sctp_sk(sk)->ep, asoc,
5116 - val.scact_keynumber);
5117 + return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
5118
5119 }
5120
5121 @@ -5381,16 +5385,16 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
5122 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
5123 char __user *optval, int __user *optlen)
5124 {
5125 - struct net *net = sock_net(sk);
5126 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5127 struct sctp_hmacalgo __user *p = (void __user *)optval;
5128 struct sctp_hmac_algo_param *hmacs;
5129 __u16 data_len = 0;
5130 u32 num_idents;
5131
5132 - if (!net->sctp.auth_enable)
5133 + if (!ep->auth_enable)
5134 return -EACCES;
5135
5136 - hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
5137 + hmacs = ep->auth_hmacs_list;
5138 data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
5139
5140 if (len < sizeof(struct sctp_hmacalgo) + data_len)
5141 @@ -5411,11 +5415,11 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
5142 static int sctp_getsockopt_active_key(struct sock *sk, int len,
5143 char __user *optval, int __user *optlen)
5144 {
5145 - struct net *net = sock_net(sk);
5146 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5147 struct sctp_authkeyid val;
5148 struct sctp_association *asoc;
5149
5150 - if (!net->sctp.auth_enable)
5151 + if (!ep->auth_enable)
5152 return -EACCES;
5153
5154 if (len < sizeof(struct sctp_authkeyid))
5155 @@ -5430,7 +5434,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
5156 if (asoc)
5157 val.scact_keynumber = asoc->active_key_id;
5158 else
5159 - val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;
5160 + val.scact_keynumber = ep->active_key_id;
5161
5162 len = sizeof(struct sctp_authkeyid);
5163 if (put_user(len, optlen))
5164 @@ -5444,7 +5448,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
5165 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
5166 char __user *optval, int __user *optlen)
5167 {
5168 - struct net *net = sock_net(sk);
5169 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5170 struct sctp_authchunks __user *p = (void __user *)optval;
5171 struct sctp_authchunks val;
5172 struct sctp_association *asoc;
5173 @@ -5452,7 +5456,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
5174 u32 num_chunks = 0;
5175 char __user *to;
5176
5177 - if (!net->sctp.auth_enable)
5178 + if (!ep->auth_enable)
5179 return -EACCES;
5180
5181 if (len < sizeof(struct sctp_authchunks))
5182 @@ -5489,7 +5493,7 @@ num:
5183 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
5184 char __user *optval, int __user *optlen)
5185 {
5186 - struct net *net = sock_net(sk);
5187 + struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5188 struct sctp_authchunks __user *p = (void __user *)optval;
5189 struct sctp_authchunks val;
5190 struct sctp_association *asoc;
5191 @@ -5497,7 +5501,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
5192 u32 num_chunks = 0;
5193 char __user *to;
5194
5195 - if (!net->sctp.auth_enable)
5196 + if (!ep->auth_enable)
5197 return -EACCES;
5198
5199 if (len < sizeof(struct sctp_authchunks))
5200 @@ -5514,7 +5518,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
5201 if (asoc)
5202 ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
5203 else
5204 - ch = sctp_sk(sk)->ep->auth_chunk_list;
5205 + ch = ep->auth_chunk_list;
5206
5207 if (!ch)
5208 goto num;
5209 @@ -6593,6 +6597,46 @@ static void __sctp_write_space(struct sctp_association *asoc)
5210 }
5211 }
5212
5213 +static void sctp_wake_up_waiters(struct sock *sk,
5214 + struct sctp_association *asoc)
5215 +{
5216 + struct sctp_association *tmp = asoc;
5217 +
5218 + /* We do accounting for the sndbuf space per association,
5219 + * so we only need to wake our own association.
5220 + */
5221 + if (asoc->ep->sndbuf_policy)
5222 + return __sctp_write_space(asoc);
5223 +
5224 + /* If association goes down and is just flushing its
5225 + * outq, then just normally notify others.
5226 + */
5227 + if (asoc->base.dead)
5228 + return sctp_write_space(sk);
5229 +
5230 + /* Accounting for the sndbuf space is per socket, so we
5231 + * need to wake up others, try to be fair and in case of
5232 + * other associations, let them have a go first instead
5233 + * of just doing a sctp_write_space() call.
5234 + *
5235 + * Note that we reach sctp_wake_up_waiters() only when
5236 + * associations free up queued chunks, thus we are under
5237 + * lock and the list of associations on a socket is
5238 + * guaranteed not to change.
5239 + */
5240 + for (tmp = list_next_entry(tmp, asocs); 1;
5241 + tmp = list_next_entry(tmp, asocs)) {
5242 + /* Manually skip the head element. */
5243 + if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
5244 + continue;
5245 + /* Wake up association. */
5246 + __sctp_write_space(tmp);
5247 + /* We've reached the end. */
5248 + if (tmp == asoc)
5249 + break;
5250 + }
5251 +}
5252 +
5253 /* Do accounting for the sndbuf space.
5254 * Decrement the used sndbuf space of the corresponding association by the
5255 * data size which was just transmitted(freed).
5256 @@ -6620,7 +6664,7 @@ static void sctp_wfree(struct sk_buff *skb)
5257 sk_mem_uncharge(sk, skb->truesize);
5258
5259 sock_wfree(skb);
5260 - __sctp_write_space(asoc);
5261 + sctp_wake_up_waiters(sk, asoc);
5262
5263 sctp_association_put(asoc);
5264 }
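
sctp_wake_up_waiters() walks the socket's circular association list starting just after the association that freed space, skipping the list head, so under a shared sndbuf the other associations get a chance first and the freeing one is woken last. The rotation mirrored with a plain array:

        #include <stdio.h>

        int main(void)
        {
                int asocs[4] = { 10, 11, 12, 13 };
                int cur = 2;  /* index of the association that freed space */

                for (int i = 1; i <= 4; i++) {
                        /* the head-skip is implicit with an array */
                        int idx = (cur + i) % 4;
                        printf("wake %d\n", asocs[idx]);
                }
                /* prints 13 10 11 12: the freeing association comes last */
                return 0;
        }
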
5265 diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
5266 index 35c8923b5554..c82fdc1eab7c 100644
5267 --- a/net/sctp/sysctl.c
5268 +++ b/net/sctp/sysctl.c
5269 @@ -64,6 +64,9 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
5270 static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
5271 void __user *buffer, size_t *lenp,
5272 loff_t *ppos);
5273 +static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
5274 + void __user *buffer, size_t *lenp,
5275 + loff_t *ppos);
5276
5277 static struct ctl_table sctp_table[] = {
5278 {
5279 @@ -266,7 +269,7 @@ static struct ctl_table sctp_net_table[] = {
5280 .data = &init_net.sctp.auth_enable,
5281 .maxlen = sizeof(int),
5282 .mode = 0644,
5283 - .proc_handler = proc_dointvec,
5284 + .proc_handler = proc_sctp_do_auth,
5285 },
5286 {
5287 .procname = "addr_scope_policy",
5288 @@ -400,6 +403,37 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
5289 return ret;
5290 }
5291
5292 +static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
5293 + void __user *buffer, size_t *lenp,
5294 + loff_t *ppos)
5295 +{
5296 + struct net *net = current->nsproxy->net_ns;
5297 + struct ctl_table tbl;
5298 + int new_value, ret;
5299 +
5300 + memset(&tbl, 0, sizeof(struct ctl_table));
5301 + tbl.maxlen = sizeof(unsigned int);
5302 +
5303 + if (write)
5304 + tbl.data = &new_value;
5305 + else
5306 + tbl.data = &net->sctp.auth_enable;
5307 +
5308 + ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
5309 +
5310 + if (write) {
5311 + struct sock *sk = net->sctp.ctl_sock;
5312 +
5313 + net->sctp.auth_enable = new_value;
5314 + /* Update the value in the control socket */
5315 + lock_sock(sk);
5316 + sctp_sk(sk)->ep->auth_enable = new_value;
5317 + release_sock(sk);
5318 + }
5319 +
5320 + return ret;
5321 +}
5322 +
5323 int sctp_sysctl_net_register(struct net *net)
5324 {
5325 struct ctl_table *table = sctp_net_table;
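
proc_sctp_do_auth() follows the usual shadow-ctl_table idiom: a write is parsed into a local variable first and only then propagated, under lock_sock(), to both the per-net flag and the control socket's endpoint, keeping the two consumers in sync. Minimal shape of the idiom (atoi stands in for proc_dointvec):

        #include <stdio.h>
        #include <stdlib.h>

        static int net_auth_enable;
        static int ctl_ep_auth_enable;

        static int do_auth_write(const char *buffer)
        {
                int new_value = atoi(buffer);  /* parse into a shadow first */

                net_auth_enable = new_value;
                /* upstream holds lock_sock() around this assignment */
                ctl_ep_auth_enable = new_value;
                return 0;
        }

        int main(void)
        {
                do_auth_write("1");
                printf("%d %d\n", net_auth_enable, ctl_ep_auth_enable);
                return 0;
        }
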
5326 diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
5327 index 8d198ae03606..85c64658bd0b 100644
5328 --- a/net/sctp/ulpevent.c
5329 +++ b/net/sctp/ulpevent.c
5330 @@ -989,7 +989,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
5331 skb = sctp_event2skb(event);
5332 /* Set the owner and charge rwnd for bytes received. */
5333 sctp_ulpevent_set_owner(event, asoc);
5334 - sctp_assoc_rwnd_update(asoc, false);
5335 + sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb));
5336
5337 if (!skb->data_len)
5338 return;
5339 @@ -1011,7 +1011,6 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
5340 {
5341 struct sk_buff *skb, *frag;
5342 unsigned int len;
5343 - struct sctp_association *asoc;
5344
5345 /* Current stack structures assume that the rcv buffer is
5346 * per socket. For UDP style sockets this is not true as
5347 @@ -1036,11 +1035,8 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
5348 }
5349
5350 done:
5351 - asoc = event->asoc;
5352 - sctp_association_hold(asoc);
5353 + sctp_assoc_rwnd_increase(event->asoc, len);
5354 sctp_ulpevent_release_owner(event);
5355 - sctp_assoc_rwnd_update(asoc, true);
5356 - sctp_association_put(asoc);
5357 }
5358
5359 static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
5360 diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
5361 index 5adfd94c5b85..85d232bed87d 100644
5362 --- a/net/vmw_vsock/af_vsock.c
5363 +++ b/net/vmw_vsock/af_vsock.c
5364 @@ -1925,9 +1925,23 @@ static struct miscdevice vsock_device = {
5365 .fops = &vsock_device_ops,
5366 };
5367
5368 -static int __vsock_core_init(void)
5369 +int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
5370 {
5371 - int err;
5372 + int err = mutex_lock_interruptible(&vsock_register_mutex);
5373 +
5374 + if (err)
5375 + return err;
5376 +
5377 + if (transport) {
5378 + err = -EBUSY;
5379 + goto err_busy;
5380 + }
5381 +
5382 + /* Transport must be the owner of the protocol so that it can't
5383 + * unload while there are open sockets.
5384 + */
5385 + vsock_proto.owner = owner;
5386 + transport = t;
5387
5388 vsock_init_tables();
5389
5390 @@ -1951,36 +1965,19 @@ static int __vsock_core_init(void)
5391 goto err_unregister_proto;
5392 }
5393
5394 + mutex_unlock(&vsock_register_mutex);
5395 return 0;
5396
5397 err_unregister_proto:
5398 proto_unregister(&vsock_proto);
5399 err_misc_deregister:
5400 misc_deregister(&vsock_device);
5401 - return err;
5402 -}
5403 -
5404 -int vsock_core_init(const struct vsock_transport *t)
5405 -{
5406 - int retval = mutex_lock_interruptible(&vsock_register_mutex);
5407 - if (retval)
5408 - return retval;
5409 -
5410 - if (transport) {
5411 - retval = -EBUSY;
5412 - goto out;
5413 - }
5414 -
5415 - transport = t;
5416 - retval = __vsock_core_init();
5417 - if (retval)
5418 - transport = NULL;
5419 -
5420 -out:
5421 + transport = NULL;
5422 +err_busy:
5423 mutex_unlock(&vsock_register_mutex);
5424 - return retval;
5425 + return err;
5426 }
5427 -EXPORT_SYMBOL_GPL(vsock_core_init);
5428 +EXPORT_SYMBOL_GPL(__vsock_core_init);
5429
5430 void vsock_core_exit(void)
5431 {
5432 @@ -2000,5 +1997,5 @@ EXPORT_SYMBOL_GPL(vsock_core_exit);
5433
5434 MODULE_AUTHOR("VMware, Inc.");
5435 MODULE_DESCRIPTION("VMware Virtual Socket Family");
5436 -MODULE_VERSION("1.0.0.0-k");
5437 +MODULE_VERSION("1.0.1.0-k");
5438 MODULE_LICENSE("GPL v2");
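
Threading struct module through __vsock_core_init() and storing it as vsock_proto.owner pins the transport module for as long as any vsock socket exists, and the reworked error path resets transport to NULL before the mutex is dropped, leaving no stale registration behind. A sketch, not compilable on its own, of the header-side wrapper this split implies, assuming the conventional THIS_MODULE forwarding (the wrapper itself is not part of this excerpt):

        /* presumed companion in include/net/af_vsock.h */
        static inline int vsock_core_init(const struct vsock_transport *t)
        {
                return __vsock_core_init(t, THIS_MODULE);
        }
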
5439 diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
5440 index 1587ea392ad6..30e8e0c3f117 100644
5441 --- a/tools/lib/traceevent/event-parse.c
5442 +++ b/tools/lib/traceevent/event-parse.c
5443 @@ -4321,6 +4321,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
5444 format, len_arg, arg);
5445 trace_seq_terminate(&p);
5446 trace_seq_puts(s, p.buffer);
5447 + trace_seq_destroy(&p);
5448 arg = arg->next;
5449 break;
5450 default: