Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.13/0105-4.13.6-all-fixes.patch



Revision 3009
Wed Oct 25 09:41:34 2017 UTC by niro
File size: 188568 bytes
-linux-4.13.6
1 diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
2 index 36f528a7fdd6..8caa60734647 100644
3 --- a/Documentation/filesystems/overlayfs.txt
4 +++ b/Documentation/filesystems/overlayfs.txt
5 @@ -210,8 +210,11 @@ path as another overlay mount and it may use a lower layer path that is
6 beneath or above the path of another overlay lower layer path.
7
8 Using an upper layer path and/or a workdir path that are already used by
9 -another overlay mount is not allowed and will fail with EBUSY. Using
10 +another overlay mount is not allowed and may fail with EBUSY. Using
11 partially overlapping paths is not allowed but will not fail with EBUSY.
12 +If files are accessed from two overlayfs mounts which share or overlap the
13 +upper layer and/or workdir path the behavior of the overlay is undefined,
14 +though it will not result in a crash or deadlock.
15
16 Mounting an overlay using an upper layer path, where the upper layer path
17 was previously used by another mounted overlay in combination with a
18 diff --git a/Makefile b/Makefile
19 index 189f1a748e4c..9e1af1af327b 100644
20 --- a/Makefile
21 +++ b/Makefile
22 @@ -1,6 +1,6 @@
23 VERSION = 4
24 PATCHLEVEL = 13
25 -SUBLEVEL = 5
26 +SUBLEVEL = 6
27 EXTRAVERSION =
28 NAME = Fearless Coyote
29
30 diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
31 index 4d360713ed12..30d48ecf46e0 100644
32 --- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
33 +++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
34 @@ -254,7 +254,7 @@
35
36 ap_syscon: system-controller@6f4000 {
37 compatible = "syscon", "simple-mfd";
38 - reg = <0x6f4000 0x1000>;
39 + reg = <0x6f4000 0x2000>;
40
41 ap_clk: clock {
42 compatible = "marvell,ap806-clock";
43 @@ -265,7 +265,7 @@
44 compatible = "marvell,ap806-pinctrl";
45 };
46
47 - ap_gpio: gpio {
48 + ap_gpio: gpio@1040 {
49 compatible = "marvell,armada-8k-gpio";
50 offset = <0x1040>;
51 ngpios = <20>;
52 diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
53 index f0e6d717885b..d06fbe4cd38d 100644
54 --- a/arch/arm64/kernel/armv8_deprecated.c
55 +++ b/arch/arm64/kernel/armv8_deprecated.c
56 @@ -649,4 +649,4 @@ static int __init armv8_deprecated_init(void)
57 return 0;
58 }
59
60 -late_initcall(armv8_deprecated_init);
61 +core_initcall(armv8_deprecated_init);
62 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
63 index 9f9e0064c8c1..276eecab6cea 100644
64 --- a/arch/arm64/kernel/cpufeature.c
65 +++ b/arch/arm64/kernel/cpufeature.c
66 @@ -1294,4 +1294,4 @@ static int __init enable_mrs_emulation(void)
67 return 0;
68 }
69
70 -late_initcall(enable_mrs_emulation);
71 +core_initcall(enable_mrs_emulation);
72 diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
73 index 1df770e8cbe0..7275fed271af 100644
74 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c
75 +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
76 @@ -102,10 +102,10 @@ static void cpufeatures_flush_tlb(void)
77 case PVR_POWER8:
78 case PVR_POWER8E:
79 case PVR_POWER8NVL:
80 - __flush_tlb_power8(POWER8_TLB_SETS);
81 + __flush_tlb_power8(TLB_INVAL_SCOPE_GLOBAL);
82 break;
83 case PVR_POWER9:
84 - __flush_tlb_power9(POWER9_TLB_SETS_HASH);
85 + __flush_tlb_power9(TLB_INVAL_SCOPE_GLOBAL);
86 break;
87 default:
88 pr_err("unknown CPU version for boot TLB flush\n");
89 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
90 index f14f3c04ec7e..d9dfdf7ede45 100644
91 --- a/arch/powerpc/kernel/exceptions-64s.S
92 +++ b/arch/powerpc/kernel/exceptions-64s.S
93 @@ -734,7 +734,29 @@ EXC_REAL(program_check, 0x700, 0x100)
94 EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
95 TRAMP_KVM(PACA_EXGEN, 0x700)
96 EXC_COMMON_BEGIN(program_check_common)
97 - EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
98 + /*
99 + * It's possible to receive a TM Bad Thing type program check with
100 + * userspace register values (in particular r1), but with SRR1 reporting
101 + * that we came from the kernel. Normally that would confuse the bad
102 + * stack logic, and we would report a bad kernel stack pointer. Instead
103 + * we switch to the emergency stack if we're taking a TM Bad Thing from
104 + * the kernel.
105 + */
106 + li r10,MSR_PR /* Build a mask of MSR_PR .. */
107 + oris r10,r10,0x200000@h /* .. and SRR1_PROGTM */
108 + and r10,r10,r12 /* Mask SRR1 with that. */
109 + srdi r10,r10,8 /* Shift it so we can compare */
110 + cmpldi r10,(0x200000 >> 8) /* .. with an immediate. */
111 + bne 1f /* If != go to normal path. */
112 +
113 + /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack */
114 + andi. r10,r12,MSR_PR; /* Set CR0 correctly for label */
115 + /* 3 in EXCEPTION_PROLOG_COMMON */
116 + mr r10,r1 /* Save r1 */
117 + ld r1,PACAEMERGSP(r13) /* Use emergency stack */
118 + subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
119 + b 3f /* Jump into the macro !! */
120 +1: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
121 bl save_nvgprs
122 RECONCILE_IRQ_STATE(r10, r11)
123 addi r3,r1,STACK_FRAME_OVERHEAD
124 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
125 index c83c115858c1..b2c002993d78 100644
126 --- a/arch/powerpc/kernel/signal_64.c
127 +++ b/arch/powerpc/kernel/signal_64.c
128 @@ -452,9 +452,20 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
129 if (MSR_TM_RESV(msr))
130 return -EINVAL;
131
132 - /* pull in MSR TM from user context */
133 + /* pull in MSR TS bits from user context */
134 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
135
136 + /*
137 + * Ensure that TM is enabled in regs->msr before we leave the signal
138 + * handler. It could be the case that (a) user disabled the TM bit
139 + * through the manipulation of the MSR bits in uc_mcontext or (b) the
140 + * TM bit was disabled because a sufficient number of context switches
141 + * happened whilst in the signal handler and load_tm overflowed,
142 + * disabling the TM bit. In either case we can end up with an illegal
143 + * TM state leading to a TM Bad Thing when we return to userspace.
144 + */
145 + regs->msr |= MSR_TM;
146 +
147 /* pull in MSR LE from user context */
148 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
149
150 diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
151 index 13304622ab1c..bf457843e032 100644
152 --- a/arch/powerpc/kvm/book3s_xive.c
153 +++ b/arch/powerpc/kvm/book3s_xive.c
154 @@ -622,7 +622,7 @@ int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
155 return -EINVAL;
156 state = &sb->irq_state[idx];
157 arch_spin_lock(&sb->lock);
158 - *server = state->guest_server;
159 + *server = state->act_server;
160 *priority = state->guest_priority;
161 arch_spin_unlock(&sb->lock);
162
163 @@ -1331,7 +1331,7 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
164 xive->saved_src_count++;
165
166 /* Convert saved state into something compatible with xics */
167 - val = state->guest_server;
168 + val = state->act_server;
169 prio = state->saved_scan_prio;
170
171 if (prio == MASKED) {
172 @@ -1507,7 +1507,6 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
173 /* First convert prio and mark interrupt as untargetted */
174 act_prio = xive_prio_from_guest(guest_prio);
175 state->act_priority = MASKED;
176 - state->guest_server = server;
177
178 /*
179 * We need to drop the lock due to the mutex below. Hopefully
180 diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
181 index 5938f7644dc1..6ba63f8e8a61 100644
182 --- a/arch/powerpc/kvm/book3s_xive.h
183 +++ b/arch/powerpc/kvm/book3s_xive.h
184 @@ -35,7 +35,6 @@ struct kvmppc_xive_irq_state {
185 struct xive_irq_data *pt_data; /* XIVE Pass-through associated data */
186
187 /* Targetting as set by guest */
188 - u32 guest_server; /* Current guest selected target */
189 u8 guest_priority; /* Guest set priority */
190 u8 saved_priority; /* Saved priority when masking */
191
192 diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
193 index 897aa1400eb8..bbb73aa0eb8f 100644
194 --- a/arch/powerpc/platforms/powernv/setup.c
195 +++ b/arch/powerpc/platforms/powernv/setup.c
196 @@ -272,7 +272,15 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
197 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
198 static unsigned long pnv_memory_block_size(void)
199 {
200 - return 256UL * 1024 * 1024;
201 + /*
202 + * We map the kernel linear region with 1GB large pages on radix. For
203 + * memory hot unplug to work our memory block size must be at least
204 + * this size.
205 + */
206 + if (radix_enabled())
207 + return 1UL * 1024 * 1024 * 1024;
208 + else
209 + return 256UL * 1024 * 1024;
210 }
211 #endif
212
213 diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
214 index bc62e7cbf1b1..59ad3d132353 100644
215 --- a/arch/x86/include/asm/kvm_para.h
216 +++ b/arch/x86/include/asm/kvm_para.h
217 @@ -88,7 +88,7 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
218 bool kvm_para_available(void);
219 unsigned int kvm_arch_para_features(void);
220 void __init kvm_guest_init(void);
221 -void kvm_async_pf_task_wait(u32 token);
222 +void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
223 void kvm_async_pf_task_wake(u32 token);
224 u32 kvm_read_and_reset_pf_reason(void);
225 extern void kvm_disable_steal_time(void);
226 @@ -103,7 +103,7 @@ static inline void kvm_spinlock_init(void)
227
228 #else /* CONFIG_KVM_GUEST */
229 #define kvm_guest_init() do {} while (0)
230 -#define kvm_async_pf_task_wait(T) do {} while(0)
231 +#define kvm_async_pf_task_wait(T, I) do {} while(0)
232 #define kvm_async_pf_task_wake(T) do {} while(0)
233
234 static inline bool kvm_para_available(void)
235 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
236 index 58590a698a1a..e5e4306e4546 100644
237 --- a/arch/x86/kernel/kvm.c
238 +++ b/arch/x86/kernel/kvm.c
239 @@ -117,7 +117,11 @@ static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
240 return NULL;
241 }
242
243 -void kvm_async_pf_task_wait(u32 token)
244 +/*
245 + * @interrupt_kernel: Is this called from a routine which interrupts the kernel
246 + * (other than user space)?
247 + */
248 +void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
249 {
250 u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
251 struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
252 @@ -140,8 +144,10 @@ void kvm_async_pf_task_wait(u32 token)
253
254 n.token = token;
255 n.cpu = smp_processor_id();
256 - n.halted = is_idle_task(current) || preempt_count() > 1 ||
257 - rcu_preempt_depth();
258 + n.halted = is_idle_task(current) ||
259 + (IS_ENABLED(CONFIG_PREEMPT_COUNT)
260 + ? preempt_count() > 1 || rcu_preempt_depth()
261 + : interrupt_kernel);
262 init_swait_queue_head(&n.wq);
263 hlist_add_head(&n.link, &b->list);
264 raw_spin_unlock(&b->lock);
265 @@ -269,7 +275,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
266 case KVM_PV_REASON_PAGE_NOT_PRESENT:
267 /* page is swapped out by the host. */
268 prev_state = exception_enter();
269 - kvm_async_pf_task_wait((u32)read_cr2());
270 + kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
271 exception_exit(prev_state);
272 break;
273 case KVM_PV_REASON_PAGE_READY:
274 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
275 index 56e68dfac974..7558531c1215 100644
276 --- a/arch/x86/kvm/mmu.c
277 +++ b/arch/x86/kvm/mmu.c
278 @@ -3799,7 +3799,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
279 case KVM_PV_REASON_PAGE_NOT_PRESENT:
280 vcpu->arch.apf.host_apf_reason = 0;
281 local_irq_disable();
282 - kvm_async_pf_task_wait(fault_address);
283 + kvm_async_pf_task_wait(fault_address, 0);
284 local_irq_enable();
285 break;
286 case KVM_PV_REASON_PAGE_READY:
287 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
288 index e1324f280e06..4d50ced94686 100644
289 --- a/arch/x86/net/bpf_jit_comp.c
290 +++ b/arch/x86/net/bpf_jit_comp.c
291 @@ -282,9 +282,9 @@ static void emit_bpf_tail_call(u8 **pprog)
292 /* if (index >= array->map.max_entries)
293 * goto out;
294 */
295 - EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
296 + EMIT2(0x89, 0xD2); /* mov edx, edx */
297 + EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
298 offsetof(struct bpf_array, map.max_entries));
299 - EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
300 #define OFFSET1 47 /* number of bytes to jump */
301 EMIT2(X86_JBE, OFFSET1); /* jbe out */
302 label1 = cnt;
303 diff --git a/block/bsg-lib.c b/block/bsg-lib.c
304 index c587c71d78af..82ddfcd23939 100644
305 --- a/block/bsg-lib.c
306 +++ b/block/bsg-lib.c
307 @@ -207,20 +207,34 @@ static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
308 struct bsg_job *job = blk_mq_rq_to_pdu(req);
309 struct scsi_request *sreq = &job->sreq;
310
311 + /* called right after the request is allocated for the request_queue */
312 +
313 + sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
314 + if (!sreq->sense)
315 + return -ENOMEM;
316 +
317 + return 0;
318 +}
319 +
320 +static void bsg_initialize_rq(struct request *req)
321 +{
322 + struct bsg_job *job = blk_mq_rq_to_pdu(req);
323 + struct scsi_request *sreq = &job->sreq;
324 + void *sense = sreq->sense;
325 +
326 + /* called right before the request is given to the request_queue user */
327 +
328 memset(job, 0, sizeof(*job));
329
330 scsi_req_init(sreq);
331 +
332 + sreq->sense = sense;
333 sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
334 - sreq->sense = kzalloc(sreq->sense_len, gfp);
335 - if (!sreq->sense)
336 - return -ENOMEM;
337
338 job->req = req;
339 - job->reply = sreq->sense;
340 + job->reply = sense;
341 job->reply_len = sreq->sense_len;
342 job->dd_data = job + 1;
343 -
344 - return 0;
345 }
346
347 static void bsg_exit_rq(struct request_queue *q, struct request *req)
348 @@ -250,6 +264,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
349 q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
350 q->init_rq_fn = bsg_init_rq;
351 q->exit_rq_fn = bsg_exit_rq;
352 + q->initialize_rq_fn = bsg_initialize_rq;
353 q->request_fn = bsg_request_fn;
354
355 ret = blk_init_allocated_queue(q);
356 diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
357 index cfeb049a01ef..642afd88870b 100644
358 --- a/drivers/auxdisplay/charlcd.c
359 +++ b/drivers/auxdisplay/charlcd.c
360 @@ -647,18 +647,25 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
361 static int charlcd_open(struct inode *inode, struct file *file)
362 {
363 struct charlcd_priv *priv = to_priv(the_charlcd);
364 + int ret;
365
366 + ret = -EBUSY;
367 if (!atomic_dec_and_test(&charlcd_available))
368 - return -EBUSY; /* open only once at a time */
369 + goto fail; /* open only once at a time */
370
371 + ret = -EPERM;
372 if (file->f_mode & FMODE_READ) /* device is write-only */
373 - return -EPERM;
374 + goto fail;
375
376 if (priv->must_clear) {
377 charlcd_clear_display(&priv->lcd);
378 priv->must_clear = false;
379 }
380 return nonseekable_open(inode, file);
381 +
382 + fail:
383 + atomic_inc(&charlcd_available);
384 + return ret;
385 }
386
387 static int charlcd_release(struct inode *inode, struct file *file)
388 diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
389 index 7a8b8fb2f572..c54c20700d37 100644
390 --- a/drivers/auxdisplay/panel.c
391 +++ b/drivers/auxdisplay/panel.c
392 @@ -1105,14 +1105,21 @@ static ssize_t keypad_read(struct file *file,
393
394 static int keypad_open(struct inode *inode, struct file *file)
395 {
396 + int ret;
397 +
398 + ret = -EBUSY;
399 if (!atomic_dec_and_test(&keypad_available))
400 - return -EBUSY; /* open only once at a time */
401 + goto fail; /* open only once at a time */
402
403 + ret = -EPERM;
404 if (file->f_mode & FMODE_WRITE) /* device is read-only */
405 - return -EPERM;
406 + goto fail;
407
408 keypad_buflen = 0; /* flush the buffer on opening */
409 return 0;
410 + fail:
411 + atomic_inc(&keypad_available);
412 + return ret;
413 }
414
415 static int keypad_release(struct inode *inode, struct file *file)
416 diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
417 index d1c33a85059e..df8945d7f009 100644
418 --- a/drivers/base/arch_topology.c
419 +++ b/drivers/base/arch_topology.c
420 @@ -160,12 +160,12 @@ int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
421 }
422
423 #ifdef CONFIG_CPU_FREQ
424 -static cpumask_var_t cpus_to_visit;
425 -static bool cap_parsing_done;
426 -static void parsing_done_workfn(struct work_struct *work);
427 -static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
428 +static bool cap_parsing_done __initdata;
429 +static cpumask_var_t cpus_to_visit __initdata;
430 +static void __init parsing_done_workfn(struct work_struct *work);
431 +static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn);
432
433 -static int
434 +static int __init
435 init_cpu_capacity_callback(struct notifier_block *nb,
436 unsigned long val,
437 void *data)
438 @@ -200,7 +200,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
439 return 0;
440 }
441
442 -static struct notifier_block init_cpu_capacity_notifier = {
443 +static struct notifier_block init_cpu_capacity_notifier __initdata = {
444 .notifier_call = init_cpu_capacity_callback,
445 };
446
447 @@ -226,7 +226,7 @@ static int __init register_cpufreq_notifier(void)
448 }
449 core_initcall(register_cpufreq_notifier);
450
451 -static void parsing_done_workfn(struct work_struct *work)
452 +static void __init parsing_done_workfn(struct work_struct *work)
453 {
454 cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
455 CPUFREQ_POLICY_NOTIFIER);
456 diff --git a/drivers/base/platform.c b/drivers/base/platform.c
457 index d1bd99271066..9045c5f3734e 100644
458 --- a/drivers/base/platform.c
459 +++ b/drivers/base/platform.c
460 @@ -868,7 +868,8 @@ static ssize_t driver_override_store(struct device *dev,
461 struct platform_device *pdev = to_platform_device(dev);
462 char *driver_override, *old, *cp;
463
464 - if (count > PATH_MAX)
465 + /* We need to keep extra room for a newline */
466 + if (count >= (PAGE_SIZE - 1))
467 return -EINVAL;
468
469 driver_override = kstrndup(buf, count, GFP_KERNEL);
470 diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
471 index e40b77583c47..d8d3cb67b402 100644
472 --- a/drivers/clk/samsung/clk-exynos4.c
473 +++ b/drivers/clk/samsung/clk-exynos4.c
474 @@ -294,6 +294,18 @@ static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = {
475 #define PLL_ENABLED (1 << 31)
476 #define PLL_LOCKED (1 << 29)
477
478 +static void exynos4_clk_enable_pll(u32 reg)
479 +{
480 + u32 pll_con = readl(reg_base + reg);
481 + pll_con |= PLL_ENABLED;
482 + writel(pll_con, reg_base + reg);
483 +
484 + while (!(pll_con & PLL_LOCKED)) {
485 + cpu_relax();
486 + pll_con = readl(reg_base + reg);
487 + }
488 +}
489 +
490 static void exynos4_clk_wait_for_pll(u32 reg)
491 {
492 u32 pll_con;
493 @@ -315,6 +327,9 @@ static int exynos4_clk_suspend(void)
494 samsung_clk_save(reg_base, exynos4_save_pll,
495 ARRAY_SIZE(exynos4_clk_pll_regs));
496
497 + exynos4_clk_enable_pll(EPLL_CON0);
498 + exynos4_clk_enable_pll(VPLL_CON0);
499 +
500 if (exynos4_soc == EXYNOS4210) {
501 samsung_clk_save(reg_base, exynos4_save_soc,
502 ARRAY_SIZE(exynos4210_clk_save));
503 diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
504 index d805b6e6fe71..27743be5b768 100644
505 --- a/drivers/gpu/drm/i915/intel_audio.c
506 +++ b/drivers/gpu/drm/i915/intel_audio.c
507 @@ -606,11 +606,6 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
508 connector->encoder->base.id,
509 connector->encoder->name);
510
511 - /* ELD Conn_Type */
512 - connector->eld[5] &= ~(3 << 2);
513 - if (intel_crtc_has_dp_encoder(crtc_state))
514 - connector->eld[5] |= (1 << 2);
515 -
516 connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
517
518 if (dev_priv->display.audio_codec_enable)
519 diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
520 index 7ea7fd1e8856..645488071944 100644
521 --- a/drivers/gpu/drm/i915/intel_bios.c
522 +++ b/drivers/gpu/drm/i915/intel_bios.c
523 @@ -1163,6 +1163,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
524 is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
525 is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
526
527 + if (port == PORT_A && is_dvi) {
528 + DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
529 + is_hdmi ? "/HDMI" : "");
530 + is_dvi = false;
531 + is_hdmi = false;
532 + }
533 +
534 info->supports_dvi = is_dvi;
535 info->supports_hdmi = is_hdmi;
536 info->supports_dp = is_dp;
537 diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
538 index 951e834dd274..28a778b785ac 100644
539 --- a/drivers/gpu/drm/i915/intel_modes.c
540 +++ b/drivers/gpu/drm/i915/intel_modes.c
541 @@ -30,6 +30,21 @@
542 #include "intel_drv.h"
543 #include "i915_drv.h"
544
545 +static void intel_connector_update_eld_conn_type(struct drm_connector *connector)
546 +{
547 + u8 conn_type;
548 +
549 + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
550 + connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
551 + conn_type = DRM_ELD_CONN_TYPE_DP;
552 + } else {
553 + conn_type = DRM_ELD_CONN_TYPE_HDMI;
554 + }
555 +
556 + connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK;
557 + connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type;
558 +}
559 +
560 /**
561 * intel_connector_update_modes - update connector from edid
562 * @connector: DRM connector device to use
563 @@ -44,6 +59,8 @@ int intel_connector_update_modes(struct drm_connector *connector,
564 ret = drm_add_edid_modes(connector, edid);
565 drm_edid_to_eld(connector, edid);
566
567 + intel_connector_update_eld_conn_type(connector);
568 +
569 return ret;
570 }
571
572 diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
573 index 5b40c2614599..ef241d66562e 100644
574 --- a/drivers/hid/hid-rmi.c
575 +++ b/drivers/hid/hid-rmi.c
576 @@ -436,17 +436,24 @@ static int rmi_post_resume(struct hid_device *hdev)
577 if (!(data->device_flags & RMI_DEVICE))
578 return 0;
579
580 - ret = rmi_reset_attn_mode(hdev);
581 + /* Make sure the HID device is ready to receive events */
582 + ret = hid_hw_open(hdev);
583 if (ret)
584 return ret;
585
586 + ret = rmi_reset_attn_mode(hdev);
587 + if (ret)
588 + goto out;
589 +
590 ret = rmi_driver_resume(rmi_dev, false);
591 if (ret) {
592 hid_warn(hdev, "Failed to resume device: %d\n", ret);
593 - return ret;
594 + goto out;
595 }
596
597 - return 0;
598 +out:
599 + hid_hw_close(hdev);
600 + return ret;
601 }
602 #endif /* CONFIG_PM */
603
604 diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
605 index 046f692fd0a2..364150435c62 100644
606 --- a/drivers/hid/i2c-hid/i2c-hid.c
607 +++ b/drivers/hid/i2c-hid/i2c-hid.c
608 @@ -543,7 +543,8 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
609 {
610 /* the worst case is computed from the set_report command with a
611 * reportID > 15 and the maximum report length */
612 - int args_len = sizeof(__u8) + /* optional ReportID byte */
613 + int args_len = sizeof(__u8) + /* ReportID */
614 + sizeof(__u8) + /* optional ReportID byte */
615 sizeof(__u16) + /* data register */
616 sizeof(__u16) + /* size of the report */
617 report_size; /* report */
618 diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
619 index 838c1ebfffa9..a805ee2989cb 100644
620 --- a/drivers/hid/wacom_sys.c
621 +++ b/drivers/hid/wacom_sys.c
622 @@ -668,8 +668,10 @@ static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev)
623
624 /* Try to find an already-probed interface from the same device */
625 list_for_each_entry(data, &wacom_udev_list, list) {
626 - if (compare_device_paths(hdev, data->dev, '/'))
627 + if (compare_device_paths(hdev, data->dev, '/')) {
628 + kref_get(&data->kref);
629 return data;
630 + }
631 }
632
633 /* Fallback to finding devices that appear to be "siblings" */
634 @@ -766,6 +768,9 @@ static int wacom_led_control(struct wacom *wacom)
635 if (!wacom->led.groups)
636 return -ENOTSUPP;
637
638 + if (wacom->wacom_wac.features.type == REMOTE)
639 + return -ENOTSUPP;
640 +
641 if (wacom->wacom_wac.pid) { /* wireless connected */
642 report_id = WAC_CMD_WL_LED_CONTROL;
643 buf_size = 13;
644 diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
645 index bb17d7bbefd3..aa692e28b2cd 100644
646 --- a/drivers/hid/wacom_wac.c
647 +++ b/drivers/hid/wacom_wac.c
648 @@ -567,8 +567,8 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
649 keys = data[9] & 0x07;
650 }
651 } else {
652 - buttons = ((data[6] & 0x10) << 10) |
653 - ((data[5] & 0x10) << 9) |
654 + buttons = ((data[6] & 0x10) << 5) |
655 + ((data[5] & 0x10) << 4) |
656 ((data[6] & 0x0F) << 4) |
657 (data[5] & 0x0F);
658 }
659 @@ -1227,11 +1227,17 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
660 continue;
661
662 if (range) {
663 + /* Fix rotation alignment: userspace expects zero at left */
664 + int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]);
665 + rotation += 1800/4;
666 + if (rotation > 899)
667 + rotation -= 1800;
668 +
669 input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
670 input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
671 - input_report_abs(pen_input, ABS_TILT_X, frame[7]);
672 - input_report_abs(pen_input, ABS_TILT_Y, frame[8]);
673 - input_report_abs(pen_input, ABS_Z, get_unaligned_le16(&frame[9]));
674 + input_report_abs(pen_input, ABS_TILT_X, (char)frame[7]);
675 + input_report_abs(pen_input, ABS_TILT_Y, (char)frame[8]);
676 + input_report_abs(pen_input, ABS_Z, rotation);
677 input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11]));
678 }
679 input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
680 @@ -1319,12 +1325,19 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
681 unsigned char *data = wacom->data;
682
683 int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
684 - int ring = data[285];
685 - int prox = buttons | (ring & 0x80);
686 + int ring = data[285] & 0x7F;
687 + bool ringstatus = data[285] & 0x80;
688 + bool prox = buttons || ringstatus;
689 +
690 + /* Fix touchring data: userspace expects 0 at left and increasing clockwise */
691 + ring = 71 - ring;
692 + ring += 3*72/16;
693 + if (ring > 71)
694 + ring -= 72;
695
696 wacom_report_numbered_buttons(pad_input, 9, buttons);
697
698 - input_report_abs(pad_input, ABS_WHEEL, (ring & 0x80) ? (ring & 0x7f) : 0);
699 + input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);
700
701 input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0);
702 input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0);
703 @@ -1616,6 +1629,20 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
704 return 0;
705 }
706
707 +static int wacom_offset_rotation(struct input_dev *input, struct hid_usage *usage,
708 + int value, int num, int denom)
709 +{
710 + struct input_absinfo *abs = &input->absinfo[usage->code];
711 + int range = (abs->maximum - abs->minimum + 1);
712 +
713 + value += num*range/denom;
714 + if (value > abs->maximum)
715 + value -= range;
716 + else if (value < abs->minimum)
717 + value += range;
718 + return value;
719 +}
720 +
721 int wacom_equivalent_usage(int usage)
722 {
723 if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
724 @@ -1898,6 +1925,7 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
725 unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
726 int i;
727 bool is_touch_on = value;
728 + bool do_report = false;
729
730 /*
731 * Avoid reporting this event and setting inrange_state if this usage
732 @@ -1912,6 +1940,29 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
733 }
734
735 switch (equivalent_usage) {
736 + case WACOM_HID_WD_TOUCHRING:
737 + /*
738 + * Userspace expects touchrings to increase in value with
739 + * clockwise gestures and have their zero point at the
740 + * tablet's left. HID events "should" be clockwise-
741 + * increasing and zero at top, though the MobileStudio
742 + * Pro and 2nd-gen Intuos Pro don't do this...
743 + */
744 + if (hdev->vendor == 0x56a &&
745 + (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */
746 + hdev->product == 0x357 || hdev->product == 0x358)) { /* Intuos Pro 2 */
747 + value = (field->logical_maximum - value);
748 +
749 + if (hdev->product == 0x357 || hdev->product == 0x358)
750 + value = wacom_offset_rotation(input, usage, value, 3, 16);
751 + else if (hdev->product == 0x34d || hdev->product == 0x34e)
752 + value = wacom_offset_rotation(input, usage, value, 1, 2);
753 + }
754 + else {
755 + value = wacom_offset_rotation(input, usage, value, 1, 4);
756 + }
757 + do_report = true;
758 + break;
759 case WACOM_HID_WD_TOUCHRINGSTATUS:
760 if (!value)
761 input_event(input, usage->type, usage->code, 0);
762 @@ -1945,10 +1996,14 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
763 value, i);
764 /* fall through*/
765 default:
766 + do_report = true;
767 + break;
768 + }
769 +
770 + if (do_report) {
771 input_event(input, usage->type, usage->code, value);
772 if (value)
773 wacom_wac->hid_data.pad_input_event_flag = true;
774 - break;
775 }
776 }
777
778 @@ -2086,22 +2141,34 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
779 wacom_wac->hid_data.tipswitch |= value;
780 return;
781 case HID_DG_TOOLSERIALNUMBER:
782 - wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
783 - wacom_wac->serial[0] |= (__u32)value;
784 + if (value) {
785 + wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
786 + wacom_wac->serial[0] |= (__u32)value;
787 + }
788 return;
789 + case HID_DG_TWIST:
790 + /*
791 + * Userspace expects pen twist to have its zero point when
792 + * the buttons/finger is on the tablet's left. HID values
793 + * are zero when buttons are toward the top.
794 + */
795 + value = wacom_offset_rotation(input, usage, value, 1, 4);
796 + break;
797 case WACOM_HID_WD_SENSE:
798 wacom_wac->hid_data.sense_state = value;
799 return;
800 case WACOM_HID_WD_SERIALHI:
801 - wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
802 - wacom_wac->serial[0] |= ((__u64)value) << 32;
803 - /*
804 - * Non-USI EMR devices may contain additional tool type
805 - * information here. See WACOM_HID_WD_TOOLTYPE case for
806 - * more details.
807 - */
808 - if (value >> 20 == 1) {
809 - wacom_wac->id[0] |= value & 0xFFFFF;
810 + if (value) {
811 + wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
812 + wacom_wac->serial[0] |= ((__u64)value) << 32;
813 + /*
814 + * Non-USI EMR devices may contain additional tool type
815 + * information here. See WACOM_HID_WD_TOOLTYPE case for
816 + * more details.
817 + */
818 + if (value >> 20 == 1) {
819 + wacom_wac->id[0] |= value & 0xFFFFF;
820 + }
821 }
822 return;
823 case WACOM_HID_WD_TOOLTYPE:
824 @@ -2205,7 +2272,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
825 input_report_key(input, wacom_wac->tool[0], prox);
826 if (wacom_wac->serial[0]) {
827 input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
828 - input_report_abs(input, ABS_MISC, id);
829 + input_report_abs(input, ABS_MISC, prox ? id : 0);
830 }
831
832 wacom_wac->hid_data.tipswitch = false;
833 @@ -2216,6 +2283,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
834 if (!prox) {
835 wacom_wac->tool[0] = 0;
836 wacom_wac->id[0] = 0;
837 + wacom_wac->serial[0] = 0;
838 }
839 }
840
841 diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
842 index 4bbb8dea4727..037361158074 100644
843 --- a/drivers/hv/channel_mgmt.c
844 +++ b/drivers/hv/channel_mgmt.c
845 @@ -922,14 +922,10 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
846
847 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
848 {
849 - mutex_lock(&vmbus_connection.channel_mutex);
850 -
851 BUG_ON(!is_hvsock_channel(channel));
852
853 channel->rescind = true;
854 vmbus_device_unregister(channel->device_obj);
855 -
856 - mutex_unlock(&vmbus_connection.channel_mutex);
857 }
858 EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
859
860 diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
861 index daa75bd41f86..2364281d8593 100644
862 --- a/drivers/hv/hv_fcopy.c
863 +++ b/drivers/hv/hv_fcopy.c
864 @@ -170,6 +170,10 @@ static void fcopy_send_data(struct work_struct *dummy)
865 out_src = smsg_out;
866 break;
867
868 + case WRITE_TO_FILE:
869 + out_src = fcopy_transaction.fcopy_msg;
870 + out_len = sizeof(struct hv_do_fcopy);
871 + break;
872 default:
873 out_src = fcopy_transaction.fcopy_msg;
874 out_len = fcopy_transaction.recv_len;
875 diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
876 index da40df2ff27d..ed6262be3643 100644
877 --- a/drivers/hwtracing/intel_th/pci.c
878 +++ b/drivers/hwtracing/intel_th/pci.c
879 @@ -90,6 +90,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
880 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1),
881 .driver_data = (kernel_ulong_t)0,
882 },
883 + {
884 + /* Lewisburg PCH */
885 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa1a6),
886 + .driver_data = (kernel_ulong_t)0,
887 + },
888 {
889 /* Gemini Lake */
890 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
891 diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
892 index 0e731143f6a4..08b8305fee44 100644
893 --- a/drivers/hwtracing/stm/core.c
894 +++ b/drivers/hwtracing/stm/core.c
895 @@ -1119,7 +1119,7 @@ void stm_source_unregister_device(struct stm_source_data *data)
896
897 stm_source_link_drop(src);
898
899 - device_destroy(&stm_source_class, src->dev.devt);
900 + device_unregister(&src->dev);
901 }
902 EXPORT_SYMBOL_GPL(stm_source_unregister_device);
903
904 diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
905 index e6706a09e100..47c3d7f32900 100644
906 --- a/drivers/iio/adc/ad7793.c
907 +++ b/drivers/iio/adc/ad7793.c
908 @@ -257,7 +257,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
909 unsigned int vref_mv)
910 {
911 struct ad7793_state *st = iio_priv(indio_dev);
912 - int i, ret = -1;
913 + int i, ret;
914 unsigned long long scale_uv;
915 u32 id;
916
917 @@ -266,7 +266,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
918 return ret;
919
920 /* reset the serial interface */
921 - ret = spi_write(st->sd.spi, (u8 *)&ret, sizeof(ret));
922 + ret = ad_sd_reset(&st->sd, 32);
923 if (ret < 0)
924 goto out;
925 usleep_range(500, 2000); /* Wait for at least 500us */
926 diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
927 index d10bd0c97233..22c4c17cd996 100644
928 --- a/drivers/iio/adc/ad_sigma_delta.c
929 +++ b/drivers/iio/adc/ad_sigma_delta.c
930 @@ -177,6 +177,34 @@ int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta,
931 }
932 EXPORT_SYMBOL_GPL(ad_sd_read_reg);
933
934 +/**
935 + * ad_sd_reset() - Reset the serial interface
936 + *
937 + * @sigma_delta: The sigma delta device
938 + * @reset_length: Number of SCLKs with DIN = 1
939 + *
940 + * Returns 0 on success, an error code otherwise.
941 + **/
942 +int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
943 + unsigned int reset_length)
944 +{
945 + uint8_t *buf;
946 + unsigned int size;
947 + int ret;
948 +
949 + size = DIV_ROUND_UP(reset_length, 8);
950 + buf = kcalloc(size, sizeof(*buf), GFP_KERNEL);
951 + if (!buf)
952 + return -ENOMEM;
953 +
954 + memset(buf, 0xff, size);
955 + ret = spi_write(sigma_delta->spi, buf, size);
956 + kfree(buf);
957 +
958 + return ret;
959 +}
960 +EXPORT_SYMBOL_GPL(ad_sd_reset);
961 +
962 static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
963 unsigned int mode, unsigned int channel)
964 {
965 diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
966 index 634717ae12f3..071dd23a33d9 100644
967 --- a/drivers/iio/adc/mcp320x.c
968 +++ b/drivers/iio/adc/mcp320x.c
969 @@ -17,6 +17,8 @@
970 * MCP3204
971 * MCP3208
972 * ------------
973 + * 13 bit converter
974 + * MCP3301
975 *
976 * Datasheet can be found here:
977 * http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001
978 @@ -96,7 +98,7 @@ static int mcp320x_channel_to_tx_data(int device_index,
979 }
980
981 static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
982 - bool differential, int device_index)
983 + bool differential, int device_index, int *val)
984 {
985 int ret;
986
987 @@ -117,19 +119,25 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
988
989 switch (device_index) {
990 case mcp3001:
991 - return (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
992 + *val = (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
993 + return 0;
994 case mcp3002:
995 case mcp3004:
996 case mcp3008:
997 - return (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
998 + *val = (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
999 + return 0;
1000 case mcp3201:
1001 - return (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
1002 + *val = (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
1003 + return 0;
1004 case mcp3202:
1005 case mcp3204:
1006 case mcp3208:
1007 - return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
1008 + *val = (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
1009 + return 0;
1010 case mcp3301:
1011 - return sign_extend32((adc->rx_buf[0] & 0x1f) << 8 | adc->rx_buf[1], 12);
1012 + *val = sign_extend32((adc->rx_buf[0] & 0x1f) << 8
1013 + | adc->rx_buf[1], 12);
1014 + return 0;
1015 default:
1016 return -EINVAL;
1017 }
1018 @@ -150,12 +158,10 @@ static int mcp320x_read_raw(struct iio_dev *indio_dev,
1019 switch (mask) {
1020 case IIO_CHAN_INFO_RAW:
1021 ret = mcp320x_adc_conversion(adc, channel->address,
1022 - channel->differential, device_index);
1023 -
1024 + channel->differential, device_index, val);
1025 if (ret < 0)
1026 goto out;
1027
1028 - *val = ret;
1029 ret = IIO_VAL_INT;
1030 break;
1031
1032 @@ -312,6 +318,7 @@ static int mcp320x_probe(struct spi_device *spi)
1033 indio_dev->name = spi_get_device_id(spi)->name;
1034 indio_dev->modes = INDIO_DIRECT_MODE;
1035 indio_dev->info = &mcp320x_info;
1036 + spi_set_drvdata(spi, indio_dev);
1037
1038 chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
1039 indio_dev->channels = chip_info->channels;
1040 diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
1041 index 5bfcc1f13105..10e1d8328461 100644
1042 --- a/drivers/iio/adc/stm32-adc.c
1043 +++ b/drivers/iio/adc/stm32-adc.c
1044 @@ -1543,7 +1543,7 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
1045
1046 num_channels = of_property_count_u32_elems(node, "st,adc-channels");
1047 if (num_channels < 0 ||
1048 - num_channels >= adc_info->max_channels) {
1049 + num_channels > adc_info->max_channels) {
1050 dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
1051 return num_channels < 0 ? num_channels : -EINVAL;
1052 }
1053 diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
1054 index bd3d37fc2144..0c86fbb3033e 100644
1055 --- a/drivers/iio/adc/twl4030-madc.c
1056 +++ b/drivers/iio/adc/twl4030-madc.c
1057 @@ -887,8 +887,10 @@ static int twl4030_madc_probe(struct platform_device *pdev)
1058
1059 /* Enable 3v1 bias regulator for MADC[3:6] */
1060 madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1");
1061 - if (IS_ERR(madc->usb3v1))
1062 - return -ENODEV;
1063 + if (IS_ERR(madc->usb3v1)) {
1064 + ret = -ENODEV;
1065 + goto err_i2c;
1066 + }
1067
1068 ret = regulator_enable(madc->usb3v1);
1069 if (ret)
1070 @@ -897,11 +899,13 @@ static int twl4030_madc_probe(struct platform_device *pdev)
1071 ret = iio_device_register(iio_dev);
1072 if (ret) {
1073 dev_err(&pdev->dev, "could not register iio device\n");
1074 - goto err_i2c;
1075 + goto err_usb3v1;
1076 }
1077
1078 return 0;
1079
1080 +err_usb3v1:
1081 + regulator_disable(madc->usb3v1);
1082 err_i2c:
1083 twl4030_madc_set_current_generator(madc, 0, 0);
1084 err_current_generator:
1085 diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
1086 index 17ec4cee51dc..a47428b4d31b 100644
1087 --- a/drivers/iio/industrialio-core.c
1088 +++ b/drivers/iio/industrialio-core.c
1089 @@ -310,8 +310,10 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
1090 ret = indio_dev->info->debugfs_reg_access(indio_dev,
1091 indio_dev->cached_reg_addr,
1092 0, &val);
1093 - if (ret)
1094 + if (ret) {
1095 dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
1096 + return ret;
1097 + }
1098
1099 len = snprintf(buf, sizeof(buf), "0x%X\n", val);
1100
1101 diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
1102 index 0d2ea3ee371b..8f26428804a2 100644
1103 --- a/drivers/iio/pressure/bmp280-core.c
1104 +++ b/drivers/iio/pressure/bmp280-core.c
1105 @@ -573,7 +573,7 @@ static int bmp280_chip_config(struct bmp280_data *data)
1106 u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) |
1107 BMP280_OSRS_PRESS_X(data->oversampling_press + 1);
1108
1109 - ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS,
1110 + ret = regmap_write_bits(data->regmap, BMP280_REG_CTRL_MEAS,
1111 BMP280_OSRS_TEMP_MASK |
1112 BMP280_OSRS_PRESS_MASK |
1113 BMP280_MODE_MASK,
1114 diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
1115 index 25ad6abfee22..ea128bd82a28 100644
1116 --- a/drivers/iio/trigger/stm32-timer-trigger.c
1117 +++ b/drivers/iio/trigger/stm32-timer-trigger.c
1118 @@ -138,6 +138,7 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv)
1119 clk_disable(priv->clk);
1120
1121 /* Stop timer */
1122 + regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
1123 regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
1124 regmap_write(priv->regmap, TIM_PSC, 0);
1125 regmap_write(priv->regmap, TIM_ARR, 0);
1126 @@ -679,8 +680,9 @@ static ssize_t stm32_count_set_preset(struct iio_dev *indio_dev,
1127 if (ret)
1128 return ret;
1129
1130 + /* TIMx_ARR register shouldn't be buffered (ARPE=0) */
1131 + regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
1132 regmap_write(priv->regmap, TIM_ARR, preset);
1133 - regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, TIM_CR1_ARPE);
1134
1135 return len;
1136 }
1137 diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
1138 index 6c44609fd83a..cd2b3c69771a 100644
1139 --- a/drivers/isdn/i4l/isdn_ppp.c
1140 +++ b/drivers/isdn/i4l/isdn_ppp.c
1141 @@ -825,7 +825,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
1142 isdn_net_local *lp;
1143 struct ippp_struct *is;
1144 int proto;
1145 - unsigned char protobuf[4];
1146
1147 is = file->private_data;
1148
1149 @@ -839,24 +838,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
1150 if (!lp)
1151 printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
1152 else {
1153 - /*
1154 - * Don't reset huptimer for
1155 - * LCP packets. (Echo requests).
1156 - */
1157 - if (copy_from_user(protobuf, buf, 4))
1158 - return -EFAULT;
1159 - proto = PPP_PROTOCOL(protobuf);
1160 - if (proto != PPP_LCP)
1161 - lp->huptimer = 0;
1162 + if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
1163 + unsigned char protobuf[4];
1164 + /*
1165 + * Don't reset huptimer for
1166 + * LCP packets. (Echo requests).
1167 + */
1168 + if (copy_from_user(protobuf, buf, 4))
1169 + return -EFAULT;
1170 +
1171 + proto = PPP_PROTOCOL(protobuf);
1172 + if (proto != PPP_LCP)
1173 + lp->huptimer = 0;
1174
1175 - if (lp->isdn_device < 0 || lp->isdn_channel < 0)
1176 return 0;
1177 + }
1178
1179 if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
1180 lp->dialstate == 0 &&
1181 (lp->flags & ISDN_NET_CONNECTED)) {
1182 unsigned short hl;
1183 struct sk_buff *skb;
1184 + unsigned char *cpy_buf;
1185 /*
1186 * we need to reserve enough space in front of
1187 * sk_buff. old call to dev_alloc_skb only reserved
1188 @@ -869,11 +872,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
1189 return count;
1190 }
1191 skb_reserve(skb, hl);
1192 - if (copy_from_user(skb_put(skb, count), buf, count))
1193 + cpy_buf = skb_put(skb, count);
1194 + if (copy_from_user(cpy_buf, buf, count))
1195 {
1196 kfree_skb(skb);
1197 return -EFAULT;
1198 }
1199 +
1200 + /*
1201 + * Don't reset huptimer for
1202 + * LCP packets. (Echo requests).
1203 + */
1204 + proto = PPP_PROTOCOL(cpy_buf);
1205 + if (proto != PPP_LCP)
1206 + lp->huptimer = 0;
1207 +
1208 if (is->debug & 0x40) {
1209 printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
1210 isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
1211 diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
1212 index 24eddbdf2ab4..203144762f36 100644
1213 --- a/drivers/md/dm-core.h
1214 +++ b/drivers/md/dm-core.h
1215 @@ -149,5 +149,6 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
1216
1217 extern atomic_t dm_global_event_nr;
1218 extern wait_queue_head_t dm_global_eventq;
1219 +void dm_issue_global_event(void);
1220
1221 #endif
1222 diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
1223 index cdf6b1e12460..e3dd64a12f55 100644
1224 --- a/drivers/md/dm-crypt.c
1225 +++ b/drivers/md/dm-crypt.c
1226 @@ -2470,6 +2470,7 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
1227 kfree(cipher_api);
1228 return ret;
1229 }
1230 + kfree(cipher_api);
1231
1232 return 0;
1233 bad_mem:
1234 @@ -2588,6 +2589,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
1235 ti->error = "Invalid feature value for sector_size";
1236 return -EINVAL;
1237 }
1238 + if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
1239 + ti->error = "Device size is not multiple of sector_size feature";
1240 + return -EINVAL;
1241 + }
1242 cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
1243 } else if (!strcasecmp(opt_string, "iv_large_sectors"))
1244 set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
1245 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
1246 index e06f0ef7d2ec..e9f9884b66a8 100644
1247 --- a/drivers/md/dm-ioctl.c
1248 +++ b/drivers/md/dm-ioctl.c
1249 @@ -477,9 +477,13 @@ static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_si
1250 * Round up the ptr to an 8-byte boundary.
1251 */
1252 #define ALIGN_MASK 7
1253 +static inline size_t align_val(size_t val)
1254 +{
1255 + return (val + ALIGN_MASK) & ~ALIGN_MASK;
1256 +}
1257 static inline void *align_ptr(void *ptr)
1258 {
1259 - return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
1260 + return (void *)align_val((size_t)ptr);
1261 }
1262
1263 /*
1264 @@ -505,7 +509,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
1265 struct hash_cell *hc;
1266 size_t len, needed = 0;
1267 struct gendisk *disk;
1268 - struct dm_name_list *nl, *old_nl = NULL;
1269 + struct dm_name_list *orig_nl, *nl, *old_nl = NULL;
1270 uint32_t *event_nr;
1271
1272 down_write(&_hash_lock);
1273 @@ -516,17 +520,15 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
1274 */
1275 for (i = 0; i < NUM_BUCKETS; i++) {
1276 list_for_each_entry (hc, _name_buckets + i, name_list) {
1277 - needed += sizeof(struct dm_name_list);
1278 - needed += strlen(hc->name) + 1;
1279 - needed += ALIGN_MASK;
1280 - needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK;
1281 + needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
1282 + needed += align_val(sizeof(uint32_t));
1283 }
1284 }
1285
1286 /*
1287 * Grab our output buffer.
1288 */
1289 - nl = get_result_buffer(param, param_size, &len);
1290 + nl = orig_nl = get_result_buffer(param, param_size, &len);
1291 if (len < needed) {
1292 param->flags |= DM_BUFFER_FULL_FLAG;
1293 goto out;
1294 @@ -549,11 +551,16 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
1295 strcpy(nl->name, hc->name);
1296
1297 old_nl = nl;
1298 - event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1);
1299 + event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
1300 *event_nr = dm_get_event_nr(hc->md);
1301 nl = align_ptr(event_nr + 1);
1302 }
1303 }
1304 + /*
1305 + * If mismatch happens, security may be compromised due to buffer
1306 + * overflow, so it's better to crash.
1307 + */
1308 + BUG_ON((char *)nl - (char *)orig_nl != needed);
1309
1310 out:
1311 up_write(&_hash_lock);
1312 @@ -1621,7 +1628,8 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
1313 * which has a variable size, is not used by the function processing
1314 * the ioctl.
1315 */
1316 -#define IOCTL_FLAGS_NO_PARAMS 1
1317 +#define IOCTL_FLAGS_NO_PARAMS 1
1318 +#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2
1319
1320 /*-----------------------------------------------------------------
1321 * Implementation of open/close/ioctl on the special char
1322 @@ -1635,12 +1643,12 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
1323 ioctl_fn fn;
1324 } _ioctls[] = {
1325 {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
1326 - {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all},
1327 + {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all},
1328 {DM_LIST_DEVICES_CMD, 0, list_devices},
1329
1330 - {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create},
1331 - {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove},
1332 - {DM_DEV_RENAME_CMD, 0, dev_rename},
1333 + {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create},
1334 + {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove},
1335 + {DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename},
1336 {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
1337 {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
1338 {DM_DEV_WAIT_CMD, 0, dev_wait},
1339 @@ -1869,6 +1877,9 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
1340 unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
1341 DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
1342
1343 + if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT)
1344 + dm_issue_global_event();
1345 +
1346 /*
1347 * Copy the results back to userland.
1348 */
1349 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1350 index 825eaffc24da..eed539a4eec2 100644
1351 --- a/drivers/md/dm.c
1352 +++ b/drivers/md/dm.c
1353 @@ -52,6 +52,12 @@ static struct workqueue_struct *deferred_remove_workqueue;
1354 atomic_t dm_global_event_nr = ATOMIC_INIT(0);
1355 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
1356
1357 +void dm_issue_global_event(void)
1358 +{
1359 + atomic_inc(&dm_global_event_nr);
1360 + wake_up(&dm_global_eventq);
1361 +}
1362 +
1363 /*
1364 * One of these is allocated per bio.
1365 */
1366 @@ -1865,9 +1871,8 @@ static void event_callback(void *context)
1367 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
1368
1369 atomic_inc(&md->event_nr);
1370 - atomic_inc(&dm_global_event_nr);
1371 wake_up(&md->eventq);
1372 - wake_up(&dm_global_eventq);
1373 + dm_issue_global_event();
1374 }
1375
1376 /*
1377 @@ -2283,6 +2288,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
1378 }
1379
1380 map = __bind(md, table, &limits);
1381 + dm_issue_global_event();
1382
1383 out:
1384 mutex_unlock(&md->suspend_lock);
1385 diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
1386 index 2bae69e39544..b64be0ba1222 100644
1387 --- a/drivers/mmc/core/mmc.c
1388 +++ b/drivers/mmc/core/mmc.c
1389 @@ -1286,6 +1286,23 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
1390 return err;
1391 }
1392
1393 +static void mmc_select_driver_type(struct mmc_card *card)
1394 +{
1395 + int card_drv_type, drive_strength, drv_type;
1396 +
1397 + card_drv_type = card->ext_csd.raw_driver_strength |
1398 + mmc_driver_type_mask(0);
1399 +
1400 + drive_strength = mmc_select_drive_strength(card,
1401 + card->ext_csd.hs200_max_dtr,
1402 + card_drv_type, &drv_type);
1403 +
1404 + card->drive_strength = drive_strength;
1405 +
1406 + if (drv_type)
1407 + mmc_set_driver_type(card->host, drv_type);
1408 +}
1409 +
1410 static int mmc_select_hs400es(struct mmc_card *card)
1411 {
1412 struct mmc_host *host = card->host;
1413 @@ -1341,6 +1358,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
1414 goto out_err;
1415 }
1416
1417 + mmc_select_driver_type(card);
1418 +
1419 /* Switch card to HS400 */
1420 val = EXT_CSD_TIMING_HS400 |
1421 card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1422 @@ -1374,23 +1393,6 @@ static int mmc_select_hs400es(struct mmc_card *card)
1423 return err;
1424 }
1425
1426 -static void mmc_select_driver_type(struct mmc_card *card)
1427 -{
1428 - int card_drv_type, drive_strength, drv_type;
1429 -
1430 - card_drv_type = card->ext_csd.raw_driver_strength |
1431 - mmc_driver_type_mask(0);
1432 -
1433 - drive_strength = mmc_select_drive_strength(card,
1434 - card->ext_csd.hs200_max_dtr,
1435 - card_drv_type, &drv_type);
1436 -
1437 - card->drive_strength = drive_strength;
1438 -
1439 - if (drv_type)
1440 - mmc_set_driver_type(card->host, drv_type);
1441 -}
1442 -
1443 /*
1444 * For device supporting HS200 mode, the following sequence
1445 * should be done before executing the tuning process.
1446 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1447 index fc63992ab0e0..c99dc59d729b 100644
1448 --- a/drivers/net/bonding/bond_main.c
1449 +++ b/drivers/net/bonding/bond_main.c
1450 @@ -4289,7 +4289,7 @@ static int bond_check_params(struct bond_params *params)
1451 int bond_mode = BOND_MODE_ROUNDROBIN;
1452 int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
1453 int lacp_fast = 0;
1454 - int tlb_dynamic_lb = 0;
1455 + int tlb_dynamic_lb;
1456
1457 /* Convert string parameters. */
1458 if (mode) {
1459 @@ -4601,16 +4601,13 @@ static int bond_check_params(struct bond_params *params)
1460 }
1461 ad_user_port_key = valptr->value;
1462
1463 - if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) {
1464 - bond_opt_initstr(&newval, "default");
1465 - valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
1466 - &newval);
1467 - if (!valptr) {
1468 - pr_err("Error: No tlb_dynamic_lb default value");
1469 - return -EINVAL;
1470 - }
1471 - tlb_dynamic_lb = valptr->value;
1472 + bond_opt_initstr(&newval, "default");
1473 + valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
1474 + if (!valptr) {
1475 + pr_err("Error: No tlb_dynamic_lb default value");
1476 + return -EINVAL;
1477 }
1478 + tlb_dynamic_lb = valptr->value;
1479
1480 if (lp_interval == 0) {
1481 pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
1482 diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
1483 index a12d603d41c6..5931aa2fe997 100644
1484 --- a/drivers/net/bonding/bond_options.c
1485 +++ b/drivers/net/bonding/bond_options.c
1486 @@ -754,6 +754,9 @@ static int bond_option_mode_set(struct bonding *bond,
1487 bond->params.miimon);
1488 }
1489
1490 + if (newval->value == BOND_MODE_ALB)
1491 + bond->params.tlb_dynamic_lb = 1;
1492 +
1493 /* don't cache arp_validate between modes */
1494 bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
1495 bond->params.mode = newval->value;
1496 diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
1497 index 5bcdd33101b0..c75271c722a7 100644
1498 --- a/drivers/net/dsa/mv88e6xxx/chip.c
1499 +++ b/drivers/net/dsa/mv88e6xxx/chip.c
1500 @@ -1184,6 +1184,10 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
1501 };
1502 int i, err;
1503
1504 + /* DSA and CPU ports have to be members of multiple vlans */
1505 + if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
1506 + return 0;
1507 +
1508 if (!vid_begin)
1509 return -EOPNOTSUPP;
1510
1511 @@ -4015,7 +4019,9 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
1512 if (chip->irq > 0) {
1513 if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT))
1514 mv88e6xxx_g2_irq_free(chip);
1515 + mutex_lock(&chip->reg_lock);
1516 mv88e6xxx_g1_irq_free(chip);
1517 + mutex_unlock(&chip->reg_lock);
1518 }
1519 }
1520
1521 diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
1522 index 91b1a558f37d..248888328232 100644
1523 --- a/drivers/net/ethernet/ibm/emac/mal.c
1524 +++ b/drivers/net/ethernet/ibm/emac/mal.c
1525 @@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
1526 unsigned long flags;
1527
1528 MAL_DBG2(mal, "poll(%d)" NL, budget);
1529 - again:
1530 +
1531 /* Process TX skbs */
1532 list_for_each(l, &mal->poll_list) {
1533 struct mal_commac *mc =
1534 @@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget)
1535 spin_lock_irqsave(&mal->lock, flags);
1536 mal_disable_eob_irq(mal);
1537 spin_unlock_irqrestore(&mal->lock, flags);
1538 - goto again;
1539 }
1540 mc->ops->poll_tx(mc->dev);
1541 }
1542 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
1543 index 85298051a3e4..145e392ab849 100644
1544 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
1545 +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
1546 @@ -572,12 +572,13 @@ void mlx5_rdma_netdev_free(struct net_device *netdev)
1547 {
1548 struct mlx5e_priv *priv = mlx5i_epriv(netdev);
1549 const struct mlx5e_profile *profile = priv->profile;
1550 + struct mlx5_core_dev *mdev = priv->mdev;
1551
1552 mlx5e_detach_netdev(priv);
1553 profile->cleanup(priv);
1554 destroy_workqueue(priv->wq);
1555 free_netdev(netdev);
1556
1557 - mlx5e_destroy_mdev_resources(priv->mdev);
1558 + mlx5e_destroy_mdev_resources(mdev);
1559 }
1560 EXPORT_SYMBOL(mlx5_rdma_netdev_free);
1561 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1562 index c6a3e61b53bd..73390f90b581 100644
1563 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1564 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1565 @@ -572,15 +572,14 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1566 }
1567
1568 static struct mlxsw_sp_span_entry *
1569 -mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
1570 +mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1571 {
1572 - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
1573 int i;
1574
1575 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
1576 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
1577
1578 - if (curr->used && curr->local_port == port->local_port)
1579 + if (curr->used && curr->local_port == local_port)
1580 return curr;
1581 }
1582 return NULL;
1583 @@ -591,7 +590,8 @@ static struct mlxsw_sp_span_entry
1584 {
1585 struct mlxsw_sp_span_entry *span_entry;
1586
1587 - span_entry = mlxsw_sp_span_entry_find(port);
1588 + span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
1589 + port->local_port);
1590 if (span_entry) {
1591 /* Already exists, just take a reference */
1592 span_entry->ref_count++;
1593 @@ -780,12 +780,13 @@ static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
1594 }
1595
1596 static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
1597 - struct mlxsw_sp_port *to,
1598 + u8 destination_port,
1599 enum mlxsw_sp_span_type type)
1600 {
1601 struct mlxsw_sp_span_entry *span_entry;
1602
1603 - span_entry = mlxsw_sp_span_entry_find(to);
1604 + span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
1605 + destination_port);
1606 if (!span_entry) {
1607 netdev_err(from->dev, "no span entry found\n");
1608 return;
1609 @@ -1560,14 +1561,12 @@ static void
1610 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1611 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1612 {
1613 - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1614 enum mlxsw_sp_span_type span_type;
1615 - struct mlxsw_sp_port *to_port;
1616
1617 - to_port = mlxsw_sp->ports[mirror->to_local_port];
1618 span_type = mirror->ingress ?
1619 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1620 - mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
1621 + mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
1622 + span_type);
1623 }
1624
1625 static int
1626 @@ -2519,7 +2518,9 @@ static int mlxsw_sp_flash_device(struct net_device *dev,
1627 return err;
1628 }
1629
1630 -#define MLXSW_SP_QSFP_I2C_ADDR 0x50
1631 +#define MLXSW_SP_I2C_ADDR_LOW 0x50
1632 +#define MLXSW_SP_I2C_ADDR_HIGH 0x51
1633 +#define MLXSW_SP_EEPROM_PAGE_LENGTH 256
1634
1635 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
1636 u16 offset, u16 size, void *data,
1637 @@ -2528,12 +2529,25 @@ static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
1638 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1639 char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
1640 char mcia_pl[MLXSW_REG_MCIA_LEN];
1641 + u16 i2c_addr;
1642 int status;
1643 int err;
1644
1645 size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
1646 +
1647 + if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
1648 + offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
1649 + /* Cross pages read, read until offset 256 in low page */
1650 + size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset;
1651 +
1652 + i2c_addr = MLXSW_SP_I2C_ADDR_LOW;
1653 + if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) {
1654 + i2c_addr = MLXSW_SP_I2C_ADDR_HIGH;
1655 + offset -= MLXSW_SP_EEPROM_PAGE_LENGTH;
1656 + }
1657 +
1658 mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
1659 - 0, 0, offset, size, MLXSW_SP_QSFP_I2C_ADDR);
1660 + 0, 0, offset, size, i2c_addr);
1661
1662 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
1663 if (err)
1664 diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
1665 index bcd4708b3745..97f18cdc9516 100644
1666 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
1667 +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
1668 @@ -876,7 +876,8 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
1669
1670 curr_rxbuf->dma_addr =
1671 dma_map_single(adpt->netdev->dev.parent, skb->data,
1672 - curr_rxbuf->length, DMA_FROM_DEVICE);
1673 + adpt->rxbuf_size, DMA_FROM_DEVICE);
1674 +
1675 ret = dma_mapping_error(adpt->netdev->dev.parent,
1676 curr_rxbuf->dma_addr);
1677 if (ret) {
1678 diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
1679 index ca22f2898664..d24b47b8e0b2 100644
1680 --- a/drivers/net/ethernet/realtek/8139too.c
1681 +++ b/drivers/net/ethernet/realtek/8139too.c
1682 @@ -2135,11 +2135,12 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
1683 if (likely(RTL_R16(IntrStatus) & RxAckBits))
1684 work_done += rtl8139_rx(dev, tp, budget);
1685
1686 - if (work_done < budget && napi_complete_done(napi, work_done)) {
1687 + if (work_done < budget) {
1688 unsigned long flags;
1689
1690 spin_lock_irqsave(&tp->lock, flags);
1691 - RTL_W16_F(IntrMask, rtl8139_intr_mask);
1692 + if (napi_complete_done(napi, work_done))
1693 + RTL_W16_F(IntrMask, rtl8139_intr_mask);
1694 spin_unlock_irqrestore(&tp->lock, flags);
1695 }
1696 spin_unlock(&tp->rx_lock);
1697 diff --git a/drivers/net/ethernet/rocker/rocker_tlv.h b/drivers/net/ethernet/rocker/rocker_tlv.h
1698 index a63ef82e7c72..dfae3c9d57c6 100644
1699 --- a/drivers/net/ethernet/rocker/rocker_tlv.h
1700 +++ b/drivers/net/ethernet/rocker/rocker_tlv.h
1701 @@ -139,40 +139,52 @@ rocker_tlv_start(struct rocker_desc_info *desc_info)
1702 int rocker_tlv_put(struct rocker_desc_info *desc_info,
1703 int attrtype, int attrlen, const void *data);
1704
1705 -static inline int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
1706 - int attrtype, u8 value)
1707 +static inline int
1708 +rocker_tlv_put_u8(struct rocker_desc_info *desc_info, int attrtype, u8 value)
1709 {
1710 - return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
1711 + u8 tmp = value; /* work around GCC PR81715 */
1712 +
1713 + return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &tmp);
1714 }
1715
1716 -static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
1717 - int attrtype, u16 value)
1718 +static inline int
1719 +rocker_tlv_put_u16(struct rocker_desc_info *desc_info, int attrtype, u16 value)
1720 {
1721 - return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
1722 + u16 tmp = value;
1723 +
1724 + return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &tmp);
1725 }
1726
1727 -static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
1728 - int attrtype, __be16 value)
1729 +static inline int
1730 +rocker_tlv_put_be16(struct rocker_desc_info *desc_info, int attrtype, __be16 value)
1731 {
1732 - return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
1733 + __be16 tmp = value;
1734 +
1735 + return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &tmp);
1736 }
1737
1738 -static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
1739 - int attrtype, u32 value)
1740 +static inline int
1741 +rocker_tlv_put_u32(struct rocker_desc_info *desc_info, int attrtype, u32 value)
1742 {
1743 - return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
1744 + u32 tmp = value;
1745 +
1746 + return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &tmp);
1747 }
1748
1749 -static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
1750 - int attrtype, __be32 value)
1751 +static inline int
1752 +rocker_tlv_put_be32(struct rocker_desc_info *desc_info, int attrtype, __be32 value)
1753 {
1754 - return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
1755 + __be32 tmp = value;
1756 +
1757 + return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &tmp);
1758 }
1759
1760 -static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
1761 - int attrtype, u64 value)
1762 +static inline int
1763 +rocker_tlv_put_u64(struct rocker_desc_info *desc_info, int attrtype, u64 value)
1764 {
1765 - return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
1766 + u64 tmp = value;
1767 +
1768 + return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &tmp);
1769 }
1770
1771 static inline struct rocker_tlv *
1772 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
1773 index a366b3747eeb..8a280b48e3a9 100644
1774 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
1775 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
1776 @@ -315,6 +315,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
1777 { .compatible = "allwinner,sun8i-h3-emac" },
1778 { .compatible = "allwinner,sun8i-v3s-emac" },
1779 { .compatible = "allwinner,sun50i-a64-emac" },
1780 + {},
1781 };
1782
1783 /* If phy-handle property is passed from DT, use it as the PHY */
1784 diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
1785 index d15dd3938ba8..2e5150b0b8d5 100644
1786 --- a/drivers/net/phy/xilinx_gmii2rgmii.c
1787 +++ b/drivers/net/phy/xilinx_gmii2rgmii.c
1788 @@ -44,7 +44,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
1789 priv->phy_drv->read_status(phydev);
1790
1791 val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
1792 - val &= XILINX_GMII2RGMII_SPEED_MASK;
1793 + val &= ~XILINX_GMII2RGMII_SPEED_MASK;
1794
1795 if (phydev->speed == SPEED_1000)
1796 val |= BMCR_SPEED1000;
1797 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1798 index 0a2c0a42283f..cb1f7747adad 100644
1799 --- a/drivers/net/tun.c
1800 +++ b/drivers/net/tun.c
1801 @@ -1298,11 +1298,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1802 switch (tun->flags & TUN_TYPE_MASK) {
1803 case IFF_TUN:
1804 if (tun->flags & IFF_NO_PI) {
1805 - switch (skb->data[0] & 0xf0) {
1806 - case 0x40:
1807 + u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
1808 +
1809 + switch (ip_version) {
1810 + case 4:
1811 pi.proto = htons(ETH_P_IP);
1812 break;
1813 - case 0x60:
1814 + case 6:
1815 pi.proto = htons(ETH_P_IPV6);
1816 break;
1817 default:
1818 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
1819 index 7e689c86d565..f27d6fe4d5c0 100644
1820 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
1821 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
1822 @@ -980,7 +980,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
1823
1824 eth_broadcast_addr(params_le->bssid);
1825 params_le->bss_type = DOT11_BSSTYPE_ANY;
1826 - params_le->scan_type = 0;
1827 + params_le->scan_type = BRCMF_SCANTYPE_ACTIVE;
1828 params_le->channel_num = 0;
1829 params_le->nprobes = cpu_to_le32(-1);
1830 params_le->active_time = cpu_to_le32(-1);
1831 @@ -988,12 +988,9 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
1832 params_le->home_time = cpu_to_le32(-1);
1833 memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
1834
1835 - /* if request is null exit so it will be all channel broadcast scan */
1836 - if (!request)
1837 - return;
1838 -
1839 n_ssids = request->n_ssids;
1840 n_channels = request->n_channels;
1841 +
1842 /* Copy channel array if applicable */
1843 brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
1844 n_channels);
1845 @@ -1030,16 +1027,8 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
1846 ptr += sizeof(ssid_le);
1847 }
1848 } else {
1849 - brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids);
1850 - if ((request->ssids) && request->ssids->ssid_len) {
1851 - brcmf_dbg(SCAN, "SSID %s len=%d\n",
1852 - params_le->ssid_le.SSID,
1853 - request->ssids->ssid_len);
1854 - params_le->ssid_le.SSID_len =
1855 - cpu_to_le32(request->ssids->ssid_len);
1856 - memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
1857 - request->ssids->ssid_len);
1858 - }
1859 + brcmf_dbg(SCAN, "Performing passive scan\n");
1860 + params_le->scan_type = BRCMF_SCANTYPE_PASSIVE;
1861 }
1862 /* Adding mask to channel numbers */
1863 params_le->channel_num =
1864 @@ -3162,6 +3151,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
1865 struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
1866 s32 status;
1867 struct brcmf_escan_result_le *escan_result_le;
1868 + u32 escan_buflen;
1869 struct brcmf_bss_info_le *bss_info_le;
1870 struct brcmf_bss_info_le *bss = NULL;
1871 u32 bi_length;
1872 @@ -3181,11 +3171,23 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
1873
1874 if (status == BRCMF_E_STATUS_PARTIAL) {
1875 brcmf_dbg(SCAN, "ESCAN Partial result\n");
1876 + if (e->datalen < sizeof(*escan_result_le)) {
1877 + brcmf_err("invalid event data length\n");
1878 + goto exit;
1879 + }
1880 escan_result_le = (struct brcmf_escan_result_le *) data;
1881 if (!escan_result_le) {
1882 brcmf_err("Invalid escan result (NULL pointer)\n");
1883 goto exit;
1884 }
1885 + escan_buflen = le32_to_cpu(escan_result_le->buflen);
1886 + if (escan_buflen > BRCMF_ESCAN_BUF_SIZE ||
1887 + escan_buflen > e->datalen ||
1888 + escan_buflen < sizeof(*escan_result_le)) {
1889 + brcmf_err("Invalid escan buffer length: %d\n",
1890 + escan_buflen);
1891 + goto exit;
1892 + }
1893 if (le16_to_cpu(escan_result_le->bss_count) != 1) {
1894 brcmf_err("Invalid bss_count %d: ignoring\n",
1895 escan_result_le->bss_count);
1896 @@ -3202,9 +3204,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
1897 }
1898
1899 bi_length = le32_to_cpu(bss_info_le->length);
1900 - if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
1901 - WL_ESCAN_RESULTS_FIXED_SIZE)) {
1902 - brcmf_err("Invalid bss_info length %d: ignoring\n",
1903 + if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) {
1904 + brcmf_err("Ignoring invalid bss_info length: %d\n",
1905 bi_length);
1906 goto exit;
1907 }
1908 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
1909 index 8391989b1882..e0d22fedb2b4 100644
1910 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
1911 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
1912 @@ -45,6 +45,11 @@
1913 #define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
1914 #define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
1915
1916 +/* scan type definitions */
1917 +#define BRCMF_SCANTYPE_DEFAULT 0xFF
1918 +#define BRCMF_SCANTYPE_ACTIVE 0
1919 +#define BRCMF_SCANTYPE_PASSIVE 1
1920 +
1921 #define BRCMF_WSEC_MAX_PSK_LEN 32
1922 #define BRCMF_WSEC_PASSPHRASE BIT(0)
1923
1924 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1925 index ce901be5fba8..f0132c492a79 100644
1926 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1927 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1928 @@ -1589,6 +1589,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1929 struct iwl_mvm_mc_iter_data *data = _data;
1930 struct iwl_mvm *mvm = data->mvm;
1931 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1932 + struct iwl_host_cmd hcmd = {
1933 + .id = MCAST_FILTER_CMD,
1934 + .flags = CMD_ASYNC,
1935 + .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1936 + };
1937 int ret, len;
1938
1939 /* if we don't have free ports, mcast frames will be dropped */
1940 @@ -1603,7 +1608,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1941 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1942 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1943
1944 - ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
1945 + hcmd.len[0] = len;
1946 + hcmd.data[0] = cmd;
1947 +
1948 + ret = iwl_mvm_send_cmd(mvm, &hcmd);
1949 if (ret)
1950 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1951 }
1952 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
1953 index cdf4c0e471b9..ba41b660b259 100644
1954 --- a/drivers/nvme/host/pci.c
1955 +++ b/drivers/nvme/host/pci.c
1956 @@ -93,7 +93,7 @@ struct nvme_dev {
1957 struct mutex shutdown_lock;
1958 bool subsystem;
1959 void __iomem *cmb;
1960 - dma_addr_t cmb_dma_addr;
1961 + pci_bus_addr_t cmb_bus_addr;
1962 u64 cmb_size;
1963 u32 cmbsz;
1964 u32 cmbloc;
1965 @@ -1218,7 +1218,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
1966 if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
1967 unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
1968 dev->ctrl.page_size);
1969 - nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
1970 + nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
1971 nvmeq->sq_cmds_io = dev->cmb + offset;
1972 } else {
1973 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
1974 @@ -1517,7 +1517,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
1975 resource_size_t bar_size;
1976 struct pci_dev *pdev = to_pci_dev(dev->dev);
1977 void __iomem *cmb;
1978 - dma_addr_t dma_addr;
1979 + int bar;
1980
1981 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
1982 if (!(NVME_CMB_SZ(dev->cmbsz)))
1983 @@ -1530,7 +1530,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
1984 szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
1985 size = szu * NVME_CMB_SZ(dev->cmbsz);
1986 offset = szu * NVME_CMB_OFST(dev->cmbloc);
1987 - bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
1988 + bar = NVME_CMB_BIR(dev->cmbloc);
1989 + bar_size = pci_resource_len(pdev, bar);
1990
1991 if (offset > bar_size)
1992 return NULL;
1993 @@ -1543,12 +1544,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
1994 if (size > bar_size - offset)
1995 size = bar_size - offset;
1996
1997 - dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
1998 - cmb = ioremap_wc(dma_addr, size);
1999 + cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
2000 if (!cmb)
2001 return NULL;
2002
2003 - dev->cmb_dma_addr = dma_addr;
2004 + dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
2005 dev->cmb_size = size;
2006 return cmb;
2007 }
2008 diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
2009 index fd88dabd599d..9f1d53e18956 100644
2010 --- a/drivers/scsi/scsi_scan.c
2011 +++ b/drivers/scsi/scsi_scan.c
2012 @@ -956,6 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
2013 if (*bflags & BLIST_NO_DIF)
2014 sdev->no_dif = 1;
2015
2016 + if (*bflags & BLIST_UNMAP_LIMIT_WS)
2017 + sdev->unmap_limit_for_ws = 1;
2018 +
2019 sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
2020
2021 if (*bflags & BLIST_TRY_VPD_PAGES)
2022 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2023 index e2647f2d4430..b93d92572c01 100644
2024 --- a/drivers/scsi/sd.c
2025 +++ b/drivers/scsi/sd.c
2026 @@ -715,13 +715,21 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
2027 break;
2028
2029 case SD_LBP_WS16:
2030 - max_blocks = min_not_zero(sdkp->max_ws_blocks,
2031 - (u32)SD_MAX_WS16_BLOCKS);
2032 + if (sdkp->device->unmap_limit_for_ws)
2033 + max_blocks = sdkp->max_unmap_blocks;
2034 + else
2035 + max_blocks = sdkp->max_ws_blocks;
2036 +
2037 + max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
2038 break;
2039
2040 case SD_LBP_WS10:
2041 - max_blocks = min_not_zero(sdkp->max_ws_blocks,
2042 - (u32)SD_MAX_WS10_BLOCKS);
2043 + if (sdkp->device->unmap_limit_for_ws)
2044 + max_blocks = sdkp->max_unmap_blocks;
2045 + else
2046 + max_blocks = sdkp->max_ws_blocks;
2047 +
2048 + max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
2049 break;
2050
2051 case SD_LBP_ZERO:
2052 @@ -3101,8 +3109,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
2053 sd_read_security(sdkp, buffer);
2054 }
2055
2056 - sdkp->first_scan = 0;
2057 -
2058 /*
2059 * We now have all cache related info, determine how we deal
2060 * with flush requests.
2061 @@ -3117,7 +3123,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
2062 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
2063
2064 /*
2065 - * Use the device's preferred I/O size for reads and writes
2066 + * Determine the device's preferred I/O size for reads and writes
2067 * unless the reported value is unreasonably small, large, or
2068 * garbage.
2069 */
2070 @@ -3131,8 +3137,19 @@ static int sd_revalidate_disk(struct gendisk *disk)
2071 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
2072 (sector_t)BLK_DEF_MAX_SECTORS);
2073
2074 - /* Combine with controller limits */
2075 - q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
2076 + /* Do not exceed controller limit */
2077 + rw_max = min(rw_max, queue_max_hw_sectors(q));
2078 +
2079 + /*
2080 + * Only update max_sectors if previously unset or if the current value
2081 + * exceeds the capabilities of the hardware.
2082 + */
2083 + if (sdkp->first_scan ||
2084 + q->limits.max_sectors > q->limits.max_dev_sectors ||
2085 + q->limits.max_sectors > q->limits.max_hw_sectors)
2086 + q->limits.max_sectors = rw_max;
2087 +
2088 + sdkp->first_scan = 0;
2089
2090 set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
2091 sd_config_write_same(sdkp);
2092 diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
2093 index d11c6de9c777..6150d2780e22 100644
2094 --- a/drivers/staging/iio/adc/ad7192.c
2095 +++ b/drivers/staging/iio/adc/ad7192.c
2096 @@ -223,11 +223,9 @@ static int ad7192_setup(struct ad7192_state *st,
2097 struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi);
2098 unsigned long long scale_uv;
2099 int i, ret, id;
2100 - u8 ones[6];
2101
2102 /* reset the serial interface */
2103 - memset(&ones, 0xFF, 6);
2104 - ret = spi_write(st->sd.spi, &ones, 6);
2105 + ret = ad_sd_reset(&st->sd, 48);
2106 if (ret < 0)
2107 goto out;
2108 usleep_range(500, 1000); /* Wait for at least 500us */
2109 diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
2110 index b026fe66467c..90e7e702a411 100644
2111 --- a/drivers/staging/media/imx/imx-media-of.c
2112 +++ b/drivers/staging/media/imx/imx-media-of.c
2113 @@ -167,7 +167,7 @@ of_parse_subdev(struct imx_media_dev *imxmd, struct device_node *sd_np,
2114 of_parse_sensor(imxmd, imxsd, sd_np);
2115
2116 for (i = 0; i < num_pads; i++) {
2117 - struct device_node *epnode = NULL, *port, *remote_np;
2118 + struct device_node *epnode = NULL, *port, *remote_np = NULL;
2119 struct imx_media_subdev *remote_imxsd;
2120 struct imx_media_pad *pad;
2121 int remote_pad;
2122 diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
2123 index 0159ca4407d8..be08849175ea 100644
2124 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
2125 +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
2126 @@ -612,18 +612,20 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
2127 if (head_bytes > actual)
2128 head_bytes = actual;
2129
2130 - memcpy((char *)page_address(pages[0]) +
2131 + memcpy((char *)kmap(pages[0]) +
2132 pagelist->offset,
2133 fragments,
2134 head_bytes);
2135 + kunmap(pages[0]);
2136 }
2137 if ((actual >= 0) && (head_bytes < actual) &&
2138 (tail_bytes != 0)) {
2139 - memcpy((char *)page_address(pages[num_pages - 1]) +
2140 + memcpy((char *)kmap(pages[num_pages - 1]) +
2141 ((pagelist->offset + actual) &
2142 (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
2143 fragments + g_cache_line_size,
2144 tail_bytes);
2145 + kunmap(pages[num_pages - 1]);
2146 }
2147
2148 down(&g_free_fragments_mutex);
2149 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
2150 index 8f972247b1c1..6499391695b7 100644
2151 --- a/drivers/usb/class/cdc-wdm.c
2152 +++ b/drivers/usb/class/cdc-wdm.c
2153 @@ -194,8 +194,10 @@ static void wdm_in_callback(struct urb *urb)
2154 /*
2155 * only set a new error if there is no previous error.
2156 * Errors are only cleared during read/open
2157 + * Avoid propagating -EPIPE (stall) to userspace since it is
2158 + * better handled as an empty read
2159 */
2160 - if (desc->rerr == 0)
2161 + if (desc->rerr == 0 && status != -EPIPE)
2162 desc->rerr = status;
2163
2164 if (length + desc->length > desc->wMaxCommand) {
2165 diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
2166 index 4be52c602e9b..68b54bd88d1e 100644
2167 --- a/drivers/usb/core/config.c
2168 +++ b/drivers/usb/core/config.c
2169 @@ -643,15 +643,23 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
2170
2171 } else if (header->bDescriptorType ==
2172 USB_DT_INTERFACE_ASSOCIATION) {
2173 + struct usb_interface_assoc_descriptor *d;
2174 +
2175 + d = (struct usb_interface_assoc_descriptor *)header;
2176 + if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
2177 + dev_warn(ddev,
2178 + "config %d has an invalid interface association descriptor of length %d, skipping\n",
2179 + cfgno, d->bLength);
2180 + continue;
2181 + }
2182 +
2183 if (iad_num == USB_MAXIADS) {
2184 dev_warn(ddev, "found more Interface "
2185 "Association Descriptors "
2186 "than allocated for in "
2187 "configuration %d\n", cfgno);
2188 } else {
2189 - config->intf_assoc[iad_num] =
2190 - (struct usb_interface_assoc_descriptor
2191 - *)header;
2192 + config->intf_assoc[iad_num] = d;
2193 iad_num++;
2194 }
2195
2196 @@ -852,7 +860,7 @@ int usb_get_configuration(struct usb_device *dev)
2197 }
2198
2199 if (dev->quirks & USB_QUIRK_DELAY_INIT)
2200 - msleep(100);
2201 + msleep(200);
2202
2203 result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
2204 bigbuffer, length);
2205 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
2206 index 0ff0feddfd1f..1d4dfdeb61c1 100644
2207 --- a/drivers/usb/core/devio.c
2208 +++ b/drivers/usb/core/devio.c
2209 @@ -140,6 +140,9 @@ module_param(usbfs_memory_mb, uint, 0644);
2210 MODULE_PARM_DESC(usbfs_memory_mb,
2211 "maximum MB allowed for usbfs buffers (0 = no limit)");
2212
2213 +/* Hard limit, necessary to avoid arithmetic overflow */
2214 +#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
2215 +
2216 static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */
2217
2218 /* Check whether it's okay to allocate more memory for a transfer */
2219 @@ -1460,6 +1463,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
2220 USBDEVFS_URB_ZERO_PACKET |
2221 USBDEVFS_URB_NO_INTERRUPT))
2222 return -EINVAL;
2223 + if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
2224 + return -EINVAL;
2225 if (uurb->buffer_length > 0 && !uurb->buffer)
2226 return -EINVAL;
2227 if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL &&
2228 @@ -1571,7 +1576,11 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
2229 totlen += isopkt[u].length;
2230 }
2231 u *= sizeof(struct usb_iso_packet_descriptor);
2232 - uurb->buffer_length = totlen;
2233 + if (totlen <= uurb->buffer_length)
2234 + uurb->buffer_length = totlen;
2235 + else
2236 + WARN_ONCE(1, "uurb->buffer_length is too short %d vs %d",
2237 + totlen, uurb->buffer_length);
2238 break;
2239
2240 default:
2241 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2242 index 822f8c50e423..78c2aca5b0fc 100644
2243 --- a/drivers/usb/core/hub.c
2244 +++ b/drivers/usb/core/hub.c
2245 @@ -4825,7 +4825,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
2246 goto loop;
2247
2248 if (udev->quirks & USB_QUIRK_DELAY_INIT)
2249 - msleep(1000);
2250 + msleep(2000);
2251
2252 /* consecutive bus-powered hubs aren't reliable; they can
2253 * violate the voltage drop budget. if the new child has
2254 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
2255 index 4c38ea41ae96..371a07d874a3 100644
2256 --- a/drivers/usb/core/message.c
2257 +++ b/drivers/usb/core/message.c
2258 @@ -2069,6 +2069,10 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
2259 elength = 1;
2260 goto next_desc;
2261 }
2262 + if ((buflen < elength) || (elength < 3)) {
2263 + dev_err(&intf->dev, "invalid descriptor buffer length\n");
2264 + break;
2265 + }
2266 if (buffer[1] != USB_DT_CS_INTERFACE) {
2267 dev_err(&intf->dev, "skipping garbage\n");
2268 goto next_desc;
2269 diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
2270 index 827e376bfa97..75e6cb044eb2 100644
2271 --- a/drivers/usb/dwc3/ep0.c
2272 +++ b/drivers/usb/dwc3/ep0.c
2273 @@ -990,6 +990,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
2274 DWC3_TRBCTL_CONTROL_DATA,
2275 true);
2276
2277 + req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
2278 +
2279 /* Now prepare one extra TRB to align transfer size */
2280 dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
2281 maxpacket - rem,
2282 @@ -1015,6 +1017,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
2283 DWC3_TRBCTL_CONTROL_DATA,
2284 true);
2285
2286 + req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
2287 +
2288 /* Now prepare one extra TRB to align transfer size */
2289 dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
2290 0, DWC3_TRBCTL_CONTROL_DATA,
2291 @@ -1029,6 +1033,9 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
2292 dwc3_ep0_prepare_one_trb(dep, req->request.dma,
2293 req->request.length, DWC3_TRBCTL_CONTROL_DATA,
2294 false);
2295 +
2296 + req->trb = &dwc->ep0_trb[dep->trb_enqueue];
2297 +
2298 ret = dwc3_ep0_start_trans(dep);
2299 }
2300
2301 diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
2302 index f95bddd6513f..daf3a07e3ffb 100644
2303 --- a/drivers/usb/gadget/function/f_mass_storage.c
2304 +++ b/drivers/usb/gadget/function/f_mass_storage.c
2305 @@ -307,8 +307,6 @@ struct fsg_common {
2306 struct completion thread_notifier;
2307 struct task_struct *thread_task;
2308
2309 - /* Callback functions. */
2310 - const struct fsg_operations *ops;
2311 /* Gadget's private data. */
2312 void *private_data;
2313
2314 @@ -2440,6 +2438,7 @@ static void handle_exception(struct fsg_common *common)
2315 static int fsg_main_thread(void *common_)
2316 {
2317 struct fsg_common *common = common_;
2318 + int i;
2319
2320 /*
2321 * Allow the thread to be killed by a signal, but set the signal mask
2322 @@ -2485,21 +2484,16 @@ static int fsg_main_thread(void *common_)
2323 common->thread_task = NULL;
2324 spin_unlock_irq(&common->lock);
2325
2326 - if (!common->ops || !common->ops->thread_exits
2327 - || common->ops->thread_exits(common) < 0) {
2328 - int i;
2329 + /* Eject media from all LUNs */
2330
2331 - down_write(&common->filesem);
2332 - for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
2333 - struct fsg_lun *curlun = common->luns[i];
2334 - if (!curlun || !fsg_lun_is_open(curlun))
2335 - continue;
2336 + down_write(&common->filesem);
2337 + for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
2338 + struct fsg_lun *curlun = common->luns[i];
2339
2340 + if (curlun && fsg_lun_is_open(curlun))
2341 fsg_lun_close(curlun);
2342 - curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
2343 - }
2344 - up_write(&common->filesem);
2345 }
2346 + up_write(&common->filesem);
2347
2348 /* Let fsg_unbind() know the thread has exited */
2349 complete_and_exit(&common->thread_notifier, 0);
2350 @@ -2690,13 +2684,6 @@ void fsg_common_remove_luns(struct fsg_common *common)
2351 }
2352 EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
2353
2354 -void fsg_common_set_ops(struct fsg_common *common,
2355 - const struct fsg_operations *ops)
2356 -{
2357 - common->ops = ops;
2358 -}
2359 -EXPORT_SYMBOL_GPL(fsg_common_set_ops);
2360 -
2361 void fsg_common_free_buffers(struct fsg_common *common)
2362 {
2363 _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
2364 diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
2365 index d3902313b8ac..dc05ca0c4359 100644
2366 --- a/drivers/usb/gadget/function/f_mass_storage.h
2367 +++ b/drivers/usb/gadget/function/f_mass_storage.h
2368 @@ -60,17 +60,6 @@ struct fsg_module_parameters {
2369 struct fsg_common;
2370
2371 /* FSF callback functions */
2372 -struct fsg_operations {
2373 - /*
2374 - * Callback function to call when thread exits. If no
2375 - * callback is set or it returns value lower then zero MSF
2376 - * will force eject all LUNs it operates on (including those
2377 - * marked as non-removable or with prevent_medium_removal flag
2378 - * set).
2379 - */
2380 - int (*thread_exits)(struct fsg_common *common);
2381 -};
2382 -
2383 struct fsg_lun_opts {
2384 struct config_group group;
2385 struct fsg_lun *lun;
2386 @@ -142,9 +131,6 @@ void fsg_common_remove_lun(struct fsg_lun *lun);
2387
2388 void fsg_common_remove_luns(struct fsg_common *common);
2389
2390 -void fsg_common_set_ops(struct fsg_common *common,
2391 - const struct fsg_operations *ops);
2392 -
2393 int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
2394 unsigned int id, const char *name,
2395 const char **name_pfx);
2396 diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
2397 index 684900fcfe24..5c28bee327e1 100644
2398 --- a/drivers/usb/gadget/legacy/inode.c
2399 +++ b/drivers/usb/gadget/legacy/inode.c
2400 @@ -28,7 +28,7 @@
2401 #include <linux/aio.h>
2402 #include <linux/uio.h>
2403 #include <linux/refcount.h>
2404 -
2405 +#include <linux/delay.h>
2406 #include <linux/device.h>
2407 #include <linux/moduleparam.h>
2408
2409 @@ -116,6 +116,7 @@ enum ep0_state {
2410 struct dev_data {
2411 spinlock_t lock;
2412 refcount_t count;
2413 + int udc_usage;
2414 enum ep0_state state; /* P: lock */
2415 struct usb_gadgetfs_event event [N_EVENT];
2416 unsigned ev_next;
2417 @@ -513,9 +514,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
2418 INIT_WORK(&priv->work, ep_user_copy_worker);
2419 schedule_work(&priv->work);
2420 }
2421 - spin_unlock(&epdata->dev->lock);
2422
2423 usb_ep_free_request(ep, req);
2424 + spin_unlock(&epdata->dev->lock);
2425 put_ep(epdata);
2426 }
2427
2428 @@ -939,9 +940,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
2429 struct usb_request *req = dev->req;
2430
2431 if ((retval = setup_req (ep, req, 0)) == 0) {
2432 + ++dev->udc_usage;
2433 spin_unlock_irq (&dev->lock);
2434 retval = usb_ep_queue (ep, req, GFP_KERNEL);
2435 spin_lock_irq (&dev->lock);
2436 + --dev->udc_usage;
2437 }
2438 dev->state = STATE_DEV_CONNECTED;
2439
2440 @@ -983,11 +986,14 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
2441 retval = -EIO;
2442 else {
2443 len = min (len, (size_t)dev->req->actual);
2444 -// FIXME don't call this with the spinlock held ...
2445 + ++dev->udc_usage;
2446 + spin_unlock_irq(&dev->lock);
2447 if (copy_to_user (buf, dev->req->buf, len))
2448 retval = -EFAULT;
2449 else
2450 retval = len;
2451 + spin_lock_irq(&dev->lock);
2452 + --dev->udc_usage;
2453 clean_req (dev->gadget->ep0, dev->req);
2454 /* NOTE userspace can't yet choose to stall */
2455 }
2456 @@ -1131,6 +1137,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
2457 retval = setup_req (dev->gadget->ep0, dev->req, len);
2458 if (retval == 0) {
2459 dev->state = STATE_DEV_CONNECTED;
2460 + ++dev->udc_usage;
2461 spin_unlock_irq (&dev->lock);
2462 if (copy_from_user (dev->req->buf, buf, len))
2463 retval = -EFAULT;
2464 @@ -1142,6 +1149,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
2465 GFP_KERNEL);
2466 }
2467 spin_lock_irq(&dev->lock);
2468 + --dev->udc_usage;
2469 if (retval < 0) {
2470 clean_req (dev->gadget->ep0, dev->req);
2471 } else
2472 @@ -1243,9 +1251,21 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
2473 struct usb_gadget *gadget = dev->gadget;
2474 long ret = -ENOTTY;
2475
2476 - if (gadget->ops->ioctl)
2477 + spin_lock_irq(&dev->lock);
2478 + if (dev->state == STATE_DEV_OPENED ||
2479 + dev->state == STATE_DEV_UNBOUND) {
2480 + /* Not bound to a UDC */
2481 + } else if (gadget->ops->ioctl) {
2482 + ++dev->udc_usage;
2483 + spin_unlock_irq(&dev->lock);
2484 +
2485 ret = gadget->ops->ioctl (gadget, code, value);
2486
2487 + spin_lock_irq(&dev->lock);
2488 + --dev->udc_usage;
2489 + }
2490 + spin_unlock_irq(&dev->lock);
2491 +
2492 return ret;
2493 }
2494
2495 @@ -1463,10 +1483,12 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
2496 if (value < 0)
2497 break;
2498
2499 + ++dev->udc_usage;
2500 spin_unlock (&dev->lock);
2501 value = usb_ep_queue (gadget->ep0, dev->req,
2502 GFP_KERNEL);
2503 spin_lock (&dev->lock);
2504 + --dev->udc_usage;
2505 if (value < 0) {
2506 clean_req (gadget->ep0, dev->req);
2507 break;
2508 @@ -1490,8 +1512,12 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
2509 req->length = value;
2510 req->zero = value < w_length;
2511
2512 + ++dev->udc_usage;
2513 spin_unlock (&dev->lock);
2514 value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
2515 + spin_lock(&dev->lock);
2516 + --dev->udc_usage;
2517 + spin_unlock(&dev->lock);
2518 if (value < 0) {
2519 DBG (dev, "ep_queue --> %d\n", value);
2520 req->status = 0;
2521 @@ -1518,21 +1544,24 @@ static void destroy_ep_files (struct dev_data *dev)
2522 /* break link to FS */
2523 ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
2524 list_del_init (&ep->epfiles);
2525 + spin_unlock_irq (&dev->lock);
2526 +
2527 dentry = ep->dentry;
2528 ep->dentry = NULL;
2529 parent = d_inode(dentry->d_parent);
2530
2531 /* break link to controller */
2532 + mutex_lock(&ep->lock);
2533 if (ep->state == STATE_EP_ENABLED)
2534 (void) usb_ep_disable (ep->ep);
2535 ep->state = STATE_EP_UNBOUND;
2536 usb_ep_free_request (ep->ep, ep->req);
2537 ep->ep = NULL;
2538 + mutex_unlock(&ep->lock);
2539 +
2540 wake_up (&ep->wait);
2541 put_ep (ep);
2542
2543 - spin_unlock_irq (&dev->lock);
2544 -
2545 /* break link to dcache */
2546 inode_lock(parent);
2547 d_delete (dentry);
2548 @@ -1603,6 +1632,11 @@ gadgetfs_unbind (struct usb_gadget *gadget)
2549
2550 spin_lock_irq (&dev->lock);
2551 dev->state = STATE_DEV_UNBOUND;
2552 + while (dev->udc_usage > 0) {
2553 + spin_unlock_irq(&dev->lock);
2554 + usleep_range(1000, 2000);
2555 + spin_lock_irq(&dev->lock);
2556 + }
2557 spin_unlock_irq (&dev->lock);
2558
2559 destroy_ep_files (dev);
2560 diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
2561 index e99ab57ee3e5..fcba59782f26 100644
2562 --- a/drivers/usb/gadget/legacy/mass_storage.c
2563 +++ b/drivers/usb/gadget/legacy/mass_storage.c
2564 @@ -107,15 +107,6 @@ static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
2565
2566 FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
2567
2568 -static unsigned long msg_registered;
2569 -static void msg_cleanup(void);
2570 -
2571 -static int msg_thread_exits(struct fsg_common *common)
2572 -{
2573 - msg_cleanup();
2574 - return 0;
2575 -}
2576 -
2577 static int msg_do_config(struct usb_configuration *c)
2578 {
2579 struct fsg_opts *opts;
2580 @@ -154,9 +145,6 @@ static struct usb_configuration msg_config_driver = {
2581
2582 static int msg_bind(struct usb_composite_dev *cdev)
2583 {
2584 - static const struct fsg_operations ops = {
2585 - .thread_exits = msg_thread_exits,
2586 - };
2587 struct fsg_opts *opts;
2588 struct fsg_config config;
2589 int status;
2590 @@ -173,8 +161,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
2591 if (status)
2592 goto fail;
2593
2594 - fsg_common_set_ops(opts->common, &ops);
2595 -
2596 status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
2597 if (status)
2598 goto fail_set_cdev;
2599 @@ -256,18 +242,12 @@ MODULE_LICENSE("GPL");
2600
2601 static int __init msg_init(void)
2602 {
2603 - int ret;
2604 -
2605 - ret = usb_composite_probe(&msg_driver);
2606 - set_bit(0, &msg_registered);
2607 -
2608 - return ret;
2609 + return usb_composite_probe(&msg_driver);
2610 }
2611 module_init(msg_init);
2612
2613 -static void msg_cleanup(void)
2614 +static void __exit msg_cleanup(void)
2615 {
2616 - if (test_and_clear_bit(0, &msg_registered))
2617 - usb_composite_unregister(&msg_driver);
2618 + usb_composite_unregister(&msg_driver);
2619 }
2620 module_exit(msg_cleanup);
2621 diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
2622 index 98d71400f8a1..a884c022df7a 100644
2623 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c
2624 +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
2625 @@ -29,6 +29,8 @@
2626 #include <linux/of_gpio.h>
2627
2628 #include "atmel_usba_udc.h"
2629 +#define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \
2630 + | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)
2631
2632 #ifdef CONFIG_USB_GADGET_DEBUG_FS
2633 #include <linux/debugfs.h>
2634 @@ -2361,7 +2363,7 @@ static int usba_udc_probe(struct platform_device *pdev)
2635 IRQ_NOAUTOEN);
2636 ret = devm_request_threaded_irq(&pdev->dev,
2637 gpio_to_irq(udc->vbus_pin), NULL,
2638 - usba_vbus_irq_thread, IRQF_ONESHOT,
2639 + usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
2640 "atmel_usba_udc", udc);
2641 if (ret) {
2642 udc->vbus_pin = -ENODEV;
2643 diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
2644 index e6f04eee95c4..63c5fe6f7bd4 100644
2645 --- a/drivers/usb/gadget/udc/core.c
2646 +++ b/drivers/usb/gadget/udc/core.c
2647 @@ -1314,8 +1314,7 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
2648 udc->dev.driver = &driver->driver;
2649 udc->gadget->dev.driver = &driver->driver;
2650
2651 - if (driver->max_speed < udc->gadget->max_speed)
2652 - usb_gadget_udc_set_speed(udc, driver->max_speed);
2653 + usb_gadget_udc_set_speed(udc, driver->max_speed);
2654
2655 ret = driver->bind(udc->gadget, driver);
2656 if (ret)
2657 diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
2658 index 3c3760315910..374f85f612d9 100644
2659 --- a/drivers/usb/gadget/udc/dummy_hcd.c
2660 +++ b/drivers/usb/gadget/udc/dummy_hcd.c
2661 @@ -237,6 +237,8 @@ struct dummy_hcd {
2662
2663 struct usb_device *udev;
2664 struct list_head urbp_list;
2665 + struct urbp *next_frame_urbp;
2666 +
2667 u32 stream_en_ep;
2668 u8 num_stream[30 / 2];
2669
2670 @@ -253,11 +255,13 @@ struct dummy {
2671 */
2672 struct dummy_ep ep[DUMMY_ENDPOINTS];
2673 int address;
2674 + int callback_usage;
2675 struct usb_gadget gadget;
2676 struct usb_gadget_driver *driver;
2677 struct dummy_request fifo_req;
2678 u8 fifo_buf[FIFO_SIZE];
2679 u16 devstatus;
2680 + unsigned ints_enabled:1;
2681 unsigned udc_suspended:1;
2682 unsigned pullup:1;
2683
2684 @@ -440,18 +444,27 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
2685 (~dum_hcd->old_status) & dum_hcd->port_status;
2686
2687 /* Report reset and disconnect events to the driver */
2688 - if (dum->driver && (disconnect || reset)) {
2689 + if (dum->ints_enabled && (disconnect || reset)) {
2690 stop_activity(dum);
2691 + ++dum->callback_usage;
2692 + spin_unlock(&dum->lock);
2693 if (reset)
2694 usb_gadget_udc_reset(&dum->gadget, dum->driver);
2695 else
2696 dum->driver->disconnect(&dum->gadget);
2697 + spin_lock(&dum->lock);
2698 + --dum->callback_usage;
2699 }
2700 - } else if (dum_hcd->active != dum_hcd->old_active) {
2701 + } else if (dum_hcd->active != dum_hcd->old_active &&
2702 + dum->ints_enabled) {
2703 + ++dum->callback_usage;
2704 + spin_unlock(&dum->lock);
2705 if (dum_hcd->old_active && dum->driver->suspend)
2706 dum->driver->suspend(&dum->gadget);
2707 else if (!dum_hcd->old_active && dum->driver->resume)
2708 dum->driver->resume(&dum->gadget);
2709 + spin_lock(&dum->lock);
2710 + --dum->callback_usage;
2711 }
2712
2713 dum_hcd->old_status = dum_hcd->port_status;
2714 @@ -972,8 +985,11 @@ static int dummy_udc_start(struct usb_gadget *g,
2715 * can't enumerate without help from the driver we're binding.
2716 */
2717
2718 + spin_lock_irq(&dum->lock);
2719 dum->devstatus = 0;
2720 dum->driver = driver;
2721 + dum->ints_enabled = 1;
2722 + spin_unlock_irq(&dum->lock);
2723
2724 return 0;
2725 }
2726 @@ -984,6 +1000,16 @@ static int dummy_udc_stop(struct usb_gadget *g)
2727 struct dummy *dum = dum_hcd->dum;
2728
2729 spin_lock_irq(&dum->lock);
2730 + dum->ints_enabled = 0;
2731 + stop_activity(dum);
2732 +
2733 + /* emulate synchronize_irq(): wait for callbacks to finish */
2734 + while (dum->callback_usage > 0) {
2735 + spin_unlock_irq(&dum->lock);
2736 + usleep_range(1000, 2000);
2737 + spin_lock_irq(&dum->lock);
2738 + }
2739 +
2740 dum->driver = NULL;
2741 spin_unlock_irq(&dum->lock);
2742
2743 @@ -1037,7 +1063,12 @@ static int dummy_udc_probe(struct platform_device *pdev)
2744 memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
2745 dum->gadget.name = gadget_name;
2746 dum->gadget.ops = &dummy_ops;
2747 - dum->gadget.max_speed = USB_SPEED_SUPER;
2748 + if (mod_data.is_super_speed)
2749 + dum->gadget.max_speed = USB_SPEED_SUPER;
2750 + else if (mod_data.is_high_speed)
2751 + dum->gadget.max_speed = USB_SPEED_HIGH;
2752 + else
2753 + dum->gadget.max_speed = USB_SPEED_FULL;
2754
2755 dum->gadget.dev.parent = &pdev->dev;
2756 init_dummy_udc_hw(dum);
2757 @@ -1246,6 +1277,8 @@ static int dummy_urb_enqueue(
2758
2759 list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
2760 urb->hcpriv = urbp;
2761 + if (!dum_hcd->next_frame_urbp)
2762 + dum_hcd->next_frame_urbp = urbp;
2763 if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
2764 urb->error_count = 1; /* mark as a new urb */
2765
2766 @@ -1521,6 +1554,8 @@ static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address)
2767 if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
2768 dum->ss_hcd : dum->hs_hcd)))
2769 return NULL;
2770 + if (!dum->ints_enabled)
2771 + return NULL;
2772 if ((address & ~USB_DIR_IN) == 0)
2773 return &dum->ep[0];
2774 for (i = 1; i < DUMMY_ENDPOINTS; i++) {
2775 @@ -1762,6 +1797,7 @@ static void dummy_timer(unsigned long _dum_hcd)
2776 spin_unlock_irqrestore(&dum->lock, flags);
2777 return;
2778 }
2779 + dum_hcd->next_frame_urbp = NULL;
2780
2781 for (i = 0; i < DUMMY_ENDPOINTS; i++) {
2782 if (!ep_info[i].name)
2783 @@ -1778,6 +1814,10 @@ static void dummy_timer(unsigned long _dum_hcd)
2784 int type;
2785 int status = -EINPROGRESS;
2786
2787 + /* stop when we reach URBs queued after the timer interrupt */
2788 + if (urbp == dum_hcd->next_frame_urbp)
2789 + break;
2790 +
2791 urb = urbp->urb;
2792 if (urb->unlinked)
2793 goto return_urb;
2794 @@ -1857,10 +1897,12 @@ static void dummy_timer(unsigned long _dum_hcd)
2795 * until setup() returns; no reentrancy issues etc.
2796 */
2797 if (value > 0) {
2798 + ++dum->callback_usage;
2799 spin_unlock(&dum->lock);
2800 value = dum->driver->setup(&dum->gadget,
2801 &setup);
2802 spin_lock(&dum->lock);
2803 + --dum->callback_usage;
2804
2805 if (value >= 0) {
2806 /* no delays (max 64KB data stage) */
2807 @@ -2561,8 +2603,6 @@ static struct hc_driver dummy_hcd = {
2808 .product_desc = "Dummy host controller",
2809 .hcd_priv_size = sizeof(struct dummy_hcd),
2810
2811 - .flags = HCD_USB3 | HCD_SHARED,
2812 -
2813 .reset = dummy_setup,
2814 .start = dummy_start,
2815 .stop = dummy_stop,
2816 @@ -2591,8 +2631,12 @@ static int dummy_hcd_probe(struct platform_device *pdev)
2817 dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
2818 dum = *((void **)dev_get_platdata(&pdev->dev));
2819
2820 - if (!mod_data.is_super_speed)
2821 + if (mod_data.is_super_speed)
2822 + dummy_hcd.flags = HCD_USB3 | HCD_SHARED;
2823 + else if (mod_data.is_high_speed)
2824 dummy_hcd.flags = HCD_USB2;
2825 + else
2826 + dummy_hcd.flags = HCD_USB11;
2827 hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
2828 if (!hs_hcd)
2829 return -ENOMEM;
2830 diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
2831 index e1de8fe599a3..89ce1eddfe77 100644
2832 --- a/drivers/usb/gadget/udc/renesas_usb3.c
2833 +++ b/drivers/usb/gadget/udc/renesas_usb3.c
2834 @@ -1032,7 +1032,7 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
2835 usb3_ep->ep.maxpacket);
2836 u8 *buf = usb3_req->req.buf + usb3_req->req.actual;
2837 u32 tmp = 0;
2838 - bool is_last;
2839 + bool is_last = !len ? true : false;
2840
2841 if (usb3_wait_pipe_status(usb3_ep, PX_STA_BUFSTS) < 0)
2842 return -EBUSY;
2843 @@ -1053,7 +1053,8 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
2844 usb3_write(usb3, tmp, fifo_reg);
2845 }
2846
2847 - is_last = usb3_is_transfer_complete(usb3_ep, usb3_req);
2848 + if (!is_last)
2849 + is_last = usb3_is_transfer_complete(usb3_ep, usb3_req);
2850 /* Send the data */
2851 usb3_set_px_con_send(usb3_ep, len, is_last);
2852
2853 @@ -1144,7 +1145,8 @@ static void usb3_start_pipe0(struct renesas_usb3_ep *usb3_ep,
2854 usb3_set_p0_con_for_ctrl_read_data(usb3);
2855 } else {
2856 usb3_clear_bit(usb3, P0_MOD_DIR, USB3_P0_MOD);
2857 - usb3_set_p0_con_for_ctrl_write_data(usb3);
2858 + if (usb3_req->req.length)
2859 + usb3_set_p0_con_for_ctrl_write_data(usb3);
2860 }
2861
2862 usb3_p0_xfer(usb3_ep, usb3_req);
2863 @@ -2047,7 +2049,16 @@ static u32 usb3_calc_ramarea(int ram_size)
2864 static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep,
2865 const struct usb_endpoint_descriptor *desc)
2866 {
2867 - return usb3_ep->rammap_val | PN_RAMMAP_MPKT(usb_endpoint_maxp(desc));
2868 + int i;
2869 + const u32 max_packet_array[] = {8, 16, 32, 64, 512};
2870 + u32 mpkt = PN_RAMMAP_MPKT(1024);
2871 +
2872 + for (i = 0; i < ARRAY_SIZE(max_packet_array); i++) {
2873 + if (usb_endpoint_maxp(desc) <= max_packet_array[i])
2874 + mpkt = PN_RAMMAP_MPKT(max_packet_array[i]);
2875 + }
2876 +
2877 + return usb3_ep->rammap_val | mpkt;
2878 }
2879
2880 static int usb3_enable_pipe_n(struct renesas_usb3_ep *usb3_ep,
2881 diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
2882 index 658d9d1f9ea3..6dda3623a276 100644
2883 --- a/drivers/usb/host/pci-quirks.c
2884 +++ b/drivers/usb/host/pci-quirks.c
2885 @@ -447,7 +447,7 @@ static int usb_asmedia_wait_write(struct pci_dev *pdev)
2886 if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
2887 return 0;
2888
2889 - usleep_range(40, 60);
2890 + udelay(50);
2891 }
2892
2893 dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
2894 @@ -1022,7 +1022,7 @@ EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
2895 *
2896 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
2897 * It signals to the BIOS that the OS wants control of the host controller,
2898 - * and then waits 5 seconds for the BIOS to hand over control.
2899 + * and then waits 1 second for the BIOS to hand over control.
2900 * If we timeout, assume the BIOS is broken and take control anyway.
2901 */
2902 static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
2903 @@ -1069,9 +1069,9 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
2904 if (val & XHCI_HC_BIOS_OWNED) {
2905 writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
2906
2907 - /* Wait for 5 seconds with 10 microsecond polling interval */
2908 + /* Wait for 1 second with 10 microsecond polling interval */
2909 timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
2910 - 0, 5000, 10);
2911 + 0, 1000000, 10);
2912
2913 /* Assume a buggy BIOS and take HC ownership anyway */
2914 if (timeout) {
2915 @@ -1100,7 +1100,7 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
2916 * operational or runtime registers. Wait 5 seconds and no more.
2917 */
2918 timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
2919 - 5000, 10);
2920 + 5000000, 10);
2921 /* Assume a buggy HC and start HC initialization anyway */
2922 if (timeout) {
2923 val = readl(op_reg_base + XHCI_STS_OFFSET);
2924 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
2925 index 00721e8807ab..950dee33bfcc 100644
2926 --- a/drivers/usb/host/xhci-hub.c
2927 +++ b/drivers/usb/host/xhci-hub.c
2928 @@ -112,7 +112,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
2929
2930 /* If PSI table exists, add the custom speed attributes from it */
2931 if (usb3_1 && xhci->usb3_rhub.psi_count) {
2932 - u32 ssp_cap_base, bm_attrib, psi;
2933 + u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
2934 int offset;
2935
2936 ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
2937 @@ -139,6 +139,15 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
2938 for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
2939 psi = xhci->usb3_rhub.psi[i];
2940 psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
2941 + psi_exp = XHCI_EXT_PORT_PSIE(psi);
2942 + psi_mant = XHCI_EXT_PORT_PSIM(psi);
2943 +
2944 + /* Shift to Gbps and set SSP Link BIT(14) if 10Gpbs */
2945 + for (; psi_exp < 3; psi_exp++)
2946 + psi_mant /= 1000;
2947 + if (psi_mant >= 10)
2948 + psi |= BIT(14);
2949 +
2950 if ((psi & PLT_MASK) == PLT_SYM) {
2951 /* Symmetric, create SSA RX and TX from one PSI entry */
2952 put_unaligned_le32(psi, &buf[offset]);
2953 @@ -1473,9 +1482,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
2954 t2 |= PORT_WKOC_E | PORT_WKCONN_E;
2955 t2 &= ~PORT_WKDISC_E;
2956 }
2957 - if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
2958 - (hcd->speed < HCD_USB3))
2959 - t2 &= ~PORT_WAKE_BITS;
2960 } else
2961 t2 &= ~PORT_WAKE_BITS;
2962
2963 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
2964 index 8071c8fdd15e..76f392954733 100644
2965 --- a/drivers/usb/host/xhci-pci.c
2966 +++ b/drivers/usb/host/xhci-pci.c
2967 @@ -54,11 +54,6 @@
2968 #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
2969 #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
2970
2971 -#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
2972 -#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
2973 -#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
2974 -#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
2975 -
2976 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
2977
2978 static const char hcd_name[] = "xhci_hcd";
2979 @@ -142,13 +137,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
2980 if (pdev->vendor == PCI_VENDOR_ID_AMD)
2981 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
2982
2983 - if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
2984 - ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
2985 - (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
2986 - (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
2987 - (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
2988 - xhci->quirks |= XHCI_U2_DISABLE_WAKE;
2989 -
2990 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
2991 xhci->quirks |= XHCI_LPM_SUPPORT;
2992 xhci->quirks |= XHCI_INTEL_HOST;
2993 diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
2994 index c04144b25a67..208740771ff9 100644
2995 --- a/drivers/usb/host/xhci-plat.c
2996 +++ b/drivers/usb/host/xhci-plat.c
2997 @@ -186,14 +186,18 @@ static int xhci_plat_probe(struct platform_device *pdev)
2998 * 2. xhci_plat is child of a device from firmware (dwc3-plat)
2999 * 3. xhci_plat is grandchild of a pci device (dwc3-pci)
3000 */
3001 - sysdev = &pdev->dev;
3002 - if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node)
3003 - sysdev = sysdev->parent;
3004 + for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) {
3005 + if (is_of_node(sysdev->fwnode) ||
3006 + is_acpi_device_node(sysdev->fwnode))
3007 + break;
3008 #ifdef CONFIG_PCI
3009 - else if (sysdev->parent && sysdev->parent->parent &&
3010 - sysdev->parent->parent->bus == &pci_bus_type)
3011 - sysdev = sysdev->parent->parent;
3012 + else if (sysdev->bus == &pci_bus_type)
3013 + break;
3014 #endif
3015 + }
3016 +
3017 + if (!sysdev)
3018 + sysdev = &pdev->dev;
3019
3020 /* Try to set 64-bit DMA first */
3021 if (WARN_ON(!sysdev->dma_mask))
3022 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3023 index b2ff1ff1a02f..ee198ea47f49 100644
3024 --- a/drivers/usb/host/xhci.c
3025 +++ b/drivers/usb/host/xhci.c
3026 @@ -1703,7 +1703,8 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
3027 if (xhci->quirks & XHCI_MTK_HOST) {
3028 ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
3029 if (ret < 0) {
3030 - xhci_free_endpoint_ring(xhci, virt_dev, ep_index);
3031 + xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
3032 + virt_dev->eps[ep_index].new_ring = NULL;
3033 return ret;
3034 }
3035 }
3036 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3037 index e3e935291ed6..d7420bb9f2e2 100644
3038 --- a/drivers/usb/host/xhci.h
3039 +++ b/drivers/usb/host/xhci.h
3040 @@ -728,6 +728,8 @@ struct xhci_ep_ctx {
3041 #define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
3042 /* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
3043 #define EP_HAS_LSA (1 << 15)
3044 +/* hosts with LEC=1 use bits 31:24 as ESIT high bits. */
3045 +#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p) (((p) >> 24) & 0xff)
3046
3047 /* ep_info2 bitmasks */
3048 /*
3049 @@ -1674,7 +1676,7 @@ struct xhci_bus_state {
3050
3051 static inline unsigned int hcd_index(struct usb_hcd *hcd)
3052 {
3053 - if (hcd->speed == HCD_USB3)
3054 + if (hcd->speed >= HCD_USB3)
3055 return 0;
3056 else
3057 return 1;
3058 @@ -1819,7 +1821,7 @@ struct xhci_hcd {
3059 /* For controller with a broken Port Disable implementation */
3060 #define XHCI_BROKEN_PORT_PED (1 << 25)
3061 #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
3062 -#define XHCI_U2_DISABLE_WAKE (1 << 27)
3063 +/* Reserved. It was XHCI_U2_DISABLE_WAKE */
3064 #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
3065
3066 unsigned int num_active_eps;
3067 @@ -2452,8 +2454,8 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq,
3068 u8 lsa;
3069 u8 hid;
3070
3071 - esit = EP_MAX_ESIT_PAYLOAD_HI(info) << 16 |
3072 - EP_MAX_ESIT_PAYLOAD_LO(tx_info);
3073 + esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |
3074 + CTX_TO_MAX_ESIT_PAYLOAD(tx_info);
3075
3076 ep_state = info & EP_STATE_MASK;
3077 max_pstr = info & EP_MAXPSTREAMS_MASK;
3078 diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
3079 index d1af831f43eb..68f26904c316 100644
3080 --- a/drivers/usb/renesas_usbhs/fifo.c
3081 +++ b/drivers/usb/renesas_usbhs/fifo.c
3082 @@ -282,11 +282,26 @@ static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
3083 struct usbhs_fifo *fifo)
3084 {
3085 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
3086 + int ret = 0;
3087
3088 - if (!usbhs_pipe_is_dcp(pipe))
3089 - usbhsf_fifo_barrier(priv, fifo);
3090 + if (!usbhs_pipe_is_dcp(pipe)) {
3091 + /*
3092 + * This driver checks the pipe condition first to avoid -EBUSY
3093 + * from usbhsf_fifo_barrier() with about 10 msec delay in
3094 + * the interrupt handler if the pipe is RX direction and empty.
3095 + */
3096 + if (usbhs_pipe_is_dir_in(pipe))
3097 + ret = usbhs_pipe_is_accessible(pipe);
3098 + if (!ret)
3099 + ret = usbhsf_fifo_barrier(priv, fifo);
3100 + }
3101
3102 - usbhs_write(priv, fifo->ctr, BCLR);
3103 + /*
3104 + * if non-DCP pipe, this driver should set BCLR when
3105 + * usbhsf_fifo_barrier() returns 0.
3106 + */
3107 + if (!ret)
3108 + usbhs_write(priv, fifo->ctr, BCLR);
3109 }
3110
3111 static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
3112 diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
3113 index 1a59f335b063..a3ccb899df60 100644
3114 --- a/drivers/usb/storage/transport.c
3115 +++ b/drivers/usb/storage/transport.c
3116 @@ -834,13 +834,25 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
3117 if (result == USB_STOR_TRANSPORT_GOOD) {
3118 srb->result = SAM_STAT_GOOD;
3119 srb->sense_buffer[0] = 0x0;
3120 + }
3121 +
3122 + /*
3123 + * ATA-passthru commands use sense data to report
3124 + * the command completion status, and often devices
3125 + * return Check Condition status when nothing is
3126 + * wrong.
3127 + */
3128 + else if (srb->cmnd[0] == ATA_16 ||
3129 + srb->cmnd[0] == ATA_12) {
3130 + /* leave the data alone */
3131 + }
3132
3133 /*
3134 * If there was a problem, report an unspecified
3135 * hardware error to prevent the higher layers from
3136 * entering an infinite retry loop.
3137 */
3138 - } else {
3139 + else {
3140 srb->result = DID_ERROR << 16;
3141 if ((sshdr.response_code & 0x72) == 0x72)
3142 srb->sense_buffer[1] = HARDWARE_ERROR;
3143 diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
3144 index f58caa9e6a27..a155cd02bce2 100644
3145 --- a/drivers/usb/storage/uas-detect.h
3146 +++ b/drivers/usb/storage/uas-detect.h
3147 @@ -9,7 +9,8 @@ static int uas_is_interface(struct usb_host_interface *intf)
3148 intf->desc.bInterfaceProtocol == USB_PR_UAS);
3149 }
3150
3151 -static int uas_find_uas_alt_setting(struct usb_interface *intf)
3152 +static struct usb_host_interface *uas_find_uas_alt_setting(
3153 + struct usb_interface *intf)
3154 {
3155 int i;
3156
3157 @@ -17,10 +18,10 @@ static int uas_find_uas_alt_setting(struct usb_interface *intf)
3158 struct usb_host_interface *alt = &intf->altsetting[i];
3159
3160 if (uas_is_interface(alt))
3161 - return alt->desc.bAlternateSetting;
3162 + return alt;
3163 }
3164
3165 - return -ENODEV;
3166 + return NULL;
3167 }
3168
3169 static int uas_find_endpoints(struct usb_host_interface *alt,
3170 @@ -58,14 +59,14 @@ static int uas_use_uas_driver(struct usb_interface *intf,
3171 struct usb_device *udev = interface_to_usbdev(intf);
3172 struct usb_hcd *hcd = bus_to_hcd(udev->bus);
3173 unsigned long flags = id->driver_info;
3174 - int r, alt;
3175 -
3176 + struct usb_host_interface *alt;
3177 + int r;
3178
3179 alt = uas_find_uas_alt_setting(intf);
3180 - if (alt < 0)
3181 + if (!alt)
3182 return 0;
3183
3184 - r = uas_find_endpoints(&intf->altsetting[alt], eps);
3185 + r = uas_find_endpoints(alt, eps);
3186 if (r < 0)
3187 return 0;
3188
3189 diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
3190 index 5ef014ba6ae8..9876af4ab64e 100644
3191 --- a/drivers/usb/storage/uas.c
3192 +++ b/drivers/usb/storage/uas.c
3193 @@ -873,14 +873,14 @@ MODULE_DEVICE_TABLE(usb, uas_usb_ids);
3194 static int uas_switch_interface(struct usb_device *udev,
3195 struct usb_interface *intf)
3196 {
3197 - int alt;
3198 + struct usb_host_interface *alt;
3199
3200 alt = uas_find_uas_alt_setting(intf);
3201 - if (alt < 0)
3202 - return alt;
3203 + if (!alt)
3204 + return -ENODEV;
3205
3206 - return usb_set_interface(udev,
3207 - intf->altsetting[0].desc.bInterfaceNumber, alt);
3208 + return usb_set_interface(udev, alt->desc.bInterfaceNumber,
3209 + alt->desc.bAlternateSetting);
3210 }
3211
3212 static int uas_configure_endpoints(struct uas_dev_info *devinfo)
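Both UAS hunks above switch uas_find_uas_alt_setting() from returning an alternate-setting number to returning a pointer (or NULL), so callers no longer index altsetting[] with a value that doubles as an error code. A simplified userspace sketch of the pointer-returning lookup, with made-up types:

#include <stdio.h>
#include <stddef.h>

struct alt_setting { int number; int is_uas; };

static struct alt_setting *find_uas_alt(struct alt_setting *alts, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (alts[i].is_uas)
			return &alts[i];
	return NULL;	/* caller maps this to -ENODEV */
}

int main(void)
{
	struct alt_setting alts[] = { { 0, 0 }, { 1, 1 } };
	struct alt_setting *alt = find_uas_alt(alts, 2);

	if (alt)
		printf("UAS alt setting: %d\n", alt->number);
	else
		printf("no UAS alt setting\n");
	return 0;
}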
3213 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
3214 index 5a70c33ef0e0..eb06d88b41d6 100644
3215 --- a/drivers/usb/storage/unusual_devs.h
3216 +++ b/drivers/usb/storage/unusual_devs.h
3217 @@ -1459,6 +1459,13 @@ UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
3218 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
3219 US_FL_SANE_SENSE ),
3220
3221 +/* Reported by Kris Lindgren <kris.lindgren@gmail.com> */
3222 +UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999,
3223 + "Seagate",
3224 + "External",
3225 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
3226 + US_FL_NO_WP_DETECT ),
3227 +
3228 UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999,
3229 "Maxtor",
3230 "USB to SATA",
3231 diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
3232 index 35a1e777b449..9a53912bdfe9 100644
3233 --- a/drivers/uwb/hwa-rc.c
3234 +++ b/drivers/uwb/hwa-rc.c
3235 @@ -825,6 +825,8 @@ static int hwarc_probe(struct usb_interface *iface,
3236
3237 if (iface->cur_altsetting->desc.bNumEndpoints < 1)
3238 return -ENODEV;
3239 + if (!usb_endpoint_xfer_int(&iface->cur_altsetting->endpoint[0].desc))
3240 + return -ENODEV;
3241
3242 result = -ENOMEM;
3243 uwb_rc = uwb_rc_alloc();
3244 diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c
3245 index 01c20a260a8b..39dd4ef53c77 100644
3246 --- a/drivers/uwb/uwbd.c
3247 +++ b/drivers/uwb/uwbd.c
3248 @@ -302,18 +302,22 @@ static int uwbd(void *param)
3249 /** Start the UWB daemon */
3250 void uwbd_start(struct uwb_rc *rc)
3251 {
3252 - rc->uwbd.task = kthread_run(uwbd, rc, "uwbd");
3253 - if (rc->uwbd.task == NULL)
3254 + struct task_struct *task = kthread_run(uwbd, rc, "uwbd");
3255 + if (IS_ERR(task)) {
3256 + rc->uwbd.task = NULL;
3257 printk(KERN_ERR "UWB: Cannot start management daemon; "
3258 "UWB won't work\n");
3259 - else
3260 + } else {
3261 + rc->uwbd.task = task;
3262 rc->uwbd.pid = rc->uwbd.task->pid;
3263 + }
3264 }
3265
3266 /* Stop the UWB daemon and free any unprocessed events */
3267 void uwbd_stop(struct uwb_rc *rc)
3268 {
3269 - kthread_stop(rc->uwbd.task);
3270 + if (rc->uwbd.task)
3271 + kthread_stop(rc->uwbd.task);
3272 uwbd_flush(rc);
3273 }
3274
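The uwbd.c fix above matters because kthread_run() reports failure with an encoded error pointer, never NULL, so the old NULL check never fired and the unconditional kthread_stop() could later run on an error pointer. A tiny userspace re-implementation of the ERR_PTR()/IS_ERR() convention shows why the NULL test misses the failure:

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *task = ERR_PTR(-12);	/* roughly what a failed kthread_run() returns */

	printf("task == NULL: %s\n", task == NULL ? "yes" : "no");
	printf("IS_ERR(task): %s (error %ld)\n",
	       IS_ERR(task) ? "yes" : "no", -(long)task);
	return 0;
}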
3275 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
3276 index 3f3eb7b17cac..806eb85343fb 100644
3277 --- a/fs/btrfs/ctree.h
3278 +++ b/fs/btrfs/ctree.h
3279 @@ -723,7 +723,7 @@ struct btrfs_delayed_root;
3280 * Indicate that a whole-filesystem exclusive operation is running
3281 * (device replace, resize, device add/delete, balance)
3282 */
3283 -#define BTRFS_FS_EXCL_OP 14
3284 +#define BTRFS_FS_EXCL_OP 16
3285
3286 struct btrfs_fs_info {
3287 u8 fsid[BTRFS_FSID_SIZE];
3288 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
3289 index 0aff9b278c19..4aa3d4c27dfe 100644
3290 --- a/fs/btrfs/extent_io.c
3291 +++ b/fs/btrfs/extent_io.c
3292 @@ -2799,7 +2799,7 @@ static int submit_extent_page(int op, int op_flags, struct extent_io_tree *tree,
3293 }
3294 }
3295
3296 - bio = btrfs_bio_alloc(bdev, sector << 9);
3297 + bio = btrfs_bio_alloc(bdev, (u64)sector << 9);
3298 bio_add_page(bio, page, page_size, offset);
3299 bio->bi_end_io = end_io_func;
3300 bio->bi_private = tree;
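The extent_io.c change above widens the sector number before shifting. The sketch below shows the truncation a 32-bit shift would cause for sectors at or beyond 2^23 (a 32-bit sector variable is assumed here purely for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sector = 0x00800000;			/* 2^23 sectors = 4 GiB */
	uint64_t wrapped = sector << 9;			/* shift performed in 32 bits */
	uint64_t widened = (uint64_t)sector << 9;	/* widened first, as in the fix */

	printf("32-bit shift: %llu\n", (unsigned long long)wrapped);
	printf("64-bit shift: %llu\n", (unsigned long long)widened);
	return 0;
}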
3301 diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
3302 index acb6f97deb97..196a07a87179 100644
3303 --- a/fs/overlayfs/copy_up.c
3304 +++ b/fs/overlayfs/copy_up.c
3305 @@ -561,10 +561,8 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
3306 c->tmpfile = true;
3307 err = ovl_copy_up_locked(c);
3308 } else {
3309 - err = -EIO;
3310 - if (lock_rename(c->workdir, c->destdir) != NULL) {
3311 - pr_err("overlayfs: failed to lock workdir+upperdir\n");
3312 - } else {
3313 + err = ovl_lock_rename_workdir(c->workdir, c->destdir);
3314 + if (!err) {
3315 err = ovl_copy_up_locked(c);
3316 unlock_rename(c->workdir, c->destdir);
3317 }
3318 diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
3319 index 48b70e6490f3..9b97b35b39c8 100644
3320 --- a/fs/overlayfs/dir.c
3321 +++ b/fs/overlayfs/dir.c
3322 @@ -216,26 +216,6 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
3323 return err;
3324 }
3325
3326 -static int ovl_lock_rename_workdir(struct dentry *workdir,
3327 - struct dentry *upperdir)
3328 -{
3329 - /* Workdir should not be the same as upperdir */
3330 - if (workdir == upperdir)
3331 - goto err;
3332 -
3333 - /* Workdir should not be subdir of upperdir and vice versa */
3334 - if (lock_rename(workdir, upperdir) != NULL)
3335 - goto err_unlock;
3336 -
3337 - return 0;
3338 -
3339 -err_unlock:
3340 - unlock_rename(workdir, upperdir);
3341 -err:
3342 - pr_err("overlayfs: failed to lock workdir+upperdir\n");
3343 - return -EIO;
3344 -}
3345 -
3346 static struct dentry *ovl_clear_empty(struct dentry *dentry,
3347 struct list_head *list)
3348 {
3349 diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
3350 index 8aef2b304b2d..9deec68075dc 100644
3351 --- a/fs/overlayfs/namei.c
3352 +++ b/fs/overlayfs/namei.c
3353 @@ -506,6 +506,7 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
3354
3355 index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len);
3356 if (IS_ERR(index)) {
3357 + err = PTR_ERR(index);
3358 pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
3359 "overlayfs: mount with '-o index=off' to disable inodes index.\n",
3360 d_inode(origin)->i_ino, name.len, name.name,
3361 diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
3362 index e927a62c97ae..f57f47742f5f 100644
3363 --- a/fs/overlayfs/overlayfs.h
3364 +++ b/fs/overlayfs/overlayfs.h
3365 @@ -234,6 +234,7 @@ bool ovl_inuse_trylock(struct dentry *dentry);
3366 void ovl_inuse_unlock(struct dentry *dentry);
3367 int ovl_nlink_start(struct dentry *dentry, bool *locked);
3368 void ovl_nlink_end(struct dentry *dentry, bool locked);
3369 +int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
3370
3371 static inline bool ovl_is_impuredir(struct dentry *dentry)
3372 {
3373 diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
3374 index 878a750986dd..25d9b5adcd42 100644
3375 --- a/fs/overlayfs/ovl_entry.h
3376 +++ b/fs/overlayfs/ovl_entry.h
3377 @@ -37,6 +37,9 @@ struct ovl_fs {
3378 bool noxattr;
3379 /* sb common to all layers */
3380 struct super_block *same_sb;
3381 + /* Did we take the inuse lock? */
3382 + bool upperdir_locked;
3383 + bool workdir_locked;
3384 };
3385
3386 /* private information held for every overlayfs dentry */
3387 diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
3388 index 3d424a51cabb..74f7ead442f0 100644
3389 --- a/fs/overlayfs/readdir.c
3390 +++ b/fs/overlayfs/readdir.c
3391 @@ -672,6 +672,7 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
3392 struct path *lowerstack, unsigned int numlower)
3393 {
3394 int err;
3395 + struct dentry *index = NULL;
3396 struct inode *dir = dentry->d_inode;
3397 struct path path = { .mnt = mnt, .dentry = dentry };
3398 LIST_HEAD(list);
3399 @@ -690,8 +691,6 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
3400
3401 inode_lock_nested(dir, I_MUTEX_PARENT);
3402 list_for_each_entry(p, &list, l_node) {
3403 - struct dentry *index;
3404 -
3405 if (p->name[0] == '.') {
3406 if (p->len == 1)
3407 continue;
3408 @@ -701,6 +700,7 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
3409 index = lookup_one_len(p->name, dentry, p->len);
3410 if (IS_ERR(index)) {
3411 err = PTR_ERR(index);
3412 + index = NULL;
3413 break;
3414 }
3415 err = ovl_verify_index(index, lowerstack, numlower);
3416 @@ -712,7 +712,9 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
3417 break;
3418 }
3419 dput(index);
3420 + index = NULL;
3421 }
3422 + dput(index);
3423 inode_unlock(dir);
3424 out:
3425 ovl_cache_free(&list);
3426 diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
3427 index d86e89f97201..a1464905c1ea 100644
3428 --- a/fs/overlayfs/super.c
3429 +++ b/fs/overlayfs/super.c
3430 @@ -210,9 +210,10 @@ static void ovl_put_super(struct super_block *sb)
3431
3432 dput(ufs->indexdir);
3433 dput(ufs->workdir);
3434 - ovl_inuse_unlock(ufs->workbasedir);
3435 + if (ufs->workdir_locked)
3436 + ovl_inuse_unlock(ufs->workbasedir);
3437 dput(ufs->workbasedir);
3438 - if (ufs->upper_mnt)
3439 + if (ufs->upper_mnt && ufs->upperdir_locked)
3440 ovl_inuse_unlock(ufs->upper_mnt->mnt_root);
3441 mntput(ufs->upper_mnt);
3442 for (i = 0; i < ufs->numlower; i++)
3443 @@ -880,9 +881,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
3444 goto out_put_upperpath;
3445
3446 err = -EBUSY;
3447 - if (!ovl_inuse_trylock(upperpath.dentry)) {
3448 - pr_err("overlayfs: upperdir is in-use by another mount\n");
3449 + if (ovl_inuse_trylock(upperpath.dentry)) {
3450 + ufs->upperdir_locked = true;
3451 + } else if (ufs->config.index) {
3452 + pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
3453 goto out_put_upperpath;
3454 + } else {
3455 + pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
3456 }
3457
3458 err = ovl_mount_dir(ufs->config.workdir, &workpath);
3459 @@ -900,9 +905,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
3460 }
3461
3462 err = -EBUSY;
3463 - if (!ovl_inuse_trylock(workpath.dentry)) {
3464 - pr_err("overlayfs: workdir is in-use by another mount\n");
3465 + if (ovl_inuse_trylock(workpath.dentry)) {
3466 + ufs->workdir_locked = true;
3467 + } else if (ufs->config.index) {
3468 + pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
3469 goto out_put_workpath;
3470 + } else {
3471 + pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
3472 }
3473
3474 ufs->workbasedir = workpath.dentry;
3475 @@ -1155,11 +1164,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
3476 out_free_lowertmp:
3477 kfree(lowertmp);
3478 out_unlock_workdentry:
3479 - ovl_inuse_unlock(workpath.dentry);
3480 + if (ufs->workdir_locked)
3481 + ovl_inuse_unlock(workpath.dentry);
3482 out_put_workpath:
3483 path_put(&workpath);
3484 out_unlock_upperdentry:
3485 - ovl_inuse_unlock(upperpath.dentry);
3486 + if (ufs->upperdir_locked)
3487 + ovl_inuse_unlock(upperpath.dentry);
3488 out_put_upperpath:
3489 path_put(&upperpath);
3490 out_free_config:
3491 diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
3492 index f46ad75dc96a..8bff64f14190 100644
3493 --- a/fs/overlayfs/util.c
3494 +++ b/fs/overlayfs/util.c
3495 @@ -418,7 +418,7 @@ void ovl_inuse_unlock(struct dentry *dentry)
3496 }
3497 }
3498
3499 -/* Called must hold OVL_I(inode)->oi_lock */
3500 +/* Caller must hold OVL_I(inode)->lock */
3501 static void ovl_cleanup_index(struct dentry *dentry)
3502 {
3503 struct inode *dir = ovl_indexdir(dentry->d_sb)->d_inode;
3504 @@ -457,6 +457,9 @@ static void ovl_cleanup_index(struct dentry *dentry)
3505 err = PTR_ERR(index);
3506 if (!IS_ERR(index))
3507 err = ovl_cleanup(dir, index);
3508 + else
3509 + index = NULL;
3510 +
3511 inode_unlock(dir);
3512 if (err)
3513 goto fail;
3514 @@ -545,3 +548,22 @@ void ovl_nlink_end(struct dentry *dentry, bool locked)
3515 mutex_unlock(&OVL_I(d_inode(dentry))->lock);
3516 }
3517 }
3518 +
3519 +int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir)
3520 +{
3521 + /* Workdir should not be the same as upperdir */
3522 + if (workdir == upperdir)
3523 + goto err;
3524 +
3525 + /* Workdir should not be subdir of upperdir and vice versa */
3526 + if (lock_rename(workdir, upperdir) != NULL)
3527 + goto err_unlock;
3528 +
3529 + return 0;
3530 +
3531 +err_unlock:
3532 + unlock_rename(workdir, upperdir);
3533 +err:
3534 + pr_err("overlayfs: failed to lock workdir+upperdir\n");
3535 + return -EIO;
3536 +}
3537 diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
3538 index b0d5897bc4e6..be795bf20147 100644
3539 --- a/fs/userfaultfd.c
3540 +++ b/fs/userfaultfd.c
3541 @@ -566,6 +566,12 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
3542 break;
3543 if (ACCESS_ONCE(ctx->released) ||
3544 fatal_signal_pending(current)) {
3545 + /*
3546 + * &ewq->wq may be queued in fork_event, but
3547 + * __remove_wait_queue ignores the head
3548 + * parameter. It would be a problem if it
3549 + * didn't.
3550 + */
3551 __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
3552 if (ewq->msg.event == UFFD_EVENT_FORK) {
3553 struct userfaultfd_ctx *new;
3554 @@ -1039,6 +1045,12 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
3555 (unsigned long)
3556 uwq->msg.arg.reserved.reserved1;
3557 list_move(&uwq->wq.entry, &fork_event);
3558 + /*
3559 + * fork_nctx can be freed as soon as
3560 + * we drop the lock, unless we take a
3561 + * reference on it.
3562 + */
3563 + userfaultfd_ctx_get(fork_nctx);
3564 spin_unlock(&ctx->event_wqh.lock);
3565 ret = 0;
3566 break;
3567 @@ -1069,19 +1081,53 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
3568
3569 if (!ret && msg->event == UFFD_EVENT_FORK) {
3570 ret = resolve_userfault_fork(ctx, fork_nctx, msg);
3571 + spin_lock(&ctx->event_wqh.lock);
3572 + if (!list_empty(&fork_event)) {
3573 + /*
3574 + * The fork thread didn't abort, so we can
3575 + * drop the temporary refcount.
3576 + */
3577 + userfaultfd_ctx_put(fork_nctx);
3578 +
3579 + uwq = list_first_entry(&fork_event,
3580 + typeof(*uwq),
3581 + wq.entry);
3582 + /*
3583 + * If fork_event list wasn't empty and in turn
3584 + * the event wasn't already released by fork
3585 + * (the event is allocated on fork kernel
3586 + * stack), put the event back to its place in
3587 + * the event_wq. fork_event head will be freed
3588 + * as soon as we return so the event cannot
3589 + * stay queued there no matter the current
3590 + * "ret" value.
3591 + */
3592 + list_del(&uwq->wq.entry);
3593 + __add_wait_queue(&ctx->event_wqh, &uwq->wq);
3594
3595 - if (!ret) {
3596 - spin_lock(&ctx->event_wqh.lock);
3597 - if (!list_empty(&fork_event)) {
3598 - uwq = list_first_entry(&fork_event,
3599 - typeof(*uwq),
3600 - wq.entry);
3601 - list_del(&uwq->wq.entry);
3602 - __add_wait_queue(&ctx->event_wqh, &uwq->wq);
3603 + /*
3604 + * Leave the event in the waitqueue and report
3605 + * error to userland if we failed to resolve
3606 + * the userfault fork.
3607 + */
3608 + if (likely(!ret))
3609 userfaultfd_event_complete(ctx, uwq);
3610 - }
3611 - spin_unlock(&ctx->event_wqh.lock);
3612 + } else {
3613 + /*
3614 + * Here the fork thread aborted and the
3615 + * refcount from the fork thread on fork_nctx
3616 + * has already been released. We still hold
3617 + * the reference we took before releasing the
3618 + * lock above. If resolve_userfault_fork
3619 + * failed we have to drop it because the
3620 + * fork_nctx has to be freed in that case. If
3621 + * it succeeded we'll hold it because the new
3622 + * uffd references it.
3623 + */
3624 + if (ret)
3625 + userfaultfd_ctx_put(fork_nctx);
3626 }
3627 + spin_unlock(&ctx->event_wqh.lock);
3628 }
3629
3630 return ret;
3631 diff --git a/fs/xattr.c b/fs/xattr.c
3632 index 464c94bf65f9..5441a6d95396 100644
3633 --- a/fs/xattr.c
3634 +++ b/fs/xattr.c
3635 @@ -249,7 +249,7 @@ xattr_getsecurity(struct inode *inode, const char *name, void *value,
3636 }
3637 memcpy(value, buffer, len);
3638 out:
3639 - security_release_secctx(buffer, len);
3640 + kfree(buffer);
3641 out_noalloc:
3642 return len;
3643 }
3644 diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
3645 index 0504ef8f3aa3..976f8ac26665 100644
3646 --- a/include/asm-generic/percpu.h
3647 +++ b/include/asm-generic/percpu.h
3648 @@ -115,15 +115,35 @@ do { \
3649 (__ret); \
3650 })
3651
3652 -#define this_cpu_generic_read(pcp) \
3653 +#define __this_cpu_generic_read_nopreempt(pcp) \
3654 ({ \
3655 typeof(pcp) __ret; \
3656 preempt_disable_notrace(); \
3657 - __ret = raw_cpu_generic_read(pcp); \
3658 + __ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
3659 preempt_enable_notrace(); \
3660 __ret; \
3661 })
3662
3663 +#define __this_cpu_generic_read_noirq(pcp) \
3664 +({ \
3665 + typeof(pcp) __ret; \
3666 + unsigned long __flags; \
3667 + raw_local_irq_save(__flags); \
3668 + __ret = raw_cpu_generic_read(pcp); \
3669 + raw_local_irq_restore(__flags); \
3670 + __ret; \
3671 +})
3672 +
3673 +#define this_cpu_generic_read(pcp) \
3674 +({ \
3675 + typeof(pcp) __ret; \
3676 + if (__native_word(pcp)) \
3677 + __ret = __this_cpu_generic_read_nopreempt(pcp); \
3678 + else \
3679 + __ret = __this_cpu_generic_read_noirq(pcp); \
3680 + __ret; \
3681 +})
3682 +
3683 #define this_cpu_generic_to_op(pcp, val, op) \
3684 do { \
3685 unsigned long __flags; \
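The percpu.h change above splits the generic this_cpu read into a preempt-off fast path for native-word-sized variables and an irq-off slow path for anything larger, so a multi-word value cannot be torn by an interrupt mid-read. The sketch below re-implements the size test in userspace; the macro body only approximates the kernel's __native_word():

#include <stdio.h>

#define native_word(t) \
	(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
	 sizeof(t) == sizeof(int)  || sizeof(t) == sizeof(long))

struct pair { unsigned long lo, hi; };	/* two words: needs the irq-off path */

int main(void)
{
	printf("unsigned long -> fast (preempt-off) path: %s\n",
	       native_word(unsigned long) ? "yes" : "no");
	printf("struct pair   -> fast (preempt-off) path: %s\n",
	       native_word(struct pair) ? "yes" : "no");
	return 0;
}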
3686 diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
3687 index 5ba430cc9a87..1fc7abd28b0b 100644
3688 --- a/include/linux/iio/adc/ad_sigma_delta.h
3689 +++ b/include/linux/iio/adc/ad_sigma_delta.h
3690 @@ -111,6 +111,9 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
3691 int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
3692 unsigned int size, unsigned int *val);
3693
3694 +int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
3695 + unsigned int reset_length);
3696 +
3697 int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
3698 const struct iio_chan_spec *chan, int *val);
3699 int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
3700 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
3701 index 7b2e31b1745a..6866e8126982 100644
3702 --- a/include/linux/mmu_notifier.h
3703 +++ b/include/linux/mmu_notifier.h
3704 @@ -400,6 +400,11 @@ extern void mmu_notifier_synchronize(void);
3705
3706 #else /* CONFIG_MMU_NOTIFIER */
3707
3708 +static inline int mm_has_notifiers(struct mm_struct *mm)
3709 +{
3710 + return 0;
3711 +}
3712 +
3713 static inline void mmu_notifier_release(struct mm_struct *mm)
3714 {
3715 }
3716 diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
3717 index 5012b524283d..60248d644b6f 100644
3718 --- a/include/linux/trace_events.h
3719 +++ b/include/linux/trace_events.h
3720 @@ -277,6 +277,7 @@ struct trace_event_call {
3721 int perf_refcount;
3722 struct hlist_head __percpu *perf_events;
3723 struct bpf_prog *prog;
3724 + struct perf_event *bpf_prog_owner;
3725
3726 int (*perf_perm)(struct trace_event_call *,
3727 struct perf_event *);
3728 diff --git a/include/net/netlink.h b/include/net/netlink.h
3729 index ef8e6c3a80a6..4c72c7866da5 100644
3730 --- a/include/net/netlink.h
3731 +++ b/include/net/netlink.h
3732 @@ -768,7 +768,10 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
3733 */
3734 static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
3735 {
3736 - return nla_put(skb, attrtype, sizeof(u8), &value);
3737 + /* temporary variables to work around GCC PR81715 with asan-stack=1 */
3738 + u8 tmp = value;
3739 +
3740 + return nla_put(skb, attrtype, sizeof(u8), &tmp);
3741 }
3742
3743 /**
3744 @@ -779,7 +782,9 @@ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
3745 */
3746 static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
3747 {
3748 - return nla_put(skb, attrtype, sizeof(u16), &value);
3749 + u16 tmp = value;
3750 +
3751 + return nla_put(skb, attrtype, sizeof(u16), &tmp);
3752 }
3753
3754 /**
3755 @@ -790,7 +795,9 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
3756 */
3757 static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
3758 {
3759 - return nla_put(skb, attrtype, sizeof(__be16), &value);
3760 + __be16 tmp = value;
3761 +
3762 + return nla_put(skb, attrtype, sizeof(__be16), &tmp);
3763 }
3764
3765 /**
3766 @@ -801,7 +808,9 @@ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
3767 */
3768 static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
3769 {
3770 - return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
3771 + __be16 tmp = value;
3772 +
3773 + return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
3774 }
3775
3776 /**
3777 @@ -812,7 +821,9 @@ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
3778 */
3779 static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
3780 {
3781 - return nla_put(skb, attrtype, sizeof(__le16), &value);
3782 + __le16 tmp = value;
3783 +
3784 + return nla_put(skb, attrtype, sizeof(__le16), &tmp);
3785 }
3786
3787 /**
3788 @@ -823,7 +834,9 @@ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
3789 */
3790 static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
3791 {
3792 - return nla_put(skb, attrtype, sizeof(u32), &value);
3793 + u32 tmp = value;
3794 +
3795 + return nla_put(skb, attrtype, sizeof(u32), &tmp);
3796 }
3797
3798 /**
3799 @@ -834,7 +847,9 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
3800 */
3801 static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
3802 {
3803 - return nla_put(skb, attrtype, sizeof(__be32), &value);
3804 + __be32 tmp = value;
3805 +
3806 + return nla_put(skb, attrtype, sizeof(__be32), &tmp);
3807 }
3808
3809 /**
3810 @@ -845,7 +860,9 @@ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
3811 */
3812 static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
3813 {
3814 - return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
3815 + __be32 tmp = value;
3816 +
3817 + return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
3818 }
3819
3820 /**
3821 @@ -856,7 +873,9 @@ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
3822 */
3823 static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
3824 {
3825 - return nla_put(skb, attrtype, sizeof(__le32), &value);
3826 + __le32 tmp = value;
3827 +
3828 + return nla_put(skb, attrtype, sizeof(__le32), &tmp);
3829 }
3830
3831 /**
3832 @@ -869,7 +888,9 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
3833 static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
3834 u64 value, int padattr)
3835 {
3836 - return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr);
3837 + u64 tmp = value;
3838 +
3839 + return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
3840 }
3841
3842 /**
3843 @@ -882,7 +903,9 @@ static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
3844 static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
3845 int padattr)
3846 {
3847 - return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr);
3848 + __be64 tmp = value;
3849 +
3850 + return nla_put_64bit(skb, attrtype, sizeof(__be64), &tmp, padattr);
3851 }
3852
3853 /**
3854 @@ -895,7 +918,9 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
3855 static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
3856 int padattr)
3857 {
3858 - return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value,
3859 + __be64 tmp = value;
3860 +
3861 + return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp,
3862 padattr);
3863 }
3864
3865 @@ -909,7 +934,9 @@ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
3866 static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
3867 int padattr)
3868 {
3869 - return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr);
3870 + __le64 tmp = value;
3871 +
3872 + return nla_put_64bit(skb, attrtype, sizeof(__le64), &tmp, padattr);
3873 }
3874
3875 /**
3876 @@ -920,7 +947,9 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
3877 */
3878 static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
3879 {
3880 - return nla_put(skb, attrtype, sizeof(s8), &value);
3881 + s8 tmp = value;
3882 +
3883 + return nla_put(skb, attrtype, sizeof(s8), &tmp);
3884 }
3885
3886 /**
3887 @@ -931,7 +960,9 @@ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
3888 */
3889 static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
3890 {
3891 - return nla_put(skb, attrtype, sizeof(s16), &value);
3892 + s16 tmp = value;
3893 +
3894 + return nla_put(skb, attrtype, sizeof(s16), &tmp);
3895 }
3896
3897 /**
3898 @@ -942,7 +973,9 @@ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
3899 */
3900 static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
3901 {
3902 - return nla_put(skb, attrtype, sizeof(s32), &value);
3903 + s32 tmp = value;
3904 +
3905 + return nla_put(skb, attrtype, sizeof(s32), &tmp);
3906 }
3907
3908 /**
3909 @@ -955,7 +988,9 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
3910 static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
3911 int padattr)
3912 {
3913 - return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr);
3914 + s64 tmp = value;
3915 +
3916 + return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr);
3917 }
3918
3919 /**
3920 @@ -1005,7 +1040,9 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
3921 static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
3922 __be32 addr)
3923 {
3924 - return nla_put_be32(skb, attrtype, addr);
3925 + __be32 tmp = addr;
3926 +
3927 + return nla_put_be32(skb, attrtype, tmp);
3928 }
3929
3930 /**
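The netlink.h hunk above applies one pattern throughout: copy the by-value argument into a local temporary and pass the temporary's address to nla_put(), which works around the GCC PR81715 stack-frame blow-up seen with asan-stack=1. A self-contained sketch of the same pattern, where put_bytes() is just a stand-in for nla_put():

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static int put_bytes(const void *data, size_t len)
{
	unsigned char buf[8];

	memcpy(buf, data, len);
	printf("stored %zu byte(s), first byte 0x%02x\n", len, buf[0]);
	return 0;
}

static int put_u16(uint16_t value)
{
	uint16_t tmp = value;	/* local temporary, as in the patched helpers */

	return put_bytes(&tmp, sizeof(tmp));
}

int main(void)
{
	return put_u16(0x1234);
}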
3931 diff --git a/include/net/protocol.h b/include/net/protocol.h
3932 index 65ba335b0e7e..4fc75f7ae23b 100644
3933 --- a/include/net/protocol.h
3934 +++ b/include/net/protocol.h
3935 @@ -39,8 +39,8 @@
3936
3937 /* This is used to register protocols. */
3938 struct net_protocol {
3939 - void (*early_demux)(struct sk_buff *skb);
3940 - void (*early_demux_handler)(struct sk_buff *skb);
3941 + int (*early_demux)(struct sk_buff *skb);
3942 + int (*early_demux_handler)(struct sk_buff *skb);
3943 int (*handler)(struct sk_buff *skb);
3944 void (*err_handler)(struct sk_buff *skb, u32 info);
3945 unsigned int no_policy:1,
3946 diff --git a/include/net/route.h b/include/net/route.h
3947 index cb0a76d9dde1..58458966e31e 100644
3948 --- a/include/net/route.h
3949 +++ b/include/net/route.h
3950 @@ -175,7 +175,9 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4
3951 fl4->fl4_gre_key = gre_key;
3952 return ip_route_output_key(net, fl4);
3953 }
3954 -
3955 +int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
3956 + u8 tos, struct net_device *dev,
3957 + struct in_device *in_dev, u32 *itag);
3958 int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
3959 u8 tos, struct net_device *devin);
3960 int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
3961 diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
3962 index 1060494ac230..b8c86ec1a8f5 100644
3963 --- a/include/net/sctp/ulpevent.h
3964 +++ b/include/net/sctp/ulpevent.h
3965 @@ -153,8 +153,12 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
3966 static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
3967 struct sctp_event_subscribe *mask)
3968 {
3969 + int offset = sn_type - SCTP_SN_TYPE_BASE;
3970 char *amask = (char *) mask;
3971 - return amask[sn_type - SCTP_SN_TYPE_BASE];
3972 +
3973 + if (offset >= sizeof(struct sctp_event_subscribe))
3974 + return 0;
3975 + return amask[offset];
3976 }
3977
3978 /* Given an event subscription, is this event enabled? */
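The ulpevent.h change above stops sctp_ulpevent_type_enabled() from indexing past the end of struct sctp_event_subscribe when it is handed an unknown notification type. A userspace sketch of the same guard; the struct below is a stand-in, and this version also rejects negative offsets for good measure:

#include <stdio.h>
#include <string.h>

struct event_subscribe { char data_io; char association; char address; };

static int type_enabled(int offset, struct event_subscribe *mask)
{
	char *amask = (char *)mask;

	if (offset < 0 || (size_t)offset >= sizeof(*mask))
		return 0;			/* unknown event type: not enabled */
	return amask[offset];
}

int main(void)
{
	struct event_subscribe mask;

	memset(&mask, 1, sizeof(mask));
	printf("offset 1 enabled: %d\n", type_enabled(1, &mask));
	printf("offset 9 enabled: %d\n", type_enabled(9, &mask));	/* out of range */
	return 0;
}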
3979 diff --git a/include/net/tcp.h b/include/net/tcp.h
3980 index f642a39f9eee..48978125947b 100644
3981 --- a/include/net/tcp.h
3982 +++ b/include/net/tcp.h
3983 @@ -347,7 +347,7 @@ void tcp_v4_err(struct sk_buff *skb, u32);
3984
3985 void tcp_shutdown(struct sock *sk, int how);
3986
3987 -void tcp_v4_early_demux(struct sk_buff *skb);
3988 +int tcp_v4_early_demux(struct sk_buff *skb);
3989 int tcp_v4_rcv(struct sk_buff *skb);
3990
3991 int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
3992 diff --git a/include/net/udp.h b/include/net/udp.h
3993 index 626c2d8a70c5..1e6b2476d427 100644
3994 --- a/include/net/udp.h
3995 +++ b/include/net/udp.h
3996 @@ -259,7 +259,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
3997 return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err);
3998 }
3999
4000 -void udp_v4_early_demux(struct sk_buff *skb);
4001 +int udp_v4_early_demux(struct sk_buff *skb);
4002 bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
4003 int udp_get_port(struct sock *sk, unsigned short snum,
4004 int (*saddr_cmp)(const struct sock *,
4005 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
4006 index 0979a5f3b69a..6ff1bab23679 100644
4007 --- a/include/scsi/scsi_device.h
4008 +++ b/include/scsi/scsi_device.h
4009 @@ -182,6 +182,7 @@ struct scsi_device {
4010 unsigned no_dif:1; /* T10 PI (DIF) should be disabled */
4011 unsigned broken_fua:1; /* Don't set FUA bit */
4012 unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */
4013 + unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */
4014
4015 atomic_t disk_events_disable_depth; /* disable depth for disk events */
4016
4017 diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
4018 index 9592570e092a..36b03013d629 100644
4019 --- a/include/scsi/scsi_devinfo.h
4020 +++ b/include/scsi/scsi_devinfo.h
4021 @@ -29,5 +29,6 @@
4022 #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
4023 #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
4024 #define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
4025 +#define BLIST_UNMAP_LIMIT_WS 0x80000000 /* Use UNMAP limit for WRITE SAME */
4026
4027 #endif
4028 diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
4029 index e99e3e6f8b37..f0add86219f0 100644
4030 --- a/include/uapi/linux/bpf.h
4031 +++ b/include/uapi/linux/bpf.h
4032 @@ -294,7 +294,7 @@ union bpf_attr {
4033 * jump into another BPF program
4034 * @ctx: context pointer passed to next program
4035 * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
4036 - * @index: index inside array that selects specific program to run
4037 + * @index: 32-bit index inside array that selects specific program to run
4038 * Return: 0 on success or negative error
4039 *
4040 * int bpf_clone_redirect(skb, ifindex, flags)
4041 diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
4042 index 412c06a624c8..ccaea525340b 100644
4043 --- a/include/uapi/linux/dm-ioctl.h
4044 +++ b/include/uapi/linux/dm-ioctl.h
4045 @@ -269,9 +269,9 @@ enum {
4046 #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
4047
4048 #define DM_VERSION_MAJOR 4
4049 -#define DM_VERSION_MINOR 36
4050 +#define DM_VERSION_MINOR 37
4051 #define DM_VERSION_PATCHLEVEL 0
4052 -#define DM_VERSION_EXTRA "-ioctl (2017-06-09)"
4053 +#define DM_VERSION_EXTRA "-ioctl (2017-09-20)"
4054
4055 /* Status bits */
4056 #define DM_READONLY_FLAG (1 << 0) /* In/Out */
4057 diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
4058 index ce1169af39d7..2a5d63040a0b 100644
4059 --- a/include/uapi/linux/usb/ch9.h
4060 +++ b/include/uapi/linux/usb/ch9.h
4061 @@ -780,6 +780,7 @@ struct usb_interface_assoc_descriptor {
4062 __u8 iFunction;
4063 } __attribute__ ((packed));
4064
4065 +#define USB_DT_INTERFACE_ASSOCIATION_SIZE 8
4066
4067 /*-------------------------------------------------------------------------*/
4068
4069 diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
4070 index ad5f55922a13..9a1bed1f3029 100644
4071 --- a/kernel/bpf/core.c
4072 +++ b/kernel/bpf/core.c
4073 @@ -1010,7 +1010,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
4074 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
4075 struct bpf_array *array = container_of(map, struct bpf_array, map);
4076 struct bpf_prog *prog;
4077 - u64 index = BPF_R3;
4078 + u32 index = BPF_R3;
4079
4080 if (unlikely(index >= array->map.max_entries))
4081 goto out;
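The core.c change above makes the interpreter read the tail-call index from R3 as a 32-bit value; as far as the fix goes, the JITs already compared only the low 32 bits, so keeping the index 64-bit let the same program pass or fail the bounds check differently depending on which engine ran it. A small demonstration of that divergence:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t r3 = 0x100000001ULL;	/* low 32 bits are 1 */
	uint32_t max_entries = 4;

	uint64_t index64 = r3;		/* old interpreter behaviour */
	uint32_t index32 = r3;		/* patched behaviour, matching the JITs */

	printf("64-bit index in range: %s\n", index64 < max_entries ? "yes" : "no");
	printf("32-bit index in range: %s\n", index32 < max_entries ? "yes" : "no");
	return 0;
}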
4082 diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
4083 index 6c772adabad2..1939d91da1f8 100644
4084 --- a/kernel/bpf/syscall.c
4085 +++ b/kernel/bpf/syscall.c
4086 @@ -144,15 +144,17 @@ static int bpf_map_alloc_id(struct bpf_map *map)
4087
4088 static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
4089 {
4090 + unsigned long flags;
4091 +
4092 if (do_idr_lock)
4093 - spin_lock_bh(&map_idr_lock);
4094 + spin_lock_irqsave(&map_idr_lock, flags);
4095 else
4096 __acquire(&map_idr_lock);
4097
4098 idr_remove(&map_idr, map->id);
4099
4100 if (do_idr_lock)
4101 - spin_unlock_bh(&map_idr_lock);
4102 + spin_unlock_irqrestore(&map_idr_lock, flags);
4103 else
4104 __release(&map_idr_lock);
4105 }
4106 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
4107 index 664d93972373..3940019b9740 100644
4108 --- a/kernel/bpf/verifier.c
4109 +++ b/kernel/bpf/verifier.c
4110 @@ -1978,7 +1978,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
4111 }
4112 } else {
4113 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
4114 - (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
4115 + (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
4116 + BPF_CLASS(insn->code) == BPF_ALU64) {
4117 verbose("BPF_END uses reserved fields\n");
4118 return -EINVAL;
4119 }
4120 diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
4121 index df2e0f14a95d..6d60aafbe8c1 100644
4122 --- a/kernel/cgroup/cgroup.c
4123 +++ b/kernel/cgroup/cgroup.c
4124 @@ -2168,6 +2168,14 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
4125 list_del_init(&cset->mg_node);
4126 }
4127 spin_unlock_irq(&css_set_lock);
4128 +
4129 + /*
4130 + * Re-initialize the cgroup_taskset structure in case it is reused
4131 + * again in another cgroup_migrate_add_task()/cgroup_migrate_execute()
4132 + * iteration.
4133 + */
4134 + tset->nr_tasks = 0;
4135 + tset->csets = &tset->src_csets;
4136 return ret;
4137 }
4138
4139 diff --git a/kernel/events/core.c b/kernel/events/core.c
4140 index 03ac9c8b02fb..7242a6e1ec76 100644
4141 --- a/kernel/events/core.c
4142 +++ b/kernel/events/core.c
4143 @@ -8121,6 +8121,7 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
4144 }
4145 }
4146 event->tp_event->prog = prog;
4147 + event->tp_event->bpf_prog_owner = event;
4148
4149 return 0;
4150 }
4151 @@ -8135,7 +8136,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
4152 return;
4153
4154 prog = event->tp_event->prog;
4155 - if (prog) {
4156 + if (prog && event->tp_event->bpf_prog_owner == event) {
4157 event->tp_event->prog = NULL;
4158 bpf_prog_put(prog);
4159 }
4160 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
4161 index 725819569fa7..2ee3e3345ff3 100644
4162 --- a/kernel/trace/ftrace.c
4163 +++ b/kernel/trace/ftrace.c
4164 @@ -4954,9 +4954,6 @@ static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
4165 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4166 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
4167
4168 -static unsigned long save_global_trampoline;
4169 -static unsigned long save_global_flags;
4170 -
4171 static int __init set_graph_function(char *str)
4172 {
4173 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
4174 @@ -6756,17 +6753,6 @@ void unregister_ftrace_graph(void)
4175 unregister_pm_notifier(&ftrace_suspend_notifier);
4176 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4177
4178 -#ifdef CONFIG_DYNAMIC_FTRACE
4179 - /*
4180 - * Function graph does not allocate the trampoline, but
4181 - * other global_ops do. We need to reset the ALLOC_TRAMP flag
4182 - * if one was used.
4183 - */
4184 - global_ops.trampoline = save_global_trampoline;
4185 - if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
4186 - global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
4187 -#endif
4188 -
4189 out:
4190 mutex_unlock(&ftrace_lock);
4191 }
4192 diff --git a/lib/ratelimit.c b/lib/ratelimit.c
4193 index 08f8043cac61..d01f47135239 100644
4194 --- a/lib/ratelimit.c
4195 +++ b/lib/ratelimit.c
4196 @@ -48,7 +48,9 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
4197 if (time_is_before_jiffies(rs->begin + rs->interval)) {
4198 if (rs->missed) {
4199 if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
4200 - pr_warn("%s: %d callbacks suppressed\n", func, rs->missed);
4201 + printk_deferred(KERN_WARNING
4202 + "%s: %d callbacks suppressed\n",
4203 + func, rs->missed);
4204 rs->missed = 0;
4205 }
4206 }
4207 diff --git a/mm/ksm.c b/mm/ksm.c
4208 index db20f8436bc3..86f0db3d6cdb 100644
4209 --- a/mm/ksm.c
4210 +++ b/mm/ksm.c
4211 @@ -1990,6 +1990,7 @@ static void stable_tree_append(struct rmap_item *rmap_item,
4212 */
4213 static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
4214 {
4215 + struct mm_struct *mm = rmap_item->mm;
4216 struct rmap_item *tree_rmap_item;
4217 struct page *tree_page = NULL;
4218 struct stable_node *stable_node;
4219 @@ -2062,9 +2063,11 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
4220 if (ksm_use_zero_pages && (checksum == zero_checksum)) {
4221 struct vm_area_struct *vma;
4222
4223 - vma = find_mergeable_vma(rmap_item->mm, rmap_item->address);
4224 + down_read(&mm->mmap_sem);
4225 + vma = find_mergeable_vma(mm, rmap_item->address);
4226 err = try_to_merge_one_page(vma, page,
4227 ZERO_PAGE(rmap_item->address));
4228 + up_read(&mm->mmap_sem);
4229 /*
4230 * In case of failure, the page was not really empty, so we
4231 * need to continue. Otherwise we're done.
4232 diff --git a/mm/madvise.c b/mm/madvise.c
4233 index 4d7d1e5ddba9..4edca1d86339 100644
4234 --- a/mm/madvise.c
4235 +++ b/mm/madvise.c
4236 @@ -614,18 +614,26 @@ static int madvise_inject_error(int behavior,
4237 {
4238 struct page *page;
4239 struct zone *zone;
4240 + unsigned int order;
4241
4242 if (!capable(CAP_SYS_ADMIN))
4243 return -EPERM;
4244
4245 - for (; start < end; start += PAGE_SIZE <<
4246 - compound_order(compound_head(page))) {
4247 +
4248 + for (; start < end; start += PAGE_SIZE << order) {
4249 int ret;
4250
4251 ret = get_user_pages_fast(start, 1, 0, &page);
4252 if (ret != 1)
4253 return ret;
4254
4255 + /*
4256 + * When soft offlining hugepages, after migrating the page
4257 + * we dissolve it, therefore in the second loop "page" will
4258 + * no longer be a compound page, and order will be 0.
4259 + */
4260 + order = compound_order(compound_head(page));
4261 +
4262 if (PageHWPoison(page)) {
4263 put_page(page);
4264 continue;
4265 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
4266 index 9e8b4f030c1c..5f6a52903770 100644
4267 --- a/mm/oom_kill.c
4268 +++ b/mm/oom_kill.c
4269 @@ -40,6 +40,7 @@
4270 #include <linux/ratelimit.h>
4271 #include <linux/kthread.h>
4272 #include <linux/init.h>
4273 +#include <linux/mmu_notifier.h>
4274
4275 #include <asm/tlb.h>
4276 #include "internal.h"
4277 @@ -494,6 +495,21 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
4278 goto unlock_oom;
4279 }
4280
4281 + /*
4282 + * If the mm has notifiers then we would need to invalidate them around
4283 + * unmap_page_range and that is risky because notifiers can sleep and
4284 + * what they do is basically nondeterministic. So let's have a short
4285 + * sleep to give the oom victim some more time.
4286 + * TODO: we really want to get rid of this ugly hack and make sure that
4287 + * notifiers cannot block for an unbounded amount of time and add
4288 + * mmu_notifier_invalidate_range_{start,end} around unmap_page_range
4289 + */
4290 + if (mm_has_notifiers(mm)) {
4291 + up_read(&mm->mmap_sem);
4292 + schedule_timeout_idle(HZ);
4293 + goto unlock_oom;
4294 + }
4295 +
4296 /*
4297 * increase mm_users only after we know we will reap something so
4298 * that the mmput_async is called only when we have reaped something
4299 diff --git a/mm/rodata_test.c b/mm/rodata_test.c
4300 index 6bb4deb12e78..d908c8769b48 100644
4301 --- a/mm/rodata_test.c
4302 +++ b/mm/rodata_test.c
4303 @@ -14,7 +14,7 @@
4304 #include <linux/uaccess.h>
4305 #include <asm/sections.h>
4306
4307 -const int rodata_test_data = 0xC3;
4308 +static const int rodata_test_data = 0xC3;
4309
4310 void rodata_test(void)
4311 {
4312 diff --git a/mm/swap.c b/mm/swap.c
4313 index 60b1d2a75852..ea84f04d75a4 100644
4314 --- a/mm/swap.c
4315 +++ b/mm/swap.c
4316 @@ -575,7 +575,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
4317 void *arg)
4318 {
4319 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
4320 - !PageUnevictable(page)) {
4321 + !PageSwapCache(page) && !PageUnevictable(page)) {
4322 bool active = PageActive(page);
4323
4324 del_page_from_lru_list(page, lruvec,
4325 @@ -665,7 +665,7 @@ void deactivate_file_page(struct page *page)
4326 void mark_page_lazyfree(struct page *page)
4327 {
4328 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
4329 - !PageUnevictable(page)) {
4330 + !PageSwapCache(page) && !PageUnevictable(page)) {
4331 struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
4332
4333 get_page(page);
4334 diff --git a/mm/swap_state.c b/mm/swap_state.c
4335 index b68c93014f50..fe9309ba948c 100644
4336 --- a/mm/swap_state.c
4337 +++ b/mm/swap_state.c
4338 @@ -219,6 +219,17 @@ int add_to_swap(struct page *page)
4339 * clear SWAP_HAS_CACHE flag.
4340 */
4341 goto fail;
4342 + /*
4343 + * Normally the page will be dirtied in unmap because its pte should be
4344 + * dirty. A special case is a MADV_FREE page. The page's pte could have
4345 + * the dirty bit cleared but the page's SwapBacked bit is still set because
4346 + * clearing the dirty bit and the SwapBacked bit is not protected by a lock.
4347 + * For such a page, unmap will not set the dirty bit, so page reclaim will
4348 + * not write the page out. This can cause data corruption when the page
4349 + * is swapped in later. Always setting the dirty bit for the page solves
4350 + * the problem.
4351 + */
4352 + set_page_dirty(page);
4353
4354 return 1;
4355
4356 diff --git a/net/core/dev.c b/net/core/dev.c
4357 index 86b4b0a79e7a..6fa30a4c60ef 100644
4358 --- a/net/core/dev.c
4359 +++ b/net/core/dev.c
4360 @@ -4408,6 +4408,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4361 __skb_pull(skb, off);
4362 else if (off < 0)
4363 __skb_push(skb, -off);
4364 + skb->mac_header += off;
4365
4366 switch (act) {
4367 case XDP_TX:
4368 diff --git a/net/core/filter.c b/net/core/filter.c
4369 index 169974998c76..18d591f1ae5a 100644
4370 --- a/net/core/filter.c
4371 +++ b/net/core/filter.c
4372 @@ -975,10 +975,14 @@ static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
4373
4374 bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
4375 {
4376 - bool ret = __sk_filter_charge(sk, fp);
4377 - if (ret)
4378 - refcount_inc(&fp->refcnt);
4379 - return ret;
4380 + if (!refcount_inc_not_zero(&fp->refcnt))
4381 + return false;
4382 +
4383 + if (!__sk_filter_charge(sk, fp)) {
4384 + sk_filter_release(fp);
4385 + return false;
4386 + }
4387 + return true;
4388 }
4389
4390 static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
4391 diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
4392 index 0385dece1f6f..7c1ffd6f9501 100644
4393 --- a/net/core/gen_estimator.c
4394 +++ b/net/core/gen_estimator.c
4395 @@ -83,10 +83,10 @@ static void est_timer(unsigned long arg)
4396 u64 rate, brate;
4397
4398 est_fetch_counters(est, &b);
4399 - brate = (b.bytes - est->last_bytes) << (8 - est->ewma_log);
4400 + brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
4401 brate -= (est->avbps >> est->ewma_log);
4402
4403 - rate = (u64)(b.packets - est->last_packets) << (8 - est->ewma_log);
4404 + rate = (u64)(b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
4405 rate -= (est->avpps >> est->ewma_log);
4406
4407 write_seqcount_begin(&est->seq);
4408 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
4409 index 9201e3621351..e07c8847c6cf 100644
4410 --- a/net/core/rtnetlink.c
4411 +++ b/net/core/rtnetlink.c
4412 @@ -3867,6 +3867,9 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
4413 return -EMSGSIZE;
4414
4415 ifsm = nlmsg_data(nlh);
4416 + ifsm->family = PF_UNSPEC;
4417 + ifsm->pad1 = 0;
4418 + ifsm->pad2 = 0;
4419 ifsm->ifindex = dev->ifindex;
4420 ifsm->filter_mask = filter_mask;
4421
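The rtnetlink.c hunk above initializes every header field, padding included, before the message reaches userspace, so no stale kernel memory leaks through nlmsg_data(). The sketch below fakes stale memory with memset() and shows the explicit clearing; the struct is an illustrative stand-in for if_stats_msg:

#include <stdio.h>
#include <string.h>

struct stats_msg {
	unsigned char  family;
	unsigned char  pad1;
	unsigned short pad2;
	int            ifindex;
};

static void fill_msg(struct stats_msg *m, int ifindex)
{
	m->family  = 0;		/* clear every field the receiver can see ... */
	m->pad1    = 0;
	m->pad2    = 0;		/* ... padding included, it is user-visible */
	m->ifindex = ifindex;
}

int main(void)
{
	struct stats_msg m;
	const unsigned char *p = (const unsigned char *)&m;

	memset(&m, 0xAA, sizeof(m));	/* simulate stale allocator contents */
	fill_msg(&m, 3);

	for (size_t i = 0; i < sizeof(m); i++)
		printf("%02x ", p[i]);
	printf("\n");
	return 0;
}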
4422 diff --git a/net/core/sock.c b/net/core/sock.c
4423 index ac2a404c73eb..0967da925022 100644
4424 --- a/net/core/sock.c
4425 +++ b/net/core/sock.c
4426 @@ -1646,6 +1646,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
4427
4428 sock_copy(newsk, sk);
4429
4430 + newsk->sk_prot_creator = sk->sk_prot;
4431 +
4432 /* SANITY */
4433 if (likely(newsk->sk_net_refcnt))
4434 get_net(sock_net(newsk));
4435 @@ -1673,13 +1675,16 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
4436
4437 sock_reset_flag(newsk, SOCK_DONE);
4438
4439 - filter = rcu_dereference_protected(newsk->sk_filter, 1);
4440 + rcu_read_lock();
4441 + filter = rcu_dereference(sk->sk_filter);
4442 if (filter != NULL)
4443 /* though it's an empty new sock, the charging may fail
4444 * if sysctl_optmem_max was changed between creation of
4445 * original socket and cloning
4446 */
4447 is_charged = sk_filter_charge(newsk, filter);
4448 + RCU_INIT_POINTER(newsk->sk_filter, filter);
4449 + rcu_read_unlock();
4450
4451 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
4452 /* We need to make sure that we don't uncharge the new
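The sock.c change above takes a reference on the parent's filter with refcount_inc_not_zero() under rcu_read_lock(), so a filter whose last reference is already gone is never resurrected for the clone. A plain-C sketch of the only-if-still-live pattern (this is the idea only, not the kernel's refcount_t):

#include <stdio.h>
#include <stdbool.h>

struct obj { int refcnt; };

static bool get_if_live(struct obj *o)
{
	if (o->refcnt == 0)
		return false;	/* object already being freed: do not touch it */
	o->refcnt++;
	return true;
}

int main(void)
{
	struct obj live = { .refcnt = 1 }, dead = { .refcnt = 0 };

	printf("live object grabbed: %s\n", get_if_live(&live) ? "yes" : "no");
	printf("dead object grabbed: %s\n", get_if_live(&dead) ? "yes" : "no");
	return 0;
}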
4453 diff --git a/net/dsa/slave.c b/net/dsa/slave.c
4454 index 9507bd38cf04..07677540129a 100644
4455 --- a/net/dsa/slave.c
4456 +++ b/net/dsa/slave.c
4457 @@ -1180,26 +1180,32 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
4458 p->old_duplex = -1;
4459
4460 ds->ports[port].netdev = slave_dev;
4461 - ret = register_netdev(slave_dev);
4462 - if (ret) {
4463 - netdev_err(master, "error %d registering interface %s\n",
4464 - ret, slave_dev->name);
4465 - ds->ports[port].netdev = NULL;
4466 - free_netdev(slave_dev);
4467 - return ret;
4468 - }
4469
4470 netif_carrier_off(slave_dev);
4471
4472 ret = dsa_slave_phy_setup(p, slave_dev);
4473 if (ret) {
4474 netdev_err(master, "error %d setting up slave phy\n", ret);
4475 - unregister_netdev(slave_dev);
4476 - free_netdev(slave_dev);
4477 - return ret;
4478 + goto out_free;
4479 + }
4480 +
4481 + ret = register_netdev(slave_dev);
4482 + if (ret) {
4483 + netdev_err(master, "error %d registering interface %s\n",
4484 + ret, slave_dev->name);
4485 + goto out_phy;
4486 }
4487
4488 return 0;
4489 +
4490 +out_phy:
4491 + phy_disconnect(p->phy);
4492 + if (of_phy_is_fixed_link(p->dp->dn))
4493 + of_phy_deregister_fixed_link(p->dp->dn);
4494 +out_free:
4495 + free_netdev(slave_dev);
4496 + ds->ports[port].netdev = NULL;
4497 + return ret;
4498 }
4499
4500 void dsa_slave_destroy(struct net_device *slave_dev)
4501 diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
4502 index fa2dc8f692c6..57fc13c6ab2b 100644
4503 --- a/net/ipv4/ip_input.c
4504 +++ b/net/ipv4/ip_input.c
4505 @@ -311,9 +311,10 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
4506 static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
4507 {
4508 const struct iphdr *iph = ip_hdr(skb);
4509 - struct rtable *rt;
4510 + int (*edemux)(struct sk_buff *skb);
4511 struct net_device *dev = skb->dev;
4512 - void (*edemux)(struct sk_buff *skb);
4513 + struct rtable *rt;
4514 + int err;
4515
4516 /* if ingress device is enslaved to an L3 master device pass the
4517 * skb to its handler for processing
4518 @@ -331,7 +332,9 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
4519
4520 ipprot = rcu_dereference(inet_protos[protocol]);
4521 if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
4522 - edemux(skb);
4523 + err = edemux(skb);
4524 + if (unlikely(err))
4525 + goto drop_error;
4526 /* must reload iph, skb->head might have changed */
4527 iph = ip_hdr(skb);
4528 }
4529 @@ -342,13 +345,10 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
4530 * how the packet travels inside Linux networking.
4531 */
4532 if (!skb_valid_dst(skb)) {
4533 - int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
4534 - iph->tos, dev);
4535 - if (unlikely(err)) {
4536 - if (err == -EXDEV)
4537 - __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
4538 - goto drop;
4539 - }
4540 + err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
4541 + iph->tos, dev);
4542 + if (unlikely(err))
4543 + goto drop_error;
4544 }
4545
4546 #ifdef CONFIG_IP_ROUTE_CLASSID
4547 @@ -399,6 +399,11 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
4548 drop:
4549 kfree_skb(skb);
4550 return NET_RX_DROP;
4551 +
4552 +drop_error:
4553 + if (err == -EXDEV)
4554 + __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
4555 + goto drop;
4556 }
4557
4558 /*
4559 diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
4560 index 0192c255e508..74bd46c5bda7 100644
4561 --- a/net/ipv4/ip_vti.c
4562 +++ b/net/ipv4/ip_vti.c
4563 @@ -168,6 +168,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
4564 struct ip_tunnel_parm *parms = &tunnel->parms;
4565 struct dst_entry *dst = skb_dst(skb);
4566 struct net_device *tdev; /* Device to other host */
4567 + int pkt_len = skb->len;
4568 int err;
4569 int mtu;
4570
4571 @@ -229,7 +230,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
4572
4573 err = dst_output(tunnel->net, skb->sk, skb);
4574 if (net_xmit_eval(err) == 0)
4575 - err = skb->len;
4576 + err = pkt_len;
4577 iptunnel_xmit_stats(dev, err);
4578 return NETDEV_TX_OK;
4579
4580 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
4581 index 2331de20ca50..c5aa25be7108 100644
4582 --- a/net/ipv4/route.c
4583 +++ b/net/ipv4/route.c
4584 @@ -1520,43 +1520,56 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
4585 EXPORT_SYMBOL(rt_dst_alloc);
4586
4587 /* called in rcu_read_lock() section */
4588 -static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
4589 - u8 tos, struct net_device *dev, int our)
4590 +int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
4591 + u8 tos, struct net_device *dev,
4592 + struct in_device *in_dev, u32 *itag)
4593 {
4594 - struct rtable *rth;
4595 - struct in_device *in_dev = __in_dev_get_rcu(dev);
4596 - unsigned int flags = RTCF_MULTICAST;
4597 - u32 itag = 0;
4598 int err;
4599
4600 /* Primary sanity checks. */
4601 -
4602 if (!in_dev)
4603 return -EINVAL;
4604
4605 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
4606 skb->protocol != htons(ETH_P_IP))
4607 - goto e_inval;
4608 + return -EINVAL;
4609
4610 if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
4611 - goto e_inval;
4612 + return -EINVAL;
4613
4614 if (ipv4_is_zeronet(saddr)) {
4615 if (!ipv4_is_local_multicast(daddr))
4616 - goto e_inval;
4617 + return -EINVAL;
4618 } else {
4619 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
4620 - in_dev, &itag);
4621 + in_dev, itag);
4622 if (err < 0)
4623 - goto e_err;
4624 + return err;
4625 }
4626 + return 0;
4627 +}
4628 +
4629 +/* called in rcu_read_lock() section */
4630 +static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
4631 + u8 tos, struct net_device *dev, int our)
4632 +{
4633 + struct in_device *in_dev = __in_dev_get_rcu(dev);
4634 + unsigned int flags = RTCF_MULTICAST;
4635 + struct rtable *rth;
4636 + u32 itag = 0;
4637 + int err;
4638 +
4639 + err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
4640 + if (err)
4641 + return err;
4642 +
4643 if (our)
4644 flags |= RTCF_LOCAL;
4645
4646 rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
4647 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
4648 if (!rth)
4649 - goto e_nobufs;
4650 + return -ENOBUFS;
4651
4652 #ifdef CONFIG_IP_ROUTE_CLASSID
4653 rth->dst.tclassid = itag;
4654 @@ -1572,13 +1585,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
4655
4656 skb_dst_set(skb, &rth->dst);
4657 return 0;
4658 -
4659 -e_nobufs:
4660 - return -ENOBUFS;
4661 -e_inval:
4662 - return -EINVAL;
4663 -e_err:
4664 - return err;
4665 }
4666
4667
4668 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
4669 index 21022db7a2a6..b1441bc8192f 100644
4670 --- a/net/ipv4/tcp_ipv4.c
4671 +++ b/net/ipv4/tcp_ipv4.c
4672 @@ -1504,23 +1504,23 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
4673 }
4674 EXPORT_SYMBOL(tcp_v4_do_rcv);
4675
4676 -void tcp_v4_early_demux(struct sk_buff *skb)
4677 +int tcp_v4_early_demux(struct sk_buff *skb)
4678 {
4679 const struct iphdr *iph;
4680 const struct tcphdr *th;
4681 struct sock *sk;
4682
4683 if (skb->pkt_type != PACKET_HOST)
4684 - return;
4685 + return 0;
4686
4687 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
4688 - return;
4689 + return 0;
4690
4691 iph = ip_hdr(skb);
4692 th = tcp_hdr(skb);
4693
4694 if (th->doff < sizeof(struct tcphdr) / 4)
4695 - return;
4696 + return 0;
4697
4698 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
4699 iph->saddr, th->source,
4700 @@ -1539,6 +1539,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
4701 skb_dst_set_noref(skb, dst);
4702 }
4703 }
4704 + return 0;
4705 }
4706
4707 /* Packet is added to VJ-style prequeue for processing in process
4708 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
4709 index b7661a68d498..40f7c8ee9ba6 100644
4710 --- a/net/ipv4/tcp_output.c
4711 +++ b/net/ipv4/tcp_output.c
4712 @@ -991,6 +991,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
4713 struct tcp_skb_cb *tcb;
4714 struct tcp_out_options opts;
4715 unsigned int tcp_options_size, tcp_header_size;
4716 + struct sk_buff *oskb = NULL;
4717 struct tcp_md5sig_key *md5;
4718 struct tcphdr *th;
4719 int err;
4720 @@ -998,12 +999,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
4721 BUG_ON(!skb || !tcp_skb_pcount(skb));
4722 tp = tcp_sk(sk);
4723
4724 - skb->skb_mstamp = tp->tcp_mstamp;
4725 if (clone_it) {
4726 TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
4727 - tp->snd_una;
4728 - tcp_rate_skb_sent(sk, skb);
4729 -
4730 + oskb = skb;
4731 if (unlikely(skb_cloned(skb)))
4732 skb = pskb_copy(skb, gfp_mask);
4733 else
4734 @@ -1011,6 +1010,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
4735 if (unlikely(!skb))
4736 return -ENOBUFS;
4737 }
4738 + skb->skb_mstamp = tp->tcp_mstamp;
4739
4740 inet = inet_sk(sk);
4741 tcb = TCP_SKB_CB(skb);
4742 @@ -1122,12 +1122,15 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
4743
4744 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
4745
4746 - if (likely(err <= 0))
4747 - return err;
4748 -
4749 - tcp_enter_cwr(sk);
4750 -
4751 - return net_xmit_eval(err);
4752 + if (unlikely(err > 0)) {
4753 + tcp_enter_cwr(sk);
4754 + err = net_xmit_eval(err);
4755 + }
4756 + if (!err && oskb) {
4757 + oskb->skb_mstamp = tp->tcp_mstamp;
4758 + tcp_rate_skb_sent(sk, oskb);
4759 + }
4760 + return err;
4761 }
4762
4763 /* This routine just queues the buffer for sending.
4764 @@ -2866,10 +2869,11 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
4765 skb_headroom(skb) >= 0xFFFF)) {
4766 struct sk_buff *nskb;
4767
4768 - skb->skb_mstamp = tp->tcp_mstamp;
4769 nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
4770 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
4771 -ENOBUFS;
4772 + if (!err)
4773 + skb->skb_mstamp = tp->tcp_mstamp;
4774 } else {
4775 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
4776 }
4777 @@ -3416,6 +3420,10 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
4778 goto done;
4779 }
4780
4781 + /* data was not sent, this is our new send_head */
4782 + sk->sk_send_head = syn_data;
4783 + tp->packets_out -= tcp_skb_pcount(syn_data);
4784 +
4785 fallback:
4786 /* Send a regular SYN with Fast Open cookie request option */
4787 if (fo->cookie.len > 0)
4788 @@ -3468,6 +3476,11 @@ int tcp_connect(struct sock *sk)
4789 */
4790 tp->snd_nxt = tp->write_seq;
4791 tp->pushed_seq = tp->write_seq;
4792 + buff = tcp_send_head(sk);
4793 + if (unlikely(buff)) {
4794 + tp->snd_nxt = TCP_SKB_CB(buff)->seq;
4795 + tp->pushed_seq = TCP_SKB_CB(buff)->seq;
4796 + }
4797 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
4798
4799 /* Timer for repeating the SYN until an answer. */
4800 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
4801 index 979e4d8526ba..84861d71face 100644
4802 --- a/net/ipv4/udp.c
4803 +++ b/net/ipv4/udp.c
4804 @@ -2217,9 +2217,10 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
4805 return NULL;
4806 }
4807
4808 -void udp_v4_early_demux(struct sk_buff *skb)
4809 +int udp_v4_early_demux(struct sk_buff *skb)
4810 {
4811 struct net *net = dev_net(skb->dev);
4812 + struct in_device *in_dev = NULL;
4813 const struct iphdr *iph;
4814 const struct udphdr *uh;
4815 struct sock *sk = NULL;
4816 @@ -2229,25 +2230,21 @@ void udp_v4_early_demux(struct sk_buff *skb)
4817
4818 /* validate the packet */
4819 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
4820 - return;
4821 + return 0;
4822
4823 iph = ip_hdr(skb);
4824 uh = udp_hdr(skb);
4825
4826 - if (skb->pkt_type == PACKET_BROADCAST ||
4827 - skb->pkt_type == PACKET_MULTICAST) {
4828 - struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
4829 + if (skb->pkt_type == PACKET_MULTICAST) {
4830 + in_dev = __in_dev_get_rcu(skb->dev);
4831
4832 if (!in_dev)
4833 - return;
4834 + return 0;
4835
4836 - /* we are supposed to accept bcast packets */
4837 - if (skb->pkt_type == PACKET_MULTICAST) {
4838 - ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
4839 - iph->protocol);
4840 - if (!ours)
4841 - return;
4842 - }
4843 + ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
4844 + iph->protocol);
4845 + if (!ours)
4846 + return 0;
4847
4848 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
4849 uh->source, iph->saddr, dif);
4850 @@ -2257,7 +2254,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
4851 }
4852
4853 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
4854 - return;
4855 + return 0;
4856
4857 skb->sk = sk;
4858 skb->destructor = sock_efree;
4859 @@ -2266,12 +2263,23 @@ void udp_v4_early_demux(struct sk_buff *skb)
4860 if (dst)
4861 dst = dst_check(dst, 0);
4862 if (dst) {
4863 + u32 itag = 0;
4864 +
4865 /* set noref for now.
4866 * any place which wants to hold dst has to call
4867 * dst_hold_safe()
4868 */
4869 skb_dst_set_noref(skb, dst);
4870 +
4871 + /* for unconnected multicast sockets we need to validate
4872 + * the source on each packet
4873 + */
4874 + if (!inet_sk(sk)->inet_daddr && in_dev)
4875 + return ip_mc_validate_source(skb, iph->daddr,
4876 + iph->saddr, iph->tos,
4877 + skb->dev, in_dev, &itag);
4878 }
4879 + return 0;
4880 }
4881
4882 int udp_rcv(struct sk_buff *skb)
4883 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
4884 index 936e9ab4dda5..ba757c28a301 100644
4885 --- a/net/ipv6/addrconf.c
4886 +++ b/net/ipv6/addrconf.c
4887 @@ -4982,9 +4982,10 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
4888
4889 /* Don't send DELADDR notification for TENTATIVE address,
4890 * since NEWADDR notification is sent only after removing
4891 - * TENTATIVE flag.
4892 + * TENTATIVE flag, if DAD has not failed.
4893 */
4894 - if (ifa->flags & IFA_F_TENTATIVE && event == RTM_DELADDR)
4895 + if (ifa->flags & IFA_F_TENTATIVE && !(ifa->flags & IFA_F_DADFAILED) &&
4896 + event == RTM_DELADDR)
4897 return;
4898
4899 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
4900 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
4901 index b7a72d409334..1602b491b281 100644
4902 --- a/net/ipv6/ip6_gre.c
4903 +++ b/net/ipv6/ip6_gre.c
4904 @@ -940,24 +940,25 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
4905 }
4906
4907 static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
4908 - unsigned short type,
4909 - const void *daddr, const void *saddr, unsigned int len)
4910 + unsigned short type, const void *daddr,
4911 + const void *saddr, unsigned int len)
4912 {
4913 struct ip6_tnl *t = netdev_priv(dev);
4914 - struct ipv6hdr *ipv6h = skb_push(skb, t->hlen);
4915 - __be16 *p = (__be16 *)(ipv6h+1);
4916 + struct ipv6hdr *ipv6h;
4917 + __be16 *p;
4918
4919 - ip6_flow_hdr(ipv6h, 0,
4920 - ip6_make_flowlabel(dev_net(dev), skb,
4921 - t->fl.u.ip6.flowlabel, true,
4922 - &t->fl.u.ip6));
4923 + ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
4924 + ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
4925 + t->fl.u.ip6.flowlabel,
4926 + true, &t->fl.u.ip6));
4927 ipv6h->hop_limit = t->parms.hop_limit;
4928 ipv6h->nexthdr = NEXTHDR_GRE;
4929 ipv6h->saddr = t->parms.laddr;
4930 ipv6h->daddr = t->parms.raddr;
4931
4932 - p[0] = t->parms.o_flags;
4933 - p[1] = htons(type);
4934 + p = (__be16 *)(ipv6h + 1);
4935 + p[0] = t->parms.o_flags;
4936 + p[1] = htons(type);
4937
4938 /*
4939 * Set the source hardware address.
4940 @@ -1310,6 +1311,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
4941 dev->features |= NETIF_F_NETNS_LOCAL;
4942 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
4943 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
4944 + netif_keep_dst(dev);
4945 }
4946
4947 static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
4948 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
4949 index 3a0ba2ae4b0f..4425b4411bb9 100644
4950 --- a/net/ipv6/ip6_tunnel.c
4951 +++ b/net/ipv6/ip6_tunnel.c
4952 @@ -1043,6 +1043,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
4953 struct dst_entry *dst = NULL, *ndst = NULL;
4954 struct net_device *tdev;
4955 int mtu;
4956 + unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
4957 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
4958 unsigned int max_headroom = psh_hlen;
4959 bool use_cache = false;
4960 @@ -1124,7 +1125,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
4961 t->parms.name);
4962 goto tx_err_dst_release;
4963 }
4964 - mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
4965 + mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
4966 if (encap_limit >= 0) {
4967 max_headroom += 8;
4968 mtu -= 8;
4969 @@ -1133,7 +1134,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
4970 mtu = IPV6_MIN_MTU;
4971 if (skb_dst(skb) && !t->parms.collect_md)
4972 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
4973 - if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
4974 + if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
4975 *pmtu = mtu;
4976 err = -EMSGSIZE;
4977 goto tx_err_dst_release;
4978 @@ -2258,6 +2259,9 @@ static int __init ip6_tunnel_init(void)
4979 {
4980 int err;
4981
4982 + if (!ipv6_mod_enabled())
4983 + return -EOPNOTSUPP;
4984 +
4985 err = register_pernet_device(&ip6_tnl_net_ops);
4986 if (err < 0)
4987 goto out_pernet;
4988 diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
4989 index 486c2305f53c..e3e3ea655464 100644
4990 --- a/net/ipv6/ip6_vti.c
4991 +++ b/net/ipv6/ip6_vti.c
4992 @@ -445,6 +445,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
4993 struct dst_entry *dst = skb_dst(skb);
4994 struct net_device *tdev;
4995 struct xfrm_state *x;
4996 + int pkt_len = skb->len;
4997 int err = -1;
4998 int mtu;
4999
5000 @@ -502,7 +503,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
5001 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
5002
5003 u64_stats_update_begin(&tstats->syncp);
5004 - tstats->tx_bytes += skb->len;
5005 + tstats->tx_bytes += pkt_len;
5006 tstats->tx_packets++;
5007 u64_stats_update_end(&tstats->syncp);
5008 } else {
5009 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
5010 index d6886228e1d0..30b4d55e88f3 100644
5011 --- a/net/ipv6/udp.c
5012 +++ b/net/ipv6/udp.c
5013 @@ -1011,6 +1011,7 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
5014 */
5015 offset = skb_transport_offset(skb);
5016 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
5017 + csum = skb->csum;
5018
5019 skb->ip_summed = CHECKSUM_NONE;
5020
5021 diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
5022 index 90165a6874bc..525c66f1121a 100644
5023 --- a/net/l2tp/l2tp_core.c
5024 +++ b/net/l2tp/l2tp_core.c
5025 @@ -1665,14 +1665,12 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
5026
5027 /* This function is used by the netlink TUNNEL_DELETE command.
5028 */
5029 -int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
5030 +void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
5031 {
5032 - l2tp_tunnel_inc_refcount(tunnel);
5033 - if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
5034 - l2tp_tunnel_dec_refcount(tunnel);
5035 - return 1;
5036 + if (!test_and_set_bit(0, &tunnel->dead)) {
5037 + l2tp_tunnel_inc_refcount(tunnel);
5038 + queue_work(l2tp_wq, &tunnel->del_work);
5039 }
5040 - return 0;
5041 }
5042 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
5043
5044 diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
5045 index 9101297f27ad..7c5a51f62afc 100644
5046 --- a/net/l2tp/l2tp_core.h
5047 +++ b/net/l2tp/l2tp_core.h
5048 @@ -160,6 +160,9 @@ struct l2tp_tunnel_cfg {
5049
5050 struct l2tp_tunnel {
5051 int magic; /* Should be L2TP_TUNNEL_MAGIC */
5052 +
5053 + unsigned long dead;
5054 +
5055 struct rcu_head rcu;
5056 rwlock_t hlist_lock; /* protect session_hlist */
5057 struct hlist_head session_hlist[L2TP_HASH_SIZE];
5058 @@ -248,7 +251,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
5059 u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
5060 struct l2tp_tunnel **tunnelp);
5061 void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
5062 -int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
5063 +void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
5064 struct l2tp_session *l2tp_session_create(int priv_size,
5065 struct l2tp_tunnel *tunnel,
5066 u32 session_id, u32 peer_session_id,
5067 diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
5068 index 4de2ec94b08c..cf456720930c 100644
5069 --- a/net/l2tp/l2tp_eth.c
5070 +++ b/net/l2tp/l2tp_eth.c
5071 @@ -44,7 +44,6 @@ struct l2tp_eth {
5072 struct net_device *dev;
5073 struct sock *tunnel_sock;
5074 struct l2tp_session *session;
5075 - struct list_head list;
5076 atomic_long_t tx_bytes;
5077 atomic_long_t tx_packets;
5078 atomic_long_t tx_dropped;
5079 @@ -58,17 +57,6 @@ struct l2tp_eth_sess {
5080 struct net_device *dev;
5081 };
5082
5083 -/* per-net private data for this module */
5084 -static unsigned int l2tp_eth_net_id;
5085 -struct l2tp_eth_net {
5086 - struct list_head l2tp_eth_dev_list;
5087 - spinlock_t l2tp_eth_lock;
5088 -};
5089 -
5090 -static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
5091 -{
5092 - return net_generic(net, l2tp_eth_net_id);
5093 -}
5094
5095 static int l2tp_eth_dev_init(struct net_device *dev)
5096 {
5097 @@ -84,12 +72,6 @@ static int l2tp_eth_dev_init(struct net_device *dev)
5098
5099 static void l2tp_eth_dev_uninit(struct net_device *dev)
5100 {
5101 - struct l2tp_eth *priv = netdev_priv(dev);
5102 - struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));
5103 -
5104 - spin_lock(&pn->l2tp_eth_lock);
5105 - list_del_init(&priv->list);
5106 - spin_unlock(&pn->l2tp_eth_lock);
5107 dev_put(dev);
5108 }
5109
5110 @@ -272,7 +254,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
5111 struct l2tp_eth *priv;
5112 struct l2tp_eth_sess *spriv;
5113 int rc;
5114 - struct l2tp_eth_net *pn;
5115
5116 tunnel = l2tp_tunnel_find(net, tunnel_id);
5117 if (!tunnel) {
5118 @@ -310,7 +291,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
5119 priv = netdev_priv(dev);
5120 priv->dev = dev;
5121 priv->session = session;
5122 - INIT_LIST_HEAD(&priv->list);
5123
5124 priv->tunnel_sock = tunnel->sock;
5125 session->recv_skb = l2tp_eth_dev_recv;
5126 @@ -331,10 +311,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
5127 strlcpy(session->ifname, dev->name, IFNAMSIZ);
5128
5129 dev_hold(dev);
5130 - pn = l2tp_eth_pernet(dev_net(dev));
5131 - spin_lock(&pn->l2tp_eth_lock);
5132 - list_add(&priv->list, &pn->l2tp_eth_dev_list);
5133 - spin_unlock(&pn->l2tp_eth_lock);
5134
5135 return 0;
5136
5137 @@ -347,22 +323,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
5138 return rc;
5139 }
5140
5141 -static __net_init int l2tp_eth_init_net(struct net *net)
5142 -{
5143 - struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id);
5144 -
5145 - INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
5146 - spin_lock_init(&pn->l2tp_eth_lock);
5147 -
5148 - return 0;
5149 -}
5150 -
5151 -static struct pernet_operations l2tp_eth_net_ops = {
5152 - .init = l2tp_eth_init_net,
5153 - .id = &l2tp_eth_net_id,
5154 - .size = sizeof(struct l2tp_eth_net),
5155 -};
5156 -
5157
5158 static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
5159 .session_create = l2tp_eth_create,
5160 @@ -376,25 +336,18 @@ static int __init l2tp_eth_init(void)
5161
5162 err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
5163 if (err)
5164 - goto out;
5165 -
5166 - err = register_pernet_device(&l2tp_eth_net_ops);
5167 - if (err)
5168 - goto out_unreg;
5169 + goto err;
5170
5171 pr_info("L2TP ethernet pseudowire support (L2TPv3)\n");
5172
5173 return 0;
5174
5175 -out_unreg:
5176 - l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
5177 -out:
5178 +err:
5179 return err;
5180 }
5181
5182 static void __exit l2tp_eth_exit(void)
5183 {
5184 - unregister_pernet_device(&l2tp_eth_net_ops);
5185 l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
5186 }
5187
5188 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
5189 index 5acee49db90b..7e794ad50cb0 100644
5190 --- a/net/netlink/af_netlink.c
5191 +++ b/net/netlink/af_netlink.c
5192 @@ -2262,10 +2262,13 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
5193
5194 mutex_unlock(nlk->cb_mutex);
5195
5196 + ret = 0;
5197 if (cb->start)
5198 - cb->start(cb);
5199 + ret = cb->start(cb);
5200 +
5201 + if (!ret)
5202 + ret = netlink_dump(sk);
5203
5204 - ret = netlink_dump(sk);
5205 sock_put(sk);
5206
5207 if (ret)
5208 diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
5209 index 6b44fe405282..294444bb075c 100644
5210 --- a/net/openvswitch/datapath.c
5211 +++ b/net/openvswitch/datapath.c
5212 @@ -1126,7 +1126,8 @@ static int ovs_nla_init_match_and_action(struct net *net,
5213 if (!a[OVS_FLOW_ATTR_KEY]) {
5214 OVS_NLERR(log,
5215 "Flow key attribute not present in set flow.");
5216 - return -EINVAL;
5217 + error = -EINVAL;
5218 + goto error;
5219 }
5220
5221 *acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
5222 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5223 index 1c61af9af67d..29d7b7e5b128 100644
5224 --- a/net/packet/af_packet.c
5225 +++ b/net/packet/af_packet.c
5226 @@ -1686,10 +1686,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
5227
5228 mutex_lock(&fanout_mutex);
5229
5230 - err = -EINVAL;
5231 - if (!po->running)
5232 - goto out;
5233 -
5234 err = -EALREADY;
5235 if (po->fanout)
5236 goto out;
5237 @@ -1751,7 +1747,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
5238 list_add(&match->list, &fanout_list);
5239 }
5240 err = -EINVAL;
5241 - if (match->type == type &&
5242 +
5243 + spin_lock(&po->bind_lock);
5244 + if (po->running &&
5245 + match->type == type &&
5246 match->prot_hook.type == po->prot_hook.type &&
5247 match->prot_hook.dev == po->prot_hook.dev) {
5248 err = -ENOSPC;
5249 @@ -1763,6 +1762,13 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
5250 err = 0;
5251 }
5252 }
5253 + spin_unlock(&po->bind_lock);
5254 +
5255 + if (err && !refcount_read(&match->sk_ref)) {
5256 + list_del(&match->list);
5257 + kfree(match);
5258 + }
5259 +
5260 out:
5261 if (err && rollover) {
5262 kfree(rollover);
5263 @@ -2836,6 +2842,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
5264 struct virtio_net_hdr vnet_hdr = { 0 };
5265 int offset = 0;
5266 struct packet_sock *po = pkt_sk(sk);
5267 + bool has_vnet_hdr = false;
5268 int hlen, tlen, linear;
5269 int extra_len = 0;
5270
5271 @@ -2879,6 +2886,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
5272 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
5273 if (err)
5274 goto out_unlock;
5275 + has_vnet_hdr = true;
5276 }
5277
5278 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
5279 @@ -2937,7 +2945,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
5280 skb->priority = sk->sk_priority;
5281 skb->mark = sockc.mark;
5282
5283 - if (po->has_vnet_hdr) {
5284 + if (has_vnet_hdr) {
5285 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
5286 if (err)
5287 goto out_free;
5288 @@ -3065,13 +3073,15 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
5289 int ret = 0;
5290 bool unlisted = false;
5291
5292 - if (po->fanout)
5293 - return -EINVAL;
5294 -
5295 lock_sock(sk);
5296 spin_lock(&po->bind_lock);
5297 rcu_read_lock();
5298
5299 + if (po->fanout) {
5300 + ret = -EINVAL;
5301 + goto out_unlock;
5302 + }
5303 +
5304 if (name) {
5305 dev = dev_get_by_name_rcu(sock_net(sk), name);
5306 if (!dev) {
5307 diff --git a/net/sched/act_api.c b/net/sched/act_api.c
5308 index f2e9ed34a963..0c5dbb172437 100644
5309 --- a/net/sched/act_api.c
5310 +++ b/net/sched/act_api.c
5311 @@ -174,7 +174,7 @@ static int tcf_del_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb,
5312 hlist_for_each_entry_safe(p, n, head, tcfa_head) {
5313 ret = __tcf_hash_release(p, false, true);
5314 if (ret == ACT_P_DELETED) {
5315 - module_put(p->ops->owner);
5316 + module_put(ops->owner);
5317 n_i++;
5318 } else if (ret < 0)
5319 goto nla_put_failure;
5320 @@ -506,13 +506,15 @@ EXPORT_SYMBOL(tcf_action_exec);
5321
5322 int tcf_action_destroy(struct list_head *actions, int bind)
5323 {
5324 + const struct tc_action_ops *ops;
5325 struct tc_action *a, *tmp;
5326 int ret = 0;
5327
5328 list_for_each_entry_safe(a, tmp, actions, list) {
5329 + ops = a->ops;
5330 ret = __tcf_hash_release(a, bind, true);
5331 if (ret == ACT_P_DELETED)
5332 - module_put(a->ops->owner);
5333 + module_put(ops->owner);
5334 else if (ret < 0)
5335 return ret;
5336 }
5337 diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
5338 index 9dc26c32cf32..d720f9376add 100644
5339 --- a/net/sched/cls_matchall.c
5340 +++ b/net/sched/cls_matchall.c
5341 @@ -32,6 +32,7 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
5342 if (tc_skip_sw(head->flags))
5343 return -1;
5344
5345 + *res = head->res;
5346 return tcf_exts_exec(skb, &head->exts, res);
5347 }
5348
5349 diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
5350 index 4ba6da5fb254..ff49421ee48a 100644
5351 --- a/net/sched/sch_generic.c
5352 +++ b/net/sched/sch_generic.c
5353 @@ -681,6 +681,7 @@ void qdisc_reset(struct Qdisc *qdisc)
5354 qdisc->gso_skb = NULL;
5355 }
5356 qdisc->q.qlen = 0;
5357 + qdisc->qstats.backlog = 0;
5358 }
5359 EXPORT_SYMBOL(qdisc_reset);
5360
5361 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
5362 index 6ef379f004ac..121e59a1d0e7 100644
5363 --- a/net/tipc/msg.c
5364 +++ b/net/tipc/msg.c
5365 @@ -551,7 +551,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
5366 return false;
5367 if (msg_errcode(msg))
5368 return false;
5369 - *err = -TIPC_ERR_NO_NAME;
5370 + *err = TIPC_ERR_NO_NAME;
5371 if (skb_linearize(skb))
5372 return false;
5373 msg = buf_msg(skb);
5374 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5375 index 750ba5d24a49..359b1f34c805 100644
5376 --- a/net/wireless/nl80211.c
5377 +++ b/net/wireless/nl80211.c
5378 @@ -549,6 +549,14 @@ nl80211_nan_srf_policy[NL80211_NAN_SRF_ATTR_MAX + 1] = {
5379 [NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED },
5380 };
5381
5382 +/* policy for packet pattern attributes */
5383 +static const struct nla_policy
5384 +nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
5385 + [NL80211_PKTPAT_MASK] = { .type = NLA_BINARY, },
5386 + [NL80211_PKTPAT_PATTERN] = { .type = NLA_BINARY, },
5387 + [NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 },
5388 +};
5389 +
5390 static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
5391 struct netlink_callback *cb,
5392 struct cfg80211_registered_device **rdev,
5393 @@ -10529,7 +10537,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
5394 u8 *mask_pat;
5395
5396 nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
5397 - NULL, info->extack);
5398 + nl80211_packet_pattern_policy,
5399 + info->extack);
5400 err = -EINVAL;
5401 if (!pat_tb[NL80211_PKTPAT_MASK] ||
5402 !pat_tb[NL80211_PKTPAT_PATTERN])
5403 @@ -10778,7 +10787,8 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
5404 rem) {
5405 u8 *mask_pat;
5406
5407 - nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, NULL, NULL);
5408 + nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
5409 + nl80211_packet_pattern_policy, NULL);
5410 if (!pat_tb[NL80211_PKTPAT_MASK] ||
5411 !pat_tb[NL80211_PKTPAT_PATTERN])
5412 return -EINVAL;
5413 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
5414 index 463af86812c7..a9e89177a346 100644
5415 --- a/security/smack/smack_lsm.c
5416 +++ b/security/smack/smack_lsm.c
5417 @@ -1499,7 +1499,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
5418 * @inode: the object
5419 * @name: attribute name
5420 * @buffer: where to put the result
5421 - * @alloc: unused
5422 + * @alloc: duplicate memory
5423 *
5424 * Returns the size of the attribute or an error code
5425 */
5426 @@ -1512,43 +1512,38 @@ static int smack_inode_getsecurity(struct inode *inode,
5427 struct super_block *sbp;
5428 struct inode *ip = (struct inode *)inode;
5429 struct smack_known *isp;
5430 - int ilen;
5431 - int rc = 0;
5432
5433 - if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
5434 + if (strcmp(name, XATTR_SMACK_SUFFIX) == 0)
5435 isp = smk_of_inode(inode);
5436 - ilen = strlen(isp->smk_known);
5437 - *buffer = isp->smk_known;
5438 - return ilen;
5439 - }
5440 + else {
5441 + /*
5442 + * The rest of the Smack xattrs are only on sockets.
5443 + */
5444 + sbp = ip->i_sb;
5445 + if (sbp->s_magic != SOCKFS_MAGIC)
5446 + return -EOPNOTSUPP;
5447
5448 - /*
5449 - * The rest of the Smack xattrs are only on sockets.
5450 - */
5451 - sbp = ip->i_sb;
5452 - if (sbp->s_magic != SOCKFS_MAGIC)
5453 - return -EOPNOTSUPP;
5454 + sock = SOCKET_I(ip);
5455 + if (sock == NULL || sock->sk == NULL)
5456 + return -EOPNOTSUPP;
5457
5458 - sock = SOCKET_I(ip);
5459 - if (sock == NULL || sock->sk == NULL)
5460 - return -EOPNOTSUPP;
5461 -
5462 - ssp = sock->sk->sk_security;
5463 + ssp = sock->sk->sk_security;
5464
5465 - if (strcmp(name, XATTR_SMACK_IPIN) == 0)
5466 - isp = ssp->smk_in;
5467 - else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
5468 - isp = ssp->smk_out;
5469 - else
5470 - return -EOPNOTSUPP;
5471 + if (strcmp(name, XATTR_SMACK_IPIN) == 0)
5472 + isp = ssp->smk_in;
5473 + else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
5474 + isp = ssp->smk_out;
5475 + else
5476 + return -EOPNOTSUPP;
5477 + }
5478
5479 - ilen = strlen(isp->smk_known);
5480 - if (rc == 0) {
5481 - *buffer = isp->smk_known;
5482 - rc = ilen;
5483 + if (alloc) {
5484 + *buffer = kstrdup(isp->smk_known, GFP_KERNEL);
5485 + if (*buffer == NULL)
5486 + return -ENOMEM;
5487 }
5488
5489 - return rc;
5490 + return strlen(isp->smk_known);
5491 }
5492
5493
5494 diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
5495 index fec1dfdb14ad..4490a699030b 100644
5496 --- a/sound/core/compress_offload.c
5497 +++ b/sound/core/compress_offload.c
5498 @@ -948,14 +948,13 @@ static const struct file_operations snd_compr_file_ops = {
5499 static int snd_compress_dev_register(struct snd_device *device)
5500 {
5501 int ret = -EINVAL;
5502 - char str[16];
5503 struct snd_compr *compr;
5504
5505 if (snd_BUG_ON(!device || !device->device_data))
5506 return -EBADFD;
5507 compr = device->device_data;
5508
5509 - pr_debug("reg %s for device %s, direction %d\n", str, compr->name,
5510 + pr_debug("reg device %s, direction %d\n", compr->name,
5511 compr->direction);
5512 /* register compressed device */
5513 ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
5514 diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
5515 index d15ecf9febbf..e54f5f549e38 100644
5516 --- a/sound/pci/echoaudio/echoaudio.c
5517 +++ b/sound/pci/echoaudio/echoaudio.c
5518 @@ -1272,11 +1272,11 @@ static int snd_echo_mixer_info(struct snd_kcontrol *kcontrol,
5519
5520 chip = snd_kcontrol_chip(kcontrol);
5521 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
5522 + uinfo->count = 1;
5523 uinfo->value.integer.min = ECHOGAIN_MINOUT;
5524 uinfo->value.integer.max = ECHOGAIN_MAXOUT;
5525 uinfo->dimen.d[0] = num_busses_out(chip);
5526 uinfo->dimen.d[1] = num_busses_in(chip);
5527 - uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
5528 return 0;
5529 }
5530
5531 @@ -1344,11 +1344,11 @@ static int snd_echo_vmixer_info(struct snd_kcontrol *kcontrol,
5532
5533 chip = snd_kcontrol_chip(kcontrol);
5534 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
5535 + uinfo->count = 1;
5536 uinfo->value.integer.min = ECHOGAIN_MINOUT;
5537 uinfo->value.integer.max = ECHOGAIN_MAXOUT;
5538 uinfo->dimen.d[0] = num_busses_out(chip);
5539 uinfo->dimen.d[1] = num_pipes_out(chip);
5540 - uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
5541 return 0;
5542 }
5543
5544 @@ -1728,6 +1728,7 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
5545 struct snd_ctl_elem_info *uinfo)
5546 {
5547 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
5548 + uinfo->count = 96;
5549 uinfo->value.integer.min = ECHOGAIN_MINOUT;
5550 uinfo->value.integer.max = 0;
5551 #ifdef ECHOCARD_HAS_VMIXER
5552 @@ -1737,7 +1738,6 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
5553 #endif
5554 uinfo->dimen.d[1] = 16; /* 16 channels */
5555 uinfo->dimen.d[2] = 2; /* 0=level, 1=peak */
5556 - uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1] * uinfo->dimen.d[2];
5557 return 0;
5558 }
5559
5560 diff --git a/sound/usb/card.c b/sound/usb/card.c
5561 index 6640277a725b..383facf2dc11 100644
5562 --- a/sound/usb/card.c
5563 +++ b/sound/usb/card.c
5564 @@ -221,6 +221,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
5565 struct usb_interface_descriptor *altsd;
5566 void *control_header;
5567 int i, protocol;
5568 + int rest_bytes;
5569
5570 /* find audiocontrol interface */
5571 host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0];
5572 @@ -235,6 +236,15 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
5573 return -EINVAL;
5574 }
5575
5576 + rest_bytes = (void *)(host_iface->extra + host_iface->extralen) -
5577 + control_header;
5578 +
5579 + /* just to be sure -- this shouldn't hit at all */
5580 + if (rest_bytes <= 0) {
5581 + dev_err(&dev->dev, "invalid control header\n");
5582 + return -EINVAL;
5583 + }
5584 +
5585 switch (protocol) {
5586 default:
5587 dev_warn(&dev->dev,
5588 @@ -245,11 +255,21 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
5589 case UAC_VERSION_1: {
5590 struct uac1_ac_header_descriptor *h1 = control_header;
5591
5592 + if (rest_bytes < sizeof(*h1)) {
5593 + dev_err(&dev->dev, "too short v1 buffer descriptor\n");
5594 + return -EINVAL;
5595 + }
5596 +
5597 if (!h1->bInCollection) {
5598 dev_info(&dev->dev, "skipping empty audio interface (v1)\n");
5599 return -EINVAL;
5600 }
5601
5602 + if (rest_bytes < h1->bLength) {
5603 + dev_err(&dev->dev, "invalid buffer length (v1)\n");
5604 + return -EINVAL;
5605 + }
5606 +
5607 if (h1->bLength < sizeof(*h1) + h1->bInCollection) {
5608 dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n");
5609 return -EINVAL;
5610 diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
5611 index bf618e1500ac..e7b934f4d837 100644
5612 --- a/sound/usb/usx2y/usb_stream.c
5613 +++ b/sound/usb/usx2y/usb_stream.c
5614 @@ -191,7 +191,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
5615 }
5616
5617 pg = get_order(read_size);
5618 - sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
5619 + sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
5620 + __GFP_NOWARN, pg);
5621 if (!sk->s) {
5622 snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
5623 goto out;
5624 @@ -211,7 +212,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
5625 pg = get_order(write_size);
5626
5627 sk->write_page =
5628 - (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
5629 + (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
5630 + __GFP_NOWARN, pg);
5631 if (!sk->write_page) {
5632 snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
5633 usb_stream_free(sk);
5634 diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
5635 index d3ed7324105e..48c145eeeaf7 100644
5636 --- a/tools/testing/selftests/bpf/test_verifier.c
5637 +++ b/tools/testing/selftests/bpf/test_verifier.c
5638 @@ -6009,6 +6009,22 @@ static struct bpf_test tests[] = {
5639 .result = REJECT,
5640 .result_unpriv = REJECT,
5641 },
5642 + {
5643 + "invalid 64-bit BPF_END",
5644 + .insns = {
5645 + BPF_MOV32_IMM(BPF_REG_0, 0),
5646 + {
5647 + .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
5648 + .dst_reg = BPF_REG_0,
5649 + .src_reg = 0,
5650 + .off = 0,
5651 + .imm = 32,
5652 + },
5653 + BPF_EXIT_INSN(),
5654 + },
5655 + .errstr = "BPF_END uses reserved fields",
5656 + .result = REJECT,
5657 + },
5658 };
5659
5660 static int probe_filter_length(const struct bpf_insn *fp)