Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0136-4.19.37-all-fixes.patch



Revision 3415
Fri Aug 2 11:47:46 2019 UTC by niro
File size: 176695 bytes
-linux-4.19.37
1 diff --git a/Makefile b/Makefile
2 index 3fac08f6a11e..7b495cad8c2e 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 19
9 -SUBLEVEL = 36
10 +SUBLEVEL = 37
11 EXTRAVERSION =
12 NAME = "People's Front"
13
14 @@ -661,8 +661,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
15 KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
16
17 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
18 -KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
19 -KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
20 +KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
21 else
22 ifdef CONFIG_PROFILE_ALL_BRANCHES
23 KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
24 diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
25 index b447b4db423a..fd1e722f3821 100644
26 --- a/arch/arm64/include/asm/futex.h
27 +++ b/arch/arm64/include/asm/futex.h
28 @@ -50,7 +50,7 @@ do { \
29 static inline int
30 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
31 {
32 - int oldval, ret, tmp;
33 + int oldval = 0, ret, tmp;
34 u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
35
36 pagefault_disable();
37 diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S
38 index 3b6e70d085da..8457cdd47f75 100644
39 --- a/arch/x86/crypto/poly1305-avx2-x86_64.S
40 +++ b/arch/x86/crypto/poly1305-avx2-x86_64.S
41 @@ -323,6 +323,12 @@ ENTRY(poly1305_4block_avx2)
42 vpaddq t2,t1,t1
43 vmovq t1x,d4
44
45 + # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
46 + # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
47 + # amount. Careful: we must not assume the carry bits 'd0 >> 26',
48 + # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
49 + # integers. It's true in a single-block implementation, but not here.
50 +
51 # d1 += d0 >> 26
52 mov d0,%rax
53 shr $26,%rax
54 @@ -361,16 +367,16 @@ ENTRY(poly1305_4block_avx2)
55 # h0 += (d4 >> 26) * 5
56 mov d4,%rax
57 shr $26,%rax
58 - lea (%eax,%eax,4),%eax
59 - add %eax,%ebx
60 + lea (%rax,%rax,4),%rax
61 + add %rax,%rbx
62 # h4 = d4 & 0x3ffffff
63 mov d4,%rax
64 and $0x3ffffff,%eax
65 mov %eax,h4
66
67 # h1 += h0 >> 26
68 - mov %ebx,%eax
69 - shr $26,%eax
70 + mov %rbx,%rax
71 + shr $26,%rax
72 add %eax,h1
73 # h0 = h0 & 0x3ffffff
74 andl $0x3ffffff,%ebx
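
For reference, the following is a minimal C model of the carry chain this assembly implements (a sketch with illustrative names, not kernel code). The bug fixed here is that the '(d4 >> 26) * 5' and 'h0 >> 26' steps were done in 32-bit registers (%eax, %ebx), which can truncate in the 2- and 4-block paths where the intermediate sums grow larger than in the single-block case.

	#include <stdint.h>

	/* Partial reduction mod 2^130 - 5: d[0..4] are unreduced 64-bit
	 * limb sums, h[0..4] receive ~26-bit limbs. Every carry below
	 * must be computed in 64-bit arithmetic in the multi-block code. */
	static void poly1305_partial_reduce(uint32_t h[5], uint64_t d[5])
	{
		uint64_t c;

		d[1] += d[0] >> 26;  h[0] = d[0] & 0x3ffffff;  /* h0 -> h1 */
		d[2] += d[1] >> 26;  h[1] = d[1] & 0x3ffffff;  /* h1 -> h2 */
		d[3] += d[2] >> 26;  h[2] = d[2] & 0x3ffffff;  /* h2 -> h3 */
		d[4] += d[3] >> 26;  h[3] = d[3] & 0x3ffffff;  /* h3 -> h4 */
		c = (d[4] >> 26) * 5;        /* 2^130 == 5 (mod 2^130 - 5) */
		h[4] = d[4] & 0x3ffffff;
		c += h[0];                   /* h0 += (d4 >> 26) * 5 */
		h[1] += (uint32_t)(c >> 26); /* h0 -> h1 */
		h[0] = (uint32_t)(c & 0x3ffffff);
	}
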
75 diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S
76 index c88c670cb5fc..5851c7418fb7 100644
77 --- a/arch/x86/crypto/poly1305-sse2-x86_64.S
78 +++ b/arch/x86/crypto/poly1305-sse2-x86_64.S
79 @@ -253,16 +253,16 @@ ENTRY(poly1305_block_sse2)
80 # h0 += (d4 >> 26) * 5
81 mov d4,%rax
82 shr $26,%rax
83 - lea (%eax,%eax,4),%eax
84 - add %eax,%ebx
85 + lea (%rax,%rax,4),%rax
86 + add %rax,%rbx
87 # h4 = d4 & 0x3ffffff
88 mov d4,%rax
89 and $0x3ffffff,%eax
90 mov %eax,h4
91
92 # h1 += h0 >> 26
93 - mov %ebx,%eax
94 - shr $26,%eax
95 + mov %rbx,%rax
96 + shr $26,%rax
97 add %eax,h1
98 # h0 = h0 & 0x3ffffff
99 andl $0x3ffffff,%ebx
100 @@ -520,6 +520,12 @@ ENTRY(poly1305_2block_sse2)
101 paddq t2,t1
102 movq t1,d4
103
104 + # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
105 + # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
106 + # amount. Careful: we must not assume the carry bits 'd0 >> 26',
107 + # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
108 + # integers. It's true in a single-block implementation, but not here.
109 +
110 # d1 += d0 >> 26
111 mov d0,%rax
112 shr $26,%rax
113 @@ -558,16 +564,16 @@ ENTRY(poly1305_2block_sse2)
114 # h0 += (d4 >> 26) * 5
115 mov d4,%rax
116 shr $26,%rax
117 - lea (%eax,%eax,4),%eax
118 - add %eax,%ebx
119 + lea (%rax,%rax,4),%rax
120 + add %rax,%rbx
121 # h4 = d4 & 0x3ffffff
122 mov d4,%rax
123 and $0x3ffffff,%eax
124 mov %eax,h4
125
126 # h1 += h0 >> 26
127 - mov %ebx,%eax
128 - shr $26,%eax
129 + mov %rbx,%rax
130 + shr $26,%rax
131 add %eax,h1
132 # h0 = h0 & 0x3ffffff
133 andl $0x3ffffff,%ebx
134 diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
135 index 3e5dd85b019a..263af6312329 100644
136 --- a/arch/x86/events/amd/core.c
137 +++ b/arch/x86/events/amd/core.c
138 @@ -117,22 +117,39 @@ static __initconst const u64 amd_hw_cache_event_ids
139 };
140
141 /*
142 - * AMD Performance Monitor K7 and later.
143 + * AMD Performance Monitor K7 and later, up to and including Family 16h:
144 */
145 static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
146 {
147 - [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
148 - [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
149 - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
150 - [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
151 - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
152 - [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
153 - [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
154 - [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
155 + [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
156 + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
157 + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
158 + [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
159 + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
160 + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
161 + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
162 + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
163 +};
164 +
165 +/*
166 + * AMD Performance Monitor Family 17h and later:
167 + */
168 +static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
169 +{
170 + [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
171 + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
172 + [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
173 + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
174 + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
175 + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
176 + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187,
177 };
178
179 static u64 amd_pmu_event_map(int hw_event)
180 {
181 + if (boot_cpu_data.x86 >= 0x17)
182 + return amd_f17h_perfmon_event_map[hw_event];
183 +
184 return amd_perfmon_event_map[hw_event];
185 }
186
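These map values appear to use the usual AMD PerfEvtSel packing, with the event select in the low byte and the unit mask in the next byte; assuming that encoding (my reading, the patch does not state it), the new entries decode as sketched below.

	#include <stdint.h>

	/* Hypothetical decode helpers for the map values above, assuming
	 * the (umask << 8) | event_select packing: */
	static inline unsigned int evt_select(uint64_t m) { return m & 0xff; }
	static inline unsigned int evt_umask(uint64_t m)  { return (m >> 8) & 0xff; }

	/* e.g. 0xff60 -> event 0x60, umask 0xff (Family 17h cache references),
	 *      0x0287 -> event 0x87, umask 0x02 (frontend stall cycles). */
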
187 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
188 index 12453cf7c11b..3dd204d1dd19 100644
189 --- a/arch/x86/events/intel/core.c
190 +++ b/arch/x86/events/intel/core.c
191 @@ -3014,7 +3014,7 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
192 flags &= ~PERF_SAMPLE_TIME;
193 if (!event->attr.exclude_kernel)
194 flags &= ~PERF_SAMPLE_REGS_USER;
195 - if (event->attr.sample_regs_user & ~PEBS_REGS)
196 + if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
197 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
198 return flags;
199 }
200 diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
201 index 42a36280d168..05659c7b43d4 100644
202 --- a/arch/x86/events/perf_event.h
203 +++ b/arch/x86/events/perf_event.h
204 @@ -96,25 +96,25 @@ struct amd_nb {
205 PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
206 PERF_SAMPLE_PERIOD)
207
208 -#define PEBS_REGS \
209 - (PERF_REG_X86_AX | \
210 - PERF_REG_X86_BX | \
211 - PERF_REG_X86_CX | \
212 - PERF_REG_X86_DX | \
213 - PERF_REG_X86_DI | \
214 - PERF_REG_X86_SI | \
215 - PERF_REG_X86_SP | \
216 - PERF_REG_X86_BP | \
217 - PERF_REG_X86_IP | \
218 - PERF_REG_X86_FLAGS | \
219 - PERF_REG_X86_R8 | \
220 - PERF_REG_X86_R9 | \
221 - PERF_REG_X86_R10 | \
222 - PERF_REG_X86_R11 | \
223 - PERF_REG_X86_R12 | \
224 - PERF_REG_X86_R13 | \
225 - PERF_REG_X86_R14 | \
226 - PERF_REG_X86_R15)
227 +#define PEBS_GP_REGS \
228 + ((1ULL << PERF_REG_X86_AX) | \
229 + (1ULL << PERF_REG_X86_BX) | \
230 + (1ULL << PERF_REG_X86_CX) | \
231 + (1ULL << PERF_REG_X86_DX) | \
232 + (1ULL << PERF_REG_X86_DI) | \
233 + (1ULL << PERF_REG_X86_SI) | \
234 + (1ULL << PERF_REG_X86_SP) | \
235 + (1ULL << PERF_REG_X86_BP) | \
236 + (1ULL << PERF_REG_X86_IP) | \
237 + (1ULL << PERF_REG_X86_FLAGS) | \
238 + (1ULL << PERF_REG_X86_R8) | \
239 + (1ULL << PERF_REG_X86_R9) | \
240 + (1ULL << PERF_REG_X86_R10) | \
241 + (1ULL << PERF_REG_X86_R11) | \
242 + (1ULL << PERF_REG_X86_R12) | \
243 + (1ULL << PERF_REG_X86_R13) | \
244 + (1ULL << PERF_REG_X86_R14) | \
245 + (1ULL << PERF_REG_X86_R15))
246
247 /*
248 * Per register state.
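
The rename marks the real fix: PERF_REG_X86_* are bit positions (small integers), while attr.sample_regs_user is a bitmap, so OR-ing the enum values themselves produced a meaningless mask. A stand-alone sketch of the difference, with illustrative names:

	enum { REG_AX, REG_BX, REG_CX };  /* stand-ins for PERF_REG_X86_* */

	/* Wrong: OR of the indices. REG_AX (0) contributes nothing and the
	 * result (3) is not the set {bit 0, bit 1, bit 2}. */
	#define BAD_MASK   (REG_AX | REG_BX | REG_CX)

	/* Right: each index becomes one bit, matching the bitmap layout. */
	#define GOOD_MASK  ((1ULL << REG_AX) | \
			    (1ULL << REG_BX) | \
			    (1ULL << REG_CX))  /* == 0x7 */
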
249 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
250 index 1e0c4c74195c..e5258bd64200 100644
251 --- a/arch/x86/kernel/cpu/bugs.c
252 +++ b/arch/x86/kernel/cpu/bugs.c
253 @@ -272,7 +272,7 @@ static const struct {
254 const char *option;
255 enum spectre_v2_user_cmd cmd;
256 bool secure;
257 -} v2_user_options[] __initdata = {
258 +} v2_user_options[] __initconst = {
259 { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
260 { "off", SPECTRE_V2_USER_CMD_NONE, false },
261 { "on", SPECTRE_V2_USER_CMD_FORCE, true },
262 @@ -407,7 +407,7 @@ static const struct {
263 const char *option;
264 enum spectre_v2_mitigation_cmd cmd;
265 bool secure;
266 -} mitigation_options[] __initdata = {
267 +} mitigation_options[] __initconst = {
268 { "off", SPECTRE_V2_CMD_NONE, false },
269 { "on", SPECTRE_V2_CMD_FORCE, true },
270 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
271 @@ -643,7 +643,7 @@ static const char * const ssb_strings[] = {
272 static const struct {
273 const char *option;
274 enum ssb_mitigation_cmd cmd;
275 -} ssb_mitigation_options[] __initdata = {
276 +} ssb_mitigation_options[] __initconst = {
277 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
278 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
279 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
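
All three tables are static const and only read during __init command-line parsing; __initdata places objects in the writable .init.data section, and combining it with const can produce section attribute conflicts (for example under LTO), so __initconst (.init.rodata) is the matching annotation. A minimal sketch of the convention, assuming only <linux/init.h>:

	#include <linux/init.h>

	static int boot_counter __initdata = 0;   /* writable, init-only */
	static const char *const modes[] __initconst = { "on", "off" };
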
280 diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
281 index b0d1e81c96bb..acb901b43ce4 100644
282 --- a/arch/x86/kernel/kprobes/core.c
283 +++ b/arch/x86/kernel/kprobes/core.c
284 @@ -569,6 +569,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
285 unsigned long *sara = stack_addr(regs);
286
287 ri->ret_addr = (kprobe_opcode_t *) *sara;
288 + ri->fp = sara;
289
290 /* Replace the return addr with trampoline addr */
291 *sara = (unsigned long) &kretprobe_trampoline;
292 @@ -759,15 +760,21 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
293 unsigned long flags, orig_ret_address = 0;
294 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
295 kprobe_opcode_t *correct_ret_addr = NULL;
296 + void *frame_pointer;
297 + bool skipped = false;
298
299 INIT_HLIST_HEAD(&empty_rp);
300 kretprobe_hash_lock(current, &head, &flags);
301 /* fixup registers */
302 #ifdef CONFIG_X86_64
303 regs->cs = __KERNEL_CS;
304 + /* On x86-64, we use pt_regs->sp as the return address holder. */
305 + frame_pointer = &regs->sp;
306 #else
307 regs->cs = __KERNEL_CS | get_kernel_rpl();
308 regs->gs = 0;
309 + /* On x86-32, we use pt_regs->flags as the return address holder. */
310 + frame_pointer = &regs->flags;
311 #endif
312 regs->ip = trampoline_address;
313 regs->orig_ax = ~0UL;
314 @@ -789,8 +796,25 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
315 if (ri->task != current)
316 /* another task is sharing our hash bucket */
317 continue;
318 + /*
319 + * Return probes must be pushed on this hash list in the
320 + * correct order (same as return order) so that they can be
321 + * popped correctly. However, if we find one pushed in the
322 + * wrong order, it means we found a function which should not
323 + * be probed, because the out-of-order entry was pushed while
324 + * processing another kretprobe itself.
325 + */
326 + if (ri->fp != frame_pointer) {
327 + if (!skipped)
328 + pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
329 + skipped = true;
330 + continue;
331 + }
332
333 orig_ret_address = (unsigned long)ri->ret_addr;
334 + if (skipped)
335 + pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
336 + ri->rp->kp.addr);
337
338 if (orig_ret_address != trampoline_address)
339 /*
340 @@ -808,6 +832,8 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
341 if (ri->task != current)
342 /* another task is sharing our hash bucket */
343 continue;
344 + if (ri->fp != frame_pointer)
345 + continue;
346
347 orig_ret_address = (unsigned long)ri->ret_addr;
348 if (ri->rp && ri->rp->handler) {
349 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
350 index 7d31192296a8..b8b08e61ac73 100644
351 --- a/arch/x86/kernel/process.c
352 +++ b/arch/x86/kernel/process.c
353 @@ -411,6 +411,8 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
354 u64 msr = x86_spec_ctrl_base;
355 bool updmsr = false;
356
357 + lockdep_assert_irqs_disabled();
358 +
359 /*
360 * If TIF_SSBD is different, select the proper mitigation
361 * method. Note that if SSBD mitigation is disabled or permanently
362 @@ -462,10 +464,12 @@ static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
363
364 void speculation_ctrl_update(unsigned long tif)
365 {
366 + unsigned long flags;
367 +
368 /* Forced update. Make sure all relevant TIF flags are different */
369 - preempt_disable();
370 + local_irq_save(flags);
371 __speculation_ctrl_update(~tif, tif);
372 - preempt_enable();
373 + local_irq_restore(flags);
374 }
375
376 /* Called from seccomp/prctl update */
377 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
378 index 106482da6388..860bd271619d 100644
379 --- a/arch/x86/kvm/emulate.c
380 +++ b/arch/x86/kvm/emulate.c
381 @@ -2575,15 +2575,13 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
382 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
383 * supports long mode.
384 */
385 - cr4 = ctxt->ops->get_cr(ctxt, 4);
386 if (emulator_has_longmode(ctxt)) {
387 struct desc_struct cs_desc;
388
389 /* Zero CR4.PCIDE before CR0.PG. */
390 - if (cr4 & X86_CR4_PCIDE) {
391 + cr4 = ctxt->ops->get_cr(ctxt, 4);
392 + if (cr4 & X86_CR4_PCIDE)
393 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
394 - cr4 &= ~X86_CR4_PCIDE;
395 - }
396
397 /* A 32-bit code segment is required to clear EFER.LMA. */
398 memset(&cs_desc, 0, sizeof(cs_desc));
399 @@ -2597,13 +2595,16 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
400 if (cr0 & X86_CR0_PE)
401 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
402
403 - /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
404 - if (cr4 & X86_CR4_PAE)
405 - ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
406 + if (emulator_has_longmode(ctxt)) {
407 + /* Clear CR4.PAE before clearing EFER.LME. */
408 + cr4 = ctxt->ops->get_cr(ctxt, 4);
409 + if (cr4 & X86_CR4_PAE)
410 + ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
411
412 - /* And finally go back to 32-bit mode. */
413 - efer = 0;
414 - ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
415 + /* And finally go back to 32-bit mode. */
416 + efer = 0;
417 + ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
418 + }
419
420 smbase = ctxt->ops->get_smbase(ctxt);
421
422 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
423 index 6dc72804fe6e..813cb60eb401 100644
424 --- a/arch/x86/kvm/svm.c
425 +++ b/arch/x86/kvm/svm.c
426 @@ -2679,6 +2679,7 @@ static int npf_interception(struct vcpu_svm *svm)
427 static int db_interception(struct vcpu_svm *svm)
428 {
429 struct kvm_run *kvm_run = svm->vcpu.run;
430 + struct kvm_vcpu *vcpu = &svm->vcpu;
431
432 if (!(svm->vcpu.guest_debug &
433 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
434 @@ -2689,6 +2690,8 @@ static int db_interception(struct vcpu_svm *svm)
435
436 if (svm->nmi_singlestep) {
437 disable_nmi_singlestep(svm);
438 + /* Make sure we check for pending NMIs upon entry */
439 + kvm_make_request(KVM_REQ_EVENT, vcpu);
440 }
441
442 if (svm->vcpu.guest_debug &
443 @@ -4493,14 +4496,25 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
444 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
445 break;
446 case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
447 + int i;
448 + struct kvm_vcpu *vcpu;
449 + struct kvm *kvm = svm->vcpu.kvm;
450 struct kvm_lapic *apic = svm->vcpu.arch.apic;
451
452 /*
453 - * Update ICR high and low, then emulate sending IPI,
454 - * which is handled when writing APIC_ICR.
455 + * At this point, we expect that the AVIC HW has already
456 + * set the appropriate IRR bits on the valid target
457 + * vcpus. So, we just need to kick the appropriate vcpu.
458 */
459 - kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
460 - kvm_lapic_reg_write(apic, APIC_ICR, icrl);
461 + kvm_for_each_vcpu(i, vcpu, kvm) {
462 + bool m = kvm_apic_match_dest(vcpu, apic,
463 + icrl & KVM_APIC_SHORT_MASK,
464 + GET_APIC_DEST_FIELD(icrh),
465 + icrl & KVM_APIC_DEST_MASK);
466 +
467 + if (m && !avic_vcpu_is_running(vcpu))
468 + kvm_vcpu_wake_up(vcpu);
469 + }
470 break;
471 }
472 case AVIC_IPI_FAILURE_INVALID_TARGET:
473 diff --git a/crypto/testmgr.h b/crypto/testmgr.h
474 index 862ee1d04263..74e1454cae1e 100644
475 --- a/crypto/testmgr.h
476 +++ b/crypto/testmgr.h
477 @@ -5592,7 +5592,49 @@ static const struct hash_testvec poly1305_tv_template[] = {
478 .psize = 80,
479 .digest = "\x13\x00\x00\x00\x00\x00\x00\x00"
480 "\x00\x00\x00\x00\x00\x00\x00\x00",
481 - },
482 + }, { /* Regression test for overflow in AVX2 implementation */
483 + .plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff"
484 + "\xff\xff\xff\xff\xff\xff\xff\xff"
485 + "\xff\xff\xff\xff\xff\xff\xff\xff"
486 + "\xff\xff\xff\xff\xff\xff\xff\xff"
487 + "\xff\xff\xff\xff\xff\xff\xff\xff"
488 + "\xff\xff\xff\xff\xff\xff\xff\xff"
489 + "\xff\xff\xff\xff\xff\xff\xff\xff"
490 + "\xff\xff\xff\xff\xff\xff\xff\xff"
491 + "\xff\xff\xff\xff\xff\xff\xff\xff"
492 + "\xff\xff\xff\xff\xff\xff\xff\xff"
493 + "\xff\xff\xff\xff\xff\xff\xff\xff"
494 + "\xff\xff\xff\xff\xff\xff\xff\xff"
495 + "\xff\xff\xff\xff\xff\xff\xff\xff"
496 + "\xff\xff\xff\xff\xff\xff\xff\xff"
497 + "\xff\xff\xff\xff\xff\xff\xff\xff"
498 + "\xff\xff\xff\xff\xff\xff\xff\xff"
499 + "\xff\xff\xff\xff\xff\xff\xff\xff"
500 + "\xff\xff\xff\xff\xff\xff\xff\xff"
501 + "\xff\xff\xff\xff\xff\xff\xff\xff"
502 + "\xff\xff\xff\xff\xff\xff\xff\xff"
503 + "\xff\xff\xff\xff\xff\xff\xff\xff"
504 + "\xff\xff\xff\xff\xff\xff\xff\xff"
505 + "\xff\xff\xff\xff\xff\xff\xff\xff"
506 + "\xff\xff\xff\xff\xff\xff\xff\xff"
507 + "\xff\xff\xff\xff\xff\xff\xff\xff"
508 + "\xff\xff\xff\xff\xff\xff\xff\xff"
509 + "\xff\xff\xff\xff\xff\xff\xff\xff"
510 + "\xff\xff\xff\xff\xff\xff\xff\xff"
511 + "\xff\xff\xff\xff\xff\xff\xff\xff"
512 + "\xff\xff\xff\xff\xff\xff\xff\xff"
513 + "\xff\xff\xff\xff\xff\xff\xff\xff"
514 + "\xff\xff\xff\xff\xff\xff\xff\xff"
515 + "\xff\xff\xff\xff\xff\xff\xff\xff"
516 + "\xff\xff\xff\xff\xff\xff\xff\xff"
517 + "\xff\xff\xff\xff\xff\xff\xff\xff"
518 + "\xff\xff\xff\xff\xff\xff\xff\xff"
519 + "\xff\xff\xff\xff\xff\xff\xff\xff"
520 + "\xff\xff\xff\xff",
521 + .psize = 300,
522 + .digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
523 + "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
524 + }
525 };
526
527 /*
528 diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
529 index df2175b1169a..925dbc751322 100644
530 --- a/drivers/acpi/nfit/core.c
531 +++ b/drivers/acpi/nfit/core.c
532 @@ -1298,19 +1298,30 @@ static ssize_t scrub_show(struct device *dev,
533 struct device_attribute *attr, char *buf)
534 {
535 struct nvdimm_bus_descriptor *nd_desc;
536 + struct acpi_nfit_desc *acpi_desc;
537 ssize_t rc = -ENXIO;
538 + bool busy;
539
540 device_lock(dev);
541 nd_desc = dev_get_drvdata(dev);
542 - if (nd_desc) {
543 - struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
544 + if (!nd_desc) {
545 + device_unlock(dev);
546 + return rc;
547 + }
548 + acpi_desc = to_acpi_desc(nd_desc);
549
550 - mutex_lock(&acpi_desc->init_mutex);
551 - rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
552 - acpi_desc->scrub_busy
553 - && !acpi_desc->cancel ? "+\n" : "\n");
554 - mutex_unlock(&acpi_desc->init_mutex);
555 + mutex_lock(&acpi_desc->init_mutex);
556 + busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
557 + && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
558 + rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
559 + /* Allow an admin to poll the busy state at a higher rate */
560 + if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
561 + &acpi_desc->scrub_flags)) {
562 + acpi_desc->scrub_tmo = 1;
563 + mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ);
564 }
565 +
566 + mutex_unlock(&acpi_desc->init_mutex);
567 device_unlock(dev);
568 return rc;
569 }
570 @@ -2529,7 +2540,10 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc,
571
572 if (rc < 0)
573 return rc;
574 - return cmd_rc;
575 + if (cmd_rc < 0)
576 + return cmd_rc;
577 + set_bit(ARS_VALID, &acpi_desc->scrub_flags);
578 + return 0;
579 }
580
581 static int ars_continue(struct acpi_nfit_desc *acpi_desc)
582 @@ -2539,11 +2553,11 @@ static int ars_continue(struct acpi_nfit_desc *acpi_desc)
583 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
584 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
585
586 - memset(&ars_start, 0, sizeof(ars_start));
587 - ars_start.address = ars_status->restart_address;
588 - ars_start.length = ars_status->restart_length;
589 - ars_start.type = ars_status->type;
590 - ars_start.flags = acpi_desc->ars_start_flags;
591 + ars_start = (struct nd_cmd_ars_start) {
592 + .address = ars_status->restart_address,
593 + .length = ars_status->restart_length,
594 + .type = ars_status->type,
595 + };
596 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
597 sizeof(ars_start), &cmd_rc);
598 if (rc < 0)
599 @@ -2622,6 +2636,17 @@ static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
600 */
601 if (ars_status->out_length < 44)
602 return 0;
603 +
604 + /*
605 + * Ignore potentially stale results that are only refreshed
606 + * after a start-ARS event.
607 + */
608 + if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
609 + dev_dbg(acpi_desc->dev, "skip %d stale records\n",
610 + ars_status->num_records);
611 + return 0;
612 + }
613 +
614 for (i = 0; i < ars_status->num_records; i++) {
615 /* only process full records */
616 if (ars_status->out_length
617 @@ -2960,7 +2985,7 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
618
619 lockdep_assert_held(&acpi_desc->init_mutex);
620
621 - if (acpi_desc->cancel)
622 + if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
623 return 0;
624
625 if (query_rc == -EBUSY) {
626 @@ -3034,7 +3059,7 @@ static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
627 {
628 lockdep_assert_held(&acpi_desc->init_mutex);
629
630 - acpi_desc->scrub_busy = 1;
631 + set_bit(ARS_BUSY, &acpi_desc->scrub_flags);
632 /* note this should only be set from within the workqueue */
633 if (tmo)
634 acpi_desc->scrub_tmo = tmo;
635 @@ -3050,7 +3075,7 @@ static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
636 {
637 lockdep_assert_held(&acpi_desc->init_mutex);
638
639 - acpi_desc->scrub_busy = 0;
640 + clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
641 acpi_desc->scrub_count++;
642 if (acpi_desc->scrub_count_state)
643 sysfs_notify_dirent(acpi_desc->scrub_count_state);
644 @@ -3071,6 +3096,7 @@ static void acpi_nfit_scrub(struct work_struct *work)
645 else
646 notify_ars_done(acpi_desc);
647 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
648 + clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
649 mutex_unlock(&acpi_desc->init_mutex);
650 }
651
652 @@ -3105,6 +3131,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
653 struct nfit_spa *nfit_spa;
654 int rc;
655
656 + set_bit(ARS_VALID, &acpi_desc->scrub_flags);
657 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
658 switch (nfit_spa_type(nfit_spa->spa)) {
659 case NFIT_SPA_VOLATILE:
660 @@ -3322,7 +3349,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
661 struct nfit_spa *nfit_spa;
662
663 mutex_lock(&acpi_desc->init_mutex);
664 - if (acpi_desc->cancel) {
665 + if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
666 mutex_unlock(&acpi_desc->init_mutex);
667 return 0;
668 }
669 @@ -3401,7 +3428,7 @@ void acpi_nfit_shutdown(void *data)
670 mutex_unlock(&acpi_desc_lock);
671
672 mutex_lock(&acpi_desc->init_mutex);
673 - acpi_desc->cancel = 1;
674 + set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
675 cancel_delayed_work_sync(&acpi_desc->dwork);
676 mutex_unlock(&acpi_desc->init_mutex);
677
678 diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
679 index 02c10de50386..68848fc4b7c9 100644
680 --- a/drivers/acpi/nfit/nfit.h
681 +++ b/drivers/acpi/nfit/nfit.h
682 @@ -181,6 +181,13 @@ struct nfit_mem {
683 bool has_lsw;
684 };
685
686 +enum scrub_flags {
687 + ARS_BUSY,
688 + ARS_CANCEL,
689 + ARS_VALID,
690 + ARS_POLL,
691 +};
692 +
693 struct acpi_nfit_desc {
694 struct nvdimm_bus_descriptor nd_desc;
695 struct acpi_table_header acpi_header;
696 @@ -194,7 +201,6 @@ struct acpi_nfit_desc {
697 struct list_head idts;
698 struct nvdimm_bus *nvdimm_bus;
699 struct device *dev;
700 - u8 ars_start_flags;
701 struct nd_cmd_ars_status *ars_status;
702 struct nfit_spa *scrub_spa;
703 struct delayed_work dwork;
704 @@ -203,8 +209,7 @@ struct acpi_nfit_desc {
705 unsigned int max_ars;
706 unsigned int scrub_count;
707 unsigned int scrub_mode;
708 - unsigned int scrub_busy:1;
709 - unsigned int cancel:1;
710 + unsigned long scrub_flags;
711 unsigned long dimm_cmd_force_en;
712 unsigned long bus_cmd_force_en;
713 unsigned long bus_nfit_cmd_force_en;
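
The header change replaces two single-bit C bitfields (scrub_busy:1, cancel:1) with one unsigned long driven through the atomic bitops; bitfield updates compile to non-atomic read-modify-write on the containing word, while set_bit()/clear_bit()/test_and_set_bit() are atomic per bit. A minimal sketch of the pattern, not driver code:

	#include <linux/bitops.h>
	#include <linux/errno.h>

	enum scrub_flags { ARS_BUSY, ARS_CANCEL, ARS_VALID, ARS_POLL };

	static unsigned long scrub_flags;

	static int example(void)
	{
		set_bit(ARS_BUSY, &scrub_flags);              /* atomic */
		if (test_and_set_bit(ARS_POLL, &scrub_flags))
			return 0;       /* someone already requested a poll */
		if (test_bit(ARS_CANCEL, &scrub_flags))
			return -EINTR;  /* illustrative error choice */
		clear_bit(ARS_BUSY, &scrub_flags);
		return 0;
	}
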
714 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
715 index d5f7a12e350e..3fb297b5fb17 100644
716 --- a/drivers/char/ipmi/ipmi_msghandler.c
717 +++ b/drivers/char/ipmi/ipmi_msghandler.c
718 @@ -213,6 +213,9 @@ struct ipmi_user {
719
720 /* Does this interface receive IPMI events? */
721 bool gets_events;
722 +
723 + /* Free must run in process context for RCU cleanup. */
724 + struct work_struct remove_work;
725 };
726
727 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
728 @@ -1078,6 +1081,15 @@ static int intf_err_seq(struct ipmi_smi *intf,
729 }
730
731
732 +static void free_user_work(struct work_struct *work)
733 +{
734 + struct ipmi_user *user = container_of(work, struct ipmi_user,
735 + remove_work);
736 +
737 + cleanup_srcu_struct(&user->release_barrier);
738 + kfree(user);
739 +}
740 +
741 int ipmi_create_user(unsigned int if_num,
742 const struct ipmi_user_hndl *handler,
743 void *handler_data,
744 @@ -1121,6 +1133,8 @@ int ipmi_create_user(unsigned int if_num,
745 goto out_kfree;
746
747 found:
748 + INIT_WORK(&new_user->remove_work, free_user_work);
749 +
750 rv = init_srcu_struct(&new_user->release_barrier);
751 if (rv)
752 goto out_kfree;
753 @@ -1183,8 +1197,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
754 static void free_user(struct kref *ref)
755 {
756 struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
757 - cleanup_srcu_struct(&user->release_barrier);
758 - kfree(user);
759 +
760 + /* SRCU cleanup must happen in task context. */
761 + schedule_work(&user->remove_work);
762 }
763
764 static void _ipmi_destroy_user(struct ipmi_user *user)
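
The motivation for the indirection: cleanup_srcu_struct() can block, but kref_put() may drop the last reference from a context where blocking is not allowed, so the release callback now only queues work and the teardown runs later in process context. The pattern reduced to its essentials (illustrative names, not the driver's types; the work item is assumed initialized with INIT_WORK at creation, as the patch does):

	#include <linux/kref.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct obj {
		struct kref refcount;
		struct work_struct remove_work;
	};

	static void obj_free_work(struct work_struct *work)
	{
		struct obj *o = container_of(work, struct obj, remove_work);

		/* blocking teardown (e.g. cleanup_srcu_struct()) is safe here */
		kfree(o);
	}

	static void obj_release(struct kref *ref)  /* may run in atomic context */
	{
		struct obj *o = container_of(ref, struct obj, refcount);

		schedule_work(&o->remove_work);    /* defer to process context */
	}
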
765 diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
766 index 1b8fa9de2cac..41b9f6c92da7 100644
767 --- a/drivers/char/tpm/eventlog/tpm2.c
768 +++ b/drivers/char/tpm/eventlog/tpm2.c
769 @@ -37,8 +37,8 @@
770 *
771 * Returns size of the event. If it is an invalid event, returns 0.
772 */
773 -static int calc_tpm2_event_size(struct tcg_pcr_event2 *event,
774 - struct tcg_pcr_event *event_header)
775 +static size_t calc_tpm2_event_size(struct tcg_pcr_event2 *event,
776 + struct tcg_pcr_event *event_header)
777 {
778 struct tcg_efi_specid_event *efispecid;
779 struct tcg_event_field *event_field;
780 diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
781 index 32a8e27c5382..cc4e642d3180 100644
782 --- a/drivers/char/tpm/tpm_i2c_atmel.c
783 +++ b/drivers/char/tpm/tpm_i2c_atmel.c
784 @@ -69,6 +69,10 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
785 if (status < 0)
786 return status;
787
788 + /* The upper layer does not support incomplete sends. */
789 + if (status != len)
790 + return -E2BIG;
791 +
792 return 0;
793 }
794
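The new check leans on the i2c_master_send() convention: a non-negative return is the number of bytes actually written, which may be less than the requested length, so a short write has to be turned into an explicit error. In sketch form, under that same assumption:

	#include <linux/i2c.h>

	static int send_all(struct i2c_client *client, const char *buf, int len)
	{
		int status = i2c_master_send(client, buf, len);

		if (status < 0)
			return status;   /* bus error: negative errno */
		if (status != len)
			return -E2BIG;   /* short write: fatal for this caller */
		return 0;
	}
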
795 diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
796 index e70a0d4d6db4..c963eec58c70 100644
797 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
798 +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
799 @@ -164,6 +164,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
800 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
801 L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
802 }
803 + WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
804
805 tmp = mmVM_L2_CNTL4_DEFAULT;
806 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
807 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
808 index f841accc2c00..f77c81db161b 100644
809 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
810 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
811 @@ -730,7 +730,8 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
812 }
813
814 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
815 - if (!(flags & TTM_PAGE_FLAG_DMA32)) {
816 + if (!(flags & TTM_PAGE_FLAG_DMA32) &&
817 + (npages - i) >= HPAGE_PMD_NR) {
818 for (j = 0; j < HPAGE_PMD_NR; ++j)
819 if (p++ != pages[i + j])
820 break;
821 @@ -759,7 +760,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
822 unsigned max_size, n2free;
823
824 spin_lock_irqsave(&huge->lock, irq_flags);
825 - while (i < npages) {
826 + while ((npages - i) >= HPAGE_PMD_NR) {
827 struct page *p = pages[i];
828 unsigned j;
829
830 diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
831 index 1d645c9ab417..cac262a912c1 100644
832 --- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
833 +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
834 @@ -337,7 +337,8 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
835 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
836 },
837 .driver_data = (void *)&sipodev_desc
838 - }
839 + },
840 + { } /* Terminate list */
841 };
842
843
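The one-line fix matters because DMI match tables are walked until an empty sentinel entry rather than by element count; without the terminator the walker reads past the end of the array. The shape of such a walk, sketched with generic names:

	#include <stddef.h>
	#include <string.h>

	struct id { const char *name; };

	static const struct id table[] = {
		{ "example-a" },
		{ "example-b" },
		{ }                 /* sentinel: the loop below stops here */
	};

	static const struct id *find(const char *n)
	{
		const struct id *p;

		for (p = table; p->name; p++)  /* runs off the end without {} */
			if (!strcmp(p->name, n))
				return p;
		return NULL;
	}
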
844 diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
845 index 471caa5323e4..e5fdca74a630 100644
846 --- a/drivers/iio/accel/kxcjk-1013.c
847 +++ b/drivers/iio/accel/kxcjk-1013.c
848 @@ -1437,6 +1437,8 @@ static int kxcjk1013_resume(struct device *dev)
849
850 mutex_lock(&data->mutex);
851 ret = kxcjk1013_set_mode(data, OPERATION);
852 + if (ret == 0)
853 + ret = kxcjk1013_set_range(data, data->range);
854 mutex_unlock(&data->mutex);
855
856 return ret;
857 diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
858 index fc9510716ac7..ae2a5097f449 100644
859 --- a/drivers/iio/adc/ad_sigma_delta.c
860 +++ b/drivers/iio/adc/ad_sigma_delta.c
861 @@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta,
862 if (sigma_delta->info->has_registers) {
863 data[0] = reg << sigma_delta->info->addr_shift;
864 data[0] |= sigma_delta->info->read_mask;
865 + data[0] |= sigma_delta->comm;
866 spi_message_add_tail(&t[0], &m);
867 }
868 spi_message_add_tail(&t[1], &m);
869 diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
870 index 75d2f73582a3..596841a3c4db 100644
871 --- a/drivers/iio/adc/at91_adc.c
872 +++ b/drivers/iio/adc/at91_adc.c
873 @@ -704,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev,
874 ret = wait_event_interruptible_timeout(st->wq_data_avail,
875 st->done,
876 msecs_to_jiffies(1000));
877 - if (ret == 0)
878 - ret = -ETIMEDOUT;
879 - if (ret < 0) {
880 - mutex_unlock(&st->lock);
881 - return ret;
882 - }
883 -
884 - *val = st->last_value;
885
886 + /* Disable interrupts, regardless of whether the adc
887 + * conversion was successful or not
888 + */
889 at91_adc_writel(st, AT91_ADC_CHDR,
890 AT91_ADC_CH(chan->channel));
891 at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
892
893 - st->last_value = 0;
894 - st->done = false;
895 + if (ret > 0) {
896 + /* a valid conversion took place */
897 + *val = st->last_value;
898 + st->last_value = 0;
899 + st->done = false;
900 + ret = IIO_VAL_INT;
901 + } else if (ret == 0) {
902 + /* conversion timeout */
903 + dev_err(&idev->dev, "ADC Channel %d timeout.\n",
904 + chan->channel);
905 + ret = -ETIMEDOUT;
906 + }
907 +
908 mutex_unlock(&st->lock);
909 - return IIO_VAL_INT;
910 + return ret;
911
912 case IIO_CHAN_INFO_SCALE:
913 *val = st->vref_mv;
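
The rework keys off the three-way return convention of wait_event_interruptible_timeout(): a positive value means the condition became true (the remaining jiffies), zero means timeout, and a negative value means a signal interrupted the wait; the interrupt-disable cleanup must happen on all three paths. Schematic fragment only (disable_channel_irqs() is a made-up stand-in for the two at91_adc_writel() calls):

	ret = wait_event_interruptible_timeout(st->wq_data_avail, st->done,
					       msecs_to_jiffies(1000));
	disable_channel_irqs(st);        /* needed on every path */
	if (ret > 0) {
		*val = st->last_value;   /* valid conversion */
		ret = IIO_VAL_INT;
	} else if (ret == 0) {
		ret = -ETIMEDOUT;        /* timed out */
	}
	/* ret < 0: a signal arrived; propagate -ERESTARTSYS as-is */
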
914 diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
915 index e049323f209a..71dd635fce2d 100644
916 --- a/drivers/iio/chemical/bme680.h
917 +++ b/drivers/iio/chemical/bme680.h
918 @@ -2,11 +2,9 @@
919 #ifndef BME680_H_
920 #define BME680_H_
921
922 -#define BME680_REG_CHIP_I2C_ID 0xD0
923 -#define BME680_REG_CHIP_SPI_ID 0x50
924 +#define BME680_REG_CHIP_ID 0xD0
925 #define BME680_CHIP_ID_VAL 0x61
926 -#define BME680_REG_SOFT_RESET_I2C 0xE0
927 -#define BME680_REG_SOFT_RESET_SPI 0x60
928 +#define BME680_REG_SOFT_RESET 0xE0
929 #define BME680_CMD_SOFTRESET 0xB6
930 #define BME680_REG_STATUS 0x73
931 #define BME680_SPI_MEM_PAGE_BIT BIT(4)
932 diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
933 index 7d9bb62baa3f..b2db59812755 100644
934 --- a/drivers/iio/chemical/bme680_core.c
935 +++ b/drivers/iio/chemical/bme680_core.c
936 @@ -63,9 +63,23 @@ struct bme680_data {
937 s32 t_fine;
938 };
939
940 +static const struct regmap_range bme680_volatile_ranges[] = {
941 + regmap_reg_range(BME680_REG_MEAS_STAT_0, BME680_REG_GAS_R_LSB),
942 + regmap_reg_range(BME680_REG_STATUS, BME680_REG_STATUS),
943 + regmap_reg_range(BME680_T2_LSB_REG, BME680_GH3_REG),
944 +};
945 +
946 +static const struct regmap_access_table bme680_volatile_table = {
947 + .yes_ranges = bme680_volatile_ranges,
948 + .n_yes_ranges = ARRAY_SIZE(bme680_volatile_ranges),
949 +};
950 +
951 const struct regmap_config bme680_regmap_config = {
952 .reg_bits = 8,
953 .val_bits = 8,
954 + .max_register = 0xef,
955 + .volatile_table = &bme680_volatile_table,
956 + .cache_type = REGCACHE_RBTREE,
957 };
958 EXPORT_SYMBOL(bme680_regmap_config);
959
960 @@ -330,6 +344,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
961 s64 var1, var2, var3;
962 s16 calc_temp;
963
964 + /* If the calibration is invalid, attempt to reload it */
965 + if (!calib->par_t2)
966 + bme680_read_calib(data, calib);
967 +
968 var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
969 var2 = (var1 * calib->par_t2) >> 11;
970 var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
971 @@ -591,8 +609,7 @@ static int bme680_gas_config(struct bme680_data *data)
972 return ret;
973 }
974
975 -static int bme680_read_temp(struct bme680_data *data,
976 - int *val, int *val2)
977 +static int bme680_read_temp(struct bme680_data *data, int *val)
978 {
979 struct device *dev = regmap_get_device(data->regmap);
980 int ret;
981 @@ -625,10 +642,9 @@ static int bme680_read_temp(struct bme680_data *data,
982 * compensate_press/compensate_humid to get compensated
983 * pressure/humidity readings.
984 */
985 - if (val && val2) {
986 - *val = comp_temp;
987 - *val2 = 100;
988 - return IIO_VAL_FRACTIONAL;
989 + if (val) {
990 + *val = comp_temp * 10; /* Centidegrees to millidegrees */
991 + return IIO_VAL_INT;
992 }
993
994 return ret;
995 @@ -643,7 +659,7 @@ static int bme680_read_press(struct bme680_data *data,
996 s32 adc_press;
997
998 /* Read and compensate temperature to get a reading of t_fine */
999 - ret = bme680_read_temp(data, NULL, NULL);
1000 + ret = bme680_read_temp(data, NULL);
1001 if (ret < 0)
1002 return ret;
1003
1004 @@ -676,7 +692,7 @@ static int bme680_read_humid(struct bme680_data *data,
1005 u32 comp_humidity;
1006
1007 /* Read and compensate temperature to get a reading of t_fine */
1008 - ret = bme680_read_temp(data, NULL, NULL);
1009 + ret = bme680_read_temp(data, NULL);
1010 if (ret < 0)
1011 return ret;
1012
1013 @@ -769,7 +785,7 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
1014 case IIO_CHAN_INFO_PROCESSED:
1015 switch (chan->type) {
1016 case IIO_TEMP:
1017 - return bme680_read_temp(data, val, val2);
1018 + return bme680_read_temp(data, val);
1019 case IIO_PRESSURE:
1020 return bme680_read_press(data, val, val2);
1021 case IIO_HUMIDITYRELATIVE:
1022 @@ -905,8 +921,28 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
1023 {
1024 struct iio_dev *indio_dev;
1025 struct bme680_data *data;
1026 + unsigned int val;
1027 int ret;
1028
1029 + ret = regmap_write(regmap, BME680_REG_SOFT_RESET,
1030 + BME680_CMD_SOFTRESET);
1031 + if (ret < 0) {
1032 + dev_err(dev, "Failed to reset chip\n");
1033 + return ret;
1034 + }
1035 +
1036 + ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val);
1037 + if (ret < 0) {
1038 + dev_err(dev, "Error reading chip ID\n");
1039 + return ret;
1040 + }
1041 +
1042 + if (val != BME680_CHIP_ID_VAL) {
1043 + dev_err(dev, "Wrong chip ID, got %x expected %x\n",
1044 + val, BME680_CHIP_ID_VAL);
1045 + return -ENODEV;
1046 + }
1047 +
1048 indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
1049 if (!indio_dev)
1050 return -ENOMEM;
1051 diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c
1052 index 06d4be539d2e..cfc4449edf1b 100644
1053 --- a/drivers/iio/chemical/bme680_i2c.c
1054 +++ b/drivers/iio/chemical/bme680_i2c.c
1055 @@ -23,8 +23,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
1056 {
1057 struct regmap *regmap;
1058 const char *name = NULL;
1059 - unsigned int val;
1060 - int ret;
1061
1062 regmap = devm_regmap_init_i2c(client, &bme680_regmap_config);
1063 if (IS_ERR(regmap)) {
1064 @@ -33,25 +31,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
1065 return PTR_ERR(regmap);
1066 }
1067
1068 - ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C,
1069 - BME680_CMD_SOFTRESET);
1070 - if (ret < 0) {
1071 - dev_err(&client->dev, "Failed to reset chip\n");
1072 - return ret;
1073 - }
1074 -
1075 - ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val);
1076 - if (ret < 0) {
1077 - dev_err(&client->dev, "Error reading I2C chip ID\n");
1078 - return ret;
1079 - }
1080 -
1081 - if (val != BME680_CHIP_ID_VAL) {
1082 - dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n",
1083 - val, BME680_CHIP_ID_VAL);
1084 - return -ENODEV;
1085 - }
1086 -
1087 if (id)
1088 name = id->name;
1089
1090 diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c
1091 index c9fb05e8d0b9..881778e55d38 100644
1092 --- a/drivers/iio/chemical/bme680_spi.c
1093 +++ b/drivers/iio/chemical/bme680_spi.c
1094 @@ -11,28 +11,93 @@
1095
1096 #include "bme680.h"
1097
1098 +struct bme680_spi_bus_context {
1099 + struct spi_device *spi;
1100 + u8 current_page;
1101 +};
1102 +
1103 +/*
1104 + * In SPI mode there are only 7 address bits; a "page" register determines
1105 + * which part of the 8-bit address range is active. This function looks at
1106 + * the address and writes the page selection bit if needed.
1107 + */
1108 +static int bme680_regmap_spi_select_page(
1109 + struct bme680_spi_bus_context *ctx, u8 reg)
1110 +{
1111 + struct spi_device *spi = ctx->spi;
1112 + int ret;
1113 + u8 buf[2];
1114 + u8 page = (reg & 0x80) ? 0 : 1; /* Page "1" is low range */
1115 +
1116 + if (page == ctx->current_page)
1117 + return 0;
1118 +
1119 + /*
1120 + * Data sheet claims we're only allowed to change bit 4, so we must do
1121 + * a read-modify-write on each and every page select
1122 + */
1123 + buf[0] = BME680_REG_STATUS;
1124 + ret = spi_write_then_read(spi, buf, 1, buf + 1, 1);
1125 + if (ret < 0) {
1126 + dev_err(&spi->dev, "failed to set page %u\n", page);
1127 + return ret;
1128 + }
1129 +
1130 + buf[0] = BME680_REG_STATUS;
1131 + if (page)
1132 + buf[1] |= BME680_SPI_MEM_PAGE_BIT;
1133 + else
1134 + buf[1] &= ~BME680_SPI_MEM_PAGE_BIT;
1135 +
1136 + ret = spi_write(spi, buf, 2);
1137 + if (ret < 0) {
1138 + dev_err(&spi->dev, "failed to set page %u\n", page);
1139 + return ret;
1140 + }
1141 +
1142 + ctx->current_page = page;
1143 +
1144 + return 0;
1145 +}
1146 +
1147 static int bme680_regmap_spi_write(void *context, const void *data,
1148 size_t count)
1149 {
1150 - struct spi_device *spi = context;
1151 + struct bme680_spi_bus_context *ctx = context;
1152 + struct spi_device *spi = ctx->spi;
1153 + int ret;
1154 u8 buf[2];
1155
1156 memcpy(buf, data, 2);
1157 +
1158 + ret = bme680_regmap_spi_select_page(ctx, buf[0]);
1159 + if (ret)
1160 + return ret;
1161 +
1162 /*
1163 * The SPI register address (= full register address without bit 7)
1164 * and the write command (bit7 = RW = '0')
1165 */
1166 buf[0] &= ~0x80;
1167
1168 - return spi_write_then_read(spi, buf, 2, NULL, 0);
1169 + return spi_write(spi, buf, 2);
1170 }
1171
1172 static int bme680_regmap_spi_read(void *context, const void *reg,
1173 size_t reg_size, void *val, size_t val_size)
1174 {
1175 - struct spi_device *spi = context;
1176 + struct bme680_spi_bus_context *ctx = context;
1177 + struct spi_device *spi = ctx->spi;
1178 + int ret;
1179 + u8 addr = *(const u8 *)reg;
1180 +
1181 + ret = bme680_regmap_spi_select_page(ctx, addr);
1182 + if (ret)
1183 + return ret;
1184
1185 - return spi_write_then_read(spi, reg, reg_size, val, val_size);
1186 + addr |= 0x80; /* bit7 = RW = '1' */
1187 +
1188 + return spi_write_then_read(spi, &addr, 1, val, val_size);
1189 }
1190
1191 static struct regmap_bus bme680_regmap_bus = {
1192 @@ -45,8 +110,8 @@ static struct regmap_bus bme680_regmap_bus = {
1193 static int bme680_spi_probe(struct spi_device *spi)
1194 {
1195 const struct spi_device_id *id = spi_get_device_id(spi);
1196 + struct bme680_spi_bus_context *bus_context;
1197 struct regmap *regmap;
1198 - unsigned int val;
1199 int ret;
1200
1201 spi->bits_per_word = 8;
1202 @@ -56,45 +121,21 @@ static int bme680_spi_probe(struct spi_device *spi)
1203 return ret;
1204 }
1205
1206 + bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL);
1207 + if (!bus_context)
1208 + return -ENOMEM;
1209 +
1210 + bus_context->spi = spi;
1211 + bus_context->current_page = 0xff; /* Undefined on warm boot */
1212 +
1213 regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus,
1214 - &spi->dev, &bme680_regmap_config);
1215 + bus_context, &bme680_regmap_config);
1216 if (IS_ERR(regmap)) {
1217 dev_err(&spi->dev, "Failed to register spi regmap %d\n",
1218 (int)PTR_ERR(regmap));
1219 return PTR_ERR(regmap);
1220 }
1221
1222 - ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI,
1223 - BME680_CMD_SOFTRESET);
1224 - if (ret < 0) {
1225 - dev_err(&spi->dev, "Failed to reset chip\n");
1226 - return ret;
1227 - }
1228 -
1229 - /* after power-on reset, Page 0(0x80-0xFF) of spi_mem_page is active */
1230 - ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val);
1231 - if (ret < 0) {
1232 - dev_err(&spi->dev, "Error reading SPI chip ID\n");
1233 - return ret;
1234 - }
1235 -
1236 - if (val != BME680_CHIP_ID_VAL) {
1237 - dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n",
1238 - val, BME680_CHIP_ID_VAL);
1239 - return -ENODEV;
1240 - }
1241 - /*
1242 - * select Page 1 of spi_mem_page to enable access to
1243 - * to registers from address 0x00 to 0x7F.
1244 - */
1245 - ret = regmap_write_bits(regmap, BME680_REG_STATUS,
1246 - BME680_SPI_MEM_PAGE_BIT,
1247 - BME680_SPI_MEM_PAGE_1_VAL);
1248 - if (ret < 0) {
1249 - dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n");
1250 - return ret;
1251 - }
1252 -
1253 return bme680_core_probe(&spi->dev, regmap, id->name);
1254 }
1255
1256 diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
1257 index 89cb0066a6e0..8d76afb87d87 100644
1258 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
1259 +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
1260 @@ -103,9 +103,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
1261 * Do not use IIO_DEGREE_TO_RAD to avoid precision
1262 * loss. Round to the nearest integer.
1263 */
1264 - *val = div_s64(val64 * 314159 + 9000000ULL, 1000);
1265 - *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
1266 - ret = IIO_VAL_FRACTIONAL;
1267 + *val = 0;
1268 + *val2 = div_s64(val64 * 3141592653ULL,
1269 + 180 << (CROS_EC_SENSOR_BITS - 1));
1270 + ret = IIO_VAL_INT_PLUS_NANO;
1271 break;
1272 case MOTIONSENSE_TYPE_MAG:
1273 /*
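
To see the precision argument, work the new expression through once, assuming CROS_EC_SENSOR_BITS is 16 (my assumption; the constant is not visible in this hunk):

	/* Worked example, assuming CROS_EC_SENSOR_BITS == 16:
	 *
	 *   val2 = raw * 3141592653 / (180 << 15)
	 *        = raw * 3141592653 / 5898240
	 *
	 * one LSB is pi / (180 * 32768) ~= 5.33e-7 rad, reported as
	 * val = 0, val2 ~= 532 nano-rad via IIO_VAL_INT_PLUS_NANO.
	 * The old path approximated pi as 3.14159 (314159 / 1000 in the
	 * numerator) before dividing, discarding the remaining digits. */
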
1274 diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
1275 index 8b5aad4c32d9..30dc2775cbfb 100644
1276 --- a/drivers/iio/dac/mcp4725.c
1277 +++ b/drivers/iio/dac/mcp4725.c
1278 @@ -98,6 +98,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
1279
1280 inoutbuf[0] = 0x60; /* write EEPROM */
1281 inoutbuf[0] |= data->ref_mode << 3;
1282 + inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0;
1283 inoutbuf[1] = data->dac_value >> 4;
1284 inoutbuf[2] = (data->dac_value & 0xf) << 4;
1285
1286 diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
1287 index 63ca31628a93..92c07ab826eb 100644
1288 --- a/drivers/iio/gyro/bmg160_core.c
1289 +++ b/drivers/iio/gyro/bmg160_core.c
1290 @@ -582,11 +582,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
1291 case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
1292 return bmg160_get_filter(data, val);
1293 case IIO_CHAN_INFO_SCALE:
1294 - *val = 0;
1295 switch (chan->type) {
1296 case IIO_TEMP:
1297 - *val2 = 500000;
1298 - return IIO_VAL_INT_PLUS_MICRO;
1299 + *val = 500;
1300 + return IIO_VAL_INT;
1301 case IIO_ANGL_VEL:
1302 {
1303 int i;
1304 @@ -594,6 +593,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
1305 for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
1306 if (bmg160_scale_table[i].dps_range ==
1307 data->dps_range) {
1308 + *val = 0;
1309 *val2 = bmg160_scale_table[i].scale;
1310 return IIO_VAL_INT_PLUS_MICRO;
1311 }
1312 diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
1313 index 77fac81a3adc..5ddebede31a6 100644
1314 --- a/drivers/iio/gyro/mpu3050-core.c
1315 +++ b/drivers/iio/gyro/mpu3050-core.c
1316 @@ -29,7 +29,8 @@
1317
1318 #include "mpu3050.h"
1319
1320 -#define MPU3050_CHIP_ID 0x69
1321 +#define MPU3050_CHIP_ID 0x68
1322 +#define MPU3050_CHIP_ID_MASK 0x7E
1323
1324 /*
1325 * Register map: anything suffixed *_H is a big-endian high byte and always
1326 @@ -1176,8 +1177,9 @@ int mpu3050_common_probe(struct device *dev,
1327 goto err_power_down;
1328 }
1329
1330 - if (val != MPU3050_CHIP_ID) {
1331 - dev_err(dev, "unsupported chip id %02x\n", (u8)val);
1332 + if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) {
1333 + dev_err(dev, "unsupported chip id %02x\n",
1334 + (u8)(val & MPU3050_CHIP_ID_MASK));
1335 ret = -ENODEV;
1336 goto err_power_down;
1337 }
1338 diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
1339 index cd5bfe39591b..dadd921a4a30 100644
1340 --- a/drivers/iio/industrialio-buffer.c
1341 +++ b/drivers/iio/industrialio-buffer.c
1342 @@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
1343 const unsigned long *mask;
1344 unsigned long *trialmask;
1345
1346 - trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
1347 - sizeof(*trialmask),
1348 - GFP_KERNEL);
1349 + trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
1350 + sizeof(*trialmask), GFP_KERNEL);
1351 if (trialmask == NULL)
1352 return -ENOMEM;
1353 if (!indio_dev->masklength) {
1354 diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
1355 index a062cfddc5af..49d4b4f1a457 100644
1356 --- a/drivers/iio/industrialio-core.c
1357 +++ b/drivers/iio/industrialio-core.c
1358 @@ -1735,10 +1735,10 @@ EXPORT_SYMBOL(__iio_device_register);
1359 **/
1360 void iio_device_unregister(struct iio_dev *indio_dev)
1361 {
1362 - mutex_lock(&indio_dev->info_exist_lock);
1363 -
1364 cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
1365
1366 + mutex_lock(&indio_dev->info_exist_lock);
1367 +
1368 iio_device_unregister_debugfs(indio_dev);
1369
1370 iio_disable_all_buffers(indio_dev);
1371 diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
1372 index 628ef617bb2f..f9525d6f0bfe 100644
1373 --- a/drivers/input/mouse/elan_i2c_core.c
1374 +++ b/drivers/input/mouse/elan_i2c_core.c
1375 @@ -1339,21 +1339,46 @@ static const struct acpi_device_id elan_acpi_id[] = {
1376 { "ELAN0600", 0 },
1377 { "ELAN0601", 0 },
1378 { "ELAN0602", 0 },
1379 + { "ELAN0603", 0 },
1380 + { "ELAN0604", 0 },
1381 { "ELAN0605", 0 },
1382 + { "ELAN0606", 0 },
1383 + { "ELAN0607", 0 },
1384 { "ELAN0608", 0 },
1385 { "ELAN0609", 0 },
1386 { "ELAN060B", 0 },
1387 { "ELAN060C", 0 },
1388 + { "ELAN060F", 0 },
1389 + { "ELAN0610", 0 },
1390 { "ELAN0611", 0 },
1391 { "ELAN0612", 0 },
1392 + { "ELAN0615", 0 },
1393 + { "ELAN0616", 0 },
1394 { "ELAN0617", 0 },
1395 { "ELAN0618", 0 },
1396 + { "ELAN0619", 0 },
1397 + { "ELAN061A", 0 },
1398 + { "ELAN061B", 0 },
1399 { "ELAN061C", 0 },
1400 { "ELAN061D", 0 },
1401 { "ELAN061E", 0 },
1402 + { "ELAN061F", 0 },
1403 { "ELAN0620", 0 },
1404 { "ELAN0621", 0 },
1405 { "ELAN0622", 0 },
1406 + { "ELAN0623", 0 },
1407 + { "ELAN0624", 0 },
1408 + { "ELAN0625", 0 },
1409 + { "ELAN0626", 0 },
1410 + { "ELAN0627", 0 },
1411 + { "ELAN0628", 0 },
1412 + { "ELAN0629", 0 },
1413 + { "ELAN062A", 0 },
1414 + { "ELAN062B", 0 },
1415 + { "ELAN062C", 0 },
1416 + { "ELAN062D", 0 },
1417 + { "ELAN0631", 0 },
1418 + { "ELAN0632", 0 },
1419 { "ELAN1000", 0 },
1420 { }
1421 };
1422 diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
1423 index 8dae12b841b3..629860f7327c 100644
1424 --- a/drivers/mmc/host/sdhci-esdhc-imx.c
1425 +++ b/drivers/mmc/host/sdhci-esdhc-imx.c
1426 @@ -429,7 +429,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
1427 val = readl(host->ioaddr + ESDHC_MIX_CTRL);
1428 else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
1429 /* the std tuning bits is in ACMD12_ERR for imx6sl */
1430 - val = readl(host->ioaddr + SDHCI_ACMD12_ERR);
1431 + val = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
1432 }
1433
1434 if (val & ESDHC_MIX_CTRL_EXE_TUNE)
1435 @@ -494,7 +494,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
1436 }
1437 writel(new_val , host->ioaddr + ESDHC_MIX_CTRL);
1438 } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
1439 - u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
1440 + u32 v = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
1441 u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
1442 if (val & SDHCI_CTRL_TUNED_CLK) {
1443 v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
1444 @@ -512,7 +512,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
1445 v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
1446 }
1447
1448 - writel(v, host->ioaddr + SDHCI_ACMD12_ERR);
1449 + writel(v, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
1450 writel(m, host->ioaddr + ESDHC_MIX_CTRL);
1451 }
1452 return;
1453 @@ -957,9 +957,9 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
1454 writel(ctrl, host->ioaddr + ESDHC_MIX_CTRL);
1455 writel(0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
1456 } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
1457 - ctrl = readl(host->ioaddr + SDHCI_ACMD12_ERR);
1458 + ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
1459 ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
1460 - writel(ctrl, host->ioaddr + SDHCI_ACMD12_ERR);
1461 + writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
1462 }
1463 }
1464 }
1465 @@ -1319,7 +1319,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
1466
1467 /* clear tuning bits in case ROM has set it already */
1468 writel(0x0, host->ioaddr + ESDHC_MIX_CTRL);
1469 - writel(0x0, host->ioaddr + SDHCI_ACMD12_ERR);
1470 + writel(0x0, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
1471 writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
1472 }
1473
1474 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
1475 index 654051e00117..c749d3dc1d36 100644
1476 --- a/drivers/mmc/host/sdhci.c
1477 +++ b/drivers/mmc/host/sdhci.c
1478 @@ -82,8 +82,8 @@ void sdhci_dumpregs(struct sdhci_host *host)
1479 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
1480 sdhci_readl(host, SDHCI_INT_ENABLE),
1481 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
1482 - SDHCI_DUMP("AC12 err: 0x%08x | Slot int: 0x%08x\n",
1483 - sdhci_readw(host, SDHCI_ACMD12_ERR),
1484 + SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
1485 + sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
1486 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
1487 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
1488 sdhci_readl(host, SDHCI_CAPABILITIES),
1489 @@ -841,6 +841,11 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
1490 else
1491 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
1492
1493 + if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
1494 + host->ier |= SDHCI_INT_AUTO_CMD_ERR;
1495 + else
1496 + host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
1497 +
1498 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1499 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1500 }
1501 @@ -1078,8 +1083,7 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
1502 return (!(host->flags & SDHCI_DEVICE_DEAD) &&
1503 ((mrq->cmd && mrq->cmd->error) ||
1504 (mrq->sbc && mrq->sbc->error) ||
1505 - (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
1506 - (mrq->data->stop && mrq->data->stop->error))) ||
1507 + (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
1508 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
1509 }
1510
1511 @@ -1131,6 +1135,16 @@ static void sdhci_finish_data(struct sdhci_host *host)
1512 host->data = NULL;
1513 host->data_cmd = NULL;
1514
1515 + /*
1516 + * The controller needs a reset of internal state machines upon error
1517 + * conditions.
1518 + */
1519 + if (data->error) {
1520 + if (!host->cmd || host->cmd == data_cmd)
1521 + sdhci_do_reset(host, SDHCI_RESET_CMD);
1522 + sdhci_do_reset(host, SDHCI_RESET_DATA);
1523 + }
1524 +
1525 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1526 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1527 sdhci_adma_table_post(host, data);
1528 @@ -1155,17 +1169,6 @@ static void sdhci_finish_data(struct sdhci_host *host)
1529 if (data->stop &&
1530 (data->error ||
1531 !data->mrq->sbc)) {
1532 -
1533 - /*
1534 - * The controller needs a reset of internal state machines
1535 - * upon error conditions.
1536 - */
1537 - if (data->error) {
1538 - if (!host->cmd || host->cmd == data_cmd)
1539 - sdhci_do_reset(host, SDHCI_RESET_CMD);
1540 - sdhci_do_reset(host, SDHCI_RESET_DATA);
1541 - }
1542 -
1543 /*
1544 * 'cap_cmd_during_tfr' request must not use the command line
1545 * after mmc_command_done() has been called. It is upper layer's
1546 @@ -2642,8 +2645,23 @@ static void sdhci_timeout_data_timer(struct timer_list *t)
1547 * *
1548 \*****************************************************************************/
1549
1550 -static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1551 +static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
1552 {
1553 + /* Handle auto-CMD12 error */
1554 + if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
1555 + struct mmc_request *mrq = host->data_cmd->mrq;
1556 + u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
1557 + int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
1558 + SDHCI_INT_DATA_TIMEOUT :
1559 + SDHCI_INT_DATA_CRC;
1560 +
1561 + /* Treat auto-CMD12 error the same as data error */
1562 + if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
1563 + *intmask_p |= data_err_bit;
1564 + return;
1565 + }
1566 + }
1567 +
1568 if (!host->cmd) {
1569 /*
1570 * SDHCI recovers from errors by resetting the cmd and data
1571 @@ -2665,20 +2683,12 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1572 else
1573 host->cmd->error = -EILSEQ;
1574
1575 - /*
1576 - * If this command initiates a data phase and a response
1577 - * CRC error is signalled, the card can start transferring
1578 - * data - the card may have received the command without
1579 - * error. We must not terminate the mmc_request early.
1580 - *
1581 - * If the card did not receive the command or returned an
1582 - * error which prevented it sending data, the data phase
1583 - * will time out.
1584 - */
1585 + /* Treat data command CRC error the same as data CRC error */
1586 if (host->cmd->data &&
1587 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
1588 SDHCI_INT_CRC) {
1589 host->cmd = NULL;
1590 + *intmask_p |= SDHCI_INT_DATA_CRC;
1591 return;
1592 }
1593
1594 @@ -2686,6 +2696,21 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1595 return;
1596 }
1597
1598 + /* Handle auto-CMD23 error */
1599 + if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
1600 + struct mmc_request *mrq = host->cmd->mrq;
1601 + u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
1602 + int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
1603 + -ETIMEDOUT :
1604 + -EILSEQ;
1605 +
1606 + if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
1607 + mrq->sbc->error = err;
1608 + sdhci_finish_mrq(host, mrq);
1609 + return;
1610 + }
1611 + }
1612 +
1613 if (intmask & SDHCI_INT_RESPONSE)
1614 sdhci_finish_command(host);
1615 }
1616 @@ -2906,7 +2931,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
1617 }
1618
1619 if (intmask & SDHCI_INT_CMD_MASK)
1620 - sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
1621 + sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
1622
1623 if (intmask & SDHCI_INT_DATA_MASK)
1624 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
1625 diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
1626 index f0bd36ce3817..0f8c4f3ccafc 100644
1627 --- a/drivers/mmc/host/sdhci.h
1628 +++ b/drivers/mmc/host/sdhci.h
1629 @@ -144,14 +144,15 @@
1630 #define SDHCI_INT_DATA_CRC 0x00200000
1631 #define SDHCI_INT_DATA_END_BIT 0x00400000
1632 #define SDHCI_INT_BUS_POWER 0x00800000
1633 -#define SDHCI_INT_ACMD12ERR 0x01000000
1634 +#define SDHCI_INT_AUTO_CMD_ERR 0x01000000
1635 #define SDHCI_INT_ADMA_ERROR 0x02000000
1636
1637 #define SDHCI_INT_NORMAL_MASK 0x00007FFF
1638 #define SDHCI_INT_ERROR_MASK 0xFFFF8000
1639
1640 #define SDHCI_INT_CMD_MASK (SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
1641 - SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX)
1642 + SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX | \
1643 + SDHCI_INT_AUTO_CMD_ERR)
1644 #define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
1645 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
1646 SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
1647 @@ -166,7 +167,11 @@
1648
1649 #define SDHCI_CQE_INT_MASK (SDHCI_CQE_INT_ERR_MASK | SDHCI_INT_CQE)
1650
1651 -#define SDHCI_ACMD12_ERR 0x3C
1652 +#define SDHCI_AUTO_CMD_STATUS 0x3C
1653 +#define SDHCI_AUTO_CMD_TIMEOUT 0x00000002
1654 +#define SDHCI_AUTO_CMD_CRC 0x00000004
1655 +#define SDHCI_AUTO_CMD_END_BIT 0x00000008
1656 +#define SDHCI_AUTO_CMD_INDEX 0x00000010
1657
1658 #define SDHCI_HOST_CONTROL2 0x3E
1659 #define SDHCI_CTRL_UHS_MASK 0x0007
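
Editor's sketch: the sdhci hunks above route auto-CMD12/CMD23 failures into the existing command/data error machinery. The auto-CMD status register says whether the auto command timed out, and that choice becomes either a data-timeout or a data-CRC interrupt bit OR-ed into the caller's intmask. A minimal userspace sketch of that mapping; SDHCI_INT_DATA_TIMEOUT's value is not shown in the hunk and is restated here from sdhci.h, so treat it as an assumption.

    #include <stdio.h>

    #define SDHCI_AUTO_CMD_TIMEOUT 0x00000002  /* from SDHCI_AUTO_CMD_STATUS above */
    #define SDHCI_INT_DATA_TIMEOUT 0x00100000  /* assumed, restated from sdhci.h */
    #define SDHCI_INT_DATA_CRC     0x00200000  /* from the hunk above */

    /* Mirrors sdhci_cmd_irq(): a timed-out auto command is reported to
     * the data path as a data timeout, every other auto-CMD failure
     * (CRC, end bit, index) as a data CRC error. */
    static unsigned int auto_cmd_to_data_err(unsigned short auto_cmd_status)
    {
        return (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
               SDHCI_INT_DATA_TIMEOUT : SDHCI_INT_DATA_CRC;
    }

    int main(void)
    {
        printf("timeout -> 0x%08x\n", auto_cmd_to_data_err(0x0002));
        printf("crc     -> 0x%08x\n", auto_cmd_to_data_err(0x0004));
        return 0;
    }
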
1660 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1661 index b2c42cae3081..091b454e83fc 100644
1662 --- a/drivers/net/bonding/bond_main.c
1663 +++ b/drivers/net/bonding/bond_main.c
1664 @@ -3198,8 +3198,12 @@ static int bond_netdev_event(struct notifier_block *this,
1665 return NOTIFY_DONE;
1666
1667 if (event_dev->flags & IFF_MASTER) {
1668 + int ret;
1669 +
1670 netdev_dbg(event_dev, "IFF_MASTER\n");
1671 - return bond_master_netdev_event(event, event_dev);
1672 + ret = bond_master_netdev_event(event, event_dev);
1673 + if (ret != NOTIFY_DONE)
1674 + return ret;
1675 }
1676
1677 if (event_dev->flags & IFF_SLAVE) {
1678 diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
1679 index 9800738448ec..dca02b35c231 100644
1680 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
1681 +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
1682 @@ -32,6 +32,13 @@
1683 #define DRV_NAME "nicvf"
1684 #define DRV_VERSION "1.0"
1685
1686 +/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
1687 + * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
1688 + * this value, keeping headroom for the 14 byte Ethernet header and two
1689 + * VLAN tags (for QinQ)
1690 + */
1691 +#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)
1692 +
1693 /* Supported devices */
1694 static const struct pci_device_id nicvf_id_table[] = {
1695 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
1696 @@ -1547,6 +1554,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
1697 struct nicvf *nic = netdev_priv(netdev);
1698 int orig_mtu = netdev->mtu;
1699
1700 + /* For now just support only the usual MTU sized frames,
1701 + * plus some headroom for VLAN, QinQ.
1702 + */
1703 + if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
1704 + netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
1705 + netdev->mtu);
1706 + return -EINVAL;
1707 + }
1708 +
1709 netdev->mtu = new_mtu;
1710
1711 if (!netif_running(netdev))
1712 @@ -1795,8 +1811,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
1713 bool bpf_attached = false;
1714 int ret = 0;
1715
1716 - /* For now just support only the usual MTU sized frames */
1717 - if (prog && (dev->mtu > 1500)) {
1718 + /* For now just support only the usual MTU sized frames,
1719 + * plus some headroom for VLAN, QinQ.
1720 + */
1721 + if (prog && dev->mtu > MAX_XDP_MTU) {
1722 netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
1723 dev->mtu);
1724 return -EOPNOTSUPP;
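
With the standard ETH_HLEN of 14 and VLAN_HLEN of 4 (restated locally below, so this is a sketch rather than driver code), the new nicvf bound works out to 1508 bytes:

    #include <stdio.h>

    #define ETH_HLEN  14 /* Ethernet header */
    #define VLAN_HLEN 4  /* one 802.1Q tag; two allowed for QinQ */
    #define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)

    int main(void)
    {
        /* 1530 - 14 - 2*4 = 1508: the largest MTU the driver now
         * accepts while an XDP program is attached. */
        printf("MAX_XDP_MTU = %d\n", MAX_XDP_MTU);
        return 0;
    }
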
1725 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
1726 index 8de64e88c670..22a2ef111514 100644
1727 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
1728 +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
1729 @@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
1730 return ret;
1731 }
1732
1733 -static void mlx5_fpga_tls_release_swid(struct idr *idr,
1734 - spinlock_t *idr_spinlock, u32 swid)
1735 +static void *mlx5_fpga_tls_release_swid(struct idr *idr,
1736 + spinlock_t *idr_spinlock, u32 swid)
1737 {
1738 unsigned long flags;
1739 + void *ptr;
1740
1741 spin_lock_irqsave(idr_spinlock, flags);
1742 - idr_remove(idr, swid);
1743 + ptr = idr_remove(idr, swid);
1744 spin_unlock_irqrestore(idr_spinlock, flags);
1745 + return ptr;
1746 }
1747
1748 static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
1749 @@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
1750 kfree(buf);
1751 }
1752
1753 -struct mlx5_teardown_stream_context {
1754 - struct mlx5_fpga_tls_command_context cmd;
1755 - u32 swid;
1756 -};
1757 -
1758 static void
1759 mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
1760 struct mlx5_fpga_device *fdev,
1761 struct mlx5_fpga_tls_command_context *cmd,
1762 struct mlx5_fpga_dma_buf *resp)
1763 {
1764 - struct mlx5_teardown_stream_context *ctx =
1765 - container_of(cmd, struct mlx5_teardown_stream_context, cmd);
1766 -
1767 if (resp) {
1768 u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
1769
1770 @@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
1771 mlx5_fpga_err(fdev,
1772 "Teardown stream failed with syndrome = %d",
1773 syndrome);
1774 - else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
1775 - mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
1776 - &fdev->tls->tx_idr_spinlock,
1777 - ctx->swid);
1778 - else
1779 - mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
1780 - &fdev->tls->rx_idr_spinlock,
1781 - ctx->swid);
1782 }
1783 mlx5_fpga_tls_put_command_ctx(cmd);
1784 }
1785 @@ -217,22 +203,22 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
1786 void *cmd;
1787 int ret;
1788
1789 - rcu_read_lock();
1790 - flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
1791 - rcu_read_unlock();
1792 -
1793 - if (!flow) {
1794 - WARN_ONCE(1, "Received NULL pointer for handle\n");
1795 - return -EINVAL;
1796 - }
1797 -
1798 buf = kzalloc(size, GFP_ATOMIC);
1799 if (!buf)
1800 return -ENOMEM;
1801
1802 cmd = (buf + 1);
1803
1804 + rcu_read_lock();
1805 + flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
1806 + if (unlikely(!flow)) {
1807 + rcu_read_unlock();
1808 + WARN_ONCE(1, "Received NULL pointer for handle\n");
1809 + kfree(buf);
1810 + return -EINVAL;
1811 + }
1812 mlx5_fpga_tls_flow_to_cmd(flow, cmd);
1813 + rcu_read_unlock();
1814
1815 MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
1816 MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
1817 @@ -253,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
1818 static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
1819 void *flow, u32 swid, gfp_t flags)
1820 {
1821 - struct mlx5_teardown_stream_context *ctx;
1822 + struct mlx5_fpga_tls_command_context *ctx;
1823 struct mlx5_fpga_dma_buf *buf;
1824 void *cmd;
1825
1826 @@ -261,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
1827 if (!ctx)
1828 return;
1829
1830 - buf = &ctx->cmd.buf;
1831 + buf = &ctx->buf;
1832 cmd = (ctx + 1);
1833 MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
1834 MLX5_SET(tls_cmd, cmd, swid, swid);
1835 @@ -272,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
1836 buf->sg[0].data = cmd;
1837 buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
1838
1839 - ctx->swid = swid;
1840 - mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
1841 + mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
1842 mlx5_fpga_tls_teardown_completion);
1843 }
1844
1845 @@ -283,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
1846 struct mlx5_fpga_tls *tls = mdev->fpga->tls;
1847 void *flow;
1848
1849 - rcu_read_lock();
1850 if (direction_sx)
1851 - flow = idr_find(&tls->tx_idr, swid);
1852 + flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
1853 + &tls->tx_idr_spinlock,
1854 + swid);
1855 else
1856 - flow = idr_find(&tls->rx_idr, swid);
1857 -
1858 - rcu_read_unlock();
1859 + flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
1860 + &tls->rx_idr_spinlock,
1861 + swid);
1862
1863 if (!flow) {
1864 mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
1865 @@ -297,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
1866 return;
1867 }
1868
1869 + synchronize_rcu(); /* before kfree(flow) */
1870 mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
1871 }
1872
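
The mlx5 rework above is the canonical RCU teardown ordering: unpublish the object from the IDR under the spinlock, let synchronize_rcu() flush out any readers that found it via idr_find(), and only then hand the memory on. A kernel-style sketch of the pattern with a hypothetical my_obj type (not buildable outside a kernel tree):

    #include <linux/idr.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    /* Hypothetical object looked up by RCU readers through the IDR. */
    struct my_obj {
        int id;
    };

    static void my_obj_del(struct idr *idr, spinlock_t *lock, u32 id)
    {
        struct my_obj *obj;
        unsigned long flags;

        /* 1. Unpublish: new readers can no longer find the object. */
        spin_lock_irqsave(lock, flags);
        obj = idr_remove(idr, id);
        spin_unlock_irqrestore(lock, flags);

        if (!obj)
            return;

        /* 2. Wait out pre-existing rcu_read_lock() sections that may
         *    still hold a pointer obtained from idr_find(). */
        synchronize_rcu();

        /* 3. Now the object is invisible and unreferenced. */
        kfree(obj);
    }
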
1873 diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
1874 index 7a1e9cd9cc62..777b99416062 100644
1875 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c
1876 +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
1877 @@ -80,8 +80,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
1878
1879 tmp_push_vlan_tci =
1880 FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
1881 - FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
1882 - NFP_FL_PUSH_VLAN_CFI;
1883 + FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action));
1884 push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
1885 }
1886
1887 diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
1888 index 325954b829c8..9b018321e24e 100644
1889 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
1890 +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
1891 @@ -55,7 +55,7 @@
1892 #define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
1893
1894 #define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
1895 -#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
1896 +#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12)
1897 #define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
1898
1899 #define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
1900 @@ -109,7 +109,6 @@
1901 #define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0)
1902
1903 #define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
1904 -#define NFP_FL_PUSH_VLAN_CFI BIT(12)
1905 #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
1906
1907 /* LAG ports */
1908 diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
1909 index 17acb8cc6044..b99d55cf81f1 100644
1910 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c
1911 +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
1912 @@ -56,14 +56,12 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
1913 FLOW_DISSECTOR_KEY_VLAN,
1914 target);
1915 /* Populate the tci field. */
1916 - if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
1917 - tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
1918 - flow_vlan->vlan_priority) |
1919 - FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
1920 - flow_vlan->vlan_id) |
1921 - NFP_FLOWER_MASK_VLAN_CFI;
1922 - frame->tci = cpu_to_be16(tmp_tci);
1923 - }
1924 + tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
1925 + tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
1926 + flow_vlan->vlan_priority) |
1927 + FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
1928 + flow_vlan->vlan_id);
1929 + frame->tci = cpu_to_be16(tmp_tci);
1930 }
1931 }
1932
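
The nfp change rests on the 802.1Q TCI layout: PCP in bits 15:13, the DEI/CFI bit at 12 (repurposed in the match mask as an explicit "VLAN present" flag), and the VID in bits 11:0. A small runnable sketch that packs a TCI the way the new match code does, with plain shifts standing in for FIELD_PREP():

    #include <stdio.h>
    #include <stdint.h>

    #define VLAN_PRESENT (1u << 12)           /* bit 12, ex-CFI/DEI */
    #define PRIO(p)      (((p) & 0x7u) << 13) /* PCP, bits 15:13 */
    #define VID(v)       ((v) & 0xfffu)       /* VID, bits 11:0 */

    int main(void)
    {
        /* Match mask for "tagged frame, prio 5, vid 100": the present
         * bit is now set unconditionally, even when prio and vid are
         * both zero; that is the behavioural change in the hunk. */
        uint16_t tci = VLAN_PRESENT | PRIO(5) | VID(100);

        printf("tci = 0x%04x\n", tci); /* prints 0xb064 */
        return 0;
    }
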
1933 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
1934 index 95ee9d815d76..e23eaf3f6d03 100644
1935 --- a/drivers/net/team/team.c
1936 +++ b/drivers/net/team/team.c
1937 @@ -1250,6 +1250,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1938 goto err_option_port_add;
1939 }
1940
1941 + /* set promiscuity level to new slave */
1942 + if (dev->flags & IFF_PROMISC) {
1943 + err = dev_set_promiscuity(port_dev, 1);
1944 + if (err)
1945 + goto err_set_slave_promisc;
1946 + }
1947 +
1948 + /* set allmulti level to new slave */
1949 + if (dev->flags & IFF_ALLMULTI) {
1950 + err = dev_set_allmulti(port_dev, 1);
1951 + if (err) {
1952 + if (dev->flags & IFF_PROMISC)
1953 + dev_set_promiscuity(port_dev, -1);
1954 + goto err_set_slave_promisc;
1955 + }
1956 + }
1957 +
1958 netif_addr_lock_bh(dev);
1959 dev_uc_sync_multiple(port_dev, dev);
1960 dev_mc_sync_multiple(port_dev, dev);
1961 @@ -1266,6 +1283,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1962
1963 return 0;
1964
1965 +err_set_slave_promisc:
1966 + __team_option_inst_del_port(team, port);
1967 +
1968 err_option_port_add:
1969 team_upper_dev_unlink(team, port);
1970
1971 @@ -1311,6 +1331,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
1972
1973 team_port_disable(team, port);
1974 list_del_rcu(&port->list);
1975 +
1976 + if (dev->flags & IFF_PROMISC)
1977 + dev_set_promiscuity(port_dev, -1);
1978 + if (dev->flags & IFF_ALLMULTI)
1979 + dev_set_allmulti(port_dev, -1);
1980 +
1981 team_upper_dev_unlink(team, port);
1982 netdev_rx_handler_unregister(port_dev);
1983 team_port_disable_netpoll(port);
1984 diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
1985 index a279a4363bc1..1d21424eae8a 100644
1986 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
1987 +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
1988 @@ -672,7 +672,6 @@ enum rt2x00_state_flags {
1989 CONFIG_CHANNEL_HT40,
1990 CONFIG_POWERSAVING,
1991 CONFIG_HT_DISABLED,
1992 - CONFIG_QOS_DISABLED,
1993 CONFIG_MONITORING,
1994
1995 /*
1996 diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
1997 index fa2fd64084ac..da526684596f 100644
1998 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
1999 +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
2000 @@ -642,18 +642,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
2001 rt2x00dev->intf_associated--;
2002
2003 rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
2004 -
2005 - clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
2006 }
2007
2008 - /*
2009 - * Check for access point which do not support 802.11e . We have to
2010 - * generate data frames sequence number in S/W for such AP, because
2011 - * of H/W bug.
2012 - */
2013 - if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
2014 - set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
2015 -
2016 /*
2017 * When the erp information has changed, we should perform
2018 * additional configuration steps. For all other changes we are done.
2019 diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
2020 index 710e9641552e..85e320178a0e 100644
2021 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
2022 +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
2023 @@ -200,15 +200,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
2024 if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
2025 /*
2026 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
2027 - * seqno on retransmited data (non-QOS) frames. To workaround
2028 - * the problem let's generate seqno in software if QOS is
2029 - * disabled.
2030 + * seqno on retransmitted data (non-QOS) and management frames.
2031 + * To workaround the problem let's generate seqno in software.
2032 + * Except for beacons which are transmitted periodically by H/W
2033 + * hence hardware has to assign seqno for them.
2034 */
2035 - if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
2036 - __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
2037 - else
2038 + if (ieee80211_is_beacon(hdr->frame_control)) {
2039 + __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
2040 /* H/W will generate sequence number */
2041 return;
2042 + }
2043 +
2044 + __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
2045 }
2046
2047 /*
2048 diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
2049 index 1797e47fab38..3d51a936f6d5 100644
2050 --- a/drivers/scsi/libfc/fc_rport.c
2051 +++ b/drivers/scsi/libfc/fc_rport.c
2052 @@ -2153,7 +2153,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
2053 FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
2054 fc_rport_state(rdata));
2055
2056 - rdata->flags &= ~FC_RP_STARTED;
2057 fc_rport_enter_delete(rdata, RPORT_EV_STOP);
2058 mutex_unlock(&rdata->rp_mutex);
2059 kref_put(&rdata->kref, fc_rport_destroy);
2060 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2061 index 655790f30434..1fc832751a4f 100644
2062 --- a/drivers/scsi/scsi_lib.c
2063 +++ b/drivers/scsi/scsi_lib.c
2064 @@ -2149,8 +2149,12 @@ out_put_budget:
2065 ret = BLK_STS_DEV_RESOURCE;
2066 break;
2067 default:
2068 + if (unlikely(!scsi_device_online(sdev)))
2069 + scsi_req(req)->result = DID_NO_CONNECT << 16;
2070 + else
2071 + scsi_req(req)->result = DID_ERROR << 16;
2072 /*
2073 - * Make sure to release all allocated ressources when
2074 + * Make sure to release all allocated resources when
2075 * we hit an error, as we will never see this command
2076 * again.
2077 */
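
The `<< 16` in the scsi_lib.c hunk places a host-byte code in the SCSI result word (status byte in bits 7:0, host byte in bits 23:16). A sketch of the encoding; the numeric values of DID_NO_CONNECT and DID_ERROR are restated below from scsi.h as an assumption:

    #include <stdio.h>

    /* Host-byte codes, restated from include/scsi/scsi.h. */
    #define DID_NO_CONNECT 0x01 /* couldn't connect before timeout */
    #define DID_ERROR      0x07 /* internal error */

    /* The result word keeps the host byte in bits 23:16, hence the
     * "<< 16" in the hunk above. */
    static int host_byte_result(int did)
    {
        return did << 16;
    }

    int main(void)
    {
        printf("DID_NO_CONNECT -> 0x%06x\n", host_byte_result(DID_NO_CONNECT));
        printf("DID_ERROR      -> 0x%06x\n", host_byte_result(DID_ERROR));
        return 0;
    }
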
2078 diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
2079 index 808ed92ed66f..1bb1cb651349 100644
2080 --- a/drivers/staging/comedi/drivers/ni_usb6501.c
2081 +++ b/drivers/staging/comedi/drivers/ni_usb6501.c
2082 @@ -463,10 +463,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev)
2083
2084 size = usb_endpoint_maxp(devpriv->ep_tx);
2085 devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
2086 - if (!devpriv->usb_tx_buf) {
2087 - kfree(devpriv->usb_rx_buf);
2088 + if (!devpriv->usb_tx_buf)
2089 return -ENOMEM;
2090 - }
2091
2092 return 0;
2093 }
2094 @@ -518,6 +516,9 @@ static int ni6501_auto_attach(struct comedi_device *dev,
2095 if (!devpriv)
2096 return -ENOMEM;
2097
2098 + mutex_init(&devpriv->mut);
2099 + usb_set_intfdata(intf, devpriv);
2100 +
2101 ret = ni6501_find_endpoints(dev);
2102 if (ret)
2103 return ret;
2104 @@ -526,9 +527,6 @@ static int ni6501_auto_attach(struct comedi_device *dev,
2105 if (ret)
2106 return ret;
2107
2108 - mutex_init(&devpriv->mut);
2109 - usb_set_intfdata(intf, devpriv);
2110 -
2111 ret = comedi_alloc_subdevices(dev, 2);
2112 if (ret)
2113 return ret;
2114 diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
2115 index 6234b649d887..65dc6c51037e 100644
2116 --- a/drivers/staging/comedi/drivers/vmk80xx.c
2117 +++ b/drivers/staging/comedi/drivers/vmk80xx.c
2118 @@ -682,10 +682,8 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
2119
2120 size = usb_endpoint_maxp(devpriv->ep_tx);
2121 devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
2122 - if (!devpriv->usb_tx_buf) {
2123 - kfree(devpriv->usb_rx_buf);
2124 + if (!devpriv->usb_tx_buf)
2125 return -ENOMEM;
2126 - }
2127
2128 return 0;
2129 }
2130 @@ -800,6 +798,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
2131
2132 devpriv->model = board->model;
2133
2134 + sema_init(&devpriv->limit_sem, 8);
2135 +
2136 ret = vmk80xx_find_usb_endpoints(dev);
2137 if (ret)
2138 return ret;
2139 @@ -808,8 +808,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
2140 if (ret)
2141 return ret;
2142
2143 - sema_init(&devpriv->limit_sem, 8);
2144 -
2145 usb_set_intfdata(intf, devpriv);
2146
2147 if (devpriv->model == VMK8055_MODEL)
2148 diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
2149 index df0499fc4802..6857a4bf7297 100644
2150 --- a/drivers/staging/iio/adc/ad7192.c
2151 +++ b/drivers/staging/iio/adc/ad7192.c
2152 @@ -109,10 +109,10 @@
2153 #define AD7192_CH_AIN3 BIT(6) /* AIN3 - AINCOM */
2154 #define AD7192_CH_AIN4 BIT(7) /* AIN4 - AINCOM */
2155
2156 -#define AD7193_CH_AIN1P_AIN2M 0x000 /* AIN1(+) - AIN2(-) */
2157 -#define AD7193_CH_AIN3P_AIN4M 0x001 /* AIN3(+) - AIN4(-) */
2158 -#define AD7193_CH_AIN5P_AIN6M 0x002 /* AIN5(+) - AIN6(-) */
2159 -#define AD7193_CH_AIN7P_AIN8M 0x004 /* AIN7(+) - AIN8(-) */
2160 +#define AD7193_CH_AIN1P_AIN2M 0x001 /* AIN1(+) - AIN2(-) */
2161 +#define AD7193_CH_AIN3P_AIN4M 0x002 /* AIN3(+) - AIN4(-) */
2162 +#define AD7193_CH_AIN5P_AIN6M 0x004 /* AIN5(+) - AIN6(-) */
2163 +#define AD7193_CH_AIN7P_AIN8M 0x008 /* AIN7(+) - AIN8(-) */
2164 #define AD7193_CH_TEMP 0x100 /* Temp sensor */
2165 #define AD7193_CH_AIN2P_AIN2M 0x200 /* AIN2(+) - AIN2(-) */
2166 #define AD7193_CH_AIN1 0x401 /* AIN1 - AINCOM */
2167 diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
2168 index 029c3bf42d4d..07774c000c5a 100644
2169 --- a/drivers/staging/iio/meter/ade7854.c
2170 +++ b/drivers/staging/iio/meter/ade7854.c
2171 @@ -269,7 +269,7 @@ static IIO_DEV_ATTR_VPEAK(0644,
2172 static IIO_DEV_ATTR_IPEAK(0644,
2173 ade7854_read_32bit,
2174 ade7854_write_32bit,
2175 - ADE7854_VPEAK);
2176 + ADE7854_IPEAK);
2177 static IIO_DEV_ATTR_APHCAL(0644,
2178 ade7854_read_16bit,
2179 ade7854_write_16bit,
2180 diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
2181 index 52ad62722996..25a077f4ea94 100644
2182 --- a/drivers/staging/most/core.c
2183 +++ b/drivers/staging/most/core.c
2184 @@ -1412,7 +1412,7 @@ int most_register_interface(struct most_interface *iface)
2185
2186 INIT_LIST_HEAD(&iface->p->channel_list);
2187 iface->p->dev_id = id;
2188 - snprintf(iface->p->name, STRING_SIZE, "mdev%d", id);
2189 + strcpy(iface->p->name, iface->description);
2190 iface->dev.init_name = iface->p->name;
2191 iface->dev.bus = &mc.bus;
2192 iface->dev.parent = &mc.dev;
2193 diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
2194 index cbbf239aea0f..03fe3fb4bff6 100644
2195 --- a/drivers/tty/serial/sh-sci.c
2196 +++ b/drivers/tty/serial/sh-sci.c
2197 @@ -2497,14 +2497,16 @@ done:
2198 * center of the last stop bit in sampling clocks.
2199 */
2200 int last_stop = bits * 2 - 1;
2201 - int deviation = min_err * srr * last_stop / 2 / baud;
2202 + int deviation = DIV_ROUND_CLOSEST(min_err * last_stop *
2203 + (int)(srr + 1),
2204 + 2 * (int)baud);
2205
2206 if (abs(deviation) >= 2) {
2207 /* At least two sampling clocks off at the
2208 * last stop bit; we can increase the error
2209 * margin by shifting the sampling point.
2210 */
2211 - int shift = min(-8, max(7, deviation / 2));
2212 + int shift = clamp(deviation / 2, -8, 7);
2213
2214 hssrr |= (shift << HSCIF_SRHP_SHIFT) &
2215 HSCIF_SRHP_MASK;
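
The sh-sci change deserves a second look: max(7, x) is never less than 7, so min(-8, max(7, x)) is always -8, and the old code pinned the sampling-point shift to -8 regardless of the deviation. clamp(x, -8, 7) is what was meant. A runnable demonstration with plain C stand-ins for the kernel macros:

    #include <stdio.h>

    static int min_i(int a, int b) { return a < b ? a : b; }
    static int max_i(int a, int b) { return a > b ? a : b; }
    static int clamp_i(int v, int lo, int hi) { return min_i(max_i(v, lo), hi); }

    int main(void)
    {
        int deviation;

        for (deviation = -20; deviation <= 20; deviation += 10) {
            int old = min_i(-8, max_i(7, deviation / 2)); /* buggy: always -8 */
            int fix = clamp_i(deviation / 2, -8, 7);      /* patched behaviour */
            printf("dev=%3d  old=%3d  new=%3d\n", deviation, old, fix);
        }
        return 0;
    }
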
2216 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
2217 index b9a9a07f1ee9..3e5ec1cee059 100644
2218 --- a/drivers/tty/vt/vt.c
2219 +++ b/drivers/tty/vt/vt.c
2220 @@ -1521,7 +1521,8 @@ static void csi_J(struct vc_data *vc, int vpar)
2221 return;
2222 }
2223 scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
2224 - update_region(vc, (unsigned long) start, count);
2225 + if (con_should_update(vc))
2226 + do_update_region(vc, (unsigned long) start, count);
2227 vc->vc_need_wrap = 0;
2228 }
2229
2230 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
2231 index b214a72d5caa..c163bc15976a 100644
2232 --- a/drivers/vhost/vhost.c
2233 +++ b/drivers/vhost/vhost.c
2234 @@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem,
2235 u64 start, u64 size, u64 end,
2236 u64 userspace_addr, int perm)
2237 {
2238 - struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
2239 + struct vhost_umem_node *tmp, *node;
2240
2241 + if (!size)
2242 + return -EFAULT;
2243 +
2244 + node = kmalloc(sizeof(*node), GFP_ATOMIC);
2245 if (!node)
2246 return -ENOMEM;
2247
2248 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
2249 index 80f33582059e..6f227cc781e5 100644
2250 --- a/fs/cifs/cifsglob.h
2251 +++ b/fs/cifs/cifsglob.h
2252 @@ -1263,6 +1263,7 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
2253 }
2254
2255 struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
2256 +void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr);
2257 void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
2258
2259 #define CIFS_CACHE_READ_FLG 1
2260 @@ -1763,6 +1764,7 @@ GLOBAL_EXTERN spinlock_t gidsidlock;
2261 #endif /* CONFIG_CIFS_ACL */
2262
2263 void cifs_oplock_break(struct work_struct *work);
2264 +void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
2265
2266 extern const struct slow_work_ops cifs_oplock_break_ops;
2267 extern struct workqueue_struct *cifsiod_wq;
2268 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2269 index d847132ab027..d6b45682833b 100644
2270 --- a/fs/cifs/file.c
2271 +++ b/fs/cifs/file.c
2272 @@ -358,12 +358,30 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
2273 return cifs_file;
2274 }
2275
2276 -/*
2277 - * Release a reference on the file private data. This may involve closing
2278 - * the filehandle out on the server. Must be called without holding
2279 - * tcon->open_file_lock and cifs_file->file_info_lock.
2280 +/**
2281 + * cifsFileInfo_put - release a reference of file priv data
2282 + *
2283 + * Always potentially wait for oplock handler. See _cifsFileInfo_put().
2284 */
2285 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
2286 +{
2287 + _cifsFileInfo_put(cifs_file, true);
2288 +}
2289 +
2290 +/**
2291 + * _cifsFileInfo_put - release a reference of file priv data
2292 + *
2293 + * This may involve closing the filehandle @cifs_file out on the
2294 + * server. Must be called without holding tcon->open_file_lock and
2295 + * cifs_file->file_info_lock.
2296 + *
2297 + * If @wait_oplock_handler is true and we are releasing the last
2298 + * reference, wait for any running oplock break handler of the file
2299 + * and cancel any pending one. If calling this function from the
2300 + * oplock break handler, you need to pass false.
2301 + *
2302 + */
2303 +void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
2304 {
2305 struct inode *inode = d_inode(cifs_file->dentry);
2306 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
2307 @@ -411,7 +429,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
2308
2309 spin_unlock(&tcon->open_file_lock);
2310
2311 - oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
2312 + oplock_break_cancelled = wait_oplock_handler ?
2313 + cancel_work_sync(&cifs_file->oplock_break) : false;
2314
2315 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
2316 struct TCP_Server_Info *server = tcon->ses->server;
2317 @@ -4170,6 +4189,7 @@ void cifs_oplock_break(struct work_struct *work)
2318 cinode);
2319 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
2320 }
2321 + _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
2322 cifs_done_oplock_break(cinode);
2323 }
2324
2325 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
2326 index 6926685e513c..facc94e159a1 100644
2327 --- a/fs/cifs/misc.c
2328 +++ b/fs/cifs/misc.c
2329 @@ -490,8 +490,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
2330 CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
2331 &pCifsInode->flags);
2332
2333 - queue_work(cifsoplockd_wq,
2334 - &netfile->oplock_break);
2335 + cifs_queue_oplock_break(netfile);
2336 netfile->oplock_break_cancelled = false;
2337
2338 spin_unlock(&tcon->open_file_lock);
2339 @@ -588,6 +587,28 @@ void cifs_put_writer(struct cifsInodeInfo *cinode)
2340 spin_unlock(&cinode->writers_lock);
2341 }
2342
2343 +/**
2344 + * cifs_queue_oplock_break - queue the oplock break handler for cfile
2345 + *
2346 + * This function is called from the demultiplex thread when it
2347 + * receives an oplock break for @cfile.
2348 + *
2349 + * Assumes the tcon->open_file_lock is held.
2350 + * Assumes cfile->file_info_lock is NOT held.
2351 + */
2352 +void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
2353 +{
2354 + /*
2355 + * Bump the handle refcount now while we hold the
2356 + * open_file_lock to enforce the validity of it for the oplock
2357 + * break handler. The matching put is done at the end of the
2358 + * handler.
2359 + */
2360 + cifsFileInfo_get(cfile);
2361 +
2362 + queue_work(cifsoplockd_wq, &cfile->oplock_break);
2363 +}
2364 +
2365 void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
2366 {
2367 clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
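
The CIFS hunks follow the usual workqueue-lifetime pattern: take a reference before queue_work() so the object outlives the pending work, and drop that reference at the end of the handler. A condensed kernel-style sketch with a hypothetical my_obj (assumes INIT_WORK() was done at setup; not buildable outside a kernel tree):

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    /* Hypothetical object mirroring cifsFileInfo's shape: a refcount
     * plus an embedded work item. */
    struct my_obj {
        struct kref ref;
        struct work_struct work; /* INIT_WORK(&work, my_obj_work_fn) at setup */
    };

    static void my_obj_free(struct kref *ref)
    {
        kfree(container_of(ref, struct my_obj, ref));
    }

    static void my_obj_work_fn(struct work_struct *work)
    {
        struct my_obj *obj = container_of(work, struct my_obj, work);

        /* ... handle the event; obj is guaranteed alive here ... */

        kref_put(&obj->ref, my_obj_free); /* drop the handler's pin */
    }

    /* Take a reference *before* queueing so the object cannot go away
     * between queue_work() and the handler running; this is why
     * cifs_queue_oplock_break() calls cifsFileInfo_get() first. */
    static void my_obj_queue_event(struct my_obj *obj)
    {
        kref_get(&obj->ref);
        schedule_work(&obj->work);
    }
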
2368 diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
2369 index 58700d2ba8cd..0a7ed2e3ad4f 100644
2370 --- a/fs/cifs/smb2misc.c
2371 +++ b/fs/cifs/smb2misc.c
2372 @@ -555,7 +555,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
2373 clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
2374 &cinode->flags);
2375
2376 - queue_work(cifsoplockd_wq, &cfile->oplock_break);
2377 + cifs_queue_oplock_break(cfile);
2378 kfree(lw);
2379 return true;
2380 }
2381 @@ -719,8 +719,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
2382 CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
2383 &cinode->flags);
2384 spin_unlock(&cfile->file_info_lock);
2385 - queue_work(cifsoplockd_wq,
2386 - &cfile->oplock_break);
2387 +
2388 + cifs_queue_oplock_break(cfile);
2389
2390 spin_unlock(&tcon->open_file_lock);
2391 spin_unlock(&cifs_tcp_ses_lock);
2392 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2393 index d4d7d61a6fe2..2001184afe70 100644
2394 --- a/fs/cifs/smb2ops.c
2395 +++ b/fs/cifs/smb2ops.c
2396 @@ -1906,6 +1906,8 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
2397
2398 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov,
2399 &resp_buftype);
2400 + if (!rc)
2401 + SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2402 if (!rc || !err_iov.iov_base) {
2403 rc = -ENOENT;
2404 goto free_path;
2405 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2406 index 71f32d983384..c6fd3acc5560 100644
2407 --- a/fs/cifs/smb2pdu.c
2408 +++ b/fs/cifs/smb2pdu.c
2409 @@ -3273,8 +3273,6 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
2410 rqst.rq_nvec = 1;
2411
2412 rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
2413 - cifs_small_buf_release(req);
2414 -
2415 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
2416
2417 if (rc) {
2418 @@ -3293,6 +3291,8 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
2419 io_parms->tcon->tid, ses->Suid,
2420 io_parms->offset, io_parms->length);
2421
2422 + cifs_small_buf_release(req);
2423 +
2424 *nbytes = le32_to_cpu(rsp->DataLength);
2425 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
2426 (*nbytes > io_parms->length)) {
2427 @@ -3591,7 +3591,6 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
2428
2429 rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
2430 &resp_buftype, flags, &rsp_iov);
2431 - cifs_small_buf_release(req);
2432 rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
2433
2434 if (rc) {
2435 @@ -3609,6 +3608,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
2436 io_parms->offset, *nbytes);
2437 }
2438
2439 + cifs_small_buf_release(req);
2440 free_rsp_buf(resp_buftype, rsp);
2441 return rc;
2442 }
2443 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
2444 index d76fe166f6ce..c5819baee35c 100644
2445 --- a/fs/proc/task_mmu.c
2446 +++ b/fs/proc/task_mmu.c
2447 @@ -1138,6 +1138,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
2448 count = -EINTR;
2449 goto out_mm;
2450 }
2451 + /*
2452 + * Avoid modifying vma->vm_flags
2453 + * without locked ops while the
2454 + * coredump reads the vm_flags.
2455 + */
2456 + if (!mmget_still_valid(mm)) {
2457 + /*
2458 + * Silently return "count"
2459 + * as if get_task_mm()
2460 + * had failed. FIXME: should
2461 + * this function have returned
2462 + * -ESRCH if get_task_mm()
2463 + * failed, as it does when
2464 + * get_proc_task() fails?
2465 + */
2466 + up_write(&mm->mmap_sem);
2467 + goto out_mm;
2468 + }
2469 for (vma = mm->mmap; vma; vma = vma->vm_next) {
2470 vma->vm_flags &= ~VM_SOFTDIRTY;
2471 vma_set_page_prot(vma);
2472 diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
2473 index d8b8323e80f4..aaca81b5e119 100644
2474 --- a/fs/userfaultfd.c
2475 +++ b/fs/userfaultfd.c
2476 @@ -630,6 +630,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
2477
2478 /* the various vma->vm_userfaultfd_ctx still points to it */
2479 down_write(&mm->mmap_sem);
2480 + /* no task can run (and in turn coredump) yet */
2481 + VM_WARN_ON(!mmget_still_valid(mm));
2482 for (vma = mm->mmap; vma; vma = vma->vm_next)
2483 if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
2484 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
2485 @@ -884,6 +886,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
2486 * taking the mmap_sem for writing.
2487 */
2488 down_write(&mm->mmap_sem);
2489 + if (!mmget_still_valid(mm))
2490 + goto skip_mm;
2491 prev = NULL;
2492 for (vma = mm->mmap; vma; vma = vma->vm_next) {
2493 cond_resched();
2494 @@ -906,6 +910,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
2495 vma->vm_flags = new_flags;
2496 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
2497 }
2498 +skip_mm:
2499 up_write(&mm->mmap_sem);
2500 mmput(mm);
2501 wakeup:
2502 @@ -1334,6 +1339,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
2503 goto out;
2504
2505 down_write(&mm->mmap_sem);
2506 + if (!mmget_still_valid(mm))
2507 + goto out_unlock;
2508 vma = find_vma_prev(mm, start, &prev);
2509 if (!vma)
2510 goto out_unlock;
2511 @@ -1521,6 +1528,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
2512 goto out;
2513
2514 down_write(&mm->mmap_sem);
2515 + if (!mmget_still_valid(mm))
2516 + goto out_unlock;
2517 vma = find_vma_prev(mm, start, &prev);
2518 if (!vma)
2519 goto out_unlock;
2520 diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
2521 index e909413e4e38..32cae0f35b9d 100644
2522 --- a/include/linux/kprobes.h
2523 +++ b/include/linux/kprobes.h
2524 @@ -173,6 +173,7 @@ struct kretprobe_instance {
2525 struct kretprobe *rp;
2526 kprobe_opcode_t *ret_addr;
2527 struct task_struct *task;
2528 + void *fp;
2529 char data[0];
2530 };
2531
2532 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
2533 index 21fef8c5eca7..8c2fec0bcb26 100644
2534 --- a/include/linux/netdevice.h
2535 +++ b/include/linux/netdevice.h
2536 @@ -1456,6 +1456,7 @@ struct net_device_ops {
2537 * @IFF_FAILOVER: device is a failover master device
2538 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
2539 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
2540 + * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
2541 */
2542 enum netdev_priv_flags {
2543 IFF_802_1Q_VLAN = 1<<0,
2544 @@ -1488,6 +1489,7 @@ enum netdev_priv_flags {
2545 IFF_FAILOVER = 1<<27,
2546 IFF_FAILOVER_SLAVE = 1<<28,
2547 IFF_L3MDEV_RX_HANDLER = 1<<29,
2548 + IFF_LIVE_RENAME_OK = 1<<30,
2549 };
2550
2551 #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
2552 @@ -1519,6 +1521,7 @@ enum netdev_priv_flags {
2553 #define IFF_FAILOVER IFF_FAILOVER
2554 #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
2555 #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
2556 +#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
2557
2558 /**
2559 * struct net_device - The DEVICE structure.
2560 diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
2561 index aebb370a0006..cebb79fe2c72 100644
2562 --- a/include/linux/sched/mm.h
2563 +++ b/include/linux/sched/mm.h
2564 @@ -49,6 +49,27 @@ static inline void mmdrop(struct mm_struct *mm)
2565 __mmdrop(mm);
2566 }
2567
2568 +/*
2569 + * This has to be called after a get_task_mm()/mmget_not_zero()
2570 + * followed by taking the mmap_sem for writing before modifying the
2571 + * vmas or anything the coredump pretends not to change from under it.
2572 + *
2573 + * NOTE: find_extend_vma() called from GUP context is the only place
2574 + * that can modify the "mm" (notably the vm_start/end) under mmap_sem
2575 + * for reading and outside the context of the process, so it is also
2576 + * the only case that holds the mmap_sem for reading that must call
2577 + * this function. Generally if the mmap_sem is held for reading
2578 + * there's no need for this check after get_task_mm()/mmget_not_zero().
2579 + *
2580 + * This function can be obsoleted and the check can be removed, after
2581 + * the coredump code will hold the mmap_sem for writing before
2582 + * invoking the ->core_dump methods.
2583 + */
2584 +static inline bool mmget_still_valid(struct mm_struct *mm)
2585 +{
2586 + return likely(!mm->core_state);
2587 +}
2588 +
2589 /**
2590 * mmget() - Pin the address space associated with a &struct mm_struct.
2591 * @mm: The address space to pin.
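
Every mmget_still_valid() caller added in this patch has the same shape: pin the mm, take mmap_sem for writing, re-check validity, and back off untouched if a coredump has started. A kernel-style sketch of that calling convention (the error codes here are illustrative):

    #include <linux/errno.h>
    #include <linux/mm_types.h>
    #include <linux/sched/mm.h>

    static int sketch_modify_mm(struct mm_struct *mm)
    {
        if (!mmget_not_zero(mm))
            return -ESRCH;

        down_write(&mm->mmap_sem);
        if (!mmget_still_valid(mm)) {
            /* A coredump raced with us; it assumes the VMAs are
             * frozen, so bail out without touching anything. */
            up_write(&mm->mmap_sem);
            mmput(mm);
            return -EINTR;
        }

        /* ... safe to modify vmas / vm_flags here ... */

        up_write(&mm->mmap_sem);
        mmput(mm);
        return 0;
    }
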
2592 diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
2593 index 1662cbc0b46b..b02bf737d019 100644
2594 --- a/include/net/inet_frag.h
2595 +++ b/include/net/inet_frag.h
2596 @@ -77,8 +77,8 @@ struct inet_frag_queue {
2597 struct timer_list timer;
2598 spinlock_t lock;
2599 refcount_t refcnt;
2600 - struct sk_buff *fragments; /* Used in IPv6. */
2601 - struct rb_root rb_fragments; /* Used in IPv4. */
2602 + struct sk_buff *fragments; /* Used in 6LoWPAN IPv6. */
2603 + struct rb_root rb_fragments; /* Used in IPv4/IPv6. */
2604 struct sk_buff *fragments_tail;
2605 struct sk_buff *last_run_head;
2606 ktime_t stamp;
2607 @@ -153,4 +153,16 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
2608
2609 extern const u8 ip_frag_ecn_table[16];
2610
2611 +/* Return values of inet_frag_queue_insert() */
2612 +#define IPFRAG_OK 0
2613 +#define IPFRAG_DUP 1
2614 +#define IPFRAG_OVERLAP 2
2615 +int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
2616 + int offset, int end);
2617 +void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
2618 + struct sk_buff *parent);
2619 +void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
2620 + void *reasm_data);
2621 +struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
2622 +
2623 #endif
2624 diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
2625 index 6ced1e6899b6..28aa9b30aece 100644
2626 --- a/include/net/ipv6_frag.h
2627 +++ b/include/net/ipv6_frag.h
2628 @@ -82,8 +82,15 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
2629 __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
2630
2631 /* Don't send error if the first segment did not arrive. */
2632 - head = fq->q.fragments;
2633 - if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
2634 + if (!(fq->q.flags & INET_FRAG_FIRST_IN))
2635 + goto out;
2636 +
2637 + /* sk_buff::dev and sk_buff::rbnode are unionized. So we
2638 + * pull the head out of the tree in order to be able to
2639 + * deal with head->dev.
2640 + */
2641 + head = inet_frag_pull_head(&fq->q);
2642 + if (!head)
2643 goto out;
2644
2645 head->dev = dev;
2646 diff --git a/include/net/tls.h b/include/net/tls.h
2647 index 0a769cf2f5f3..c423b7d0b6ab 100644
2648 --- a/include/net/tls.h
2649 +++ b/include/net/tls.h
2650 @@ -317,7 +317,7 @@ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
2651 static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
2652 {
2653 #ifdef CONFIG_SOCK_VALIDATE_XMIT
2654 - return sk_fullsock(sk) &
2655 + return sk_fullsock(sk) &&
2656 (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
2657 &tls_validate_xmit_skb);
2658 #else
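
The one-character tls.h fix is about evaluation, not the truth table: with '&' the right-hand side, the load of sk->sk_validate_xmit_skb, runs even when sk_fullsock() is false, i.e. for request/timewait minisockets that lack the field; '&&' short-circuits and skips it. A userspace demonstration:

    #include <stdbool.h>
    #include <stdio.h>

    static int touched;

    /* Stand-in for the field read that is only valid on full sockets. */
    static bool read_field(void)
    {
        touched = 1;
        return true;
    }

    int main(void)
    {
        bool full = false; /* pretend sk_fullsock(sk) returned false */

        touched = 0;
        (void)(full & read_field());  /* bitwise: always evaluates both sides */
        printf("with '&' : field read = %d\n", touched);

        touched = 0;
        (void)(full && read_field()); /* logical: short-circuits */
        printf("with '&&': field read = %d\n", touched);
        return 0;
    }
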
2659 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
2660 index 4344381664cc..29ff6635d259 100644
2661 --- a/kernel/kprobes.c
2662 +++ b/kernel/kprobes.c
2663 @@ -703,7 +703,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
2664 static int reuse_unused_kprobe(struct kprobe *ap)
2665 {
2666 struct optimized_kprobe *op;
2667 - int ret;
2668
2669 BUG_ON(!kprobe_unused(ap));
2670 /*
2671 @@ -715,9 +714,8 @@ static int reuse_unused_kprobe(struct kprobe *ap)
2672 /* Enable the probe again */
2673 ap->flags &= ~KPROBE_FLAG_DISABLED;
2674 /* Optimize it again (remove from op->list) */
2675 - ret = kprobe_optready(ap);
2676 - if (ret)
2677 - return ret;
2678 + if (!kprobe_optready(ap))
2679 + return -EINVAL;
2680
2681 optimize_kprobe(ap);
2682 return 0;
2683 diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
2684 index 0cbdbbb0729f..26b57e24476f 100644
2685 --- a/kernel/locking/lockdep.c
2686 +++ b/kernel/locking/lockdep.c
2687 @@ -3567,9 +3567,6 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
2688 unsigned int depth;
2689 int i;
2690
2691 - if (unlikely(!debug_locks))
2692 - return 0;
2693 -
2694 depth = curr->lockdep_depth;
2695 /*
2696 * This function is about (re)setting the class of a held lock,
2697 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
2698 index 640094391169..4aa8e7d90c25 100644
2699 --- a/kernel/sched/fair.c
2700 +++ b/kernel/sched/fair.c
2701 @@ -4847,12 +4847,15 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2702 return HRTIMER_NORESTART;
2703 }
2704
2705 +extern const u64 max_cfs_quota_period;
2706 +
2707 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2708 {
2709 struct cfs_bandwidth *cfs_b =
2710 container_of(timer, struct cfs_bandwidth, period_timer);
2711 int overrun;
2712 int idle = 0;
2713 + int count = 0;
2714
2715 raw_spin_lock(&cfs_b->lock);
2716 for (;;) {
2717 @@ -4860,6 +4863,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2718 if (!overrun)
2719 break;
2720
2721 + if (++count > 3) {
2722 + u64 new, old = ktime_to_ns(cfs_b->period);
2723 +
2724 + new = (old * 147) / 128; /* ~115% */
2725 + new = min(new, max_cfs_quota_period);
2726 +
2727 + cfs_b->period = ns_to_ktime(new);
2728 +
2729 + /* since max is 1s, this is limited to 1e9^2, which fits in u64 */
2730 + cfs_b->quota *= new;
2731 + cfs_b->quota = div64_u64(cfs_b->quota, old);
2732 +
2733 + pr_warn_ratelimited(
2734 + "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
2735 + smp_processor_id(),
2736 + div_u64(new, NSEC_PER_USEC),
2737 + div_u64(cfs_b->quota, NSEC_PER_USEC));
2738 +
2739 + /* reset count so we don't come right back in here */
2740 + count = 0;
2741 + }
2742 +
2743 idle = do_sched_cfs_period_timer(cfs_b, overrun);
2744 }
2745 if (idle)
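
147/128 is about 1.148, the "~115%" in the comment, and quota is rescaled by the same ratio so quota/period, the granted bandwidth, is unchanged while the timer period grows. A quick arithmetic sketch with illustrative numbers:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t period = 100000;                 /* ns, illustrative */
        uint64_t quota  = 50000;                  /* ns, 50% bandwidth */
        uint64_t new_period = period * 147 / 128; /* ~114.8% */
        uint64_t new_quota  = quota * new_period / period;

        printf("period: %" PRIu64 " -> %" PRIu64 "\n", period, new_period);
        printf("quota : %" PRIu64 " -> %" PRIu64 "\n", quota, new_quota);
        /* quota/period stays ~0.5 after scaling */
        return 0;
    }
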
2746 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
2747 index 9e22660153ff..9a85c7ae7362 100644
2748 --- a/kernel/sysctl.c
2749 +++ b/kernel/sysctl.c
2750 @@ -125,6 +125,7 @@ static int zero;
2751 static int __maybe_unused one = 1;
2752 static int __maybe_unused two = 2;
2753 static int __maybe_unused four = 4;
2754 +static unsigned long zero_ul;
2755 static unsigned long one_ul = 1;
2756 static unsigned long long_max = LONG_MAX;
2757 static int one_hundred = 100;
2758 @@ -1696,7 +1697,7 @@ static struct ctl_table fs_table[] = {
2759 .maxlen = sizeof(files_stat.max_files),
2760 .mode = 0644,
2761 .proc_handler = proc_doulongvec_minmax,
2762 - .extra1 = &zero,
2763 + .extra1 = &zero_ul,
2764 .extra2 = &long_max,
2765 },
2766 {
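
The sysctl fix is a type-width bug rather than a policy change: proc_doulongvec_minmax() dereferences extra1 as an unsigned long, and pointing it at the int `zero` made the handler read four bytes of the int plus four adjacent bytes on 64-bit kernels. Hence a dedicated unsigned long zero. A userspace illustration of the width mismatch:

    #include <stdio.h>

    int main(void)
    {
        static int zero_int;          /* 4 bytes on common 64-bit ABIs */
        static unsigned long zero_ul; /* 8 bytes on LP64 */

        printf("sizeof(int)           = %zu\n", sizeof(zero_int));
        printf("sizeof(unsigned long) = %zu\n", sizeof(zero_ul));

        /* proc_doulongvec_minmax() effectively performs this read;
         * with extra1 = &zero_int it would cover 4 bytes beyond the
         * int object, while &zero_ul matches the expected width: */
        unsigned long min = *(unsigned long *)&zero_ul;
        printf("min = %lu\n", min);
        return 0;
    }
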
2767 diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
2768 index cbc72c2c1fca..78eb05aa8003 100644
2769 --- a/kernel/time/sched_clock.c
2770 +++ b/kernel/time/sched_clock.c
2771 @@ -275,7 +275,7 @@ static u64 notrace suspended_sched_clock_read(void)
2772 return cd.read_data[seq & 1].epoch_cyc;
2773 }
2774
2775 -static int sched_clock_suspend(void)
2776 +int sched_clock_suspend(void)
2777 {
2778 struct clock_read_data *rd = &cd.read_data[0];
2779
2780 @@ -286,7 +286,7 @@ static int sched_clock_suspend(void)
2781 return 0;
2782 }
2783
2784 -static void sched_clock_resume(void)
2785 +void sched_clock_resume(void)
2786 {
2787 struct clock_read_data *rd = &cd.read_data[0];
2788
2789 diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
2790 index 14de3727b18e..a02e0f6b287c 100644
2791 --- a/kernel/time/tick-common.c
2792 +++ b/kernel/time/tick-common.c
2793 @@ -491,6 +491,7 @@ void tick_freeze(void)
2794 trace_suspend_resume(TPS("timekeeping_freeze"),
2795 smp_processor_id(), true);
2796 system_state = SYSTEM_SUSPEND;
2797 + sched_clock_suspend();
2798 timekeeping_suspend();
2799 } else {
2800 tick_suspend_local();
2801 @@ -514,6 +515,7 @@ void tick_unfreeze(void)
2802
2803 if (tick_freeze_depth == num_online_cpus()) {
2804 timekeeping_resume();
2805 + sched_clock_resume();
2806 system_state = SYSTEM_RUNNING;
2807 trace_suspend_resume(TPS("timekeeping_freeze"),
2808 smp_processor_id(), false);
2809 diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
2810 index 7a9b4eb7a1d5..141ab3ab0354 100644
2811 --- a/kernel/time/timekeeping.h
2812 +++ b/kernel/time/timekeeping.h
2813 @@ -14,6 +14,13 @@ extern u64 timekeeping_max_deferment(void);
2814 extern void timekeeping_warp_clock(void);
2815 extern int timekeeping_suspend(void);
2816 extern void timekeeping_resume(void);
2817 +#ifdef CONFIG_GENERIC_SCHED_CLOCK
2818 +extern int sched_clock_suspend(void);
2819 +extern void sched_clock_resume(void);
2820 +#else
2821 +static inline int sched_clock_suspend(void) { return 0; }
2822 +static inline void sched_clock_resume(void) { }
2823 +#endif
2824
2825 extern void do_timer(unsigned long ticks);
2826 extern void update_wall_time(void);
2827 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
2828 index e23eb9fc77aa..1688782f3dfb 100644
2829 --- a/kernel/trace/ftrace.c
2830 +++ b/kernel/trace/ftrace.c
2831 @@ -34,6 +34,7 @@
2832 #include <linux/list.h>
2833 #include <linux/hash.h>
2834 #include <linux/rcupdate.h>
2835 +#include <linux/kprobes.h>
2836
2837 #include <trace/events/sched.h>
2838
2839 @@ -6250,7 +6251,7 @@ void ftrace_reset_array_ops(struct trace_array *tr)
2840 tr->ops->func = ftrace_stub;
2841 }
2842
2843 -static inline void
2844 +static nokprobe_inline void
2845 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
2846 struct ftrace_ops *ignored, struct pt_regs *regs)
2847 {
2848 @@ -6310,11 +6311,13 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
2849 {
2850 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
2851 }
2852 +NOKPROBE_SYMBOL(ftrace_ops_list_func);
2853 #else
2854 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
2855 {
2856 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
2857 }
2858 +NOKPROBE_SYMBOL(ftrace_ops_no_ops);
2859 #endif
2860
2861 /*
2862 @@ -6341,6 +6344,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
2863 preempt_enable_notrace();
2864 trace_clear_recursion(bit);
2865 }
2866 +NOKPROBE_SYMBOL(ftrace_ops_assist_func);
2867
2868 /**
2869 * ftrace_ops_get_func - get the function a trampoline should call
2870 diff --git a/mm/mmap.c b/mm/mmap.c
2871 index 43507f7e66b4..1480880ff814 100644
2872 --- a/mm/mmap.c
2873 +++ b/mm/mmap.c
2874 @@ -45,6 +45,7 @@
2875 #include <linux/moduleparam.h>
2876 #include <linux/pkeys.h>
2877 #include <linux/oom.h>
2878 +#include <linux/sched/mm.h>
2879
2880 #include <linux/uaccess.h>
2881 #include <asm/cacheflush.h>
2882 @@ -2491,7 +2492,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
2883 vma = find_vma_prev(mm, addr, &prev);
2884 if (vma && (vma->vm_start <= addr))
2885 return vma;
2886 - if (!prev || expand_stack(prev, addr))
2887 + /* don't alter vm_end if the coredump is running */
2888 + if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
2889 return NULL;
2890 if (prev->vm_flags & VM_LOCKED)
2891 populate_vma_page_range(prev, addr, prev->vm_end, NULL);
2892 @@ -2517,6 +2519,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
2893 return vma;
2894 if (!(vma->vm_flags & VM_GROWSDOWN))
2895 return NULL;
2896 + /* don't alter vm_start if the coredump is running */
2897 + if (!mmget_still_valid(mm))
2898 + return NULL;
2899 start = vma->vm_start;
2900 if (expand_stack(vma, addr))
2901 return NULL;
2902 diff --git a/mm/percpu.c b/mm/percpu.c
2903 index 4b90682623e9..41e58f3d8fbf 100644
2904 --- a/mm/percpu.c
2905 +++ b/mm/percpu.c
2906 @@ -2529,8 +2529,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
2907 ai->groups[group].base_offset = areas[group] - base;
2908 }
2909
2910 - pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
2911 - PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
2912 + pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
2913 + PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
2914 ai->dyn_size, ai->unit_size);
2915
2916 rc = pcpu_setup_first_chunk(ai, base);
2917 @@ -2651,8 +2651,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
2918 }
2919
2920 /* we're ready, commit */
2921 - pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
2922 - unit_pages, psize_str, vm.addr, ai->static_size,
2923 + pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
2924 + unit_pages, psize_str, ai->static_size,
2925 ai->reserved_size, ai->dyn_size);
2926
2927 rc = pcpu_setup_first_chunk(ai, vm.addr);
2928 diff --git a/mm/vmstat.c b/mm/vmstat.c
2929 index 2878dc4e9af6..4a387937f9f5 100644
2930 --- a/mm/vmstat.c
2931 +++ b/mm/vmstat.c
2932 @@ -1272,13 +1272,8 @@ const char * const vmstat_text[] = {
2933 #endif
2934 #endif /* CONFIG_MEMORY_BALLOON */
2935 #ifdef CONFIG_DEBUG_TLBFLUSH
2936 -#ifdef CONFIG_SMP
2937 "nr_tlb_remote_flush",
2938 "nr_tlb_remote_flush_received",
2939 -#else
2940 - "", /* nr_tlb_remote_flush */
2941 - "", /* nr_tlb_remote_flush_received */
2942 -#endif /* CONFIG_SMP */
2943 "nr_tlb_local_flush_all",
2944 "nr_tlb_local_flush_one",
2945 #endif /* CONFIG_DEBUG_TLBFLUSH */
2946 diff --git a/net/atm/lec.c b/net/atm/lec.c
2947 index d7f5cf5b7594..ad4f829193f0 100644
2948 --- a/net/atm/lec.c
2949 +++ b/net/atm/lec.c
2950 @@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
2951
2952 static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
2953 {
2954 - if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
2955 + if (arg < 0 || arg >= MAX_LEC_ITF)
2956 + return -EINVAL;
2957 + arg = array_index_nospec(arg, MAX_LEC_ITF);
2958 + if (!dev_lec[arg])
2959 return -EINVAL;
2960 vcc->proto_data = dev_lec[arg];
2961 return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
2962 @@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
2963 i = arg;
2964 if (arg >= MAX_LEC_ITF)
2965 return -EINVAL;
2966 + i = array_index_nospec(arg, MAX_LEC_ITF);
2967 if (!dev_lec[i]) {
2968 int size;
2969
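
Both lec.c hunks are the standard Spectre-v1 hardening: bounds-check first, then pass the index through array_index_nospec() so that, under speculation, a mispredicted bounds check cannot index dev_lec[] out of range. A kernel-style sketch (array_index_nospec() is real, from linux/nospec.h; MAX_LEC_ITF is restated from lec.h):

    #include <linux/netdevice.h>
    #include <linux/nospec.h>

    #define MAX_LEC_ITF 48 /* restated from net/atm/lec.h */

    static struct net_device *dev_lec[MAX_LEC_ITF];

    static struct net_device *lookup_itf(int arg)
    {
        if (arg < 0 || arg >= MAX_LEC_ITF)
            return NULL;
        /* Clamp the index under speculation so a mispredicted branch
         * cannot leak out-of-bounds table contents. */
        arg = array_index_nospec(arg, MAX_LEC_ITF);
        return dev_lec[arg];
    }
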
2970 diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
2971 index 72074276c088..fed0ff446abb 100644
2972 --- a/net/bridge/br_input.c
2973 +++ b/net/bridge/br_input.c
2974 @@ -195,13 +195,10 @@ static void __br_handle_local_finish(struct sk_buff *skb)
2975 /* note: already called with rcu_read_lock */
2976 static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
2977 {
2978 - struct net_bridge_port *p = br_port_get_rcu(skb->dev);
2979 -
2980 __br_handle_local_finish(skb);
2981
2982 - BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
2983 - br_pass_frame_up(skb);
2984 - return 0;
2985 + /* return 1 to signal the okfn() was called so it's ok to use the skb */
2986 + return 1;
2987 }
2988
2989 /*
2990 @@ -278,10 +275,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
2991 goto forward;
2992 }
2993
2994 - /* Deliver packet to local host only */
2995 - NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
2996 - NULL, skb, skb->dev, NULL, br_handle_local_finish);
2997 - return RX_HANDLER_CONSUMED;
2998 + /* The else clause should be hit when nf_hook():
2999 + * - returns < 0 (drop/error)
3000 + * - returns = 0 (stolen/nf_queue)
3001 + * Thus return 1 from the okfn() to signal the skb is ok to pass
3002 + */
3003 + if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
3004 + dev_net(skb->dev), NULL, skb, skb->dev, NULL,
3005 + br_handle_local_finish) == 1) {
3006 + return RX_HANDLER_PASS;
3007 + } else {
3008 + return RX_HANDLER_CONSUMED;
3009 + }
3010 }
3011
3012 forward:
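
The bridge fix leans on an NF_HOOK() convention: when the hook verdict is accept, NF_HOOK() calls the okfn and returns the okfn's return value; on drop or stolen outcomes the okfn never runs. Returning 1 from br_handle_local_finish() therefore lets br_handle_frame() tell "okfn ran, skb still valid" (RX_HANDLER_PASS) apart from "netfilter consumed it" (RX_HANDLER_CONSUMED). A userspace model of the convention:

    #include <stdio.h>

    enum verdict { V_DROP, V_ACCEPT, V_STOLEN }; /* toy stand-ins */

    static int okfn(void *skb)
    {
        (void)skb;
        return 1; /* "okfn ran; skb is still usable by the caller" */
    }

    /* Toy NF_HOOK: on accept, the okfn's return value propagates;
     * otherwise the skb was dropped or stolen and 0/negative returns. */
    static int nf_hook_model(enum verdict v, void *skb)
    {
        switch (v) {
        case V_ACCEPT:
            return okfn(skb);
        case V_STOLEN:
            return 0;
        default:
            return -1;
        }
    }

    int main(void)
    {
        int skb;

        printf("accept -> %d (RX_HANDLER_PASS)\n",
               nf_hook_model(V_ACCEPT, &skb));
        printf("stolen -> %d (RX_HANDLER_CONSUMED)\n",
               nf_hook_model(V_STOLEN, &skb));
        return 0;
    }
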
3013 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
3014 index 20ed7adcf1cc..75901c4641b1 100644
3015 --- a/net/bridge/br_multicast.c
3016 +++ b/net/bridge/br_multicast.c
3017 @@ -2152,7 +2152,8 @@ static void br_multicast_start_querier(struct net_bridge *br,
3018
3019 __br_multicast_open(br, query);
3020
3021 - list_for_each_entry(port, &br->port_list, list) {
3022 + rcu_read_lock();
3023 + list_for_each_entry_rcu(port, &br->port_list, list) {
3024 if (port->state == BR_STATE_DISABLED ||
3025 port->state == BR_STATE_BLOCKING)
3026 continue;
3027 @@ -2164,6 +2165,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
3028 br_multicast_enable(&port->ip6_own_query);
3029 #endif
3030 }
3031 + rcu_read_unlock();
3032 }
3033
3034 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
3035 diff --git a/net/core/dev.c b/net/core/dev.c
3036 index d47554307a6d..3bcec116a5f2 100644
3037 --- a/net/core/dev.c
3038 +++ b/net/core/dev.c
3039 @@ -1180,7 +1180,21 @@ int dev_change_name(struct net_device *dev, const char *newname)
3040 BUG_ON(!dev_net(dev));
3041
3042 net = dev_net(dev);
3043 - if (dev->flags & IFF_UP)
3044 +
3045 + /* Some auto-enslaved devices e.g. failover slaves are
3046 + * special, as userspace might rename the device after
3047 + * the interface had been brought up and running since
3048 + * the point kernel initiated auto-enslavement. Allow
3049 + * live name change even when these slave devices are
3050 + * up and running.
3051 + *
3052 + * Typically, users of these auto-enslaving devices
3053 + * don't actually care about slave name change, as
3054 + * they are supposed to operate on master interface
3055 + * directly.
3056 + */
3057 + if (dev->flags & IFF_UP &&
3058 + likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
3059 return -EBUSY;
3060
3061 write_seqcount_begin(&devnet_rename_seq);
3062 diff --git a/net/core/failover.c b/net/core/failover.c
3063 index 4a92a98ccce9..b5cd3c727285 100644
3064 --- a/net/core/failover.c
3065 +++ b/net/core/failover.c
3066 @@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev)
3067 goto err_upper_link;
3068 }
3069
3070 - slave_dev->priv_flags |= IFF_FAILOVER_SLAVE;
3071 + slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
3072
3073 if (fops && fops->slave_register &&
3074 !fops->slave_register(slave_dev, failover_dev))
3075 return NOTIFY_OK;
3076
3077 netdev_upper_dev_unlink(slave_dev, failover_dev);
3078 - slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
3079 + slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
3080 err_upper_link:
3081 netdev_rx_handler_unregister(slave_dev);
3082 done:
3083 @@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev)
3084
3085 netdev_rx_handler_unregister(slave_dev);
3086 netdev_upper_dev_unlink(slave_dev, failover_dev);
3087 - slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
3088 + slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
3089
3090 if (fops && fops->slave_unregister &&
3091 !fops->slave_unregister(slave_dev, failover_dev))
3092 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
3093 index ceee28e184af..8b5768113acd 100644
3094 --- a/net/core/skbuff.c
3095 +++ b/net/core/skbuff.c
3096 @@ -5071,7 +5071,8 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
3097
3098 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
3099 {
3100 - int mac_len;
3101 + int mac_len, meta_len;
3102 + void *meta;
3103
3104 if (skb_cow(skb, skb_headroom(skb)) < 0) {
3105 kfree_skb(skb);
3106 @@ -5083,6 +5084,13 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
3107 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
3108 mac_len - VLAN_HLEN - ETH_TLEN);
3109 }
3110 +
3111 + meta_len = skb_metadata_len(skb);
3112 + if (meta_len) {
3113 + meta = skb_metadata_end(skb) - meta_len;
3114 + memmove(meta + VLAN_HLEN, meta, meta_len);
3115 + }
3116 +
3117 skb->mac_header += VLAN_HLEN;
3118 return skb;
3119 }
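
The skbuff fix moves any metadata stored just below skb->data by the same VLAN_HLEN offset as the mac header, since skb_metadata_end() is defined relative to the mac header. The same logic as an isolated sketch (illustrative function name):

static void shift_skb_metadata(struct sk_buff *skb, int shift)
{
	int meta_len = skb_metadata_len(skb);
	void *meta;

	if (!meta_len)
		return;
	/* metadata ends where the mac header begins */
	meta = skb_metadata_end(skb) - meta_len;
	memmove(meta + shift, meta, meta_len);	/* regions may overlap */
}
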
3120 diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
3121 index 500a59906b87..854ff1e4c41f 100644
3122 --- a/net/ipv4/fou.c
3123 +++ b/net/ipv4/fou.c
3124 @@ -120,6 +120,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
3125 struct guehdr *guehdr;
3126 void *data;
3127 u16 doffset = 0;
3128 + u8 proto_ctype;
3129
3130 if (!fou)
3131 return 1;
3132 @@ -211,13 +212,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
3133 if (unlikely(guehdr->control))
3134 return gue_control_message(skb, guehdr);
3135
3136 + proto_ctype = guehdr->proto_ctype;
3137 __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
3138 skb_reset_transport_header(skb);
3139
3140 if (iptunnel_pull_offloads(skb))
3141 goto drop;
3142
3143 - return -guehdr->proto_ctype;
3144 + return -proto_ctype;
3145
3146 drop:
3147 kfree_skb(skb);
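
The fou fix is an instance of a general rule: copy the fields you need out of a packet header before calling anything that can invalidate the header pointer (here, __skb_pull() and iptunnel_pull_offloads()). A minimal sketch with an illustrative header struct:

struct demo_hdr {
	u8 proto;
};

static int demo_recv(struct sk_buff *skb)
{
	struct demo_hdr *hdr = (struct demo_hdr *)skb->data;
	u8 proto = hdr->proto;			/* cache before the pull */

	__skb_pull(skb, sizeof(*hdr));		/* 'hdr' is stale past here */
	return -proto;
}
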
3148 diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
3149 index 760a9e52e02b..9f69411251d0 100644
3150 --- a/net/ipv4/inet_fragment.c
3151 +++ b/net/ipv4/inet_fragment.c
3152 @@ -25,6 +25,62 @@
3153 #include <net/sock.h>
3154 #include <net/inet_frag.h>
3155 #include <net/inet_ecn.h>
3156 +#include <net/ip.h>
3157 +#include <net/ipv6.h>
3158 +
3159 +/* Use skb->cb to track consecutive/adjacent fragments coming at
3160 + * the end of the queue. Nodes in the rb-tree queue will
3161 + * contain "runs" of one or more adjacent fragments.
3162 + *
3163 + * Invariants:
3164 + * - next_frag is NULL at the tail of a "run";
3165 + * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
3166 + */
3167 +struct ipfrag_skb_cb {
3168 + union {
3169 + struct inet_skb_parm h4;
3170 + struct inet6_skb_parm h6;
3171 + };
3172 + struct sk_buff *next_frag;
3173 + int frag_run_len;
3174 +};
3175 +
3176 +#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
3177 +
3178 +static void fragcb_clear(struct sk_buff *skb)
3179 +{
3180 + RB_CLEAR_NODE(&skb->rbnode);
3181 + FRAG_CB(skb)->next_frag = NULL;
3182 + FRAG_CB(skb)->frag_run_len = skb->len;
3183 +}
3184 +
3185 +/* Append skb to the last "run". */
3186 +static void fragrun_append_to_last(struct inet_frag_queue *q,
3187 + struct sk_buff *skb)
3188 +{
3189 + fragcb_clear(skb);
3190 +
3191 + FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
3192 + FRAG_CB(q->fragments_tail)->next_frag = skb;
3193 + q->fragments_tail = skb;
3194 +}
3195 +
3196 +/* Create a new "run" with the skb. */
3197 +static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
3198 +{
3199 + BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
3200 + fragcb_clear(skb);
3201 +
3202 + if (q->last_run_head)
3203 + rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
3204 + &q->last_run_head->rbnode.rb_right);
3205 + else
3206 + rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
3207 + rb_insert_color(&skb->rbnode, &q->rb_fragments);
3208 +
3209 + q->fragments_tail = skb;
3210 + q->last_run_head = skb;
3211 +}
3212
3213 /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
3214 * Value : 0xff if frame should be dropped.
3215 @@ -123,6 +179,28 @@ static void inet_frag_destroy_rcu(struct rcu_head *head)
3216 kmem_cache_free(f->frags_cachep, q);
3217 }
3218
3219 +unsigned int inet_frag_rbtree_purge(struct rb_root *root)
3220 +{
3221 + struct rb_node *p = rb_first(root);
3222 + unsigned int sum = 0;
3223 +
3224 + while (p) {
3225 + struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3226 +
3227 + p = rb_next(p);
3228 + rb_erase(&skb->rbnode, root);
3229 + while (skb) {
3230 + struct sk_buff *next = FRAG_CB(skb)->next_frag;
3231 +
3232 + sum += skb->truesize;
3233 + kfree_skb(skb);
3234 + skb = next;
3235 + }
3236 + }
3237 + return sum;
3238 +}
3239 +EXPORT_SYMBOL(inet_frag_rbtree_purge);
3240 +
3241 void inet_frag_destroy(struct inet_frag_queue *q)
3242 {
3243 struct sk_buff *fp;
3244 @@ -224,3 +302,218 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
3245 return fq;
3246 }
3247 EXPORT_SYMBOL(inet_frag_find);
3248 +
3249 +int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
3250 + int offset, int end)
3251 +{
3252 + struct sk_buff *last = q->fragments_tail;
3253 +
3254 + /* RFC5722, Section 4, amended by Errata ID : 3089
3255 + * When reassembling an IPv6 datagram, if
3256 + * one or more of its constituent fragments is determined to be an
3257 + * overlapping fragment, the entire datagram (and any constituent
3258 + * fragments) MUST be silently discarded.
3259 + *
3260 + * Duplicates, however, should be ignored (i.e. skb dropped, but the
3261 + * queue/fragments kept for later reassembly).
3262 + */
3263 + if (!last)
3264 + fragrun_create(q, skb); /* First fragment. */
3265 + else if (last->ip_defrag_offset + last->len < end) {
3266 + /* This is the common case: skb goes to the end. */
3267 + /* Detect and discard overlaps. */
3268 + if (offset < last->ip_defrag_offset + last->len)
3269 + return IPFRAG_OVERLAP;
3270 + if (offset == last->ip_defrag_offset + last->len)
3271 + fragrun_append_to_last(q, skb);
3272 + else
3273 + fragrun_create(q, skb);
3274 + } else {
3275 + /* Binary search. Note that skb can become the first fragment,
3276 + * but not the last (covered above).
3277 + */
3278 + struct rb_node **rbn, *parent;
3279 +
3280 + rbn = &q->rb_fragments.rb_node;
3281 + do {
3282 + struct sk_buff *curr;
3283 + int curr_run_end;
3284 +
3285 + parent = *rbn;
3286 + curr = rb_to_skb(parent);
3287 + curr_run_end = curr->ip_defrag_offset +
3288 + FRAG_CB(curr)->frag_run_len;
3289 + if (end <= curr->ip_defrag_offset)
3290 + rbn = &parent->rb_left;
3291 + else if (offset >= curr_run_end)
3292 + rbn = &parent->rb_right;
3293 + else if (offset >= curr->ip_defrag_offset &&
3294 + end <= curr_run_end)
3295 + return IPFRAG_DUP;
3296 + else
3297 + return IPFRAG_OVERLAP;
3298 + } while (*rbn);
3299 + /* Here we have parent properly set, and rbn pointing to
3300 + * one of its NULL left/right children. Insert skb.
3301 + */
3302 + fragcb_clear(skb);
3303 + rb_link_node(&skb->rbnode, parent, rbn);
3304 + rb_insert_color(&skb->rbnode, &q->rb_fragments);
3305 + }
3306 +
3307 + skb->ip_defrag_offset = offset;
3308 +
3309 + return IPFRAG_OK;
3310 +}
3311 +EXPORT_SYMBOL(inet_frag_queue_insert);
3312 +
3313 +void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
3314 + struct sk_buff *parent)
3315 +{
3316 + struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
3317 + struct sk_buff **nextp;
3318 + int delta;
3319 +
3320 + if (head != skb) {
3321 + fp = skb_clone(skb, GFP_ATOMIC);
3322 + if (!fp)
3323 + return NULL;
3324 + FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
3325 + if (RB_EMPTY_NODE(&skb->rbnode))
3326 + FRAG_CB(parent)->next_frag = fp;
3327 + else
3328 + rb_replace_node(&skb->rbnode, &fp->rbnode,
3329 + &q->rb_fragments);
3330 + if (q->fragments_tail == skb)
3331 + q->fragments_tail = fp;
3332 + skb_morph(skb, head);
3333 + FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
3334 + rb_replace_node(&head->rbnode, &skb->rbnode,
3335 + &q->rb_fragments);
3336 + consume_skb(head);
3337 + head = skb;
3338 + }
3339 + WARN_ON(head->ip_defrag_offset != 0);
3340 +
3341 + delta = -head->truesize;
3342 +
3343 + /* Head of list must not be cloned. */
3344 + if (skb_unclone(head, GFP_ATOMIC))
3345 + return NULL;
3346 +
3347 + delta += head->truesize;
3348 + if (delta)
3349 + add_frag_mem_limit(q->net, delta);
3350 +
3351 + /* If the first fragment is fragmented itself, we split
3352 + * it to two chunks: the first with data and paged part
3353 + * and the second, holding only fragments.
3354 + */
3355 + if (skb_has_frag_list(head)) {
3356 + struct sk_buff *clone;
3357 + int i, plen = 0;
3358 +
3359 + clone = alloc_skb(0, GFP_ATOMIC);
3360 + if (!clone)
3361 + return NULL;
3362 + skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
3363 + skb_frag_list_init(head);
3364 + for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
3365 + plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
3366 + clone->data_len = head->data_len - plen;
3367 + clone->len = clone->data_len;
3368 + head->truesize += clone->truesize;
3369 + clone->csum = 0;
3370 + clone->ip_summed = head->ip_summed;
3371 + add_frag_mem_limit(q->net, clone->truesize);
3372 + skb_shinfo(head)->frag_list = clone;
3373 + nextp = &clone->next;
3374 + } else {
3375 + nextp = &skb_shinfo(head)->frag_list;
3376 + }
3377 +
3378 + return nextp;
3379 +}
3380 +EXPORT_SYMBOL(inet_frag_reasm_prepare);
3381 +
3382 +void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
3383 + void *reasm_data)
3384 +{
3385 + struct sk_buff **nextp = (struct sk_buff **)reasm_data;
3386 + struct rb_node *rbn;
3387 + struct sk_buff *fp;
3388 +
3389 + skb_push(head, head->data - skb_network_header(head));
3390 +
3391 + /* Traverse the tree in order, to build frag_list. */
3392 + fp = FRAG_CB(head)->next_frag;
3393 + rbn = rb_next(&head->rbnode);
3394 + rb_erase(&head->rbnode, &q->rb_fragments);
3395 + while (rbn || fp) {
3396 + /* fp points to the next sk_buff in the current run;
3397 + * rbn points to the next run.
3398 + */
3399 + /* Go through the current run. */
3400 + while (fp) {
3401 + *nextp = fp;
3402 + nextp = &fp->next;
3403 + fp->prev = NULL;
3404 + memset(&fp->rbnode, 0, sizeof(fp->rbnode));
3405 + fp->sk = NULL;
3406 + head->data_len += fp->len;
3407 + head->len += fp->len;
3408 + if (head->ip_summed != fp->ip_summed)
3409 + head->ip_summed = CHECKSUM_NONE;
3410 + else if (head->ip_summed == CHECKSUM_COMPLETE)
3411 + head->csum = csum_add(head->csum, fp->csum);
3412 + head->truesize += fp->truesize;
3413 + fp = FRAG_CB(fp)->next_frag;
3414 + }
3415 + /* Move to the next run. */
3416 + if (rbn) {
3417 + struct rb_node *rbnext = rb_next(rbn);
3418 +
3419 + fp = rb_to_skb(rbn);
3420 + rb_erase(rbn, &q->rb_fragments);
3421 + rbn = rbnext;
3422 + }
3423 + }
3424 + sub_frag_mem_limit(q->net, head->truesize);
3425 +
3426 + *nextp = NULL;
3427 + skb_mark_not_on_list(head);
3428 + head->prev = NULL;
3429 + head->tstamp = q->stamp;
3430 +}
3431 +EXPORT_SYMBOL(inet_frag_reasm_finish);
3432 +
3433 +struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
3434 +{
3435 + struct sk_buff *head;
3436 +
3437 + if (q->fragments) {
3438 + head = q->fragments;
3439 + q->fragments = head->next;
3440 + } else {
3441 + struct sk_buff *skb;
3442 +
3443 + head = skb_rb_first(&q->rb_fragments);
3444 + if (!head)
3445 + return NULL;
3446 + skb = FRAG_CB(head)->next_frag;
3447 + if (skb)
3448 + rb_replace_node(&head->rbnode, &skb->rbnode,
3449 + &q->rb_fragments);
3450 + else
3451 + rb_erase(&head->rbnode, &q->rb_fragments);
3452 + memset(&head->rbnode, 0, sizeof(head->rbnode));
3453 + barrier();
3454 + }
3455 + if (head == q->fragments_tail)
3456 + q->fragments_tail = NULL;
3457 +
3458 + sub_frag_mem_limit(q->net, head->truesize);
3459 +
3460 + return head;
3461 +}
3462 +EXPORT_SYMBOL(inet_frag_pull_head);
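
inet_frag_queue_insert() above classifies a new fragment [offset, end) against an existing run [run_start, run_end). The interval logic, reduced to plain integers so it compiles and runs in userspace (a worked example, not kernel code):

#include <stdio.h>

enum { FRAG_OK, FRAG_DUP, FRAG_OVERLAP };

static int classify(int run_start, int run_end, int offset, int end)
{
	if (end <= run_start || offset >= run_end)
		return FRAG_OK;		/* disjoint: descend left/right */
	if (offset >= run_start && end <= run_end)
		return FRAG_DUP;	/* contained: drop this skb only */
	return FRAG_OVERLAP;		/* partial overlap: kill the queue */
}

int main(void)
{
	/* one run already queued, covering bytes [0, 1200) */
	printf("%d\n", classify(0, 1200, 1200, 2400));	/* 0: new run */
	printf("%d\n", classify(0, 1200,  400,  800));	/* 1: duplicate */
	printf("%d\n", classify(0, 1200,  800, 1600));	/* 2: overlap */
	return 0;
}
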
3463 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
3464 index d95b32af4a0e..5a1d39e32196 100644
3465 --- a/net/ipv4/ip_fragment.c
3466 +++ b/net/ipv4/ip_fragment.c
3467 @@ -57,57 +57,6 @@
3468 */
3469 static const char ip_frag_cache_name[] = "ip4-frags";
3470
3471 -/* Use skb->cb to track consecutive/adjacent fragments coming at
3472 - * the end of the queue. Nodes in the rb-tree queue will
3473 - * contain "runs" of one or more adjacent fragments.
3474 - *
3475 - * Invariants:
3476 - * - next_frag is NULL at the tail of a "run";
3477 - * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
3478 - */
3479 -struct ipfrag_skb_cb {
3480 - struct inet_skb_parm h;
3481 - struct sk_buff *next_frag;
3482 - int frag_run_len;
3483 -};
3484 -
3485 -#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
3486 -
3487 -static void ip4_frag_init_run(struct sk_buff *skb)
3488 -{
3489 - BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
3490 -
3491 - FRAG_CB(skb)->next_frag = NULL;
3492 - FRAG_CB(skb)->frag_run_len = skb->len;
3493 -}
3494 -
3495 -/* Append skb to the last "run". */
3496 -static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
3497 - struct sk_buff *skb)
3498 -{
3499 - RB_CLEAR_NODE(&skb->rbnode);
3500 - FRAG_CB(skb)->next_frag = NULL;
3501 -
3502 - FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
3503 - FRAG_CB(q->fragments_tail)->next_frag = skb;
3504 - q->fragments_tail = skb;
3505 -}
3506 -
3507 -/* Create a new "run" with the skb. */
3508 -static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
3509 -{
3510 - if (q->last_run_head)
3511 - rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
3512 - &q->last_run_head->rbnode.rb_right);
3513 - else
3514 - rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
3515 - rb_insert_color(&skb->rbnode, &q->rb_fragments);
3516 -
3517 - ip4_frag_init_run(skb);
3518 - q->fragments_tail = skb;
3519 - q->last_run_head = skb;
3520 -}
3521 -
3522 /* Describe an entry in the "incomplete datagrams" queue. */
3523 struct ipq {
3524 struct inet_frag_queue q;
3525 @@ -212,27 +161,9 @@ static void ip_expire(struct timer_list *t)
3526 * pull the head out of the tree in order to be able to
3527 * deal with head->dev.
3528 */
3529 - if (qp->q.fragments) {
3530 - head = qp->q.fragments;
3531 - qp->q.fragments = head->next;
3532 - } else {
3533 - head = skb_rb_first(&qp->q.rb_fragments);
3534 - if (!head)
3535 - goto out;
3536 - if (FRAG_CB(head)->next_frag)
3537 - rb_replace_node(&head->rbnode,
3538 - &FRAG_CB(head)->next_frag->rbnode,
3539 - &qp->q.rb_fragments);
3540 - else
3541 - rb_erase(&head->rbnode, &qp->q.rb_fragments);
3542 - memset(&head->rbnode, 0, sizeof(head->rbnode));
3543 - barrier();
3544 - }
3545 - if (head == qp->q.fragments_tail)
3546 - qp->q.fragments_tail = NULL;
3547 -
3548 - sub_frag_mem_limit(qp->q.net, head->truesize);
3549 -
3550 + head = inet_frag_pull_head(&qp->q);
3551 + if (!head)
3552 + goto out;
3553 head->dev = dev_get_by_index_rcu(net, qp->iif);
3554 if (!head->dev)
3555 goto out;
3556 @@ -345,12 +276,10 @@ static int ip_frag_reinit(struct ipq *qp)
3557 static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
3558 {
3559 struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
3560 - struct rb_node **rbn, *parent;
3561 - struct sk_buff *skb1, *prev_tail;
3562 - int ihl, end, skb1_run_end;
3563 + int ihl, end, flags, offset;
3564 + struct sk_buff *prev_tail;
3565 struct net_device *dev;
3566 unsigned int fragsize;
3567 - int flags, offset;
3568 int err = -ENOENT;
3569 u8 ecn;
3570
3571 @@ -382,7 +311,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
3572 */
3573 if (end < qp->q.len ||
3574 ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
3575 - goto err;
3576 + goto discard_qp;
3577 qp->q.flags |= INET_FRAG_LAST_IN;
3578 qp->q.len = end;
3579 } else {
3580 @@ -394,82 +323,33 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
3581 if (end > qp->q.len) {
3582 /* Some bits beyond end -> corruption. */
3583 if (qp->q.flags & INET_FRAG_LAST_IN)
3584 - goto err;
3585 + goto discard_qp;
3586 qp->q.len = end;
3587 }
3588 }
3589 if (end == offset)
3590 - goto err;
3591 + goto discard_qp;
3592
3593 err = -ENOMEM;
3594 if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
3595 - goto err;
3596 + goto discard_qp;
3597
3598 err = pskb_trim_rcsum(skb, end - offset);
3599 if (err)
3600 - goto err;
3601 + goto discard_qp;
3602
3603 /* Note : skb->rbnode and skb->dev share the same location. */
3604 dev = skb->dev;
3605 /* Makes sure compiler wont do silly aliasing games */
3606 barrier();
3607
3608 - /* RFC5722, Section 4, amended by Errata ID : 3089
3609 - * When reassembling an IPv6 datagram, if
3610 - * one or more its constituent fragments is determined to be an
3611 - * overlapping fragment, the entire datagram (and any constituent
3612 - * fragments) MUST be silently discarded.
3613 - *
3614 - * We do the same here for IPv4 (and increment an snmp counter) but
3615 - * we do not want to drop the whole queue in response to a duplicate
3616 - * fragment.
3617 - */
3618 -
3619 - err = -EINVAL;
3620 - /* Find out where to put this fragment. */
3621 prev_tail = qp->q.fragments_tail;
3622 - if (!prev_tail)
3623 - ip4_frag_create_run(&qp->q, skb); /* First fragment. */
3624 - else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
3625 - /* This is the common case: skb goes to the end. */
3626 - /* Detect and discard overlaps. */
3627 - if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
3628 - goto discard_qp;
3629 - if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
3630 - ip4_frag_append_to_last_run(&qp->q, skb);
3631 - else
3632 - ip4_frag_create_run(&qp->q, skb);
3633 - } else {
3634 - /* Binary search. Note that skb can become the first fragment,
3635 - * but not the last (covered above).
3636 - */
3637 - rbn = &qp->q.rb_fragments.rb_node;
3638 - do {
3639 - parent = *rbn;
3640 - skb1 = rb_to_skb(parent);
3641 - skb1_run_end = skb1->ip_defrag_offset +
3642 - FRAG_CB(skb1)->frag_run_len;
3643 - if (end <= skb1->ip_defrag_offset)
3644 - rbn = &parent->rb_left;
3645 - else if (offset >= skb1_run_end)
3646 - rbn = &parent->rb_right;
3647 - else if (offset >= skb1->ip_defrag_offset &&
3648 - end <= skb1_run_end)
3649 - goto err; /* No new data, potential duplicate */
3650 - else
3651 - goto discard_qp; /* Found an overlap */
3652 - } while (*rbn);
3653 - /* Here we have parent properly set, and rbn pointing to
3654 - * one of its NULL left/right children. Insert skb.
3655 - */
3656 - ip4_frag_init_run(skb);
3657 - rb_link_node(&skb->rbnode, parent, rbn);
3658 - rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
3659 - }
3660 + err = inet_frag_queue_insert(&qp->q, skb, offset, end);
3661 + if (err)
3662 + goto insert_error;
3663
3664 if (dev)
3665 qp->iif = dev->ifindex;
3666 - skb->ip_defrag_offset = offset;
3667
3668 qp->q.stamp = skb->tstamp;
3669 qp->q.meat += skb->len;
3670 @@ -494,15 +374,24 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
3671 skb->_skb_refdst = 0UL;
3672 err = ip_frag_reasm(qp, skb, prev_tail, dev);
3673 skb->_skb_refdst = orefdst;
3674 + if (err)
3675 + inet_frag_kill(&qp->q);
3676 return err;
3677 }
3678
3679 skb_dst_drop(skb);
3680 return -EINPROGRESS;
3681
3682 +insert_error:
3683 + if (err == IPFRAG_DUP) {
3684 + kfree_skb(skb);
3685 + return -EINVAL;
3686 + }
3687 + err = -EINVAL;
3688 + __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
3689 discard_qp:
3690 inet_frag_kill(&qp->q);
3691 - __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
3692 + __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
3693 err:
3694 kfree_skb(skb);
3695 return err;
3696 @@ -514,13 +403,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
3697 {
3698 struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
3699 struct iphdr *iph;
3700 - struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
3701 - struct sk_buff **nextp; /* To build frag_list. */
3702 - struct rb_node *rbn;
3703 - int len;
3704 - int ihlen;
3705 - int delta;
3706 - int err;
3707 + void *reasm_data;
3708 + int len, err;
3709 u8 ecn;
3710
3711 ipq_kill(qp);
3712 @@ -530,117 +414,23 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
3713 err = -EINVAL;
3714 goto out_fail;
3715 }
3716 - /* Make the one we just received the head. */
3717 - if (head != skb) {
3718 - fp = skb_clone(skb, GFP_ATOMIC);
3719 - if (!fp)
3720 - goto out_nomem;
3721 - FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
3722 - if (RB_EMPTY_NODE(&skb->rbnode))
3723 - FRAG_CB(prev_tail)->next_frag = fp;
3724 - else
3725 - rb_replace_node(&skb->rbnode, &fp->rbnode,
3726 - &qp->q.rb_fragments);
3727 - if (qp->q.fragments_tail == skb)
3728 - qp->q.fragments_tail = fp;
3729 - skb_morph(skb, head);
3730 - FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
3731 - rb_replace_node(&head->rbnode, &skb->rbnode,
3732 - &qp->q.rb_fragments);
3733 - consume_skb(head);
3734 - head = skb;
3735 - }
3736
3737 - WARN_ON(head->ip_defrag_offset != 0);
3738 -
3739 - /* Allocate a new buffer for the datagram. */
3740 - ihlen = ip_hdrlen(head);
3741 - len = ihlen + qp->q.len;
3742 + /* Make the one we just received the head. */
3743 + reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
3744 + if (!reasm_data)
3745 + goto out_nomem;
3746
3747 + len = ip_hdrlen(skb) + qp->q.len;
3748 err = -E2BIG;
3749 if (len > 65535)
3750 goto out_oversize;
3751
3752 - delta = - head->truesize;
3753 -
3754 - /* Head of list must not be cloned. */
3755 - if (skb_unclone(head, GFP_ATOMIC))
3756 - goto out_nomem;
3757 -
3758 - delta += head->truesize;
3759 - if (delta)
3760 - add_frag_mem_limit(qp->q.net, delta);
3761 -
3762 - /* If the first fragment is fragmented itself, we split
3763 - * it to two chunks: the first with data and paged part
3764 - * and the second, holding only fragments. */
3765 - if (skb_has_frag_list(head)) {
3766 - struct sk_buff *clone;
3767 - int i, plen = 0;
3768 -
3769 - clone = alloc_skb(0, GFP_ATOMIC);
3770 - if (!clone)
3771 - goto out_nomem;
3772 - skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
3773 - skb_frag_list_init(head);
3774 - for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
3775 - plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
3776 - clone->len = clone->data_len = head->data_len - plen;
3777 - head->truesize += clone->truesize;
3778 - clone->csum = 0;
3779 - clone->ip_summed = head->ip_summed;
3780 - add_frag_mem_limit(qp->q.net, clone->truesize);
3781 - skb_shinfo(head)->frag_list = clone;
3782 - nextp = &clone->next;
3783 - } else {
3784 - nextp = &skb_shinfo(head)->frag_list;
3785 - }
3786 + inet_frag_reasm_finish(&qp->q, skb, reasm_data);
3787
3788 - skb_push(head, head->data - skb_network_header(head));
3789 + skb->dev = dev;
3790 + IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
3791
3792 - /* Traverse the tree in order, to build frag_list. */
3793 - fp = FRAG_CB(head)->next_frag;
3794 - rbn = rb_next(&head->rbnode);
3795 - rb_erase(&head->rbnode, &qp->q.rb_fragments);
3796 - while (rbn || fp) {
3797 - /* fp points to the next sk_buff in the current run;
3798 - * rbn points to the next run.
3799 - */
3800 - /* Go through the current run. */
3801 - while (fp) {
3802 - *nextp = fp;
3803 - nextp = &fp->next;
3804 - fp->prev = NULL;
3805 - memset(&fp->rbnode, 0, sizeof(fp->rbnode));
3806 - fp->sk = NULL;
3807 - head->data_len += fp->len;
3808 - head->len += fp->len;
3809 - if (head->ip_summed != fp->ip_summed)
3810 - head->ip_summed = CHECKSUM_NONE;
3811 - else if (head->ip_summed == CHECKSUM_COMPLETE)
3812 - head->csum = csum_add(head->csum, fp->csum);
3813 - head->truesize += fp->truesize;
3814 - fp = FRAG_CB(fp)->next_frag;
3815 - }
3816 - /* Move to the next run. */
3817 - if (rbn) {
3818 - struct rb_node *rbnext = rb_next(rbn);
3819 -
3820 - fp = rb_to_skb(rbn);
3821 - rb_erase(rbn, &qp->q.rb_fragments);
3822 - rbn = rbnext;
3823 - }
3824 - }
3825 - sub_frag_mem_limit(qp->q.net, head->truesize);
3826 -
3827 - *nextp = NULL;
3828 - head->next = NULL;
3829 - head->prev = NULL;
3830 - head->dev = dev;
3831 - head->tstamp = qp->q.stamp;
3832 - IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
3833 -
3834 - iph = ip_hdr(head);
3835 + iph = ip_hdr(skb);
3836 iph->tot_len = htons(len);
3837 iph->tos |= ecn;
3838
3839 @@ -653,7 +443,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
3840 * from one very small df-fragment and one large non-df frag.
3841 */
3842 if (qp->max_df_size == qp->q.max_size) {
3843 - IPCB(head)->flags |= IPSKB_FRAG_PMTU;
3844 + IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
3845 iph->frag_off = htons(IP_DF);
3846 } else {
3847 iph->frag_off = 0;
3848 @@ -751,28 +541,6 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
3849 }
3850 EXPORT_SYMBOL(ip_check_defrag);
3851
3852 -unsigned int inet_frag_rbtree_purge(struct rb_root *root)
3853 -{
3854 - struct rb_node *p = rb_first(root);
3855 - unsigned int sum = 0;
3856 -
3857 - while (p) {
3858 - struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3859 -
3860 - p = rb_next(p);
3861 - rb_erase(&skb->rbnode, root);
3862 - while (skb) {
3863 - struct sk_buff *next = FRAG_CB(skb)->next_frag;
3864 -
3865 - sum += skb->truesize;
3866 - kfree_skb(skb);
3867 - skb = next;
3868 - }
3869 - }
3870 - return sum;
3871 -}
3872 -EXPORT_SYMBOL(inet_frag_rbtree_purge);
3873 -
3874 #ifdef CONFIG_SYSCTL
3875 static int dist_min;
3876
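
The rewritten error paths in ip_frag_queue() distinguish the two insert failures: a duplicate costs only the incoming skb, while an overlap discards the whole datagram per RFC 5722 and bumps both SNMP counters. A hedged restatement with an illustrative helper name:

static int handle_insert_error(struct ipq *qp, struct sk_buff *skb,
			       struct net *net, int err)
{
	if (err == IPFRAG_DUP) {
		kfree_skb(skb);		/* queue survives for reassembly */
		return -EINVAL;
	}
	/* IPFRAG_OVERLAP */
	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
	inet_frag_kill(&qp->q);		/* silently discard the datagram */
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -EINVAL;
}
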
3877 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3878 index 7a556e459375..98c81c21b753 100644
3879 --- a/net/ipv4/route.c
3880 +++ b/net/ipv4/route.c
3881 @@ -1187,9 +1187,23 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
3882
3883 static void ipv4_link_failure(struct sk_buff *skb)
3884 {
3885 + struct ip_options opt;
3886 struct rtable *rt;
3887 + int res;
3888
3889 - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
3890 + /* Recompile ip options since IPCB may not be valid anymore.
3891 + */
3892 + memset(&opt, 0, sizeof(opt));
3893 + opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
3894 +
3895 + rcu_read_lock();
3896 + res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
3897 + rcu_read_unlock();
3898 +
3899 + if (res)
3900 + return;
3901 +
3902 + __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
3903
3904 rt = skb_rtable(skb);
3905 if (rt)
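
The option length recomputed in ipv4_link_failure() follows directly from the IPv4 header layout: ihl counts 32-bit words and the fixed header is 20 bytes, so everything past it is options. Worked in plain, runnable C:

#include <stdio.h>

int main(void)
{
	unsigned int ihl = 7;			/* sample header: 7 words */
	unsigned int optlen = ihl * 4 - 20;	/* ihl*4 - sizeof(struct iphdr) */
	printf("options occupy %u bytes\n", optlen);	/* prints 8 */
	return 0;
}
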
3906 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3907 index 572f79abd393..cfdd70e32755 100644
3908 --- a/net/ipv4/tcp_input.c
3909 +++ b/net/ipv4/tcp_input.c
3910 @@ -402,11 +402,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
3911 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
3912 {
3913 struct tcp_sock *tp = tcp_sk(sk);
3914 + int room;
3915 +
3916 + room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
3917
3918 /* Check #1 */
3919 - if (tp->rcv_ssthresh < tp->window_clamp &&
3920 - (int)tp->rcv_ssthresh < tcp_space(sk) &&
3921 - !tcp_under_memory_pressure(sk)) {
3922 + if (room > 0 && !tcp_under_memory_pressure(sk)) {
3923 int incr;
3924
3925 /* Check #2. Increase window, if skb with such overhead
3926 @@ -419,8 +420,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
3927
3928 if (incr) {
3929 incr = max_t(int, incr, 2 * skb->len);
3930 - tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
3931 - tp->window_clamp);
3932 + tp->rcv_ssthresh += min(room, incr);
3933 inet_csk(sk)->icsk_ack.quick |= 1;
3934 }
3935 }
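
The tcp_grow_window() change caps each increment by 'room', so rcv_ssthresh can no longer outgrow either the window clamp or the space actually available on the socket. The arithmetic with sample numbers, as runnable C:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int window_clamp = 65535, space = 40000, rcv_ssthresh = 38000;
	int incr = 4000;
	int room = min_int(window_clamp, space) - rcv_ssthresh;	/* 2000 */

	if (room > 0)
		rcv_ssthresh += min_int(room, incr);	/* +2000, not +4000 */
	printf("rcv_ssthresh = %d\n", rcv_ssthresh);	/* 40000 */
	return 0;
}
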
3936 diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
3937 index 043ed8eb0ab9..cb1b4772dac0 100644
3938 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
3939 +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
3940 @@ -136,6 +136,9 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
3941 }
3942 #endif
3943
3944 +static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
3945 + struct sk_buff *prev_tail, struct net_device *dev);
3946 +
3947 static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
3948 {
3949 return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
3950 @@ -177,9 +180,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
3951 static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
3952 const struct frag_hdr *fhdr, int nhoff)
3953 {
3954 - struct sk_buff *prev, *next;
3955 unsigned int payload_len;
3956 - int offset, end;
3957 + struct net_device *dev;
3958 + struct sk_buff *prev;
3959 + int offset, end, err;
3960 u8 ecn;
3961
3962 if (fq->q.flags & INET_FRAG_COMPLETE) {
3963 @@ -254,55 +258,18 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
3964 goto err;
3965 }
3966
3967 - /* Find out which fragments are in front and at the back of us
3968 - * in the chain of fragments so far. We must know where to put
3969 - * this fragment, right?
3970 - */
3971 - prev = fq->q.fragments_tail;
3972 - if (!prev || prev->ip_defrag_offset < offset) {
3973 - next = NULL;
3974 - goto found;
3975 - }
3976 - prev = NULL;
3977 - for (next = fq->q.fragments; next != NULL; next = next->next) {
3978 - if (next->ip_defrag_offset >= offset)
3979 - break; /* bingo! */
3980 - prev = next;
3981 - }
3982 -
3983 -found:
3984 - /* RFC5722, Section 4:
3985 - * When reassembling an IPv6 datagram, if
3986 - * one or more its constituent fragments is determined to be an
3987 - * overlapping fragment, the entire datagram (and any constituent
3988 - * fragments, including those not yet received) MUST be silently
3989 - * discarded.
3990 - */
3991 -
3992 - /* Check for overlap with preceding fragment. */
3993 - if (prev &&
3994 - (prev->ip_defrag_offset + prev->len) > offset)
3995 - goto discard_fq;
3996 -
3997 - /* Look for overlap with succeeding segment. */
3998 - if (next && next->ip_defrag_offset < end)
3999 - goto discard_fq;
4000 -
4001 - /* Note : skb->ip_defrag_offset and skb->dev share the same location */
4002 - if (skb->dev)
4003 - fq->iif = skb->dev->ifindex;
4004 + /* Note : skb->rbnode and skb->dev share the same location. */
4005 + dev = skb->dev;
4006 /* Makes sure compiler wont do silly aliasing games */
4007 barrier();
4008 - skb->ip_defrag_offset = offset;
4009
4010 - /* Insert this fragment in the chain of fragments. */
4011 - skb->next = next;
4012 - if (!next)
4013 - fq->q.fragments_tail = skb;
4014 - if (prev)
4015 - prev->next = skb;
4016 - else
4017 - fq->q.fragments = skb;
4018 + prev = fq->q.fragments_tail;
4019 + err = inet_frag_queue_insert(&fq->q, skb, offset, end);
4020 + if (err)
4021 + goto insert_error;
4022 +
4023 + if (dev)
4024 + fq->iif = dev->ifindex;
4025
4026 fq->q.stamp = skb->tstamp;
4027 fq->q.meat += skb->len;
4028 @@ -319,11 +286,25 @@ found:
4029 fq->q.flags |= INET_FRAG_FIRST_IN;
4030 }
4031
4032 - return 0;
4033 + if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
4034 + fq->q.meat == fq->q.len) {
4035 + unsigned long orefdst = skb->_skb_refdst;
4036 +
4037 + skb->_skb_refdst = 0UL;
4038 + err = nf_ct_frag6_reasm(fq, skb, prev, dev);
4039 + skb->_skb_refdst = orefdst;
4040 + return err;
4041 + }
4042 +
4043 + skb_dst_drop(skb);
4044 + return -EINPROGRESS;
4045
4046 -discard_fq:
4047 +insert_error:
4048 + if (err == IPFRAG_DUP)
4049 + goto err;
4050 inet_frag_kill(&fq->q);
4051 err:
4052 + skb_dst_drop(skb);
4053 return -EINVAL;
4054 }
4055
4056 @@ -333,147 +314,67 @@ err:
4057 * It is called with locked fq, and caller must check that
4058 * queue is eligible for reassembly i.e. it is not COMPLETE,
4059 * the last and the first frames arrived and all the bits are here.
4060 - *
4061 - * returns true if *prev skb has been transformed into the reassembled
4062 - * skb, false otherwise.
4063 */
4064 -static bool
4065 -nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
4066 +static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
4067 + struct sk_buff *prev_tail, struct net_device *dev)
4068 {
4069 - struct sk_buff *fp, *head = fq->q.fragments;
4070 - int payload_len, delta;
4071 + void *reasm_data;
4072 + int payload_len;
4073 u8 ecn;
4074
4075 inet_frag_kill(&fq->q);
4076
4077 - WARN_ON(head == NULL);
4078 - WARN_ON(head->ip_defrag_offset != 0);
4079 -
4080 ecn = ip_frag_ecn_table[fq->ecn];
4081 if (unlikely(ecn == 0xff))
4082 - return false;
4083 + goto err;
4084 +
4085 + reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
4086 + if (!reasm_data)
4087 + goto err;
4088
4089 - /* Unfragmented part is taken from the first segment. */
4090 - payload_len = ((head->data - skb_network_header(head)) -
4091 + payload_len = ((skb->data - skb_network_header(skb)) -
4092 sizeof(struct ipv6hdr) + fq->q.len -
4093 sizeof(struct frag_hdr));
4094 if (payload_len > IPV6_MAXPLEN) {
4095 net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
4096 payload_len);
4097 - return false;
4098 - }
4099 -
4100 - delta = - head->truesize;
4101 -
4102 - /* Head of list must not be cloned. */
4103 - if (skb_unclone(head, GFP_ATOMIC))
4104 - return false;
4105 -
4106 - delta += head->truesize;
4107 - if (delta)
4108 - add_frag_mem_limit(fq->q.net, delta);
4109 -
4110 - /* If the first fragment is fragmented itself, we split
4111 - * it to two chunks: the first with data and paged part
4112 - * and the second, holding only fragments. */
4113 - if (skb_has_frag_list(head)) {
4114 - struct sk_buff *clone;
4115 - int i, plen = 0;
4116 -
4117 - clone = alloc_skb(0, GFP_ATOMIC);
4118 - if (clone == NULL)
4119 - return false;
4120 -
4121 - clone->next = head->next;
4122 - head->next = clone;
4123 - skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
4124 - skb_frag_list_init(head);
4125 - for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
4126 - plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
4127 - clone->len = clone->data_len = head->data_len - plen;
4128 - head->data_len -= clone->len;
4129 - head->len -= clone->len;
4130 - clone->csum = 0;
4131 - clone->ip_summed = head->ip_summed;
4132 -
4133 - add_frag_mem_limit(fq->q.net, clone->truesize);
4134 - }
4135 -
4136 - /* morph head into last received skb: prev.
4137 - *
4138 - * This allows callers of ipv6 conntrack defrag to continue
4139 - * to use the last skb(frag) passed into the reasm engine.
4140 - * The last skb frag 'silently' turns into the full reassembled skb.
4141 - *
4142 - * Since prev is also part of q->fragments we have to clone it first.
4143 - */
4144 - if (head != prev) {
4145 - struct sk_buff *iter;
4146 -
4147 - fp = skb_clone(prev, GFP_ATOMIC);
4148 - if (!fp)
4149 - return false;
4150 -
4151 - fp->next = prev->next;
4152 -
4153 - iter = head;
4154 - while (iter) {
4155 - if (iter->next == prev) {
4156 - iter->next = fp;
4157 - break;
4158 - }
4159 - iter = iter->next;
4160 - }
4161 -
4162 - skb_morph(prev, head);
4163 - prev->next = head->next;
4164 - consume_skb(head);
4165 - head = prev;
4166 + goto err;
4167 }
4168
4169 /* We have to remove fragment header from datagram and to relocate
4170 * header in order to calculate ICV correctly. */
4171 - skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
4172 - memmove(head->head + sizeof(struct frag_hdr), head->head,
4173 - (head->data - head->head) - sizeof(struct frag_hdr));
4174 - head->mac_header += sizeof(struct frag_hdr);
4175 - head->network_header += sizeof(struct frag_hdr);
4176 -
4177 - skb_shinfo(head)->frag_list = head->next;
4178 - skb_reset_transport_header(head);
4179 - skb_push(head, head->data - skb_network_header(head));
4180 -
4181 - for (fp = head->next; fp; fp = fp->next) {
4182 - head->data_len += fp->len;
4183 - head->len += fp->len;
4184 - if (head->ip_summed != fp->ip_summed)
4185 - head->ip_summed = CHECKSUM_NONE;
4186 - else if (head->ip_summed == CHECKSUM_COMPLETE)
4187 - head->csum = csum_add(head->csum, fp->csum);
4188 - head->truesize += fp->truesize;
4189 - fp->sk = NULL;
4190 - }
4191 - sub_frag_mem_limit(fq->q.net, head->truesize);
4192 + skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
4193 + memmove(skb->head + sizeof(struct frag_hdr), skb->head,
4194 + (skb->data - skb->head) - sizeof(struct frag_hdr));
4195 + skb->mac_header += sizeof(struct frag_hdr);
4196 + skb->network_header += sizeof(struct frag_hdr);
4197 +
4198 + skb_reset_transport_header(skb);
4199 +
4200 + inet_frag_reasm_finish(&fq->q, skb, reasm_data);
4201
4202 - head->ignore_df = 1;
4203 - head->next = NULL;
4204 - head->dev = dev;
4205 - head->tstamp = fq->q.stamp;
4206 - ipv6_hdr(head)->payload_len = htons(payload_len);
4207 - ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
4208 - IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
4209 + skb->ignore_df = 1;
4210 + skb->dev = dev;
4211 + ipv6_hdr(skb)->payload_len = htons(payload_len);
4212 + ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
4213 + IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
4214
4215 /* Yes, and fold redundant checksum back. 8) */
4216 - if (head->ip_summed == CHECKSUM_COMPLETE)
4217 - head->csum = csum_partial(skb_network_header(head),
4218 - skb_network_header_len(head),
4219 - head->csum);
4220 + if (skb->ip_summed == CHECKSUM_COMPLETE)
4221 + skb->csum = csum_partial(skb_network_header(skb),
4222 + skb_network_header_len(skb),
4223 + skb->csum);
4224
4225 fq->q.fragments = NULL;
4226 fq->q.rb_fragments = RB_ROOT;
4227 fq->q.fragments_tail = NULL;
4228 + fq->q.last_run_head = NULL;
4229
4230 - return true;
4231 + return 0;
4232 +
4233 +err:
4234 + inet_frag_kill(&fq->q);
4235 + return -EINVAL;
4236 }
4237
4238 /*
4239 @@ -542,7 +443,6 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
4240 int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
4241 {
4242 u16 savethdr = skb->transport_header;
4243 - struct net_device *dev = skb->dev;
4244 int fhoff, nhoff, ret;
4245 struct frag_hdr *fhdr;
4246 struct frag_queue *fq;
4247 @@ -565,10 +465,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
4248 hdr = ipv6_hdr(skb);
4249 fhdr = (struct frag_hdr *)skb_transport_header(skb);
4250
4251 - if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
4252 - fhdr->frag_off & htons(IP6_MF))
4253 - return -EINVAL;
4254 -
4255 skb_orphan(skb);
4256 fq = fq_find(net, fhdr->identification, user, hdr,
4257 skb->dev ? skb->dev->ifindex : 0);
4258 @@ -580,31 +476,17 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
4259 spin_lock_bh(&fq->q.lock);
4260
4261 ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
4262 - if (ret < 0) {
4263 - if (ret == -EPROTO) {
4264 - skb->transport_header = savethdr;
4265 - ret = 0;
4266 - }
4267 - goto out_unlock;
4268 + if (ret == -EPROTO) {
4269 + skb->transport_header = savethdr;
4270 + ret = 0;
4271 }
4272
4273 /* after queue has assumed skb ownership, only 0 or -EINPROGRESS
4274 * must be returned.
4275 */
4276 - ret = -EINPROGRESS;
4277 - if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
4278 - fq->q.meat == fq->q.len) {
4279 - unsigned long orefdst = skb->_skb_refdst;
4280 -
4281 - skb->_skb_refdst = 0UL;
4282 - if (nf_ct_frag6_reasm(fq, skb, dev))
4283 - ret = 0;
4284 - skb->_skb_refdst = orefdst;
4285 - } else {
4286 - skb_dst_drop(skb);
4287 - }
4288 + if (ret)
4289 + ret = -EINPROGRESS;
4290
4291 -out_unlock:
4292 spin_unlock_bh(&fq->q.lock);
4293 inet_frag_put(&fq->q);
4294 return ret;
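
After this change, once the queue has taken ownership of the skb, nf_ct_frag6_gather() reports only 0 (the skb is now the reassembled datagram) or -EINPROGRESS (the skb is held for more fragments). A hedged sketch of the caller-side contract, with an illustrative wrapper:

static int demo_gather(struct net *net, struct sk_buff *skb, u32 user)
{
	int ret = nf_ct_frag6_gather(net, skb, user);

	if (ret == -EINPROGRESS)
		return 0;	/* skb consumed by the queue; stop here */
	return ret;		/* 0: skb is the full datagram; <0: error */
}
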
4295 diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
4296 index 7c943392c128..095825f964e2 100644
4297 --- a/net/ipv6/reassembly.c
4298 +++ b/net/ipv6/reassembly.c
4299 @@ -69,8 +69,8 @@ static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
4300
4301 static struct inet_frags ip6_frags;
4302
4303 -static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
4304 - struct net_device *dev);
4305 +static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
4306 + struct sk_buff *prev_tail, struct net_device *dev);
4307
4308 static void ip6_frag_expire(struct timer_list *t)
4309 {
4310 @@ -111,21 +111,26 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
4311 struct frag_hdr *fhdr, int nhoff,
4312 u32 *prob_offset)
4313 {
4314 - struct sk_buff *prev, *next;
4315 - struct net_device *dev;
4316 - int offset, end, fragsize;
4317 struct net *net = dev_net(skb_dst(skb)->dev);
4318 + int offset, end, fragsize;
4319 + struct sk_buff *prev_tail;
4320 + struct net_device *dev;
4321 + int err = -ENOENT;
4322 u8 ecn;
4323
4324 if (fq->q.flags & INET_FRAG_COMPLETE)
4325 goto err;
4326
4327 + err = -EINVAL;
4328 offset = ntohs(fhdr->frag_off) & ~0x7;
4329 end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
4330 ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
4331
4332 if ((unsigned int)end > IPV6_MAXPLEN) {
4333 *prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
4334 + /* note that if prob_offset is set, the skb is freed elsewhere,
4335 + * we do not free it here.
4336 + */
4337 return -1;
4338 }
4339
4340 @@ -145,7 +150,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
4341 */
4342 if (end < fq->q.len ||
4343 ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
4344 - goto err;
4345 + goto discard_fq;
4346 fq->q.flags |= INET_FRAG_LAST_IN;
4347 fq->q.len = end;
4348 } else {
4349 @@ -162,70 +167,35 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
4350 if (end > fq->q.len) {
4351 /* Some bits beyond end -> corruption. */
4352 if (fq->q.flags & INET_FRAG_LAST_IN)
4353 - goto err;
4354 + goto discard_fq;
4355 fq->q.len = end;
4356 }
4357 }
4358
4359 if (end == offset)
4360 - goto err;
4361 + goto discard_fq;
4362
4363 + err = -ENOMEM;
4364 /* Point into the IP datagram 'data' part. */
4365 if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
4366 - goto err;
4367 -
4368 - if (pskb_trim_rcsum(skb, end - offset))
4369 - goto err;
4370 -
4371 - /* Find out which fragments are in front and at the back of us
4372 - * in the chain of fragments so far. We must know where to put
4373 - * this fragment, right?
4374 - */
4375 - prev = fq->q.fragments_tail;
4376 - if (!prev || prev->ip_defrag_offset < offset) {
4377 - next = NULL;
4378 - goto found;
4379 - }
4380 - prev = NULL;
4381 - for (next = fq->q.fragments; next != NULL; next = next->next) {
4382 - if (next->ip_defrag_offset >= offset)
4383 - break; /* bingo! */
4384 - prev = next;
4385 - }
4386 -
4387 -found:
4388 - /* RFC5722, Section 4, amended by Errata ID : 3089
4389 - * When reassembling an IPv6 datagram, if
4390 - * one or more its constituent fragments is determined to be an
4391 - * overlapping fragment, the entire datagram (and any constituent
4392 - * fragments) MUST be silently discarded.
4393 - */
4394 -
4395 - /* Check for overlap with preceding fragment. */
4396 - if (prev &&
4397 - (prev->ip_defrag_offset + prev->len) > offset)
4398 goto discard_fq;
4399
4400 - /* Look for overlap with succeeding segment. */
4401 - if (next && next->ip_defrag_offset < end)
4402 + err = pskb_trim_rcsum(skb, end - offset);
4403 + if (err)
4404 goto discard_fq;
4405
4406 - /* Note : skb->ip_defrag_offset and skb->dev share the same location */
4407 + /* Note : skb->rbnode and skb->dev share the same location. */
4408 dev = skb->dev;
4409 - if (dev)
4410 - fq->iif = dev->ifindex;
4411 /* Makes sure compiler wont do silly aliasing games */
4412 barrier();
4413 - skb->ip_defrag_offset = offset;
4414
4415 - /* Insert this fragment in the chain of fragments. */
4416 - skb->next = next;
4417 - if (!next)
4418 - fq->q.fragments_tail = skb;
4419 - if (prev)
4420 - prev->next = skb;
4421 - else
4422 - fq->q.fragments = skb;
4423 + prev_tail = fq->q.fragments_tail;
4424 + err = inet_frag_queue_insert(&fq->q, skb, offset, end);
4425 + if (err)
4426 + goto insert_error;
4427 +
4428 + if (dev)
4429 + fq->iif = dev->ifindex;
4430
4431 fq->q.stamp = skb->tstamp;
4432 fq->q.meat += skb->len;
4433 @@ -246,44 +216,48 @@ found:
4434
4435 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
4436 fq->q.meat == fq->q.len) {
4437 - int res;
4438 unsigned long orefdst = skb->_skb_refdst;
4439
4440 skb->_skb_refdst = 0UL;
4441 - res = ip6_frag_reasm(fq, prev, dev);
4442 + err = ip6_frag_reasm(fq, skb, prev_tail, dev);
4443 skb->_skb_refdst = orefdst;
4444 - return res;
4445 + return err;
4446 }
4447
4448 skb_dst_drop(skb);
4449 - return -1;
4450 + return -EINPROGRESS;
4451
4452 +insert_error:
4453 + if (err == IPFRAG_DUP) {
4454 + kfree_skb(skb);
4455 + return -EINVAL;
4456 + }
4457 + err = -EINVAL;
4458 + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
4459 + IPSTATS_MIB_REASM_OVERLAPS);
4460 discard_fq:
4461 inet_frag_kill(&fq->q);
4462 -err:
4463 __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
4464 IPSTATS_MIB_REASMFAILS);
4465 +err:
4466 kfree_skb(skb);
4467 - return -1;
4468 + return err;
4469 }
4470
4471 /*
4472 * Check if this packet is complete.
4473 - * Returns NULL on failure by any reason, and pointer
4474 - * to current nexthdr field in reassembled frame.
4475 *
4476 * It is called with locked fq, and caller must check that
4477 * queue is eligible for reassembly i.e. it is not COMPLETE,
4478 * the last and the first frames arrived and all the bits are here.
4479 */
4480 -static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
4481 - struct net_device *dev)
4482 +static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
4483 + struct sk_buff *prev_tail, struct net_device *dev)
4484 {
4485 struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
4486 - struct sk_buff *fp, *head = fq->q.fragments;
4487 - int payload_len, delta;
4488 unsigned int nhoff;
4489 - int sum_truesize;
4490 + void *reasm_data;
4491 + int payload_len;
4492 u8 ecn;
4493
4494 inet_frag_kill(&fq->q);
4495 @@ -292,121 +266,40 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
4496 if (unlikely(ecn == 0xff))
4497 goto out_fail;
4498
4499 - /* Make the one we just received the head. */
4500 - if (prev) {
4501 - head = prev->next;
4502 - fp = skb_clone(head, GFP_ATOMIC);
4503 -
4504 - if (!fp)
4505 - goto out_oom;
4506 -
4507 - fp->next = head->next;
4508 - if (!fp->next)
4509 - fq->q.fragments_tail = fp;
4510 - prev->next = fp;
4511 -
4512 - skb_morph(head, fq->q.fragments);
4513 - head->next = fq->q.fragments->next;
4514 -
4515 - consume_skb(fq->q.fragments);
4516 - fq->q.fragments = head;
4517 - }
4518 -
4519 - WARN_ON(head == NULL);
4520 - WARN_ON(head->ip_defrag_offset != 0);
4521 + reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
4522 + if (!reasm_data)
4523 + goto out_oom;
4524
4525 - /* Unfragmented part is taken from the first segment. */
4526 - payload_len = ((head->data - skb_network_header(head)) -
4527 + payload_len = ((skb->data - skb_network_header(skb)) -
4528 sizeof(struct ipv6hdr) + fq->q.len -
4529 sizeof(struct frag_hdr));
4530 if (payload_len > IPV6_MAXPLEN)
4531 goto out_oversize;
4532
4533 - delta = - head->truesize;
4534 -
4535 - /* Head of list must not be cloned. */
4536 - if (skb_unclone(head, GFP_ATOMIC))
4537 - goto out_oom;
4538 -
4539 - delta += head->truesize;
4540 - if (delta)
4541 - add_frag_mem_limit(fq->q.net, delta);
4542 -
4543 - /* If the first fragment is fragmented itself, we split
4544 - * it to two chunks: the first with data and paged part
4545 - * and the second, holding only fragments. */
4546 - if (skb_has_frag_list(head)) {
4547 - struct sk_buff *clone;
4548 - int i, plen = 0;
4549 -
4550 - clone = alloc_skb(0, GFP_ATOMIC);
4551 - if (!clone)
4552 - goto out_oom;
4553 - clone->next = head->next;
4554 - head->next = clone;
4555 - skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
4556 - skb_frag_list_init(head);
4557 - for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
4558 - plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
4559 - clone->len = clone->data_len = head->data_len - plen;
4560 - head->data_len -= clone->len;
4561 - head->len -= clone->len;
4562 - clone->csum = 0;
4563 - clone->ip_summed = head->ip_summed;
4564 - add_frag_mem_limit(fq->q.net, clone->truesize);
4565 - }
4566 -
4567 /* We have to remove fragment header from datagram and to relocate
4568 * header in order to calculate ICV correctly. */
4569 nhoff = fq->nhoffset;
4570 - skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
4571 - memmove(head->head + sizeof(struct frag_hdr), head->head,
4572 - (head->data - head->head) - sizeof(struct frag_hdr));
4573 - if (skb_mac_header_was_set(head))
4574 - head->mac_header += sizeof(struct frag_hdr);
4575 - head->network_header += sizeof(struct frag_hdr);
4576 -
4577 - skb_reset_transport_header(head);
4578 - skb_push(head, head->data - skb_network_header(head));
4579 -
4580 - sum_truesize = head->truesize;
4581 - for (fp = head->next; fp;) {
4582 - bool headstolen;
4583 - int delta;
4584 - struct sk_buff *next = fp->next;
4585 -
4586 - sum_truesize += fp->truesize;
4587 - if (head->ip_summed != fp->ip_summed)
4588 - head->ip_summed = CHECKSUM_NONE;
4589 - else if (head->ip_summed == CHECKSUM_COMPLETE)
4590 - head->csum = csum_add(head->csum, fp->csum);
4591 -
4592 - if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
4593 - kfree_skb_partial(fp, headstolen);
4594 - } else {
4595 - fp->sk = NULL;
4596 - if (!skb_shinfo(head)->frag_list)
4597 - skb_shinfo(head)->frag_list = fp;
4598 - head->data_len += fp->len;
4599 - head->len += fp->len;
4600 - head->truesize += fp->truesize;
4601 - }
4602 - fp = next;
4603 - }
4604 - sub_frag_mem_limit(fq->q.net, sum_truesize);
4605 + skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
4606 + memmove(skb->head + sizeof(struct frag_hdr), skb->head,
4607 + (skb->data - skb->head) - sizeof(struct frag_hdr));
4608 + if (skb_mac_header_was_set(skb))
4609 + skb->mac_header += sizeof(struct frag_hdr);
4610 + skb->network_header += sizeof(struct frag_hdr);
4611 +
4612 + skb_reset_transport_header(skb);
4613 +
4614 + inet_frag_reasm_finish(&fq->q, skb, reasm_data);
4615
4616 - head->next = NULL;
4617 - head->dev = dev;
4618 - head->tstamp = fq->q.stamp;
4619 - ipv6_hdr(head)->payload_len = htons(payload_len);
4620 - ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
4621 - IP6CB(head)->nhoff = nhoff;
4622 - IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
4623 - IP6CB(head)->frag_max_size = fq->q.max_size;
4624 + skb->dev = dev;
4625 + ipv6_hdr(skb)->payload_len = htons(payload_len);
4626 + ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
4627 + IP6CB(skb)->nhoff = nhoff;
4628 + IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
4629 + IP6CB(skb)->frag_max_size = fq->q.max_size;
4630
4631 /* Yes, and fold redundant checksum back. 8) */
4632 - skb_postpush_rcsum(head, skb_network_header(head),
4633 - skb_network_header_len(head));
4634 + skb_postpush_rcsum(skb, skb_network_header(skb),
4635 + skb_network_header_len(skb));
4636
4637 rcu_read_lock();
4638 __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
4639 @@ -414,6 +307,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
4640 fq->q.fragments = NULL;
4641 fq->q.rb_fragments = RB_ROOT;
4642 fq->q.fragments_tail = NULL;
4643 + fq->q.last_run_head = NULL;
4644 return 1;
4645
4646 out_oversize:
4647 @@ -425,6 +319,7 @@ out_fail:
4648 rcu_read_lock();
4649 __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
4650 rcu_read_unlock();
4651 + inet_frag_kill(&fq->q);
4652 return -1;
4653 }
4654
4655 @@ -463,10 +358,6 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
4656 return 1;
4657 }
4658
4659 - if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
4660 - fhdr->frag_off & htons(IP6_MF))
4661 - goto fail_hdr;
4662 -
4663 iif = skb->dev ? skb->dev->ifindex : 0;
4664 fq = fq_find(net, fhdr->identification, hdr, iif);
4665 if (fq) {
4666 @@ -484,6 +375,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
4667 if (prob_offset) {
4668 __IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
4669 IPSTATS_MIB_INHDRERRORS);
4670 + /* icmpv6_param_prob() calls kfree_skb(skb) */
4671 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
4672 }
4673 return ret;
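
The comment added above pins down an ownership rule worth restating: when prob_offset is set, the skb is handed to icmpv6_param_prob(), which frees it. A minimal sketch of a hypothetical caller honouring that rule:

static void report_bad_fragment(struct sk_buff *skb, u32 prob_offset)
{
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
	/* skb is gone here: no kfree_skb(), no further accesses */
}
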
4674 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4675 index 9006bb3c9e72..06fa8425d82c 100644
4676 --- a/net/ipv6/route.c
4677 +++ b/net/ipv6/route.c
4678 @@ -2367,6 +2367,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
4679
4680 rcu_read_lock();
4681 from = rcu_dereference(rt6->from);
4682 + if (!from) {
4683 + rcu_read_unlock();
4684 + return;
4685 + }
4686 nrt6 = ip6_rt_cache_alloc(from, daddr, saddr);
4687 if (nrt6) {
4688 rt6_do_update_pmtu(nrt6, mtu);
4689 diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
4690 index 8f6998091d26..2123f6e90fc0 100644
4691 --- a/net/mac80211/driver-ops.h
4692 +++ b/net/mac80211/driver-ops.h
4693 @@ -1166,6 +1166,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
4694 {
4695 struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
4696
4697 + if (local->in_reconfig)
4698 + return;
4699 +
4700 if (!check_sdata_in_driver(sdata))
4701 return;
4702
4703 diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
4704 index 793016d722ec..9fd37d91b5ed 100644
4705 --- a/net/sched/sch_cake.c
4706 +++ b/net/sched/sch_cake.c
4707 @@ -1508,32 +1508,29 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
4708 return idx + (tin << 16);
4709 }
4710
4711 -static void cake_wash_diffserv(struct sk_buff *skb)
4712 -{
4713 - switch (skb->protocol) {
4714 - case htons(ETH_P_IP):
4715 - ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
4716 - break;
4717 - case htons(ETH_P_IPV6):
4718 - ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
4719 - break;
4720 - default:
4721 - break;
4722 - }
4723 -}
4724 -
4725 static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
4726 {
4727 + int wlen = skb_network_offset(skb);
4728 u8 dscp;
4729
4730 - switch (skb->protocol) {
4731 + switch (tc_skb_protocol(skb)) {
4732 case htons(ETH_P_IP):
4733 + wlen += sizeof(struct iphdr);
4734 + if (!pskb_may_pull(skb, wlen) ||
4735 + skb_try_make_writable(skb, wlen))
4736 + return 0;
4737 +
4738 dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
4739 if (wash && dscp)
4740 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
4741 return dscp;
4742
4743 case htons(ETH_P_IPV6):
4744 + wlen += sizeof(struct ipv6hdr);
4745 + if (!pskb_may_pull(skb, wlen) ||
4746 + skb_try_make_writable(skb, wlen))
4747 + return 0;
4748 +
4749 dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
4750 if (wash && dscp)
4751 ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
4752 @@ -1553,25 +1550,27 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
4753 {
4754 struct cake_sched_data *q = qdisc_priv(sch);
4755 u32 tin;
4756 + u8 dscp;
4757 +
4758 + /* Tin selection: Default to diffserv-based selection, allow overriding
4759 + * using firewall marks or skb->priority.
4760 + */
4761 + dscp = cake_handle_diffserv(skb,
4762 + q->rate_flags & CAKE_FLAG_WASH);
4763
4764 - if (TC_H_MAJ(skb->priority) == sch->handle &&
4765 - TC_H_MIN(skb->priority) > 0 &&
4766 - TC_H_MIN(skb->priority) <= q->tin_cnt) {
4767 + if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
4768 + tin = 0;
4769 +
4770 + else if (TC_H_MAJ(skb->priority) == sch->handle &&
4771 + TC_H_MIN(skb->priority) > 0 &&
4772 + TC_H_MIN(skb->priority) <= q->tin_cnt)
4773 tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
4774
4775 - if (q->rate_flags & CAKE_FLAG_WASH)
4776 - cake_wash_diffserv(skb);
4777 - } else if (q->tin_mode != CAKE_DIFFSERV_BESTEFFORT) {
4778 - /* extract the Diffserv Precedence field, if it exists */
4779 - /* and clear DSCP bits if washing */
4780 - tin = q->tin_index[cake_handle_diffserv(skb,
4781 - q->rate_flags & CAKE_FLAG_WASH)];
4782 + else {
4783 + tin = q->tin_index[dscp];
4784 +
4785 if (unlikely(tin >= q->tin_cnt))
4786 tin = 0;
4787 - } else {
4788 - tin = 0;
4789 - if (q->rate_flags & CAKE_FLAG_WASH)
4790 - cake_wash_diffserv(skb);
4791 }
4792
4793 return &q->tins[tin];
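
cake_handle_diffserv() now verifies the IP header is linear and writable before reading (and possibly washing) the DSCP field. The same guard in isolation, assuming an illustrative IPv4-only helper:

static u8 read_dscp_safely(struct sk_buff *skb)
{
	int wlen = skb_network_offset(skb) + sizeof(struct iphdr);

	if (!pskb_may_pull(skb, wlen) ||	/* header fully in linear area? */
	    skb_try_make_writable(skb, wlen))	/* private copy if shared */
		return 0;			/* fall back to best effort */

	return ipv4_get_dsfield(ip_hdr(skb)) >> 2;
}
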
4794 diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
4795 index 66d5b2c5987a..d72985ca1d55 100644
4796 --- a/net/tipc/name_table.c
4797 +++ b/net/tipc/name_table.c
4798 @@ -908,7 +908,8 @@ static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg,
4799 for (; i < TIPC_NAMETBL_SIZE; i++) {
4800 head = &tn->nametbl->services[i];
4801
4802 - if (*last_type) {
4803 + if (*last_type ||
4804 + (!i && *last_key && (*last_lower == *last_key))) {
4805 service = tipc_service_find(net, *last_type);
4806 if (!service)
4807 return -EPIPE;
4808 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
4809 index 7be43697ff84..7f40b6aab689 100644
4810 --- a/scripts/mod/file2alias.c
4811 +++ b/scripts/mod/file2alias.c
4812 @@ -47,49 +47,9 @@ typedef struct {
4813 struct devtable {
4814 const char *device_id; /* name of table, __mod_<name>__*_device_table. */
4815 unsigned long id_size;
4816 - void *function;
4817 + int (*do_entry)(const char *filename, void *symval, char *alias);
4818 };
4819
4820 -#define ___cat(a,b) a ## b
4821 -#define __cat(a,b) ___cat(a,b)
4822 -
4823 -/* we need some special handling for this host tool running eventually on
4824 - * Darwin. The Mach-O section handling is a bit different than ELF section
4825 - * handling. The differnces in detail are:
4826 - * a) we have segments which have sections
4827 - * b) we need a API call to get the respective section symbols */
4828 -#if defined(__MACH__)
4829 -#include <mach-o/getsect.h>
4830 -
4831 -#define INIT_SECTION(name) do { \
4832 - unsigned long name ## _len; \
4833 - char *__cat(pstart_,name) = getsectdata("__TEXT", \
4834 - #name, &__cat(name,_len)); \
4835 - char *__cat(pstop_,name) = __cat(pstart_,name) + \
4836 - __cat(name, _len); \
4837 - __cat(__start_,name) = (void *)__cat(pstart_,name); \
4838 - __cat(__stop_,name) = (void *)__cat(pstop_,name); \
4839 - } while (0)
4840 -#define SECTION(name) __attribute__((section("__TEXT, " #name)))
4841 -
4842 -struct devtable **__start___devtable, **__stop___devtable;
4843 -#else
4844 -#define INIT_SECTION(name) /* no-op for ELF */
4845 -#define SECTION(name) __attribute__((section(#name)))
4846 -
4847 -/* We construct a table of pointers in an ELF section (pointers generally
4848 - * go unpadded by gcc). ld creates boundary syms for us. */
4849 -extern struct devtable *__start___devtable[], *__stop___devtable[];
4850 -#endif /* __MACH__ */
4851 -
4852 -#if !defined(__used)
4853 -# if __GNUC__ == 3 && __GNUC_MINOR__ < 3
4854 -# define __used __attribute__((__unused__))
4855 -# else
4856 -# define __used __attribute__((__used__))
4857 -# endif
4858 -#endif
4859 -
4860 /* Define a variable f that holds the value of field f of struct devid
4861 * based at address m.
4862 */
4863 @@ -102,16 +62,6 @@ extern struct devtable *__start___devtable[], *__stop___devtable[];
4864 #define DEF_FIELD_ADDR(m, devid, f) \
4865 typeof(((struct devid *)0)->f) *f = ((m) + OFF_##devid##_##f)
4866
4867 -/* Add a table entry. We test function type matches while we're here. */
4868 -#define ADD_TO_DEVTABLE(device_id, type, function) \
4869 - static struct devtable __cat(devtable,__LINE__) = { \
4870 - device_id + 0*sizeof((function)((const char *)NULL, \
4871 - (void *)NULL, \
4872 - (char *)NULL)), \
4873 - SIZE_##type, (function) }; \
4874 - static struct devtable *SECTION(__devtable) __used \
4875 - __cat(devtable_ptr,__LINE__) = &__cat(devtable,__LINE__)
4876 -
4877 #define ADD(str, sep, cond, field) \
4878 do { \
4879 strcat(str, sep); \
4880 @@ -431,7 +381,6 @@ static int do_hid_entry(const char *filename,
4881
4882 return 1;
4883 }
4884 -ADD_TO_DEVTABLE("hid", hid_device_id, do_hid_entry);
4885
4886 /* Looks like: ieee1394:venNmoNspNverN */
4887 static int do_ieee1394_entry(const char *filename,
4888 @@ -456,7 +405,6 @@ static int do_ieee1394_entry(const char *filename,
4889 add_wildcard(alias);
4890 return 1;
4891 }
4892 -ADD_TO_DEVTABLE("ieee1394", ieee1394_device_id, do_ieee1394_entry);
4893
4894 /* Looks like: pci:vNdNsvNsdNbcNscNiN. */
4895 static int do_pci_entry(const char *filename,
4896 @@ -500,7 +448,6 @@ static int do_pci_entry(const char *filename,
4897 add_wildcard(alias);
4898 return 1;
4899 }
4900 -ADD_TO_DEVTABLE("pci", pci_device_id, do_pci_entry);
4901
4902 /* looks like: "ccw:tNmNdtNdmN" */
4903 static int do_ccw_entry(const char *filename,
4904 @@ -524,7 +471,6 @@ static int do_ccw_entry(const char *filename,
4905 add_wildcard(alias);
4906 return 1;
4907 }
4908 -ADD_TO_DEVTABLE("ccw", ccw_device_id, do_ccw_entry);
4909
4910 /* looks like: "ap:tN" */
4911 static int do_ap_entry(const char *filename,
4912 @@ -535,7 +481,6 @@ static int do_ap_entry(const char *filename,
4913 sprintf(alias, "ap:t%02X*", dev_type);
4914 return 1;
4915 }
4916 -ADD_TO_DEVTABLE("ap", ap_device_id, do_ap_entry);
4917
4918 /* looks like: "css:tN" */
4919 static int do_css_entry(const char *filename,
4920 @@ -546,7 +491,6 @@ static int do_css_entry(const char *filename,
4921 sprintf(alias, "css:t%01X", type);
4922 return 1;
4923 }
4924 -ADD_TO_DEVTABLE("css", css_device_id, do_css_entry);
4925
4926 /* Looks like: "serio:tyNprNidNexN" */
4927 static int do_serio_entry(const char *filename,
4928 @@ -566,7 +510,6 @@ static int do_serio_entry(const char *filename,
4929 add_wildcard(alias);
4930 return 1;
4931 }
4932 -ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry);
4933
4934 /* looks like: "acpi:ACPI0003" or "acpi:PNP0C0B" or "acpi:LNXVIDEO" or
4935 * "acpi:bbsspp" (bb=base-class, ss=sub-class, pp=prog-if)
4936 @@ -604,7 +547,6 @@ static int do_acpi_entry(const char *filename,
4937 }
4938 return 1;
4939 }
4940 -ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry);
4941
4942 /* looks like: "pnp:dD" */
4943 static void do_pnp_device_entry(void *symval, unsigned long size,
4944 @@ -725,7 +667,6 @@ static int do_pcmcia_entry(const char *filename,
4945 add_wildcard(alias);
4946 return 1;
4947 }
4948 -ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
4949
4950 static int do_vio_entry(const char *filename, void *symval,
4951 char *alias)
4952 @@ -745,7 +686,6 @@ static int do_vio_entry(const char *filename, void *symval,
4953 add_wildcard(alias);
4954 return 1;
4955 }
4956 -ADD_TO_DEVTABLE("vio", vio_device_id, do_vio_entry);
4957
4958 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
4959
4960 @@ -818,7 +758,6 @@ static int do_input_entry(const char *filename, void *symval,
4961 do_input(alias, *swbit, 0, INPUT_DEVICE_ID_SW_MAX);
4962 return 1;
4963 }
4964 -ADD_TO_DEVTABLE("input", input_device_id, do_input_entry);
4965
4966 static int do_eisa_entry(const char *filename, void *symval,
4967 char *alias)
4968 @@ -830,7 +769,6 @@ static int do_eisa_entry(const char *filename, void *symval,
4969 strcat(alias, "*");
4970 return 1;
4971 }
4972 -ADD_TO_DEVTABLE("eisa", eisa_device_id, do_eisa_entry);
4973
4974 /* Looks like: parisc:tNhvNrevNsvN */
4975 static int do_parisc_entry(const char *filename, void *symval,
4976 @@ -850,7 +788,6 @@ static int do_parisc_entry(const char *filename, void *symval,
4977 add_wildcard(alias);
4978 return 1;
4979 }
4980 -ADD_TO_DEVTABLE("parisc", parisc_device_id, do_parisc_entry);
4981
4982 /* Looks like: sdio:cNvNdN. */
4983 static int do_sdio_entry(const char *filename,
4984 @@ -867,7 +804,6 @@ static int do_sdio_entry(const char *filename,
4985 add_wildcard(alias);
4986 return 1;
4987 }
4988 -ADD_TO_DEVTABLE("sdio", sdio_device_id, do_sdio_entry);
4989
4990 /* Looks like: ssb:vNidNrevN. */
4991 static int do_ssb_entry(const char *filename,
4992 @@ -884,7 +820,6 @@ static int do_ssb_entry(const char *filename,
4993 add_wildcard(alias);
4994 return 1;
4995 }
4996 -ADD_TO_DEVTABLE("ssb", ssb_device_id, do_ssb_entry);
4997
4998 /* Looks like: bcma:mNidNrevNclN. */
4999 static int do_bcma_entry(const char *filename,
5000 @@ -903,7 +838,6 @@ static int do_bcma_entry(const char *filename,
5001 add_wildcard(alias);
5002 return 1;
5003 }
5004 -ADD_TO_DEVTABLE("bcma", bcma_device_id, do_bcma_entry);
5005
5006 /* Looks like: virtio:dNvN */
5007 static int do_virtio_entry(const char *filename, void *symval,
5008 @@ -919,7 +853,6 @@ static int do_virtio_entry(const char *filename, void *symval,
5009 add_wildcard(alias);
5010 return 1;
5011 }
5012 -ADD_TO_DEVTABLE("virtio", virtio_device_id, do_virtio_entry);
5013
5014 /*
5015 * Looks like: vmbus:guid
5016 @@ -942,7 +875,6 @@ static int do_vmbus_entry(const char *filename, void *symval,
5017
5018 return 1;
5019 }
5020 -ADD_TO_DEVTABLE("vmbus", hv_vmbus_device_id, do_vmbus_entry);
5021
5022 /* Looks like: rpmsg:S */
5023 static int do_rpmsg_entry(const char *filename, void *symval,
5024 @@ -953,7 +885,6 @@ static int do_rpmsg_entry(const char *filename, void *symval,
5025
5026 return 1;
5027 }
5028 -ADD_TO_DEVTABLE("rpmsg", rpmsg_device_id, do_rpmsg_entry);
5029
5030 /* Looks like: i2c:S */
5031 static int do_i2c_entry(const char *filename, void *symval,
5032 @@ -964,7 +895,6 @@ static int do_i2c_entry(const char *filename, void *symval,
5033
5034 return 1;
5035 }
5036 -ADD_TO_DEVTABLE("i2c", i2c_device_id, do_i2c_entry);
5037
5038 /* Looks like: spi:S */
5039 static int do_spi_entry(const char *filename, void *symval,
5040 @@ -975,7 +905,6 @@ static int do_spi_entry(const char *filename, void *symval,
5041
5042 return 1;
5043 }
5044 -ADD_TO_DEVTABLE("spi", spi_device_id, do_spi_entry);
5045
5046 static const struct dmifield {
5047 const char *prefix;
5048 @@ -1030,7 +959,6 @@ static int do_dmi_entry(const char *filename, void *symval,
5049 strcat(alias, ":");
5050 return 1;
5051 }
5052 -ADD_TO_DEVTABLE("dmi", dmi_system_id, do_dmi_entry);
5053
5054 static int do_platform_entry(const char *filename,
5055 void *symval, char *alias)
5056 @@ -1039,7 +967,6 @@ static int do_platform_entry(const char *filename,
5057 sprintf(alias, PLATFORM_MODULE_PREFIX "%s", *name);
5058 return 1;
5059 }
5060 -ADD_TO_DEVTABLE("platform", platform_device_id, do_platform_entry);
5061
5062 static int do_mdio_entry(const char *filename,
5063 void *symval, char *alias)
5064 @@ -1064,7 +991,6 @@ static int do_mdio_entry(const char *filename,
5065
5066 return 1;
5067 }
5068 -ADD_TO_DEVTABLE("mdio", mdio_device_id, do_mdio_entry);
5069
5070 /* Looks like: zorro:iN. */
5071 static int do_zorro_entry(const char *filename, void *symval,
5072 @@ -1075,7 +1001,6 @@ static int do_zorro_entry(const char *filename, void *symval,
5073 ADD(alias, "i", id != ZORRO_WILDCARD, id);
5074 return 1;
5075 }
5076 -ADD_TO_DEVTABLE("zorro", zorro_device_id, do_zorro_entry);
5077
5078 /* looks like: "pnp:dD" */
5079 static int do_isapnp_entry(const char *filename,
5080 @@ -1091,7 +1016,6 @@ static int do_isapnp_entry(const char *filename,
5081 (function >> 12) & 0x0f, (function >> 8) & 0x0f);
5082 return 1;
5083 }
5084 -ADD_TO_DEVTABLE("isapnp", isapnp_device_id, do_isapnp_entry);
5085
5086 /* Looks like: "ipack:fNvNdN". */
5087 static int do_ipack_entry(const char *filename,
5088 @@ -1107,7 +1031,6 @@ static int do_ipack_entry(const char *filename,
5089 add_wildcard(alias);
5090 return 1;
5091 }
5092 -ADD_TO_DEVTABLE("ipack", ipack_device_id, do_ipack_entry);
5093
5094 /*
5095 * Append a match expression for a single masked hex digit.
5096 @@ -1178,7 +1101,6 @@ static int do_amba_entry(const char *filename,
5097
5098 return 1;
5099 }
5100 -ADD_TO_DEVTABLE("amba", amba_id, do_amba_entry);
5101
5102 /*
5103 * looks like: "mipscdmm:tN"
5104 @@ -1194,7 +1116,6 @@ static int do_mips_cdmm_entry(const char *filename,
5105 sprintf(alias, "mipscdmm:t%02X*", type);
5106 return 1;
5107 }
5108 -ADD_TO_DEVTABLE("mipscdmm", mips_cdmm_device_id, do_mips_cdmm_entry);
5109
5110 /* LOOKS like cpu:type:x86,venVVVVfamFFFFmodMMMM:feature:*,FEAT,*
5111 * All fields are numbers. It would be nicer to use strings for vendor
5112 @@ -1219,7 +1140,6 @@ static int do_x86cpu_entry(const char *filename, void *symval,
5113 sprintf(alias + strlen(alias), "%04X*", feature);
5114 return 1;
5115 }
5116 -ADD_TO_DEVTABLE("x86cpu", x86_cpu_id, do_x86cpu_entry);
5117
5118 /* LOOKS like cpu:type:*:feature:*FEAT* */
5119 static int do_cpu_entry(const char *filename, void *symval, char *alias)
5120 @@ -1229,7 +1149,6 @@ static int do_cpu_entry(const char *filename, void *symval, char *alias)
5121 sprintf(alias, "cpu:type:*:feature:*%04X*", feature);
5122 return 1;
5123 }
5124 -ADD_TO_DEVTABLE("cpu", cpu_feature, do_cpu_entry);
5125
5126 /* Looks like: mei:S:uuid:N:* */
5127 static int do_mei_entry(const char *filename, void *symval,
5128 @@ -1248,7 +1167,6 @@ static int do_mei_entry(const char *filename, void *symval,
5129
5130 return 1;
5131 }
5132 -ADD_TO_DEVTABLE("mei", mei_cl_device_id, do_mei_entry);
5133
5134 /* Looks like: rapidio:vNdNavNadN */
5135 static int do_rio_entry(const char *filename,
5136 @@ -1268,7 +1186,6 @@ static int do_rio_entry(const char *filename,
5137 add_wildcard(alias);
5138 return 1;
5139 }
5140 -ADD_TO_DEVTABLE("rapidio", rio_device_id, do_rio_entry);
5141
5142 /* Looks like: ulpi:vNpN */
5143 static int do_ulpi_entry(const char *filename, void *symval,
5144 @@ -1281,7 +1198,6 @@ static int do_ulpi_entry(const char *filename, void *symval,
5145
5146 return 1;
5147 }
5148 -ADD_TO_DEVTABLE("ulpi", ulpi_device_id, do_ulpi_entry);
5149
5150 /* Looks like: hdaudio:vNrNaN */
5151 static int do_hda_entry(const char *filename, void *symval, char *alias)
5152 @@ -1298,7 +1214,6 @@ static int do_hda_entry(const char *filename, void *symval, char *alias)
5153 add_wildcard(alias);
5154 return 1;
5155 }
5156 -ADD_TO_DEVTABLE("hdaudio", hda_device_id, do_hda_entry);
5157
5158 /* Looks like: sdw:mNpN */
5159 static int do_sdw_entry(const char *filename, void *symval, char *alias)
5160 @@ -1313,7 +1228,6 @@ static int do_sdw_entry(const char *filename, void *symval, char *alias)
5161 add_wildcard(alias);
5162 return 1;
5163 }
5164 -ADD_TO_DEVTABLE("sdw", sdw_device_id, do_sdw_entry);
5165
5166 /* Looks like: fsl-mc:vNdN */
5167 static int do_fsl_mc_entry(const char *filename, void *symval,
5168 @@ -1325,7 +1239,6 @@ static int do_fsl_mc_entry(const char *filename, void *symval,
5169 sprintf(alias, "fsl-mc:v%08Xd%s", vendor, *obj_type);
5170 return 1;
5171 }
5172 -ADD_TO_DEVTABLE("fslmc", fsl_mc_device_id, do_fsl_mc_entry);
5173
5174 /* Looks like: tbsvc:kSpNvNrN */
5175 static int do_tbsvc_entry(const char *filename, void *symval, char *alias)
5176 @@ -1350,7 +1263,6 @@ static int do_tbsvc_entry(const char *filename, void *symval, char *alias)
5177 add_wildcard(alias);
5178 return 1;
5179 }
5180 -ADD_TO_DEVTABLE("tbsvc", tb_service_id, do_tbsvc_entry);
5181
5182 /* Looks like: typec:idNmN */
5183 static int do_typec_entry(const char *filename, void *symval, char *alias)
5184 @@ -1363,7 +1275,6 @@ static int do_typec_entry(const char *filename, void *symval, char *alias)
5185
5186 return 1;
5187 }
5188 -ADD_TO_DEVTABLE("typec", typec_device_id, do_typec_entry);
5189
5190 /* Does namelen bytes of name exactly match the symbol? */
5191 static bool sym_is(const char *name, unsigned namelen, const char *symbol)
5192 @@ -1377,12 +1288,11 @@ static bool sym_is(const char *name, unsigned namelen, const char *symbol)
5193 static void do_table(void *symval, unsigned long size,
5194 unsigned long id_size,
5195 const char *device_id,
5196 - void *function,
5197 + int (*do_entry)(const char *filename, void *symval, char *alias),
5198 struct module *mod)
5199 {
5200 unsigned int i;
5201 char alias[500];
5202 - int (*do_entry)(const char *, void *entry, char *alias) = function;
5203
5204 device_id_check(mod->name, device_id, size, id_size, symval);
5205 /* Leave last one: it's the terminator. */
5206 @@ -1396,6 +1306,48 @@ static void do_table(void *symval, unsigned long size,
5207 }
5208 }
5209
5210 +static const struct devtable devtable[] = {
5211 + {"hid", SIZE_hid_device_id, do_hid_entry},
5212 + {"ieee1394", SIZE_ieee1394_device_id, do_ieee1394_entry},
5213 + {"pci", SIZE_pci_device_id, do_pci_entry},
5214 + {"ccw", SIZE_ccw_device_id, do_ccw_entry},
5215 + {"ap", SIZE_ap_device_id, do_ap_entry},
5216 + {"css", SIZE_css_device_id, do_css_entry},
5217 + {"serio", SIZE_serio_device_id, do_serio_entry},
5218 + {"acpi", SIZE_acpi_device_id, do_acpi_entry},
5219 + {"pcmcia", SIZE_pcmcia_device_id, do_pcmcia_entry},
5220 + {"vio", SIZE_vio_device_id, do_vio_entry},
5221 + {"input", SIZE_input_device_id, do_input_entry},
5222 + {"eisa", SIZE_eisa_device_id, do_eisa_entry},
5223 + {"parisc", SIZE_parisc_device_id, do_parisc_entry},
5224 + {"sdio", SIZE_sdio_device_id, do_sdio_entry},
5225 + {"ssb", SIZE_ssb_device_id, do_ssb_entry},
5226 + {"bcma", SIZE_bcma_device_id, do_bcma_entry},
5227 + {"virtio", SIZE_virtio_device_id, do_virtio_entry},
5228 + {"vmbus", SIZE_hv_vmbus_device_id, do_vmbus_entry},
5229 + {"rpmsg", SIZE_rpmsg_device_id, do_rpmsg_entry},
5230 + {"i2c", SIZE_i2c_device_id, do_i2c_entry},
5231 + {"spi", SIZE_spi_device_id, do_spi_entry},
5232 + {"dmi", SIZE_dmi_system_id, do_dmi_entry},
5233 + {"platform", SIZE_platform_device_id, do_platform_entry},
5234 + {"mdio", SIZE_mdio_device_id, do_mdio_entry},
5235 + {"zorro", SIZE_zorro_device_id, do_zorro_entry},
5236 + {"isapnp", SIZE_isapnp_device_id, do_isapnp_entry},
5237 + {"ipack", SIZE_ipack_device_id, do_ipack_entry},
5238 + {"amba", SIZE_amba_id, do_amba_entry},
5239 + {"mipscdmm", SIZE_mips_cdmm_device_id, do_mips_cdmm_entry},
5240 + {"x86cpu", SIZE_x86_cpu_id, do_x86cpu_entry},
5241 + {"cpu", SIZE_cpu_feature, do_cpu_entry},
5242 + {"mei", SIZE_mei_cl_device_id, do_mei_entry},
5243 + {"rapidio", SIZE_rio_device_id, do_rio_entry},
5244 + {"ulpi", SIZE_ulpi_device_id, do_ulpi_entry},
5245 + {"hdaudio", SIZE_hda_device_id, do_hda_entry},
5246 + {"sdw", SIZE_sdw_device_id, do_sdw_entry},
5247 + {"fslmc", SIZE_fsl_mc_device_id, do_fsl_mc_entry},
5248 + {"tbsvc", SIZE_tb_service_id, do_tbsvc_entry},
5249 + {"typec", SIZE_typec_device_id, do_typec_entry},
5250 +};
5251 +
5252 /* Create MODULE_ALIAS() statements.
5253 * At this time, we cannot write the actual output C source yet,
5254 * so we write into the mod->dev_table_buf buffer. */
5255 @@ -1450,13 +1402,14 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
5256 else if (sym_is(name, namelen, "pnp_card"))
5257 do_pnp_card_entries(symval, sym->st_size, mod);
5258 else {
5259 - struct devtable **p;
5260 - INIT_SECTION(__devtable);
5261 + int i;
5262 +
5263 + for (i = 0; i < ARRAY_SIZE(devtable); i++) {
5264 + const struct devtable *p = &devtable[i];
5265
5266 - for (p = __start___devtable; p < __stop___devtable; p++) {
5267 - if (sym_is(name, namelen, (*p)->device_id)) {
5268 - do_table(symval, sym->st_size, (*p)->id_size,
5269 - (*p)->device_id, (*p)->function, mod);
5270 + if (sym_is(name, namelen, p->device_id)) {
5271 + do_table(symval, sym->st_size, p->id_size,
5272 + p->device_id, p->do_entry, mod);
5273 break;
5274 }
5275 }
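The file2alias.c diff above replaces the old registration scheme -- ADD_TO_DEVTABLE() planted a pointer to each struct devtable in a dedicated linker section (with a Mach-O special case for Darwin hosts), and handle_moddevtable() walked the section between __start___devtable and __stop___devtable -- with a plain static array and a typed do_entry function pointer, so a mismatched handler signature now fails at compile time instead of hiding behind `void *function`. A self-contained sketch of the resulting dispatch pattern (the handler body and the id_size value are stand-ins):

    #include <stdio.h>
    #include <string.h>

    struct devtable {
        const char *device_id;  /* table name, e.g. "pci" */
        unsigned long id_size;  /* sizeof one table entry */
        int (*do_entry)(const char *filename, void *symval, char *alias);
    };

    /* Stand-in for a real alias formatter such as do_pci_entry(). */
    static int fake_pci_entry(const char *filename, void *symval, char *alias)
    {
        (void)filename; (void)symval;
        strcpy(alias, "pci:v*d*sv*sd*bc*sc*i*");
        return 1;
    }

    static const struct devtable devtable[] = {
        { "pci", 32 /* assumed entry size */, fake_pci_entry },
    };

    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

    int main(void)
    {
        const char *name = "pci";   /* parsed from the symbol name */
        char alias[500] = "";
        size_t i;

        for (i = 0; i < ARRAY_SIZE(devtable); i++) {
            if (!strcmp(name, devtable[i].device_id)) {
                devtable[i].do_entry("mod.o", NULL, alias);
                printf("MODULE_ALIAS(\"%s\");\n", alias);
                break;
            }
        }
        return 0;
    }

Beyond the type checking, the array removes the fragile section-boundary symbols, so the host tool no longer needs the __MACH__ INIT_SECTION() workaround when built on Darwin.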
5276 diff --git a/security/device_cgroup.c b/security/device_cgroup.c
5277 index cd97929fac66..dc28914fa72e 100644
5278 --- a/security/device_cgroup.c
5279 +++ b/security/device_cgroup.c
5280 @@ -560,7 +560,7 @@ static int propagate_exception(struct dev_cgroup *devcg_root,
5281 devcg->behavior == DEVCG_DEFAULT_ALLOW) {
5282 rc = dev_exception_add(devcg, ex);
5283 if (rc)
5284 - break;
5285 + return rc;
5286 } else {
5287 /*
5288 * in the other possible cases:
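In the device_cgroup hunk, returning rc directly instead of `break` matters because of the lock shape around the loop: propagate_exception() walks the css hierarchy under rcu_read_lock() but drops the read lock around each exception update and re-takes it before the next iteration (rationale inferred from the surrounding function, which is not fully shown here), so on error the lock is not held and a `break` would fall through to the final rcu_read_unlock() unbalanced. A runnable userspace analogue of the pattern:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    static int do_work(int i)
    {
        return i == 3 ? -1 : 0;         /* fail on one item */
    }

    /* The loop drops the lock around each work item and re-takes it for
     * the next pass.  On error the lock is NOT held, so `return` is
     * correct; `break` would fall through to the final unlock and
     * unbalance the lock. */
    static int walk(void)
    {
        int rc = 0, i;

        pthread_mutex_lock(&m);
        for (i = 0; i < 10; i++) {
            pthread_mutex_unlock(&m);   /* dropped for the work */
            rc = do_work(i);
            if (rc)
                return rc;              /* lock not held here */
            pthread_mutex_lock(&m);     /* re-taken for next pass */
        }
        pthread_mutex_unlock(&m);
        return rc;
    }

    int main(void)
    {
        printf("walk() = %d\n", walk());
        return 0;
    }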
5289 diff --git a/sound/core/info.c b/sound/core/info.c
5290 index fe502bc5e6d2..679136fba730 100644
5291 --- a/sound/core/info.c
5292 +++ b/sound/core/info.c
5293 @@ -722,8 +722,11 @@ snd_info_create_entry(const char *name, struct snd_info_entry *parent)
5294 INIT_LIST_HEAD(&entry->children);
5295 INIT_LIST_HEAD(&entry->list);
5296 entry->parent = parent;
5297 - if (parent)
5298 + if (parent) {
5299 + mutex_lock(&parent->access);
5300 list_add_tail(&entry->list, &parent->children);
5301 + mutex_unlock(&parent->access);
5302 + }
5303 return entry;
5304 }
5305
5306 @@ -805,7 +808,12 @@ void snd_info_free_entry(struct snd_info_entry * entry)
5307 list_for_each_entry_safe(p, n, &entry->children, list)
5308 snd_info_free_entry(p);
5309
5310 - list_del(&entry->list);
5311 + p = entry->parent;
5312 + if (p) {
5313 + mutex_lock(&p->access);
5314 + list_del(&entry->list);
5315 + mutex_unlock(&p->access);
5316 + }
5317 kfree(entry->name);
5318 if (entry->private_free)
5319 entry->private_free(entry);
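Taken together, the two sound/core/info.c hunks make every update of a parent's children list go through the parent's ->access mutex, so concurrent snd_info_create_entry() and snd_info_free_entry() calls on sibling nodes can no longer corrupt the list. Condensed from the hunks above (registration and teardown sides):

    /* registration: link the new entry under its parent */
    mutex_lock(&parent->access);
    list_add_tail(&entry->list, &parent->children);
    mutex_unlock(&parent->access);

    /* teardown: unlink under the same lock, if a parent exists */
    p = entry->parent;
    if (p) {
        mutex_lock(&p->access);
        list_del(&entry->list);
        mutex_unlock(&p->access);
    }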
5320 diff --git a/sound/core/init.c b/sound/core/init.c
5321 index 4849c611c0fe..16b7cc7aa66b 100644
5322 --- a/sound/core/init.c
5323 +++ b/sound/core/init.c
5324 @@ -407,14 +407,7 @@ int snd_card_disconnect(struct snd_card *card)
5325 card->shutdown = 1;
5326 spin_unlock(&card->files_lock);
5327
5328 - /* phase 1: disable fops (user space) operations for ALSA API */
5329 - mutex_lock(&snd_card_mutex);
5330 - snd_cards[card->number] = NULL;
5331 - clear_bit(card->number, snd_cards_lock);
5332 - mutex_unlock(&snd_card_mutex);
5333 -
5334 - /* phase 2: replace file->f_op with special dummy operations */
5335 -
5336 + /* replace file->f_op with special dummy operations */
5337 spin_lock(&card->files_lock);
5338 list_for_each_entry(mfile, &card->files_list, list) {
5339 /* it's critical part, use endless loop */
5340 @@ -430,7 +423,7 @@ int snd_card_disconnect(struct snd_card *card)
5341 }
5342 spin_unlock(&card->files_lock);
5343
5344 - /* phase 3: notify all connected devices about disconnection */
5345 + /* notify all connected devices about disconnection */
5346 /* at this point, they cannot respond to any calls except release() */
5347
5348 #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
5349 @@ -446,6 +439,13 @@ int snd_card_disconnect(struct snd_card *card)
5350 device_del(&card->card_dev);
5351 card->registered = false;
5352 }
5353 +
5354 + /* disable fops (user space) operations for ALSA API */
5355 + mutex_lock(&snd_card_mutex);
5356 + snd_cards[card->number] = NULL;
5357 + clear_bit(card->number, snd_cards_lock);
5358 + mutex_unlock(&snd_card_mutex);
5359 +
5360 #ifdef CONFIG_PM
5361 wake_up(&card->power_sleep);
5362 #endif
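The sound/core/init.c hunks reorder snd_card_disconnect(): the slot in snd_cards[] and the bit in snd_cards_lock are now released only after user-space fops have been swapped out and the device deleted, instead of as the very first step. Keeping the card number claimed until teardown is otherwise complete closes the window in which it could be handed out again mid-disconnect (assumed rationale; the change is a race fix in this path). The resulting order, condensed from the hunks:

    card->shutdown = 1;                      /* stop accepting new work     */
    /* swap each file->f_op for dummy ops: cut off existing users          */
    /* notify devices; only release() is honoured from here on             */
    device_del(&card->card_dev);             /* unregister the device      */

    mutex_lock(&snd_card_mutex);
    snd_cards[card->number] = NULL;          /* free the slot last, so     */
    clear_bit(card->number, snd_cards_lock); /* the number can't be reused */
    mutex_unlock(&snd_card_mutex);           /* while teardown is in flight */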
5363 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5364 index bd60eb7168fa..0a745d677b1c 100644
5365 --- a/sound/pci/hda/patch_realtek.c
5366 +++ b/sound/pci/hda/patch_realtek.c
5367 @@ -7170,6 +7170,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5368 {0x12, 0x90a60140},
5369 {0x14, 0x90170150},
5370 {0x21, 0x02211020}),
5371 + SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5372 + {0x21, 0x02211020}),
5373 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
5374 {0x14, 0x90170110},
5375 {0x21, 0x02211020}),
5376 @@ -7280,6 +7282,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5377 {0x21, 0x0221101f}),
5378 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5379 ALC256_STANDARD_PINS),
5380 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5381 + {0x14, 0x90170110},
5382 + {0x1b, 0x01011020},
5383 + {0x21, 0x0221101f}),
5384 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC,
5385 {0x14, 0x90170110},
5386 {0x1b, 0x90a70130},
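The patch_realtek.c hunks add Dell pin-quirk entries: each SND_HDA_PIN_QUIRK() matches a codec ID plus subsystem vendor plus a set of pin default-config values and applies the named fixup. The hex values are 32-bit HDA pin default configs; a hypothetical decoder for the fields most relevant to these entries (field layout per the Intel HDA specification; not kernel code):

    #include <stdio.h>

    static void decode_pincfg(unsigned int v)
    {
        printf("pincfg 0x%08x: device=%u location=0x%02x "
               "conn=%u assoc=%u seq=%u\n",
               v,
               (v >> 20) & 0xf,   /* default device (1=speaker, 2=HP out) */
               (v >> 24) & 0x3f,  /* location */
               (v >> 16) & 0xf,   /* connection type (1=1/8" jack) */
               (v >> 4) & 0xf,    /* default association */
               v & 0xf);          /* sequence */
    }

    int main(void)
    {
        decode_pincfg(0x02211020); /* the Dell headphone/headset pin above */
        return 0;
    }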
5387 diff --git a/sound/soc/rockchip/rockchip_pcm.c b/sound/soc/rockchip/rockchip_pcm.c
5388 index 7029e0b85f9e..4ac78d7a4b2d 100644
5389 --- a/sound/soc/rockchip/rockchip_pcm.c
5390 +++ b/sound/soc/rockchip/rockchip_pcm.c
5391 @@ -21,7 +21,8 @@ static const struct snd_pcm_hardware snd_rockchip_hardware = {
5392 .info = SNDRV_PCM_INFO_MMAP |
5393 SNDRV_PCM_INFO_MMAP_VALID |
5394 SNDRV_PCM_INFO_PAUSE |
5395 - SNDRV_PCM_INFO_RESUME,
5396 + SNDRV_PCM_INFO_RESUME |
5397 + SNDRV_PCM_INFO_INTERLEAVED,
5398 .period_bytes_min = 32,
5399 .period_bytes_max = 8192,
5400 .periods_min = 1,
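The rockchip_pcm.c one-liner adds SNDRV_PCM_INFO_INTERLEAVED to the hardware description, advertising interleaved sample layout; without it, the ALSA core rejects interleaved access, so an ordinary playback open like the alsa-lib fragment below fails at the set_access step (standard alsa-lib calls; device name and error handling are illustrative):

    #include <alsa/asoundlib.h>

    int main(void)
    {
        snd_pcm_t *pcm;
        snd_pcm_hw_params_t *hw;

        if (snd_pcm_open(&pcm, "default", SND_PCM_STREAM_PLAYBACK, 0) < 0)
            return 1;
        snd_pcm_hw_params_alloca(&hw);
        snd_pcm_hw_params_any(pcm, hw);
        /* succeeds only if the driver advertises ..._INFO_INTERLEAVED */
        if (snd_pcm_hw_params_set_access(pcm, hw,
                                         SND_PCM_ACCESS_RW_INTERLEAVED) < 0) {
            snd_pcm_close(pcm);
            return 1;
        }
        snd_pcm_close(pcm);
        return 0;
    }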
5401 diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
5402 index acc704bd3998..0b0ef3abc966 100644
5403 --- a/tools/include/linux/bitops.h
5404 +++ b/tools/include/linux/bitops.h
5405 @@ -3,8 +3,6 @@
5406 #define _TOOLS_LINUX_BITOPS_H_
5407
5408 #include <asm/types.h>
5409 -#include <linux/compiler.h>
5410 -
5411 #ifndef __WORDSIZE
5412 #define __WORDSIZE (__SIZEOF_LONG__ * 8)
5413 #endif
5414 @@ -12,10 +10,9 @@
5415 #ifndef BITS_PER_LONG
5416 # define BITS_PER_LONG __WORDSIZE
5417 #endif
5418 +#include <linux/bits.h>
5419 +#include <linux/compiler.h>
5420
5421 -#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
5422 -#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
5423 -#define BITS_PER_BYTE 8
5424 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
5425 #define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
5426 #define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
5427 diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
5428 new file mode 100644
5429 index 000000000000..2b7b532c1d51
5430 --- /dev/null
5431 +++ b/tools/include/linux/bits.h
5432 @@ -0,0 +1,26 @@
5433 +/* SPDX-License-Identifier: GPL-2.0 */
5434 +#ifndef __LINUX_BITS_H
5435 +#define __LINUX_BITS_H
5436 +#include <asm/bitsperlong.h>
5437 +
5438 +#define BIT(nr) (1UL << (nr))
5439 +#define BIT_ULL(nr) (1ULL << (nr))
5440 +#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
5441 +#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
5442 +#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
5443 +#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
5444 +#define BITS_PER_BYTE 8
5445 +
5446 +/*
5447 + * Create a contiguous bitmask starting at bit position @l and ending at
5448 + * position @h. For example
5449 + * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
5450 + */
5451 +#define GENMASK(h, l) \
5452 + (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
5453 +
5454 +#define GENMASK_ULL(h, l) \
5455 + (((~0ULL) - (1ULL << (l)) + 1) & \
5456 + (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
5457 +
5458 +#endif /* __LINUX_BITS_H */
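tools/include/linux/bits.h is a copy of the kernel's include/linux/bits.h, letting tools' bitops.h source BIT()/GENMASK() from one place (the check-headers.sh hunk below keeps the copy in sync with the kernel tree). A quick userspace check of GENMASK_ULL(), matching the example in the header comment (the macro is pasted from the hunk; only the includes and main() are added):

    #include <stdio.h>

    #define BITS_PER_LONG_LONG 64
    #define GENMASK_ULL(h, l) \
        (((~0ULL) - (1ULL << (l)) + 1) & \
         (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

    int main(void)
    {
        /* bits 21..39 inclusive, per the header's example */
        printf("0x%016llx\n", GENMASK_ULL(39, 21)); /* 0x000000ffffe00000 */
        return 0;
    }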
5459 diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
5460 index 466540ee8ea7..c72cc73a6b09 100755
5461 --- a/tools/perf/check-headers.sh
5462 +++ b/tools/perf/check-headers.sh
5463 @@ -14,6 +14,7 @@ include/uapi/linux/sched.h
5464 include/uapi/linux/stat.h
5465 include/uapi/linux/vhost.h
5466 include/uapi/sound/asound.h
5467 +include/linux/bits.h
5468 include/linux/hash.h
5469 include/uapi/linux/hw_breakpoint.h
5470 arch/x86/include/asm/disabled-features.h