Contents of /trunk/kernel-alx/patches-3.18/0121-3.18.22-all-fixes.patch
Revision 2703
Mon Oct 5 08:26:40 2015 UTC (8 years, 11 months ago) by niro
File size: 83163 byte(s)
-linux-3.18.22
1 | diff --git a/Makefile b/Makefile |
2 | index 6be90fab361b..7adbbbeeb421 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 3 |
7 | PATCHLEVEL = 18 |
8 | -SUBLEVEL = 21 |
9 | +SUBLEVEL = 22 |
10 | EXTRAVERSION = |
11 | NAME = Diseased Newt |
12 | |
13 | diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c |
14 | index 81a02a8762b0..86825f8883de 100644 |
15 | --- a/arch/arm64/kvm/inject_fault.c |
16 | +++ b/arch/arm64/kvm/inject_fault.c |
17 | @@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) |
18 | { |
19 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) |
20 | inject_abt32(vcpu, false, addr); |
21 | - |
22 | - inject_abt64(vcpu, false, addr); |
23 | + else |
24 | + inject_abt64(vcpu, false, addr); |
25 | } |
26 | |
27 | /** |
28 | @@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) |
29 | { |
30 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) |
31 | inject_abt32(vcpu, true, addr); |
32 | - |
33 | - inject_abt64(vcpu, true, addr); |
34 | + else |
35 | + inject_abt64(vcpu, true, addr); |
36 | } |
37 | |
38 | /** |
39 | @@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu) |
40 | { |
41 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) |
42 | inject_undef32(vcpu); |
43 | - |
44 | - inject_undef64(vcpu); |
45 | + else |
46 | + inject_undef64(vcpu); |
47 | } |
48 | diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S |
49 | index 5251565e344b..a6576cf1e6d9 100644 |
50 | --- a/arch/mips/kernel/scall64-64.S |
51 | +++ b/arch/mips/kernel/scall64-64.S |
52 | @@ -80,7 +80,7 @@ syscall_trace_entry: |
53 | SAVE_STATIC |
54 | move s0, t2 |
55 | move a0, sp |
56 | - daddiu a1, v0, __NR_64_Linux |
57 | + move a1, v0 |
58 | jal syscall_trace_enter |
59 | |
60 | bltz v0, 2f # seccomp failed? Skip syscall |
61 | diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S |
62 | index 77e74398b828..a8eb6575edc0 100644 |
63 | --- a/arch/mips/kernel/scall64-n32.S |
64 | +++ b/arch/mips/kernel/scall64-n32.S |
65 | @@ -72,7 +72,7 @@ n32_syscall_trace_entry: |
66 | SAVE_STATIC |
67 | move s0, t2 |
68 | move a0, sp |
69 | - daddiu a1, v0, __NR_N32_Linux |
70 | + move a1, v0 |
71 | jal syscall_trace_enter |
72 | |
73 | bltz v0, 2f # seccomp failed? Skip syscall |
74 | diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h |
75 | index a94b82e8f156..69126184c609 100644 |
76 | --- a/arch/x86/include/asm/desc.h |
77 | +++ b/arch/x86/include/asm/desc.h |
78 | @@ -280,21 +280,6 @@ static inline void clear_LDT(void) |
79 | set_ldt(NULL, 0); |
80 | } |
81 | |
82 | -/* |
83 | - * load one particular LDT into the current CPU |
84 | - */ |
85 | -static inline void load_LDT_nolock(mm_context_t *pc) |
86 | -{ |
87 | - set_ldt(pc->ldt, pc->size); |
88 | -} |
89 | - |
90 | -static inline void load_LDT(mm_context_t *pc) |
91 | -{ |
92 | - preempt_disable(); |
93 | - load_LDT_nolock(pc); |
94 | - preempt_enable(); |
95 | -} |
96 | - |
97 | static inline unsigned long get_desc_base(const struct desc_struct *desc) |
98 | { |
99 | return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); |
100 | diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h |
101 | index 876e74e8eec7..b6b7bc3f5d26 100644 |
102 | --- a/arch/x86/include/asm/mmu.h |
103 | +++ b/arch/x86/include/asm/mmu.h |
104 | @@ -9,8 +9,7 @@ |
105 | * we put the segment information here. |
106 | */ |
107 | typedef struct { |
108 | - void *ldt; |
109 | - int size; |
110 | + struct ldt_struct *ldt; |
111 | |
112 | #ifdef CONFIG_X86_64 |
113 | /* True if mm supports a task running in 32 bit compatibility mode. */ |
114 | diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h |
115 | index 166af2a8e865..23e0625a6183 100644 |
116 | --- a/arch/x86/include/asm/mmu_context.h |
117 | +++ b/arch/x86/include/asm/mmu_context.h |
118 | @@ -20,6 +20,50 @@ static inline void paravirt_activate_mm(struct mm_struct *prev, |
119 | #endif /* !CONFIG_PARAVIRT */ |
120 | |
121 | /* |
122 | + * ldt_structs can be allocated, used, and freed, but they are never |
123 | + * modified while live. |
124 | + */ |
125 | +struct ldt_struct { |
126 | + /* |
127 | + * Xen requires page-aligned LDTs with special permissions. This is |
128 | + * needed to prevent us from installing evil descriptors such as |
129 | + * call gates. On native, we could merge the ldt_struct and LDT |
130 | + * allocations, but it's not worth trying to optimize. |
131 | + */ |
132 | + struct desc_struct *entries; |
133 | + int size; |
134 | +}; |
135 | + |
136 | +static inline void load_mm_ldt(struct mm_struct *mm) |
137 | +{ |
138 | + struct ldt_struct *ldt; |
139 | + |
140 | + /* lockless_dereference synchronizes with smp_store_release */ |
141 | + ldt = lockless_dereference(mm->context.ldt); |
142 | + |
143 | + /* |
144 | + * Any change to mm->context.ldt is followed by an IPI to all |
145 | + * CPUs with the mm active. The LDT will not be freed until |
146 | + * after the IPI is handled by all such CPUs. This means that, |
147 | + * if the ldt_struct changes before we return, the values we see |
148 | + * will be safe, and the new values will be loaded before we run |
149 | + * any user code. |
150 | + * |
151 | + * NB: don't try to convert this to use RCU without extreme care. |
152 | + * We would still need IRQs off, because we don't want to change |
153 | + * the local LDT after an IPI loaded a newer value than the one |
154 | + * that we can see. |
155 | + */ |
156 | + |
157 | + if (unlikely(ldt)) |
158 | + set_ldt(ldt->entries, ldt->size); |
159 | + else |
160 | + clear_LDT(); |
161 | + |
162 | + DEBUG_LOCKS_WARN_ON(preemptible()); |
163 | +} |
164 | + |
165 | +/* |
166 | * Used for LDT copy/destruction. |
167 | */ |
168 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm); |
169 | @@ -55,7 +99,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
170 | |
171 | /* Load the LDT, if the LDT is different: */ |
172 | if (unlikely(prev->context.ldt != next->context.ldt)) |
173 | - load_LDT_nolock(&next->context); |
174 | + load_mm_ldt(next); |
175 | } |
176 | #ifdef CONFIG_SMP |
177 | else { |
178 | @@ -77,7 +121,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
179 | */ |
180 | load_cr3(next->pgd); |
181 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); |
182 | - load_LDT_nolock(&next->context); |
183 | + load_mm_ldt(next); |
184 | } |
185 | } |
186 | #endif |
187 | diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c |
188 | index 7bc49c3b9684..e757fcbe90db 100644 |
189 | --- a/arch/x86/kernel/cpu/common.c |
190 | +++ b/arch/x86/kernel/cpu/common.c |
191 | @@ -1383,7 +1383,7 @@ void cpu_init(void) |
192 | load_sp0(t, &current->thread); |
193 | set_tss_desc(cpu, t); |
194 | load_TR_desc(); |
195 | - load_LDT(&init_mm.context); |
196 | + load_mm_ldt(&init_mm); |
197 | |
198 | clear_all_debug_regs(); |
199 | dbg_restore_debug_regs(); |
200 | @@ -1426,7 +1426,7 @@ void cpu_init(void) |
201 | load_sp0(t, thread); |
202 | set_tss_desc(cpu, t); |
203 | load_TR_desc(); |
204 | - load_LDT(&init_mm.context); |
205 | + load_mm_ldt(&init_mm); |
206 | |
207 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); |
208 | |
209 | diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c |
210 | index 6b5acd5f4a34..c832e9f54cd6 100644 |
211 | --- a/arch/x86/kernel/cpu/perf_event.c |
212 | +++ b/arch/x86/kernel/cpu/perf_event.c |
213 | @@ -32,6 +32,7 @@ |
214 | #include <asm/smp.h> |
215 | #include <asm/alternative.h> |
216 | #include <asm/tlbflush.h> |
217 | +#include <asm/mmu_context.h> |
218 | #include <asm/timer.h> |
219 | #include <asm/desc.h> |
220 | #include <asm/ldt.h> |
221 | @@ -1987,21 +1988,25 @@ static unsigned long get_segment_base(unsigned int segment) |
222 | int idx = segment >> 3; |
223 | |
224 | if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { |
225 | + struct ldt_struct *ldt; |
226 | + |
227 | if (idx > LDT_ENTRIES) |
228 | return 0; |
229 | |
230 | - if (idx > current->active_mm->context.size) |
231 | + /* IRQs are off, so this synchronizes with smp_store_release */ |
232 | + ldt = lockless_dereference(current->active_mm->context.ldt); |
233 | + if (!ldt || idx > ldt->size) |
234 | return 0; |
235 | |
236 | - desc = current->active_mm->context.ldt; |
237 | + desc = &ldt->entries[idx]; |
238 | } else { |
239 | if (idx > GDT_ENTRIES) |
240 | return 0; |
241 | |
242 | - desc = raw_cpu_ptr(gdt_page.gdt); |
243 | + desc = raw_cpu_ptr(gdt_page.gdt) + idx; |
244 | } |
245 | |
246 | - return get_desc_base(desc + idx); |
247 | + return get_desc_base(desc); |
248 | } |
249 | |
250 | #ifdef CONFIG_COMPAT |
251 | diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S |
252 | index e36d9815ef56..fad5cd9d7c4b 100644 |
253 | --- a/arch/x86/kernel/entry_64.S |
254 | +++ b/arch/x86/kernel/entry_64.S |
255 | @@ -1459,20 +1459,77 @@ ENTRY(nmi) |
256 | * a nested NMI that updated the copy interrupt stack frame, a |
257 | * jump will be made to the repeat_nmi code that will handle the second |
258 | * NMI. |
259 | + * |
260 | + * However, espfix prevents us from directly returning to userspace |
261 | + * with a single IRET instruction. Similarly, IRET to user mode |
262 | + * can fault. We therefore handle NMIs from user space like |
263 | + * other IST entries. |
264 | */ |
265 | |
266 | /* Use %rdx as out temp variable throughout */ |
267 | pushq_cfi %rdx |
268 | CFI_REL_OFFSET rdx, 0 |
269 | |
270 | + testb $3, CS-RIP+8(%rsp) |
271 | + jz .Lnmi_from_kernel |
272 | + |
273 | + /* |
274 | + * NMI from user mode. We need to run on the thread stack, but we |
275 | + * can't go through the normal entry paths: NMIs are masked, and |
276 | + * we don't want to enable interrupts, because then we'll end |
277 | + * up in an awkward situation in which IRQs are on but NMIs |
278 | + * are off. |
279 | + */ |
280 | + |
281 | + SWAPGS |
282 | + cld |
283 | + movq %rsp, %rdx |
284 | + movq PER_CPU_VAR(kernel_stack), %rsp |
285 | + addq $KERNEL_STACK_OFFSET, %rsp |
286 | + pushq 5*8(%rdx) /* pt_regs->ss */ |
287 | + pushq 4*8(%rdx) /* pt_regs->rsp */ |
288 | + pushq 3*8(%rdx) /* pt_regs->flags */ |
289 | + pushq 2*8(%rdx) /* pt_regs->cs */ |
290 | + pushq 1*8(%rdx) /* pt_regs->rip */ |
291 | + pushq $-1 /* pt_regs->orig_ax */ |
292 | + pushq %rdi /* pt_regs->di */ |
293 | + pushq %rsi /* pt_regs->si */ |
294 | + pushq (%rdx) /* pt_regs->dx */ |
295 | + pushq %rcx /* pt_regs->cx */ |
296 | + pushq %rax /* pt_regs->ax */ |
297 | + pushq %r8 /* pt_regs->r8 */ |
298 | + pushq %r9 /* pt_regs->r9 */ |
299 | + pushq %r10 /* pt_regs->r10 */ |
300 | + pushq %r11 /* pt_regs->r11 */ |
301 | + pushq %rbx /* pt_regs->rbx */ |
302 | + pushq %rbp /* pt_regs->rbp */ |
303 | + pushq %r12 /* pt_regs->r12 */ |
304 | + pushq %r13 /* pt_regs->r13 */ |
305 | + pushq %r14 /* pt_regs->r14 */ |
306 | + pushq %r15 /* pt_regs->r15 */ |
307 | + |
308 | /* |
309 | - * If %cs was not the kernel segment, then the NMI triggered in user |
310 | - * space, which means it is definitely not nested. |
311 | + * At this point we no longer need to worry about stack damage |
312 | + * due to nesting -- we're on the normal thread stack and we're |
313 | + * done with the NMI stack. |
314 | */ |
315 | - cmpl $__KERNEL_CS, 16(%rsp) |
316 | - jne first_nmi |
317 | + |
318 | + movq %rsp, %rdi |
319 | + movq $-1, %rsi |
320 | + call do_nmi |
321 | |
322 | /* |
323 | + * Return back to user mode. We must *not* do the normal exit |
324 | + * work, because we don't want to enable interrupts. Fortunately, |
325 | + * do_nmi doesn't modify pt_regs. |
326 | + */ |
327 | + SWAPGS |
328 | + |
329 | + addq $6*8, %rsp /* skip bx, bp, and r12-r15 */ |
330 | + jmp restore_args |
331 | + |
332 | +.Lnmi_from_kernel: |
333 | + /* |
334 | * Check the special variable on the stack to see if NMIs are |
335 | * executing. |
336 | */ |
337 | @@ -1629,29 +1686,11 @@ end_repeat_nmi: |
338 | call save_paranoid |
339 | DEFAULT_FRAME 0 |
340 | |
341 | - /* |
342 | - * Save off the CR2 register. If we take a page fault in the NMI then |
343 | - * it could corrupt the CR2 value. If the NMI preempts a page fault |
344 | - * handler before it was able to read the CR2 register, and then the |
345 | - * NMI itself takes a page fault, the page fault that was preempted |
346 | - * will read the information from the NMI page fault and not the |
347 | - * origin fault. Save it off and restore it if it changes. |
348 | - * Use the r12 callee-saved register. |
349 | - */ |
350 | - movq %cr2, %r12 |
351 | - |
352 | /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ |
353 | movq %rsp,%rdi |
354 | movq $-1,%rsi |
355 | call do_nmi |
356 | |
357 | - /* Did the NMI take a page fault? Restore cr2 if it did */ |
358 | - movq %cr2, %rcx |
359 | - cmpq %rcx, %r12 |
360 | - je 1f |
361 | - movq %r12, %cr2 |
362 | -1: |
363 | - |
364 | testl %ebx,%ebx /* swapgs needed? */ |
365 | jnz nmi_restore |
366 | nmi_swapgs: |
367 | diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c |
368 | index c37886d759cc..2bcc0525f1c1 100644 |
369 | --- a/arch/x86/kernel/ldt.c |
370 | +++ b/arch/x86/kernel/ldt.c |
371 | @@ -12,6 +12,7 @@ |
372 | #include <linux/string.h> |
373 | #include <linux/mm.h> |
374 | #include <linux/smp.h> |
375 | +#include <linux/slab.h> |
376 | #include <linux/vmalloc.h> |
377 | #include <linux/uaccess.h> |
378 | |
379 | @@ -20,82 +21,82 @@ |
380 | #include <asm/mmu_context.h> |
381 | #include <asm/syscalls.h> |
382 | |
383 | -#ifdef CONFIG_SMP |
384 | +/* context.lock is held for us, so we don't need any locking. */ |
385 | static void flush_ldt(void *current_mm) |
386 | { |
387 | - if (current->active_mm == current_mm) |
389 | - load_LDT(&current->active_mm->context); |
389 | + mm_context_t *pc; |
390 | + |
391 | + if (current->active_mm != current_mm) |
392 | + return; |
393 | + |
395 | + pc = &current->active_mm->context; |
395 | + set_ldt(pc->ldt->entries, pc->ldt->size); |
396 | } |
397 | -#endif |
398 | |
399 | -static int alloc_ldt(mm_context_t *pc, int mincount, int reload) |
400 | +/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */ |
401 | +static struct ldt_struct *alloc_ldt_struct(int size) |
402 | { |
403 | - void *oldldt, *newldt; |
404 | - int oldsize; |
405 | - |
406 | - if (mincount <= pc->size) |
407 | - return 0; |
408 | - oldsize = pc->size; |
409 | - mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) & |
410 | - (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1)); |
411 | - if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE) |
412 | - newldt = vmalloc(mincount * LDT_ENTRY_SIZE); |
413 | + struct ldt_struct *new_ldt; |
414 | + int alloc_size; |
415 | + |
416 | + if (size > LDT_ENTRIES) |
417 | + return NULL; |
418 | + |
419 | + new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL); |
420 | + if (!new_ldt) |
421 | + return NULL; |
422 | + |
423 | + BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct)); |
424 | + alloc_size = size * LDT_ENTRY_SIZE; |
425 | + |
426 | + /* |
427 | + * Xen is very picky: it requires a page-aligned LDT that has no |
428 | + * trailing nonzero bytes in any page that contains LDT descriptors. |
429 | + * Keep it simple: zero the whole allocation and never allocate less |
430 | + * than PAGE_SIZE. |
431 | + */ |
432 | + if (alloc_size > PAGE_SIZE) |
433 | + new_ldt->entries = vzalloc(alloc_size); |
434 | else |
435 | - newldt = (void *)__get_free_page(GFP_KERNEL); |
436 | - |
437 | - if (!newldt) |
438 | - return -ENOMEM; |
439 | + new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL); |
440 | |
441 | - if (oldsize) |
442 | - memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE); |
443 | - oldldt = pc->ldt; |
444 | - memset(newldt + oldsize * LDT_ENTRY_SIZE, 0, |
445 | - (mincount - oldsize) * LDT_ENTRY_SIZE); |
446 | + if (!new_ldt->entries) { |
447 | + kfree(new_ldt); |
448 | + return NULL; |
449 | + } |
450 | |
451 | - paravirt_alloc_ldt(newldt, mincount); |
452 | + new_ldt->size = size; |
453 | + return new_ldt; |
454 | +} |
455 | |
456 | -#ifdef CONFIG_X86_64 |
457 | - /* CHECKME: Do we really need this ? */ |
458 | - wmb(); |
459 | -#endif |
460 | - pc->ldt = newldt; |
461 | - wmb(); |
462 | - pc->size = mincount; |
463 | - wmb(); |
464 | - |
465 | - if (reload) { |
466 | -#ifdef CONFIG_SMP |
467 | - preempt_disable(); |
468 | - load_LDT(pc); |
469 | - if (!cpumask_equal(mm_cpumask(current->mm), |
470 | - cpumask_of(smp_processor_id()))) |
471 | - smp_call_function(flush_ldt, current->mm, 1); |
472 | - preempt_enable(); |
473 | -#else |
474 | - load_LDT(pc); |
475 | -#endif |
476 | - } |
477 | - if (oldsize) { |
478 | - paravirt_free_ldt(oldldt, oldsize); |
479 | - if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE) |
480 | - vfree(oldldt); |
481 | - else |
482 | - put_page(virt_to_page(oldldt)); |
483 | - } |
484 | - return 0; |
485 | +/* After calling this, the LDT is immutable. */ |
486 | +static void finalize_ldt_struct(struct ldt_struct *ldt) |
487 | +{ |
488 | + paravirt_alloc_ldt(ldt->entries, ldt->size); |
489 | } |
490 | |
491 | -static inline int copy_ldt(mm_context_t *new, mm_context_t *old) |
492 | +/* context.lock is held */ |
493 | +static void install_ldt(struct mm_struct *current_mm, |
494 | + struct ldt_struct *ldt) |
495 | { |
496 | - int err = alloc_ldt(new, old->size, 0); |
497 | - int i; |
498 | + /* Synchronizes with lockless_dereference in load_mm_ldt. */ |
500 | + smp_store_release(&current_mm->context.ldt, ldt); |
500 | + |
501 | + /* Activate the LDT for all CPUs using current_mm. */ |
502 | + on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true); |
503 | +} |
504 | |
505 | - if (err < 0) |
506 | - return err; |
507 | +static void free_ldt_struct(struct ldt_struct *ldt) |
508 | +{ |
509 | + if (likely(!ldt)) |
510 | + return; |
511 | |
512 | - for (i = 0; i < old->size; i++) |
513 | - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); |
514 | - return 0; |
515 | + paravirt_free_ldt(ldt->entries, ldt->size); |
516 | + if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE) |
517 | + vfree(ldt->entries); |
518 | + else |
519 | + kfree(ldt->entries); |
520 | + kfree(ldt); |
521 | } |
522 | |
523 | /* |
524 | @@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old) |
525 | */ |
526 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm) |
527 | { |
528 | + struct ldt_struct *new_ldt; |
529 | struct mm_struct *old_mm; |
530 | int retval = 0; |
531 | |
532 | mutex_init(&mm->context.lock); |
533 | - mm->context.size = 0; |
534 | old_mm = current->mm; |
535 | - if (old_mm && old_mm->context.size > 0) { |
536 | - mutex_lock(&old_mm->context.lock); |
537 | - retval = copy_ldt(&mm->context, &old_mm->context); |
538 | - mutex_unlock(&old_mm->context.lock); |
539 | + if (!old_mm) { |
540 | + mm->context.ldt = NULL; |
541 | + return 0; |
542 | } |
543 | + |
544 | + mutex_lock(&old_mm->context.lock); |
545 | + if (!old_mm->context.ldt) { |
546 | + mm->context.ldt = NULL; |
547 | + goto out_unlock; |
548 | + } |
549 | + |
550 | + new_ldt = alloc_ldt_struct(old_mm->context.ldt->size); |
551 | + if (!new_ldt) { |
552 | + retval = -ENOMEM; |
553 | + goto out_unlock; |
554 | + } |
555 | + |
556 | + memcpy(new_ldt->entries, old_mm->context.ldt->entries, |
557 | + new_ldt->size * LDT_ENTRY_SIZE); |
558 | + finalize_ldt_struct(new_ldt); |
559 | + |
560 | + mm->context.ldt = new_ldt; |
561 | + |
562 | +out_unlock: |
563 | + mutex_unlock(&old_mm->context.lock); |
564 | return retval; |
565 | } |
566 | |
567 | @@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) |
568 | */ |
569 | void destroy_context(struct mm_struct *mm) |
570 | { |
571 | - if (mm->context.size) { |
572 | -#ifdef CONFIG_X86_32 |
573 | - /* CHECKME: Can this ever happen ? */ |
574 | - if (mm == current->active_mm) |
575 | - clear_LDT(); |
576 | -#endif |
577 | - paravirt_free_ldt(mm->context.ldt, mm->context.size); |
578 | - if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE) |
579 | - vfree(mm->context.ldt); |
580 | - else |
581 | - put_page(virt_to_page(mm->context.ldt)); |
582 | - mm->context.size = 0; |
583 | - } |
584 | + free_ldt_struct(mm->context.ldt); |
585 | + mm->context.ldt = NULL; |
586 | } |
587 | |
588 | static int read_ldt(void __user *ptr, unsigned long bytecount) |
589 | { |
590 | - int err; |
591 | + int retval; |
592 | unsigned long size; |
593 | struct mm_struct *mm = current->mm; |
594 | |
595 | - if (!mm->context.size) |
596 | - return 0; |
597 | + mutex_lock(&mm->context.lock); |
598 | + |
599 | + if (!mm->context.ldt) { |
600 | + retval = 0; |
601 | + goto out_unlock; |
602 | + } |
603 | + |
604 | if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES) |
605 | bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES; |
606 | |
607 | - mutex_lock(&mm->context.lock); |
608 | - size = mm->context.size * LDT_ENTRY_SIZE; |
609 | + size = mm->context.ldt->size * LDT_ENTRY_SIZE; |
610 | if (size > bytecount) |
611 | size = bytecount; |
612 | |
613 | - err = 0; |
614 | - if (copy_to_user(ptr, mm->context.ldt, size)) |
615 | - err = -EFAULT; |
616 | - mutex_unlock(&mm->context.lock); |
617 | - if (err < 0) |
618 | - goto error_return; |
619 | + if (copy_to_user(ptr, mm->context.ldt->entries, size)) { |
620 | + retval = -EFAULT; |
621 | + goto out_unlock; |
622 | + } |
623 | + |
624 | if (size != bytecount) { |
625 | - /* zero-fill the rest */ |
626 | - if (clear_user(ptr + size, bytecount - size) != 0) { |
627 | - err = -EFAULT; |
628 | - goto error_return; |
629 | + /* Zero-fill the rest and pretend we read bytecount bytes. */ |
630 | + if (clear_user(ptr + size, bytecount - size)) { |
631 | + retval = -EFAULT; |
632 | + goto out_unlock; |
633 | } |
634 | } |
635 | - return bytecount; |
636 | -error_return: |
637 | - return err; |
638 | + retval = bytecount; |
639 | + |
640 | +out_unlock: |
641 | + mutex_unlock(&mm->context.lock); |
642 | + return retval; |
643 | } |
644 | |
645 | static int read_default_ldt(void __user *ptr, unsigned long bytecount) |
646 | @@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) |
647 | struct desc_struct ldt; |
648 | int error; |
649 | struct user_desc ldt_info; |
650 | + int oldsize, newsize; |
651 | + struct ldt_struct *new_ldt, *old_ldt; |
652 | |
653 | error = -EINVAL; |
654 | if (bytecount != sizeof(ldt_info)) |
655 | @@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) |
656 | goto out; |
657 | } |
658 | |
659 | - mutex_lock(&mm->context.lock); |
660 | - if (ldt_info.entry_number >= mm->context.size) { |
661 | - error = alloc_ldt(&current->mm->context, |
662 | - ldt_info.entry_number + 1, 1); |
663 | - if (error < 0) |
664 | - goto out_unlock; |
665 | - } |
666 | - |
667 | - /* Allow LDTs to be cleared by the user. */ |
668 | - if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { |
669 | - if (oldmode || LDT_empty(&ldt_info)) { |
670 | - memset(&ldt, 0, sizeof(ldt)); |
671 | - goto install; |
672 | + if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) || |
673 | + LDT_empty(&ldt_info)) { |
674 | + /* The user wants to clear the entry. */ |
675 | + memset(&ldt, 0, sizeof(ldt)); |
676 | + } else { |
677 | + if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { |
678 | + error = -EINVAL; |
679 | + goto out; |
680 | } |
681 | + |
682 | + fill_ldt(&ldt, &ldt_info); |
683 | + if (oldmode) |
684 | + ldt.avl = 0; |
685 | } |
686 | |
687 | - if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { |
688 | - error = -EINVAL; |
689 | + mutex_lock(&mm->context.lock); |
690 | + |
691 | + old_ldt = mm->context.ldt; |
692 | + oldsize = old_ldt ? old_ldt->size : 0; |
693 | + newsize = max((int)(ldt_info.entry_number + 1), oldsize); |
694 | + |
695 | + error = -ENOMEM; |
696 | + new_ldt = alloc_ldt_struct(newsize); |
697 | + if (!new_ldt) |
698 | goto out_unlock; |
699 | - } |
700 | |
701 | - fill_ldt(&ldt, &ldt_info); |
702 | - if (oldmode) |
703 | - ldt.avl = 0; |
704 | + if (old_ldt) |
705 | + memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE); |
706 | + new_ldt->entries[ldt_info.entry_number] = ldt; |
707 | + finalize_ldt_struct(new_ldt); |
708 | |
709 | - /* Install the new entry ... */ |
710 | -install: |
711 | - write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt); |
712 | + install_ldt(mm, new_ldt); |
713 | + free_ldt_struct(old_ldt); |
714 | error = 0; |
715 | |
716 | out_unlock: |
717 | diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c |
718 | index c3e985d1751c..5c5ec7d28d9b 100644 |
719 | --- a/arch/x86/kernel/nmi.c |
720 | +++ b/arch/x86/kernel/nmi.c |
721 | @@ -408,15 +408,15 @@ static void default_do_nmi(struct pt_regs *regs) |
722 | NOKPROBE_SYMBOL(default_do_nmi); |
723 | |
724 | /* |
725 | - * NMIs can hit breakpoints which will cause it to lose its |
726 | - * NMI context with the CPU when the breakpoint does an iret. |
727 | - */ |
728 | -#ifdef CONFIG_X86_32 |
729 | -/* |
730 | - * For i386, NMIs use the same stack as the kernel, and we can |
731 | - * add a workaround to the iret problem in C (preventing nested |
732 | - * NMIs if an NMI takes a trap). Simply have 3 states the NMI |
733 | - * can be in: |
734 | + * NMIs can hit breakpoints which will cause it to lose its NMI context |
735 | + * with the CPU when the breakpoint or page fault does an IRET. |
736 | + * |
737 | + * As a result, NMIs can nest if NMIs get unmasked due an IRET during |
738 | + * NMI processing. On x86_64, the asm glue protects us from nested NMIs |
739 | + * if the outer NMI came from kernel mode, but we can still nest if the |
740 | + * outer NMI came from user mode. |
741 | + * |
742 | + * To handle these nested NMIs, we have three states: |
743 | * |
744 | * 1) not running |
745 | * 2) executing |
746 | @@ -430,15 +430,14 @@ NOKPROBE_SYMBOL(default_do_nmi); |
747 | * (Note, the latch is binary, thus multiple NMIs triggering, |
748 | * when one is running, are ignored. Only one NMI is restarted.) |
749 | * |
750 | - * If an NMI hits a breakpoint that executes an iret, another |
751 | - * NMI can preempt it. We do not want to allow this new NMI |
752 | - * to run, but we want to execute it when the first one finishes. |
753 | - * We set the state to "latched", and the exit of the first NMI will |
754 | - * perform a dec_return, if the result is zero (NOT_RUNNING), then |
755 | - * it will simply exit the NMI handler. If not, the dec_return |
756 | - * would have set the state to NMI_EXECUTING (what we want it to |
757 | - * be when we are running). In this case, we simply jump back |
758 | - * to rerun the NMI handler again, and restart the 'latched' NMI. |
759 | + * If an NMI executes an iret, another NMI can preempt it. We do not |
760 | + * want to allow this new NMI to run, but we want to execute it when the |
761 | + * first one finishes. We set the state to "latched", and the exit of |
762 | + * the first NMI will perform a dec_return, if the result is zero |
763 | + * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the |
764 | + * dec_return would have set the state to NMI_EXECUTING (what we want it |
765 | + * to be when we are running). In this case, we simply jump back to |
766 | + * rerun the NMI handler again, and restart the 'latched' NMI. |
767 | * |
768 | * No trap (breakpoint or page fault) should be hit before nmi_restart, |
769 | * thus there is no race between the first check of state for NOT_RUNNING |
770 | @@ -461,49 +460,36 @@ enum nmi_states { |
771 | static DEFINE_PER_CPU(enum nmi_states, nmi_state); |
772 | static DEFINE_PER_CPU(unsigned long, nmi_cr2); |
773 | |
774 | -#define nmi_nesting_preprocess(regs) \ |
775 | - do { \ |
776 | - if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \ |
777 | - this_cpu_write(nmi_state, NMI_LATCHED); \ |
778 | - return; \ |
779 | - } \ |
780 | - this_cpu_write(nmi_state, NMI_EXECUTING); \ |
781 | - this_cpu_write(nmi_cr2, read_cr2()); \ |
782 | - } while (0); \ |
783 | - nmi_restart: |
784 | - |
785 | -#define nmi_nesting_postprocess() \ |
786 | - do { \ |
787 | - if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \ |
788 | - write_cr2(this_cpu_read(nmi_cr2)); \ |
789 | - if (this_cpu_dec_return(nmi_state)) \ |
790 | - goto nmi_restart; \ |
791 | - } while (0) |
792 | -#else /* x86_64 */ |
793 | +#ifdef CONFIG_X86_64 |
794 | /* |
795 | - * In x86_64 things are a bit more difficult. This has the same problem |
796 | - * where an NMI hitting a breakpoint that calls iret will remove the |
797 | - * NMI context, allowing a nested NMI to enter. What makes this more |
798 | - * difficult is that both NMIs and breakpoints have their own stack. |
799 | - * When a new NMI or breakpoint is executed, the stack is set to a fixed |
800 | - * point. If an NMI is nested, it will have its stack set at that same |
801 | - * fixed address that the first NMI had, and will start corrupting the |
802 | - * stack. This is handled in entry_64.S, but the same problem exists with |
803 | - * the breakpoint stack. |
804 | + * In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without |
805 | + * some care, the inner breakpoint will clobber the outer breakpoint's |
806 | + * stack. |
807 | * |
808 | - * If a breakpoint is being processed, and the debug stack is being used, |
809 | - * if an NMI comes in and also hits a breakpoint, the stack pointer |
810 | - * will be set to the same fixed address as the breakpoint that was |
811 | - * interrupted, causing that stack to be corrupted. To handle this case, |
812 | - * check if the stack that was interrupted is the debug stack, and if |
813 | - * so, change the IDT so that new breakpoints will use the current stack |
814 | - * and not switch to the fixed address. On return of the NMI, switch back |
815 | - * to the original IDT. |
816 | + * If a breakpoint is being processed, and the debug stack is being |
817 | + * used, if an NMI comes in and also hits a breakpoint, the stack |
818 | + * pointer will be set to the same fixed address as the breakpoint that |
819 | + * was interrupted, causing that stack to be corrupted. To handle this |
820 | + * case, check if the stack that was interrupted is the debug stack, and |
821 | + * if so, change the IDT so that new breakpoints will use the current |
822 | + * stack and not switch to the fixed address. On return of the NMI, |
823 | + * switch back to the original IDT. |
824 | */ |
825 | static DEFINE_PER_CPU(int, update_debug_stack); |
826 | +#endif |
827 | |
828 | -static inline void nmi_nesting_preprocess(struct pt_regs *regs) |
829 | +dotraplinkage notrace __kprobes void |
830 | +do_nmi(struct pt_regs *regs, long error_code) |
831 | { |
832 | + if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { |
833 | + this_cpu_write(nmi_state, NMI_LATCHED); |
834 | + return; |
835 | + } |
836 | + this_cpu_write(nmi_state, NMI_EXECUTING); |
837 | + this_cpu_write(nmi_cr2, read_cr2()); |
838 | +nmi_restart: |
839 | + |
840 | +#ifdef CONFIG_X86_64 |
841 | /* |
842 | * If we interrupted a breakpoint, it is possible that |
843 | * the nmi handler will have breakpoints too. We need to |
844 | @@ -514,22 +500,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs) |
845 | debug_stack_set_zero(); |
846 | this_cpu_write(update_debug_stack, 1); |
847 | } |
848 | -} |
849 | - |
850 | -static inline void nmi_nesting_postprocess(void) |
851 | -{ |
852 | - if (unlikely(this_cpu_read(update_debug_stack))) { |
853 | - debug_stack_reset(); |
854 | - this_cpu_write(update_debug_stack, 0); |
855 | - } |
856 | -} |
857 | #endif |
858 | |
859 | -dotraplinkage notrace void |
860 | -do_nmi(struct pt_regs *regs, long error_code) |
861 | -{ |
862 | - nmi_nesting_preprocess(regs); |
863 | - |
864 | nmi_enter(); |
865 | |
866 | inc_irq_stat(__nmi_count); |
867 | @@ -539,8 +511,17 @@ do_nmi(struct pt_regs *regs, long error_code) |
868 | |
869 | nmi_exit(); |
870 | |
871 | - /* On i386, may loop back to preprocess */ |
872 | - nmi_nesting_postprocess(); |
873 | +#ifdef CONFIG_X86_64 |
874 | + if (unlikely(this_cpu_read(update_debug_stack))) { |
875 | + debug_stack_reset(); |
876 | + this_cpu_write(update_debug_stack, 0); |
877 | + } |
878 | +#endif |
879 | + |
880 | + if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) |
881 | + write_cr2(this_cpu_read(nmi_cr2)); |
882 | + if (this_cpu_dec_return(nmi_state)) |
883 | + goto nmi_restart; |
884 | } |
885 | NOKPROBE_SYMBOL(do_nmi); |
886 | |
887 | diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c |
888 | index 67fcc43577d2..63a4b5092203 100644 |
889 | --- a/arch/x86/kernel/process_64.c |
890 | +++ b/arch/x86/kernel/process_64.c |
891 | @@ -122,11 +122,11 @@ void __show_regs(struct pt_regs *regs, int all) |
892 | void release_thread(struct task_struct *dead_task) |
893 | { |
894 | if (dead_task->mm) { |
895 | - if (dead_task->mm->context.size) { |
896 | + if (dead_task->mm->context.ldt) { |
897 | pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n", |
898 | dead_task->comm, |
899 | dead_task->mm->context.ldt, |
900 | - dead_task->mm->context.size); |
901 | + dead_task->mm->context.ldt->size); |
902 | BUG(); |
903 | } |
904 | } |
905 | diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c |
906 | index 9b4d51d0c0d0..0ccb53a9fcd9 100644 |
907 | --- a/arch/x86/kernel/step.c |
908 | +++ b/arch/x86/kernel/step.c |
909 | @@ -5,6 +5,7 @@ |
910 | #include <linux/mm.h> |
911 | #include <linux/ptrace.h> |
912 | #include <asm/desc.h> |
913 | +#include <asm/mmu_context.h> |
914 | |
915 | unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) |
916 | { |
917 | @@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re |
918 | struct desc_struct *desc; |
919 | unsigned long base; |
920 | |
921 | - seg &= ~7UL; |
922 | + seg >>= 3; |
923 | |
924 | mutex_lock(&child->mm->context.lock); |
925 | - if (unlikely((seg >> 3) >= child->mm->context.size)) |
926 | + if (unlikely(!child->mm->context.ldt || |
927 | + seg >= child->mm->context.ldt->size)) |
928 | addr = -1L; /* bogus selector, access would fault */ |
929 | else { |
930 | - desc = child->mm->context.ldt + seg; |
931 | + desc = &child->mm->context.ldt->entries[seg]; |
932 | base = get_desc_base(desc); |
933 | |
934 | /* 16-bit code segment? */ |
935 | diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c |
936 | index 3e32ed5648a0..a13a38830e76 100644 |
937 | --- a/arch/x86/power/cpu.c |
938 | +++ b/arch/x86/power/cpu.c |
939 | @@ -23,6 +23,7 @@ |
940 | #include <asm/debugreg.h> |
941 | #include <asm/fpu-internal.h> /* pcntxt_mask */ |
942 | #include <asm/cpu.h> |
943 | +#include <asm/mmu_context.h> |
944 | |
945 | #ifdef CONFIG_X86_32 |
946 | __visible unsigned long saved_context_ebx; |
947 | @@ -154,7 +155,7 @@ static void fix_processor_context(void) |
948 | syscall_init(); /* This sets MSR_*STAR and related */ |
949 | #endif |
950 | load_TR_desc(); /* This does ltr */ |
952 | - load_LDT(&current->active_mm->context); /* This does lldt */ |
952 | + load_mm_ldt(current->active_mm); /* This does lldt */ |
953 | } |
954 | |
955 | /** |
956 | diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig |
957 | index e88fda867a33..484145368a24 100644 |
958 | --- a/arch/x86/xen/Kconfig |
959 | +++ b/arch/x86/xen/Kconfig |
960 | @@ -8,7 +8,7 @@ config XEN |
961 | select PARAVIRT_CLOCK |
962 | select XEN_HAVE_PVMMU |
963 | depends on X86_64 || (X86_32 && X86_PAE) |
964 | - depends on X86_TSC |
965 | + depends on X86_LOCAL_APIC && X86_TSC |
966 | help |
967 | This is the Linux Xen port. Enabling this will allow the |
968 | kernel to boot in a paravirtualized environment under the |
969 | @@ -17,7 +17,7 @@ config XEN |
970 | config XEN_DOM0 |
971 | def_bool y |
972 | depends on XEN && PCI_XEN && SWIOTLB_XEN |
973 | - depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI |
974 | + depends on X86_IO_APIC && ACPI && PCI |
975 | |
976 | config XEN_PVHVM |
977 | def_bool y |
978 | diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile |
979 | index 7322755f337a..4b6e29ac0968 100644 |
980 | --- a/arch/x86/xen/Makefile |
981 | +++ b/arch/x86/xen/Makefile |
982 | @@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp) |
983 | obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ |
984 | time.o xen-asm.o xen-asm_$(BITS).o \ |
985 | grant-table.o suspend.o platform-pci-unplug.o \ |
986 | - p2m.o |
987 | + p2m.o apic.o |
988 | |
989 | obj-$(CONFIG_EVENT_TRACING) += trace.o |
990 | |
991 | obj-$(CONFIG_SMP) += smp.o |
992 | obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o |
993 | obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o |
994 | -obj-$(CONFIG_XEN_DOM0) += apic.o vga.o |
995 | +obj-$(CONFIG_XEN_DOM0) += vga.o |
996 | obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o |
997 | obj-$(CONFIG_XEN_EFI) += efi.o |
998 | diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c |
999 | index e180e097a53a..d8d81d1aa1d5 100644 |
1000 | --- a/arch/x86/xen/enlighten.c |
1001 | +++ b/arch/x86/xen/enlighten.c |
1002 | @@ -1772,6 +1772,7 @@ asmlinkage __visible void __init xen_start_kernel(void) |
1003 | #ifdef CONFIG_X86_32 |
1004 | i386_start_kernel(); |
1005 | #else |
1006 | + cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */ |
1007 | x86_64_start_reservations((char *)__pa_symbol(&boot_params)); |
1008 | #endif |
1009 | } |
1010 | diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h |
1011 | index 28c7e0be56e4..566004cc8a5b 100644 |
1012 | --- a/arch/x86/xen/xen-ops.h |
1013 | +++ b/arch/x86/xen/xen-ops.h |
1014 | @@ -94,17 +94,15 @@ struct dom0_vga_console_info; |
1015 | |
1016 | #ifdef CONFIG_XEN_DOM0 |
1017 | void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size); |
1018 | -void __init xen_init_apic(void); |
1019 | #else |
1020 | static inline void __init xen_init_vga(const struct dom0_vga_console_info *info, |
1021 | size_t size) |
1022 | { |
1023 | } |
1024 | -static inline void __init xen_init_apic(void) |
1025 | -{ |
1026 | -} |
1027 | #endif |
1028 | |
1029 | +void __init xen_init_apic(void); |
1030 | + |
1031 | #ifdef CONFIG_XEN_EFI |
1032 | extern void xen_efi_init(void); |
1033 | #else |
1034 | diff --git a/block/blk-settings.c b/block/blk-settings.c |
1035 | index aa02247d227e..d15c34a31633 100644 |
1036 | --- a/block/blk-settings.c |
1037 | +++ b/block/blk-settings.c |
1038 | @@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit); |
1039 | * Description: |
1040 | * Enables a low level driver to set a hard upper limit, |
1041 | * max_hw_sectors, on the size of requests. max_hw_sectors is set by |
1042 | - * the device driver based upon the combined capabilities of I/O |
1043 | - * controller and storage device. |
1044 | + * the device driver based upon the capabilities of the I/O |
1045 | + * controller. |
1046 | * |
1047 | * max_sectors is a soft limit imposed by the block layer for |
1048 | * filesystem type requests. This value can be overridden on a |
1049 | diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c |
1050 | index 74e18e94bef2..7f15707b4850 100644 |
1051 | --- a/drivers/ata/libata-core.c |
1052 | +++ b/drivers/ata/libata-core.c |
1053 | @@ -4238,6 +4238,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { |
1054 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
1055 | { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
1056 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
1057 | + { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
1058 | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
1059 | |
1060 | /* devices that don't properly handle TRIM commands */ |
1061 | { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, |
1062 | diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c |
1063 | index 9d09c5bb5874..bb39181e4c33 100644 |
1064 | --- a/drivers/base/regmap/regcache-rbtree.c |
1065 | +++ b/drivers/base/regmap/regcache-rbtree.c |
1066 | @@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, |
1067 | if (!blk) |
1068 | return -ENOMEM; |
1069 | |
1070 | - present = krealloc(rbnode->cache_present, |
1071 | - BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL); |
1072 | - if (!present) { |
1073 | - kfree(blk); |
1074 | - return -ENOMEM; |
1075 | + if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) { |
1076 | + present = krealloc(rbnode->cache_present, |
1077 | + BITS_TO_LONGS(blklen) * sizeof(*present), |
1078 | + GFP_KERNEL); |
1079 | + if (!present) { |
1080 | + kfree(blk); |
1081 | + return -ENOMEM; |
1082 | + } |
1083 | + |
1084 | + memset(present + BITS_TO_LONGS(rbnode->blklen), 0, |
1085 | + (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen)) |
1086 | + * sizeof(*present)); |
1087 | + } else { |
1088 | + present = rbnode->cache_present; |
1089 | } |
1090 | |
1091 | /* insert the register value in the correct place in the rbnode block */ |
1092 | diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c |
1093 | index 63fc7f06a014..0c858a60dc40 100644 |
1094 | --- a/drivers/block/xen-blkback/blkback.c |
1095 | +++ b/drivers/block/xen-blkback/blkback.c |
1096 | @@ -350,7 +350,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif) |
1097 | return; |
1098 | } |
1099 | |
1100 | - if (work_pending(&blkif->persistent_purge_work)) { |
1101 | + if (work_busy(&blkif->persistent_purge_work)) { |
1102 | pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n"); |
1103 | return; |
1104 | } |
1105 | diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c |
1106 | index 5ac312f6e0be..218c4858f494 100644 |
1107 | --- a/drivers/block/xen-blkfront.c |
1108 | +++ b/drivers/block/xen-blkfront.c |
1109 | @@ -1099,8 +1099,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, |
1110 | * Add the used indirect page back to the list of |
1111 | * available pages for indirect grefs. |
1112 | */ |
1113 | - indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); |
1114 | - list_add(&indirect_page->lru, &info->indirect_pages); |
1115 | + if (!info->feature_persistent) { |
1116 | + indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); |
1117 | + list_add(&indirect_page->lru, &info->indirect_pages); |
1118 | + } |
1119 | s->indirect_grants[i]->gref = GRANT_INVALID_REF; |
1120 | list_add_tail(&s->indirect_grants[i]->node, &info->grants); |
1121 | } |
1122 | diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c |
1123 | index 08b0da23c4ab..5408450204b0 100644 |
1124 | --- a/drivers/crypto/caam/caamhash.c |
1125 | +++ b/drivers/crypto/caam/caamhash.c |
1126 | @@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req) |
1127 | state->buflen_1; |
1128 | u32 *sh_desc = ctx->sh_desc_fin, *desc; |
1129 | dma_addr_t ptr = ctx->sh_desc_fin_dma; |
1130 | - int sec4_sg_bytes; |
1131 | + int sec4_sg_bytes, sec4_sg_src_index; |
1132 | int digestsize = crypto_ahash_digestsize(ahash); |
1133 | struct ahash_edesc *edesc; |
1134 | int ret = 0; |
1135 | int sh_len; |
1136 | |
1137 | - sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry); |
1138 | + sec4_sg_src_index = 1 + (buflen ? 1 : 0); |
1139 | + sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); |
1140 | |
1141 | /* allocate space for base edesc and hw desc commands, link tables */ |
1142 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + |
1143 | @@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req) |
1144 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, |
1145 | buf, state->buf_dma, buflen, |
1146 | last_buflen); |
1147 | - (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN; |
1148 | + (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN; |
1149 | |
1150 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
1151 | sec4_sg_bytes, DMA_TO_DEVICE); |
1152 | diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c |
1153 | index 0f04d5ead521..4c1991d4ce8b 100644 |
1154 | --- a/drivers/edac/ppc4xx_edac.c |
1155 | +++ b/drivers/edac/ppc4xx_edac.c |
1156 | @@ -921,7 +921,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) |
1157 | */ |
1158 | |
1159 | for (row = 0; row < mci->nr_csrows; row++) { |
1160 | - struct csrow_info *csi = &mci->csrows[row]; |
1161 | + struct csrow_info *csi = mci->csrows[row]; |
1162 | |
1163 | /* |
1164 | * Get the configuration settings for this |
1165 | diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c |
1166 | index bbcd754dc7d0..ccedb17580f7 100644 |
1167 | --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c |
1168 | +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c |
1169 | @@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work) |
1170 | struct drm_mode_config *mode_config = &dev->mode_config; |
1171 | struct drm_connector *connector; |
1172 | |
1173 | + /* we can race here at startup, some boards seem to trigger |
1174 | + * hotplug irqs when they shouldn't. */ |
1175 | + if (!rdev->mode_info.mode_config_initialized) |
1176 | + return; |
1177 | + |
1178 | mutex_lock(&mode_config->mutex); |
1179 | if (mode_config->num_connector) { |
1180 | list_for_each_entry(connector, &mode_config->connector_list, head) |
1181 | diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |
1182 | index 50b52802f470..8ad66bbd4f28 100644 |
1183 | --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |
1184 | +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |
1185 | @@ -2489,7 +2489,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, |
1186 | |
1187 | ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true); |
1188 | if (unlikely(ret != 0)) |
1189 | - goto out_err; |
1190 | + goto out_err_nores; |
1191 | |
1192 | ret = vmw_validate_buffers(dev_priv, sw_context); |
1193 | if (unlikely(ret != 0)) |
1194 | @@ -2533,6 +2533,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, |
1195 | vmw_resource_relocations_free(&sw_context->res_relocations); |
1196 | |
1197 | vmw_fifo_commit(dev_priv, command_size); |
1198 | + mutex_unlock(&dev_priv->binding_mutex); |
1199 | |
1200 | vmw_query_bo_switch_commit(dev_priv, sw_context); |
1201 | ret = vmw_execbuf_fence_commands(file_priv, dev_priv, |
1202 | @@ -2548,7 +2549,6 @@ int vmw_execbuf_process(struct drm_file *file_priv, |
1203 | DRM_ERROR("Fence submission error. Syncing.\n"); |
1204 | |
1205 | vmw_resource_list_unreserve(&sw_context->resource_list, false); |
1206 | - mutex_unlock(&dev_priv->binding_mutex); |
1207 | |
1208 | ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, |
1209 | (void *) fence); |
1210 | diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h |
1211 | index 3603d0cb25d9..ef984eba8396 100644 |
1212 | --- a/drivers/hid/hid-ids.h |
1213 | +++ b/drivers/hid/hid-ids.h |
1214 | @@ -222,6 +222,7 @@ |
1215 | #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418 |
1216 | #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d |
1217 | #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618 |
1218 | +#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053 |
1219 | #define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123 |
1220 | #define USB_DEVICE_ID_CHICONY_AK1D 0x1125 |
1221 | |
1222 | diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c |
1223 | index 509dee2e9b72..a4d1fe64c925 100644 |
1224 | --- a/drivers/hid/usbhid/hid-quirks.c |
1225 | +++ b/drivers/hid/usbhid/hid-quirks.c |
1226 | @@ -69,6 +69,7 @@ static const struct hid_blacklist { |
1227 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS, HID_QUIRK_NOGET }, |
1228 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, |
1229 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, |
1230 | + { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, |
1231 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, |
1232 | { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, |
1233 | { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL }, |
1234 | diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c |
1235 | index 8c91fd5eb6fd..3ac9c4194814 100644 |
1236 | --- a/drivers/isdn/gigaset/ser-gigaset.c |
1237 | +++ b/drivers/isdn/gigaset/ser-gigaset.c |
1238 | @@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty) |
1239 | cs->hw.ser->tty = tty; |
1240 | atomic_set(&cs->hw.ser->refcnt, 1); |
1241 | init_completion(&cs->hw.ser->dead_cmp); |
1242 | - |
1243 | tty->disc_data = cs; |
1244 | |
1245 | + /* Set the amount of data we're willing to receive per call |
1246 | + * from the hardware driver to half of the input buffer size |
1247 | + * to leave some reserve. |
1248 | + * Note: We don't do flow control towards the hardware driver. |
1249 | + * If more data is received than will fit into the input buffer, |
1250 | + * it will be dropped and an error will be logged. This should |
1251 | + * never happen as the device is slow and the buffer size ample. |
1252 | + */ |
1253 | + tty->receive_room = RBUFSIZE/2; |
1254 | + |
1255 | /* OK.. Initialization of the datastructures and the HW is done.. Now |
1256 | * startup system and notify the LL that we are ready to run |
1257 | */ |
1258 | diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c |
1259 | index e9d33ad59df5..3412b86e79fd 100644 |
1260 | --- a/drivers/md/dm-thin-metadata.c |
1261 | +++ b/drivers/md/dm-thin-metadata.c |
1262 | @@ -1295,8 +1295,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd) |
1263 | return r; |
1264 | |
1265 | disk_super = dm_block_data(copy); |
1266 | - dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root)); |
1267 | - dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root)); |
1268 | + dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root)); |
1269 | + dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root)); |
1270 | dm_sm_dec_block(pmd->metadata_sm, held_root); |
1271 | |
1272 | return dm_tm_unlock(pmd->tm, copy); |
1273 | diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c |
1274 | index bce7c0784b6b..633c63e7c32f 100644 |
1275 | --- a/drivers/mfd/arizona-core.c |
1276 | +++ b/drivers/mfd/arizona-core.c |
1277 | @@ -892,10 +892,6 @@ int arizona_dev_init(struct arizona *arizona) |
1278 | arizona->pdata.gpio_defaults[i]); |
1279 | } |
1280 | |
1281 | - pm_runtime_set_autosuspend_delay(arizona->dev, 100); |
1282 | - pm_runtime_use_autosuspend(arizona->dev); |
1283 | - pm_runtime_enable(arizona->dev); |
1284 | - |
1285 | /* Chip default */ |
1286 | if (!arizona->pdata.clk32k_src) |
1287 | arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2; |
1288 | @@ -992,11 +988,17 @@ int arizona_dev_init(struct arizona *arizona) |
1289 | arizona->pdata.spk_fmt[i]); |
1290 | } |
1291 | |
1292 | + pm_runtime_set_active(arizona->dev); |
1293 | + pm_runtime_enable(arizona->dev); |
1294 | + |
1295 | /* Set up for interrupts */ |
1296 | ret = arizona_irq_init(arizona); |
1297 | if (ret != 0) |
1298 | goto err_reset; |
1299 | |
1300 | + pm_runtime_set_autosuspend_delay(arizona->dev, 100); |
1301 | + pm_runtime_use_autosuspend(arizona->dev); |
1302 | + |
1303 | arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error", |
1304 | arizona_clkgen_err, arizona); |
1305 | arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked", |
1306 | @@ -1024,10 +1026,6 @@ int arizona_dev_init(struct arizona *arizona) |
1307 | goto err_irq; |
1308 | } |
1309 | |
1310 | -#ifdef CONFIG_PM_RUNTIME |
1311 | - regulator_disable(arizona->dcvdd); |
1312 | -#endif |
1313 | - |
1314 | return 0; |
1315 | |
1316 | err_irq: |
1317 | diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c |
1318 | index f35d4280b2f7..c58fc62545a3 100644 |
1319 | --- a/drivers/mfd/lpc_ich.c |
1320 | +++ b/drivers/mfd/lpc_ich.c |
1321 | @@ -934,8 +934,8 @@ gpe0_done: |
1322 | lpc_ich_enable_gpio_space(dev); |
1323 | |
1324 | lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_GPIO]); |
1325 | - ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO], |
1326 | - 1, NULL, 0, NULL); |
1327 | + ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO, |
1328 | + &lpc_ich_cells[LPC_GPIO], 1, NULL, 0, NULL); |
1329 | |
1330 | gpio_done: |
1331 | if (acpi_conflict) |
1332 | @@ -1008,8 +1008,8 @@ static int lpc_ich_init_wdt(struct pci_dev *dev) |
1333 | } |
1334 | |
1335 | lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_WDT]); |
1336 | - ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT], |
1337 | - 1, NULL, 0, NULL); |
1338 | + ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO, |
1339 | + &lpc_ich_cells[LPC_WDT], 1, NULL, 0, NULL); |
1340 | |
1341 | wdt_done: |
1342 | return ret; |
1343 | diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c |
1344 | index 1cc06c0e3e92..081dd70813c8 100644 |
1345 | --- a/drivers/net/bonding/bond_main.c |
1346 | +++ b/drivers/net/bonding/bond_main.c |
1347 | @@ -622,6 +622,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev, |
1348 | call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); |
1349 | } |
1350 | |
1351 | +static struct slave *bond_get_old_active(struct bonding *bond, |
1352 | + struct slave *new_active) |
1353 | +{ |
1354 | + struct slave *slave; |
1355 | + struct list_head *iter; |
1356 | + |
1357 | + bond_for_each_slave(bond, slave, iter) { |
1358 | + if (slave == new_active) |
1359 | + continue; |
1360 | + |
1361 | + if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr)) |
1362 | + return slave; |
1363 | + } |
1364 | + |
1365 | + return NULL; |
1366 | +} |
1367 | + |
1368 | /* bond_do_fail_over_mac |
1369 | * |
1370 | * Perform special MAC address swapping for fail_over_mac settings |
1371 | @@ -649,6 +666,9 @@ static void bond_do_fail_over_mac(struct bonding *bond, |
1372 | if (!new_active) |
1373 | return; |
1374 | |
1375 | + if (!old_active) |
1376 | + old_active = bond_get_old_active(bond, new_active); |
1377 | + |
1378 | if (old_active) { |
1379 | ether_addr_copy(tmp_mac, new_active->dev->dev_addr); |
1380 | ether_addr_copy(saddr.sa_data, |
1381 | @@ -1805,6 +1825,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev, |
1382 | bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; |
1383 | netdev_info(bond_dev, "Destroying bond %s\n", |
1384 | bond_dev->name); |
1385 | + bond_remove_proc_entry(bond); |
1386 | unregister_netdevice(bond_dev); |
1387 | } |
1388 | return ret; |
1389 | diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c |
1390 | index 49290a405903..af67e7d410eb 100644 |
1391 | --- a/drivers/net/ethernet/mellanox/mlx4/eq.c |
1392 | +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c |
1393 | @@ -568,7 +568,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) |
1394 | continue; |
1395 | mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n", |
1396 | __func__, i, port); |
1397 | - s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; |
1398 | + s_info = &priv->mfunc.master.vf_oper[i].vport[port].state; |
1399 | if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { |
1400 | eqe->event.port_change.port = |
1401 | cpu_to_be32( |
1402 | @@ -601,7 +601,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) |
1403 | continue; |
1404 | if (i == mlx4_master_func_num(dev)) |
1405 | continue; |
1406 | - s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; |
1407 | + s_info = &priv->mfunc.master.vf_oper[i].vport[port].state; |
1408 | if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { |
1409 | eqe->event.port_change.port = |
1410 | cpu_to_be32( |
1411 | diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c |
1412 | index 223eb42992bd..775e7bc292f2 100644 |
1413 | --- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c |
1414 | +++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c |
1415 | @@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444); |
1416 | module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444); |
1417 | module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444); |
1418 | module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); |
1419 | +module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444); |
1420 | module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog, |
1421 | bool, 0444); |
1422 | MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n"); |
1423 | diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c |
1424 | index 1b3a09473452..30f9ef0c0d4f 100644 |
1425 | --- a/drivers/scsi/libfc/fc_exch.c |
1426 | +++ b/drivers/scsi/libfc/fc_exch.c |
1427 | @@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp, |
1428 | if (resp) { |
1429 | resp(sp, fp, arg); |
1430 | res = true; |
1431 | - } else if (!IS_ERR(fp)) { |
1432 | - fc_frame_free(fp); |
1433 | } |
1434 | |
1435 | spin_lock_bh(&ep->ex_lock); |
1436 | @@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) |
1437 | * If new exch resp handler is valid then call that |
1438 | * first. |
1439 | */ |
1440 | - fc_invoke_resp(ep, sp, fp); |
1441 | + if (!fc_invoke_resp(ep, sp, fp)) |
1442 | + fc_frame_free(fp); |
1443 | |
1444 | fc_exch_release(ep); |
1445 | return; |
1446 | @@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) |
1447 | fc_exch_hold(ep); |
1448 | if (!rc) |
1449 | fc_exch_delete(ep); |
1450 | - fc_invoke_resp(ep, sp, fp); |
1451 | + if (!fc_invoke_resp(ep, sp, fp)) |
1452 | + fc_frame_free(fp); |
1453 | if (has_rec) |
1454 | fc_exch_timer_set(ep, ep->r_a_tov); |
1455 | fc_exch_release(ep); |
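
Both fc_exch hunks above move to a consume-or-caller-frees convention:
fc_invoke_resp() only reports whether a response handler consumed the frame, and
each call site frees the frame on the false path, instead of the helper freeing
it behind the callers' backs. A hedged stand-alone sketch of the convention
(hypothetical names):

    #include <stdbool.h>
    #include <stdlib.h>

    typedef void (*resp_cb)(void *frame, void *arg);

    /* Returns true only if @cb ran and therefore now owns @frame. */
    static bool invoke_resp(resp_cb cb, void *frame, void *arg)
    {
            if (!cb)
                    return false;
            cb(frame, arg);
            return true;
    }

    static void rx_path(resp_cb cb, void *arg)
    {
            void *frame = malloc(64);

            if (!frame)
                    return;
            if (!invoke_resp(cb, frame, arg))
                    free(frame); /* exactly one owner frees, on exactly one path */
    }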
1456 | diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c |
1457 | index 1d7e76e8b447..ae6fc1a94568 100644 |
1458 | --- a/drivers/scsi/libfc/fc_fcp.c |
1459 | +++ b/drivers/scsi/libfc/fc_fcp.c |
1460 | @@ -1039,11 +1039,26 @@ restart: |
1461 | fc_fcp_pkt_hold(fsp); |
1462 | spin_unlock_irqrestore(&si->scsi_queue_lock, flags); |
1463 | |
1464 | - if (!fc_fcp_lock_pkt(fsp)) { |
1465 | + spin_lock_bh(&fsp->scsi_pkt_lock); |
1466 | + if (!(fsp->state & FC_SRB_COMPL)) { |
1467 | + fsp->state |= FC_SRB_COMPL; |
1468 | + /* |
1469 | + * TODO: dropping scsi_pkt_lock and then reacquiring it |
1470 | + * around fc_fcp_cleanup_cmd() is required, since |
1471 | + * fc_fcp_cleanup_cmd() calls into fc_seq_set_resp(), |
1472 | + * which yields the CPU via schedule(). Maybe the |
1473 | + * schedule() and related code should be removed instead |
1474 | + * of unlocking here, to avoid a scheduling-while-atomic |
1475 | + * bug. |
1476 | + */ |
1477 | + spin_unlock_bh(&fsp->scsi_pkt_lock); |
1478 | + |
1479 | fc_fcp_cleanup_cmd(fsp, error); |
1480 | + |
1481 | + spin_lock_bh(&fsp->scsi_pkt_lock); |
1482 | fc_io_compl(fsp); |
1483 | - fc_fcp_unlock_pkt(fsp); |
1484 | } |
1485 | + spin_unlock_bh(&fsp->scsi_pkt_lock); |
1486 | |
1487 | fc_fcp_pkt_release(fsp); |
1488 | spin_lock_irqsave(&si->scsi_queue_lock, flags); |
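
The fc_fcp hunk open-codes fc_fcp_lock_pkt() so the spinlock can be dropped
around a cleanup call that may sleep, while the FC_SRB_COMPL bit, set while the
lock was held, keeps any second completer out. A hedged user-space analogue of
the claim-then-drop pattern (a pthread mutex standing in for the spinlock,
hypothetical names):

    #include <pthread.h>
    #include <unistd.h>

    enum { PKT_COMPL = 1 };

    struct pkt {
            pthread_mutex_t lock;
            int state;
    };

    static void cleanup_may_sleep(struct pkt *p) { (void)p; usleep(1000); }
    static void complete_io(struct pkt *p)       { (void)p; }

    static void finish_pkt(struct pkt *p)
    {
            pthread_mutex_lock(&p->lock);
            if (!(p->state & PKT_COMPL)) {
                    p->state |= PKT_COMPL;  /* claim completion under the lock */
                    pthread_mutex_unlock(&p->lock);

                    cleanup_may_sleep(p);   /* must not run with the lock held */

                    pthread_mutex_lock(&p->lock);
                    complete_io(p);         /* finish back under the lock */
            }
            pthread_mutex_unlock(&p->lock);
    }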
1489 | diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c |
1490 | index 0d8bc6c66650..7854584ebd59 100644 |
1491 | --- a/drivers/scsi/libiscsi.c |
1492 | +++ b/drivers/scsi/libiscsi.c |
1493 | @@ -2960,10 +2960,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) |
1494 | { |
1495 | struct iscsi_conn *conn = cls_conn->dd_data; |
1496 | struct iscsi_session *session = conn->session; |
1497 | - unsigned long flags; |
1498 | |
1499 | del_timer_sync(&conn->transport_timer); |
1500 | |
1501 | + mutex_lock(&session->eh_mutex); |
1502 | spin_lock_bh(&session->frwd_lock); |
1503 | conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; |
1504 | if (session->leadconn == conn) { |
1505 | @@ -2975,28 +2975,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) |
1506 | } |
1507 | spin_unlock_bh(&session->frwd_lock); |
1508 | |
1509 | - /* |
1510 | - * Block until all in-progress commands for this connection |
1511 | - * time out or fail. |
1512 | - */ |
1513 | - for (;;) { |
1514 | - spin_lock_irqsave(session->host->host_lock, flags); |
1515 | - if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */ |
1516 | - spin_unlock_irqrestore(session->host->host_lock, flags); |
1517 | - break; |
1518 | - } |
1519 | - spin_unlock_irqrestore(session->host->host_lock, flags); |
1520 | - msleep_interruptible(500); |
1521 | - iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): " |
1522 | - "host_busy %d host_failed %d\n", |
1523 | - atomic_read(&session->host->host_busy), |
1524 | - session->host->host_failed); |
1525 | - /* |
1526 | - * force eh_abort() to unblock |
1527 | - */ |
1528 | - wake_up(&conn->ehwait); |
1529 | - } |
1530 | - |
1531 | /* flush queued up work because we free the connection below */ |
1532 | iscsi_suspend_tx(conn); |
1533 | |
1534 | @@ -3013,6 +2991,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) |
1535 | if (session->leadconn == conn) |
1536 | session->leadconn = NULL; |
1537 | spin_unlock_bh(&session->frwd_lock); |
1538 | + mutex_unlock(&session->eh_mutex); |
1539 | |
1540 | iscsi_destroy_conn(cls_conn); |
1541 | } |
1542 | diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c |
1543 | index b99399fe2548..15acc808658f 100644 |
1544 | --- a/drivers/scsi/lpfc/lpfc_scsi.c |
1545 | +++ b/drivers/scsi/lpfc/lpfc_scsi.c |
1546 | @@ -3380,7 +3380,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) |
1547 | */ |
1548 | |
1549 | nseg = scsi_dma_map(scsi_cmnd); |
1550 | - if (unlikely(!nseg)) |
1551 | + if (unlikely(nseg <= 0)) |
1552 | return 1; |
1553 | sgl += 1; |
1554 | /* clear the last flag in the fcp_rsp map entry */ |
1555 | diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c |
1556 | index 7454498c4091..a2dcf6a54ec6 100644 |
1557 | --- a/drivers/scsi/scsi_pm.c |
1558 | +++ b/drivers/scsi/scsi_pm.c |
1559 | @@ -219,15 +219,15 @@ static int sdev_runtime_suspend(struct device *dev) |
1560 | { |
1561 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
1562 | struct scsi_device *sdev = to_scsi_device(dev); |
1563 | - int err; |
1564 | + int err = 0; |
1565 | |
1566 | - err = blk_pre_runtime_suspend(sdev->request_queue); |
1567 | - if (err) |
1568 | - return err; |
1569 | - if (pm && pm->runtime_suspend) |
1570 | + if (pm && pm->runtime_suspend) { |
1571 | + err = blk_pre_runtime_suspend(sdev->request_queue); |
1572 | + if (err) |
1573 | + return err; |
1574 | err = pm->runtime_suspend(dev); |
1575 | - blk_post_runtime_suspend(sdev->request_queue, err); |
1576 | - |
1577 | + blk_post_runtime_suspend(sdev->request_queue, err); |
1578 | + } |
1579 | return err; |
1580 | } |
1581 | |
1582 | @@ -250,11 +250,11 @@ static int sdev_runtime_resume(struct device *dev) |
1583 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
1584 | int err = 0; |
1585 | |
1586 | - blk_pre_runtime_resume(sdev->request_queue); |
1587 | - if (pm && pm->runtime_resume) |
1588 | + if (pm && pm->runtime_resume) { |
1589 | + blk_pre_runtime_resume(sdev->request_queue); |
1590 | err = pm->runtime_resume(dev); |
1591 | - blk_post_runtime_resume(sdev->request_queue, err); |
1592 | - |
1593 | + blk_post_runtime_resume(sdev->request_queue, err); |
1594 | + } |
1595 | return err; |
1596 | } |
1597 | |
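
Both scsi_pm hunks enforce one pairing rule: blk_pre_runtime_suspend() and
blk_post_runtime_suspend() (likewise the resume pair) must bracket the driver
callback, and must be skipped together when no callback exists, otherwise the
block layer's runtime-PM accounting goes out of balance. A hedged stand-alone
sketch of the rule (hypothetical names):

    struct queue;                                   /* opaque in this sketch */
    struct dev_ops { int (*suspend)(void *dev); };
    struct dev_ctx { const struct dev_ops *ops; struct queue *q; void *dev; };

    static int pre_suspend(struct queue *q)            { (void)q; return 0; }
    static void post_suspend(struct queue *q, int err) { (void)q; (void)err; }

    static int runtime_suspend(struct dev_ctx *d)
    {
            int err = 0;

            if (d->ops && d->ops->suspend) {
                    err = pre_suspend(d->q);  /* may veto the transition */
                    if (err)
                            return err;
                    err = d->ops->suspend(d->dev);
                    post_suspend(d->q, err);  /* always paired with pre_suspend() */
            }
            return err;  /* no callback: report success, touch nothing */
    }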
1598 | diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c |
1599 | index ce382e858452..6d931d598d80 100644 |
1600 | --- a/drivers/scsi/sd.c |
1601 | +++ b/drivers/scsi/sd.c |
1602 | @@ -2812,9 +2812,9 @@ static int sd_revalidate_disk(struct gendisk *disk) |
1603 | max_xfer = sdkp->max_xfer_blocks; |
1604 | max_xfer <<= ilog2(sdp->sector_size) - 9; |
1605 | |
1606 | - max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), |
1607 | - max_xfer); |
1608 | - blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer); |
1609 | + sdkp->disk->queue->limits.max_sectors = |
1610 | + min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer); |
1611 | + |
1612 | set_capacity(disk, sdkp->capacity); |
1613 | sd_config_write_same(sdkp); |
1614 | kfree(buffer); |
1615 | diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h |
1616 | index 45c39a37f924..8bc073d297db 100644 |
1617 | --- a/include/drm/drm_pciids.h |
1618 | +++ b/include/drm/drm_pciids.h |
1619 | @@ -172,6 +172,7 @@ |
1620 | {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
1621 | {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
1622 | {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
1623 | + {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
1624 | {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
1625 | {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
1626 | {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
1627 | diff --git a/include/linux/compiler.h b/include/linux/compiler.h |
1628 | index 000c5f90f08c..2bd394ed35f6 100644 |
1629 | --- a/include/linux/compiler.h |
1630 | +++ b/include/linux/compiler.h |
1631 | @@ -454,6 +454,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s |
1632 | */ |
1633 | #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) |
1634 | |
1635 | +/** |
1636 | + * lockless_dereference() - safely load a pointer for later dereference |
1637 | + * @p: The pointer to load |
1638 | + * |
1639 | + * Similar to rcu_dereference(), but for situations where the pointed-to |
1640 | + * object's lifetime is managed by something other than RCU. That |
1641 | + * "something other" might be reference counting or simple immortality. |
1642 | + */ |
1643 | +#define lockless_dereference(p) \ |
1644 | +({ \ |
1645 | + typeof(p) _________p1 = ACCESS_ONCE(p); \ |
1646 | + smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ |
1647 | + (_________p1); \ |
1648 | +}) |
1649 | + |
1650 | /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ |
1651 | #ifdef CONFIG_KPROBES |
1652 | # define __kprobes __attribute__((__section__(".kprobes.text"))) |
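
For reference, a hedged usage sketch of the macro just moved here: the reader
loads a published pointer once, the dependency barrier orders the accesses made
through it, and the object's lifetime is guaranteed by something other than RCU
(kernel-context fragment, hypothetical names):

    /* Writer side, elsewhere: initialize *cfg fully, then publish with
     * smp_wmb(); global_cfg = cfg;  (or rcu_assign_pointer()).          */

    struct config *cfg = lockless_dereference(global_cfg);
    if (cfg)
            use(cfg->value);  /* dependent load ordered after the pointer load */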
1653 | diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h |
1654 | index 53ff1a752d7e..a4a819ffb2d1 100644 |
1655 | --- a/include/linux/rcupdate.h |
1656 | +++ b/include/linux/rcupdate.h |
1657 | @@ -617,21 +617,6 @@ static inline void rcu_preempt_sleep_check(void) |
1658 | #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) |
1659 | |
1660 | /** |
1661 | - * lockless_dereference() - safely load a pointer for later dereference |
1662 | - * @p: The pointer to load |
1663 | - * |
1664 | - * Similar to rcu_dereference(), but for situations where the pointed-to |
1665 | - * object's lifetime is managed by something other than RCU. That |
1666 | - * "something other" might be reference counting or simple immortality. |
1667 | - */ |
1668 | -#define lockless_dereference(p) \ |
1669 | -({ \ |
1670 | - typeof(p) _________p1 = ACCESS_ONCE(p); \ |
1671 | - smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ |
1672 | - (_________p1); \ |
1673 | -}) |
1674 | - |
1675 | -/** |
1676 | * rcu_assign_pointer() - assign to RCU-protected pointer |
1677 | * @p: pointer to assign to |
1678 | * @v: value to assign (publish) |
1679 | diff --git a/include/net/ip.h b/include/net/ip.h |
1680 | index c0c26c3deeb5..d00ebdf14ca4 100644 |
1681 | --- a/include/net/ip.h |
1682 | +++ b/include/net/ip.h |
1683 | @@ -160,6 +160,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk) |
1684 | } |
1685 | |
1686 | /* datagram.c */ |
1687 | +int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); |
1688 | int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); |
1689 | |
1690 | void ip4_datagram_release_cb(struct sock *sk); |
1691 | diff --git a/ipc/sem.c b/ipc/sem.c |
1692 | index 53c3310f41c6..85ad28aaf548 100644 |
1693 | --- a/ipc/sem.c |
1694 | +++ b/ipc/sem.c |
1695 | @@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head) |
1696 | } |
1697 | |
1698 | /* |
1699 | + * spin_unlock_wait() and !spin_is_locked() are not memory barriers; they |
1700 | + * are only control barriers. |
1701 | + * The code must pair with spin_unlock(&sem->lock) or |
1702 | + * spin_unlock(&sem_perm.lock), so the control barrier alone is insufficient. |
1703 | + * |
1704 | + * smp_rmb() is sufficient, as writes cannot pass the control barrier. |
1705 | + */ |
1706 | +#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb() |
1707 | + |
1708 | +/* |
1709 | * Wait until all currently ongoing simple ops have completed. |
1710 | * Caller must own sem_perm.lock. |
1711 | * New simple ops cannot start, because simple ops first check |
1712 | @@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma) |
1713 | sem = sma->sem_base + i; |
1714 | spin_unlock_wait(&sem->lock); |
1715 | } |
1716 | + ipc_smp_acquire__after_spin_is_unlocked(); |
1717 | } |
1718 | |
1719 | /* |
1720 | @@ -326,8 +337,13 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, |
1721 | |
1722 | /* Then check that the global lock is free */ |
1723 | if (!spin_is_locked(&sma->sem_perm.lock)) { |
1724 | - /* spin_is_locked() is not a memory barrier */ |
1725 | - smp_mb(); |
1726 | + /* |
1727 | + * We need a memory barrier with acquire semantics, |
1728 | + * otherwise we can race with another thread that does: |
1729 | + * complex_count++; |
1730 | + * spin_unlock(sem_perm.lock); |
1731 | + */ |
1732 | + ipc_smp_acquire__after_spin_is_unlocked(); |
1733 | |
1734 | /* Now repeat the test of complex_count: |
1735 | * It can't change anymore until we drop sem->lock. |
1736 | @@ -2067,17 +2083,28 @@ void exit_sem(struct task_struct *tsk) |
1737 | rcu_read_lock(); |
1738 | un = list_entry_rcu(ulp->list_proc.next, |
1739 | struct sem_undo, list_proc); |
1740 | - if (&un->list_proc == &ulp->list_proc) |
1741 | - semid = -1; |
1742 | - else |
1743 | - semid = un->semid; |
1744 | + if (&un->list_proc == &ulp->list_proc) { |
1745 | + /* |
1746 | + * We must wait for freeary() before freeing this ulp, |
1747 | + * in case we raced with the last sem_undo. There is a |
1748 | + * small window in which we exit while freeary() has not |
1749 | + * yet finished unlocking sem_undo_list. |
1750 | + */ |
1751 | + spin_unlock_wait(&ulp->lock); |
1752 | + rcu_read_unlock(); |
1753 | + break; |
1754 | + } |
1755 | + spin_lock(&ulp->lock); |
1756 | + semid = un->semid; |
1757 | + spin_unlock(&ulp->lock); |
1758 | |
1759 | + /* exit_sem raced with IPC_RMID, nothing to do */ |
1760 | if (semid == -1) { |
1761 | rcu_read_unlock(); |
1762 | - break; |
1763 | + continue; |
1764 | } |
1765 | |
1766 | - sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid); |
1767 | + sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid); |
1768 | /* exit_sem raced with IPC_RMID, nothing to do */ |
1769 | if (IS_ERR(sma)) { |
1770 | rcu_read_unlock(); |
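
The barrier hunks in this file pair a lockless reader with a writer that bumps
complex_count and then releases sem_perm.lock; without an acquire-like barrier
after the !spin_is_locked() test, the reader's re-read of complex_count could be
speculated ahead of that test. A hedged kernel-style sketch of the pairing (not
the file's exact code):

    /* Writer (CPU 1): the unlock's release ordering publishes the ++ */
    complex_count++;
    spin_unlock(&sma->sem_perm.lock);

    /* Reader (CPU 0): */
    if (!spin_is_locked(&sma->sem_perm.lock)) {
            smp_rmb();  /* keep the re-read below from floating above the test */
            if (sma->complex_count == 0)
                    take_fast_path();  /* cannot observe a stale count */
    }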
1771 | diff --git a/kernel/cpuset.c b/kernel/cpuset.c |
1772 | index 672310e1597e..71b52dd957de 100644 |
1773 | --- a/kernel/cpuset.c |
1774 | +++ b/kernel/cpuset.c |
1775 | @@ -1204,7 +1204,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, |
1776 | mutex_unlock(&callback_mutex); |
1777 | |
1778 | /* use trialcs->mems_allowed as a temp variable */ |
1779 | - update_nodemasks_hier(cs, &cs->mems_allowed); |
1780 | + update_nodemasks_hier(cs, &trialcs->mems_allowed); |
1781 | done: |
1782 | return retval; |
1783 | } |
1784 | diff --git a/kernel/events/core.c b/kernel/events/core.c |
1785 | index cb86038cad47..ff181a5a5562 100644 |
1786 | --- a/kernel/events/core.c |
1787 | +++ b/kernel/events/core.c |
1788 | @@ -3729,28 +3729,21 @@ static void perf_event_for_each(struct perf_event *event, |
1789 | mutex_unlock(&ctx->mutex); |
1790 | } |
1791 | |
1792 | -static int perf_event_period(struct perf_event *event, u64 __user *arg) |
1793 | -{ |
1794 | - struct perf_event_context *ctx = event->ctx; |
1795 | - int ret = 0, active; |
1796 | +struct period_event { |
1797 | + struct perf_event *event; |
1798 | u64 value; |
1799 | +}; |
1800 | |
1801 | - if (!is_sampling_event(event)) |
1802 | - return -EINVAL; |
1803 | - |
1804 | - if (copy_from_user(&value, arg, sizeof(value))) |
1805 | - return -EFAULT; |
1806 | - |
1807 | - if (!value) |
1808 | - return -EINVAL; |
1809 | +static int __perf_event_period(void *info) |
1810 | +{ |
1811 | + struct period_event *pe = info; |
1812 | + struct perf_event *event = pe->event; |
1813 | + struct perf_event_context *ctx = event->ctx; |
1814 | + u64 value = pe->value; |
1815 | + bool active; |
1816 | |
1817 | - raw_spin_lock_irq(&ctx->lock); |
1818 | + raw_spin_lock(&ctx->lock); |
1819 | if (event->attr.freq) { |
1820 | - if (value > sysctl_perf_event_sample_rate) { |
1821 | - ret = -EINVAL; |
1822 | - goto unlock; |
1823 | - } |
1824 | - |
1825 | event->attr.sample_freq = value; |
1826 | } else { |
1827 | event->attr.sample_period = value; |
1828 | @@ -3769,11 +3762,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) |
1829 | event->pmu->start(event, PERF_EF_RELOAD); |
1830 | perf_pmu_enable(ctx->pmu); |
1831 | } |
1832 | + raw_spin_unlock(&ctx->lock); |
1833 | |
1834 | -unlock: |
1835 | + return 0; |
1836 | +} |
1837 | + |
1838 | +static int perf_event_period(struct perf_event *event, u64 __user *arg) |
1839 | +{ |
1840 | + struct period_event pe = { .event = event, }; |
1841 | + struct perf_event_context *ctx = event->ctx; |
1842 | + struct task_struct *task; |
1843 | + u64 value; |
1844 | + |
1845 | + if (!is_sampling_event(event)) |
1846 | + return -EINVAL; |
1847 | + |
1848 | + if (copy_from_user(&value, arg, sizeof(value))) |
1849 | + return -EFAULT; |
1850 | + |
1851 | + if (!value) |
1852 | + return -EINVAL; |
1853 | + |
1854 | + if (event->attr.freq && value > sysctl_perf_event_sample_rate) |
1855 | + return -EINVAL; |
1856 | + |
1857 | + task = ctx->task; |
1858 | + pe.value = value; |
1859 | + |
1860 | + if (!task) { |
1861 | + cpu_function_call(event->cpu, __perf_event_period, &pe); |
1862 | + return 0; |
1863 | + } |
1864 | + |
1865 | +retry: |
1866 | + if (!task_function_call(task, __perf_event_period, &pe)) |
1867 | + return 0; |
1868 | + |
1869 | + raw_spin_lock_irq(&ctx->lock); |
1870 | + if (ctx->is_active) { |
1871 | + raw_spin_unlock_irq(&ctx->lock); |
1872 | + task = ctx->task; |
1873 | + goto retry; |
1874 | + } |
1875 | + |
1876 | + __perf_event_period(&pe); |
1877 | raw_spin_unlock_irq(&ctx->lock); |
1878 | |
1879 | - return ret; |
1880 | + return 0; |
1881 | } |
1882 | |
1883 | static const struct file_operations perf_fops; |
1884 | @@ -4398,12 +4433,20 @@ static const struct file_operations perf_fops = { |
1885 | * to user-space before waking everybody up. |
1886 | */ |
1887 | |
1888 | +static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) |
1889 | +{ |
1890 | + /* only the parent has fasync state */ |
1891 | + if (event->parent) |
1892 | + event = event->parent; |
1893 | + return &event->fasync; |
1894 | +} |
1895 | + |
1896 | void perf_event_wakeup(struct perf_event *event) |
1897 | { |
1898 | ring_buffer_wakeup(event); |
1899 | |
1900 | if (event->pending_kill) { |
1901 | - kill_fasync(&event->fasync, SIGIO, event->pending_kill); |
1902 | + kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); |
1903 | event->pending_kill = 0; |
1904 | } |
1905 | } |
1906 | @@ -5638,7 +5681,7 @@ static int __perf_event_overflow(struct perf_event *event, |
1907 | else |
1908 | perf_event_output(event, data, regs); |
1909 | |
1910 | - if (event->fasync && event->pending_kill) { |
1911 | + if (*perf_event_fasync(event) && event->pending_kill) { |
1912 | event->pending_wakeup = 1; |
1913 | irq_work_queue(&event->pending); |
1914 | } |
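
The perf_event_period() rework follows from one constraint: __perf_event_period()
now runs via cpu_function_call()/task_function_call() in a context that cannot
hand -EINVAL back to the ioctl caller, so all value validation moves ahead of the
cross-call, and the task path retries while the context is active. A hedged
sketch of that retry shape (hypothetical names, kernel-style):

    static int update_remote(struct my_ctx *ctx, struct task_struct *task,
                             void *arg)
    {
    retry:
            if (!task_function_call(task, __do_update, arg))
                    return 0;  /* ran on the CPU that owns the context */

            raw_spin_lock_irq(&ctx->lock);
            if (ctx->is_active) {  /* raced with schedule-in: try again */
                    raw_spin_unlock_irq(&ctx->lock);
                    goto retry;
            }
            __do_update(arg);  /* context inactive: safe to mutate locally */
            raw_spin_unlock_irq(&ctx->lock);
            return 0;
    }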
1915 | diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
1916 | index da8fa4e4237c..a1d4dfa62023 100644 |
1917 | --- a/mm/hugetlb.c |
1918 | +++ b/mm/hugetlb.c |
1919 | @@ -855,6 +855,31 @@ struct hstate *size_to_hstate(unsigned long size) |
1920 | return NULL; |
1921 | } |
1922 | |
1923 | +/* |
1924 | + * Test to determine whether the hugepage is "active/in-use" (i.e. linked |
1925 | + * to hstate->hugepage_activelist). |
1926 | + * |
1927 | + * This function can be called for tail pages, but never returns true for them. |
1928 | + */ |
1929 | +bool page_huge_active(struct page *page) |
1930 | +{ |
1931 | + VM_BUG_ON_PAGE(!PageHuge(page), page); |
1932 | + return PageHead(page) && PagePrivate(&page[1]); |
1933 | +} |
1934 | + |
1935 | +/* never called for tail page */ |
1936 | +static void set_page_huge_active(struct page *page) |
1937 | +{ |
1938 | + VM_BUG_ON_PAGE(!PageHeadHuge(page), page); |
1939 | + SetPagePrivate(&page[1]); |
1940 | +} |
1941 | + |
1942 | +static void clear_page_huge_active(struct page *page) |
1943 | +{ |
1944 | + VM_BUG_ON_PAGE(!PageHeadHuge(page), page); |
1945 | + ClearPagePrivate(&page[1]); |
1946 | +} |
1947 | + |
1948 | void free_huge_page(struct page *page) |
1949 | { |
1950 | /* |
1951 | @@ -875,6 +900,7 @@ void free_huge_page(struct page *page) |
1952 | ClearPagePrivate(page); |
1953 | |
1954 | spin_lock(&hugetlb_lock); |
1955 | + clear_page_huge_active(page); |
1956 | hugetlb_cgroup_uncharge_page(hstate_index(h), |
1957 | pages_per_huge_page(h), page); |
1958 | if (restore_reserve) |
1959 | @@ -2884,6 +2910,7 @@ retry_avoidcopy: |
1960 | copy_user_huge_page(new_page, old_page, address, vma, |
1961 | pages_per_huge_page(h)); |
1962 | __SetPageUptodate(new_page); |
1963 | + set_page_huge_active(new_page); |
1964 | |
1965 | mmun_start = address & huge_page_mask(h); |
1966 | mmun_end = mmun_start + huge_page_size(h); |
1967 | @@ -2995,6 +3022,7 @@ retry: |
1968 | } |
1969 | clear_huge_page(page, address, pages_per_huge_page(h)); |
1970 | __SetPageUptodate(page); |
1971 | + set_page_huge_active(page); |
1972 | |
1973 | if (vma->vm_flags & VM_MAYSHARE) { |
1974 | int err; |
1975 | @@ -3799,19 +3827,26 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage) |
1976 | |
1977 | bool isolate_huge_page(struct page *page, struct list_head *list) |
1978 | { |
1979 | + bool ret = true; |
1980 | + |
1981 | VM_BUG_ON_PAGE(!PageHead(page), page); |
1982 | - if (!get_page_unless_zero(page)) |
1983 | - return false; |
1984 | spin_lock(&hugetlb_lock); |
1985 | + if (!page_huge_active(page) || !get_page_unless_zero(page)) { |
1986 | + ret = false; |
1987 | + goto unlock; |
1988 | + } |
1989 | + clear_page_huge_active(page); |
1990 | list_move_tail(&page->lru, list); |
1991 | +unlock: |
1992 | spin_unlock(&hugetlb_lock); |
1993 | - return true; |
1994 | + return ret; |
1995 | } |
1996 | |
1997 | void putback_active_hugepage(struct page *page) |
1998 | { |
1999 | VM_BUG_ON_PAGE(!PageHead(page), page); |
2000 | spin_lock(&hugetlb_lock); |
2001 | + set_page_huge_active(page); |
2002 | list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); |
2003 | spin_unlock(&hugetlb_lock); |
2004 | put_page(page); |
2005 | diff --git a/mm/memory-failure.c b/mm/memory-failure.c |
2006 | index 22f047fbaa33..715bc57385b9 100644 |
2007 | --- a/mm/memory-failure.c |
2008 | +++ b/mm/memory-failure.c |
2009 | @@ -1524,6 +1524,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags) |
2010 | */ |
2011 | ret = __get_any_page(page, pfn, 0); |
2012 | if (!PageLRU(page)) { |
2013 | + /* Drop page reference which is from __get_any_page() */ |
2014 | + put_page(page); |
2015 | pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", |
2016 | pfn, page->flags); |
2017 | return -EIO; |
2018 | @@ -1552,8 +1554,17 @@ static int soft_offline_huge_page(struct page *page, int flags) |
2019 | } |
2020 | unlock_page(hpage); |
2021 | |
2022 | - /* Keep page count to indicate a given hugepage is isolated. */ |
2023 | - list_move(&hpage->lru, &pagelist); |
2024 | + ret = isolate_huge_page(hpage, &pagelist); |
2025 | + /* |
2026 | + * get_any_page() and isolate_huge_page() each take a refcount, |
2027 | + * so we need to drop one here. |
2028 | + */ |
2029 | + put_page(hpage); |
2030 | + if (!ret) { |
2031 | + pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn); |
2032 | + return -EBUSY; |
2033 | + } |
2034 | + |
2035 | ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, |
2036 | MIGRATE_SYNC, MR_MEMORY_FAILURE); |
2037 | if (ret) { |
2038 | diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c |
2039 | index 5df05269d17a..cc641541d38f 100644 |
2040 | --- a/net/bridge/br_mdb.c |
2041 | +++ b/net/bridge/br_mdb.c |
2042 | @@ -347,7 +347,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, |
2043 | return -ENOMEM; |
2044 | rcu_assign_pointer(*pp, p); |
2045 | |
2046 | - br_mdb_notify(br->dev, port, group, RTM_NEWMDB); |
2047 | return 0; |
2048 | } |
2049 | |
2050 | @@ -370,6 +369,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br, |
2051 | if (!p || p->br != br || p->state == BR_STATE_DISABLED) |
2052 | return -EINVAL; |
2053 | |
2054 | + memset(&ip, 0, sizeof(ip)); |
2055 | ip.proto = entry->addr.proto; |
2056 | if (ip.proto == htons(ETH_P_IP)) |
2057 | ip.u.ip4 = entry->addr.u.ip4; |
2058 | @@ -416,6 +416,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) |
2059 | if (!netif_running(br->dev) || br->multicast_disabled) |
2060 | return -EINVAL; |
2061 | |
2062 | + memset(&ip, 0, sizeof(ip)); |
2063 | ip.proto = entry->addr.proto; |
2064 | if (ip.proto == htons(ETH_P_IP)) { |
2065 | if (timer_pending(&br->ip4_other_query.timer)) |
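
The two memset() hunks matter because the local struct br_ip is used as a lookup
key: it holds a union plus implicit padding, and any bytes left uninitialized
feed into the later hash and compare. A hedged stand-alone demonstration of how
padding alone can break byte-wise comparison of otherwise equal keys:

    #include <stdio.h>
    #include <string.h>

    struct key {
            unsigned char proto;  /* typically followed by 3 padding bytes */
            unsigned int  addr;
    };

    int main(void)
    {
            struct key a, b;

            memset(&a, 0x00, sizeof(a));  /* every byte defined */
            memset(&b, 0xff, sizeof(b));  /* stand-in for stale stack data */
            a.proto = b.proto = 1;
            a.addr  = b.addr  = 42;

            /* All named fields match, yet the padding bytes may differ: */
            printf("memcmp = %d\n", memcmp(&a, &b, sizeof(a)));
            return 0;
    }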
2066 | diff --git a/net/core/datagram.c b/net/core/datagram.c |
2067 | index fdbc9a81d4c2..3a402a7b20e9 100644 |
2068 | --- a/net/core/datagram.c |
2069 | +++ b/net/core/datagram.c |
2070 | @@ -744,7 +744,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) |
2071 | !skb->csum_complete_sw) |
2072 | netdev_rx_csum_fault(skb->dev); |
2073 | } |
2074 | - skb->csum_valid = !sum; |
2075 | + if (!skb_shared(skb)) |
2076 | + skb->csum_valid = !sum; |
2077 | return sum; |
2078 | } |
2079 | EXPORT_SYMBOL(__skb_checksum_complete_head); |
2080 | @@ -764,11 +765,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb) |
2081 | netdev_rx_csum_fault(skb->dev); |
2082 | } |
2083 | |
2084 | - /* Save full packet checksum */ |
2085 | - skb->csum = csum; |
2086 | - skb->ip_summed = CHECKSUM_COMPLETE; |
2087 | - skb->csum_complete_sw = 1; |
2088 | - skb->csum_valid = !sum; |
2089 | + if (!skb_shared(skb)) { |
2090 | + /* Save full packet checksum */ |
2091 | + skb->csum = csum; |
2092 | + skb->ip_summed = CHECKSUM_COMPLETE; |
2093 | + skb->csum_complete_sw = 1; |
2094 | + skb->csum_valid = !sum; |
2095 | + } |
2096 | |
2097 | return sum; |
2098 | } |
2099 | diff --git a/net/core/dev.c b/net/core/dev.c |
2100 | index fb9625874b3c..93612b2e3bbf 100644 |
2101 | --- a/net/core/dev.c |
2102 | +++ b/net/core/dev.c |
2103 | @@ -3309,6 +3309,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, |
2104 | local_irq_save(flags); |
2105 | |
2106 | rps_lock(sd); |
2107 | + if (!netif_running(skb->dev)) |
2108 | + goto drop; |
2109 | qlen = skb_queue_len(&sd->input_pkt_queue); |
2110 | if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { |
2111 | if (skb_queue_len(&sd->input_pkt_queue)) { |
2112 | @@ -3330,6 +3332,7 @@ enqueue: |
2113 | goto enqueue; |
2114 | } |
2115 | |
2116 | +drop: |
2117 | sd->dropped++; |
2118 | rps_unlock(sd); |
2119 | |
2120 | @@ -3638,8 +3641,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) |
2121 | |
2122 | pt_prev = NULL; |
2123 | |
2124 | - rcu_read_lock(); |
2125 | - |
2126 | another_round: |
2127 | skb->skb_iif = skb->dev->ifindex; |
2128 | |
2129 | @@ -3649,7 +3650,7 @@ another_round: |
2130 | skb->protocol == cpu_to_be16(ETH_P_8021AD)) { |
2131 | skb = skb_vlan_untag(skb); |
2132 | if (unlikely(!skb)) |
2133 | - goto unlock; |
2134 | + goto out; |
2135 | } |
2136 | |
2137 | #ifdef CONFIG_NET_CLS_ACT |
2138 | @@ -3674,7 +3675,7 @@ skip_taps: |
2139 | #ifdef CONFIG_NET_CLS_ACT |
2140 | skb = handle_ing(skb, &pt_prev, &ret, orig_dev); |
2141 | if (!skb) |
2142 | - goto unlock; |
2143 | + goto out; |
2144 | ncls: |
2145 | #endif |
2146 | |
2147 | @@ -3689,7 +3690,7 @@ ncls: |
2148 | if (vlan_do_receive(&skb)) |
2149 | goto another_round; |
2150 | else if (unlikely(!skb)) |
2151 | - goto unlock; |
2152 | + goto out; |
2153 | } |
2154 | |
2155 | rx_handler = rcu_dereference(skb->dev->rx_handler); |
2156 | @@ -3701,7 +3702,7 @@ ncls: |
2157 | switch (rx_handler(&skb)) { |
2158 | case RX_HANDLER_CONSUMED: |
2159 | ret = NET_RX_SUCCESS; |
2160 | - goto unlock; |
2161 | + goto out; |
2162 | case RX_HANDLER_ANOTHER: |
2163 | goto another_round; |
2164 | case RX_HANDLER_EXACT: |
2165 | @@ -3753,8 +3754,7 @@ drop: |
2166 | ret = NET_RX_DROP; |
2167 | } |
2168 | |
2169 | -unlock: |
2170 | - rcu_read_unlock(); |
2171 | +out: |
2172 | return ret; |
2173 | } |
2174 | |
2175 | @@ -3785,29 +3785,30 @@ static int __netif_receive_skb(struct sk_buff *skb) |
2176 | |
2177 | static int netif_receive_skb_internal(struct sk_buff *skb) |
2178 | { |
2179 | + int ret; |
2180 | + |
2181 | net_timestamp_check(netdev_tstamp_prequeue, skb); |
2182 | |
2183 | if (skb_defer_rx_timestamp(skb)) |
2184 | return NET_RX_SUCCESS; |
2185 | |
2186 | + rcu_read_lock(); |
2187 | + |
2188 | #ifdef CONFIG_RPS |
2189 | if (static_key_false(&rps_needed)) { |
2190 | struct rps_dev_flow voidflow, *rflow = &voidflow; |
2191 | - int cpu, ret; |
2192 | - |
2193 | - rcu_read_lock(); |
2194 | - |
2195 | - cpu = get_rps_cpu(skb->dev, skb, &rflow); |
2196 | + int cpu = get_rps_cpu(skb->dev, skb, &rflow); |
2197 | |
2198 | if (cpu >= 0) { |
2199 | ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); |
2200 | rcu_read_unlock(); |
2201 | return ret; |
2202 | } |
2203 | - rcu_read_unlock(); |
2204 | } |
2205 | #endif |
2206 | - return __netif_receive_skb(skb); |
2207 | + ret = __netif_receive_skb(skb); |
2208 | + rcu_read_unlock(); |
2209 | + return ret; |
2210 | } |
2211 | |
2212 | /** |
2213 | @@ -4343,8 +4344,10 @@ static int process_backlog(struct napi_struct *napi, int quota) |
2214 | struct sk_buff *skb; |
2215 | |
2216 | while ((skb = __skb_dequeue(&sd->process_queue))) { |
2217 | + rcu_read_lock(); |
2218 | local_irq_enable(); |
2219 | __netif_receive_skb(skb); |
2220 | + rcu_read_unlock(); |
2221 | local_irq_disable(); |
2222 | input_queue_head_incr(sd); |
2223 | if (++work >= quota) { |
2224 | @@ -5867,6 +5870,7 @@ static void rollback_registered_many(struct list_head *head) |
2225 | unlist_netdevice(dev); |
2226 | |
2227 | dev->reg_state = NETREG_UNREGISTERING; |
2228 | + on_each_cpu(flush_backlog, dev, 1); |
2229 | } |
2230 | |
2231 | synchronize_net(); |
2232 | @@ -6128,7 +6132,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev) |
2233 | struct netdev_queue *tx; |
2234 | size_t sz = count * sizeof(*tx); |
2235 | |
2236 | - BUG_ON(count < 1 || count > 0xffff); |
2237 | + if (count < 1 || count > 0xffff) |
2238 | + return -EINVAL; |
2239 | |
2240 | tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); |
2241 | if (!tx) { |
2242 | @@ -6486,8 +6491,6 @@ void netdev_run_todo(void) |
2243 | |
2244 | dev->reg_state = NETREG_UNREGISTERED; |
2245 | |
2246 | - on_each_cpu(flush_backlog, dev, 1); |
2247 | - |
2248 | netdev_wait_allrefs(dev); |
2249 | |
2250 | /* paranoia */ |
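
Taken together, the dev.c hunks hoist rcu_read_lock() out of
__netif_receive_skb_core() into its callers (netif_receive_skb_internal() and
process_backlog()), drop packets queued for a device that is no longer running,
and flush the per-CPU backlogs at NETREG_UNREGISTERING time instead of after
unregistration, so no stale skb can still reference the dying device. The
resulting locking shape, as a hedged sketch (hypothetical names):

    static int receive_one(struct sk_buff *skb)
    {
            int ret;

            rcu_read_lock();          /* the caller owns the RCU read side */
            ret = receive_core(skb);  /* the core can 'goto out' freely,   */
            rcu_read_unlock();        /* with no unlock bookkeeping inside */
            return ret;
    }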
2251 | diff --git a/net/core/pktgen.c b/net/core/pktgen.c |
2252 | index 0b320d93fb56..4ff3eacc99f5 100644 |
2253 | --- a/net/core/pktgen.c |
2254 | +++ b/net/core/pktgen.c |
2255 | @@ -3490,8 +3490,10 @@ static int pktgen_thread_worker(void *arg) |
2256 | pktgen_rem_thread(t); |
2257 | |
2258 | /* Wait for kthread_stop */ |
2259 | - while (!kthread_should_stop()) { |
2260 | + for (;;) { |
2261 | set_current_state(TASK_INTERRUPTIBLE); |
2262 | + if (kthread_should_stop()) |
2263 | + break; |
2264 | schedule(); |
2265 | } |
2266 | __set_current_state(TASK_RUNNING); |
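
The pktgen hunk fixes a textbook lost-wakeup race: testing kthread_should_stop()
before setting the task state leaves a window in which kthread_stop()'s wakeup
lands between the test and set_current_state(), after which schedule() can block
forever. The safe ordering, annotated (kernel-style sketch of the same loop):

    for (;;) {
            set_current_state(TASK_INTERRUPTIBLE);  /* 1: publish intent to sleep  */
            if (kthread_should_stop())              /* 2: re-check after the state */
                    break;                          /*    change, never before it  */
            schedule();                             /* 3: any wakeup since step 1  */
    }                                               /*    leaves us runnable       */
    __set_current_state(TASK_RUNNING);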
2267 | diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c |
2268 | index 90c0e8386116..574fad9cca05 100644 |
2269 | --- a/net/ipv4/datagram.c |
2270 | +++ b/net/ipv4/datagram.c |
2271 | @@ -20,7 +20,7 @@ |
2272 | #include <net/route.h> |
2273 | #include <net/tcp_states.h> |
2274 | |
2275 | -int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2276 | +int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2277 | { |
2278 | struct inet_sock *inet = inet_sk(sk); |
2279 | struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; |
2280 | @@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2281 | |
2282 | sk_dst_reset(sk); |
2283 | |
2284 | - lock_sock(sk); |
2285 | - |
2286 | oif = sk->sk_bound_dev_if; |
2287 | saddr = inet->inet_saddr; |
2288 | if (ipv4_is_multicast(usin->sin_addr.s_addr)) { |
2289 | @@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2290 | sk_dst_set(sk, &rt->dst); |
2291 | err = 0; |
2292 | out: |
2293 | - release_sock(sk); |
2294 | return err; |
2295 | } |
2296 | +EXPORT_SYMBOL(__ip4_datagram_connect); |
2297 | + |
2298 | +int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2299 | +{ |
2300 | + int res; |
2301 | + |
2302 | + lock_sock(sk); |
2303 | + res = __ip4_datagram_connect(sk, uaddr, addr_len); |
2304 | + release_sock(sk); |
2305 | + return res; |
2306 | +} |
2307 | EXPORT_SYMBOL(ip4_datagram_connect); |
2308 | |
2309 | /* Because UDP xmit path can manipulate sk_dst_cache without holding |
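
The split above is the classic lock-free-inner/locking-wrapper refactor:
__ip4_datagram_connect() carries the logic and assumes the caller holds the
socket lock, while ip4_datagram_connect() preserves the old behaviour by
wrapping it in lock_sock()/release_sock(). The IPv6 hunks below then call the
double-underscore variant, so the v6 connect path no longer takes the socket
lock twice. The wrapper shape, hedged (hypothetical names):

    static int __proto_connect(struct sock *sk, struct sockaddr *uaddr, int len)
    {
            /* ... the real work; caller must hold the socket lock ... */
            return 0;
    }

    int proto_connect(struct sock *sk, struct sockaddr *uaddr, int len)
    {
            int res;

            lock_sock(sk);  /* lock exactly once, at the outermost boundary */
            res = __proto_connect(sk, uaddr, len);
            release_sock(sk);
            return res;
    }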
2310 | diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c |
2311 | index b48e03cd6656..9516031847f1 100644 |
2312 | --- a/net/ipv4/ip_fragment.c |
2313 | +++ b/net/ipv4/ip_fragment.c |
2314 | @@ -342,7 +342,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) |
2315 | ihl = ip_hdrlen(skb); |
2316 | |
2317 | /* Determine the position of this fragment. */ |
2318 | - end = offset + skb->len - ihl; |
2319 | + end = offset + skb->len - skb_network_offset(skb) - ihl; |
2320 | err = -EINVAL; |
2321 | |
2322 | /* Is this the final fragment? */ |
2323 | @@ -372,7 +372,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) |
2324 | goto err; |
2325 | |
2326 | err = -ENOMEM; |
2327 | - if (pskb_pull(skb, ihl) == NULL) |
2328 | + if (!pskb_pull(skb, skb_network_offset(skb) + ihl)) |
2329 | goto err; |
2330 | |
2331 | err = pskb_trim_rcsum(skb, end - offset); |
2332 | @@ -612,6 +612,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, |
2333 | iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0; |
2334 | iph->tot_len = htons(len); |
2335 | iph->tos |= ecn; |
2336 | + |
2337 | + ip_send_check(iph); |
2338 | + |
2339 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); |
2340 | qp->q.fragments = NULL; |
2341 | qp->q.fragments_tail = NULL; |
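
The reassembly hunk rewrites iph->tot_len, iph->frag_off and iph->tos, so the
IPv4 header checksum must be recomputed before the datagram travels on; that is
what the added ip_send_check() call does. For reference, its semantics amount to
the following (kernel-style sketch, matching kernels of this era):

    static inline void sketch_ip_send_check(struct iphdr *iph)
    {
            iph->check = 0;  /* the checksum field must be zero while summing */
            iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
    }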
2342 | diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c |
2343 | index 0bb8e141eacc..682257242971 100644 |
2344 | --- a/net/ipv4/ip_tunnel.c |
2345 | +++ b/net/ipv4/ip_tunnel.c |
2346 | @@ -587,7 +587,8 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, |
2347 | EXPORT_SYMBOL(ip_tunnel_encap); |
2348 | |
2349 | static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, |
2350 | - struct rtable *rt, __be16 df) |
2351 | + struct rtable *rt, __be16 df, |
2352 | + const struct iphdr *inner_iph) |
2353 | { |
2354 | struct ip_tunnel *tunnel = netdev_priv(dev); |
2355 | int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len; |
2356 | @@ -604,7 +605,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, |
2357 | |
2358 | if (skb->protocol == htons(ETH_P_IP)) { |
2359 | if (!skb_is_gso(skb) && |
2360 | - (df & htons(IP_DF)) && mtu < pkt_size) { |
2361 | + (inner_iph->frag_off & htons(IP_DF)) && |
2362 | + mtu < pkt_size) { |
2363 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); |
2364 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); |
2365 | return -E2BIG; |
2366 | @@ -738,7 +740,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, |
2367 | goto tx_error; |
2368 | } |
2369 | |
2370 | - if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) { |
2371 | + if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) { |
2372 | ip_rt_put(rt); |
2373 | goto tx_error; |
2374 | } |
2375 | diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c |
2376 | index 11e3945eeac7..e069aeb2cf72 100644 |
2377 | --- a/net/ipv6/datagram.c |
2378 | +++ b/net/ipv6/datagram.c |
2379 | @@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a) |
2380 | return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); |
2381 | } |
2382 | |
2383 | -int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2384 | +static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2385 | { |
2386 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; |
2387 | struct inet_sock *inet = inet_sk(sk); |
2388 | @@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2389 | if (usin->sin6_family == AF_INET) { |
2390 | if (__ipv6_only_sock(sk)) |
2391 | return -EAFNOSUPPORT; |
2392 | - err = ip4_datagram_connect(sk, uaddr, addr_len); |
2393 | + err = __ip4_datagram_connect(sk, uaddr, addr_len); |
2394 | goto ipv4_connected; |
2395 | } |
2396 | |
2397 | @@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2398 | sin.sin_addr.s_addr = daddr->s6_addr32[3]; |
2399 | sin.sin_port = usin->sin6_port; |
2400 | |
2401 | - err = ip4_datagram_connect(sk, |
2402 | - (struct sockaddr *) &sin, |
2403 | - sizeof(sin)); |
2404 | + err = __ip4_datagram_connect(sk, |
2405 | + (struct sockaddr *) &sin, |
2406 | + sizeof(sin)); |
2407 | |
2408 | ipv4_connected: |
2409 | if (err) |
2410 | @@ -204,6 +204,16 @@ out: |
2411 | fl6_sock_release(flowlabel); |
2412 | return err; |
2413 | } |
2414 | + |
2415 | +int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2416 | +{ |
2417 | + int res; |
2418 | + |
2419 | + lock_sock(sk); |
2420 | + res = __ip6_datagram_connect(sk, uaddr, addr_len); |
2421 | + release_sock(sk); |
2422 | + return res; |
2423 | +} |
2424 | EXPORT_SYMBOL_GPL(ip6_datagram_connect); |
2425 | |
2426 | int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr, |
2427 | diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c |
2428 | index a3084ab5df6c..ac5e973e9eb5 100644 |
2429 | --- a/net/ipv6/ip6_input.c |
2430 | +++ b/net/ipv6/ip6_input.c |
2431 | @@ -329,10 +329,10 @@ int ip6_mc_input(struct sk_buff *skb) |
2432 | if (offset < 0) |
2433 | goto out; |
2434 | |
2435 | - if (!ipv6_is_mld(skb, nexthdr, offset)) |
2436 | - goto out; |
2437 | + if (ipv6_is_mld(skb, nexthdr, offset)) |
2438 | + deliver = true; |
2439 | |
2440 | - deliver = true; |
2441 | + goto out; |
2442 | } |
2443 | /* unknown RA - process it normally */ |
2444 | } |
2445 | diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c |
2446 | index c82b2e37e652..6ffd1ebaba93 100644 |
2447 | --- a/net/netlink/af_netlink.c |
2448 | +++ b/net/netlink/af_netlink.c |
2449 | @@ -366,25 +366,52 @@ err1: |
2450 | return NULL; |
2451 | } |
2452 | |
2453 | + |
2454 | +static void |
2455 | +__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec, |
2456 | + unsigned int order) |
2457 | +{ |
2458 | + struct netlink_sock *nlk = nlk_sk(sk); |
2459 | + struct sk_buff_head *queue; |
2460 | + struct netlink_ring *ring; |
2461 | + |
2462 | + queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; |
2463 | + ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; |
2464 | + |
2465 | + spin_lock_bh(&queue->lock); |
2466 | + |
2467 | + ring->frame_max = req->nm_frame_nr - 1; |
2468 | + ring->head = 0; |
2469 | + ring->frame_size = req->nm_frame_size; |
2470 | + ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; |
2471 | + |
2472 | + swap(ring->pg_vec_len, req->nm_block_nr); |
2473 | + swap(ring->pg_vec_order, order); |
2474 | + swap(ring->pg_vec, pg_vec); |
2475 | + |
2476 | + __skb_queue_purge(queue); |
2477 | + spin_unlock_bh(&queue->lock); |
2478 | + |
2479 | + WARN_ON(atomic_read(&nlk->mapped)); |
2480 | + |
2481 | + if (pg_vec) |
2482 | + free_pg_vec(pg_vec, order, req->nm_block_nr); |
2483 | +} |
2484 | + |
2485 | static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, |
2486 | - bool closing, bool tx_ring) |
2487 | + bool tx_ring) |
2488 | { |
2489 | struct netlink_sock *nlk = nlk_sk(sk); |
2490 | struct netlink_ring *ring; |
2491 | - struct sk_buff_head *queue; |
2492 | void **pg_vec = NULL; |
2493 | unsigned int order = 0; |
2494 | - int err; |
2495 | |
2496 | ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; |
2497 | - queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; |
2498 | |
2499 | - if (!closing) { |
2500 | - if (atomic_read(&nlk->mapped)) |
2501 | - return -EBUSY; |
2502 | - if (atomic_read(&ring->pending)) |
2503 | - return -EBUSY; |
2504 | - } |
2505 | + if (atomic_read(&nlk->mapped)) |
2506 | + return -EBUSY; |
2507 | + if (atomic_read(&ring->pending)) |
2508 | + return -EBUSY; |
2509 | |
2510 | if (req->nm_block_nr) { |
2511 | if (ring->pg_vec != NULL) |
2512 | @@ -416,31 +443,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, |
2513 | return -EINVAL; |
2514 | } |
2515 | |
2516 | - err = -EBUSY; |
2517 | mutex_lock(&nlk->pg_vec_lock); |
2518 | - if (closing || atomic_read(&nlk->mapped) == 0) { |
2519 | - err = 0; |
2520 | - spin_lock_bh(&queue->lock); |
2521 | - |
2522 | - ring->frame_max = req->nm_frame_nr - 1; |
2523 | - ring->head = 0; |
2524 | - ring->frame_size = req->nm_frame_size; |
2525 | - ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; |
2526 | - |
2527 | - swap(ring->pg_vec_len, req->nm_block_nr); |
2528 | - swap(ring->pg_vec_order, order); |
2529 | - swap(ring->pg_vec, pg_vec); |
2530 | - |
2531 | - __skb_queue_purge(queue); |
2532 | - spin_unlock_bh(&queue->lock); |
2533 | - |
2534 | - WARN_ON(atomic_read(&nlk->mapped)); |
2535 | + if (atomic_read(&nlk->mapped) == 0) { |
2536 | + __netlink_set_ring(sk, req, tx_ring, pg_vec, order); |
2537 | + mutex_unlock(&nlk->pg_vec_lock); |
2538 | + return 0; |
2539 | } |
2540 | + |
2541 | mutex_unlock(&nlk->pg_vec_lock); |
2542 | |
2543 | if (pg_vec) |
2544 | free_pg_vec(pg_vec, order, req->nm_block_nr); |
2545 | - return err; |
2546 | + |
2547 | + return -EBUSY; |
2548 | } |
2549 | |
2550 | static void netlink_mm_open(struct vm_area_struct *vma) |
2551 | @@ -909,10 +924,10 @@ static void netlink_sock_destruct(struct sock *sk) |
2552 | |
2553 | memset(&req, 0, sizeof(req)); |
2554 | if (nlk->rx_ring.pg_vec) |
2555 | - netlink_set_ring(sk, &req, true, false); |
2556 | + __netlink_set_ring(sk, &req, false, NULL, 0); |
2557 | memset(&req, 0, sizeof(req)); |
2558 | if (nlk->tx_ring.pg_vec) |
2559 | - netlink_set_ring(sk, &req, true, true); |
2560 | + __netlink_set_ring(sk, &req, true, NULL, 0); |
2561 | } |
2562 | #endif /* CONFIG_NETLINK_MMAP */ |
2563 | |
2564 | @@ -2163,7 +2178,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname, |
2565 | return -EINVAL; |
2566 | if (copy_from_user(&req, optval, sizeof(req))) |
2567 | return -EFAULT; |
2568 | - err = netlink_set_ring(sk, &req, false, |
2569 | + err = netlink_set_ring(sk, &req, |
2570 | optname == NETLINK_TX_RING); |
2571 | break; |
2572 | } |
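
The netlink rework factors the ring installation into __netlink_set_ring(),
which assumes its preconditions already hold and is therefore safe to call from
netlink_sock_destruct() during teardown, while netlink_set_ring() keeps the
setsockopt-side checks (the mapped and pending counters) and the pg_vec_lock. A
hedged sketch of that locked-helper split (hypothetical names):

    struct cfg;
    struct obj { struct mutex lock; atomic_t users; /* ... */ };

    static void __install(struct obj *o, struct cfg *c)
    {
            /* does the swap; caller guarantees exclusivity
             * (single-threaded teardown, or o->lock held)   */
    }

    static int install(struct obj *o, struct cfg *c)
    {
            mutex_lock(&o->lock);
            if (atomic_read(&o->users) == 0) {  /* precondition checked once */
                    __install(o, c);
                    mutex_unlock(&o->lock);
                    return 0;
            }
            mutex_unlock(&o->lock);
            return -EBUSY;  /* raced with a new user */
    }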
2573 | diff --git a/net/rds/info.c b/net/rds/info.c |
2574 | index 9a6b4f66187c..140a44a5f7b7 100644 |
2575 | --- a/net/rds/info.c |
2576 | +++ b/net/rds/info.c |
2577 | @@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval, |
2578 | |
2579 | /* check for all kinds of wrapping and the like */ |
2580 | start = (unsigned long)optval; |
2581 | - if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) { |
2582 | + if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) { |
2583 | ret = -EINVAL; |
2584 | goto out; |
2585 | } |
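
The rds_info_getsockopt() change replaces an overflow test that could never
fire: in `len + PAGE_SIZE - 1 < len`, the int `len` is promoted to unsigned
long before the addition, so on 64-bit the sum never wraps and oversized
lengths slipped through. The new `len > INT_MAX - PAGE_SIZE + 1` rejects any
length whose page-rounded byte count would exceed INT_MAX, with no overflow in
the check itself. A stand-alone demonstration:

    #include <limits.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL  /* unsigned long, as in the kernel */

    int main(void)
    {
            int len = INT_MAX - 100;  /* bogus, but positive */

            /* Old test: promotion to unsigned long means no wrap, so this
             * never fires for positive len on 64-bit. Prints 0.           */
            printf("old check fires: %d\n",
                   len + PAGE_SIZE - 1 < (unsigned long)len);

            /* New test: plain arithmetic that cannot overflow. Prints 1.  */
            printf("new check fires: %d\n",
                   len > (long)(INT_MAX - PAGE_SIZE + 1));
            return 0;
    }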
2586 | diff --git a/net/tipc/socket.c b/net/tipc/socket.c |
2587 | index 51bddc236a15..8224016ebd70 100644 |
2588 | --- a/net/tipc/socket.c |
2589 | +++ b/net/tipc/socket.c |
2590 | @@ -1996,6 +1996,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) |
2591 | res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1); |
2592 | if (res) |
2593 | goto exit; |
2594 | + security_sk_clone(sock->sk, new_sock->sk); |
2595 | |
2596 | new_sk = new_sock->sk; |
2597 | new_tsock = tipc_sk(new_sk); |
2598 | diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl |
2599 | index 9cb8522d8d22..f3d3fb42b873 100755 |
2600 | --- a/scripts/kconfig/streamline_config.pl |
2601 | +++ b/scripts/kconfig/streamline_config.pl |
2602 | @@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.'); |
2603 | my $kconfig = $ARGV[1]; |
2604 | my $lsmod_file = $ENV{'LSMOD'}; |
2605 | |
2606 | -my @makefiles = `find $ksource -name Makefile 2>/dev/null`; |
2607 | +my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`; |
2608 | chomp @makefiles; |
2609 | |
2610 | my %depends; |
2611 | diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c |
2612 | index 9ff5050d513a..2c10c9ee36a2 100644 |
2613 | --- a/sound/usb/quirks.c |
2614 | +++ b/sound/usb/quirks.c |
2615 | @@ -1258,6 +1258,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, |
2616 | return SNDRV_PCM_FMTBIT_DSD_U32_BE; |
2617 | break; |
2618 | |
2619 | + case USB_ID(0x20b1, 0x000a): /* Gustard DAC-X20U */ |
2620 | case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */ |
2621 | case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */ |
2622 | if (fp->altsetting == 3) |