Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0152-4.9.53-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3608 - (hide annotations) (download)
Fri Aug 14 07:34:29 2020 UTC (3 years, 8 months ago) by niro
File size: 75758 byte(s)
-added kernel-alx-legacy pkg
1 niro 3608 diff --git a/Makefile b/Makefile
2     index c53de1e38c6a..98e3be659b21 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 52
9     +SUBLEVEL = 53
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
14     index d062f08f5020..4b24964a520a 100644
15     --- a/arch/arm/xen/mm.c
16     +++ b/arch/arm/xen/mm.c
17     @@ -199,6 +199,7 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
18     .unmap_page = xen_swiotlb_unmap_page,
19     .dma_supported = xen_swiotlb_dma_supported,
20     .set_dma_mask = xen_swiotlb_set_dma_mask,
21     + .mmap = xen_swiotlb_dma_mmap,
22     };
23    
24     int __init xen_mm_init(void)
25     diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
26     index 332e33193ccf..539bebc1222f 100644
27     --- a/arch/arm64/kernel/head.S
28     +++ b/arch/arm64/kernel/head.S
29     @@ -486,6 +486,7 @@ ENTRY(kimage_vaddr)
30     * booted in EL1 or EL2 respectively.
31     */
32     ENTRY(el2_setup)
33     + msr SPsel, #1 // We want to use SP_EL{1,2}
34     mrs x0, CurrentEL
35     cmp x0, #CurrentEL_EL2
36     b.ne 1f
37     diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
38     index fec5b1ce97f8..403fe9e57135 100644
39     --- a/arch/arm64/mm/fault.c
40     +++ b/arch/arm64/mm/fault.c
41     @@ -509,7 +509,7 @@ static const struct fault_info fault_info[] = {
42     { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
43     { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
44     { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
45     - { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
46     + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
47     { do_bad, SIGBUS, 0, "unknown 8" },
48     { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
49     { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
50     diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
51     index 767ef6d68c9e..caa659671599 100644
52     --- a/arch/powerpc/kernel/entry_64.S
53     +++ b/arch/powerpc/kernel/entry_64.S
54     @@ -1235,10 +1235,14 @@ _GLOBAL(ftrace_caller)
55     stdu r1,-SWITCH_FRAME_SIZE(r1)
56    
57     /* Save all gprs to pt_regs */
58     - SAVE_8GPRS(0,r1)
59     - SAVE_8GPRS(8,r1)
60     - SAVE_8GPRS(16,r1)
61     - SAVE_8GPRS(24,r1)
62     + SAVE_GPR(0, r1)
63     + SAVE_10GPRS(2, r1)
64     + SAVE_10GPRS(12, r1)
65     + SAVE_10GPRS(22, r1)
66     +
67     + /* Save previous stack pointer (r1) */
68     + addi r8, r1, SWITCH_FRAME_SIZE
69     + std r8, GPR1(r1)
70    
71     /* Load special regs for save below */
72     mfmsr r8
73     @@ -1292,10 +1296,10 @@ ftrace_call:
74     #endif
75    
76     /* Restore gprs */
77     - REST_8GPRS(0,r1)
78     - REST_8GPRS(8,r1)
79     - REST_8GPRS(16,r1)
80     - REST_8GPRS(24,r1)
81     + REST_GPR(0,r1)
82     + REST_10GPRS(2,r1)
83     + REST_10GPRS(12,r1)
84     + REST_10GPRS(22,r1)
85    
86     /* Restore callee's TOC */
87     ld r2, 24(r1)
88     diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
89     index dcbb9144c16d..d97370866a5f 100644
90     --- a/arch/powerpc/kernel/ptrace.c
91     +++ b/arch/powerpc/kernel/ptrace.c
92     @@ -131,7 +131,7 @@ static void flush_tmregs_to_thread(struct task_struct *tsk)
93     * in the appropriate thread structures from live.
94     */
95    
96     - if (tsk != current)
97     + if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
98     return;
99    
100     if (MSR_TM_SUSPENDED(mfmsr())) {
101     diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
102     index c379ff5a4438..da2a7eccb10a 100644
103     --- a/arch/powerpc/kvm/book3s_64_vio.c
104     +++ b/arch/powerpc/kvm/book3s_64_vio.c
105     @@ -129,8 +129,11 @@ static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
106     static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
107     {
108     struct kvmppc_spapr_tce_table *stt = filp->private_data;
109     + struct kvm *kvm = stt->kvm;
110    
111     + mutex_lock(&kvm->lock);
112     list_del_rcu(&stt->list);
113     + mutex_unlock(&kvm->lock);
114    
115     kvm_put_kvm(stt->kvm);
116    
117     @@ -150,6 +153,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
118     struct kvm_create_spapr_tce_64 *args)
119     {
120     struct kvmppc_spapr_tce_table *stt = NULL;
121     + struct kvmppc_spapr_tce_table *siter;
122     unsigned long npages, size;
123     int ret = -ENOMEM;
124     int i;
125     @@ -157,24 +161,16 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
126     if (!args->size)
127     return -EINVAL;
128    
129     - /* Check this LIOBN hasn't been previously allocated */
130     - list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
131     - if (stt->liobn == args->liobn)
132     - return -EBUSY;
133     - }
134     -
135     size = args->size;
136     npages = kvmppc_tce_pages(size);
137     ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
138     - if (ret) {
139     - stt = NULL;
140     - goto fail;
141     - }
142     + if (ret)
143     + return ret;
144    
145     stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
146     GFP_KERNEL);
147     if (!stt)
148     - goto fail;
149     + goto fail_acct;
150    
151     stt->liobn = args->liobn;
152     stt->page_shift = args->page_shift;
153     @@ -188,24 +184,39 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
154     goto fail;
155     }
156    
157     - kvm_get_kvm(kvm);
158     -
159     mutex_lock(&kvm->lock);
160     - list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
161     +
162     + /* Check this LIOBN hasn't been previously allocated */
163     + ret = 0;
164     + list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
165     + if (siter->liobn == args->liobn) {
166     + ret = -EBUSY;
167     + break;
168     + }
169     + }
170     +
171     + if (!ret)
172     + ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
173     + stt, O_RDWR | O_CLOEXEC);
174     +
175     + if (ret >= 0) {
176     + list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
177     + kvm_get_kvm(kvm);
178     + }
179    
180     mutex_unlock(&kvm->lock);
181    
182     - return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
183     - stt, O_RDWR | O_CLOEXEC);
184     + if (ret >= 0)
185     + return ret;
186    
187     -fail:
188     - if (stt) {
189     - for (i = 0; i < npages; i++)
190     - if (stt->pages[i])
191     - __free_page(stt->pages[i]);
192     + fail:
193     + for (i = 0; i < npages; i++)
194     + if (stt->pages[i])
195     + __free_page(stt->pages[i]);
196    
197     - kfree(stt);
198     - }
199     + kfree(stt);
200     + fail_acct:
201     + kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
202     return ret;
203     }
204    
205     diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
206     index a560a98bcf3b..6a5e7467445c 100644
207     --- a/arch/powerpc/platforms/pseries/mobility.c
208     +++ b/arch/powerpc/platforms/pseries/mobility.c
209     @@ -225,8 +225,10 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
210     return -ENOENT;
211    
212     dn = dlpar_configure_connector(drc_index, parent_dn);
213     - if (!dn)
214     + if (!dn) {
215     + of_node_put(parent_dn);
216     return -ENOENT;
217     + }
218    
219     rc = dlpar_attach_node(dn);
220     if (rc)
221     diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
222     index 18d4107e10ee..97fc449a7470 100644
223     --- a/arch/s390/mm/gup.c
224     +++ b/arch/s390/mm/gup.c
225     @@ -56,13 +56,12 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
226     static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
227     unsigned long end, int write, struct page **pages, int *nr)
228     {
229     - unsigned long mask, result;
230     struct page *head, *page;
231     + unsigned long mask;
232     int refs;
233    
234     - result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
235     - mask = result | _SEGMENT_ENTRY_INVALID;
236     - if ((pmd_val(pmd) & mask) != result)
237     + mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
238     + if ((pmd_val(pmd) & mask) != 0)
239     return 0;
240     VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
241    
242     diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
243     index c114b132d121..7052d9a65fe9 100644
244     --- a/arch/x86/kernel/fpu/regset.c
245     +++ b/arch/x86/kernel/fpu/regset.c
246     @@ -130,11 +130,16 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
247    
248     fpu__activate_fpstate_write(fpu);
249    
250     - if (boot_cpu_has(X86_FEATURE_XSAVES))
251     + if (boot_cpu_has(X86_FEATURE_XSAVES)) {
252     ret = copyin_to_xsaves(kbuf, ubuf, xsave);
253     - else
254     + } else {
255     ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
256    
257     + /* xcomp_bv must be 0 when using uncompacted format */
258     + if (!ret && xsave->header.xcomp_bv)
259     + ret = -EINVAL;
260     + }
261     +
262     /*
263     * In case of failure, mark all states as init:
264     */
265     diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
266     index a184c210efba..3ec0d2d64601 100644
267     --- a/arch/x86/kernel/fpu/signal.c
268     +++ b/arch/x86/kernel/fpu/signal.c
269     @@ -329,6 +329,10 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
270     } else {
271     err = __copy_from_user(&fpu->state.xsave,
272     buf_fx, state_size);
273     +
274     + /* xcomp_bv must be 0 when using uncompacted format */
275     + if (!err && state_size > offsetof(struct xregs_state, header) && fpu->state.xsave.header.xcomp_bv)
276     + err = -EINVAL;
277     }
278    
279     if (err || __copy_from_user(&env, buf, sizeof(env))) {
280     diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
281     index 55ffd9dc2258..77f17cbfe271 100644
282     --- a/arch/x86/kernel/kvm.c
283     +++ b/arch/x86/kernel/kvm.c
284     @@ -141,7 +141,8 @@ void kvm_async_pf_task_wait(u32 token)
285    
286     n.token = token;
287     n.cpu = smp_processor_id();
288     - n.halted = is_idle_task(current) || preempt_count() > 1;
289     + n.halted = is_idle_task(current) || preempt_count() > 1 ||
290     + rcu_preempt_depth();
291     init_swait_queue_head(&n.wq);
292     hlist_add_head(&n.link, &b->list);
293     raw_spin_unlock(&b->lock);
294     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
295     index 3dc6d8017ce9..fb49212d25df 100644
296     --- a/arch/x86/kvm/vmx.c
297     +++ b/arch/x86/kvm/vmx.c
298     @@ -2167,46 +2167,44 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
299     struct pi_desc old, new;
300     unsigned int dest;
301    
302     - if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
303     - !irq_remapping_cap(IRQ_POSTING_CAP) ||
304     - !kvm_vcpu_apicv_active(vcpu))
305     + /*
306     + * In case of hot-plug or hot-unplug, we may have to undo
307     + * vmx_vcpu_pi_put even if there is no assigned device. And we
308     + * always keep PI.NDST up to date for simplicity: it makes the
309     + * code easier, and CPU migration is not a fast path.
310     + */
311     + if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
312     + return;
313     +
314     + /*
315     + * First handle the simple case where no cmpxchg is necessary; just
316     + * allow posting non-urgent interrupts.
317     + *
318     + * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
319     + * PI.NDST: pi_post_block will do it for us and the wakeup_handler
320     + * expects the VCPU to be on the blocked_vcpu_list that matches
321     + * PI.NDST.
322     + */
323     + if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
324     + vcpu->cpu == cpu) {
325     + pi_clear_sn(pi_desc);
326     return;
327     + }
328    
329     + /* The full case. */
330     do {
331     old.control = new.control = pi_desc->control;
332    
333     - /*
334     - * If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there
335     - * are two possible cases:
336     - * 1. After running 'pre_block', context switch
337     - * happened. For this case, 'sn' was set in
338     - * vmx_vcpu_put(), so we need to clear it here.
339     - * 2. After running 'pre_block', we were blocked,
340     - * and woken up by some other guy. For this case,
341     - * we don't need to do anything, 'pi_post_block'
342     - * will do everything for us. However, we cannot
343     - * check whether it is case #1 or case #2 here
344     - * (maybe, not needed), so we also clear sn here,
345     - * I think it is not a big deal.
346     - */
347     - if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) {
348     - if (vcpu->cpu != cpu) {
349     - dest = cpu_physical_id(cpu);
350     -
351     - if (x2apic_enabled())
352     - new.ndst = dest;
353     - else
354     - new.ndst = (dest << 8) & 0xFF00;
355     - }
356     + dest = cpu_physical_id(cpu);
357    
358     - /* set 'NV' to 'notification vector' */
359     - new.nv = POSTED_INTR_VECTOR;
360     - }
361     + if (x2apic_enabled())
362     + new.ndst = dest;
363     + else
364     + new.ndst = (dest << 8) & 0xFF00;
365    
366     - /* Allow posting non-urgent interrupts */
367     new.sn = 0;
368     - } while (cmpxchg(&pi_desc->control, old.control,
369     - new.control) != old.control);
370     + } while (cmpxchg64(&pi_desc->control, old.control,
371     + new.control) != old.control);
372     }
373    
374     static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
375     @@ -4761,21 +4759,30 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
376     {
377     #ifdef CONFIG_SMP
378     if (vcpu->mode == IN_GUEST_MODE) {
379     - struct vcpu_vmx *vmx = to_vmx(vcpu);
380     -
381     /*
382     - * Currently, we don't support urgent interrupt,
383     - * all interrupts are recognized as non-urgent
384     - * interrupt, so we cannot post interrupts when
385     - * 'SN' is set.
386     + * The vector of interrupt to be delivered to vcpu had
387     + * been set in PIR before this function.
388     *
389     - * If the vcpu is in guest mode, it means it is
390     - * running instead of being scheduled out and
391     - * waiting in the run queue, and that's the only
392     - * case when 'SN' is set currently, warning if
393     - * 'SN' is set.
394     + * Following cases will be reached in this block, and
395     + * we always send a notification event in all cases as
396     + * explained below.
397     + *
398     + * Case 1: vcpu keeps in non-root mode. Sending a
399     + * notification event posts the interrupt to vcpu.
400     + *
401     + * Case 2: vcpu exits to root mode and is still
402     + * runnable. PIR will be synced to vIRR before the
403     + * next vcpu entry. Sending a notification event in
404     + * this case has no effect, as vcpu is not in root
405     + * mode.
406     + *
407     + * Case 3: vcpu exits to root mode and is blocked.
408     + * vcpu_block() has already synced PIR to vIRR and
409     + * never blocks vcpu if vIRR is not cleared. Therefore,
410     + * a blocked vcpu here does not wait for any requested
411     + * interrupts in PIR, and sending a notification event
412     + * which has no effect is safe here.
413     */
414     - WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc));
415    
416     apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
417     POSTED_INTR_VECTOR);
418     @@ -9187,6 +9194,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
419    
420     vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
421    
422     + /*
423     + * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
424     + * or POSTED_INTR_WAKEUP_VECTOR.
425     + */
426     + vmx->pi_desc.nv = POSTED_INTR_VECTOR;
427     + vmx->pi_desc.sn = 1;
428     +
429     return &vmx->vcpu;
430    
431     free_vmcs:
432     @@ -9996,6 +10010,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
433     vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
434     page_to_phys(vmx->nested.virtual_apic_page));
435     vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
436     + } else {
437     +#ifdef CONFIG_X86_64
438     + exec_control |= CPU_BASED_CR8_LOAD_EXITING |
439     + CPU_BASED_CR8_STORE_EXITING;
440     +#endif
441     }
442    
443     if (cpu_has_vmx_msr_bitmap() &&
444     @@ -11000,6 +11019,37 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
445     kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
446     }
447    
448     +static void __pi_post_block(struct kvm_vcpu *vcpu)
449     +{
450     + struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
451     + struct pi_desc old, new;
452     + unsigned int dest;
453     +
454     + do {
455     + old.control = new.control = pi_desc->control;
456     + WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
457     + "Wakeup handler not enabled while the VCPU is blocked\n");
458     +
459     + dest = cpu_physical_id(vcpu->cpu);
460     +
461     + if (x2apic_enabled())
462     + new.ndst = dest;
463     + else
464     + new.ndst = (dest << 8) & 0xFF00;
465     +
466     + /* set 'NV' to 'notification vector' */
467     + new.nv = POSTED_INTR_VECTOR;
468     + } while (cmpxchg64(&pi_desc->control, old.control,
469     + new.control) != old.control);
470     +
471     + if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
472     + spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
473     + list_del(&vcpu->blocked_vcpu_list);
474     + spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
475     + vcpu->pre_pcpu = -1;
476     + }
477     +}
478     +
479     /*
480     * This routine does the following things for vCPU which is going
481     * to be blocked if VT-d PI is enabled.
482     @@ -11015,7 +11065,6 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
483     */
484     static int pi_pre_block(struct kvm_vcpu *vcpu)
485     {
486     - unsigned long flags;
487     unsigned int dest;
488     struct pi_desc old, new;
489     struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
490     @@ -11025,34 +11074,20 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)
491     !kvm_vcpu_apicv_active(vcpu))
492     return 0;
493    
494     - vcpu->pre_pcpu = vcpu->cpu;
495     - spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
496     - vcpu->pre_pcpu), flags);
497     - list_add_tail(&vcpu->blocked_vcpu_list,
498     - &per_cpu(blocked_vcpu_on_cpu,
499     - vcpu->pre_pcpu));
500     - spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock,
501     - vcpu->pre_pcpu), flags);
502     + WARN_ON(irqs_disabled());
503     + local_irq_disable();
504     + if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
505     + vcpu->pre_pcpu = vcpu->cpu;
506     + spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
507     + list_add_tail(&vcpu->blocked_vcpu_list,
508     + &per_cpu(blocked_vcpu_on_cpu,
509     + vcpu->pre_pcpu));
510     + spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
511     + }
512    
513     do {
514     old.control = new.control = pi_desc->control;
515    
516     - /*
517     - * We should not block the vCPU if
518     - * an interrupt is posted for it.
519     - */
520     - if (pi_test_on(pi_desc) == 1) {
521     - spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
522     - vcpu->pre_pcpu), flags);
523     - list_del(&vcpu->blocked_vcpu_list);
524     - spin_unlock_irqrestore(
525     - &per_cpu(blocked_vcpu_on_cpu_lock,
526     - vcpu->pre_pcpu), flags);
527     - vcpu->pre_pcpu = -1;
528     -
529     - return 1;
530     - }
531     -
532     WARN((pi_desc->sn == 1),
533     "Warning: SN field of posted-interrupts "
534     "is set before blocking\n");
535     @@ -11074,10 +11109,15 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)
536    
537     /* set 'NV' to 'wakeup vector' */
538     new.nv = POSTED_INTR_WAKEUP_VECTOR;
539     - } while (cmpxchg(&pi_desc->control, old.control,
540     - new.control) != old.control);
541     + } while (cmpxchg64(&pi_desc->control, old.control,
542     + new.control) != old.control);
543    
544     - return 0;
545     + /* We should not block the vCPU if an interrupt is posted for it. */
546     + if (pi_test_on(pi_desc) == 1)
547     + __pi_post_block(vcpu);
548     +
549     + local_irq_enable();
550     + return (vcpu->pre_pcpu == -1);
551     }
552    
553     static int vmx_pre_block(struct kvm_vcpu *vcpu)
554     @@ -11093,44 +11133,13 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
555    
556     static void pi_post_block(struct kvm_vcpu *vcpu)
557     {
558     - struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
559     - struct pi_desc old, new;
560     - unsigned int dest;
561     - unsigned long flags;
562     -
563     - if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
564     - !irq_remapping_cap(IRQ_POSTING_CAP) ||
565     - !kvm_vcpu_apicv_active(vcpu))
566     + if (vcpu->pre_pcpu == -1)
567     return;
568    
569     - do {
570     - old.control = new.control = pi_desc->control;
571     -
572     - dest = cpu_physical_id(vcpu->cpu);
573     -
574     - if (x2apic_enabled())
575     - new.ndst = dest;
576     - else
577     - new.ndst = (dest << 8) & 0xFF00;
578     -
579     - /* Allow posting non-urgent interrupts */
580     - new.sn = 0;
581     -
582     - /* set 'NV' to 'notification vector' */
583     - new.nv = POSTED_INTR_VECTOR;
584     - } while (cmpxchg(&pi_desc->control, old.control,
585     - new.control) != old.control);
586     -
587     - if(vcpu->pre_pcpu != -1) {
588     - spin_lock_irqsave(
589     - &per_cpu(blocked_vcpu_on_cpu_lock,
590     - vcpu->pre_pcpu), flags);
591     - list_del(&vcpu->blocked_vcpu_list);
592     - spin_unlock_irqrestore(
593     - &per_cpu(blocked_vcpu_on_cpu_lock,
594     - vcpu->pre_pcpu), flags);
595     - vcpu->pre_pcpu = -1;
596     - }
597     + WARN_ON(irqs_disabled());
598     + local_irq_disable();
599     + __pi_post_block(vcpu);
600     + local_irq_enable();
601     }
602    
603     static void vmx_post_block(struct kvm_vcpu *vcpu)
604     @@ -11158,7 +11167,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
605     struct kvm_lapic_irq irq;
606     struct kvm_vcpu *vcpu;
607     struct vcpu_data vcpu_info;
608     - int idx, ret = -EINVAL;
609     + int idx, ret = 0;
610    
611     if (!kvm_arch_has_assigned_device(kvm) ||
612     !irq_remapping_cap(IRQ_POSTING_CAP) ||
613     @@ -11167,7 +11176,12 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
614    
615     idx = srcu_read_lock(&kvm->irq_srcu);
616     irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
617     - BUG_ON(guest_irq >= irq_rt->nr_rt_entries);
618     + if (guest_irq >= irq_rt->nr_rt_entries ||
619     + hlist_empty(&irq_rt->map[guest_irq])) {
620     + pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
621     + guest_irq, irq_rt->nr_rt_entries);
622     + goto out;
623     + }
624    
625     hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
626     if (e->type != KVM_IRQ_ROUTING_MSI)
627     @@ -11210,12 +11224,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
628    
629     if (set)
630     ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
631     - else {
632     - /* suppress notification event before unposting */
633     - pi_set_sn(vcpu_to_pi_desc(vcpu));
634     + else
635     ret = irq_set_vcpu_affinity(host_irq, NULL);
636     - pi_clear_sn(vcpu_to_pi_desc(vcpu));
637     - }
638    
639     if (ret < 0) {
640     printk(KERN_INFO "%s: failed to update PI IRTE\n",
641     diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
642     index 9f72ca3b2669..1dd796025472 100644
643     --- a/arch/x86/mm/fault.c
644     +++ b/arch/x86/mm/fault.c
645     @@ -191,8 +191,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
646     * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
647     * faulted on a pte with its pkey=4.
648     */
649     -static void fill_sig_info_pkey(int si_code, siginfo_t *info,
650     - struct vm_area_struct *vma)
651     +static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
652     {
653     /* This is effectively an #ifdef */
654     if (!boot_cpu_has(X86_FEATURE_OSPKE))
655     @@ -208,7 +207,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
656     * valid VMA, so we should never reach this without a
657     * valid VMA.
658     */
659     - if (!vma) {
660     + if (!pkey) {
661     WARN_ONCE(1, "PKU fault with no VMA passed in");
662     info->si_pkey = 0;
663     return;
664     @@ -218,13 +217,12 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
665     * absolutely guranteed to be 100% accurate because of
666     * the race explained above.
667     */
668     - info->si_pkey = vma_pkey(vma);
669     + info->si_pkey = *pkey;
670     }
671    
672     static void
673     force_sig_info_fault(int si_signo, int si_code, unsigned long address,
674     - struct task_struct *tsk, struct vm_area_struct *vma,
675     - int fault)
676     + struct task_struct *tsk, u32 *pkey, int fault)
677     {
678     unsigned lsb = 0;
679     siginfo_t info;
680     @@ -239,7 +237,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
681     lsb = PAGE_SHIFT;
682     info.si_addr_lsb = lsb;
683    
684     - fill_sig_info_pkey(si_code, &info, vma);
685     + fill_sig_info_pkey(si_code, &info, pkey);
686    
687     force_sig_info(si_signo, &info, tsk);
688     }
689     @@ -718,8 +716,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
690     struct task_struct *tsk = current;
691     unsigned long flags;
692     int sig;
693     - /* No context means no VMA to pass down */
694     - struct vm_area_struct *vma = NULL;
695    
696     /* Are we prepared to handle this kernel fault? */
697     if (fixup_exception(regs, X86_TRAP_PF)) {
698     @@ -744,7 +740,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
699    
700     /* XXX: hwpoison faults will set the wrong code. */
701     force_sig_info_fault(signal, si_code, address,
702     - tsk, vma, 0);
703     + tsk, NULL, 0);
704     }
705    
706     /*
707     @@ -853,8 +849,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
708    
709     static void
710     __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
711     - unsigned long address, struct vm_area_struct *vma,
712     - int si_code)
713     + unsigned long address, u32 *pkey, int si_code)
714     {
715     struct task_struct *tsk = current;
716    
717     @@ -902,7 +897,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
718     tsk->thread.error_code = error_code;
719     tsk->thread.trap_nr = X86_TRAP_PF;
720    
721     - force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);
722     + force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0);
723    
724     return;
725     }
726     @@ -915,9 +910,9 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
727    
728     static noinline void
729     bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
730     - unsigned long address, struct vm_area_struct *vma)
731     + unsigned long address, u32 *pkey)
732     {
733     - __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR);
734     + __bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR);
735     }
736    
737     static void
738     @@ -925,6 +920,10 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
739     unsigned long address, struct vm_area_struct *vma, int si_code)
740     {
741     struct mm_struct *mm = current->mm;
742     + u32 pkey;
743     +
744     + if (vma)
745     + pkey = vma_pkey(vma);
746    
747     /*
748     * Something tried to access memory that isn't in our memory map..
749     @@ -932,7 +931,8 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
750     */
751     up_read(&mm->mmap_sem);
752    
753     - __bad_area_nosemaphore(regs, error_code, address, vma, si_code);
754     + __bad_area_nosemaphore(regs, error_code, address,
755     + (vma) ? &pkey : NULL, si_code);
756     }
757    
758     static noinline void
759     @@ -975,7 +975,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
760    
761     static void
762     do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
763     - struct vm_area_struct *vma, unsigned int fault)
764     + u32 *pkey, unsigned int fault)
765     {
766     struct task_struct *tsk = current;
767     int code = BUS_ADRERR;
768     @@ -1002,13 +1002,12 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
769     code = BUS_MCEERR_AR;
770     }
771     #endif
772     - force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault);
773     + force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault);
774     }
775    
776     static noinline void
777     mm_fault_error(struct pt_regs *regs, unsigned long error_code,
778     - unsigned long address, struct vm_area_struct *vma,
779     - unsigned int fault)
780     + unsigned long address, u32 *pkey, unsigned int fault)
781     {
782     if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
783     no_context(regs, error_code, address, 0, 0);
784     @@ -1032,9 +1031,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
785     } else {
786     if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
787     VM_FAULT_HWPOISON_LARGE))
788     - do_sigbus(regs, error_code, address, vma, fault);
789     + do_sigbus(regs, error_code, address, pkey, fault);
790     else if (fault & VM_FAULT_SIGSEGV)
791     - bad_area_nosemaphore(regs, error_code, address, vma);
792     + bad_area_nosemaphore(regs, error_code, address, pkey);
793     else
794     BUG();
795     }
796     @@ -1220,6 +1219,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
797     struct mm_struct *mm;
798     int fault, major = 0;
799     unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
800     + u32 pkey;
801    
802     tsk = current;
803     mm = tsk->mm;
804     @@ -1420,9 +1420,10 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
805     return;
806     }
807    
808     + pkey = vma_pkey(vma);
809     up_read(&mm->mmap_sem);
810     if (unlikely(fault & VM_FAULT_ERROR)) {
811     - mm_fault_error(regs, error_code, address, vma, fault);
812     + mm_fault_error(regs, error_code, address, &pkey, fault);
813     return;
814     }
815    
816     diff --git a/block/bsg-lib.c b/block/bsg-lib.c
817     index 650f427d915b..341b8d858e67 100644
818     --- a/block/bsg-lib.c
819     +++ b/block/bsg-lib.c
820     @@ -147,7 +147,6 @@ static int bsg_create_job(struct device *dev, struct request *req)
821     failjob_rls_rqst_payload:
822     kfree(job->request_payload.sg_list);
823     failjob_rls_job:
824     - kfree(job);
825     return -ENOMEM;
826     }
827    
828     diff --git a/crypto/drbg.c b/crypto/drbg.c
829     index 8cac3d31a5f8..942ddff68408 100644
830     --- a/crypto/drbg.c
831     +++ b/crypto/drbg.c
832     @@ -1133,10 +1133,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
833     {
834     if (!drbg)
835     return;
836     - kzfree(drbg->V);
837     - drbg->Vbuf = NULL;
838     - kzfree(drbg->C);
839     - drbg->Cbuf = NULL;
840     + kzfree(drbg->Vbuf);
841     + drbg->V = NULL;
842     + kzfree(drbg->Cbuf);
843     + drbg->C = NULL;
844     kzfree(drbg->scratchpadbuf);
845     drbg->scratchpadbuf = NULL;
846     drbg->reseed_ctr = 0;
847     diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
848     index 2932a5bd892f..dfffba39f723 100644
849     --- a/drivers/base/power/main.c
850     +++ b/drivers/base/power/main.c
851     @@ -1757,10 +1757,13 @@ void device_pm_check_callbacks(struct device *dev)
852     {
853     spin_lock_irq(&dev->power.lock);
854     dev->power.no_pm_callbacks =
855     - (!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
856     - (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
857     + (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
858     + !dev->bus->suspend && !dev->bus->resume)) &&
859     + (!dev->class || (pm_ops_is_empty(dev->class->pm) &&
860     + !dev->class->suspend && !dev->class->resume)) &&
861     (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
862     (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
863     - (!dev->driver || pm_ops_is_empty(dev->driver->pm));
864     + (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
865     + !dev->driver->suspend && !dev->driver->resume));
866     spin_unlock_irq(&dev->power.lock);
867     }
868     diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
869     index 571de2f284cf..e2d323fa2437 100644
870     --- a/drivers/crypto/talitos.c
871     +++ b/drivers/crypto/talitos.c
872     @@ -1756,9 +1756,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
873     req_ctx->swinit = 0;
874     } else {
875     desc->ptr[1] = zero_entry;
876     - /* Indicate next op is not the first. */
877     - req_ctx->first = 0;
878     }
879     + /* Indicate next op is not the first. */
880     + req_ctx->first = 0;
881    
882     /* HMAC key */
883     if (ctx->keylen)
884     @@ -1769,7 +1769,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
885    
886     sg_count = edesc->src_nents ?: 1;
887     if (is_sec1 && sg_count > 1)
888     - sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length);
889     + sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
890     else
891     sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
892     DMA_TO_DEVICE);
893     @@ -3057,7 +3057,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
894     t_alg->algt.alg.hash.final = ahash_final;
895     t_alg->algt.alg.hash.finup = ahash_finup;
896     t_alg->algt.alg.hash.digest = ahash_digest;
897     - t_alg->algt.alg.hash.setkey = ahash_setkey;
898     + if (!strncmp(alg->cra_name, "hmac", 4))
899     + t_alg->algt.alg.hash.setkey = ahash_setkey;
900     t_alg->algt.alg.hash.import = ahash_import;
901     t_alg->algt.alg.hash.export = ahash_export;
902    
903     diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
904     index 0370b842d9cc..82dd57d4843c 100644
905     --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
906     +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
907     @@ -549,12 +549,15 @@ static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
908     void etnaviv_gem_free_object(struct drm_gem_object *obj)
909     {
910     struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
911     + struct etnaviv_drm_private *priv = obj->dev->dev_private;
912     struct etnaviv_vram_mapping *mapping, *tmp;
913    
914     /* object should not be active */
915     WARN_ON(is_active(etnaviv_obj));
916    
917     + mutex_lock(&priv->gem_lock);
918     list_del(&etnaviv_obj->gem_node);
919     + mutex_unlock(&priv->gem_lock);
920    
921     list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
922     obj_node) {
923     diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
924     index 3b21ca5a6c81..82b01123c386 100644
925     --- a/drivers/gpu/drm/radeon/radeon_device.c
926     +++ b/drivers/gpu/drm/radeon/radeon_device.c
927     @@ -1674,7 +1674,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
928     radeon_agp_suspend(rdev);
929    
930     pci_save_state(dev->pdev);
931     - if (freeze && rdev->family >= CHIP_CEDAR) {
932     + if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
933     rdev->asic->asic_reset(rdev, true);
934     pci_restore_state(dev->pdev);
935     } else if (suspend) {
936     diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
937     index 9398143d7c5e..6512a555f7f8 100644
938     --- a/drivers/infiniband/hw/cxgb4/cm.c
939     +++ b/drivers/infiniband/hw/cxgb4/cm.c
940     @@ -2577,9 +2577,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
941     c4iw_put_ep(&child_ep->com);
942     reject:
943     reject_cr(dev, hwtid, skb);
944     +out:
945     if (parent_ep)
946     c4iw_put_ep(&parent_ep->com);
947     -out:
948     return 0;
949     }
950    
951     @@ -3441,7 +3441,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
952     cm_id->provider_data = ep;
953     goto out;
954     }
955     -
956     + remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
957     cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
958     ep->com.local_addr.ss_family);
959     fail2:
960     diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
961     index 549b4afd12e1..7aea0221530c 100644
962     --- a/drivers/md/raid5.c
963     +++ b/drivers/md/raid5.c
964     @@ -829,6 +829,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
965     spin_unlock(&head->batch_head->batch_lock);
966     goto unlock_out;
967     }
968     + /*
969     + * We must assign batch_head of this stripe within the
970     + * batch_lock, otherwise clear_batch_ready of batch head
971     + * stripe could clear BATCH_READY bit of this stripe and
972     + * this stripe->batch_head doesn't get assigned, which
973     + * could confuse clear_batch_ready for this stripe
974     + */
975     + sh->batch_head = head->batch_head;
976    
977     /*
978     * at this point, head's BATCH_READY could be cleared, but we
979     @@ -836,8 +844,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
980     */
981     list_add(&sh->batch_list, &head->batch_list);
982     spin_unlock(&head->batch_head->batch_lock);
983     -
984     - sh->batch_head = head->batch_head;
985     } else {
986     head->batch_head = head;
987     sh->batch_head = head->batch_head;
988     @@ -4277,7 +4283,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
989    
990     set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
991     (1 << STRIPE_PREREAD_ACTIVE) |
992     - (1 << STRIPE_DEGRADED)),
993     + (1 << STRIPE_DEGRADED) |
994     + (1 << STRIPE_ON_UNPLUG_LIST)),
995     head_sh->state & (1 << STRIPE_INSYNC));
996    
997     sh->check_state = head_sh->check_state;
998     diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
999     index 2e5233b60971..ae856161faa9 100644
1000     --- a/drivers/misc/cxl/api.c
1001     +++ b/drivers/misc/cxl/api.c
1002     @@ -244,6 +244,10 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
1003     ctx->real_mode = false;
1004     }
1005    
1006     + /*
1007     + * Increment driver use count. Enables global TLBIs for hash
1008     + * and callbacks to handle the segment table
1009     + */
1010     cxl_ctx_get();
1011    
1012     if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
1013     diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
1014     index afa211397048..d3e009438991 100644
1015     --- a/drivers/misc/cxl/file.c
1016     +++ b/drivers/misc/cxl/file.c
1017     @@ -91,7 +91,6 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
1018    
1019     pr_devel("afu_open pe: %i\n", ctx->pe);
1020     file->private_data = ctx;
1021     - cxl_ctx_get();
1022    
1023     /* indicate success */
1024     rc = 0;
1025     @@ -213,6 +212,12 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
1026     ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
1027    
1028    
1029     + /*
1030     + * Increment driver use count. Enables global TLBIs for hash
1031     + * and callbacks to handle the segment table
1032     + */
1033     + cxl_ctx_get();
1034     +
1035     trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
1036    
1037     if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
1038     @@ -222,6 +227,7 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
1039     put_pid(ctx->glpid);
1040     put_pid(ctx->pid);
1041     ctx->glpid = ctx->pid = NULL;
1042     + cxl_ctx_put();
1043     goto out;
1044     }
1045    
1046     diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
1047     index 0fd7d7ed07ce..c06932c5ecdb 100644
1048     --- a/drivers/net/wireless/mac80211_hwsim.c
1049     +++ b/drivers/net/wireless/mac80211_hwsim.c
1050     @@ -1357,8 +1357,6 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
1051     txi->control.rates,
1052     ARRAY_SIZE(txi->control.rates));
1053    
1054     - txi->rate_driver_data[0] = channel;
1055     -
1056     if (skb->len >= 24 + 8 &&
1057     ieee80211_is_probe_resp(hdr->frame_control)) {
1058     /* fake header transmission time */
1059     diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
1060     index 1b0786555394..f9f4d1c18eb2 100644
1061     --- a/drivers/pci/pci-sysfs.c
1062     +++ b/drivers/pci/pci-sysfs.c
1063     @@ -527,7 +527,7 @@ static ssize_t driver_override_store(struct device *dev,
1064     const char *buf, size_t count)
1065     {
1066     struct pci_dev *pdev = to_pci_dev(dev);
1067     - char *driver_override, *old = pdev->driver_override, *cp;
1068     + char *driver_override, *old, *cp;
1069    
1070     /* We need to keep extra room for a newline */
1071     if (count >= (PAGE_SIZE - 1))
1072     @@ -541,12 +541,15 @@ static ssize_t driver_override_store(struct device *dev,
1073     if (cp)
1074     *cp = '\0';
1075    
1076     + device_lock(dev);
1077     + old = pdev->driver_override;
1078     if (strlen(driver_override)) {
1079     pdev->driver_override = driver_override;
1080     } else {
1081     kfree(driver_override);
1082     pdev->driver_override = NULL;
1083     }
1084     + device_unlock(dev);
1085    
1086     kfree(old);
1087    
1088     @@ -557,8 +560,12 @@ static ssize_t driver_override_show(struct device *dev,
1089     struct device_attribute *attr, char *buf)
1090     {
1091     struct pci_dev *pdev = to_pci_dev(dev);
1092     + ssize_t len;
1093    
1094     - return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
1095     + device_lock(dev);
1096     + len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
1097     + device_unlock(dev);
1098     + return len;
1099     }
1100     static DEVICE_ATTR_RW(driver_override);
1101    
1102     diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
1103     index 42bca619f854..c39551b32e94 100644
1104     --- a/drivers/scsi/scsi_transport_iscsi.c
1105     +++ b/drivers/scsi/scsi_transport_iscsi.c
1106     @@ -3696,7 +3696,7 @@ iscsi_if_rx(struct sk_buff *skb)
1107     uint32_t group;
1108    
1109     nlh = nlmsg_hdr(skb);
1110     - if (nlh->nlmsg_len < sizeof(*nlh) ||
1111     + if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
1112     skb->len < nlh->nlmsg_len) {
1113     break;
1114     }
1115     diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
1116     index 11026e726b68..81367cf0af77 100644
1117     --- a/drivers/video/fbdev/aty/atyfb_base.c
1118     +++ b/drivers/video/fbdev/aty/atyfb_base.c
1119     @@ -1861,7 +1861,7 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
1120     #if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
1121     case ATYIO_CLKR:
1122     if (M64_HAS(INTEGRATED)) {
1123     - struct atyclk clk;
1124     + struct atyclk clk = { 0 };
1125     union aty_pll *pll = &par->pll;
1126     u32 dsp_config = pll->ct.dsp_config;
1127     u32 dsp_on_off = pll->ct.dsp_on_off;
1128     diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
1129     index 679f79f68182..b68ced5a6331 100644
1130     --- a/drivers/xen/swiotlb-xen.c
1131     +++ b/drivers/xen/swiotlb-xen.c
1132     @@ -680,3 +680,22 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
1133     return 0;
1134     }
1135     EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
1136     +
1137     +/*
1138     + * Create userspace mapping for the DMA-coherent memory.
1139     + * This function should be called with the pages from the current domain only,
1140     + * passing pages mapped from other domains would lead to memory corruption.
1141     + */
1142     +int
1143     +xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
1144     + void *cpu_addr, dma_addr_t dma_addr, size_t size,
1145     + unsigned long attrs)
1146     +{
1147     +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
1148     + if (__generic_dma_ops(dev)->mmap)
1149     + return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
1150     + dma_addr, size, attrs);
1151     +#endif
1152     + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
1153     +}
1154     +EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
1155     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1156     index 1782804f6c26..0fe346c4bd28 100644
1157     --- a/fs/btrfs/ioctl.c
1158     +++ b/fs/btrfs/ioctl.c
1159     @@ -3052,7 +3052,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
1160     out:
1161     if (ret)
1162     btrfs_cmp_data_free(cmp);
1163     - return 0;
1164     + return ret;
1165     }
1166    
1167     static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
1168     @@ -4082,6 +4082,10 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
1169     ret = PTR_ERR(new_root);
1170     goto out;
1171     }
1172     + if (!is_fstree(new_root->objectid)) {
1173     + ret = -ENOENT;
1174     + goto out;
1175     + }
1176    
1177     path = btrfs_alloc_path();
1178     if (!path) {
1179     diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
1180     index 2cf5e142675e..04c61bcf62e5 100644
1181     --- a/fs/btrfs/relocation.c
1182     +++ b/fs/btrfs/relocation.c
1183     @@ -2367,11 +2367,11 @@ void free_reloc_roots(struct list_head *list)
1184     while (!list_empty(list)) {
1185     reloc_root = list_entry(list->next, struct btrfs_root,
1186     root_list);
1187     + __del_reloc_root(reloc_root);
1188     free_extent_buffer(reloc_root->node);
1189     free_extent_buffer(reloc_root->commit_root);
1190     reloc_root->node = NULL;
1191     reloc_root->commit_root = NULL;
1192     - __del_reloc_root(reloc_root);
1193     }
1194     }
1195    
1196     diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
1197     index c0c253005b76..87658f63b374 100644
1198     --- a/fs/cifs/cifsfs.c
1199     +++ b/fs/cifs/cifsfs.c
1200     @@ -1360,7 +1360,7 @@ exit_cifs(void)
1201     exit_cifs_idmap();
1202     #endif
1203     #ifdef CONFIG_CIFS_UPCALL
1204     - unregister_key_type(&cifs_spnego_key_type);
1205     + exit_cifs_spnego();
1206     #endif
1207     cifs_destroy_request_bufs();
1208     cifs_destroy_mids();
1209     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
1210     index 1a545695f547..f6712b6128d8 100644
1211     --- a/fs/cifs/connect.c
1212     +++ b/fs/cifs/connect.c
1213     @@ -4071,6 +4071,14 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
1214     cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
1215     server->sec_mode, server->capabilities, server->timeAdj);
1216    
1217     + if (ses->auth_key.response) {
1218     + cifs_dbg(VFS, "Free previous auth_key.response = %p\n",
1219     + ses->auth_key.response);
1220     + kfree(ses->auth_key.response);
1221     + ses->auth_key.response = NULL;
1222     + ses->auth_key.len = 0;
1223     + }
1224     +
1225     if (server->ops->sess_setup)
1226     rc = server->ops->sess_setup(xid, ses, nls_info);
1227    
1228     diff --git a/fs/cifs/file.c b/fs/cifs/file.c
1229     index 3925758f6dde..cf192f9ce254 100644
1230     --- a/fs/cifs/file.c
1231     +++ b/fs/cifs/file.c
1232     @@ -224,6 +224,13 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
1233     if (backup_cred(cifs_sb))
1234     create_options |= CREATE_OPEN_BACKUP_INTENT;
1235    
1236     + /* O_SYNC also has bit for O_DSYNC so following check picks up either */
1237     + if (f_flags & O_SYNC)
1238     + create_options |= CREATE_WRITE_THROUGH;
1239     +
1240     + if (f_flags & O_DIRECT)
1241     + create_options |= CREATE_NO_BUFFER;
1242     +
1243     oparms.tcon = tcon;
1244     oparms.cifs_sb = cifs_sb;
1245     oparms.desired_access = desired_access;
1246     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
1247     index 0437e5fdba56..69b610ad3fdc 100644
1248     --- a/fs/cifs/smb2pdu.c
1249     +++ b/fs/cifs/smb2pdu.c
1250     @@ -366,7 +366,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req)
1251     build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
1252     req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
1253     req->NegotiateContextCount = cpu_to_le16(2);
1254     - inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) + 2
1255     + inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context)
1256     + sizeof(struct smb2_encryption_neg_context)); /* calculate hash */
1257     }
1258     #else
1259     @@ -531,15 +531,22 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
1260    
1261     /*
1262     * validation ioctl must be signed, so no point sending this if we
1263     - * can not sign it. We could eventually change this to selectively
1264     + * can not sign it (ie are not known user). Even if signing is not
1265     + * required (enabled but not negotiated), in those cases we selectively
1266     * sign just this, the first and only signed request on a connection.
1267     - * This is good enough for now since a user who wants better security
1268     - * would also enable signing on the mount. Having validation of
1269     - * negotiate info for signed connections helps reduce attack vectors
1270     + * Having validation of negotiate info helps reduce attack vectors.
1271     */
1272     - if (tcon->ses->server->sign == false)
1273     + if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
1274     return 0; /* validation requires signing */
1275    
1276     + if (tcon->ses->user_name == NULL) {
1277     + cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
1278     + return 0; /* validation requires signing */
1279     + }
1280     +
1281     + if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
1282     + cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
1283     +
1284     vneg_inbuf.Capabilities =
1285     cpu_to_le32(tcon->ses->server->vals->req_capabilities);
1286     memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
1287     @@ -1010,6 +1017,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
1288     while (sess_data->func)
1289     sess_data->func(sess_data);
1290    
1291     + if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
1292     + cifs_dbg(VFS, "signing requested but authenticated as guest\n");
1293     rc = sess_data->result;
1294     out:
1295     kfree(sess_data);
1296     diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
1297     index 7bff6f46f5da..f7cae1629c6c 100644
1298     --- a/fs/gfs2/glock.c
1299     +++ b/fs/gfs2/glock.c
1300     @@ -1836,13 +1836,9 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1301     {
1302     struct gfs2_glock_iter *gi = seq->private;
1303     loff_t n = *pos;
1304     - int ret;
1305     -
1306     - if (gi->last_pos <= *pos)
1307     - n = (*pos - gi->last_pos);
1308    
1309     - ret = rhashtable_walk_start(&gi->hti);
1310     - if (ret)
1311     + rhashtable_walk_enter(&gl_hash_table, &gi->hti);
1312     + if (rhashtable_walk_start(&gi->hti) != 0)
1313     return NULL;
1314    
1315     do {
1316     @@ -1850,6 +1846,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1317     } while (gi->gl && n--);
1318    
1319     gi->last_pos = *pos;
1320     +
1321     return gi->gl;
1322     }
1323    
1324     @@ -1861,6 +1858,7 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1325     (*pos)++;
1326     gi->last_pos = *pos;
1327     gfs2_glock_iter_next(gi);
1328     +
1329     return gi->gl;
1330     }
1331    
1332     @@ -1870,6 +1868,7 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1333    
1334     gi->gl = NULL;
1335     rhashtable_walk_stop(&gi->hti);
1336     + rhashtable_walk_exit(&gi->hti);
1337     }
1338    
1339     static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1340     @@ -1932,12 +1931,10 @@ static int gfs2_glocks_open(struct inode *inode, struct file *file)
1341     struct gfs2_glock_iter *gi = seq->private;
1342    
1343     gi->sdp = inode->i_private;
1344     - gi->last_pos = 0;
1345     seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
1346     if (seq->buf)
1347     seq->size = GFS2_SEQ_GOODSIZE;
1348     gi->gl = NULL;
1349     - ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
1350     }
1351     return ret;
1352     }
1353     @@ -1948,7 +1945,6 @@ static int gfs2_glocks_release(struct inode *inode, struct file *file)
1354     struct gfs2_glock_iter *gi = seq->private;
1355    
1356     gi->gl = NULL;
1357     - rhashtable_walk_exit(&gi->hti);
1358     return seq_release_private(inode, file);
1359     }
1360    
1361     @@ -1960,12 +1956,10 @@ static int gfs2_glstats_open(struct inode *inode, struct file *file)
1362     struct seq_file *seq = file->private_data;
1363     struct gfs2_glock_iter *gi = seq->private;
1364     gi->sdp = inode->i_private;
1365     - gi->last_pos = 0;
1366     seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
1367     if (seq->buf)
1368     seq->size = GFS2_SEQ_GOODSIZE;
1369     gi->gl = NULL;
1370     - ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
1371     }
1372     return ret;
1373     }
1374     diff --git a/fs/proc/array.c b/fs/proc/array.c
1375     index 81818adb8e9e..c932ec454625 100644
1376     --- a/fs/proc/array.c
1377     +++ b/fs/proc/array.c
1378     @@ -60,6 +60,7 @@
1379     #include <linux/tty.h>
1380     #include <linux/string.h>
1381     #include <linux/mman.h>
1382     +#include <linux/sched.h>
1383     #include <linux/proc_fs.h>
1384     #include <linux/ioport.h>
1385     #include <linux/uaccess.h>
1386     @@ -416,7 +417,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
1387     * esp and eip are intentionally zeroed out. There is no
1388     * non-racy way to read them without freezing the task.
1389     * Programs that need reliable values can use ptrace(2).
1390     + *
1391     + * The only exception is if the task is core dumping because
1392     + * a program is not able to use ptrace(2) in that case. It is
1393     + * safe because the task has stopped executing permanently.
1394     */
1395     + if (permitted && (task->flags & PF_DUMPCORE)) {
1396     + eip = KSTK_EIP(task);
1397     + esp = KSTK_ESP(task);
1398     + }
1399     }
1400    
1401     get_task_comm(tcomm, task);
1402     diff --git a/fs/read_write.c b/fs/read_write.c
1403     index e479e24dcd4c..09a8757efd34 100644
1404     --- a/fs/read_write.c
1405     +++ b/fs/read_write.c
1406     @@ -114,7 +114,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
1407     * In the generic case the entire file is data, so as long as
1408     * offset isn't at the end of the file then the offset is data.
1409     */
1410     - if (offset >= eof)
1411     + if ((unsigned long long)offset >= eof)
1412     return -ENXIO;
1413     break;
1414     case SEEK_HOLE:
1415     @@ -122,7 +122,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
1416     * There is a virtual hole at the end of the file, so as long as
1417     * offset isn't i_size or larger, return i_size.
1418     */
1419     - if (offset >= eof)
1420     + if ((unsigned long long)offset >= eof)
1421     return -ENXIO;
1422     offset = eof;
1423     break;
1424     diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
1425     index bce2e260f55e..6c95812120eb 100644
1426     --- a/fs/xfs/xfs_ioctl.c
1427     +++ b/fs/xfs/xfs_ioctl.c
1428     @@ -1085,6 +1085,7 @@ xfs_ioctl_setattr_dax_invalidate(
1429     int *join_flags)
1430     {
1431     struct inode *inode = VFS_I(ip);
1432     + struct super_block *sb = inode->i_sb;
1433     int error;
1434    
1435     *join_flags = 0;
1436     @@ -1097,7 +1098,7 @@ xfs_ioctl_setattr_dax_invalidate(
1437     if (fa->fsx_xflags & FS_XFLAG_DAX) {
1438     if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
1439     return -EINVAL;
1440     - if (ip->i_mount->m_sb.sb_blocksize != PAGE_SIZE)
1441     + if (bdev_dax_supported(sb, sb->s_blocksize) < 0)
1442     return -EINVAL;
1443     }
1444    
1445     diff --git a/include/linux/key.h b/include/linux/key.h
1446     index 722914798f37..6a544726903e 100644
1447     --- a/include/linux/key.h
1448     +++ b/include/linux/key.h
1449     @@ -176,6 +176,7 @@ struct key {
1450     #define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */
1451     #define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */
1452     #define KEY_FLAG_KEEP 10 /* set if key should not be removed */
1453     +#define KEY_FLAG_UID_KEYRING 11 /* set if key is a user or user session keyring */
1454    
1455     /* the key type and key description string
1456     * - the desc is used to match a key against search criteria
1457     @@ -235,6 +236,7 @@ extern struct key *key_alloc(struct key_type *type,
1458     #define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
1459     #define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
1460     #define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
1461     +#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
1462    
1463     extern void key_revoke(struct key *key);
1464     extern void key_invalidate(struct key *key);
1465     diff --git a/include/net/mac80211.h b/include/net/mac80211.h
1466     index e2dba93e374f..2c7d876e2a1a 100644
1467     --- a/include/net/mac80211.h
1468     +++ b/include/net/mac80211.h
1469     @@ -902,21 +902,10 @@ struct ieee80211_tx_info {
1470     unsigned long jiffies;
1471     };
1472     /* NB: vif can be NULL for injected frames */
1473     - union {
1474     - /* NB: vif can be NULL for injected frames */
1475     - struct ieee80211_vif *vif;
1476     -
1477     - /* When packets are enqueued on txq it's easy
1478     - * to re-construct the vif pointer. There's no
1479     - * more space in tx_info so it can be used to
1480     - * store the necessary enqueue time for packet
1481     - * sojourn time computation.
1482     - */
1483     - codel_time_t enqueue_time;
1484     - };
1485     + struct ieee80211_vif *vif;
1486     struct ieee80211_key_conf *hw_key;
1487     u32 flags;
1488     - /* 4 bytes free */
1489     + codel_time_t enqueue_time;
1490     } control;
1491     struct {
1492     u64 cookie;
1493     diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
1494     index 7c35e279d1e3..683057f79dca 100644
1495     --- a/include/xen/swiotlb-xen.h
1496     +++ b/include/xen/swiotlb-xen.h
1497     @@ -58,4 +58,9 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
1498    
1499     extern int
1500     xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
1501     +
1502     +extern int
1503     +xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
1504     + void *cpu_addr, dma_addr_t dma_addr, size_t size,
1505     + unsigned long attrs);
1506     #endif /* __LINUX_SWIOTLB_XEN_H */
1507     diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
1508     index 00bb0aeea1d0..77977f55dff7 100644
1509     --- a/kernel/irq/irqdesc.c
1510     +++ b/kernel/irq/irqdesc.c
1511     @@ -405,10 +405,8 @@ static void free_desc(unsigned int irq)
1512     * The sysfs entry must be serialized against a concurrent
1513     * irq_sysfs_init() as well.
1514     */
1515     - mutex_lock(&sparse_irq_lock);
1516     kobject_del(&desc->kobj);
1517     delete_irq_desc(irq);
1518     - mutex_unlock(&sparse_irq_lock);
1519    
1520     /*
1521     * We free the descriptor, masks and stat fields via RCU. That
1522     @@ -446,20 +444,15 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
1523     desc = alloc_desc(start + i, node, flags, mask, owner);
1524     if (!desc)
1525     goto err;
1526     - mutex_lock(&sparse_irq_lock);
1527     irq_insert_desc(start + i, desc);
1528     irq_sysfs_add(start + i, desc);
1529     - mutex_unlock(&sparse_irq_lock);
1530     }
1531     + bitmap_set(allocated_irqs, start, cnt);
1532     return start;
1533    
1534     err:
1535     for (i--; i >= 0; i--)
1536     free_desc(start + i);
1537     -
1538     - mutex_lock(&sparse_irq_lock);
1539     - bitmap_clear(allocated_irqs, start, cnt);
1540     - mutex_unlock(&sparse_irq_lock);
1541     return -ENOMEM;
1542     }
1543    
1544     @@ -558,6 +551,7 @@ static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
1545    
1546     desc->owner = owner;
1547     }
1548     + bitmap_set(allocated_irqs, start, cnt);
1549     return start;
1550     }
1551    
1552     @@ -653,10 +647,10 @@ void irq_free_descs(unsigned int from, unsigned int cnt)
1553     if (from >= nr_irqs || (from + cnt) > nr_irqs)
1554     return;
1555    
1556     + mutex_lock(&sparse_irq_lock);
1557     for (i = 0; i < cnt; i++)
1558     free_desc(from + i);
1559    
1560     - mutex_lock(&sparse_irq_lock);
1561     bitmap_clear(allocated_irqs, from, cnt);
1562     mutex_unlock(&sparse_irq_lock);
1563     }
1564     @@ -703,19 +697,15 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
1565     from, cnt, 0);
1566     ret = -EEXIST;
1567     if (irq >=0 && start != irq)
1568     - goto err;
1569     + goto unlock;
1570    
1571     if (start + cnt > nr_irqs) {
1572     ret = irq_expand_nr_irqs(start + cnt);
1573     if (ret)
1574     - goto err;
1575     + goto unlock;
1576     }
1577     -
1578     - bitmap_set(allocated_irqs, start, cnt);
1579     - mutex_unlock(&sparse_irq_lock);
1580     - return alloc_descs(start, cnt, node, affinity, owner);
1581     -
1582     -err:
1583     + ret = alloc_descs(start, cnt, node, affinity, owner);
1584     +unlock:
1585     mutex_unlock(&sparse_irq_lock);
1586     return ret;
1587     }
1588     diff --git a/kernel/seccomp.c b/kernel/seccomp.c
1589     index 0db7c8a2afe2..af182a6df25b 100644
1590     --- a/kernel/seccomp.c
1591     +++ b/kernel/seccomp.c
1592     @@ -457,14 +457,19 @@ static long seccomp_attach_filter(unsigned int flags,
1593     return 0;
1594     }
1595    
1596     +void __get_seccomp_filter(struct seccomp_filter *filter)
1597     +{
1598     + /* Reference count is bounded by the number of total processes. */
1599     + atomic_inc(&filter->usage);
1600     +}
1601     +
1602     /* get_seccomp_filter - increments the reference count of the filter on @tsk */
1603     void get_seccomp_filter(struct task_struct *tsk)
1604     {
1605     struct seccomp_filter *orig = tsk->seccomp.filter;
1606     if (!orig)
1607     return;
1608     - /* Reference count is bounded by the number of total processes. */
1609     - atomic_inc(&orig->usage);
1610     + __get_seccomp_filter(orig);
1611     }
1612    
1613     static inline void seccomp_filter_free(struct seccomp_filter *filter)
1614     @@ -475,10 +480,8 @@ static inline void seccomp_filter_free(struct seccomp_filter *filter)
1615     }
1616     }
1617    
1618     -/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
1619     -void put_seccomp_filter(struct task_struct *tsk)
1620     +static void __put_seccomp_filter(struct seccomp_filter *orig)
1621     {
1622     - struct seccomp_filter *orig = tsk->seccomp.filter;
1623     /* Clean up single-reference branches iteratively. */
1624     while (orig && atomic_dec_and_test(&orig->usage)) {
1625     struct seccomp_filter *freeme = orig;
1626     @@ -487,6 +490,12 @@ void put_seccomp_filter(struct task_struct *tsk)
1627     }
1628     }
1629    
1630     +/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
1631     +void put_seccomp_filter(struct task_struct *tsk)
1632     +{
1633     + __put_seccomp_filter(tsk->seccomp.filter);
1634     +}
1635     +
1636     /**
1637     * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
1638     * @syscall: syscall number to send to userland
1639     @@ -892,13 +901,13 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
1640     if (!data)
1641     goto out;
1642    
1643     - get_seccomp_filter(task);
1644     + __get_seccomp_filter(filter);
1645     spin_unlock_irq(&task->sighand->siglock);
1646    
1647     if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
1648     ret = -EFAULT;
1649    
1650     - put_seccomp_filter(task);
1651     + __put_seccomp_filter(filter);
1652     return ret;
1653    
1654     out:
1655     diff --git a/kernel/sysctl.c b/kernel/sysctl.c
1656     index 265e0d0216e3..24d603d29512 100644
1657     --- a/kernel/sysctl.c
1658     +++ b/kernel/sysctl.c
1659     @@ -1189,6 +1189,8 @@ static struct ctl_table kern_table[] = {
1660     .maxlen = sizeof(unsigned int),
1661     .mode = 0644,
1662     .proc_handler = timer_migration_handler,
1663     + .extra1 = &zero,
1664     + .extra2 = &one,
1665     },
1666     #endif
1667     #ifdef CONFIG_BPF_SYSCALL
1668     diff --git a/kernel/time/timer.c b/kernel/time/timer.c
1669     index df445cde8a1e..7d670362891a 100644
1670     --- a/kernel/time/timer.c
1671     +++ b/kernel/time/timer.c
1672     @@ -240,7 +240,7 @@ int timer_migration_handler(struct ctl_table *table, int write,
1673     int ret;
1674    
1675     mutex_lock(&mutex);
1676     - ret = proc_dointvec(table, write, buffer, lenp, ppos);
1677     + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1678     if (!ret && write)
1679     timers_update_migration(false);
1680     mutex_unlock(&mutex);
1681     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1682     index f95bf81529f5..c1e50cc0d7b0 100644
1683     --- a/kernel/trace/trace.c
1684     +++ b/kernel/trace/trace.c
1685     @@ -3569,11 +3569,17 @@ static int tracing_open(struct inode *inode, struct file *file)
1686     /* If this file was open for write, then erase contents */
1687     if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1688     int cpu = tracing_get_cpu(inode);
1689     + struct trace_buffer *trace_buf = &tr->trace_buffer;
1690     +
1691     +#ifdef CONFIG_TRACER_MAX_TRACE
1692     + if (tr->current_trace->print_max)
1693     + trace_buf = &tr->max_buffer;
1694     +#endif
1695    
1696     if (cpu == RING_BUFFER_ALL_CPUS)
1697     - tracing_reset_online_cpus(&tr->trace_buffer);
1698     + tracing_reset_online_cpus(trace_buf);
1699     else
1700     - tracing_reset(&tr->trace_buffer, cpu);
1701     + tracing_reset(trace_buf, cpu);
1702     }
1703    
1704     if (file->f_mode & FMODE_READ) {
1705     @@ -5128,7 +5134,7 @@ static int tracing_wait_pipe(struct file *filp)
1706     *
1707     * iter->pos will be 0 if we haven't read anything.
1708     */
1709     - if (!tracing_is_on() && iter->pos)
1710     + if (!tracer_tracing_is_on(iter->tr) && iter->pos)
1711     break;
1712    
1713     mutex_unlock(&iter->mutex);
1714     diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
1715     index 37bec0f864b7..a7aa54f45e19 100644
1716     --- a/net/mac80211/iface.c
1717     +++ b/net/mac80211/iface.c
1718     @@ -791,6 +791,7 @@ static int ieee80211_open(struct net_device *dev)
1719     static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
1720     bool going_down)
1721     {
1722     + struct ieee80211_sub_if_data *txq_sdata = sdata;
1723     struct ieee80211_local *local = sdata->local;
1724     struct fq *fq = &local->fq;
1725     unsigned long flags;
1726     @@ -931,6 +932,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
1727    
1728     switch (sdata->vif.type) {
1729     case NL80211_IFTYPE_AP_VLAN:
1730     + txq_sdata = container_of(sdata->bss,
1731     + struct ieee80211_sub_if_data, u.ap);
1732     +
1733     mutex_lock(&local->mtx);
1734     list_del(&sdata->u.vlan.list);
1735     mutex_unlock(&local->mtx);
1736     @@ -1001,8 +1005,17 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
1737     }
1738     spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1739    
1740     - if (sdata->vif.txq) {
1741     - struct txq_info *txqi = to_txq_info(sdata->vif.txq);
1742     + if (txq_sdata->vif.txq) {
1743     + struct txq_info *txqi = to_txq_info(txq_sdata->vif.txq);
1744     +
1745     + /*
1746     + * FIXME FIXME
1747     + *
1748     + * We really shouldn't purge the *entire* txqi since that
1749     + * contains frames for the other AP_VLANs (and possibly
1750     + * the AP itself) as well, but there's no API in FQ now
1751     + * to be able to filter.
1752     + */
1753    
1754     spin_lock_bh(&fq->lock);
1755     ieee80211_txq_purge(local, txqi);
1756     diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
1757     index eede5c6db8d5..30bba53c2992 100644
1758     --- a/net/mac80211/offchannel.c
1759     +++ b/net/mac80211/offchannel.c
1760     @@ -707,6 +707,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
1761     if (!cookie)
1762     return -ENOENT;
1763    
1764     + flush_work(&local->hw_roc_start);
1765     +
1766     mutex_lock(&local->mtx);
1767     list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
1768     if (!mgmt_tx && roc->cookie != cookie)
1769     diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
1770     index dd190ff3daea..274c564bd9af 100644
1771     --- a/net/mac80211/tx.c
1772     +++ b/net/mac80211/tx.c
1773     @@ -1277,11 +1277,6 @@ static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
1774     IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
1775     }
1776    
1777     -static void ieee80211_set_skb_vif(struct sk_buff *skb, struct txq_info *txqi)
1778     -{
1779     - IEEE80211_SKB_CB(skb)->control.vif = txqi->txq.vif;
1780     -}
1781     -
1782     static u32 codel_skb_len_func(const struct sk_buff *skb)
1783     {
1784     return skb->len;
1785     @@ -3388,6 +3383,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
1786     struct ieee80211_tx_info *info;
1787     struct ieee80211_tx_data tx;
1788     ieee80211_tx_result r;
1789     + struct ieee80211_vif *vif;
1790    
1791     spin_lock_bh(&fq->lock);
1792    
1793     @@ -3404,8 +3400,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
1794     if (!skb)
1795     goto out;
1796    
1797     - ieee80211_set_skb_vif(skb, txqi);
1798     -
1799     hdr = (struct ieee80211_hdr *)skb->data;
1800     info = IEEE80211_SKB_CB(skb);
1801    
1802     @@ -3462,6 +3456,34 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
1803     }
1804     }
1805    
1806     + switch (tx.sdata->vif.type) {
1807     + case NL80211_IFTYPE_MONITOR:
1808     + if (tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
1809     + vif = &tx.sdata->vif;
1810     + break;
1811     + }
1812     + tx.sdata = rcu_dereference(local->monitor_sdata);
1813     + if (tx.sdata) {
1814     + vif = &tx.sdata->vif;
1815     + info->hw_queue =
1816     + vif->hw_queue[skb_get_queue_mapping(skb)];
1817     + } else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
1818     + ieee80211_free_txskb(&local->hw, skb);
1819     + goto begin;
1820     + } else {
1821     + vif = NULL;
1822     + }
1823     + break;
1824     + case NL80211_IFTYPE_AP_VLAN:
1825     + tx.sdata = container_of(tx.sdata->bss,
1826     + struct ieee80211_sub_if_data, u.ap);
1827     + /* fall through */
1828     + default:
1829     + vif = &tx.sdata->vif;
1830     + break;
1831     + }
1832     +
1833     + IEEE80211_SKB_CB(skb)->control.vif = vif;
1834     out:
1835     spin_unlock_bh(&fq->lock);
1836    
1837     diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
1838     index e9e9bc5c8773..ece0fbc08607 100644
1839     --- a/net/wireless/nl80211.c
1840     +++ b/net/wireless/nl80211.c
1841     @@ -10385,6 +10385,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
1842     if (err)
1843     return err;
1844    
1845     + if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
1846     + !tb[NL80211_REKEY_DATA_KCK])
1847     + return -EINVAL;
1848     if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
1849     return -ERANGE;
1850     if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
1851     diff --git a/security/keys/Kconfig b/security/keys/Kconfig
1852     index d942c7c2bc0a..e0a39781b10f 100644
1853     --- a/security/keys/Kconfig
1854     +++ b/security/keys/Kconfig
1855     @@ -41,10 +41,8 @@ config BIG_KEYS
1856     bool "Large payload keys"
1857     depends on KEYS
1858     depends on TMPFS
1859     - depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y)
1860     select CRYPTO_AES
1861     - select CRYPTO_ECB
1862     - select CRYPTO_RNG
1863     + select CRYPTO_GCM
1864     help
1865     This option provides support for holding large keys within the kernel
1866     (for example Kerberos ticket caches). The data may be stored out to
1867     diff --git a/security/keys/big_key.c b/security/keys/big_key.c
1868     index 835c1ab30d01..47c6dcab1a8e 100644
1869     --- a/security/keys/big_key.c
1870     +++ b/security/keys/big_key.c
1871     @@ -1,5 +1,6 @@
1872     /* Large capacity key type
1873     *
1874     + * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
1875     * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
1876     * Written by David Howells (dhowells@redhat.com)
1877     *
1878     @@ -16,10 +17,10 @@
1879     #include <linux/shmem_fs.h>
1880     #include <linux/err.h>
1881     #include <linux/scatterlist.h>
1882     +#include <linux/random.h>
1883     #include <keys/user-type.h>
1884     #include <keys/big_key-type.h>
1885     -#include <crypto/rng.h>
1886     -#include <crypto/skcipher.h>
1887     +#include <crypto/aead.h>
1888    
1889     /*
1890     * Layout of key payload words.
1891     @@ -49,7 +50,12 @@ enum big_key_op {
1892     /*
1893     * Key size for big_key data encryption
1894     */
1895     -#define ENC_KEY_SIZE 16
1896     +#define ENC_KEY_SIZE 32
1897     +
1898     +/*
1899     + * Authentication tag length
1900     + */
1901     +#define ENC_AUTHTAG_SIZE 16
1902    
1903     /*
1904     * big_key defined keys take an arbitrary string as the description and an
1905     @@ -64,57 +70,62 @@ struct key_type key_type_big_key = {
1906     .destroy = big_key_destroy,
1907     .describe = big_key_describe,
1908     .read = big_key_read,
1909     + /* no ->update(); don't add it without changing big_key_crypt() nonce */
1910     };
1911    
1912     /*
1913     - * Crypto names for big_key data encryption
1914     + * Crypto names for big_key data authenticated encryption
1915     */
1916     -static const char big_key_rng_name[] = "stdrng";
1917     -static const char big_key_alg_name[] = "ecb(aes)";
1918     +static const char big_key_alg_name[] = "gcm(aes)";
1919    
1920     /*
1921     - * Crypto algorithms for big_key data encryption
1922     + * Crypto algorithms for big_key data authenticated encryption
1923     */
1924     -static struct crypto_rng *big_key_rng;
1925     -static struct crypto_skcipher *big_key_skcipher;
1926     +static struct crypto_aead *big_key_aead;
1927    
1928     /*
1929     - * Generate random key to encrypt big_key data
1930     + * Since changing the key affects the entire object, we need a mutex.
1931     */
1932     -static inline int big_key_gen_enckey(u8 *key)
1933     -{
1934     - return crypto_rng_get_bytes(big_key_rng, key, ENC_KEY_SIZE);
1935     -}
1936     +static DEFINE_MUTEX(big_key_aead_lock);
1937    
1938     /*
1939     * Encrypt/decrypt big_key data
1940     */
1941     static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
1942     {
1943     - int ret = -EINVAL;
1944     + int ret;
1945     struct scatterlist sgio;
1946     - SKCIPHER_REQUEST_ON_STACK(req, big_key_skcipher);
1947     -
1948     - if (crypto_skcipher_setkey(big_key_skcipher, key, ENC_KEY_SIZE)) {
1949     + struct aead_request *aead_req;
1950     + /* We always use a zero nonce. The reason we can get away with this is
1951     + * because we're using a different randomly generated key for every
1952     + * different encryption. Notably, too, key_type_big_key doesn't define
1953     + * an .update function, so there's no chance we'll wind up reusing the
1954     + * key to encrypt updated data. Simply put: one key, one encryption.
1955     + */
1956     + u8 zero_nonce[crypto_aead_ivsize(big_key_aead)];
1957     +
1958     + aead_req = aead_request_alloc(big_key_aead, GFP_KERNEL);
1959     + if (!aead_req)
1960     + return -ENOMEM;
1961     +
1962     + memset(zero_nonce, 0, sizeof(zero_nonce));
1963     + sg_init_one(&sgio, data, datalen + (op == BIG_KEY_ENC ? ENC_AUTHTAG_SIZE : 0));
1964     + aead_request_set_crypt(aead_req, &sgio, &sgio, datalen, zero_nonce);
1965     + aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
1966     + aead_request_set_ad(aead_req, 0);
1967     +
1968     + mutex_lock(&big_key_aead_lock);
1969     + if (crypto_aead_setkey(big_key_aead, key, ENC_KEY_SIZE)) {
1970     ret = -EAGAIN;
1971     goto error;
1972     }
1973     -
1974     - skcipher_request_set_tfm(req, big_key_skcipher);
1975     - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
1976     - NULL, NULL);
1977     -
1978     - sg_init_one(&sgio, data, datalen);
1979     - skcipher_request_set_crypt(req, &sgio, &sgio, datalen, NULL);
1980     -
1981     if (op == BIG_KEY_ENC)
1982     - ret = crypto_skcipher_encrypt(req);
1983     + ret = crypto_aead_encrypt(aead_req);
1984     else
1985     - ret = crypto_skcipher_decrypt(req);
1986     -
1987     - skcipher_request_zero(req);
1988     -
1989     + ret = crypto_aead_decrypt(aead_req);
1990     error:
1991     + mutex_unlock(&big_key_aead_lock);
1992     + aead_request_free(aead_req);
1993     return ret;
1994     }
1995    
1996     @@ -146,15 +157,13 @@ int big_key_preparse(struct key_preparsed_payload *prep)
1997     *
1998     * File content is stored encrypted with randomly generated key.
1999     */
2000     - size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));
2001     + size_t enclen = datalen + ENC_AUTHTAG_SIZE;
2002    
2003     - /* prepare aligned data to encrypt */
2004     data = kmalloc(enclen, GFP_KERNEL);
2005     if (!data)
2006     return -ENOMEM;
2007    
2008     memcpy(data, prep->data, datalen);
2009     - memset(data + datalen, 0x00, enclen - datalen);
2010    
2011     /* generate random key */
2012     enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
2013     @@ -162,13 +171,10 @@ int big_key_preparse(struct key_preparsed_payload *prep)
2014     ret = -ENOMEM;
2015     goto error;
2016     }
2017     -
2018     - ret = big_key_gen_enckey(enckey);
2019     - if (ret)
2020     - goto err_enckey;
2021     + get_random_bytes(enckey, ENC_KEY_SIZE);
2022    
2023     /* encrypt aligned data */
2024     - ret = big_key_crypt(BIG_KEY_ENC, data, enclen, enckey);
2025     + ret = big_key_crypt(BIG_KEY_ENC, data, datalen, enckey);
2026     if (ret)
2027     goto err_enckey;
2028    
2029     @@ -194,7 +200,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
2030     *path = file->f_path;
2031     path_get(path);
2032     fput(file);
2033     - kfree(data);
2034     + kzfree(data);
2035     } else {
2036     /* Just store the data in a buffer */
2037     void *data = kmalloc(datalen, GFP_KERNEL);
2038     @@ -210,9 +216,9 @@ int big_key_preparse(struct key_preparsed_payload *prep)
2039     err_fput:
2040     fput(file);
2041     err_enckey:
2042     - kfree(enckey);
2043     + kzfree(enckey);
2044     error:
2045     - kfree(data);
2046     + kzfree(data);
2047     return ret;
2048     }
2049    
2050     @@ -226,7 +232,7 @@ void big_key_free_preparse(struct key_preparsed_payload *prep)
2051    
2052     path_put(path);
2053     }
2054     - kfree(prep->payload.data[big_key_data]);
2055     + kzfree(prep->payload.data[big_key_data]);
2056     }
2057    
2058     /*
2059     @@ -258,7 +264,7 @@ void big_key_destroy(struct key *key)
2060     path->mnt = NULL;
2061     path->dentry = NULL;
2062     }
2063     - kfree(key->payload.data[big_key_data]);
2064     + kzfree(key->payload.data[big_key_data]);
2065     key->payload.data[big_key_data] = NULL;
2066     }
2067    
2068     @@ -294,7 +300,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
2069     struct file *file;
2070     u8 *data;
2071     u8 *enckey = (u8 *)key->payload.data[big_key_data];
2072     - size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));
2073     + size_t enclen = datalen + ENC_AUTHTAG_SIZE;
2074    
2075     data = kmalloc(enclen, GFP_KERNEL);
2076     if (!data)
2077     @@ -326,7 +332,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
2078     err_fput:
2079     fput(file);
2080     error:
2081     - kfree(data);
2082     + kzfree(data);
2083     } else {
2084     ret = datalen;
2085     if (copy_to_user(buffer, key->payload.data[big_key_data],
2086     @@ -342,47 +348,31 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
2087     */
2088     static int __init big_key_init(void)
2089     {
2090     - struct crypto_skcipher *cipher;
2091     - struct crypto_rng *rng;
2092     int ret;
2093    
2094     - rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
2095     - if (IS_ERR(rng)) {
2096     - pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng));
2097     - return PTR_ERR(rng);
2098     - }
2099     -
2100     - big_key_rng = rng;
2101     -
2102     - /* seed RNG */
2103     - ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
2104     - if (ret) {
2105     - pr_err("Can't reset rng: %d\n", ret);
2106     - goto error_rng;
2107     - }
2108     -
2109     /* init block cipher */
2110     - cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
2111     - if (IS_ERR(cipher)) {
2112     - ret = PTR_ERR(cipher);
2113     + big_key_aead = crypto_alloc_aead(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
2114     + if (IS_ERR(big_key_aead)) {
2115     + ret = PTR_ERR(big_key_aead);
2116     pr_err("Can't alloc crypto: %d\n", ret);
2117     - goto error_rng;
2118     + return ret;
2119     + }
2120     + ret = crypto_aead_setauthsize(big_key_aead, ENC_AUTHTAG_SIZE);
2121     + if (ret < 0) {
2122     + pr_err("Can't set crypto auth tag len: %d\n", ret);
2123     + goto free_aead;
2124     }
2125     -
2126     - big_key_skcipher = cipher;
2127    
2128     ret = register_key_type(&key_type_big_key);
2129     if (ret < 0) {
2130     pr_err("Can't register type: %d\n", ret);
2131     - goto error_cipher;
2132     + goto free_aead;
2133     }
2134    
2135     return 0;
2136    
2137     -error_cipher:
2138     - crypto_free_skcipher(big_key_skcipher);
2139     -error_rng:
2140     - crypto_free_rng(big_key_rng);
2141     +free_aead:
2142     + crypto_free_aead(big_key_aead);
2143     return ret;
2144     }
2145    
2146     diff --git a/security/keys/internal.h b/security/keys/internal.h
2147     index a705a7d92ad7..fb0c65049c19 100644
2148     --- a/security/keys/internal.h
2149     +++ b/security/keys/internal.h
2150     @@ -137,7 +137,7 @@ extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
2151     extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
2152     extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
2153    
2154     -extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
2155     +extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
2156    
2157     extern int install_user_keyrings(void);
2158     extern int install_thread_keyring_to_cred(struct cred *);
2159     diff --git a/security/keys/key.c b/security/keys/key.c
2160     index 2f4ce35ae2aa..135e1eb7e468 100644
2161     --- a/security/keys/key.c
2162     +++ b/security/keys/key.c
2163     @@ -301,6 +301,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
2164     key->flags |= 1 << KEY_FLAG_IN_QUOTA;
2165     if (flags & KEY_ALLOC_BUILT_IN)
2166     key->flags |= 1 << KEY_FLAG_BUILTIN;
2167     + if (flags & KEY_ALLOC_UID_KEYRING)
2168     + key->flags |= 1 << KEY_FLAG_UID_KEYRING;
2169    
2170     #ifdef KEY_DEBUGGING
2171     key->magic = KEY_DEBUG_MAGIC;
2172     diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
2173     index ada12c3e3ac4..1302cb398346 100644
2174     --- a/security/keys/keyctl.c
2175     +++ b/security/keys/keyctl.c
2176     @@ -766,6 +766,11 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
2177    
2178     key = key_ref_to_ptr(key_ref);
2179    
2180     + if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
2181     + ret = -ENOKEY;
2182     + goto error2;
2183     + }
2184     +
2185     /* see if we can read it directly */
2186     ret = key_permission(key_ref, KEY_NEED_READ);
2187     if (ret == 0)
2188     diff --git a/security/keys/keyring.c b/security/keys/keyring.c
2189     index c91e4e0cea08..a86d0ae1773c 100644
2190     --- a/security/keys/keyring.c
2191     +++ b/security/keys/keyring.c
2192     @@ -416,7 +416,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
2193     }
2194    
2195     struct keyring_read_iterator_context {
2196     - size_t qty;
2197     + size_t buflen;
2198     size_t count;
2199     key_serial_t __user *buffer;
2200     };
2201     @@ -428,9 +428,9 @@ static int keyring_read_iterator(const void *object, void *data)
2202     int ret;
2203    
2204     kenter("{%s,%d},,{%zu/%zu}",
2205     - key->type->name, key->serial, ctx->count, ctx->qty);
2206     + key->type->name, key->serial, ctx->count, ctx->buflen);
2207    
2208     - if (ctx->count >= ctx->qty)
2209     + if (ctx->count >= ctx->buflen)
2210     return 1;
2211    
2212     ret = put_user(key->serial, ctx->buffer);
2213     @@ -465,16 +465,12 @@ static long keyring_read(const struct key *keyring,
2214     return 0;
2215    
2216     /* Calculate how much data we could return */
2217     - ctx.qty = nr_keys * sizeof(key_serial_t);
2218     -
2219     if (!buffer || !buflen)
2220     - return ctx.qty;
2221     -
2222     - if (buflen > ctx.qty)
2223     - ctx.qty = buflen;
2224     + return nr_keys * sizeof(key_serial_t);
2225    
2226     /* Copy the IDs of the subscribed keys into the buffer */
2227     ctx.buffer = (key_serial_t __user *)buffer;
2228     + ctx.buflen = buflen;
2229     ctx.count = 0;
2230     ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
2231     if (ret < 0) {
2232     @@ -989,15 +985,15 @@ key_ref_t find_key_to_update(key_ref_t keyring_ref,
2233     /*
2234     * Find a keyring with the specified name.
2235     *
2236     - * All named keyrings in the current user namespace are searched, provided they
2237     - * grant Search permission directly to the caller (unless this check is
2238     - * skipped). Keyrings whose usage points have reached zero or who have been
2239     - * revoked are skipped.
2240     + * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
2241     + * user in the current user namespace are considered. If @uid_keyring is %true,
2242     + * the keyring additionally must have been allocated as a user or user session
2243     + * keyring; otherwise, it must grant Search permission directly to the caller.
2244     *
2245     * Returns a pointer to the keyring with the keyring's refcount having being
2246     * incremented on success. -ENOKEY is returned if a key could not be found.
2247     */
2248     -struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
2249     +struct key *find_keyring_by_name(const char *name, bool uid_keyring)
2250     {
2251     struct key *keyring;
2252     int bucket;
2253     @@ -1025,10 +1021,15 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
2254     if (strcmp(keyring->description, name) != 0)
2255     continue;
2256    
2257     - if (!skip_perm_check &&
2258     - key_permission(make_key_ref(keyring, 0),
2259     - KEY_NEED_SEARCH) < 0)
2260     - continue;
2261     + if (uid_keyring) {
2262     + if (!test_bit(KEY_FLAG_UID_KEYRING,
2263     + &keyring->flags))
2264     + continue;
2265     + } else {
2266     + if (key_permission(make_key_ref(keyring, 0),
2267     + KEY_NEED_SEARCH) < 0)
2268     + continue;
2269     + }
2270    
2271     /* we've got a match but we might end up racing with
2272     * key_cleanup() if the keyring is currently 'dead'
2273     diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
2274     index 45536c677b05..ce45c78cf0a2 100644
2275     --- a/security/keys/process_keys.c
2276     +++ b/security/keys/process_keys.c
2277     @@ -76,7 +76,8 @@ int install_user_keyrings(void)
2278     if (IS_ERR(uid_keyring)) {
2279     uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
2280     cred, user_keyring_perm,
2281     - KEY_ALLOC_IN_QUOTA,
2282     + KEY_ALLOC_UID_KEYRING |
2283     + KEY_ALLOC_IN_QUOTA,
2284     NULL, NULL);
2285     if (IS_ERR(uid_keyring)) {
2286     ret = PTR_ERR(uid_keyring);
2287     @@ -93,7 +94,8 @@ int install_user_keyrings(void)
2288     session_keyring =
2289     keyring_alloc(buf, user->uid, INVALID_GID,
2290     cred, user_keyring_perm,
2291     - KEY_ALLOC_IN_QUOTA,
2292     + KEY_ALLOC_UID_KEYRING |
2293     + KEY_ALLOC_IN_QUOTA,
2294     NULL, NULL);
2295     if (IS_ERR(session_keyring)) {
2296     ret = PTR_ERR(session_keyring);
2297     diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
2298     index 03f1fa495d74..cbb0564c0ec4 100644
2299     --- a/tools/testing/selftests/seccomp/seccomp_bpf.c
2300     +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
2301     @@ -6,10 +6,18 @@
2302     */
2303    
2304     #include <sys/types.h>
2305     -#include <asm/siginfo.h>
2306     -#define __have_siginfo_t 1
2307     -#define __have_sigval_t 1
2308     -#define __have_sigevent_t 1
2309     +
2310     +/*
2311     + * glibc 2.26 and later have SIGSYS in siginfo_t. Before that,
2312     + * we need to use the kernel's siginfo.h file and trick glibc
2313     + * into accepting it.
2314     + */
2315     +#if !__GLIBC_PREREQ(2, 26)
2316     +# include <asm/siginfo.h>
2317     +# define __have_siginfo_t 1
2318     +# define __have_sigval_t 1
2319     +# define __have_sigevent_t 1
2320     +#endif
2321    
2322     #include <errno.h>
2323     #include <linux/filter.h>
2324     @@ -676,7 +684,7 @@ TEST_F_SIGNAL(TRAP, ign, SIGSYS)
2325     syscall(__NR_getpid);
2326     }
2327    
2328     -static struct siginfo TRAP_info;
2329     +static siginfo_t TRAP_info;
2330     static volatile int TRAP_nr;
2331     static void TRAP_action(int nr, siginfo_t *info, void *void_context)
2332     {